// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void convdw5x5s1_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
#if __aarch64__
const int w = bottom_blob.w;
#endif
const int outw = top_blob.w;
const int outh = top_blob.h;
const int group = bottom_blob.c;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
const unsigned short* kptr = kernel.row<const unsigned short>(g);
unsigned short* outptr0 = out.row<unsigned short>(0);
const Mat img0 = bottom_blob.channel(g);
const unsigned short* r0 = img0.row<const unsigned short>(0);
const unsigned short* r1 = img0.row<const unsigned short>(1);
const unsigned short* r2 = img0.row<const unsigned short>(2);
const unsigned short* r3 = img0.row<const unsigned short>(3);
const unsigned short* r4 = img0.row<const unsigned short>(4);
#if __aarch64__
unsigned short* outptr1 = out.row<unsigned short>(1);
const unsigned short* r5 = img0.row<const unsigned short>(5);
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f);
// 4 * 25
uint16x8_t _k00_01 = vld1q_u16(kptr);
uint16x8_t _k02_03 = vld1q_u16(kptr + 8);
uint16x8_t _k04_10 = vld1q_u16(kptr + 16);
uint16x8_t _k11_12 = vld1q_u16(kptr + 24);
uint16x8_t _k13_14 = vld1q_u16(kptr + 32);
uint16x8_t _k20_21 = vld1q_u16(kptr + 40);
uint16x8_t _k22_23 = vld1q_u16(kptr + 48);
uint16x8_t _k24_30 = vld1q_u16(kptr + 56);
uint16x8_t _k31_32 = vld1q_u16(kptr + 64);
uint16x8_t _k33_34 = vld1q_u16(kptr + 72);
uint16x8_t _k40_41 = vld1q_u16(kptr + 80);
uint16x8_t _k42_43 = vld1q_u16(kptr + 88);
uint16x4_t _k44 = vld1_u16(kptr + 96);
#else // __aarch64__
float bias0_data[4];
if (bias)
{
bias0_data[0] = bias[g * 4 + 0];
bias0_data[1] = bias[g * 4 + 1];
bias0_data[2] = bias[g * 4 + 2];
bias0_data[3] = bias[g * 4 + 3];
}
else
{
bias0_data[0] = 0.f;
bias0_data[1] = 0.f;
bias0_data[2] = 0.f;
bias0_data[3] = 0.f;
}
const float* bias0_data_ptr = bias0_data;
#endif // __aarch64__
int i = 0;
#if __aarch64__
for (; i + 1 < outh; i += 2)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
asm volatile(
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n" // r10 r11 r12 r13
"shll2 v14.4s, %18.8h, #16 \n"
"mov v24.16b, %29.16b \n" // sum00
"mov v25.16b, %29.16b \n" // sum01
"mov v26.16b, %29.16b \n" // sum02
"mov v27.16b, %29.16b \n" // sum03
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"mov v28.16b, %29.16b \n" // sum10
"mov v29.16b, %29.16b \n" // sum11
"mov v30.16b, %29.16b \n" // sum12
"mov v31.16b, %29.16b \n" // sum13
"shll v15.4s, %16.4h, #16 \n"
"fmla v24.4s, v14.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v25.4s, v14.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v26.4s, v14.4s, v18.4s \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3] \n" // r14 r15 r16 r17
"fmla v27.4s, v14.4s, v19.4s \n"
"shll v14.4s, %19.4h, #16 \n"
"fmla v28.4s, v15.4s, v16.4s \n"
"fmla v29.4s, v15.4s, v17.4s \n"
"fmla v30.4s, v15.4s, v18.4s \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"shll2 v15.4s, %16.8h, #16 \n"
"fmla v24.4s, v14.4s, v17.4s \n"
"fmla v25.4s, v14.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v26.4s, v14.4s, v19.4s \n"
"fmla v27.4s, v14.4s, v20.4s \n"
"shll2 v14.4s, %19.8h, #16 \n"
"fmla v28.4s, v15.4s, v17.4s \n"
"fmla v29.4s, v15.4s, v18.4s \n"
"fmla v30.4s, v15.4s, v19.4s \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"shll v15.4s, %17.4h, #16 \n"
"fmla v24.4s, v14.4s, v18.4s \n"
"fmla v25.4s, v14.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v26.4s, v14.4s, v20.4s \n"
"fmla v27.4s, v14.4s, v21.4s \n"
"shll v14.4s, %20.4h, #16 \n"
"fmla v28.4s, v15.4s, v18.4s \n"
"fmla v29.4s, v15.4s, v19.4s \n"
"fmla v30.4s, v15.4s, v20.4s \n"
"fmla v31.4s, v15.4s, v21.4s \n"
"shll2 v15.4s, %17.8h, #16 \n"
"fmla v24.4s, v14.4s, v19.4s \n"
"fmla v25.4s, v14.4s, v20.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v26.4s, v14.4s, v21.4s \n"
"fmla v27.4s, v14.4s, v22.4s \n"
"shll2 v14.4s, %20.8h, #16 \n"
"fmla v28.4s, v15.4s, v19.4s \n"
"fmla v29.4s, v15.4s, v20.4s \n"
"fmla v30.4s, v15.4s, v21.4s \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%4], #32 \n" // r20 r21 r22 r23
"fmla v31.4s, v15.4s, v22.4s \n"
"shll v15.4s, %18.4h, #16 \n"
"fmla v24.4s, v14.4s, v20.4s \n"
"fmla v25.4s, v14.4s, v21.4s \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v26.4s, v14.4s, v22.4s \n"
"fmla v27.4s, v14.4s, v23.4s \n"
"shll v14.4s, %21.4h, #16 \n"
"fmla v28.4s, v15.4s, v20.4s \n"
"fmla v29.4s, v15.4s, v21.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v30.4s, v15.4s, v22.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v31.4s, v15.4s, v23.4s \n"
"shll2 v15.4s, %18.8h, #16 \n"
"fmla v24.4s, v14.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v25.4s, v14.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v26.4s, v14.4s, v18.4s \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%4] \n" // r24 r25 r26 r27
"fmla v27.4s, v14.4s, v19.4s \n"
"shll2 v14.4s, %21.8h, #16 \n"
"fmla v28.4s, v15.4s, v16.4s \n"
"fmla v29.4s, v15.4s, v17.4s \n"
"fmla v30.4s, v15.4s, v18.4s \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"shll v15.4s, %19.4h, #16 \n"
"fmla v24.4s, v14.4s, v17.4s \n"
"fmla v25.4s, v14.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v26.4s, v14.4s, v19.4s \n"
"fmla v27.4s, v14.4s, v20.4s \n"
"shll v14.4s, %22.4h, #16 \n"
"fmla v28.4s, v15.4s, v17.4s \n"
"fmla v29.4s, v15.4s, v18.4s \n"
"fmla v30.4s, v15.4s, v19.4s \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"shll2 v15.4s, %19.8h, #16 \n"
"fmla v24.4s, v14.4s, v18.4s \n"
"fmla v25.4s, v14.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v26.4s, v14.4s, v20.4s \n"
"fmla v27.4s, v14.4s, v21.4s \n"
"shll2 v14.4s, %22.8h, #16 \n"
"fmla v28.4s, v15.4s, v18.4s \n"
"fmla v29.4s, v15.4s, v19.4s \n"
"fmla v30.4s, v15.4s, v20.4s \n"
"fmla v31.4s, v15.4s, v21.4s \n"
"shll v15.4s, %20.4h, #16 \n"
"fmla v24.4s, v14.4s, v19.4s \n"
"fmla v25.4s, v14.4s, v20.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v26.4s, v14.4s, v21.4s \n"
"fmla v27.4s, v14.4s, v22.4s \n"
"shll v14.4s, %23.4h, #16 \n"
"fmla v28.4s, v15.4s, v19.4s \n"
"fmla v29.4s, v15.4s, v20.4s \n"
"fmla v30.4s, v15.4s, v21.4s \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%5], #32 \n" // r30 r31 r32 r33
"fmla v31.4s, v15.4s, v22.4s \n"
"shll2 v15.4s, %20.8h, #16 \n"
"fmla v24.4s, v14.4s, v20.4s \n"
"fmla v25.4s, v14.4s, v21.4s \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v26.4s, v14.4s, v22.4s \n"
"fmla v27.4s, v14.4s, v23.4s \n"
"shll2 v14.4s, %23.8h, #16 \n"
"fmla v28.4s, v15.4s, v20.4s \n"
"fmla v29.4s, v15.4s, v21.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v30.4s, v15.4s, v22.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v31.4s, v15.4s, v23.4s \n"
"shll v15.4s, %21.4h, #16 \n"
"fmla v24.4s, v14.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v25.4s, v14.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v26.4s, v14.4s, v18.4s \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%5] \n" // r34 r35 r36 r37
"fmla v27.4s, v14.4s, v19.4s \n"
"shll v14.4s, %24.4h, #16 \n"
"fmla v28.4s, v15.4s, v16.4s \n"
"fmla v29.4s, v15.4s, v17.4s \n"
"fmla v30.4s, v15.4s, v18.4s \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"shll2 v15.4s, %21.8h, #16 \n"
"fmla v24.4s, v14.4s, v17.4s \n"
"fmla v25.4s, v14.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v26.4s, v14.4s, v19.4s \n"
"fmla v27.4s, v14.4s, v20.4s \n"
"shll2 v14.4s, %24.8h, #16 \n"
"fmla v28.4s, v15.4s, v17.4s \n"
"fmla v29.4s, v15.4s, v18.4s \n"
"fmla v30.4s, v15.4s, v19.4s \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"shll v15.4s, %22.4h, #16 \n"
"fmla v24.4s, v14.4s, v18.4s \n"
"fmla v25.4s, v14.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v26.4s, v14.4s, v20.4s \n"
"fmla v27.4s, v14.4s, v21.4s \n"
"shll v14.4s, %25.4h, #16 \n"
"fmla v28.4s, v15.4s, v18.4s \n"
"fmla v29.4s, v15.4s, v19.4s \n"
"fmla v30.4s, v15.4s, v20.4s \n"
"fmla v31.4s, v15.4s, v21.4s \n"
"shll2 v15.4s, %22.8h, #16 \n"
"fmla v24.4s, v14.4s, v19.4s \n"
"fmla v25.4s, v14.4s, v20.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v26.4s, v14.4s, v21.4s \n"
"fmla v27.4s, v14.4s, v22.4s \n"
"shll2 v14.4s, %25.8h, #16 \n"
"fmla v28.4s, v15.4s, v19.4s \n"
"fmla v29.4s, v15.4s, v20.4s \n"
"fmla v30.4s, v15.4s, v21.4s \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%6], #32 \n" // r40 r41 r42 r43
"fmla v31.4s, v15.4s, v22.4s \n"
"shll v15.4s, %23.4h, #16 \n"
"fmla v24.4s, v14.4s, v20.4s \n"
"fmla v25.4s, v14.4s, v21.4s \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v26.4s, v14.4s, v22.4s \n"
"fmla v27.4s, v14.4s, v23.4s \n"
"shll v14.4s, %26.4h, #16 \n"
"fmla v28.4s, v15.4s, v20.4s \n"
"fmla v29.4s, v15.4s, v21.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v30.4s, v15.4s, v22.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v31.4s, v15.4s, v23.4s \n"
"shll2 v15.4s, %23.8h, #16 \n"
"fmla v24.4s, v14.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v25.4s, v14.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v26.4s, v14.4s, v18.4s \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%6] \n" // r44 r45 r46 r47
"fmla v27.4s, v14.4s, v19.4s \n"
"shll2 v14.4s, %26.8h, #16 \n"
"fmla v28.4s, v15.4s, v16.4s \n"
"fmla v29.4s, v15.4s, v17.4s \n"
"fmla v30.4s, v15.4s, v18.4s \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"shll v15.4s, %24.4h, #16 \n"
"fmla v24.4s, v14.4s, v17.4s \n"
"fmla v25.4s, v14.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v26.4s, v14.4s, v19.4s \n"
"fmla v27.4s, v14.4s, v20.4s \n"
"shll v14.4s, %27.4h, #16 \n"
"fmla v28.4s, v15.4s, v17.4s \n"
"fmla v29.4s, v15.4s, v18.4s \n"
"fmla v30.4s, v15.4s, v19.4s \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"shll2 v15.4s, %24.8h, #16 \n"
"fmla v24.4s, v14.4s, v18.4s \n"
"fmla v25.4s, v14.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v26.4s, v14.4s, v20.4s \n"
"fmla v27.4s, v14.4s, v21.4s \n"
"shll2 v14.4s, %27.8h, #16 \n"
"fmla v28.4s, v15.4s, v18.4s \n"
"fmla v29.4s, v15.4s, v19.4s \n"
"fmla v30.4s, v15.4s, v20.4s \n"
"fmla v31.4s, v15.4s, v21.4s \n"
"shll v15.4s, %25.4h, #16 \n"
"fmla v24.4s, v14.4s, v19.4s \n"
"fmla v25.4s, v14.4s, v20.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v26.4s, v14.4s, v21.4s \n"
"fmla v27.4s, v14.4s, v22.4s \n"
"shll v14.4s, %28.4h, #16 \n"
"fmla v28.4s, v15.4s, v19.4s \n"
"fmla v29.4s, v15.4s, v20.4s \n"
"fmla v30.4s, v15.4s, v21.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%2], #32 \n" // r00 r01 r02 r03
"fmla v31.4s, v15.4s, v22.4s \n"
"shll2 v15.4s, %25.8h, #16 \n"
"fmla v24.4s, v14.4s, v20.4s \n"
"fmla v25.4s, v14.4s, v21.4s \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v26.4s, v14.4s, v22.4s \n"
"fmla v27.4s, v14.4s, v23.4s \n"
"shll v14.4s, %16.4h, #16 \n"
"fmla v28.4s, v15.4s, v20.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v29.4s, v15.4s, v21.4s \n"
"fmla v30.4s, v15.4s, v22.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v31.4s, v15.4s, v23.4s \n"
"shll2 v15.4s, %16.8h, #16 \n"
"fmla v24.4s, v14.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v25.4s, v14.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v26.4s, v14.4s, v18.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2] \n" // r04 r05 r06 r07
"fmla v27.4s, v14.4s, v19.4s \n"
"shll v14.4s, %17.4h, #16 \n"
"fmla v24.4s, v15.4s, v17.4s \n"
"fmla v25.4s, v15.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v26.4s, v15.4s, v19.4s \n"
"fmla v27.4s, v15.4s, v20.4s \n"
"shll2 v15.4s, %17.8h, #16 \n"
"fmla v24.4s, v14.4s, v18.4s \n"
"fmla v25.4s, v14.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v26.4s, v14.4s, v20.4s \n"
"fmla v27.4s, v14.4s, v21.4s \n"
"shll v14.4s, %18.4h, #16 \n"
"fmla v24.4s, v15.4s, v19.4s \n"
"fmla v25.4s, v15.4s, v20.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v26.4s, v15.4s, v21.4s \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%7], #32 \n" // r50 r51 r52 r53
"fmla v27.4s, v15.4s, v22.4s \n"
"shll v15.4s, %26.4h, #16 \n"
"fmla v24.4s, v14.4s, v20.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v25.4s, v14.4s, v21.4s \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v26.4s, v14.4s, v22.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v27.4s, v14.4s, v23.4s \n"
"shll2 v14.4s, %26.8h, #16 \n"
"fmla v28.4s, v15.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v29.4s, v15.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v30.4s, v15.4s, v18.4s \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%7] \n" // r54 r55 r56 r57
"fmla v31.4s, v15.4s, v19.4s \n"
"shll v15.4s, %27.4h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"fmla v29.4s, v14.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v30.4s, v14.4s, v19.4s \n"
"fmla v31.4s, v14.4s, v20.4s \n"
"shll2 v14.4s, %27.8h, #16 \n"
"fmla v28.4s, v15.4s, v18.4s \n"
"fmla v29.4s, v15.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v30.4s, v15.4s, v20.4s \n"
"fmla v31.4s, v15.4s, v21.4s \n"
"shll v15.4s, %28.4h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"fmla v29.4s, v14.4s, v20.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v30.4s, v14.4s, v21.4s \n"
"fmla v31.4s, v14.4s, v22.4s \n"
"fmla v28.4s, v15.4s, v20.4s \n"
"fmla v29.4s, v15.4s, v21.4s \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v30.4s, v15.4s, v22.4s \n"
"fmla v31.4s, v15.4s, v23.4s \n"
"shrn v24.4h, v24.4s, #16 \n"
"shrn v25.4h, v25.4s, #16 \n"
"shrn v26.4h, v26.4s, #16 \n"
"shrn v27.4h, v27.4s, #16 \n"
"shrn v28.4h, v28.4s, #16 \n"
"shrn v29.4h, v29.4s, #16 \n"
"shrn v30.4h, v30.4s, #16 \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%0], #32 \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(r5) // %7
: "0"(outptr0),
"1"(outptr1),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(r5),
"w"(_k00_01), // %16
"w"(_k02_03), // %17
"w"(_k04_10), // %18
"w"(_k11_12), // %19
"w"(_k13_14), // %20
"w"(_k20_21), // %21
"w"(_k22_23), // %22
"w"(_k24_30), // %23
"w"(_k31_32), // %24
"w"(_k33_34), // %25
"w"(_k40_41), // %26
"w"(_k42_43), // %27
"w"(_k44), // %28
"w"(_bias0) // %29
: "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; j + 1 < outw; j += 2)
{
asm volatile(
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v16.4h, v17.4h}, [%3], #16 \n" // r10 r11
"shll2 v14.4s, %18.8h, #16 \n"
"mov v28.16b, %29.16b \n" // sum00
"mov v29.16b, %29.16b \n" // sum01
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"mov v30.16b, %29.16b \n" // sum10
"mov v31.16b, %29.16b \n" // sum11
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%3] \n" // r12 r13 r14 r15
"fmla v28.4s, v14.4s, v16.4s \n"
"shll v15.4s, %16.4h, #16 \n"
"fmla v29.4s, v14.4s, v17.4s \n"
"shll v14.4s, %19.4h, #16 \n"
"fmla v30.4s, v15.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v31.4s, v15.4s, v17.4s \n"
"shll2 v15.4s, %16.8h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"fmla v29.4s, v14.4s, v18.4s \n"
"shll2 v14.4s, %19.8h, #16 \n"
"fmla v30.4s, v15.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v31.4s, v15.4s, v18.4s \n"
"shll v15.4s, %17.4h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v16.4h, v17.4h}, [%4], #16 \n" // r20 r21
"fmla v29.4s, v14.4s, v19.4s \n"
"shll v14.4s, %20.4h, #16 \n"
"fmla v30.4s, v15.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"shll2 v15.4s, %17.8h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"fmla v29.4s, v14.4s, v20.4s \n"
"shll2 v14.4s, %20.8h, #16 \n"
"fmla v30.4s, v15.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"shll v15.4s, %18.4h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v29.4s, v14.4s, v21.4s \n"
"shll v14.4s, %21.4h, #16 \n"
"fmla v30.4s, v15.4s, v20.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v31.4s, v15.4s, v21.4s \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%4] \n" // r22 r23 r24 r25
"fmla v28.4s, v14.4s, v16.4s \n"
"shll2 v15.4s, %18.8h, #16 \n"
"fmla v29.4s, v14.4s, v17.4s \n"
"shll2 v14.4s, %21.8h, #16 \n"
"fmla v30.4s, v15.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v31.4s, v15.4s, v17.4s \n"
"shll v15.4s, %19.4h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"fmla v29.4s, v14.4s, v18.4s \n"
"shll v14.4s, %22.4h, #16 \n"
"fmla v30.4s, v15.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v31.4s, v15.4s, v18.4s \n"
"shll2 v15.4s, %19.8h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v16.4h, v17.4h}, [%5], #16 \n" // r30 r31
"fmla v29.4s, v14.4s, v19.4s \n"
"shll2 v14.4s, %22.8h, #16 \n"
"fmla v30.4s, v15.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"shll v15.4s, %20.4h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"fmla v29.4s, v14.4s, v20.4s \n"
"shll v14.4s, %23.4h, #16 \n"
"fmla v30.4s, v15.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"shll2 v15.4s, %20.8h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v29.4s, v14.4s, v21.4s \n"
"shll2 v14.4s, %23.8h, #16 \n"
"fmla v30.4s, v15.4s, v20.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v31.4s, v15.4s, v21.4s \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%5] \n" // r32 r33 r34 r35
"fmla v28.4s, v14.4s, v16.4s \n"
"shll v15.4s, %21.4h, #16 \n"
"fmla v29.4s, v14.4s, v17.4s \n"
"shll v14.4s, %24.4h, #16 \n"
"fmla v30.4s, v15.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v31.4s, v15.4s, v17.4s \n"
"shll2 v15.4s, %21.8h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"fmla v29.4s, v14.4s, v18.4s \n"
"shll2 v14.4s, %24.8h, #16 \n"
"fmla v30.4s, v15.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v31.4s, v15.4s, v18.4s \n"
"shll v15.4s, %22.4h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v16.4h, v17.4h}, [%6], #16 \n" // r40 r41
"fmla v29.4s, v14.4s, v19.4s \n"
"shll v14.4s, %25.4h, #16 \n"
"fmla v30.4s, v15.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"shll2 v15.4s, %22.8h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"fmla v29.4s, v14.4s, v20.4s \n"
"shll2 v14.4s, %25.8h, #16 \n"
"fmla v30.4s, v15.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"shll v15.4s, %23.4h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v29.4s, v14.4s, v21.4s \n"
"shll v14.4s, %26.4h, #16 \n"
"fmla v30.4s, v15.4s, v20.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v31.4s, v15.4s, v21.4s \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%6] \n" // r42 r43 r44 r45
"fmla v28.4s, v14.4s, v16.4s \n"
"shll2 v15.4s, %23.8h, #16 \n"
"fmla v29.4s, v14.4s, v17.4s \n"
"shll2 v14.4s, %26.8h, #16 \n"
"fmla v30.4s, v15.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v31.4s, v15.4s, v17.4s \n"
"shll v15.4s, %24.4h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"fmla v29.4s, v14.4s, v18.4s \n"
"shll v14.4s, %27.4h, #16 \n"
"fmla v30.4s, v15.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v31.4s, v15.4s, v18.4s \n"
"shll2 v15.4s, %24.8h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v16.4h, v17.4h}, [%2], #16 \n" // r00 r01
"fmla v29.4s, v14.4s, v19.4s \n"
"shll2 v14.4s, %27.8h, #16 \n"
"fmla v30.4s, v15.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"shll v15.4s, %25.4h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"prfm pldl1keep, [%7, #128] \n"
"ld1 {v22.4h, v23.4h}, [%7], #16 \n" // r50 r51
"shll v16.4s, v16.4h, #16 \n"
"fmla v29.4s, v14.4s, v20.4s \n"
"shll v14.4s, %28.4h, #16 \n"
"fmla v30.4s, v15.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"shll2 v15.4s, %25.8h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v29.4s, v14.4s, v21.4s \n"
"shll v14.4s, %16.4h, #16 \n"
"fmla v30.4s, v15.4s, v20.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v31.4s, v15.4s, v21.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%2] \n" // r02 r03 r04 r05
"shll v23.4s, v23.4h, #16 \n"
"fmla v28.4s, v14.4s, v16.4s \n"
"shll v15.4s, %26.4h, #16 \n"
"fmla v29.4s, v14.4s, v17.4s \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%7] \n" // r52 r53 r54 r55
"shll2 v14.4s, %16.8h, #16 \n"
"fmla v30.4s, v15.4s, v22.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v31.4s, v15.4s, v23.4s \n"
"shll2 v15.4s, %26.8h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v29.4s, v14.4s, v18.4s \n"
"shll v14.4s, %17.4h, #16 \n"
"fmla v30.4s, v15.4s, v23.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v31.4s, v15.4s, v24.4s \n"
"shll v15.4s, %27.4h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v29.4s, v14.4s, v19.4s \n"
"shll2 v14.4s, %17.8h, #16 \n"
"fmla v30.4s, v15.4s, v24.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v31.4s, v15.4s, v25.4s \n"
"shll2 v15.4s, %27.8h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v29.4s, v14.4s, v20.4s \n"
"shll v14.4s, %18.4h, #16 \n"
"fmla v30.4s, v15.4s, v25.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v31.4s, v15.4s, v26.4s \n"
"shll v15.4s, %28.4h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"fmla v29.4s, v14.4s, v21.4s \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v30.4s, v15.4s, v26.4s \n"
"fmla v31.4s, v15.4s, v27.4s \n"
"shrn v28.4h, v28.4s, #16 \n"
"shrn v29.4h, v29.4s, #16 \n"
"shrn v30.4h, v30.4s, #16 \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v28.4h, v29.4h}, [%0], #16 \n"
"st1 {v30.4h, v31.4h}, [%1], #16 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(r5) // %7
: "0"(outptr0),
"1"(outptr1),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(r5),
"w"(_k00_01), // %16
"w"(_k02_03), // %17
"w"(_k04_10), // %18
"w"(_k11_12), // %19
"w"(_k13_14), // %20
"w"(_k20_21), // %21
"w"(_k22_23), // %22
"w"(_k24_30), // %23
"w"(_k31_32), // %24
"w"(_k33_34), // %25
"w"(_k40_41), // %26
"w"(_k42_43), // %27
"w"(_k44), // %28
"w"(_bias0) // %29
: "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; j < outw; j++)
{
asm volatile(
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v16.4h}, [%3], #8 \n" // r10
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%3] \n" // r11 r12 r13 r14
"mov v30.16b, %29.16b \n" // sum00
"mov v31.16b, %29.16b \n" // sum10
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"shll v20.4s, v20.4h, #16 \n"
"shll2 v14.4s, %18.8h, #16 \n"
"shll v15.4s, %16.4h, #16 \n"
"fmul v28.4s, v14.4s, v16.4s \n"
"shll v14.4s, %19.4h, #16 \n"
"fmul v29.4s, v15.4s, v16.4s \n"
"shll2 v15.4s, %16.8h, #16 \n"
"fmla v30.4s, v14.4s, v17.4s \n"
"shll2 v14.4s, %19.8h, #16 \n"
"fmla v31.4s, v15.4s, v17.4s \n"
"shll v15.4s, %17.4h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"shll v14.4s, %20.4h, #16 \n"
"fmla v29.4s, v15.4s, v18.4s \n"
"shll2 v15.4s, %17.8h, #16 \n"
"fmla v30.4s, v14.4s, v19.4s \n"
"shll2 v14.4s, %20.8h, #16 \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"shll v15.4s, %18.4h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"shll v14.4s, %21.4h, #16 \n"
"fmla v29.4s, v15.4s, v20.4s \n"
"prfm pldl1keep, [%4, #64] \n"
"ld1 {v16.4h}, [%4], #8 \n" // r20
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%4] \n" // r21 r22 r23 r24
"shll2 v15.4s, %18.8h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v30.4s, v14.4s, v16.4s \n"
"shll2 v14.4s, %21.8h, #16 \n"
"fmla v31.4s, v15.4s, v16.4s \n"
"shll v15.4s, %19.4h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"shll v14.4s, %22.4h, #16 \n"
"fmla v29.4s, v15.4s, v17.4s \n"
"shll2 v15.4s, %19.8h, #16 \n"
"fmla v30.4s, v14.4s, v18.4s \n"
"shll2 v14.4s, %22.8h, #16 \n"
"fmla v31.4s, v15.4s, v18.4s \n"
"shll v15.4s, %20.4h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"shll v14.4s, %23.4h, #16 \n"
"fmla v29.4s, v15.4s, v19.4s \n"
"shll2 v15.4s, %20.8h, #16 \n"
"fmla v30.4s, v14.4s, v20.4s \n"
"shll2 v14.4s, %23.8h, #16 \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"prfm pldl1keep, [%5, #64] \n"
"ld1 {v16.4h}, [%5], #8 \n" // r30
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%5] \n" // r31 r32 r33 r34
"shll v15.4s, %21.4h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v28.4s, v14.4s, v16.4s \n"
"shll v14.4s, %24.4h, #16 \n"
"fmla v29.4s, v15.4s, v16.4s \n"
"shll2 v15.4s, %21.8h, #16 \n"
"fmla v30.4s, v14.4s, v17.4s \n"
"shll2 v14.4s, %24.8h, #16 \n"
"fmla v31.4s, v15.4s, v17.4s \n"
"shll v15.4s, %22.4h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"shll v14.4s, %25.4h, #16 \n"
"fmla v29.4s, v15.4s, v18.4s \n"
"shll2 v15.4s, %22.8h, #16 \n"
"fmla v30.4s, v14.4s, v19.4s \n"
"shll2 v14.4s, %25.8h, #16 \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"shll v15.4s, %23.4h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"shll v14.4s, %26.4h, #16 \n"
"fmla v29.4s, v15.4s, v20.4s \n"
"prfm pldl1keep, [%6, #64] \n"
"ld1 {v16.4h}, [%6], #8 \n" // r40
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%6] \n" // r41 r42 r43 r44
"shll2 v15.4s, %23.8h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v30.4s, v14.4s, v16.4s \n"
"shll2 v14.4s, %26.8h, #16 \n"
"fmla v31.4s, v15.4s, v16.4s \n"
"shll v15.4s, %24.4h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"shll v14.4s, %27.4h, #16 \n"
"fmla v29.4s, v15.4s, v17.4s \n"
"shll2 v15.4s, %24.8h, #16 \n"
"fmla v30.4s, v14.4s, v18.4s \n"
"shll2 v14.4s, %27.8h, #16 \n"
"fmla v31.4s, v15.4s, v18.4s \n"
"shll v15.4s, %25.4h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"shll v14.4s, %28.4h, #16 \n"
"fmla v29.4s, v15.4s, v19.4s \n"
"shll2 v15.4s, %25.8h, #16 \n"
"fmla v30.4s, v14.4s, v20.4s \n"
"shll v14.4s, %16.4h, #16 \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v16.4h}, [%2], #8 \n" // r00
"prfm pldl1keep, [%7, #64] \n"
"ld1 {v21.4h}, [%7], #8 \n" // r50
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v22.4h, v23.4h, v24.4h, v25.4h}, [%7] \n" // r51 r52 r53 r54
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%2] \n" // r01 r02 r03 r04
"shll v15.4s, %26.4h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v21.4s, v21.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"shll v20.4s, v20.4h, #16 \n"
"shll v22.4s, v22.4h, #16 \n"
"shll v23.4s, v23.4h, #16 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v28.4s, v14.4s, v16.4s \n"
"shll2 v14.4s, %16.8h, #16 \n"
"fmla v29.4s, v15.4s, v21.4s \n"
"shll2 v15.4s, %26.8h, #16 \n"
"fmla v30.4s, v14.4s, v17.4s \n"
"shll v14.4s, %17.4h, #16 \n"
"fmla v31.4s, v15.4s, v22.4s \n"
"shll v15.4s, %27.4h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"shll2 v14.4s, %17.8h, #16 \n"
"fmla v29.4s, v15.4s, v23.4s \n"
"shll2 v15.4s, %27.8h, #16 \n"
"fmla v30.4s, v14.4s, v19.4s \n"
"shll v14.4s, %18.4h, #16 \n"
"fmla v31.4s, v15.4s, v24.4s \n"
"shll v15.4s, %28.4h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"fmla v29.4s, v15.4s, v25.4s \n"
"fadd v30.4s, v30.4s, v28.4s \n"
"fadd v31.4s, v31.4s, v29.4s \n"
"shrn v30.4h, v30.4s, #16 \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v30.4h}, [%0], #8 \n"
"st1 {v31.4h}, [%1], #8 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(r5) // %7
: "0"(outptr0),
"1"(outptr1),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(r5),
"w"(_k00_01), // %16
"w"(_k02_03), // %17
"w"(_k04_10), // %18
"w"(_k11_12), // %19
"w"(_k13_14), // %20
"w"(_k20_21), // %21
"w"(_k22_23), // %22
"w"(_k24_30), // %23
"w"(_k31_32), // %24
"w"(_k33_34), // %25
"w"(_k40_41), // %26
"w"(_k42_43), // %27
"w"(_k44), // %28
"w"(_bias0) // %29
: "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
r0 += 4 * 4 + w * 4;
r1 += 4 * 4 + w * 4;
r2 += 4 * 4 + w * 4;
r3 += 4 * 4 + w * 4;
r4 += 4 * 4 + w * 4;
r5 += 4 * 4 + w * 4;
outptr0 += outw * 4;
outptr1 += outw * 4;
}
#endif // __aarch64__
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n" // r00 r01 r02 r03
"shll v14.4s, %12.4h, #16 \n"
"mov v28.16b, %25.16b \n" // sum00
"mov v29.16b, %25.16b \n" // sum01
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"mov v30.16b, %25.16b \n" // sum02
"mov v31.16b, %25.16b \n" // sum03
"shll2 v15.4s, %12.8h, #16 \n"
"fmla v28.4s, v14.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v29.4s, v14.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v30.4s, v14.4s, v18.4s \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%1] \n" // r04 r05 r06 r07
"fmla v31.4s, v14.4s, v19.4s \n"
"shll v14.4s, %13.4h, #16 \n"
"fmla v28.4s, v15.4s, v17.4s \n"
"fmla v29.4s, v15.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v30.4s, v15.4s, v19.4s \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"shll2 v15.4s, %13.8h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"fmla v29.4s, v14.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v30.4s, v14.4s, v20.4s \n"
"fmla v31.4s, v14.4s, v21.4s \n"
"shll v14.4s, %14.4h, #16 \n"
"fmla v28.4s, v15.4s, v19.4s \n"
"fmla v29.4s, v15.4s, v20.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v30.4s, v15.4s, v21.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%2], #32 \n" // r10 r11 r12 r13
"fmla v31.4s, v15.4s, v22.4s \n"
"shll2 v15.4s, %14.8h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v29.4s, v14.4s, v21.4s \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v30.4s, v14.4s, v22.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v31.4s, v14.4s, v23.4s \n"
"shll v14.4s, %15.4h, #16 \n"
"fmla v28.4s, v15.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v29.4s, v15.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v30.4s, v15.4s, v18.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2] \n" // r14 r15 r16 r17
"fmla v31.4s, v15.4s, v19.4s \n"
"shll2 v15.4s, %15.8h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"fmla v29.4s, v14.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v30.4s, v14.4s, v19.4s \n"
"fmla v31.4s, v14.4s, v20.4s \n"
"shll v14.4s, %16.4h, #16 \n"
"fmla v28.4s, v15.4s, v18.4s \n"
"fmla v29.4s, v15.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v30.4s, v15.4s, v20.4s \n"
"fmla v31.4s, v15.4s, v21.4s \n"
"shll2 v15.4s, %16.8h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"fmla v29.4s, v14.4s, v20.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v30.4s, v14.4s, v21.4s \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n" // r20 r21 r22 r23
"fmla v31.4s, v14.4s, v22.4s \n"
"shll v14.4s, %17.4h, #16 \n"
"fmla v28.4s, v15.4s, v20.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v29.4s, v15.4s, v21.4s \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v30.4s, v15.4s, v22.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v31.4s, v15.4s, v23.4s \n"
"shll2 v15.4s, %17.8h, #16 \n"
"fmla v28.4s, v14.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v29.4s, v14.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v30.4s, v14.4s, v18.4s \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3] \n" // r24 r25 r26 r27
"fmla v31.4s, v14.4s, v19.4s \n"
"shll v14.4s, %18.4h, #16 \n"
"fmla v28.4s, v15.4s, v17.4s \n"
"fmla v29.4s, v15.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v30.4s, v15.4s, v19.4s \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"shll2 v15.4s, %18.8h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"fmla v29.4s, v14.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v30.4s, v14.4s, v20.4s \n"
"fmla v31.4s, v14.4s, v21.4s \n"
"shll v14.4s, %19.4h, #16 \n"
"fmla v28.4s, v15.4s, v19.4s \n"
"fmla v29.4s, v15.4s, v20.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v30.4s, v15.4s, v21.4s \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%4], #32 \n" // r30 r31 r32 r33
"fmla v31.4s, v15.4s, v22.4s \n"
"shll2 v15.4s, %19.8h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v29.4s, v14.4s, v21.4s \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v30.4s, v14.4s, v22.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v31.4s, v14.4s, v23.4s \n"
"shll v14.4s, %20.4h, #16 \n"
"fmla v28.4s, v15.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v29.4s, v15.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v30.4s, v15.4s, v18.4s \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%4] \n" // r34 r35 r36 r37
"fmla v31.4s, v15.4s, v19.4s \n"
"shll2 v15.4s, %20.8h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"fmla v29.4s, v14.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v30.4s, v14.4s, v19.4s \n"
"fmla v31.4s, v14.4s, v20.4s \n"
"shll v14.4s, %21.4h, #16 \n"
"fmla v28.4s, v15.4s, v18.4s \n"
"fmla v29.4s, v15.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v30.4s, v15.4s, v20.4s \n"
"fmla v31.4s, v15.4s, v21.4s \n"
"shll2 v15.4s, %21.8h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"fmla v29.4s, v14.4s, v20.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v30.4s, v14.4s, v21.4s \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%5], #32 \n" // r40 r41 r42 r43
"fmla v31.4s, v14.4s, v22.4s \n"
"shll v14.4s, %22.4h, #16 \n"
"fmla v28.4s, v15.4s, v20.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v29.4s, v15.4s, v21.4s \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v30.4s, v15.4s, v22.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v31.4s, v15.4s, v23.4s \n"
"shll2 v15.4s, %22.8h, #16 \n"
"fmla v28.4s, v14.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v29.4s, v14.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v30.4s, v14.4s, v18.4s \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%5] \n" // r44 r45 r46 r47
"fmla v31.4s, v14.4s, v19.4s \n"
"shll v14.4s, %23.4h, #16 \n"
"fmla v28.4s, v15.4s, v17.4s \n"
"fmla v29.4s, v15.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v30.4s, v15.4s, v19.4s \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"shll2 v15.4s, %23.8h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"fmla v29.4s, v14.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v30.4s, v14.4s, v20.4s \n"
"fmla v31.4s, v14.4s, v21.4s \n"
"shll v14.4s, %24.4h, #16 \n"
"fmla v28.4s, v15.4s, v19.4s \n"
"fmla v29.4s, v15.4s, v20.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v30.4s, v15.4s, v21.4s \n"
"fmla v31.4s, v15.4s, v22.4s \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"fmla v29.4s, v14.4s, v21.4s \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v30.4s, v14.4s, v22.4s \n"
"fmla v31.4s, v14.4s, v23.4s \n"
"shrn v28.4h, v28.4s, #16 \n"
"shrn v29.4h, v29.4s, #16 \n"
"shrn v30.4h, v30.4s, #16 \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4) // %5
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"w"(_k00_01), // %12
"w"(_k02_03), // %13
"w"(_k04_10), // %14
"w"(_k11_12), // %15
"w"(_k13_14), // %16
"w"(_k20_21), // %17
"w"(_k22_23), // %18
"w"(_k24_30), // %19
"w"(_k31_32), // %20
"w"(_k33_34), // %21
"w"(_k40_41), // %22
"w"(_k42_43), // %23
"w"(_k44), // %24
"w"(_bias0) // %25
: "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
#else // __aarch64__
asm volatile(
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2 :64]! \n" // r00 r01 r02 r03
"vshll.u16 q8, d20, #16 \n" // k00
"pld [%1, #128] \n"
"vld1.f32 {d24-d25}, [%1] \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vmov q13, q12 \n" // sum0 sum1
"vmov q14, q12 \n"
"vshll.u16 q9, d21, #16 \n" // k01
"vmov q15, q12 \n" // sum2 sum3
"vmla.f32 q12, q8, q0 \n"
"vshll.u16 q2, d6, #16 \n"
"vmla.f32 q13, q8, q1 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q8, q2 \n"
"pld [%2, #256] \n"
"vld1.u16 {d12-d15}, [%2 :64] \n" // r04 r05 r06 r07
"vmla.f32 q15, q8, q3 \n"
"vshll.u16 q10, d22, #16 \n" // k02
"vmla.f32 q12, q9, q1 \n"
"vmla.f32 q13, q9, q2 \n"
"vshll.u16 q4, d12, #16 \n"
"vmla.f32 q14, q9, q3 \n"
"vmla.f32 q15, q9, q4 \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q11, d23, #16 \n" // k03
"vmla.f32 q12, q10, q2 \n"
"vmla.f32 q13, q10, q3 \n"
"vshll.u16 q5, d13, #16 \n"
"vmla.f32 q14, q10, q4 \n"
"vmla.f32 q15, q10, q5 \n"
"vshll.u16 q10, d16, #16 \n" // k04
"vmla.f32 q12, q11, q3 \n"
"vmla.f32 q13, q11, q4 \n"
"vshll.u16 q6, d14, #16 \n"
"vmla.f32 q14, q11, q5 \n"
"pld [%3, #256] \n"
"vld1.u16 {d4-d7}, [%3 :64]! \n" // r10 r11 r12 r13
"vmla.f32 q15, q11, q6 \n"
"vshll.u16 q11, d17, #16 \n" // k10
"vmla.f32 q12, q10, q4 \n"
"vshll.u16 q0, d4, #16 \n"
"vmla.f32 q13, q10, q5 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q14, q10, q6 \n"
"vshll.u16 q1, d5, #16 \n"
"vmla.f32 q15, q10, q7 \n"
"vshll.u16 q8, d18, #16 \n" // k11
"vmla.f32 q12, q11, q0 \n"
"vshll.u16 q2, d6, #16 \n"
"vmla.f32 q13, q11, q1 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q11, q2 \n"
"pld [%3, #256] \n"
"vld1.u16 {d12-d15}, [%3 :64] \n" // r14 r15 r16 r17
"vmla.f32 q15, q11, q3 \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q9, d19, #16 \n" // k12
"vmla.f32 q12, q8, q1 \n"
"vmla.f32 q13, q8, q2 \n"
"vshll.u16 q4, d12, #16 \n"
"vmla.f32 q14, q8, q3 \n"
"vmla.f32 q15, q8, q4 \n"
"vshll.u16 q8, d20, #16 \n" // k13
"vmla.f32 q12, q9, q2 \n"
"vmla.f32 q13, q9, q3 \n"
"vshll.u16 q5, d13, #16 \n"
"vmla.f32 q14, q9, q4 \n"
"vmla.f32 q15, q9, q5 \n"
"vshll.u16 q9, d21, #16 \n" // k14
"vmla.f32 q12, q8, q3 \n"
"vmla.f32 q13, q8, q4 \n"
"vshll.u16 q6, d14, #16 \n"
"vmla.f32 q14, q8, q5 \n"
"pld [%4, #256] \n"
"vld1.u16 {d4-d7}, [%4 :64]! \n" // r20 r21 r22 r23
"vmla.f32 q15, q8, q6 \n"
"vshll.u16 q10, d22, #16 \n" // k20
"vmla.f32 q12, q9, q4 \n"
"vshll.u16 q0, d4, #16 \n"
"vmla.f32 q13, q9, q5 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q14, q9, q6 \n"
"vshll.u16 q1, d5, #16 \n"
"vmla.f32 q15, q9, q7 \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q11, d23, #16 \n" // k21
"vmla.f32 q12, q10, q0 \n"
"vshll.u16 q2, d6, #16 \n"
"vmla.f32 q13, q10, q1 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q10, q2 \n"
"pld [%4, #256] \n"
"vld1.u16 {d12-d15}, [%4 :64] \n" // r24 r25 r26 r27
"vmla.f32 q15, q10, q3 \n"
"vshll.u16 q10, d16, #16 \n" // k22
"vmla.f32 q12, q11, q1 \n"
"vmla.f32 q13, q11, q2 \n"
"vshll.u16 q4, d12, #16 \n"
"vmla.f32 q14, q11, q3 \n"
"vmla.f32 q15, q11, q4 \n"
"vshll.u16 q11, d17, #16 \n" // k23
"vmla.f32 q12, q10, q2 \n"
"vmla.f32 q13, q10, q3 \n"
"vshll.u16 q5, d13, #16 \n"
"vmla.f32 q14, q10, q4 \n"
"vmla.f32 q15, q10, q5 \n"
"vshll.u16 q8, d18, #16 \n" // k24
"vmla.f32 q12, q11, q3 \n"
"vmla.f32 q13, q11, q4 \n"
"vshll.u16 q6, d14, #16 \n"
"vmla.f32 q14, q11, q5 \n"
"pld [%5, #256] \n"
"vld1.u16 {d4-d7}, [%5 :64]! \n" // r30 r31 r32 r33
"vmla.f32 q15, q11, q6 \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q9, d19, #16 \n" // k30
"vmla.f32 q12, q8, q4 \n"
"vshll.u16 q0, d4, #16 \n"
"vmla.f32 q13, q8, q5 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q14, q8, q6 \n"
"vshll.u16 q1, d5, #16 \n"
"vmla.f32 q15, q8, q7 \n"
"vshll.u16 q8, d20, #16 \n" // k31
"vmla.f32 q12, q9, q0 \n"
"vshll.u16 q2, d6, #16 \n"
"vmla.f32 q13, q9, q1 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q9, q2 \n"
"pld [%5, #256] \n"
"vld1.u16 {d12-d15}, [%5 :64] \n" // r34 r35 r36 r37
"vmla.f32 q15, q9, q3 \n"
"vshll.u16 q9, d21, #16 \n" // k32
"vmla.f32 q12, q8, q1 \n"
"vmla.f32 q13, q8, q2 \n"
"vshll.u16 q4, d12, #16 \n"
"vmla.f32 q14, q8, q3 \n"
"vmla.f32 q15, q8, q4 \n"
"vshll.u16 q10, d22, #16 \n" // k33
"vmla.f32 q12, q9, q2 \n"
"vmla.f32 q13, q9, q3 \n"
"vshll.u16 q5, d13, #16 \n"
"vmla.f32 q14, q9, q4 \n"
"vmla.f32 q15, q9, q5 \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vmla.f32 q12, q10, q3 \n"
"vshll.u16 q11, d23, #16 \n" // k34
"vmla.f32 q13, q10, q4 \n"
"vshll.u16 q6, d14, #16 \n"
"vmla.f32 q14, q10, q5 \n"
"pld [%6, #256] \n"
"vld1.u16 {d4-d7}, [%6 :64]! \n" // r40 r41 r42 r43
"vmla.f32 q15, q10, q6 \n"
"vshll.u16 q10, d16, #16 \n" // k40
"vmla.f32 q12, q11, q4 \n"
"vshll.u16 q0, d4, #16 \n"
"vmla.f32 q13, q11, q5 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q14, q11, q6 \n"
"vshll.u16 q1, d5, #16 \n"
"vmla.f32 q15, q11, q7 \n"
"vshll.u16 q11, d17, #16 \n" // k41
"vmla.f32 q12, q10, q0 \n"
"vshll.u16 q2, d6, #16 \n"
"vmla.f32 q13, q10, q1 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q10, q2 \n"
"pld [%6, #256] \n"
"vld1.u16 {d12-d15}, [%6 :64] \n" // r44 r45 r46 r47
"vmla.f32 q15, q10, q3 \n"
"vshll.u16 q8, d18, #16 \n" // k42
"vmla.f32 q12, q11, q1 \n"
"vmla.f32 q13, q11, q2 \n"
"vshll.u16 q4, d12, #16 \n"
"vmla.f32 q14, q11, q3 \n"
"vmla.f32 q15, q11, q4 \n"
"pld [%7, #64] \n"
"vld1.u16 {d20}, [%7 :64] \n"
"vmla.f32 q12, q8, q2 \n"
"vshll.u16 q9, d19, #16 \n" // k43
"vmla.f32 q13, q8, q3 \n"
"vshll.u16 q5, d13, #16 \n"
"vmla.f32 q14, q8, q4 \n"
"vmla.f32 q15, q8, q5 \n"
"vshll.u16 q8, d20, #16 \n" // k44
"vmla.f32 q12, q9, q3 \n"
"vmla.f32 q13, q9, q4 \n"
"vshll.u16 q6, d14, #16 \n"
"vmla.f32 q14, q9, q5 \n"
"vmla.f32 q15, q9, q6 \n"
"vmla.f32 q12, q8, q4 \n"
"vmla.f32 q13, q8, q5 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q14, q8, q6 \n"
"vmla.f32 q15, q8, q7 \n"
"sub %7, %7, #192 \n" // kptr -= 24 * 4;
"vshrn.u32 d24, q12, #16 \n"
"vshrn.u32 d25, q13, #16 \n"
"vshrn.u32 d26, q14, #16 \n"
"vshrn.u32 d27, q15, #16 \n"
"vst1.u16 {d24-d27}, [%0 :64]! \n"
: "=r"(outptr0), // %0
"=r"(bias0_data_ptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(kptr) // %7
: "0"(outptr0),
"1"(bias0_data_ptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; j + 1 < outw; j += 2)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v16.4h, v17.4h}, [%1], #16 \n" // r00 r01
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%1] \n" // r02 r03 r04 r05
"shll v14.4s, %12.4h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"mov v30.16b, %25.16b \n" // sum01
"mov v31.16b, %25.16b \n" // sum02
"shll2 v15.4s, %12.8h, #16 \n"
"fmul v28.4s, v14.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmul v29.4s, v14.4s, v17.4s \n"
"shll v14.4s, %13.4h, #16 \n"
"fmla v30.4s, v15.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v31.4s, v15.4s, v18.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v16.4h, v17.4h}, [%2], #16 \n" // r10 r11
"shll2 v15.4s, %13.8h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v29.4s, v14.4s, v19.4s \n"
"shll v14.4s, %14.4h, #16 \n"
"fmla v30.4s, v15.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"shll2 v15.4s, %14.8h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v29.4s, v14.4s, v21.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%2] \n" // r12 r13 r14 r15
"shll v14.4s, %15.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v30.4s, v15.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v31.4s, v15.4s, v17.4s \n"
"shll2 v15.4s, %15.8h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v29.4s, v14.4s, v18.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v16.4h, v17.4h}, [%3], #16 \n" // r20 r21
"shll v14.4s, %16.4h, #16 \n"
"fmla v30.4s, v15.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"shll2 v15.4s, %16.8h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v29.4s, v14.4s, v20.4s \n"
"shll v14.4s, %17.4h, #16 \n"
"fmla v30.4s, v15.4s, v20.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v31.4s, v15.4s, v21.4s \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%3] \n" // r22 r23 r24 r25
"shll2 v15.4s, %17.8h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v28.4s, v14.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v29.4s, v14.4s, v17.4s \n"
"shll v14.4s, %18.4h, #16 \n"
"fmla v30.4s, v15.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v31.4s, v15.4s, v18.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v16.4h, v17.4h}, [%4], #16 \n" // r30 r31
"shll2 v15.4s, %18.8h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v29.4s, v14.4s, v19.4s \n"
"shll v14.4s, %19.4h, #16 \n"
"fmla v30.4s, v15.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"shll2 v15.4s, %19.8h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v29.4s, v14.4s, v21.4s \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%4] \n" // r32 r33 r34 r35
"shll v14.4s, %20.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v30.4s, v15.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v31.4s, v15.4s, v17.4s \n"
"shll2 v15.4s, %20.8h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v29.4s, v14.4s, v18.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v16.4h, v17.4h}, [%5], #16 \n" // r40 r41
"shll v14.4s, %21.4h, #16 \n"
"fmla v30.4s, v15.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"shll2 v15.4s, %21.8h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v29.4s, v14.4s, v20.4s \n"
"shll v14.4s, %22.4h, #16 \n"
"fmla v30.4s, v15.4s, v20.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v31.4s, v15.4s, v21.4s \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [%5] \n" // r42 r43 r44 r45
"shll2 v15.4s, %22.8h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v28.4s, v14.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v29.4s, v14.4s, v17.4s \n"
"shll v14.4s, %23.4h, #16 \n"
"fmla v30.4s, v15.4s, v17.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v31.4s, v15.4s, v18.4s \n"
"shll2 v15.4s, %23.8h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v29.4s, v14.4s, v19.4s \n"
"shll v14.4s, %24.4h, #16 \n"
"fmla v30.4s, v15.4s, v19.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"fmla v29.4s, v14.4s, v21.4s \n"
"fadd v30.4s, v30.4s, v28.4s \n"
"fadd v31.4s, v31.4s, v29.4s \n"
"shrn v30.4h, v30.4s, #16 \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v30.4h, v31.4h}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4) // %5
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"w"(_k00_01), // %12
"w"(_k02_03), // %13
"w"(_k04_10), // %14
"w"(_k11_12), // %15
"w"(_k13_14), // %16
"w"(_k20_21), // %17
"w"(_k22_23), // %18
"w"(_k24_30), // %19
"w"(_k31_32), // %20
"w"(_k33_34), // %21
"w"(_k40_41), // %22
"w"(_k42_43), // %23
"w"(_k44), // %24
"w"(_bias0) // %25
: "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
#else // __aarch64__
asm volatile(
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"pld [%2, #128] \n"
"vld1.u16 {d2-d3}, [%2 :64]! \n" // r00 r01
"vshll.u16 q8, d20, #16 \n" // k00
"pld [%1, #128] \n"
"vld1.f32 {d24-d25}, [%1] \n"
"pld [%2, #256] \n"
"vld1.u16 {d8-d11}, [%2 :64] \n" // r02 r03 r04 r05
"vshll.u16 q0, d2, #16 \n"
"vmov q13, q12 \n" // sum0 sum1
"vshll.u16 q1, d3, #16 \n"
"vshll.u16 q9, d21, #16 \n" // k01
"vmul.f32 q14, q8, q0 \n"
"vshll.u16 q2, d8, #16 \n"
"vmul.f32 q15, q8, q1 \n"
"vshll.u16 q10, d22, #16 \n" // k02
"vmla.f32 q12, q9, q1 \n"
"pld [%3, #128] \n"
"vld1.u16 {d2-d3}, [%3 :64]! \n" // r10 r11
"vshll.u16 q3, d9, #16 \n"
"vmla.f32 q13, q9, q2 \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q11, d23, #16 \n" // k03
"vmla.f32 q14, q10, q2 \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q15, q10, q3 \n"
"vshll.u16 q10, d16, #16 \n" // k04
"vmla.f32 q12, q11, q3 \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q13, q11, q4 \n"
"vshll.u16 q11, d17, #16 \n" // k10
"vmla.f32 q14, q10, q4 \n"
"vshll.u16 q0, d2, #16 \n"
"vmla.f32 q15, q10, q5 \n"
"pld [%3, #256] \n"
"vld1.u16 {d8-d11}, [%3 :64] \n" // r12 r13 r14 r15
"vshll.u16 q1, d3, #16 \n"
"vshll.u16 q8, d18, #16 \n" // k11
"vmla.f32 q12, q11, q0 \n"
"vshll.u16 q2, d8, #16 \n"
"vmla.f32 q13, q11, q1 \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q9, d19, #16 \n" // k12
"vmla.f32 q14, q8, q1 \n"
"pld [%4, #128] \n"
"vld1.u16 {d2-d3}, [%4 :64]! \n" // r20 r21
"vshll.u16 q3, d9, #16 \n"
"vmla.f32 q15, q8, q2 \n"
"vshll.u16 q8, d20, #16 \n" // k13
"vmla.f32 q12, q9, q2 \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q13, q9, q3 \n"
"vshll.u16 q9, d21, #16 \n" // k14
"vmla.f32 q14, q8, q3 \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q15, q8, q4 \n"
"vshll.u16 q10, d22, #16 \n" // k20
"vmla.f32 q12, q9, q4 \n"
"vshll.u16 q0, d2, #16 \n"
"vmla.f32 q13, q9, q5 \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"pld [%4, #256] \n"
"vld1.u16 {d8-d11}, [%4 :64] \n" // r22 r23 r24 r25
"vshll.u16 q1, d3, #16 \n"
"vshll.u16 q11, d23, #16 \n" // k21
"vmla.f32 q14, q10, q0 \n"
"vshll.u16 q2, d8, #16 \n"
"vmla.f32 q15, q10, q1 \n"
"vshll.u16 q10, d16, #16 \n" // k22
"vmla.f32 q12, q11, q1 \n"
"pld [%5, #128] \n"
"vld1.u16 {d2-d3}, [%5 :64]! \n" // r30 r31
"vshll.u16 q3, d9, #16 \n"
"vmla.f32 q13, q11, q2 \n"
"vshll.u16 q11, d17, #16 \n" // k23
"vmla.f32 q14, q10, q2 \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q15, q10, q3 \n"
"vshll.u16 q8, d18, #16 \n" // k24
"vmla.f32 q12, q11, q3 \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q13, q11, q4 \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q9, d19, #16 \n" // k30
"vmla.f32 q14, q8, q4 \n"
"vshll.u16 q0, d2, #16 \n"
"vmla.f32 q15, q8, q5 \n"
"pld [%5, #256] \n"
"vld1.u16 {d8-d11}, [%5 :64] \n" // r32 r33 r34 r35
"vshll.u16 q1, d3, #16 \n"
"vshll.u16 q8, d20, #16 \n" // k31
"vmla.f32 q12, q9, q0 \n"
"vshll.u16 q2, d8, #16 \n"
"vmla.f32 q13, q9, q1 \n"
"vshll.u16 q9, d21, #16 \n" // k32
"vmla.f32 q14, q8, q1 \n"
"pld [%6, #128] \n"
"vld1.u16 {d2-d3}, [%6 :64]! \n" // r40 r41
"vshll.u16 q3, d9, #16 \n"
"vmla.f32 q15, q8, q2 \n"
"vshll.u16 q10, d22, #16 \n" // k33
"vmla.f32 q12, q9, q2 \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q13, q9, q3 \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q11, d23, #16 \n" // k34
"vmla.f32 q14, q10, q3 \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q15, q10, q4 \n"
"vshll.u16 q10, d16, #16 \n" // k40
"vmla.f32 q12, q11, q4 \n"
"vshll.u16 q0, d2, #16 \n"
"vmla.f32 q13, q11, q5 \n"
"pld [%6, #256] \n"
"vld1.u16 {d8-d11}, [%6 :64] \n" // r42 r43 r44 r45
"vshll.u16 q1, d3, #16 \n"
"vshll.u16 q11, d17, #16 \n" // k41
"vmla.f32 q14, q10, q0 \n"
"vshll.u16 q2, d8, #16 \n"
"vmla.f32 q15, q10, q1 \n"
"vshll.u16 q8, d18, #16 \n" // k42
"vmla.f32 q12, q11, q1 \n"
"vshll.u16 q3, d9, #16 \n"
"vmla.f32 q13, q11, q2 \n"
"pld [%7, #64] \n"
"vld1.u16 {d20}, [%7 :64] \n"
"vshll.u16 q9, d19, #16 \n" // k43
"vmla.f32 q14, q8, q2 \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q15, q8, q3 \n"
"vshll.u16 q8, d20, #16 \n" // k44
"vmla.f32 q12, q9, q3 \n"
"vmla.f32 q13, q9, q4 \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q14, q8, q4 \n"
"vmla.f32 q15, q8, q5 \n"
"vadd.f32 q12, q12, q14 \n"
"vadd.f32 q13, q13, q15 \n"
"sub %7, %7, #192 \n" // kptr -= 24 * 4;
"vshrn.u32 d24, q12, #16 \n"
"vshrn.u32 d25, q13, #16 \n"
"vst1.u16 {d24-d25}, [%0 :64]! \n"
: "=r"(outptr0), // %0
"=r"(bias0_data_ptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(kptr) // %7
: "0"(outptr0),
"1"(bias0_data_ptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; j < outw; j++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #64] \n"
"ld1 {v16.4h}, [%1], #8 \n" // r00
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%1] \n" // r01 r02 r03 r04
"shll v14.4s, %12.4h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"shll v20.4s, v20.4h, #16 \n"
"mov v31.16b, %25.16b \n" // sum01
"shll2 v15.4s, %12.8h, #16 \n"
"fmul v28.4s, v14.4s, v16.4s \n"
"shll v14.4s, %13.4h, #16 \n"
"fmul v29.4s, v15.4s, v17.4s \n"
"shll2 v15.4s, %13.8h, #16 \n"
"fmul v30.4s, v14.4s, v18.4s \n"
"shll v14.4s, %14.4h, #16 \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"shll2 v15.4s, %14.8h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v16.4h}, [%2], #8 \n" // r10
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%2] \n" // r11 r12 r13 r14
"shll v14.4s, %15.4h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v29.4s, v15.4s, v16.4s \n"
"shll2 v15.4s, %15.8h, #16 \n"
"fmla v30.4s, v14.4s, v17.4s \n"
"shll v14.4s, %16.4h, #16 \n"
"fmla v31.4s, v15.4s, v18.4s \n"
"shll2 v15.4s, %16.8h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"shll v14.4s, %17.4h, #16 \n"
"fmla v29.4s, v15.4s, v20.4s \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v16.4h}, [%3], #8 \n" // r20
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%3] \n" // r21 r22 r23 r24
"shll2 v15.4s, %17.8h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v30.4s, v14.4s, v16.4s \n"
"shll v14.4s, %18.4h, #16 \n"
"fmla v31.4s, v15.4s, v17.4s \n"
"shll2 v15.4s, %18.8h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"shll v14.4s, %19.4h, #16 \n"
"fmla v29.4s, v15.4s, v19.4s \n"
"shll2 v15.4s, %19.8h, #16 \n"
"fmla v30.4s, v14.4s, v20.4s \n"
"prfm pldl1keep, [%4, #64] \n"
"ld1 {v16.4h}, [%4], #8 \n" // r30
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%4] \n" // r31 r32 r33 r34
"shll v14.4s, %20.4h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v31.4s, v15.4s, v16.4s \n"
"shll2 v15.4s, %20.8h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"shll v14.4s, %21.4h, #16 \n"
"fmla v29.4s, v15.4s, v18.4s \n"
"shll2 v15.4s, %21.8h, #16 \n"
"fmla v30.4s, v14.4s, v19.4s \n"
"shll v14.4s, %22.4h, #16 \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"prfm pldl1keep, [%5, #64] \n"
"ld1 {v16.4h}, [%5], #8 \n" // r40
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v17.4h, v18.4h, v19.4h, v20.4h}, [%5] \n" // r41 r42 r43 r44
"shll2 v15.4s, %22.8h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v28.4s, v14.4s, v16.4s \n"
"shll v14.4s, %23.4h, #16 \n"
"fmla v29.4s, v15.4s, v17.4s \n"
"shll2 v15.4s, %23.8h, #16 \n"
"fmla v30.4s, v14.4s, v18.4s \n"
"shll v14.4s, %24.4h, #16 \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"fadd v29.4s, v29.4s, v30.4s \n"
"fadd v31.4s, v31.4s, v28.4s \n"
"fadd v31.4s, v31.4s, v29.4s \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v31.4h}, [%0], #8 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4) // %5
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"w"(_k00_01), // %12
"w"(_k02_03), // %13
"w"(_k04_10), // %14
"w"(_k11_12), // %15
"w"(_k13_14), // %16
"w"(_k20_21), // %17
"w"(_k22_23), // %18
"w"(_k24_30), // %19
"w"(_k31_32), // %20
"w"(_k33_34), // %21
"w"(_k40_41), // %22
"w"(_k42_43), // %23
"w"(_k44), // %24
"w"(_bias0) // %25
: "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
#else // __aarch64__
asm volatile(
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"pld [%1, #128] \n"
"vld1.f32 {d24-d25}, [%1] \n" // sum0
"pld [%2, #64] \n"
"vld1.u16 {d1}, [%2 :64]! \n" // r00
"vshll.u16 q8, d20, #16 \n" // k00
"pld [%2, #256] \n"
"vld1.u16 {d6-d9}, [%2 :64] \n" // r01 r02 r03 r04
"vshll.u16 q0, d1, #16 \n"
"vshll.u16 q9, d21, #16 \n" // k01
"vshll.u16 q1, d6, #16 \n"
"vmul.f32 q13, q8, q0 \n"
"pld [%3, #64] \n"
"vld1.u16 {d1}, [%3 :64]! \n" // r10
"vshll.u16 q2, d7, #16 \n"
"vshll.u16 q10, d22, #16 \n" // k02
"vmul.f32 q14, q9, q1 \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q3, d8, #16 \n"
"vshll.u16 q11, d23, #16 \n" // k03
"vmul.f32 q15, q10, q2 \n"
"vshll.u16 q4, d9, #16 \n"
"vshll.u16 q10, d16, #16 \n" // k04
"vmla.f32 q12, q11, q3 \n"
"vshll.u16 q0, d1, #16 \n"
"vshll.u16 q11, d17, #16 \n" // k10
"vmla.f32 q13, q10, q4 \n"
"pld [%3, #256] \n"
"vld1.u16 {d6-d9}, [%3 :64] \n" // r11 r12 r13 r14
"vshll.u16 q8, d18, #16 \n" // k11
"vshll.u16 q1, d6, #16 \n"
"vmla.f32 q14, q11, q0 \n"
"pld [%4, #64] \n"
"vld1.u16 {d1}, [%4 :64]! \n" // r20
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q2, d7, #16 \n"
"vshll.u16 q9, d19, #16 \n" // k12
"vmla.f32 q15, q8, q1 \n"
"vshll.u16 q3, d8, #16 \n"
"vshll.u16 q8, d20, #16 \n" // k13
"vmla.f32 q12, q9, q2 \n"
"vshll.u16 q4, d9, #16 \n"
"vshll.u16 q9, d21, #16 \n" // k14
"vmla.f32 q13, q8, q3 \n"
"vshll.u16 q0, d1, #16 \n"
"vshll.u16 q10, d22, #16 \n" // k20
"vmla.f32 q14, q9, q4 \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"pld [%4, #256] \n"
"vld1.u16 {d6-d9}, [%4 :64] \n" // r21 r22 r23 r24
"vshll.u16 q11, d23, #16 \n" // k21
"vshll.u16 q1, d6, #16 \n"
"vmla.f32 q15, q10, q0 \n"
"pld [%5, #64] \n"
"vld1.u16 {d1}, [%5 :64]! \n" // r30
"vshll.u16 q2, d7, #16 \n"
"vshll.u16 q10, d16, #16 \n" // k22
"vmla.f32 q12, q11, q1 \n"
"vshll.u16 q3, d8, #16 \n"
"vshll.u16 q11, d17, #16 \n" // k23
"vmla.f32 q13, q10, q2 \n"
"vshll.u16 q4, d9, #16 \n"
"vshll.u16 q8, d18, #16 \n" // k24
"vmla.f32 q14, q11, q3 \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q0, d1, #16 \n"
"vshll.u16 q9, d19, #16 \n" // k30
"vmla.f32 q15, q8, q4 \n"
"pld [%5, #256] \n"
"vld1.u16 {d6-d9}, [%5 :64] \n" // r31 r32 r33 r34
"vshll.u16 q8, d20, #16 \n" // k31
"vshll.u16 q1, d6, #16 \n"
"vmla.f32 q12, q9, q0 \n"
"pld [%6, #64] \n"
"vld1.u16 {d1}, [%6 :64]! \n" // r40
"vshll.u16 q2, d7, #16 \n"
"vshll.u16 q9, d21, #16 \n" // k32
"vmla.f32 q13, q8, q1 \n"
"vshll.u16 q3, d8, #16 \n"
"vshll.u16 q10, d22, #16 \n" // k33
"vmla.f32 q14, q9, q2 \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q4, d9, #16 \n"
"vshll.u16 q11, d23, #16 \n" // k34
"vmla.f32 q15, q10, q3 \n"
"vshll.u16 q0, d1, #16 \n"
"vshll.u16 q10, d16, #16 \n" // k40
"vmla.f32 q12, q11, q4 \n"
"pld [%6, #256] \n"
"vld1.u16 {d6-d9}, [%6 :64] \n" // r41 r42 r43 r44
"vshll.u16 q11, d17, #16 \n" // k41
"vshll.u16 q1, d6, #16 \n"
"vmla.f32 q13, q10, q0 \n"
"vshll.u16 q2, d7, #16 \n"
"vshll.u16 q8, d18, #16 \n" // k42
"vmla.f32 q14, q11, q1 \n"
"pld [%7, #64] \n"
"vld1.u16 {d20}, [%7 :64] \n"
"vshll.u16 q3, d8, #16 \n"
"vshll.u16 q9, d19, #16 \n" // k43
"vmla.f32 q15, q8, q2 \n"
"vshll.u16 q4, d9, #16 \n"
"vshll.u16 q8, d20, #16 \n" // k44
"vmla.f32 q12, q9, q3 \n"
"vmla.f32 q13, q8, q4 \n"
"vadd.f32 q14, q14, q15 \n"
"vadd.f32 q12, q12, q13 \n"
"vadd.f32 q12, q12, q14 \n"
"sub %7, %7, #192 \n" // kptr -= 24 * 4;
"vshrn.u32 d24, q12, #16 \n"
"vst1.u16 {d24}, [%0 :64]! \n"
: "=r"(outptr0), // %0
"=r"(bias0_data_ptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(kptr) // %7
: "0"(outptr0),
"1"(bias0_data_ptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
r0 += 4 * 4;
r1 += 4 * 4;
r2 += 4 * 4;
r3 += 4 * 4;
r4 += 4 * 4;
}
}
}
static void convdw5x5s2_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const int tailstep = (w - 2 * outw + w) * 4;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
#if __aarch64__
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f);
#endif // __aarch64__
const unsigned short* kptr = kernel.row<const unsigned short>(g);
unsigned short* outptr0 = out;
const Mat img0 = bottom_blob.channel(g);
const unsigned short* r0 = img0.row<const unsigned short>(0);
const unsigned short* r1 = img0.row<const unsigned short>(1);
const unsigned short* r2 = img0.row<const unsigned short>(2);
const unsigned short* r3 = img0.row<const unsigned short>(3);
const unsigned short* r4 = img0.row<const unsigned short>(4);
#if __aarch64__
// 4 * 25
uint16x8_t _k00_01 = vld1q_u16(kptr);
uint16x8_t _k02_03 = vld1q_u16(kptr + 8);
uint16x8_t _k04_10 = vld1q_u16(kptr + 16);
uint16x8_t _k11_12 = vld1q_u16(kptr + 24);
uint16x8_t _k13_14 = vld1q_u16(kptr + 32);
uint16x8_t _k20_21 = vld1q_u16(kptr + 40);
uint16x8_t _k22_23 = vld1q_u16(kptr + 48);
uint16x8_t _k24_30 = vld1q_u16(kptr + 56);
uint16x8_t _k31_32 = vld1q_u16(kptr + 64);
uint16x8_t _k33_34 = vld1q_u16(kptr + 72);
uint16x8_t _k40_41 = vld1q_u16(kptr + 80);
uint16x8_t _k42_43 = vld1q_u16(kptr + 88);
uint16x4_t _k44 = vld1_u16(kptr + 96);
#else // __aarch64__
float bias0_data[4];
if (bias)
{
bias0_data[0] = bias[g * 4 + 0];
bias0_data[1] = bias[g * 4 + 1];
bias0_data[2] = bias[g * 4 + 2];
bias0_data[3] = bias[g * 4 + 3];
}
else
{
bias0_data[0] = 0.f;
bias0_data[1] = 0.f;
bias0_data[2] = 0.f;
bias0_data[3] = 0.f;
}
const float* bias0_data_ptr = bias0_data;
#endif // __aarch64__
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n" // r00 r01 r02 r03
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%1], #32 \n" // r04 r05 r06 r07
"shll v14.4s, %12.4h, #16 \n"
"mov v28.16b, %25.16b \n" // sum00
"mov v29.16b, %25.16b \n" // sum01
"mov v30.16b, %25.16b \n" // sum02
"mov v31.16b, %25.16b \n" // sum03
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"prfm pldl1keep, [%1, #192] \n"
"ld1 {v24.4h, v25.4h, v26.4h}, [%1] \n" // r08 r09 r010
"shll2 v15.4s, %12.8h, #16 \n"
"fmla v28.4s, v14.4s, v16.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v29.4s, v14.4s, v18.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v30.4s, v14.4s, v20.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v31.4s, v14.4s, v22.4s \n"
"shll v14.4s, %13.4h, #16 \n"
"fmla v28.4s, v15.4s, v17.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v29.4s, v15.4s, v19.4s \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v30.4s, v15.4s, v21.4s \n"
"fmla v31.4s, v15.4s, v23.4s \n"
"shll2 v15.4s, %13.8h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"fmla v29.4s, v14.4s, v20.4s \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v30.4s, v14.4s, v22.4s \n"
"fmla v31.4s, v14.4s, v24.4s \n"
"shll v14.4s, %14.4h, #16 \n"
"fmla v28.4s, v15.4s, v19.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%2], #32 \n" // r10 r11 r12 r13
"fmla v29.4s, v15.4s, v21.4s \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v30.4s, v15.4s, v23.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v31.4s, v15.4s, v25.4s \n"
"shll2 v15.4s, %14.8h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v29.4s, v14.4s, v22.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n" // r14 r15 r16 r17
"shll v26.4s, v26.4h, #16 \n"
"fmla v30.4s, v14.4s, v24.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v31.4s, v14.4s, v26.4s \n"
"prfm pldl1keep, [%2, #192] \n"
"ld1 {v24.4h, v25.4h, v26.4h}, [%2] \n" // r18 r19 r110
"shll v14.4s, %15.4h, #16 \n"
"fmla v28.4s, v15.4s, v16.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v29.4s, v15.4s, v18.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v30.4s, v15.4s, v20.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v31.4s, v15.4s, v22.4s \n"
"shll2 v15.4s, %15.8h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v29.4s, v14.4s, v19.4s \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v30.4s, v14.4s, v21.4s \n"
"fmla v31.4s, v14.4s, v23.4s \n"
"shll v14.4s, %16.4h, #16 \n"
"fmla v28.4s, v15.4s, v18.4s \n"
"fmla v29.4s, v15.4s, v20.4s \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v30.4s, v15.4s, v22.4s \n"
"fmla v31.4s, v15.4s, v24.4s \n"
"shll2 v15.4s, %16.8h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n" // r20 r21 r22 r23
"fmla v29.4s, v14.4s, v21.4s \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v30.4s, v14.4s, v23.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v31.4s, v14.4s, v25.4s \n"
"shll v14.4s, %17.4h, #16 \n"
"fmla v28.4s, v15.4s, v20.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v29.4s, v15.4s, v22.4s \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%3], #32 \n" // r24 r25 r26 r27
"shll v26.4s, v26.4h, #16 \n"
"fmla v30.4s, v15.4s, v24.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v31.4s, v15.4s, v26.4s \n"
"prfm pldl1keep, [%3, #192] \n"
"ld1 {v24.4h, v25.4h, v26.4h}, [%3] \n" // r28 r29 r210
"shll2 v15.4s, %17.8h, #16 \n"
"fmla v28.4s, v14.4s, v16.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v29.4s, v14.4s, v18.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v30.4s, v14.4s, v20.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v31.4s, v14.4s, v22.4s \n"
"shll v14.4s, %18.4h, #16 \n"
"fmla v28.4s, v15.4s, v17.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v29.4s, v15.4s, v19.4s \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v30.4s, v15.4s, v21.4s \n"
"fmla v31.4s, v15.4s, v23.4s \n"
"shll2 v15.4s, %18.8h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"fmla v29.4s, v14.4s, v20.4s \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v30.4s, v14.4s, v22.4s \n"
"fmla v31.4s, v14.4s, v24.4s \n"
"shll v14.4s, %19.4h, #16 \n"
"fmla v28.4s, v15.4s, v19.4s \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%4], #32 \n" // r30 r31 r32 r33
"fmla v29.4s, v15.4s, v21.4s \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v30.4s, v15.4s, v23.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v31.4s, v15.4s, v25.4s \n"
"shll2 v15.4s, %19.8h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v29.4s, v14.4s, v22.4s \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%4], #32 \n" // r34 r35 r36 r37
"shll v26.4s, v26.4h, #16 \n"
"fmla v30.4s, v14.4s, v24.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v31.4s, v14.4s, v26.4s \n"
"prfm pldl1keep, [%4, #192] \n"
"ld1 {v24.4h, v25.4h, v26.4h}, [%4] \n" // r38 r39 r310
"shll v14.4s, %20.4h, #16 \n"
"fmla v28.4s, v15.4s, v16.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v29.4s, v15.4s, v18.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v30.4s, v15.4s, v20.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v31.4s, v15.4s, v22.4s \n"
"shll2 v15.4s, %20.8h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v29.4s, v14.4s, v19.4s \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v30.4s, v14.4s, v21.4s \n"
"fmla v31.4s, v14.4s, v23.4s \n"
"shll v14.4s, %21.4h, #16 \n"
"fmla v28.4s, v15.4s, v18.4s \n"
"fmla v29.4s, v15.4s, v20.4s \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v30.4s, v15.4s, v22.4s \n"
"fmla v31.4s, v15.4s, v24.4s \n"
"shll2 v15.4s, %21.8h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%5], #32 \n" // r40 r41 r42 r43
"fmla v29.4s, v14.4s, v21.4s \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v30.4s, v14.4s, v23.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v31.4s, v14.4s, v25.4s \n"
"shll v14.4s, %22.4h, #16 \n"
"fmla v28.4s, v15.4s, v20.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v29.4s, v15.4s, v22.4s \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%5], #32 \n" // r44 r45 r46 r47
"shll v26.4s, v26.4h, #16 \n"
"fmla v30.4s, v15.4s, v24.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v31.4s, v15.4s, v26.4s \n"
"prfm pldl1keep, [%5, #192] \n"
"ld1 {v24.4h, v25.4h, v26.4h}, [%5] \n" // r48 r49 r410
"shll2 v15.4s, %22.8h, #16 \n"
"fmla v28.4s, v14.4s, v16.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v29.4s, v14.4s, v18.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v30.4s, v14.4s, v20.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v31.4s, v14.4s, v22.4s \n"
"shll v14.4s, %23.4h, #16 \n"
"fmla v28.4s, v15.4s, v17.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v29.4s, v15.4s, v19.4s \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v30.4s, v15.4s, v21.4s \n"
"fmla v31.4s, v15.4s, v23.4s \n"
"shll2 v15.4s, %23.8h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"fmla v29.4s, v14.4s, v20.4s \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v30.4s, v14.4s, v22.4s \n"
"fmla v31.4s, v14.4s, v24.4s \n"
"shll v14.4s, %24.4h, #16 \n"
"fmla v28.4s, v15.4s, v19.4s \n"
"fmla v29.4s, v15.4s, v21.4s \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v30.4s, v15.4s, v23.4s \n"
"fmla v31.4s, v15.4s, v25.4s \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"fmla v29.4s, v14.4s, v22.4s \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v30.4s, v14.4s, v24.4s \n"
"fmla v31.4s, v14.4s, v26.4s \n"
"shrn v28.4h, v28.4s, #16 \n"
"shrn v29.4h, v29.4s, #16 \n"
"shrn v30.4h, v30.4s, #16 \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4) // %5
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"w"(_k00_01), // %12
"w"(_k02_03), // %13
"w"(_k04_10), // %14
"w"(_k11_12), // %15
"w"(_k13_14), // %16
"w"(_k20_21), // %17
"w"(_k22_23), // %18
"w"(_k24_30), // %19
"w"(_k31_32), // %20
"w"(_k33_34), // %21
"w"(_k40_41), // %22
"w"(_k42_43), // %23
"w"(_k44), // %24
"w"(_bias0) // %25
: "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
#else // __aarch64__
asm volatile(
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"pld [%1, #128] \n"
"vld1.f32 {d24-d25}, [%1] \n"
"vmov q13, q12 \n" // sum0 sum1
"vshll.u16 q8, d20, #16 \n" // k00
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2 :64]! \n" // r00 r01 r02 r03
"vmov q14, q12 \n"
"vmov q15, q12 \n" // sum2 sum3
"vshll.u16 q9, d21, #16 \n" // k01
"pld [%2, #256] \n"
"vld1.u16 {d12-d15}, [%2 :64]! \n" // r04 r05 r06 r07
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vmla.f32 q12, q8, q0 \n"
"vmla.f32 q13, q8, q2 \n"
"vshll.u16 q6, d14, #16 \n"
"vmla.f32 q14, q8, q4 \n"
"vmla.f32 q15, q8, q6 \n"
"vshll.u16 q10, d22, #16 \n" // k02
"vmla.f32 q12, q9, q1 \n"
"vmla.f32 q13, q9, q3 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q14, q9, q5 \n"
"vmla.f32 q15, q9, q7 \n"
"pld [%2, #128] \n"
"vld1.u16 {d2-d3}, [%2 :64]! \n" // r08 r09
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vmla.f32 q12, q10, q2 \n"
"vshll.u16 q11, d23, #16 \n" // k03
"vmla.f32 q13, q10, q4 \n"
"vshll.u16 q0, d2, #16 \n"
"vmla.f32 q14, q10, q6 \n"
"vmla.f32 q15, q10, q0 \n"
"vshll.u16 q10, d16, #16 \n" // k04
"vmla.f32 q12, q11, q3 \n"
"vmla.f32 q13, q11, q5 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q14, q11, q7 \n"
"vmla.f32 q15, q11, q1 \n"
"pld [%2, #64] \n"
"vld1.u16 {d5}, [%2 :64] \n" // r010
"vmla.f32 q12, q10, q4 \n"
"vshll.u16 q11, d17, #16 \n" // k10
"vmla.f32 q13, q10, q6 \n"
"vshll.u16 q2, d5, #16 \n"
"vmla.f32 q14, q10, q0 \n"
"pld [%3, #256] \n"
"vld1.u16 {d12-d15}, [%3 :64]! \n" // r10 r11 r12 r13
"vmla.f32 q15, q10, q2 \n"
"vshll.u16 q8, d18, #16 \n" // k11
"pld [%3, #256] \n"
"vld1.u16 {d4-d7}, [%3 :64]! \n" // r14 r15 r16 r17
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vmla.f32 q12, q11, q4 \n"
"vmla.f32 q13, q11, q6 \n"
"vshll.u16 q2, d6, #16 \n"
"vmla.f32 q14, q11, q0 \n"
"vmla.f32 q15, q11, q2 \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vmla.f32 q12, q8, q5 \n"
"vshll.u16 q9, d19, #16 \n" // k12
"vmla.f32 q13, q8, q7 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q8, q1 \n"
"vmla.f32 q15, q8, q3 \n"
"pld [%3, #128] \n"
"vld1.u16 {d10-d11}, [%3 :64]! \n" // r18 r19
"vmla.f32 q12, q9, q6 \n"
"vshll.u16 q8, d20, #16 \n" // k13
"vmla.f32 q13, q9, q0 \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q14, q9, q2 \n"
"vmla.f32 q15, q9, q4 \n"
"vshll.u16 q9, d21, #16 \n" // k14
"vmla.f32 q12, q8, q7 \n"
"vmla.f32 q13, q8, q1 \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q14, q8, q3 \n"
"vmla.f32 q15, q8, q5 \n"
"pld [%3, #64] \n"
"vld1.u16 {d13}, [%3 :64] \n" // r110
"vmla.f32 q12, q9, q0 \n"
"vshll.u16 q10, d22, #16 \n" // k20
"vmla.f32 q13, q9, q2 \n"
"vshll.u16 q6, d13, #16 \n"
"vmla.f32 q14, q9, q4 \n"
"pld [%4, #256] \n"
"vld1.u16 {d4-d7}, [%4 :64]! \n" // r20 r21 r22 r23
"vmla.f32 q15, q9, q6 \n"
"vshll.u16 q11, d23, #16 \n" // k21
"pld [%4, #256] \n"
"vld1.u16 {d12-d15}, [%4 :64]! \n" // r24 r25 r26 r27
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vmla.f32 q12, q10, q0 \n"
"vmla.f32 q13, q10, q2 \n"
"vshll.u16 q6, d14, #16 \n"
"vmla.f32 q14, q10, q4 \n"
"vmla.f32 q15, q10, q6 \n"
"vshll.u16 q10, d16, #16 \n" // k22
"vmla.f32 q12, q11, q1 \n"
"vmla.f32 q13, q11, q3 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q14, q11, q5 \n"
"vmla.f32 q15, q11, q7 \n"
"pld [%4, #128] \n"
"vld1.u16 {d2-d3}, [%4 :64]! \n" // r28 r29
"vmla.f32 q12, q10, q2 \n"
"vshll.u16 q11, d17, #16 \n" // k23
"vmla.f32 q13, q10, q4 \n"
"vshll.u16 q0, d2, #16 \n"
"vmla.f32 q14, q10, q6 \n"
"vmla.f32 q15, q10, q0 \n"
"vshll.u16 q8, d18, #16 \n" // k24
"vmla.f32 q12, q11, q3 \n"
"vmla.f32 q13, q11, q5 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q14, q11, q7 \n"
"vmla.f32 q15, q11, q1 \n"
"pld [%4, #64] \n"
"vld1.u16 {d5}, [%4 :64] \n" // r210
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vmla.f32 q12, q8, q4 \n"
"vshll.u16 q9, d19, #16 \n" // k30
"vmla.f32 q13, q8, q6 \n"
"vshll.u16 q2, d5, #16 \n"
"vmla.f32 q14, q8, q0 \n"
"pld [%5, #256] \n"
"vld1.u16 {d12-d15}, [%5 :64]! \n" // r30 r31 r32 r33
"vmla.f32 q15, q8, q2 \n"
"vshll.u16 q8, d20, #16 \n" // k31
"pld [%5, #256] \n"
"vld1.u16 {d4-d7}, [%5 :64]! \n" // r34 r35 r36 r37
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vmla.f32 q12, q9, q4 \n"
"vmla.f32 q13, q9, q6 \n"
"vshll.u16 q2, d6, #16 \n"
"vmla.f32 q14, q9, q0 \n"
"vmla.f32 q15, q9, q2 \n"
"vshll.u16 q9, d21, #16 \n" // k32
"vmla.f32 q12, q8, q5 \n"
"vmla.f32 q13, q8, q7 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q8, q1 \n"
"vmla.f32 q15, q8, q3 \n"
"pld [%5, #128] \n"
"vld1.u16 {d10-d11}, [%5 :64]! \n" // r38 r39
"vmla.f32 q12, q9, q6 \n"
"vshll.u16 q10, d22, #16 \n" // k33
"vmla.f32 q13, q9, q0 \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q14, q9, q2 \n"
"vmla.f32 q15, q9, q4 \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vmla.f32 q12, q10, q7 \n"
"vshll.u16 q11, d23, #16 \n" // k34
"vmla.f32 q13, q10, q1 \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q14, q10, q3 \n"
"vmla.f32 q15, q10, q5 \n"
"pld [%5, #64] \n"
"vld1.u16 {d13}, [%5 :64] \n" // r310
"vmla.f32 q12, q11, q0 \n"
"vshll.u16 q10, d16, #16 \n" // k40
"vmla.f32 q13, q11, q2 \n"
"vshll.u16 q6, d13, #16 \n"
"vmla.f32 q14, q11, q4 \n"
"pld [%6, #256] \n"
"vld1.u16 {d4-d7}, [%6 :64]! \n" // r40 r41 r42 r43
"vmla.f32 q15, q11, q6 \n"
"vshll.u16 q11, d17, #16 \n" // k41
"pld [%6, #256] \n"
"vld1.u16 {d12-d15}, [%6 :64]! \n" // r44 r45 r46 r47
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vmla.f32 q12, q10, q0 \n"
"vmla.f32 q13, q10, q2 \n"
"vshll.u16 q6, d14, #16 \n"
"vmla.f32 q14, q10, q4 \n"
"vmla.f32 q15, q10, q6 \n"
"vshll.u16 q8, d18, #16 \n" // k42
"vmla.f32 q12, q11, q1 \n"
"vmla.f32 q13, q11, q3 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q14, q11, q5 \n"
"pld [%7, #64] \n"
"vld1.u16 {d20}, [%7 :64] \n"
"vmla.f32 q15, q11, q7 \n"
"pld [%6, #128] \n"
"vld1.u16 {d2-d3}, [%6 :64]! \n" // r48 r49
"vmla.f32 q12, q8, q2 \n"
"vshll.u16 q9, d19, #16 \n" // k43
"vmla.f32 q13, q8, q4 \n"
"vshll.u16 q0, d2, #16 \n"
"vmla.f32 q14, q8, q6 \n"
"vmla.f32 q15, q8, q0 \n"
"vshll.u16 q8, d20, #16 \n" // k44
"vmla.f32 q12, q9, q3 \n"
"vmla.f32 q13, q9, q5 \n"
"vshll.u16 q1, d3, #16 \n"
"vmla.f32 q14, q9, q7 \n"
"vmla.f32 q15, q9, q1 \n"
"pld [%6, #64] \n"
"vld1.u16 {d5}, [%6 :64] \n" // r410
"vmla.f32 q12, q8, q4 \n"
"vmla.f32 q13, q8, q6 \n"
"vshll.u16 q2, d5, #16 \n"
"vmla.f32 q14, q8, q0 \n"
"vmla.f32 q15, q8, q2 \n"
"sub %7, %7, #192 \n" // kptr -= 24 * 4;
"sub %2, %2, #16 \n"
"sub %3, %3, #16 \n"
"sub %4, %4, #16 \n"
"sub %5, %5, #16 \n"
"sub %6, %6, #16 \n"
"vshrn.u32 d24, q12, #16 \n"
"vshrn.u32 d25, q13, #16 \n"
"vshrn.u32 d26, q14, #16 \n"
"vshrn.u32 d27, q15, #16 \n"
"vst1.u16 {d24-d27}, [%0 :64]! \n"
: "=r"(outptr0), // %0
"=r"(bias0_data_ptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(kptr) // %7
: "0"(outptr0),
"1"(bias0_data_ptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; j + 1 < outw; j += 2)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n" // r00 r01 r02 r03
"prfm pldl1keep, [%1, #192] \n"
"ld1 {v20.4h, v21.4h, v22.4h}, [%1] \n" // r04 r05 r06
"shll v14.4s, %12.4h, #16 \n"
"shll2 v15.4s, %12.8h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"mov v30.16b, %25.16b \n" // sum00
"mov v31.16b, %25.16b \n" // sum01
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmul v28.4s, v14.4s, v16.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmul v29.4s, v14.4s, v18.4s \n"
"shll v14.4s, %13.4h, #16 \n"
"fmla v30.4s, v15.4s, v17.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"shll2 v15.4s, %13.8h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v29.4s, v14.4s, v20.4s \n"
"shll v14.4s, %14.4h, #16 \n"
"fmla v30.4s, v15.4s, v19.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%2], #32 \n" // r10 r11 r12 r13
"shll v22.4s, v22.4h, #16 \n"
"fmla v31.4s, v15.4s, v21.4s \n"
"shll2 v15.4s, %14.8h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v29.4s, v14.4s, v22.4s \n"
"prfm pldl1keep, [%2, #192] \n"
"ld1 {v20.4h, v21.4h, v22.4h}, [%2] \n" // r14 r15 r16
"shll v14.4s, %15.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v30.4s, v15.4s, v16.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v31.4s, v15.4s, v18.4s \n"
"shll2 v15.4s, %15.8h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v29.4s, v14.4s, v19.4s \n"
"shll v14.4s, %16.4h, #16 \n"
"fmla v30.4s, v15.4s, v18.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"shll2 v15.4s, %16.8h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%3], #32 \n" // r20 r21 r22 r23
"shll v22.4s, v22.4h, #16 \n"
"fmla v29.4s, v14.4s, v21.4s \n"
"shll v14.4s, %17.4h, #16 \n"
"fmla v30.4s, v15.4s, v20.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v31.4s, v15.4s, v22.4s \n"
"prfm pldl1keep, [%3, #192] \n"
"ld1 {v20.4h, v21.4h, v22.4h}, [%3] \n" // r24 r25 r26
"shll2 v15.4s, %17.8h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v28.4s, v14.4s, v16.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v29.4s, v14.4s, v18.4s \n"
"shll v14.4s, %18.4h, #16 \n"
"fmla v30.4s, v15.4s, v17.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"shll2 v15.4s, %18.8h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v29.4s, v14.4s, v20.4s \n"
"shll v14.4s, %19.4h, #16 \n"
"fmla v30.4s, v15.4s, v19.4s \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%4], #32 \n" // r30 r31 r32 r33
"shll v22.4s, v22.4h, #16 \n"
"fmla v31.4s, v15.4s, v21.4s \n"
"shll2 v15.4s, %19.8h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v29.4s, v14.4s, v22.4s \n"
"prfm pldl1keep, [%4, #192] \n"
"ld1 {v20.4h, v21.4h, v22.4h}, [%4] \n" // r34 r35 r36
"shll v14.4s, %20.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v30.4s, v15.4s, v16.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v31.4s, v15.4s, v18.4s \n"
"shll2 v15.4s, %20.8h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v29.4s, v14.4s, v19.4s \n"
"shll v14.4s, %21.4h, #16 \n"
"fmla v30.4s, v15.4s, v18.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"shll2 v15.4s, %21.8h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%5], #32 \n" // r40 r41 r42 r43
"shll v22.4s, v22.4h, #16 \n"
"fmla v29.4s, v14.4s, v21.4s \n"
"shll v14.4s, %22.4h, #16 \n"
"fmla v30.4s, v15.4s, v20.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v31.4s, v15.4s, v22.4s \n"
"prfm pldl1keep, [%5, #192] \n"
"ld1 {v20.4h, v21.4h, v22.4h}, [%5] \n" // r44 r45 r46
"shll2 v15.4s, %22.8h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v28.4s, v14.4s, v16.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v29.4s, v14.4s, v18.4s \n"
"shll v14.4s, %23.4h, #16 \n"
"fmla v30.4s, v15.4s, v17.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"shll2 v15.4s, %23.8h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v29.4s, v14.4s, v20.4s \n"
"shll v14.4s, %24.4h, #16 \n"
"fmla v30.4s, v15.4s, v19.4s \n"
"fmla v31.4s, v15.4s, v21.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"fmla v29.4s, v14.4s, v22.4s \n"
"fadd v30.4s, v30.4s, v28.4s \n"
"fadd v31.4s, v31.4s, v29.4s \n"
"shrn v30.4h, v30.4s, #16 \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v30.4h, v31.4h}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4) // %5
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"w"(_k00_01), // %12
"w"(_k02_03), // %13
"w"(_k04_10), // %14
"w"(_k11_12), // %15
"w"(_k13_14), // %16
"w"(_k20_21), // %17
"w"(_k22_23), // %18
"w"(_k24_30), // %19
"w"(_k31_32), // %20
"w"(_k33_34), // %21
"w"(_k40_41), // %22
"w"(_k42_43), // %23
"w"(_k44), // %24
"w"(_bias0) // %25
: "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
#else // __aarch64__
asm volatile(
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"pld [%1, #128] \n"
"vld1.f32 {d24-d25}, [%1] \n"
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2 :64]! \n" // r00 r01 r02 r03
"vshll.u16 q8, d20, #16 \n" // k00
"pld [%2, #256] \n"
"vld1.u16 {d10-d12}, [%2 :64] \n" // r04 r05 r06
"vmov q13, q12 \n" // sum0 sum1
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q9, d21, #16 \n" // k01
"vmul.f32 q14, q8, q0 \n"
"vshll.u16 q4, d10, #16 \n"
"vmul.f32 q15, q8, q2 \n"
"vshll.u16 q10, d22, #16 \n" // k02
"vmla.f32 q12, q9, q1 \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q13, q9, q3 \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vmla.f32 q14, q10, q2 \n"
"vshll.u16 q11, d23, #16 \n" // k03
"vmla.f32 q15, q10, q4 \n"
"vshll.u16 q10, d16, #16 \n" // k04
"vmla.f32 q12, q11, q3 \n"
"vshll.u16 q6, d12, #16 \n"
"vmla.f32 q13, q11, q5 \n"
"vshll.u16 q11, d17, #16 \n" // k10
"vmla.f32 q14, q10, q4 \n"
"vmla.f32 q15, q10, q6 \n"
"pld [%3, #256] \n"
"vld1.u16 {d4-d7}, [%3 :64]! \n" // r10 r11 r12 r13
"vshll.u16 q8, d18, #16 \n" // k11
"pld [%3, #256] \n"
"vld1.u16 {d10-d12}, [%3 :64] \n" // r14 r15 r16
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q12, q11, q0 \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q13, q11, q2 \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vmla.f32 q14, q8, q1 \n"
"vshll.u16 q9, d19, #16 \n" // k12
"vmla.f32 q15, q8, q3 \n"
"vshll.u16 q8, d20, #16 \n" // k13
"vmla.f32 q12, q9, q2 \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q13, q9, q4 \n"
"vshll.u16 q9, d21, #16 \n" // k14
"vmla.f32 q14, q8, q3 \n"
"vshll.u16 q6, d12, #16 \n"
"vmla.f32 q15, q8, q5 \n"
"vshll.u16 q10, d22, #16 \n" // k20
"vmla.f32 q12, q9, q4 \n"
"vmla.f32 q13, q9, q6 \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"pld [%4, #256] \n"
"vld1.u16 {d4-d7}, [%4 :64]! \n" // r20 r21 r22 r23
"vshll.u16 q11, d23, #16 \n" // k21
"pld [%4, #256] \n"
"vld1.u16 {d10-d12}, [%4 :64] \n" // r24 r25 r26
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q10, q0 \n"
"vmla.f32 q15, q10, q2 \n"
"vshll.u16 q10, d16, #16 \n" // k22
"vmla.f32 q12, q11, q1 \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q13, q11, q3 \n"
"vshll.u16 q11, d17, #16 \n" // k23
"vmla.f32 q14, q10, q2 \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q15, q10, q4 \n"
"vshll.u16 q8, d18, #16 \n" // k24
"vmla.f32 q12, q11, q3 \n"
"vshll.u16 q6, d12, #16 \n"
"vmla.f32 q13, q11, q5 \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vmla.f32 q14, q8, q4 \n"
"vshll.u16 q9, d19, #16 \n" // k30
"vmla.f32 q15, q8, q6 \n"
"pld [%5, #256] \n"
"vld1.u16 {d4-d7}, [%5 :64]! \n" // r30 r31 r32 r33
"vshll.u16 q8, d20, #16 \n" // k31
"pld [%5, #256] \n"
"vld1.u16 {d10-d12}, [%5 :64] \n" // r34 r35 r36
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q12, q9, q0 \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q13, q9, q2 \n"
"vshll.u16 q9, d21, #16 \n" // k32
"vmla.f32 q14, q8, q1 \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q15, q8, q3 \n"
"vshll.u16 q10, d22, #16 \n" // k33
"vmla.f32 q12, q9, q2 \n"
"vshll.u16 q6, d12, #16 \n"
"vmla.f32 q13, q9, q4 \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vmla.f32 q14, q10, q3 \n"
"vshll.u16 q11, d23, #16 \n" // k34
"vmla.f32 q15, q10, q5 \n"
"vshll.u16 q10, d16, #16 \n" // k40
"vmla.f32 q12, q11, q4 \n"
"vmla.f32 q13, q11, q6 \n"
"pld [%6, #256] \n"
"vld1.u16 {d4-d7}, [%6 :64]! \n" // r40 r41 r42 r43
"vshll.u16 q11, d17, #16 \n" // k41
"pld [%6, #256] \n"
"vld1.u16 {d10-d12}, [%6 :64] \n" // r44 r45 r46
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q14, q10, q0 \n"
"vshll.u16 q4, d10, #16 \n"
"vmla.f32 q15, q10, q2 \n"
"vshll.u16 q8, d18, #16 \n" // k42
"vmla.f32 q12, q11, q1 \n"
"vshll.u16 q5, d11, #16 \n"
"vmla.f32 q13, q11, q3 \n"
"pld [%7, #64] \n"
"vld1.u16 {d20}, [%7 :64] \n"
"vmla.f32 q14, q8, q2 \n"
"vshll.u16 q9, d19, #16 \n" // k43
"vmla.f32 q15, q8, q4 \n"
"vshll.u16 q8, d20, #16 \n" // k44
"vmla.f32 q12, q9, q3 \n"
"vshll.u16 q6, d12, #16 \n"
"vmla.f32 q13, q9, q5 \n"
"vmla.f32 q14, q8, q4 \n"
"vmla.f32 q15, q8, q6 \n"
"vadd.f32 q12, q12, q14 \n"
"vadd.f32 q13, q13, q15 \n"
"sub %7, %7, #192 \n" // kptr -= 24 * 4;
"vshrn.u32 d24, q12, #16 \n"
"vshrn.u32 d25, q13, #16 \n"
"vst1.u16 {d24-d25}, [%0 :64]! \n"
: "=r"(outptr0), // %0
"=r"(bias0_data_ptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(kptr) // %7
: "0"(outptr0),
"1"(bias0_data_ptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; j < outw; j++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v16.4h, v17.4h}, [%1], #16 \n" // r00 r01
"prfm pldl1keep, [%1, #192] \n"
"ld1 {v18.4h, v19.4h, v20.4h}, [%1] \n" // r02 r03 r04
"shll v14.4s, %12.4h, #16 \n"
"mov v31.16b, %25.16b \n" // sum00
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"shll v20.4s, v20.4h, #16 \n"
"shll2 v15.4s, %12.8h, #16 \n"
"fmul v28.4s, v14.4s, v16.4s \n"
"shll v14.4s, %13.4h, #16 \n"
"fmul v29.4s, v15.4s, v17.4s \n"
"shll2 v15.4s, %13.8h, #16 \n"
"fmul v30.4s, v14.4s, v18.4s \n"
"shll v14.4s, %14.4h, #16 \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"shll2 v15.4s, %14.8h, #16 \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v16.4h, v17.4h}, [%2], #16 \n" // r10 r11
"prfm pldl1keep, [%2, #192] \n"
"ld1 {v18.4h, v19.4h, v20.4h}, [%2] \n" // r12 r13 r14
"shll v14.4s, %15.4h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v29.4s, v15.4s, v16.4s \n"
"shll2 v15.4s, %15.8h, #16 \n"
"fmla v30.4s, v14.4s, v17.4s \n"
"shll v14.4s, %16.4h, #16 \n"
"fmla v31.4s, v15.4s, v18.4s \n"
"shll2 v15.4s, %16.8h, #16 \n"
"fmla v28.4s, v14.4s, v19.4s \n"
"shll v14.4s, %17.4h, #16 \n"
"fmla v29.4s, v15.4s, v20.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v16.4h, v17.4h}, [%3], #16 \n" // r20 r21
"prfm pldl1keep, [%3, #192] \n"
"ld1 {v18.4h, v19.4h, v20.4h}, [%3] \n" // r22 r23 r24
"shll2 v15.4s, %17.8h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v30.4s, v14.4s, v16.4s \n"
"shll v14.4s, %18.4h, #16 \n"
"fmla v31.4s, v15.4s, v17.4s \n"
"shll2 v15.4s, %18.8h, #16 \n"
"fmla v28.4s, v14.4s, v18.4s \n"
"shll v14.4s, %19.4h, #16 \n"
"fmla v29.4s, v15.4s, v19.4s \n"
"shll2 v15.4s, %19.8h, #16 \n"
"fmla v30.4s, v14.4s, v20.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v16.4h, v17.4h}, [%4], #16 \n" // r30 r31
"prfm pldl1keep, [%4, #192] \n"
"ld1 {v18.4h, v19.4h, v20.4h}, [%4] \n" // r32 r33 r34
"shll v14.4s, %20.4h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v31.4s, v15.4s, v16.4s \n"
"shll2 v15.4s, %20.8h, #16 \n"
"fmla v28.4s, v14.4s, v17.4s \n"
"shll v14.4s, %21.4h, #16 \n"
"fmla v29.4s, v15.4s, v18.4s \n"
"shll2 v15.4s, %21.8h, #16 \n"
"fmla v30.4s, v14.4s, v19.4s \n"
"shll v14.4s, %22.4h, #16 \n"
"fmla v31.4s, v15.4s, v20.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v16.4h, v17.4h}, [%5], #16 \n" // r40 r41
"prfm pldl1keep, [%5, #192] \n"
"ld1 {v18.4h, v19.4h, v20.4h}, [%5] \n" // r42 r43 r44
"shll2 v15.4s, %22.8h, #16 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v28.4s, v14.4s, v16.4s \n"
"shll v14.4s, %23.4h, #16 \n"
"fmla v29.4s, v15.4s, v17.4s \n"
"shll2 v15.4s, %23.8h, #16 \n"
"fmla v30.4s, v14.4s, v18.4s \n"
"shll v14.4s, %24.4h, #16 \n"
"fmla v31.4s, v15.4s, v19.4s \n"
"fmla v28.4s, v14.4s, v20.4s \n"
"fadd v29.4s, v29.4s, v30.4s \n"
"fadd v31.4s, v31.4s, v28.4s \n"
"fadd v31.4s, v31.4s, v29.4s \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v31.4h}, [%0], #8 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4) // %5
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"w"(_k00_01), // %12
"w"(_k02_03), // %13
"w"(_k04_10), // %14
"w"(_k11_12), // %15
"w"(_k13_14), // %16
"w"(_k20_21), // %17
"w"(_k22_23), // %18
"w"(_k24_30), // %19
"w"(_k31_32), // %20
"w"(_k33_34), // %21
"w"(_k40_41), // %22
"w"(_k42_43), // %23
"w"(_k44), // %24
"w"(_bias0) // %25
: "memory", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
#else // __aarch64__
asm volatile(
"pld [%2, #128] \n"
"vld1.u16 {d2-d3}, [%2 :64]! \n" // r00 r01
"pld [%2, #192] \n"
"vld1.u16 {d6-d8}, [%2 :64] \n" // r02 r03 r04
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q8, d20, #16 \n" // k00
"pld [%1, #128] \n"
"vld1.f32 {d24-d25}, [%1] \n" // sum0
"vshll.u16 q9, d21, #16 \n" // k01
"vmul.f32 q13, q8, q0 \n"
"vshll.u16 q10, d22, #16 \n" // k02
"vmul.f32 q14, q9, q1 \n"
"pld [%3, #128] \n"
"vld1.u16 {d14-d15}, [%3 :64]! \n" // r10 r11
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d8, #16 \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q11, d23, #16 \n" // k03
"vmul.f32 q15, q10, q2 \n"
"vshll.u16 q10, d16, #16 \n" // k04
"vmla.f32 q12, q11, q3 \n"
"vshll.u16 q11, d17, #16 \n" // k10
"vmla.f32 q13, q10, q4 \n"
"pld [%3, #192] \n"
"vld1.u16 {d8-d10}, [%3 :64] \n" // r12 r13 r14
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vshll.u16 q8, d18, #16 \n" // k11
"vmla.f32 q14, q11, q6 \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q9, d19, #16 \n" // k12
"vmla.f32 q15, q8, q7 \n"
"pld [%4, #128] \n"
"vld1.u16 {d2-d3}, [%4 :64]! \n" // r20 r21
"vshll.u16 q3, d8, #16 \n"
"vshll.u16 q4, d9, #16 \n"
"vshll.u16 q5, d10, #16 \n"
"vshll.u16 q8, d20, #16 \n" // k13
"vmla.f32 q12, q9, q3 \n"
"vshll.u16 q9, d21, #16 \n" // k14
"vmla.f32 q13, q8, q4 \n"
"vshll.u16 q10, d22, #16 \n" // k20
"vmla.f32 q14, q9, q5 \n"
"pld [%4, #192] \n"
"vld1.u16 {d6-d8}, [%4 :64] \n" // r22 r23 r24
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q11, d23, #16 \n" // k21
"vmla.f32 q15, q10, q0 \n"
"vshll.u16 q10, d16, #16 \n" // k22
"vmla.f32 q12, q11, q1 \n"
"pld [%5, #128] \n"
"vld1.u16 {d14-d15}, [%5 :64]! \n" // r30 r31
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d8, #16 \n"
"vshll.u16 q11, d17, #16 \n" // k23
"vmla.f32 q13, q10, q2 \n"
"vshll.u16 q8, d18, #16 \n" // k24
"vmla.f32 q14, q11, q3 \n"
"pld [%7, #256] \n"
"vld1.u16 {d20-d23}, [%7 :64]! \n"
"vshll.u16 q9, d19, #16 \n" // k30
"vmla.f32 q15, q8, q4 \n"
"pld [%5, #192] \n"
"vld1.u16 {d8-d10}, [%5 :64] \n" // r32 r33 r34
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vshll.u16 q8, d20, #16 \n" // k31
"vmla.f32 q12, q9, q6 \n"
"vshll.u16 q9, d21, #16 \n" // k32
"vmla.f32 q13, q8, q7 \n"
"pld [%6, #128] \n"
"vld1.u16 {d2-d3}, [%6 :64]! \n" // r40 r41
"vshll.u16 q3, d8, #16 \n"
"vshll.u16 q4, d9, #16 \n"
"vshll.u16 q5, d10, #16 \n"
"vshll.u16 q10, d22, #16 \n" // k33
"vmla.f32 q14, q9, q3 \n"
"pld [%7, #256] \n"
"vld1.u16 {d16-d19}, [%7 :64]! \n"
"vshll.u16 q11, d23, #16 \n" // k34
"vmla.f32 q15, q10, q4 \n"
"vshll.u16 q10, d16, #16 \n" // k40
"vmla.f32 q12, q11, q5 \n"
"pld [%6, #192] \n"
"vld1.u16 {d6-d8}, [%6 :64] \n" // r42 r43 r44
"vshll.u16 q0, d2, #16 \n"
"vshll.u16 q1, d3, #16 \n"
"vshll.u16 q11, d17, #16 \n" // k41
"vmla.f32 q13, q10, q0 \n"
"vshll.u16 q8, d18, #16 \n" // k42
"vmla.f32 q14, q11, q1 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d8, #16 \n"
"pld [%7, #64] \n"
"vld1.u16 {d20}, [%7 :64] \n"
"vshll.u16 q9, d19, #16 \n" // k43
"vmla.f32 q15, q8, q2 \n"
"vshll.u16 q8, d20, #16 \n" // k44
"vmla.f32 q12, q9, q3 \n"
"vmla.f32 q13, q8, q4 \n"
"vadd.f32 q14, q14, q15 \n"
"vadd.f32 q12, q12, q13 \n"
"sub %7, %7, #192 \n" // kptr -= 24 * 4;
"vadd.f32 q12, q12, q14 \n"
"vshrn.u32 d24, q12, #16 \n"
"vst1.u16 {d24}, [%0 :64]! \n"
: "=r"(outptr0), // %0
"=r"(bias0_data_ptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(kptr) // %7
: "0"(outptr0),
"1"(bias0_data_ptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
}
}
}
|
GB_binop__ge_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ge_int8)
// A.*B function (eWiseMult): GB (_AemultB_01__ge_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__ge_int8)
// A.*B function (eWiseMult): GB (_AemultB_03__ge_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_int8)
// A*D function (colscale): GB (_AxD__ge_int8)
// D*A function (rowscale): GB (_DxB__ge_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__ge_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__ge_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_int8)
// C=scalar+B GB (_bind1st__ge_int8)
// C=scalar+B' GB (_bind1st_tran__ge_int8)
// C=A+scalar GB (_bind2nd__ge_int8)
// C=A'+scalar GB (_bind2nd_tran__ge_int8)
// C type: bool
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GE || GxB_NO_INT8 || GxB_NO_GE_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// GE is none of these, so no C += A+B dense kernel exists for this
// operator; this "(none)" stub is never compiled (guarded by #if 0).
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no accumulator.
// Cx [p] = (Ax [p] >= Bx [p]) for every entry p.
GrB_Info GB (_Cdense_ewise3_noaccum__ge_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // this kernel was disabled via GB_control.h
    return (GrB_NO_VALUE) ;
    #else
    // the template iterates over all entries of the three dense matrices
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.
// The GE operator is not a valid accumulator for this kernel, so the
// template body is compiled out (#if 0) and the function is a no-op.
GrB_Info GB (_Cdense_accumB__ge_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
// As with _Cdense_accumB, GE is not a valid accumulator, so the template
// body is compiled out (#if 0) and the function is a no-op.
GrB_Info GB (_Cdense_accumb__ge_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale A by the diagonal matrix D.
// C has the same pattern as A; Cx [p] = (Ax [p] >= D(j,j)) for an entry
// in column j.
GrB_Info GB (_AxD__ge_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C->x has already been allocated by the caller; fill it in place
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale B by the diagonal matrix D.
// C has the same pattern as B; Cx [p] = (D(i,i) >= Bx [p]) for an entry
// in row i.
GrB_Info GB (_DxB__ge_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C->x has already been allocated by the caller; fill it in place
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, where the pattern of C is the set
// union of the patterns of A and B.  Entries present in only one of the
// two inputs are copied to C unmodified.
GrB_Info GB (_AaddB__ge_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // per-matrix workspaces for slicing entries across tasks
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, where the pattern of C is the set
// intersection of the patterns of A and B.
GrB_Info GB (_AemultB_01__ge_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 0 for GE (the flipped case z=le(x,y) is handled by
// the caller choosing the other operator), so only the unflipped
// template is compiled here.
GrB_Info GB (_AemultB_02__ge_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and both A and B are
// bitmap/full; C takes its pattern from the mask M.
GrB_Info GB (_AemultB_03__ge_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__ge_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the binary operator with the scalar bound to the
// first argument: Cx [p] = (x >= Bx [p]) for each entry p present in B.
GrB_Info GB (_bind1st__ge_int8)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap of B (GBB is always
        // true when Bb is NULL, i.e. B is not bitmap)
        if (!GBB (Bb, p)) continue ;
        int8_t bij = Bx [p] ;
        Cx [p] = (x >= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the binary operator with the scalar bound to the
// second argument: Cx [p] = (Ax [p] >= y) for each entry p present in A.
GrB_Info GB (_bind2nd__ge_int8)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap of A
        if (!GBB (Ab, p)) continue ;
        int8_t aij = Ax [p] ;
        Cx [p] = (aij >= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (x >= aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with the scalar as
// the first argument, using the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind1st_tran__ge_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef GB_ATYPE
    #define GB_ATYPE \
        int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (aij >= y) ; \
}
// C = op (A', y): transpose A and apply the operator with the scalar as
// the second argument, using the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind2nd_tran__ge_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
elemwise_binary_scalar_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2016 by Contributors
* \file elemwise_binary_scalar_op.h
* \brief Function definition of elementwise binary scalar operators
*/
#ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
#define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
#include <mxnet/operator_util.h>
#include <vector>
#include <utility>
#include <string>
#include "../mshadow_op.h"
#include "../elemwise_op_common.h"
#include "elemwise_unary_op.h"
namespace mxnet {
namespace op {
/*!
 * \brief Operator parameter for a binary op against a scalar.
 *
 * The scalar is always stored as a double; \p is_int records whether the
 * user supplied an integer, which drives dtype promotion in
 * NumpyBinaryScalarType (an int tensor + float scalar promotes to float64).
 */
struct NumpyBinaryScalarParam : public dmlc::Parameter<NumpyBinaryScalarParam> {
  double scalar;  // scalar operand value
  bool is_int;    // true if the scalar was given as an integer
  DMLC_DECLARE_PARAMETER(NumpyBinaryScalarParam) {
    DMLC_DECLARE_FIELD(scalar)
    .set_default(1)
    .describe("Scalar input value");
    DMLC_DECLARE_FIELD(is_int)
    .set_default(true)
    .describe("Indicate whether scalar input is int type");
  }
  // Serialize both fields into the attribute dictionary (used for caching
  // and symbol export).
  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
    std::ostringstream scalar_s, is_int_s;
    scalar_s << scalar;
    is_int_s << is_int;
    (*dict)["scalar"] = scalar_s.str();
    (*dict)["is_int"] = is_int_s.str();
  }
};
/*!
 * \brief Infer the output dtype of a tensor-vs-scalar binary op.
 *
 * Promotion rules: a bool tensor takes the scalar's type (int64 for an
 * int scalar, float64 otherwise); an integer tensor combined with a float
 * scalar promotes to float64; any other tensor keeps its own dtype (and
 * the inferred output dtype is propagated back to the input).
 *
 * \return true once the output dtype has been resolved (is not -1).
 */
inline bool NumpyBinaryScalarType(const nnvm::NodeAttrs& attrs,
                                  std::vector<int>* in_attrs,
                                  std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed);
  const bool scalar_is_int = param.is_int;
  const int tensor_type = in_attrs->at(0);
  if (tensor_type == mshadow::kBool) {
    // bool tensor: result type is dictated entirely by the scalar.
    TYPE_ASSIGN_CHECK(*out_attrs, 0, scalar_is_int ? mshadow::kInt64 : mshadow::kFloat64);
  } else if (!scalar_is_int && common::is_int(tensor_type)) {
    // integer tensor + floating-point scalar: promote to float64.
    TYPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::kFloat64);
  } else {
    // same-kind operands: output matches input, and vice versa.
    TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
    TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
  }
  return out_attrs->at(0) != -1;
}
/*!
 * \brief Elementwise binary operations between a tensor and a scalar.
 *
 * Provides forward kernels (dense, row-sparse, CSR storage), logic and
 * integer variants, and the backward pass.  The scalar comes from
 * NumpyBinaryScalarParam in attrs.parsed.
 */
class BinaryScalarOp : public UnaryOp {
  /*! \brief Tensor operation against a scalar with a dense result.
   *
   * Row-sparse input: stored rows get OP(value, scalar); rows absent from
   * the input (implicit zeros) are filled with the precomputed
   * OP(0, scalar) value.
   */
  template<typename OP, typename DType, typename IType>
  static void ComputeExDenseResultRsp(mshadow::Stream<cpu> *stream,
                                      const nnvm::NodeAttrs &attrs,
                                      const OpContext &ctx,
                                      const NDArray &input,
                                      const OpReqType req,
                                      const NDArray &output) {
    const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed);
    const double alpha = param.scalar;
    CHECK_EQ(output.shape(), input.shape());
    const int64_t row_count = output.shape()[0];
    const int64_t items_per_row = output.shape().Size() / row_count;
    // Fill value for rows that are absent from the sparse input.
    const DType result_for_zero = OP::Map(DType(0), DType(alpha));
    mshadow::Tensor<cpu, 1, DType> input_data = input.data().FlatTo1D<cpu, DType>(stream);
    mshadow::Tensor<cpu, 1, DType> output_data = output.data().FlatTo1D<cpu, DType>(stream);
    const int64_t sparse_row_count = input.aux_shape(rowsparse::kIdx).Size();
    if (sparse_row_count != row_count) {
      mshadow::Tensor<cpu, 1, IType> row_indexes = input.aux_data(
        rowsparse::kIdx).FlatTo1D<cpu, IType>(stream);
      int64_t input_iter = 0;   // position within the stored (sparse) rows
      int64_t output_row = 0;   // next dense output row to produce
      IType next_input_row = 0;
      while (output_row < row_count) {
        // Row index of the next stored row, or row_count when exhausted.
        next_input_row = input_iter < sparse_row_count ? int64_t(row_indexes[input_iter])
                                                       : row_count;
        // Split up into blocks of contiguous data and do those together
        // Do contiguous dense blocks
        const int64_t dense_block_count = next_input_row - output_row;
        if (dense_block_count > 0) {
          // Broadcast the OP(0, scalar) fill value over the missing rows.
          MXNET_ASSIGN_REQ_SWITCH(req, Req, {
            mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, cpu>::Launch(
              stream,
              items_per_row * dense_block_count,
              output_data.dptr_ + items_per_row * output_row,
              result_for_zero);
          });
          output_row += dense_block_count;
          continue;
        }
        // Do contiguous sparse blocks
        int64_t next_non_contiguous_sparse = input_iter;
        while (next_non_contiguous_sparse < sparse_row_count - 1) {
          if (row_indexes[next_non_contiguous_sparse + 1]
              != row_indexes[next_non_contiguous_sparse] + 1) {
            break;
          }
          ++next_non_contiguous_sparse;
        }
        const int64_t sparse_block_count = next_non_contiguous_sparse - input_iter + 1;
        if (sparse_block_count > 0) {
          // Apply OP(value, scalar) to a run of consecutively-stored rows.
          MXNET_ASSIGN_REQ_SWITCH(req, Req, {
            mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(
              stream,
              items_per_row * sparse_block_count,
              &output_data.dptr_[items_per_row * output_row],
              &input_data.dptr_[items_per_row * input_iter],
              DType(alpha));
          });
          output_row += sparse_block_count;
          input_iter += sparse_block_count;
          continue;
        }
      }
    } else {
      // All rows exist (eventually we don't have to do complex
      // things to call GPU kernels because we don't need to access row indices)
      MXNET_ASSIGN_REQ_SWITCH(req, Req, {
        mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(
          stream,
          items_per_row * row_count,
          output_data.dptr_,
          input_data.dptr_,
          DType(alpha));
      });
    }
  }

  /*! \brief Tensor operation against a scalar with a dense result.
   *  GPU overload: not implemented, aborts at runtime. */
  template<typename OP, typename DType, typename IType>
  static void ComputeExDenseResultRsp(mshadow::Stream<gpu> *stream,
                                      const nnvm::NodeAttrs &attrs,
                                      const OpContext &ctx,
                                      const NDArray &input,
                                      const OpReqType req,
                                      const NDArray &output) {
    LOG(FATAL) << "NOT IMPLEMENTED";
  }

  /*! \brief Tensor operation against a scalar with a dense result.
   *
   * CSR input: the dense output is first filled with OP(0, scalar), then
   * each stored element overwrites its (row, col) position with
   * OP(value, scalar).
   */
  template<typename OP, typename DType, typename IType, typename CType>
  static void ComputeExDenseResultCsr(mshadow::Stream<cpu> *stream,
                                      const nnvm::NodeAttrs &attrs,
                                      const OpContext &ctx,
                                      const NDArray &input,
                                      const OpReqType req,
                                      const NDArray &output) {
    CHECK_EQ(output.shape(), input.shape());
    const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed);
    const double alpha = param.scalar;
    const DType dense_fill_val = OP::Map(DType(0), DType(alpha));
    const TBlob column_indexes = input.aux_data(csr::kIdx);
    const size_t item_count = column_indexes.Size();
    // Pre-fill dense with 0-input/output value
    FillDense<DType>(stream, output.shape().Size(), dense_fill_val,
                     req, output.data().dptr<DType>());
    mshadow::Tensor<cpu, 2, DType> out = AsRowise2D<DType>(stream, output.data());
    if (item_count) {
      const DType *in = input.data().dptr<DType>();
      const IType *column_indexes_ptr = column_indexes.dptr<IType>();
      const auto row_count = static_cast<size_t>(input.shape()[0]);
      const TBlob row_starts = input.aux_data(csr::kIndPtr);
      const CType *row_starts_ptr = row_starts.dptr<CType>();
      #pragma omp parallel for
      for (int i = 0; i < static_cast<int>(row_count); ++i) {
        const bool last_row = i == static_cast<int>(row_count) - 1;
        // Split up into blocks of contiguous data and do those together
        const size_t row_item_start_iter = row_starts_ptr[i];
        const size_t input_items_this_row = !last_row
                                            ? static_cast<size_t>(row_starts_ptr[i + 1])
                                              - row_item_start_iter
                                            : item_count - row_item_start_iter;
        if (input_items_this_row) {
          const IType *this_row_column_indexes = column_indexes_ptr + row_item_start_iter;
          const DType *row_data_start = in + row_item_start_iter;
          DType *output_this_row = out[i].dptr_;
          // More overhead to use OMP for small loops, so don't
          // NOTE(review): this inner `parallel for` sits inside the outer
          // parallel region; with nested parallelism disabled (the OpenMP
          // default) it runs serially — presumably intentional, confirm.
          if (input_items_this_row > 1000) {
            #pragma omp parallel for
            for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) {
              const IType col = this_row_column_indexes[j];
              const DType val = row_data_start[j];
              output_this_row[col] = OP::Map(val, DType(alpha));
            }
          } else {
            for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) {
              const IType col = this_row_column_indexes[j];
              const DType val = row_data_start[j];
              output_this_row[col] = OP::Map(val, DType(alpha));
            }
          }
        }
      }
    }
  }

  /*! \brief Tensor operation against a scalar with a dense result.
   *  GPU overload: not implemented, aborts at runtime. */
  template<typename OP, typename DType, typename IType, typename CType>
  static void ComputeExDenseResultCsr(mshadow::Stream<gpu> *stream,
                                      const nnvm::NodeAttrs &attrs,
                                      const OpContext &ctx,
                                      const NDArray &input,
                                      const OpReqType req,
                                      const NDArray &output) {
    LOG(FATAL) << "NOT IMPLEMENTED";
  }

  // Dispatch sparse-input/dense-output computation by input storage type.
  // NOTE(review): `output` is taken by value (no `&`) unlike the other
  // overloads; NDArray copies are shallow, but confirm this is intended.
  template<typename xpu, typename OP, typename DType, typename IType>
  static void ComputeExDenseResult(const nnvm::NodeAttrs &attrs,
                                   const OpContext &ctx,
                                   const NDArray &input,
                                   const OpReqType req,
                                   const NDArray output) {
    mshadow::Stream<xpu> *stream = ctx.get_stream<xpu>();
    CHECK_EQ(output.storage_type(), kDefaultStorage);
    switch (input.storage_type()) {
      case kRowSparseStorage: {
        ComputeExDenseResultRsp<OP, DType, IType>(stream, attrs, ctx, input, req, output);
        break;
      }
      case kCSRStorage: {
        MSHADOW_IDX_TYPE_SWITCH(input.aux_data(csr::kIndPtr).type_flag_, CType, {
          ComputeExDenseResultCsr<OP, DType, IType, CType>(stream, attrs, ctx, input, req, output);
        });
        break;
      }
      default:
        CHECK(false) << "Unsupported sparse storage type";
        break;
    }
  }

 public:
  /*! \brief Dense forward on CPU: out = OP(in, scalar), casting the input
   *  to the (possibly promoted) output dtype first when needed. */
  template<typename OP>
  static void Compute_(const nnvm::NodeAttrs &attrs,
                       const OpContext &ctx,
                       mshadow::Stream<cpu>* s,
                       const std::vector<TBlob> &inputs,
                       const std::vector<OpReqType> &req,
                       const std::vector<TBlob> &outputs) {
    DCHECK_EQ(inputs.size(), 1);
    DCHECK_EQ(outputs.size(), 1);
    using namespace mshadow;
    using namespace mshadow::expr;
    TBlob temp_tblob;
    const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed);
    bool scalar_is_int = param.is_int;
    const double alpha = param.scalar;
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      if ((common::is_int(inputs[0].type_flag_) && !scalar_is_int) ||
          (inputs[0].type_flag_ == kBool)) {
        // dtype promotion: cast the input into temp space of the output
        // dtype before applying the kernel
        Tensor<cpu, 1, DType> temp_tensor =
          ctx.requested[0].get_space_typed<cpu, 1, DType>(Shape1(inputs[0].Size()), s);
        temp_tblob = TBlob(temp_tensor);
        CastCompute<cpu>(attrs, ctx, {inputs[0]}, {kWriteTo}, {temp_tblob});
      } else {
        temp_tblob = inputs[0];
      }
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(
          s, inputs[0].Size(), outputs[0].dptr<DType>(), temp_tblob.dptr<DType>(), DType(alpha));
      });
    });
  }

  /*! \brief Dense forward entry point: fetch the stream and delegate. */
  template<typename xpu, typename OP>
  static void Compute(const nnvm::NodeAttrs &attrs,
                      const OpContext &ctx,
                      const std::vector<TBlob> &inputs,
                      const std::vector<OpReqType> &req,
                      const std::vector<TBlob> &outputs) {
    mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
    Compute_<OP>(attrs, ctx, s, inputs, req, outputs);
  }

  /*! \brief Forward restricted to integer dtypes (no promotion/cast path). */
  template<typename xpu, typename OP>
  static void ComputeInt(const nnvm::NodeAttrs &attrs,
                         const OpContext &ctx,
                         const std::vector<TBlob> &inputs,
                         const std::vector<OpReqType> &req,
                         const std::vector<TBlob> &outputs) {
    DCHECK_EQ(inputs.size(), 1);
    DCHECK_EQ(outputs.size(), 1);
    using namespace mshadow;
    using namespace mshadow::expr;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed);
    const double alpha = param.scalar;
    MXNET_INT_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
          s, inputs[0].Size(), outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), DType(alpha));
      });
    });
  }

  /*! \brief Forward for comparison/logic ops: output is always bool;
   *  int input with a float scalar is cast to double first. */
  template<typename xpu, typename OP>
  static void ComputeLogic(const nnvm::NodeAttrs &attrs,
                           const OpContext &ctx,
                           const std::vector<TBlob> &inputs,
                           const std::vector<OpReqType> &req,
                           const std::vector<TBlob> &outputs) {
    DCHECK_EQ(inputs.size(), 1);
    DCHECK_EQ(outputs.size(), 1);
    using namespace mshadow;
    using namespace mshadow::expr;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed);
    bool scalar_is_int = param.is_int;
    const double alpha = param.scalar;
    TBlob temp_tblob;
    if (common::is_int(inputs[0].type_flag_) && !scalar_is_int) {
      // compare in double precision to avoid losing the scalar's fraction
      Tensor<xpu, 1, double> temp_tensor =
        ctx.requested[0].get_space_typed<xpu, 1, double>(Shape1(inputs[0].Size()), s);
      temp_tblob = TBlob(temp_tensor);
      CastCompute<xpu>(attrs, ctx, {inputs[0]}, {kWriteTo}, {temp_tblob});
    } else {
      temp_tblob = inputs[0];
    }
    MSHADOW_TYPE_SWITCH_WITH_BOOL(temp_tblob.type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
          s, inputs[0].Size(), outputs[0].dptr<bool>(), temp_tblob.dptr<DType>(), DType(alpha));
      });
    });
  }

  /*! \brief Forward for sparse storage: same-storage in/out maps onto the
   *  dense kernel over stored values; sparse-in/dense-out densifies. */
  template<typename xpu, typename OP>
  static void ComputeEx(const nnvm::NodeAttrs &attrs,
                        const OpContext &ctx,
                        const std::vector<NDArray> &inputs,
                        const std::vector<OpReqType> &req,
                        const std::vector<NDArray> &outputs) {
    DCHECK_EQ(inputs.size(), 1);
    DCHECK_EQ(outputs.size(), 1);
    const auto in_stype = inputs[0].storage_type();
    const auto out_stype = outputs[0].storage_type();
    if (req[0] == kNullOp) {
      return;
    }
    if ((in_stype == kRowSparseStorage && out_stype == kRowSparseStorage) ||
        (in_stype == kCSRStorage && out_stype == kCSRStorage)) {
      // csr -> csr, or rsp -> rsp
      UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Compute<xpu, OP>);
    } else if (out_stype == kDefaultStorage &&
               (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
      // NOTE(review): IType is switched on aux 0 (rowsparse::kIdx) even
      // when the input is CSR, where aux 0 is kIndPtr and the column
      // indices are aux 1 — fine if both aux dtypes match; confirm.
      MSHADOW_TYPE_SWITCH(outputs[0].data().type_flag_, DType, {
        MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, {
          ComputeExDenseResult<xpu, OP, DType, IType>(attrs, ctx, inputs[0], req[0], outputs[0]);
        });
      });
    } else {
      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
    }
  }

  /*! \brief Sparse-storage forward for logic ops: only same-storage
   *  in/out is supported; anything else logs unimplemented. */
  template<typename xpu, typename OP>
  static void LogicComputeEx(const nnvm::NodeAttrs &attrs,
                             const OpContext &ctx,
                             const std::vector<NDArray> &inputs,
                             const std::vector<OpReqType> &req,
                             const std::vector<NDArray> &outputs) {
    DCHECK_EQ(inputs.size(), 1);
    DCHECK_EQ(outputs.size(), 1);
    const auto in_stype = inputs[0].storage_type();
    const auto out_stype = outputs[0].storage_type();
    if (req[0] == kNullOp) {
      return;
    }
    if ((in_stype == kRowSparseStorage && out_stype == kRowSparseStorage) ||
        (in_stype == kCSRStorage && out_stype == kCSRStorage)) {
      // csr -> csr, or rsp -> rsp
      UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Compute<xpu, OP>);
    } else {
      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
    }
  }

  /*! \brief Backward on CPU: grad_in = OP'(out-grad, forward-input, scalar)
   *  via the tuned backward_grad kernel wrapper. */
  template<typename OP>
  static void Backward_(const nnvm::NodeAttrs &attrs,
                        mshadow::Stream<cpu>* s,
                        const std::vector<TBlob> &inputs,
                        const std::vector<OpReqType> &req,
                        const std::vector<TBlob> &outputs) {
    using namespace mshadow;
    using namespace mshadow::expr;
    const NumpyBinaryScalarParam& param = nnvm::get<NumpyBinaryScalarParam>(attrs.parsed);
    const double alpha = param.scalar;
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        mxnet::op::mxnet_op::Kernel<mxnet::op::mxnet_op::op_with_req<
          mxnet::op::mxnet_op::backward_grad_tuned<OP>, Req>, cpu>::
            Launch(s, inputs[0].Size(), outputs[0].dptr<DType>(),
                   inputs[0].dptr<DType>(), inputs[1].dptr<DType>(),
                   DType(alpha));
      });
    });
  }

  /*! \brief Backward entry point: fetch the stream and delegate. */
  template<typename xpu, typename OP>
  static void Backward(const nnvm::NodeAttrs &attrs,
                       const OpContext &ctx,
                       const std::vector<TBlob> &inputs,
                       const std::vector<OpReqType> &req,
                       const std::vector<TBlob> &outputs) {
    using namespace mshadow;
    using namespace mshadow::expr;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    Backward_<OP>(attrs, s, inputs, req, outputs);
  }
};
// Registration boilerplate for operators taking one tensor input plus a
// scalar attribute (NumpyBinaryScalarParam).  The output may alias the
// input (FInplaceOption {0,0}); temp workspace is requested for kernels
// that need scratch memory.
#define MXNET_OPERATOR_REGISTER_BINARY_SCALAR(name)                 \
  NNVM_REGISTER_OP(name)                                            \
  .set_num_inputs(1)                                                \
  .set_num_outputs(1)                                               \
  .set_attr_parser(ParamParser<NumpyBinaryScalarParam>)             \
  .set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<1, 1>) \
  .set_attr<nnvm::FInferType>("FInferType", NumpyBinaryScalarType)  \
  .set_attr<nnvm::FInplaceOption>("FInplaceOption",                 \
    [](const NodeAttrs& attrs){                                     \
      return std::vector<std::pair<int, int> >{{0, 0}};             \
    })                                                              \
  .set_attr<FResourceRequest>("FResourceRequest",                   \
    [](const NodeAttrs& attrs) {                                    \
      return std::vector<ResourceRequest>{ResourceRequest::kTempSpace}; \
    })                                                              \
  .add_argument("data", "NDArray-or-Symbol", "source input")        \
  .add_arguments(NumpyBinaryScalarParam::__FIELDS__())
#if MXNET_USE_CUDA
// Forward functor for binary-scalar ops built via runtime compilation
// (RTC); only declared when CUDA is enabled.  `OP` names the elementwise
// operation — presumably baked into the generated kernel source; the
// definitions live in a .cu translation unit.
struct BinaryScalarRTCCompute {
  std::string OP;
  // Dense (TBlob) overload.
  void operator()(const nnvm::NodeAttrs& attrs,
                  const OpContext& ctx,
                  const std::vector<TBlob>& inputs,
                  const std::vector<OpReqType>& req,
                  const std::vector<TBlob>& outputs);
  // NDArray overload.
  void operator()(const nnvm::NodeAttrs& attrs,
                  const OpContext& ctx,
                  const std::vector<NDArray>& inputs,
                  const std::vector<OpReqType>& req,
                  const std::vector<NDArray>& outputs);
};
// Backward counterpart of BinaryScalarRTCCompute: `OP` names the gradient
// operation compiled at runtime; the definition lives in a .cu file.
struct BinaryScalarRTCBackward {
  std::string OP;
  void operator()(const nnvm::NodeAttrs& attrs,
                  const OpContext& ctx,
                  const std::vector<TBlob>& inputs,
                  const std::vector<OpReqType>& req,
                  const std::vector<TBlob>& outputs);
};
#endif
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
|
progress_counter.h | /*
* Copyright (C) 2015, Nils Moehrle
* TU Darmstadt - Graphics, Capture and Massively Parallel Computing
* All rights reserved.
*
* This software may be modified and distributed under the terms
* of the BSD 3-Clause license. See the LICENSE.txt file for details.
*/
#ifndef TEX_PROGRESSCOUNTER_HEADER
#define TEX_PROGRESSCOUNTER_HEADER
#include <atomic>
#include <fstream>
#include <iostream>
#include <sstream>
#include "util/timer.h"
#include <cmath>
// Display style for ProgressCounter::progress().
enum ProgressCounterStyle {
    ETA,    // percentage plus an estimated remaining time
    SIMPLE  // percentage only
};
// Erases the current terminal line: carriage return, 80 spaces, carriage
// return.  NOTE(review): `static` at namespace scope in a header gives
// each including translation unit its own copy — presumably intentional
// for this small constant; confirm before changing.
static const std::string clear = "\r" + std::string(80,' ') + "\r";
/* Thread-safe progress reporter.  Writes intermediate progress directly to
 * the controlling terminal (/dev/tty) so it stays visible when stdout is
 * redirected; the final "done" line goes to std::cout. */
class ProgressCounter {
    private:
        std::ofstream tty;        // direct terminal stream, bypasses stdout redirection
        util::WallTimer timer;    // wall clock used for the ETA and total time
        std::string task;         // label printed before the percentage
        std::size_t max;          // total number of expected inc() calls
        std::atomic_size_t count; // completed steps; atomic so inc() is thread-safe
    public:
        ProgressCounter(std::string const & _task, std::size_t max);
        // Print current percentage (and ETA, depending on style T).
        template <ProgressCounterStyle T> void progress(void);
        // Record one completed step; prints the summary on the last step.
        void inc(void);
        // Restart timer and count for a new task label.
        void reset(std::string const & _task);
};
/* Opens the controlling terminal for writing.  NOTE(review): on systems
 * without /dev/tty (e.g. Windows) the open fails silently; progress()
 * uses std::cout in its _WIN32 branch instead — confirm that covers all
 * such platforms. */
inline
ProgressCounter::ProgressCounter(std::string const & _task, std::size_t _max)
    : tty("/dev/tty", std::ios_base::out), timer(),
    task(_task), max(_max), count(0) {}
// Atomically record one completed step.  The thread that performs the
// final (max-th) step builds and prints the closing summary line with the
// task name and total elapsed seconds.
inline void
ProgressCounter::inc(void) {
    if (++count == max) {
        std::stringstream summary;
        summary << clear << task << " 100%... done. (Took "
            << timer.get_elapsed_sec() << "s)";
        #pragma omp critical(progress_counter_inc)
        std::cout << summary.rdbuf() << std::endl;
    }
}
// Re-arm the counter for a fresh task: zero the step count, swap in the
// new label, and restart the wall-clock timer.
inline void
ProgressCounter::reset(std::string const & _task) {
    count = 0;
    task = _task;
    timer.reset();
}
/* Print the current percentage — and, for the ETA style, an estimated
 * remaining time — to the terminal.  To bound output volume, for large
 * jobs (max > 100) only every (max/100)-th call actually prints.
 * NOTE(review): `count` is read twice (guard and percentage); concurrent
 * inc() calls can make the two reads differ slightly — harmless for a
 * progress display. */
template <ProgressCounterStyle T> void
ProgressCounter::progress(void) {
    if ((max > 100 && count % (max / 100) == 0) || max <= 100) {
        float percent = static_cast<float>(count) / max;
        // Round to the nearest whole percent.
        int ipercent = std::floor(percent * 100.0f + 0.5f);
        std::stringstream ss;
        ss << clear << task << " " << ipercent << "%...";
        // Suppress the ETA below ~3% — the extrapolation is too noisy.
        if (T == ETA && ipercent > 3){
            // get_elapsed() presumably returns milliseconds (the /1000
            // below yields seconds) — confirm against util::WallTimer.
            std::size_t const elapsed = timer.get_elapsed();
            std::size_t eta = (elapsed / percent - elapsed) / 1000;
            ss << " eta ~ " << eta << " s";
        }
        #pragma omp critical(progress_counter_progress)
        #if defined(_WIN32)
        std::cout << ss.rdbuf() << std::flush;
        #else
        tty << ss.rdbuf() << std::flush;
        #endif
    }
}
#endif /* TEX_PROGRESSCOUNTER_HEADER */
|
bounds.c | /*---------------------------------------------------------------------------------
BOUNDS.C
-Implements physical boundary conditions
-Ensure no inflow at radial boundaries
-Ensure radial mass flux at radial boundaries is zero
-B2 flux at X1 and X3 faces at polar boundaries is reflected for ghost zones
-All X2 fluxes at polar boundaries are zeroed
---------------------------------------------------------------------------------*/
#include "decs.h"
// Sanity checks: grid dimensions, supported boundary conditions
#if N2 > 1 && N2 < NG
#error "N2 must be >= NG"
#endif
#if X1L_BOUND != PERIODIC && X1L_BOUND != OUTFLOW
#error "Unsupported X1L_BOUND"
#endif
#if X1R_BOUND != PERIODIC && X1R_BOUND != OUTFLOW && X1R_BOUND != USER
#error "Unsupported X1R_BOUND"
#endif
#if X2L_BOUND != PERIODIC && X2L_BOUND != OUTFLOW && X2L_BOUND != POLAR
#error "Unsupported X2L_BOUND"
#endif
#if X2R_BOUND != PERIODIC && X2R_BOUND != OUTFLOW && X2R_BOUND != POLAR
#error "Unsupported X2R_BOUND"
#endif
void inflow_check(struct GridGeom *G, struct FluidState *S, int i, int j, int type);
// Apply boundary conditions along X1 and X2
// Apply boundary conditions along X1 and X2.
// Fills the NG ghost zones on each side of the grid in the primitive
// array S->P (and the per-zone pflag array); for the MKS metric it then
// enforces the no-inflow condition at the radial boundaries.
void set_bounds(struct GridGeom *G, struct FluidState *S)
{
  timer_start(TIMER_BOUND);
  // --- X1 lower (inner radial) ghost zones ---
#if !INTEL_WORKAROUND
#pragma omp parallel for
#endif
  JLOOP
  {
    ISLOOP(-NG, -1)
    {
#if N1 < NG
      // Degenerate X1 direction: copy the single active column everywhere
      int iactive = NG;
      PLOOP S->P[ip][j][i] = S->P[ip][j][iactive];
      pflag[j][i] = pflag[j][iactive];
#elif X1L_BOUND == OUTFLOW
      // Copy the innermost physical zone outward...
      int iz = 0 + NG;
      PLOOP S->P[ip][j][i] = S->P[ip][j][iz];
      pflag[j][i] = pflag[j][iz];
      // ...rescaling B by the metric-determinant ratio so the magnetic
      // flux in the ghost zones matches the active zone
      double rescale = G->gdet[CENT][j][iz]/G->gdet[CENT][j][i];
      S->P[B1][j][i] *= rescale;
      S->P[B2][j][i] *= rescale;
      S->P[B3][j][i] *= rescale;
#elif X1L_BOUND == PERIODIC
      // Wrap around to the far side of the grid
      int iz = N1 + i;
      PLOOP S->P[ip][j][i] = S->P[ip][j][iz];
      pflag[j][i] = pflag[j][iz];
#endif
    }
  }
#if METRIC == MKS
  if(X1L_INFLOW == 0)
  {
    // Make sure there is no inflow at the inner boundary
#if !INTEL_WORKAROUND
#pragma omp parallel for
#endif
    JLOOP
    ISLOOP(-NG, -1)
    inflow_check(G, S, i, j, 0);
  }
#endif
  // --- X1 upper (outer radial) ghost zones ---
#if !INTEL_WORKAROUND
#pragma omp parallel for
#endif
  JLOOP
  {
    ISLOOP(N1, N1 - 1 + NG)
    {
#if N1 < NG
      int iactive = N1 - 1 + NG;
      PLOOP S->P[ip][j][i] = S->P[ip][j][iactive];
      pflag[j][i] = pflag[j][iactive];
#elif X1R_BOUND == OUTFLOW
      int iz = N1 - 1 + NG;
      PLOOP S->P[ip][j][i] = S->P[ip][j][iz];
      pflag[j][i] = pflag[j][iz];
      double rescale = G->gdet[CENT][j][iz]/G->gdet[CENT][j][i];
      S->P[B1][j][i] *= rescale;
      S->P[B2][j][i] *= rescale;
      S->P[B3][j][i] *= rescale;
#elif X1R_BOUND == USER
      // Problem-specific outer radial boundary
      bound_gas_prob_x1r(i, j, S->P, G);
#elif X1R_BOUND == PERIODIC
      int iz = i - N1;
      PLOOP S->P[ip][j][i] = S->P[ip][j][iz];
      pflag[j][i] = pflag[j][iz];
#endif
    }
  }
#if METRIC == MKS
  if(X1R_INFLOW == 0)
  {
    // Make sure there is no inflow at the outer boundary
#if !INTEL_WORKAROUND
#pragma omp parallel for
#endif
    JLOOP
    ISLOOP(N1, N1 - 1 + NG)
    inflow_check(G, S, i, j, 1);
  }
#endif
  // --- X2 lower (e.g. north polar) ghost zones ---
#if !INTEL_WORKAROUND
#pragma omp parallel for
#endif
  ILOOPALL
  {
    JSLOOP(-NG, -1)
    {
#if N2 < NG
      int jactive = NG;
      PLOOP S->P[ip][j][i] = S->P[ip][jactive][i];
      pflag[j][i] = pflag[jactive][i];
#elif X2L_BOUND == OUTFLOW
      int jz = 0 + NG ;
      PLOOP S->P[ip][j][i] = S->P[ip][jz][i];
      pflag[j][i] = pflag[jz][i];
#elif X2L_BOUND == POLAR
      // Reflect the zone past NG by NG-j
      int jrefl = NG + (NG - j) - 1;
      PLOOP S->P[ip][j][i] = S->P[ip][jrefl][i];
      pflag[j][i] = pflag[jrefl][i];
      // Poloidal velocity and field components flip sign across the pole
      S->P[U2][j][i] *= -1.;
      S->P[B2][j][i] *= -1.;
#elif X2L_BOUND == PERIODIC
      int jz = N2 + j;
      PLOOP S->P[ip][j][i] = S->P[ip][jz][i];
      pflag[j][i] = pflag[jz][i];
#endif
    }
  }
  // --- X2 upper (e.g. south polar) ghost zones ---
#if !INTEL_WORKAROUND
#pragma omp parallel for
#endif
  ILOOPALL
  {
    JSLOOP(N2, N2-1+NG)
    {
#if N2 < NG
      int jactive = N2 - 1 + NG;
      PLOOP S->P[ip][j][i] = S->P[ip][jactive][i];
      pflag[j][i] = pflag[jactive][i];
#elif X2R_BOUND == OUTFLOW
      int jz = N2 - 1 + NG;
      PLOOP S->P[ip][j][i] = S->P[ip][jz][i];
      pflag[j][i] = pflag[jz][i];
#elif X2R_BOUND == POLAR
      // As j grows beyond N2+NG, reflect the zone that far previous
      int jrefl = (N2 + NG) + (N2 + NG - j) - 1;
      PLOOP S->P[ip][j][i] = S->P[ip][jrefl][i];
      pflag[j][i] = pflag[jrefl][i];
      S->P[U2][j][i] *= -1.;
      S->P[B2][j][i] *= -1.;
#elif X2R_BOUND == PERIODIC
      int jz = j - N2;
      PLOOP S->P[ip][j][i] = S->P[ip][jz][i];
      pflag[j][i] = pflag[jz][i];
#endif
    }
  }
  timer_stop(TIMER_BOUND);
}
#if METRIC == MKS
/*
 * Enforce the no-inflow condition at a radial boundary zone (i,j).
 * type == 0: inner boundary — inflow means radial 4-velocity ucon^1 > 0.
 * type == 1: outer boundary — inflow means ucon^1 < 0.
 * If inflow is detected, the radial velocity primitive is reset so the
 * radial 4-velocity vanishes, and the Lorentz factor is recomputed to
 * keep the velocity primitives consistent.
 */
void inflow_check(struct GridGeom *G, struct FluidState *S, int i, int j, int type)
{
  double alpha, beta1, vsq;
  ucon_calc(G, S, i, j, CENT);
  if (((S->ucon[1][j][i] > 0.) && (type == 0)) ||
      ((S->ucon[1][j][i] < 0.) && (type == 1)))
  {
    // Find gamma and remove it from the primitives
    double gamma = mhd_gamma_calc(G, S, i, j, CENT);
    S->P[U1][j][i] /= gamma;
    S->P[U2][j][i] /= gamma;
    S->P[U3][j][i] /= gamma;
    alpha = G->lapse[CENT][j][i];
    beta1 = G->gcon[CENT][0][1][j][i]*alpha*alpha;
    // Reset radial velocity so radial 4-velocity is zero
    S->P[U1][j][i] = beta1/alpha;
    // Now find new gamma and put it back in
    vsq = 0.;
    for (int mu = 1; mu < NDIM; mu++)
    {
      for (int nu = 1; nu < NDIM; nu++)
      {
        vsq += G->gcov[CENT][mu][nu][j][i]*S->P[U1+mu-1][j][i]*S->P[U1+nu-1][j][i];
      }
    }
    // Clamp v^2 away from zero and below the ceiling implied by GAMMAMAX
    // so the sqrt below stays well-behaved
    if (fabs(vsq) < 1.e-13)
      vsq = 1.e-13;
    if (vsq >= 1.)
      vsq = 1. - 1./(GAMMAMAX*GAMMAMAX);
    gamma = 1./sqrt(1. - vsq);
    S->P[U1][j][i] *= gamma;
    S->P[U2][j][i] *= gamma;
    S->P[U3][j][i] *= gamma;
  }
}
/*
 * Repair face fluxes so they respect the boundary conditions:
 *  - where radial inflow is forbidden, clip the rest-mass (RHO) flux at
 *    that radial face so matter can only leave the domain
 *  - reflect the B2 flux through X1 faces into the polar ghost rows
 *  - zero every X2 flux on the polar faces themselves
 */
void fix_flux(struct FluidFlux *F)
{
  if (X1L_INFLOW == 0)
  {
    // Inner radial face: allow only non-positive (outgoing) mass flux
#if !INTEL_WORKAROUND
#pragma omp parallel for
#endif
    JLOOPALL
      F->X1[RHO][j][0+NG] = MY_MIN(F->X1[RHO][j][0+NG], 0.);
  }
  if (X1R_INFLOW == 0)
  {
    // Outer radial face: allow only non-negative (outgoing) mass flux
#if !INTEL_WORKAROUND
#pragma omp parallel for
#endif
    JLOOPALL
      F->X1[RHO][j][N1+NG] = MY_MAX(F->X1[RHO][j][N1+NG], 0.);
  }
  // Lower pole: reflect B2 flux into the ghost row, zero X2 fluxes on face
#if !INTEL_WORKAROUND
#pragma omp parallel for
#endif
  ILOOPALL
  {
    F->X1[B2][-1+NG][i] = -F->X1[B2][0+NG][i];
    PLOOP F->X2[ip][0+NG][i] = 0.;
  }
  // Upper pole: same treatment at j = N2
#if !INTEL_WORKAROUND
#pragma omp parallel for
#endif
  ILOOPALL
  {
    F->X1[B2][N2+NG][i] = -F->X1[B2][N2-1+NG][i];
    PLOOP F->X2[ip][N2+NG][i] = 0.;
  }
}
#endif // METRIC
|
omptest.c | #include "gptl.h"
#include <stdio.h>
#include <omp.h>
extern void sub (int);
// Self-test for GPTL timing with OpenMP threads: times a region executed
// on two threads, then checks that per-thread wallclock retrieval works
// for an existing thread (1) and fails for a non-existent one (2).
// Returns 0 on success, -1 on failure.
int main ()
{
  int ret;
  int iter;
  double value;
  static const char *thisprog = "omptest";
  // Exactly 2 threads: thread 1 must exist, thread 2 must not.
  omp_set_num_threads (2);
  ret = GPTLinitialize ();
  ret = GPTLstart ("main");
  ret = GPTLstart ("omp_loop");
  // Two iterations over two threads, so "sub" is timed on both threads.
#pragma omp parallel for private (iter)
  for (iter = 0; iter < 2; ++iter) {
    sub (iter);
  }
  ret = GPTLstop ("omp_loop");
  ret = GPTLstop ("main");
  // This test should succeed
  ret = GPTLget_wallclock ("sub", 1, &value);
  if (ret != 0) {
    printf ("%s: GPTLget_wallclock failure for thread 1\n", thisprog);
    return -1;
  }
  // This test should fail
  ret = GPTLget_wallclock ("sub", 2, &value);
  if (ret == 0) {
    printf ("%s: GPTLget_wallclock should have failed for thread 2\n", thisprog);
    return -1;
  }
  return 0;
}
// Worker body timed under the GPTL "sub" timer: announces which thread
// processes which iteration.  Return codes are intentionally ignored,
// matching the rest of this test program.
void sub (int iter)
{
  int rc;
  int const mythread = omp_get_thread_num();
  rc = GPTLstart ("sub");
  printf ("iter=%d being processed by thread=%d\n", iter, mythread);
  rc = GPTLstop ("sub");
}
|
scalprod.c | #include <omp.h>
#ifdef __cplusplus
extern "C"
#endif
void
scalprod(int n, double* x, double* y, double* res)
{
  /* Dot product of the length-n vectors x and y, stored into *res.
   * The partial sums are combined with an OpenMP reduction; for n <= 0
   * the result is 0. */
  double acc = 0.;
  int k;
#pragma omp parallel for reduction(+ : acc)
  for (k = 0; k < n; ++k) {
    acc += x[k] * y[k];
  }
  *res = acc;
}
|
tparallel.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h> /* OpenMP */
int first=0, second=0;
// Exercises OpenMP data-sharing clauses (firstprivate, reduction, if,
// num_threads).  Returns the outer x, which stays 1023 because every
// region copies it via firstprivate.
int foo() {
  int i, x = 1023;
  // Region 1: 2 threads each get a private x = 1023, bump it to 1024,
  // and the reduction adds both copies into `first` (2*1024).
  // The outer x itself is NOT modified (firstprivate).
  #pragma omp parallel firstprivate(x) reduction(+:first) if(x>0) num_threads(2)
  {
    x++;
    first += x;
  }
  // Region 2: if(0) disables parallelism, so a single thread runs the
  // region serially, adding one more 1024 into `first`.
  #pragma omp parallel firstprivate(x) reduction(+:first) if(0)
  {
    x++;
    first += x;
  }
  // Region 3: each thread's reduction-private `second` starts at 0, is
  // overwritten with the shared `first`, incremented 16 times, and the
  // per-thread values are summed: second = nthreads * (first + 16).
  // The team size here is the implementation default (the
  // omp_set_num_threads(6) below comes AFTER this region).
  #pragma omp parallel private(i) shared(first) reduction(+:second)
  {
    second = first;
    for (i = 0; i < 16; i++)
      second++;
  }
  omp_set_num_threads(6);
  // Region 4: each of the (now 6) threads announces completion.
  #pragma omp parallel
  printf("Thread %d finished the execution of foo\n", omp_get_thread_num());
  return(x);
}
// Driver: run foo() and report the globals it updates.
int main(int argc, char *argv[]) {
  /* Bug fix: the original passed foo() as a printf argument alongside the
   * globals `first` and `second` that foo() itself modifies.  C leaves the
   * evaluation order of function-call arguments unspecified, so `first`
   * and `second` could be read either before or after foo() ran.
   * Sequencing the call before the printf makes the output deterministic. */
  int x = foo();
  printf("first = %d, second = %d, x = %d\n", first, second, x);
  return 0;
}
|
Tutorial.h | //=================================================================================================
/*!
// \file blaze/Tutorial.h
// \brief Tutorial of the Blaze library
//
// Copyright (C) 2012-2017 Klaus Iglberger - All Rights Reserved
//
// This file is part of the Blaze library. You can redistribute it and/or modify it under
// the terms of the New (Revised) BSD License. Redistribution and use in source and binary
// forms, with or without modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other materials
// provided with the distribution.
// 3. Neither the names of the Blaze development group nor the names of its contributors
// may be used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
// SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
// BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
*/
//=================================================================================================
#ifndef _BLAZE_TUTORIAL_H_
#define _BLAZE_TUTORIAL_H_
//=================================================================================================
//
// BLAZE TUTORIAL
//
//=================================================================================================
//**Mainpage***************************************************************************************
/*!\mainpage
//
// \image html blaze300x150.jpg
//
// This is the API for the \b Blaze high performance C++ math library. It gives a complete
// overview of the individual features and sublibraries of \b Blaze. To get a first impression
// on \b Blaze, the short \ref getting_started tutorial is a good place to start. Afterwards,
// the following long tutorial covers the most important aspects of the \b Blaze math library.
// The tabs at the top of the page allow a direct access to the individual modules, namespaces,
// classes, and files of the \b Blaze library.\n\n
//
// \section table_of_content Table of Contents
//
// <ul>
// <li> \ref configuration_and_installation </li>
// <li> \ref getting_started </li>
// <li> \ref vectors
// <ul>
// <li> \ref vector_types </li>
// <li> \ref vector_operations </li>
// </ul>
// </li>
// <li> \ref matrices
// <ul>
// <li> \ref matrix_types </li>
// <li> \ref matrix_operations </li>
// </ul>
// </li>
// <li> \ref adaptors
// <ul>
// <li> \ref adaptors_symmetric_matrices </li>
// <li> \ref adaptors_hermitian_matrices </li>
// <li> \ref adaptors_triangular_matrices </li>
// </ul>
// </li>
// <li> \ref views
// <ul>
// <li> \ref views_subvectors </li>
// <li> \ref views_submatrices </li>
// <li> \ref views_rows </li>
// <li> \ref views_columns </li>
// </ul>
// </li>
// <li> \ref arithmetic_operations
// <ul>
// <li> \ref addition </li>
// <li> \ref subtraction </li>
// <li> \ref scalar_multiplication </li>
// <li> \ref vector_vector_multiplication
// <ul>
// <li> \ref componentwise_multiplication </li>
// <li> \ref inner_product </li>
// <li> \ref outer_product </li>
// <li> \ref cross_product </li>
// </ul>
// </li>
// <li> \ref vector_vector_division </li>
// <li> \ref matrix_vector_multiplication </li>
// <li> \ref matrix_matrix_multiplication
// <ul>
// <li> \ref schur_product </li>
// <li> \ref matrix_product </li>
// </ul>
// </li>
// </ul>
// </li>
// <li> \ref shared_memory_parallelization
// <ul>
// <li> \ref openmp_parallelization </li>
// <li> \ref cpp_threads_parallelization </li>
// <li> \ref boost_threads_parallelization </li>
// <li> \ref serial_execution </li>
// </ul>
// </li>
// <li> \ref serialization
// <ul>
// <li> \ref vector_serialization </li>
// <li> \ref matrix_serialization </li>
// </ul>
// </li>
// <li> \ref customization
// <ul>
// <li> \ref configuration_files </li>
// <li> \ref vector_and_matrix_customization
// <ul>
// <li> \ref custom_data_members </li>
// <li> \ref custom_operations </li>
// <li> \ref custom_data_types </li>
// </ul>
// </li>
// <li> \ref error_reporting_customization </li>
// </ul>
// </li>
// <li> \ref blas_functions </li>
// <li> \ref lapack_functions </li>
// <li> \ref block_vectors_and_matrices </li>
// <li> \ref intra_statement_optimization </li>
// </ul>
*/
//*************************************************************************************************
//**Configuration and Installation*****************************************************************
/*!\page configuration_and_installation Configuration and Installation
//
// Since \b Blaze is a header-only library, setting up the \b Blaze library on a particular system
// is a fairly easy two step process. In the following, this two step process is explained in
// detail, preceded only by a short summary of the requirements.
//
//
// \n \section requirements Requirements
// <hr>
//
// For maximum performance the \b Blaze library expects you to have a BLAS library installed
// (<a href="http://software.intel.com/en-us/articles/intel-mkl/">Intel MKL</a>,
// <a href="http://developer.amd.com/libraries/acml/">ACML</a>,
// <a href="http://math-atlas.sourceforge.net">Atlas</a>,
// <a href="http://www.tacc.utexas.edu/tacc-projects/gotoblas2">Goto</a>, ...). If you don't
// have a BLAS library installed on your system, \b Blaze will still work and will not be reduced
// in functionality, but performance may be limited. Thus it is strongly recommended to install a
// BLAS library.
//
// Additionally, for computing the determinant of a dense matrix, for the decomposition of dense
// matrices, for the dense matrix inversion, and for the computation of eigenvalues and singular
// values \b Blaze requires <a href="https://en.wikipedia.org/wiki/LAPACK">LAPACK</a>. When either
// of these features is used it is necessary to link the LAPACK library to the final executable.
// If no LAPACK library is available the use of these features will result in a linker error.
//
// Furthermore, it is possible to use Boost threads to run numeric operations in parallel. In this
// case the Boost library is required to be installed on your system. It is recommended to use the
// newest Boost library available, but \b Blaze requires at minimum the Boost version 1.54.0. If
// you don't have Boost installed on your system, you can download it for free from
// <a href="http://www.boost.org">www.boost.org</a>.
//
//
// \n \section step_1_installation Step 1: Installation
// <hr>
//
// \subsection step_1_cmake Installation via CMake
//
// The first step is the installation of the \b Blaze header files. The most convenient way
// to do this is via <a href="https://cmake.org">CMake</a>. Linux and macOS users can use the
// following two lines to copy the \b Blaze headers in the <tt>./blaze</tt> subdirectory to
// the directory \c ${CMAKE_INSTALL_PREFIX}/include and the package configuration files to
// \c ${CMAKE_INSTALL_PREFIX}/share/blaze/cmake.
\code
cmake -DCMAKE_INSTALL_PREFIX=/usr/local/
sudo make install
\endcode
// Windows users can do the same via the cmake-gui. Alternatively, it is possible to include
// \b Blaze by adding the following lines in any \c CMakeLists.txt file:
\code
find_package( blaze )
if( blaze_FOUND )
add_library( blaze_target INTERFACE )
target_link_libraries( blaze_target INTERFACE blaze::blaze )
endif()
\endcode
// \n \subsection step_1_vcpkg Installation via the VC++ Packaging Tool
//
// An alternate way to install \b Blaze for Windows users is Microsoft's
// <a href="https://github.com/Microsoft/vcpkg">VC++ Packaging Tool (vcpkg)</a>. \b Blaze can
// be installed via the command line:
\code
C:\src\vcpkg> .\vcpkg install blaze
\endcode
// The tool automatically downloads the latest \b Blaze release and copies the header files to
// the common include directory.
//
// \n \subsection step_1_installation_unix Manual Installation on Linux/macOS
//
// Since \b Blaze only consists of header files, the <tt>./blaze</tt> subdirectory can be simply
// copied to a standard include directory (note that this requires root privileges):
\code
cp -r ./blaze /usr/local/include
\endcode
// Alternatively, on Unix-based machines (which includes Linux and Mac OS X) the
// \c CPLUS_INCLUDE_PATH environment variable can be set. The specified directory will be
// searched after any directories specified on the command line with the option \c -I and
// before the standard default directories (such as \c /usr/local/include and \c /usr/include).
// Assuming a user named 'Jon', the environment variable can be set as follows:
\code
CPLUS_INCLUDE_PATH=/usr/home/jon/blaze
export CPLUS_INCLUDE_PATH
\endcode
// Last but not least, the <tt>./blaze</tt> subdirectory can be explicitly specified on the
// command line. The following example demonstrates this by means of the GNU C++ compiler:
\code
g++ -I/usr/home/jon/blaze -o BlazeTest BlazeTest.cpp
\endcode
// \n \subsection step_1_installation_windows Manual Installation on Windows
//
// Windows doesn't have a standard include directory. Therefore the \b Blaze header files can be
// copied to any other directory or simply left in the default \b Blaze directory. However, the
// chosen include directory has to be explicitly specified as include path. In Visual Studio,
// this is done via the project property pages, configuration properties, C/C++, General settings.
// Here the additional include directories can be specified.
//
//
// \n \section step_2_configuration Step 2: Configuration
// <hr>
//
// The second step is the configuration and customization of the \b Blaze library. Many aspects
// of \b Blaze can be adapted to specific requirements, environments and architectures. The most
// convenient way to configure \b Blaze is to modify the headers in the <tt>./blaze/config/</tt>
// subdirectory by means of <a href="https://cmake.org">CMake</a>. Alternatively these header
// files can be customized manually. In both cases, however, the files are modified. If this is
// not an option it is possible to configure \b Blaze via the command line (see the tutorial
// section \ref configuration_files or the documentation in the configuration files).
//
// Since the default settings are reasonable for most systems this step can also be skipped.
// However, in order to achieve maximum performance a customization of at least the following
// configuration files is required:
//
// - <b><tt>./blaze/config/BLAS.h</tt></b>: Via this configuration file \b Blaze can be enabled
// to use a third-party BLAS library for several basic linear algebra functions (such as for
// instance dense matrix multiplications). In case no BLAS library is used, all linear algebra
// functions use the default implementations of the \b Blaze library and therefore BLAS is not a
// requirement for the compilation process. However, please note that performance may be limited.
// - <b><tt>./blaze/config/CacheSize.h</tt></b>: This file contains the hardware specific cache
// settings. \b Blaze uses this information to optimize its cache usage. For maximum performance
// it is recommended to adapt these setting to a specific target architecture.
// - <b><tt>./blaze/config/Thresholds.h</tt></b>: This file contains all thresholds for the
// customization of the \b Blaze compute kernels. In order to tune the kernels for a specific
// architecture and to maximize performance it can be necessary to adjust the thresholds,
// especially for a parallel execution (see \ref shared_memory_parallelization).
//
// For an overview of other customization options and more details, please see the section
// \ref configuration_files.
//
// \n Next: \ref getting_started
*/
//*************************************************************************************************
//**Getting Started********************************************************************************
/*!\page getting_started Getting Started
//
// This short tutorial serves the purpose to give a quick overview of the way mathematical
// expressions have to be formulated in \b Blaze. Starting with \ref vector_types, the following
// long tutorial covers the most important aspects of the \b Blaze math library.
//
//
// \n \section getting_started_vector_example A First Example
//
// \b Blaze is written such that using mathematical expressions is as close to mathematical
// textbooks as possible and therefore as intuitive as possible. In nearly all cases the seemingly
// easiest solution is the right solution and most users experience no problems when trying to
// use \b Blaze in the most natural way. The following example gives a first impression of the
// formulation of a vector addition in \b Blaze:
\code
#include <iostream>
#include <blaze/Math.h>
using blaze::StaticVector;
using blaze::DynamicVector;
// Instantiation of a static 3D column vector. The vector is directly initialized as
// ( 4 -2 5 )
StaticVector<int,3UL> a{ 4, -2, 5 };
// Instantiation of a dynamic 3D column vector. Via the subscript operator the values are set to
// ( 2 5 -3 )
DynamicVector<int> b( 3UL );
b[0] = 2;
b[1] = 5;
b[2] = -3;
// Adding the vectors a and b
DynamicVector<int> c = a + b;
// Printing the result of the vector addition
std::cout << "c =\n" << c << "\n";
\endcode
// Note that the entire \b Blaze math library can be included via the \c blaze/Math.h header
// file. Alternatively, the entire \b Blaze library, including both the math and the entire
// utility module, can be included via the \c blaze/Blaze.h header file. Also note that all
// classes and functions of \b Blaze are contained in the blaze namespace.\n\n
//
// Assuming that this program resides in a source file called \c FirstExample.cpp, it can be
// compiled for instance via the GNU C++ compiler:
\code
g++ -ansi -O3 -DNDEBUG -mavx -o FirstExample FirstExample.cpp
\endcode
// Note the definition of the \c NDEBUG preprocessor symbol. In order to achieve maximum
// performance, it is necessary to compile the program in release mode, which deactivates
// all debugging functionality inside \b Blaze. It is also strongly recommended to specify
// the available architecture specific instruction set (as for instance the AVX instruction
// set, which if available can be activated via the \c -mavx flag). This allows \b Blaze
// to optimize computations via vectorization.\n\n
//
// When running the resulting executable \c FirstExample, the output of the last line of
// this small program is
\code
c =
6
3
2
\endcode
// \n \section getting_started_matrix_example An Example Involving Matrices
//
// Similarly easy and intuitive are expressions involving matrices:
\code
#include <blaze/Math.h>
using namespace blaze;
// Instantiating a dynamic 3D column vector
DynamicVector<int> x{ 4, -1, 3 };
// Instantiating a dynamic 2x3 row-major matrix, preinitialized with 0. Via the function call
// operator three values of the matrix are explicitly set to get the matrix
// ( 1 0 4 )
// ( 0 -2 0 )
DynamicMatrix<int> A( 2UL, 3UL, 0 );
A(0,0) = 1;
A(0,2) = 4;
A(1,1) = -2;
// Performing a matrix/vector multiplication
DynamicVector<int> y = A * x;
// Printing the resulting vector
std::cout << "y =\n" << y << "\n";
// Instantiating a static column-major matrix. The matrix is directly initialized as
// ( 3 -1 )
// ( 0 2 )
// ( -1 0 )
StaticMatrix<int,3UL,2UL,columnMajor> B{ { 3, -1 }, { 0, 2 }, { -1, 0 } };
// Performing a matrix/matrix multiplication
DynamicMatrix<int> C = A * B;
// Printing the resulting matrix
std::cout << "C =\n" << C << "\n";
\endcode
// The output of this program is
\code
y =
16
2
C =
( -1 -1 )
( 0 -4 )
\endcode
// \n \section getting_started_complex_example A Complex Example
//
// The following example is much more sophisticated. It shows the implementation of the Conjugate
// Gradient (CG) algorithm (http://en.wikipedia.org/wiki/Conjugate_gradient) by means of the
// \b Blaze library:
//
// \image html cg.jpg
//
// In this example it is not important to understand the CG algorithm itself, but to see the
// advantage of the API of the \b Blaze library. In the \b Blaze implementation we will use a
// sparse matrix/dense vector multiplication for a 2D Poisson equation using \f$ N \times N \f$
// unknowns. It becomes apparent that the core of the algorithm is very close to the mathematical
// formulation and therefore has huge advantages in terms of readability and maintainability,
// while the performance of the code is close to the expected theoretical peak performance:
\code
const size_t NN( N*N );
blaze::CompressedMatrix<double,rowMajor> A( NN, NN );
blaze::DynamicVector<double,columnVector> x( NN, 1.0 ), b( NN, 0.0 ), r( NN ), p( NN ), Ap( NN );
double alpha, beta, delta;
// ... Initializing the sparse matrix A
// Performing the CG algorithm
r = b - A * x;
p = r;
delta = (r,r);
for( size_t iteration=0UL; iteration<iterations; ++iteration )
{
Ap = A * p;
alpha = delta / (p,Ap);
x += alpha * p;
r -= alpha * Ap;
beta = (r,r);
if( std::sqrt( beta ) < 1E-8 ) break;
p = r + ( beta / delta ) * p;
delta = beta;
}
\endcode
// \n Hopefully this short tutorial gives a good first impression of how mathematical expressions
// are formulated with \b Blaze. The following long tutorial, starting with \ref vector_types,
// will cover all aspects of the \b Blaze math library, i.e. it will introduce all vector and
// matrix types, all possible operations on vectors and matrices, and of course all possible
// mathematical expressions.
//
// \n Previous: \ref configuration_and_installation Next: \ref vectors
*/
//*************************************************************************************************
//**Vectors****************************************************************************************
/*!\page vectors Vectors
//
// \tableofcontents
//
//
// \n \section vectors_general General Concepts
// <hr>
//
// The \b Blaze library currently offers four dense vector types (\ref vector_types_static_vector,
// \ref vector_types_dynamic_vector, \ref vector_types_hybrid_vector, and \ref vector_types_custom_vector)
// and one sparse vector type (\ref vector_types_compressed_vector). All vectors can be specified
// as either column vectors or row vectors:
\code
using blaze::DynamicVector;
using blaze::columnVector;
using blaze::rowVector;
// Setup of the 3-dimensional dense column vector
//
// ( 1 )
// ( 2 )
// ( 3 )
//
DynamicVector<int,columnVector> a{ 1, 2, 3 };
// Setup of the 3-dimensional dense row vector
//
// ( 4 5 6 )
//
DynamicVector<int,rowVector> b{ 4, 5, 6 };
\endcode
// Per default, all vectors in \b Blaze are column vectors:
\code
// Instantiation of a 3-dimensional column vector
blaze::DynamicVector<int> c( 3UL );
\endcode
// \n \section vectors_details Vector Details
// <hr>
//
// - \ref vector_types
// - \ref vector_operations
//
//
// \n \section vectors_examples Examples
// <hr>
\code
using blaze::StaticVector;
using blaze::DynamicVector;
using blaze::CompressedVector;
using blaze::rowVector;
using blaze::columnVector;
StaticVector<int,6UL> a; // Instantiation of a 6-dimensional static column vector
CompressedVector<int,rowVector> b; // Instantiation of a compressed row vector
DynamicVector<int,columnVector> c; // Instantiation of a dynamic column vector
// ... Resizing and initialization
c = a + trans( b );
\endcode
// \n Previous: \ref getting_started Next: \ref vector_types
*/
//*************************************************************************************************
//**Vector Types***********************************************************************************
/*!\page vector_types Vector Types
//
// \tableofcontents
//
//
// \n \section vector_types_static_vector StaticVector
// <hr>
//
// The blaze::StaticVector class template is the representation of a fixed size vector with
// statically allocated elements of arbitrary type. It can be included via the header file
\code
#include <blaze/math/StaticVector.h>
\endcode
// The type of the elements, the number of elements, and the transpose flag of the vector can
// be specified via the three template parameters:
\code
template< typename Type, size_t N, bool TF >
class StaticVector;
\endcode
// - \c Type: specifies the type of the vector elements. StaticVector can be used with any
// non-cv-qualified, non-reference, non-pointer element type.
// - \c N : specifies the total number of vector elements. It is expected that StaticVector is
// only used for tiny and small vectors.
// - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column
// vector (\c blaze::columnVector). The default value is \c blaze::columnVector.
//
// The blaze::StaticVector is perfectly suited for small to medium vectors whose size is known at
// compile time:
\code
// Definition of a 3-dimensional integral column vector
blaze::StaticVector<int,3UL> a;
// Definition of a 4-dimensional single precision column vector
blaze::StaticVector<float,4UL,blaze::columnVector> b;
// Definition of a 6-dimensional double precision row vector
blaze::StaticVector<double,6UL,blaze::rowVector> c;
\endcode
// \n \section vector_types_dynamic_vector DynamicVector
// <hr>
//
// The blaze::DynamicVector class template is the representation of an arbitrary sized vector
// with dynamically allocated elements of arbitrary type. It can be included via the header file
\code
#include <blaze/math/DynamicVector.h>
\endcode
// The type of the elements and the transpose flag of the vector can be specified via the two
// template parameters:
\code
template< typename Type, bool TF >
class DynamicVector;
\endcode
// - \c Type: specifies the type of the vector elements. DynamicVector can be used with any
// non-cv-qualified, non-reference, non-pointer element type.
// - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column
// vector (\c blaze::columnVector). The default value is \c blaze::columnVector.
//
// The blaze::DynamicVector is the default choice for all kinds of dense vectors and the best
// choice for medium to large vectors. Its size can be modified at runtime:
\code
// Definition of a 3-dimensional integral column vector
blaze::DynamicVector<int> a( 3UL );
// Definition of a 4-dimensional single precision column vector
blaze::DynamicVector<float,blaze::columnVector> b( 4UL );
// Definition of a double precision row vector with size 0
blaze::DynamicVector<double,blaze::rowVector> c;
\endcode
// \n \section vector_types_hybrid_vector HybridVector
// <hr>
//
// The blaze::HybridVector class template combines the advantages of the blaze::StaticVector and
// the blaze::DynamicVector class templates. It represents a fixed size vector with statically
// allocated elements, but still can be dynamically resized (within the bounds of the available
// memory). It can be included via the header file
\code
#include <blaze/math/HybridVector.h>
\endcode
// The type of the elements, the number of elements, and the transpose flag of the vector can
// be specified via the three template parameters:
\code
template< typename Type, size_t N, bool TF >
class HybridVector;
\endcode
// - \c Type: specifies the type of the vector elements. HybridVector can be used with any
// non-cv-qualified, non-reference, non-pointer element type.
// - \c N : specifies the maximum number of vector elements. It is expected that HybridVector
// is only used for tiny and small vectors.
// - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column
// vector (\c blaze::columnVector). The default value is \c blaze::columnVector.
//
// The blaze::HybridVector is a suitable choice for small to medium vectors, whose size is not
// known at compile time or not fixed at runtime, but whose maximum size is known at compile
// time:
\code
// Definition of a 3-dimensional integral column vector with a maximum size of 6
blaze::HybridVector<int,6UL> a( 3UL );
// Definition of a 4-dimensional single precision column vector with a maximum size of 16
blaze::HybridVector<float,16UL,blaze::columnVector> b( 4UL );
// Definition of a double precision row vector with size 0 and a maximum size of 6
blaze::HybridVector<double,6UL,blaze::rowVector> c;
\endcode
// \n \section vector_types_custom_vector CustomVector
// <hr>
//
// The blaze::CustomVector class template provides the functionality to represent an external
// array of elements of arbitrary type and a fixed size as a native \b Blaze dense vector data
// structure. Thus in contrast to all other dense vector types a custom vector does not perform
// any kind of memory allocation by itself, but it is provided with an existing array of element
// during construction. A custom vector can therefore be considered an alias to the existing
// array. It can be included via the header file
\code
#include <blaze/math/CustomVector.h>
\endcode
// The type of the elements, the properties of the given array of elements and the transpose
// flag of the vector can be specified via the following four template parameters:
\code
template< typename Type, bool AF, bool PF, bool TF >
class CustomVector;
\endcode
// - Type: specifies the type of the vector elements. blaze::CustomVector can be used with
// any non-cv-qualified, non-reference, non-pointer element type.
// - AF : specifies whether the represented, external arrays are properly aligned with
// respect to the available instruction set (SSE, AVX, ...) or not.
//  - PF  : specifies whether the represented, external arrays are properly padded with
// respect to the available instruction set (SSE, AVX, ...) or not.
// - TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column
// vector (\c blaze::columnVector). The default value is \c blaze::columnVector.
//
// The blaze::CustomVector is the right choice if any external array needs to be represented as
// a \b Blaze dense vector data structure or if a custom memory allocation strategy needs to be
// realized:
\code
using blaze::CustomVector;
using blaze::aligned;
using blaze::unaligned;
using blaze::padded;
using blaze::unpadded;
// Definition of an unmanaged custom column vector for unaligned, unpadded integer arrays
typedef CustomVector<int,unaligned,unpadded,columnVector> UnalignedUnpadded;
std::vector<int> vec( 7UL );
UnalignedUnpadded a( &vec[0], 7UL );
// Definition of a managed custom column vector for unaligned but padded 'float' arrays
typedef CustomVector<float,unaligned,padded,columnVector> UnalignedPadded;
UnalignedPadded b( new float[16], 9UL, 16UL, blaze::ArrayDelete() );
// Definition of a managed custom row vector for aligned, unpadded 'double' arrays
typedef CustomVector<double,aligned,unpadded,rowVector> AlignedUnpadded;
AlignedUnpadded c( blaze::allocate<double>( 7UL ), 7UL, blaze::Deallocate() );
// Definition of a managed custom column vector for aligned, padded 'complex<double>' arrays
typedef CustomVector<complex<double>,aligned,padded,columnVector> AlignedPadded;
AlignedPadded d( allocate< complex<double> >( 8UL ), 5UL, 8UL, blaze::Deallocate() );
\endcode
// In comparison with the remaining \b Blaze dense vector types blaze::CustomVector has several
// special characteristics. All of these result from the fact that a custom vector is not
// performing any kind of memory allocation, but instead is given an existing array of elements.
// The following sections discuss all of these characteristics:
//
// -# <b>\ref vector_types_custom_vector_memory_management</b>
// -# <b>\ref vector_types_custom_vector_copy_operations</b>
// -# <b>\ref vector_types_custom_vector_alignment</b>
// -# <b>\ref vector_types_custom_vector_padding</b>
//
// \n \subsection vector_types_custom_vector_memory_management Memory Management
//
// The blaze::CustomVector class template acts as an adaptor for an existing array of elements. As
// such it provides everything that is required to use the array just like a native \b Blaze dense
// vector data structure. However, this flexibility comes with the price that the user of a custom
// vector is responsible for the resource management.
//
// The following examples give an impression of several possible types of custom vectors:
\code
using blaze::CustomVector;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::unaligned;
using blaze::padded;
using blaze::unpadded;
// Definition of a 3-dimensional custom vector with unaligned, unpadded and externally
// managed integer array. Note that the std::vector must be guaranteed to outlive the
// custom vector!
std::vector<int> vec( 3UL );
CustomVector<int,unaligned,unpadded> a( &vec[0], 3UL );
// Definition of a custom vector with size 3 and capacity 16 with aligned, padded and
// externally managed integer array. Note that the std::unique_ptr must be guaranteed
// to outlive the custom vector!
std::unique_ptr<int[],Deallocate> memory( allocate<int>( 16UL ) );
CustomVector<int,aligned,padded> b( memory.get(), 3UL, 16UL );
\endcode
// \n \subsection vector_types_custom_vector_copy_operations Copy Operations
//
// As with all dense vectors it is possible to copy construct a custom vector:
\code
using blaze::CustomVector;
using blaze::unaligned;
using blaze::unpadded;
typedef CustomVector<int,unaligned,unpadded> CustomType;
std::vector<int> vec( 5UL, 10 ); // Vector of 5 integers of the value 10
CustomType a( &vec[0], 5UL ); // Represent the std::vector as Blaze dense vector
a[1] = 20; // Also modifies the std::vector
CustomType b( a ); // Creating a copy of vector a
b[2] = 20; // Also affects vector a and the std::vector
\endcode
// It is important to note that a custom vector acts as a reference to the specified array. Thus
// the result of the copy constructor is a new custom vector that is referencing and representing
// the same array as the original custom vector.
//
// In contrast to copy construction, just as with references, copy assignment does not change
// which array is referenced by the custom vector, but modifies the values of the array:
\code
std::vector<int> vec2( 5UL, 4 ); // Vector of 5 integers of the value 4
CustomType c( &vec2[0], 5UL ); // Represent the std::vector as Blaze dense vector
a = c; // Copy assignment: Set all values of vector a and b to 4.
\endcode
// \n \subsection vector_types_custom_vector_alignment Alignment
//
// In case the custom vector is specified as \c aligned the passed array must be guaranteed to
// be aligned according to the requirements of the used instruction set (SSE, AVX, ...). For
// instance, if AVX is active an array of integers must be 32-byte aligned:
\code
using blaze::CustomVector;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::unpadded;
// Allocation of 32-byte aligned memory
std::unique_ptr<int[],Deallocate> memory( allocate<int>( 5UL ) );
CustomVector<int,aligned,unpadded> a( memory.get(), 5UL );
\endcode
// In case the alignment requirements are violated, a \c std::invalid_argument exception is
// thrown.
//
// \n \subsection vector_types_custom_vector_padding Padding
//
// Adding padding elements to the end of an array can have a significant impact on the performance.
// For instance, assuming that AVX is available, then two aligned, padded, 3-dimensional vectors
// of double precision values can be added via a single SIMD addition operation:
\code
using blaze::CustomVector;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::padded;
typedef CustomVector<double,aligned,padded> CustomType;
std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 4UL ) );
std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 4UL ) );
std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 4UL ) );
// Creating padded custom vectors of size 3 and a capacity of 4
CustomType a( memory1.get(), 3UL, 4UL );
CustomType b( memory2.get(), 3UL, 4UL );
CustomType c( memory3.get(), 3UL, 4UL );
// ... Initialization
c = a + b; // AVX-based vector addition
\endcode
// In this example, maximum performance is possible. However, in case no padding elements are
// inserted, a scalar addition has to be used:
\code
using blaze::CustomVector;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::unpadded;
typedef CustomVector<double,aligned,unpadded> CustomType;
std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 3UL ) );
std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 3UL ) );
std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 3UL ) );
// Creating unpadded custom vectors of size 3
CustomType a( memory1.get(), 3UL );
CustomType b( memory2.get(), 3UL );
CustomType c( memory3.get(), 3UL );
// ... Initialization
c = a + b; // Scalar vector addition
\endcode
// Note the different number of constructor parameters for unpadded and padded custom vectors:
// In contrast to unpadded vectors, where during the construction only the size of the array
// has to be specified, during the construction of a padded custom vector it is additionally
// necessary to explicitly specify the capacity of the array.
//
// The number of padding elements is required to be sufficient with respect to the available
// instruction set: In case of an aligned padded custom vector the added padding elements must
// guarantee that the capacity is greater or equal than the size and a multiple of the SIMD vector
// width. In case of unaligned padded vectors the number of padding elements can be greater or
// equal the number of padding elements of an aligned padded custom vector. In case the padding
// is insufficient with respect to the available instruction set, a \c std::invalid_argument
// exception is thrown.
//
// Please also note that \b Blaze will zero initialize the padding elements in order to achieve
// maximum performance!
//
//
// \n \section vector_types_compressed_vector CompressedVector
// <hr>
//
// The blaze::CompressedVector class is the representation of an arbitrarily sized sparse
// vector, which stores only non-zero elements of arbitrary type. It can be included via the
// header file
\code
#include <blaze/math/CompressedVector.h>
\endcode
// The type of the elements and the transpose flag of the vector can be specified via the two
// template parameters:
\code
template< typename Type, bool TF >
class CompressedVector;
\endcode
// - \c Type: specifies the type of the vector elements. CompressedVector can be used with any
// non-cv-qualified, non-reference, non-pointer element type.
// - \c TF : specifies whether the vector is a row vector (\c blaze::rowVector) or a column
// vector (\c blaze::columnVector). The default value is \c blaze::columnVector.
//
// The blaze::CompressedVector is the right choice for all kinds of sparse vectors:
\code
// Definition of a 3-dimensional integral column vector
blaze::CompressedVector<int> a( 3UL );
// Definition of a 4-dimensional single precision column vector with capacity for 3 non-zero elements
blaze::CompressedVector<float,blaze::columnVector> b( 4UL, 3UL );
// Definition of a double precision row vector with size 0
blaze::CompressedVector<double,blaze::rowVector> c;
\endcode
// \n Previous: \ref vectors Next: \ref vector_operations
*/
//*************************************************************************************************
//**Vector Operations******************************************************************************
/*!\page vector_operations Vector Operations
//
// \tableofcontents
//
//
// \n \section vector_operations_constructors Constructors
// <hr>
//
// Instantiating and setting up a vector is very easy and intuitive. However, there are a few
// rules to take care of:
// - In case the last template parameter (the transpose flag) is omitted, the vector is per
// default a column vector.
// - The elements of a \c StaticVector or \c HybridVector are default initialized (i.e. built-in
// data types are initialized to 0, class types are initialized via the default constructor).
// - Newly allocated elements of a \c DynamicVector or \c CompressedVector remain uninitialized
// if they are of built-in type and are default constructed if they are of class type.
//
// \n \subsection vector_operations_default_construction Default Construction
\code
using blaze::StaticVector;
using blaze::DynamicVector;
using blaze::CompressedVector;
// All vectors can be default constructed. Whereas the size
// of StaticVectors is fixed via the second template parameter,
// the initial size of a default constructed DynamicVector or
// CompressedVector is 0.
StaticVector<int,2UL> v1; // Instantiation of a 2D integer column vector.
// All elements are initialized to 0.
StaticVector<long,3UL,columnVector> v2; // Instantiation of a 3D long integer column vector.
// Again, all elements are initialized to 0L.
DynamicVector<float> v3; // Instantiation of a dynamic single precision column
// vector of size 0.
DynamicVector<double,rowVector> v4; // Instantiation of a dynamic double precision row
// vector of size 0.
CompressedVector<int> v5; // Instantiation of a compressed integer column
// vector of size 0.
CompressedVector<double,rowVector> v6; // Instantiation of a compressed double precision row
// vector of size 0.
\endcode
// \n \subsection vector_operations_size_construction Construction with Specific Size
//
// The \c DynamicVector, \c HybridVector and \c CompressedVector classes offer a constructor that
// allows to immediately give the vector the required size. Whereas both dense vectors (i.e.
// \c DynamicVector and \c HybridVector) use this information to allocate memory for all vector
// elements, \c CompressedVector merely acquires the size but remains empty.
\code
DynamicVector<int,columnVector> v7( 9UL ); // Instantiation of an integer dynamic column vector
// of size 9. The elements are NOT initialized!
HybridVector< complex<float>, 5UL > v8( 2UL ); // Instantiation of a column vector with two single
// precision complex values. The elements are
// default constructed.
CompressedVector<int,rowVector> v9( 10UL ); // Instantiation of a compressed row vector with
// size 10. Initially, the vector provides no
// capacity for non-zero elements.
\endcode
// \n \subsection vector_operations_initialization_constructors Initialization Constructors
//
// All dense vector classes offer a constructor that allows for a direct, homogeneous initialization
// of all vector elements. In contrast, for sparse vectors the predicted number of non-zero elements
// can be specified
\code
StaticVector<int,3UL,rowVector> v10( 2 ); // Instantiation of a 3D integer row vector.
// All elements are initialized to 2.
DynamicVector<float> v11( 3UL, 7.0F ); // Instantiation of a dynamic single precision
// column vector of size 3. All elements are
// set to 7.0F.
CompressedVector<float,rowVector> v12( 15UL, 3UL ); // Instantiation of a single precision column
// vector of size 15, which provides enough
// space for at least 3 non-zero elements.
\endcode
// \n \subsection vector_operations_array_construction Array Construction
//
// Alternatively, all dense vector classes offer a constructor for an initialization with a dynamic
// or static array. If the vector is initialized from a dynamic array, the constructor expects the
// actual size of the array as first argument, the array as second argument. In case of a static
// array, the fixed size of the array is used:
\code
const unique_ptr<double[]> array1( new double[2] );
// ... Initialization of the dynamic array
blaze::StaticVector<double,2UL> v13( 2UL, array1.get() );
int array2[4] = { 4, -5, -6, 7 };
blaze::StaticVector<int,4UL> v14( array2 );
\endcode
// \n \subsection vector_operations_initializer_list_construction Initializer List Construction
//
// In addition, all dense vector classes can be directly initialized by means of an initializer
// list:
\code
blaze::DynamicVector<float> v15{ 1.0F, 2.0F, 3.0F, 4.0F };
\endcode
// \n \subsection vector_operations_copy_construction Copy Construction
//
// All dense and sparse vectors can be created as the copy of any other dense or sparse vector
// with the same transpose flag (i.e. blaze::rowVector or blaze::columnVector).
\code
StaticVector<int,9UL,columnVector> v16( v7 ); // Instantiation of the dense column vector v16
// as copy of the dense column vector v7.
DynamicVector<int,rowVector> v17( v9 ); // Instantiation of the dense row vector v17 as
// copy of the sparse row vector v9.
CompressedVector<int,columnVector> v18( v1 ); // Instantiation of the sparse column vector v18
// as copy of the dense column vector v1.
CompressedVector<float,rowVector> v19( v12 ); // Instantiation of the sparse row vector v19 as
// copy of the row vector v12.
\endcode
// Note that it is not possible to create a \c StaticVector as a copy of a vector with a different
// size:
\code
StaticVector<int,5UL,columnVector> v23( v7 ); // Runtime error: Size does not match!
StaticVector<int,4UL,rowVector> v24( v10 ); // Compile time error: Size does not match!
\endcode
// \n \section vector_operations_assignment Assignment
// <hr>
//
// There are several types of assignment to dense and sparse vectors:
// \ref vector_operations_homogeneous_assignment, \ref vector_operations_array_assignment,
// \ref vector_operations_copy_assignment, and \ref vector_operations_compound_assignment.
//
// \n \subsection vector_operations_homogeneous_assignment Homogeneous Assignment
//
// Sometimes it may be necessary to assign the same value to all elements of a dense vector.
// For this purpose, the assignment operator can be used:
\code
blaze::StaticVector<int,3UL> v1;
blaze::DynamicVector<double> v2;
// Setting all integer elements of the StaticVector to 2
v1 = 2;
// Setting all double precision elements of the DynamicVector to 5.0
v2 = 5.0;
\endcode
// \n \subsection vector_operations_array_assignment Array Assignment
//
// Dense vectors can also be assigned a static array:
\code
blaze::StaticVector<float,2UL> v1;
blaze::DynamicVector<double,rowVector> v2;
float array1[2] = { 1.0F, 2.0F };
double array2[5] = { 2.1, 4.0, -1.7, 8.6, -7.2 };
v1 = array1;
v2 = array2;
\endcode
// \n \subsection vector_operations_initializer_list_assignment Initializer List Assignment
//
// Alternatively, it is possible to directly assign an initializer list to a dense vector:
\code
blaze::StaticVector<float,2UL> v1;
blaze::DynamicVector<double,rowVector> v2;
v1 = { 1.0F, 2.0F };
v2 = { 2.1, 4.0, -1.7, 8.6, -7.2 };
\endcode
// \n \subsection vector_operations_copy_assignment Copy Assignment
//
// For all vector types it is generally possible to assign another vector with the same transpose
// flag (i.e. blaze::columnVector or blaze::rowVector). Note that in case of \c StaticVectors, the
// assigned vector is required to have the same size as the \c StaticVector since the size of a
// \c StaticVector cannot be adapted!
\code
blaze::StaticVector<int,3UL,columnVector> v1;
blaze::DynamicVector<int,columnVector> v2( 3UL );
blaze::DynamicVector<float,columnVector> v3( 5UL );
blaze::CompressedVector<int,columnVector> v4( 3UL );
blaze::CompressedVector<float,rowVector> v5( 3UL );
// ... Initialization of the vectors
v1 = v2; // OK: Assignment of a 3D dense column vector to another 3D dense column vector
v1 = v4; // OK: Assignment of a 3D sparse column vector to a 3D dense column vector
v1 = v3; // Runtime error: Cannot assign a 5D vector to a 3D static vector
v1 = v5; // Compilation error: Cannot assign a row vector to a column vector
\endcode
// \n \subsection vector_operations_compound_assignment Compound Assignment
//
// Next to plain assignment, it is also possible to use addition assignment, subtraction
// assignment, and multiplication assignment. Note however, that in contrast to plain assignment
// the size and the transpose flag of the vectors have to be equal in order to be able to
// perform a compound assignment.
// compound assignment.
\code
blaze::StaticVector<int,5UL,columnVector> v1;
blaze::DynamicVector<int,columnVector> v2( 5UL );
blaze::CompressedVector<float,columnVector> v3( 7UL );
blaze::DynamicVector<float,rowVector> v4( 7UL );
blaze::CompressedVector<float,rowVector> v5( 7UL );
// ... Initialization of the vectors
v1 += v2; // OK: Addition assignment between two column vectors of the same size
v1 += v3; // Runtime error: No compound assignment between vectors of different size
v1 -= v4; // Compilation error: No compound assignment between vectors of different transpose flag
v4 *= v5; // OK: Multiplication assignment between two row vectors of the same size
\endcode
// \n \section vector_operations_element_access Element Access
// <hr>
//
// The easiest and most intuitive way to access a dense or sparse vector is via the subscript
// operator. The indices to access a vector are zero-based:
\code
blaze::DynamicVector<int> v1( 5UL );
v1[0] = 1;
v1[1] = 3;
// ...
blaze::CompressedVector<float> v2( 5UL );
v2[2] = 7.3F;
v2[4] = -1.4F;
\endcode
// Whereas using the subscript operator on a dense vector only accesses the already existing
// element, accessing an element of a sparse vector via the subscript operator potentially
// inserts the element into the vector and may therefore be more expensive. Consider the
// following example:
\code
blaze::CompressedVector<int> v1( 10UL );
for( size_t i=0UL; i<v1.size(); ++i ) {
... = v1[i];
}
\endcode
// Although the compressed vector is only used for read access within the for loop, using the
// subscript operator temporarily inserts 10 non-zero elements into the vector. Therefore, all
// vectors (sparse as well as dense) offer an alternate way via the \c begin(), \c cbegin(),
// \c end(), and \c cend() functions to traverse the currently contained elements by iterators.
// In case of non-const vectors, \c begin() and \c end() return an \c Iterator, which allows a
// manipulation of the non-zero value, in case of a constant vector or in case \c cbegin() or
// \c cend() are used a \c ConstIterator is returned:
\code
using blaze::CompressedVector;
CompressedVector<int> v1( 10UL );
// ... Initialization of the vector
// Traversing the vector by Iterator
for( CompressedVector<int>::Iterator it=v1.begin(); it!=v1.end(); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the non-zero element.
}
// Traversing the vector by ConstIterator
for( CompressedVector<int>::ConstIterator it=v1.cbegin(); it!=v1.cend(); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the non-zero element.
}
\endcode
// Note that \c begin(), \c cbegin(), \c end(), and \c cend() are also available as free functions:
\code
for( CompressedVector<int>::Iterator it=begin( v1 ); it!=end( v1 ); ++it ) {
// ...
}
for( CompressedVector<int>::ConstIterator it=cbegin( v1 ); it!=cend( v1 ); ++it ) {
// ...
}
\endcode
// \n \section vector_operations_element_insertion Element Insertion
// <hr>
//
// In contrast to dense vectors, that store all elements independent of their value and that
// offer direct access to all elements, sparse vectors only store the non-zero elements contained
// in the vector. Therefore it is necessary to explicitly add elements to the vector. The first
// option to add elements to a sparse vector is the subscript operator:
\code
using blaze::CompressedVector;
CompressedVector<int> v1( 3UL );
v1[1] = 2;
\endcode
// In case the element at the given index is not yet contained in the vector, it is automatically
// inserted. Otherwise the old value is replaced by the new value 2. The operator returns a
// reference to the sparse vector element.\n
// An alternative is the \c set() function: In case the element is not yet contained in the vector
// the element is inserted, else the element's value is modified:
\code
// Insert or modify the value at index 3
v1.set( 3, 1 );
\endcode
// However, insertion of elements can be better controlled via the \c insert() function. In contrast
// to the subscript operator and the \c set() function it emits an exception in case the element is
// already contained in the vector. In order to check for this case, the \c find() function can be
// used:
\code
// In case the element at index 4 is not yet contained in the matrix it is inserted
// with a value of 6.
if( v1.find( 4 ) == v1.end() )
v1.insert( 4, 6 );
\endcode
// Although the \c insert() function is very flexible, due to performance reasons it is not suited
// for the setup of large sparse vectors. A very efficient, yet also very low-level way to fill
// a sparse vector is the \c append() function. It requires the sparse vector to provide enough
// capacity to insert a new element. Additionally, the index of the new element must be larger
// than the index of the previous element. Violating these conditions results in undefined
// behavior!
\code
v1.reserve( 10 ); // Reserving space for 10 non-zero elements
v1.append( 5, -2 ); // Appending the element -2 at index 5
v1.append( 6, 4 ); // Appending the element 4 at index 6
// ...
\endcode
// \n \section vector_operations_non_modifying_operations Non-Modifying Operations
// <hr>
//
// \subsection vector_operations_size .size()
//
// Via the \c size() member function, the current size of a dense or sparse vector can be queried:
\code
// Instantiating a dynamic vector with size 10
blaze::DynamicVector<int> v1( 10UL );
v1.size(); // Returns 10
// Instantiating a compressed vector with size 12 and capacity for 3 non-zero elements
blaze::CompressedVector<double> v2( 12UL, 3UL );
v2.size(); // Returns 12
\endcode
// Alternatively, the free function \c size() can be used to query the current size of a vector.
// In contrast to the member function, the free function can also be used to query the size of
// vector expressions:
\code
size( v1 ); // Returns 10, i.e. has the same effect as the member function
size( v2 ); // Returns 12, i.e. has the same effect as the member function
blaze::DynamicMatrix<int> A( 15UL, 12UL );
size( A * v2 ); // Returns 15, i.e. the size of the resulting vector
\endcode
// \n \subsection vector_operations_capacity .capacity()
//
// Via the \c capacity() (member) function the internal capacity of a dense or sparse vector
// can be queried. Note that the capacity of a vector doesn't have to be equal to the size
// of a vector. In case of a dense vector the capacity will always be greater or equal than
// the size of the vector, in case of a sparse vector the capacity may even be less than
// the size.
\code
v1.capacity(); // Returns at least 10
\endcode
// For symmetry reasons, there is also a free function \c capacity() available that can be used
// to query the capacity:
\code
capacity( v1 ); // Returns at least 10, i.e. has the same effect as the member function
\endcode
// Note, however, that it is not possible to query the capacity of a vector expression:
\code
capacity( A * v1 ); // Compilation error!
\endcode
// \n \subsection vector_operations_nonzeros .nonZeros()
//
// For both dense and sparse vectors the number of non-zero elements can be determined via the
// \c nonZeros() member function. Sparse vectors directly return their number of non-zero
// elements, dense vectors traverse their elements and count the number of non-zero elements.
\code
v1.nonZeros(); // Returns the number of non-zero elements in the dense vector
v2.nonZeros(); // Returns the number of non-zero elements in the sparse vector
\endcode
// There is also a free function \c nonZeros() available to query the current number of non-zero
// elements:
\code
nonZeros( v1 ); // Returns the number of non-zero elements in the dense vector
nonZeros( v2 ); // Returns the number of non-zero elements in the sparse vector
\endcode
// The free \c nonZeros() function can also be used to query the number of non-zero elements in
// a vector expression. However, the result is not the exact number of non-zero elements, but
// may be a rough estimation:
\code
nonZeros( A * v1 ); // Estimates the number of non-zero elements in the vector expression
\endcode
// \n \subsection vector_operations_isnan isnan()
//
// The \c isnan() function provides the means to check a dense or sparse vector for not-a-number
// elements:
\code
blaze::DynamicVector<double> a;
// ... Resizing and initialization
if( isnan( a ) ) { ... }
\endcode
\code
blaze::CompressedVector<double> a;
// ... Resizing and initialization
if( isnan( a ) ) { ... }
\endcode
// If at least one element of the vector is not-a-number, the function returns \c true, otherwise
// it returns \c false. Please note that this function only works for vectors with floating point
// elements. The attempt to use it for a vector with a non-floating point element type results in
// a compile time error.
//
//
// \n \subsection vector_operations_isdefault isDefault()
//
// The \c isDefault() function returns whether the given dense or sparse vector is in default state:
\code
blaze::HybridVector<int,20UL> a;
// ... Resizing and initialization
if( isDefault( a ) ) { ... }
\endcode
// A vector is in default state if it appears to just have been default constructed. All resizable
// vectors (\c HybridVector, \c DynamicVector, or \c CompressedVector) and \c CustomVector are
// in default state if their size is equal to zero. A non-resizable vector (\c StaticVector, all
// subvectors, rows, and columns) is in default state if all its elements are in default state.
// For instance, in case the vector is instantiated for a built-in integral or floating point data
// type, the function returns \c true in case all vector elements are 0 and \c false in case any
// vector element is not 0.
//
//
// \n \subsection vector_operations_isUniform isUniform()
//
// In order to check if all vector elements are identical, the \c isUniform function can be used:
\code
blaze::DynamicVector<int> a;
// ... Resizing and initialization
if( isUniform( a ) ) { ... }
\endcode
// Note that in case of sparse vectors the zero elements are also taken into account!
//
//
// \n \subsection vector_operations_length length() / sqrLength()
//
// In order to calculate the length (magnitude) of a dense or sparse vector, both the \c length()
// and \c sqrLength() function can be used:
\code
blaze::StaticVector<float,3UL,rowVector> v{ -1.2F, 2.7F, -2.3F };
const float len = length ( v ); // Computes the current length of the vector
const float sqrlen = sqrLength( v ); // Computes the square length of the vector
\endcode
// Note that both functions can only be used for vectors with built-in or complex element type!
//
//
// \n \subsection vector_operations_vector_trans trans()
//
// As already mentioned, vectors can either be column vectors (blaze::columnVector) or row vectors
// (blaze::rowVector). A column vector cannot be assigned to a row vector and vice versa. However,
// vectors can be transposed via the \c trans() function:
\code
blaze::DynamicVector<int,columnVector> v1( 4UL );
blaze::CompressedVector<int,rowVector> v2( 4UL );
v1 = v2; // Compilation error: Cannot assign a row vector to a column vector
v1 = trans( v2 ); // OK: Transposing the row vector to a column vector and assigning it
// to the column vector v1
v2 = trans( v1 ); // OK: Transposing the column vector v1 and assigning it to the row vector v2
v1 += trans( v2 ); // OK: Addition assignment of two column vectors
\endcode
// \n \subsection vector_operations_ctrans ctrans()
//
// It is also possible to compute the conjugate transpose of a vector. This operation is available
// via the \c ctrans() function:
\code
blaze::CompressedVector< complex<float>, rowVector > v1( 4UL );
blaze::DynamicVector< complex<float>, columnVector > v2( 4UL );
v1 = ctrans( v2 ); // Compute the conjugate transpose vector
\endcode
// Note that the \c ctrans() function has the same effect as manually applying the \c conj() and
// \c trans() function in any order:
\code
v1 = trans( conj( v2 ) ); // Computing the conjugate transpose vector
v1 = conj( trans( v2 ) ); // Computing the conjugate transpose vector
\endcode
// \n \subsection vector_operations_evaluate eval() / evaluate()
//
// The \c evaluate() function forces an evaluation of the given vector expression and enables
// an automatic deduction of the correct result type of an operation. The following code example
// demonstrates its intended use for the multiplication of a dense and a sparse vector:
\code
using blaze::DynamicVector;
using blaze::CompressedVector;
blaze::DynamicVector<double> a;
blaze::CompressedVector<double> b;
// ... Resizing and initialization
auto c = evaluate( a * b );
\endcode
// In this scenario, the \c evaluate() function assists in deducing the exact result type of
// the operation via the \c auto keyword. Please note that if \c evaluate() is used in this
// way, no temporary vector is created and no copy operation is performed. Instead, the result
// is directly written to the target vector due to the return value optimization (RVO). However,
// if \c evaluate() is used in combination with an explicit target type, a temporary will be
// created and a copy operation will be performed if the used type differs from the type
// returned from the function:
\code
CompressedVector<double> d( a * b ); // No temporary & no copy operation
DynamicVector<double> e( a * b ); // Temporary & copy operation
d = evaluate( a * b ); // Temporary & copy operation
\endcode
// Sometimes it might be desirable to explicitly evaluate a sub-expression within a larger
// expression. However, please note that \c evaluate() is not intended to be used for this
// purpose. This task is more elegantly and efficiently handled by the \c eval() function:
\code
blaze::DynamicVector<double> a, b, c, d;
d = a + evaluate( b * c ); // Unnecessary creation of a temporary vector
d = a + eval( b * c ); // No creation of a temporary vector
\endcode
// In contrast to the \c evaluate() function, \c eval() can take the complete expression
// into account and therefore can guarantee the most efficient way to evaluate it (see also
// \ref intra_statement_optimization).
//
//
// \n \section vector_operations_modifying_operations Modifying Operations
// <hr>
//
// \subsection vector_operations_resize_reserve .resize() / .reserve()
//
// The size of a \c StaticVector is fixed by the second template parameter and a \c CustomVector
// cannot be resized. In contrast, the size of \c DynamicVectors, \c HybridVectors as well as
// \c CompressedVectors can be changed via the \c resize() function:
\code
using blaze::DynamicVector;
using blaze::CompressedVector;
DynamicVector<int,columnVector> v1;
CompressedVector<int,rowVector> v2( 4 );
v2[1] = -2;
v2[3] = 11;
// Adapting the size of the dynamic and compressed vectors. The (optional) second parameter
// specifies whether the existing elements should be preserved. Per default, the existing
// elements are preserved.
v1.resize( 5UL ); // Resizing vector v1 to 5 elements. Elements of built-in type remain
// uninitialized, elements of class type are default constructed.
v1.resize( 3UL, false ); // Resizing vector v1 to 3 elements. The old elements are lost, the
// new elements are NOT initialized!
v2.resize( 8UL, true ); // Resizing vector v2 to 8 elements. The old elements are preserved.
v2.resize( 5UL, false ); // Resizing vector v2 to 5 elements. The old elements are lost.
\endcode
// Note that resizing a vector invalidates all existing views (see e.g. \ref views_subvectors)
// on the vector:
\code
typedef blaze::DynamicVector<int,rowVector> VectorType;
typedef blaze::Subvector<VectorType> SubvectorType;
VectorType v1( 10UL ); // Creating a dynamic vector of size 10
SubvectorType sv = subvector( v1, 2UL, 5UL ); // Creating a view on the range [2..6]
v1.resize( 6UL ); // Resizing the vector invalidates the view
\endcode
// When the internal capacity of a vector is no longer sufficient, the allocation of a larger
// junk of memory is triggered. In order to avoid frequent reallocations, the \c reserve()
// function can be used up front to set the internal capacity:
\code
blaze::DynamicVector<int> v1;
v1.reserve( 100 );
v1.size(); // Returns 0
v1.capacity(); // Returns at least 100
\endcode
// Note that the size of the vector remains unchanged, but only the internal capacity is set
// according to the specified value!
//
// \n \subsection vector_operations_shrinkToFit .shrinkToFit()
//
// The internal capacity of vectors with dynamic memory is preserved in order to minimize the
// number of reallocations. For that reason, the \c resize() and \c reserve() functions can lead
// to memory overhead. The \c shrinkToFit() member function can be used to minimize the internal
// capacity:
\code
blaze::DynamicVector<int> v1( 1000UL ); // Create a vector of 1000 integers
v1.resize( 10UL ); // Resize to 10, but the capacity is preserved
v1.shrinkToFit(); // Remove the unused capacity
\endcode
// Please note that due to padding the capacity might not be reduced exactly to \c size(). Please
// also note that in case a reallocation occurs, all iterators (including \c end() iterators), all
// pointers and references to elements of the vector are invalidated.
//
// \subsection vector_operations_reset_clear reset() / clear()
//
// In order to reset all elements of a vector, the \c reset() function can be used:
\code
// Setup of a single precision column vector, whose elements are initialized with 2.0F.
blaze::DynamicVector<float> v1( 3UL, 2.0F );
// Resetting all elements to 0.0F. Only the elements are reset, the size of the vector is unchanged.
reset( v1 ); // Resetting all elements
v1.size(); // Returns 3: size and capacity remain unchanged
\endcode
// In order to return a vector to its default state (i.e. the state of a default constructed
// vector), the \c clear() function can be used:
\code
// Setup of a single precision column vector, whose elements are initialized with -1.0F.
blaze::DynamicVector<float> v1( 5, -1.0F );
// Resetting the entire vector.
clear( v1 ); // Resetting the entire vector
v1.size(); // Returns 0: size is reset, but capacity remains unchanged
\endcode
// Note that resetting or clearing both dense and sparse vectors does not change the capacity
// of the vectors.
//
//
// \n \subsection vector_operations_swap swap()
//
// Via the \c swap() function it is possible to completely swap the contents of two vectors of
// the same type:
\code
blaze::DynamicVector<int,columnVector> v1( 10UL );
blaze::DynamicVector<int,columnVector> v2( 20UL );
swap( v1, v2 ); // Swapping the contents of v1 and v2
\endcode
// \n \section vector_operations_arithmetic_operations Arithmetic Operations
// <hr>
//
// \subsection vector_operations_normalize normalize()
//
// The \c normalize() function can be used to scale any non-zero vector to a length of 1. In
// case the vector does not contain a single non-zero element (i.e. is a zero vector), the
// \c normalize() function returns a zero vector.
\code
blaze::DynamicVector<float,columnVector> v1( 10UL );
blaze::CompressedVector<double,columnVector> v2( 12UL );
v1 = normalize( v1 ); // Normalizing the dense vector v1
length( v1 ); // Returns 1 (or 0 in case of a zero vector)
v1 = normalize( v2 ); // Assigning v1 the normalized vector v2
length( v1 ); // Returns 1 (or 0 in case of a zero vector)
\endcode
// Note that the \c normalize() function only works for floating point vectors. The attempt to
// use it for an integral vector results in a compile time error.
//
//
// \n \subsection vector_operations_min_max min() / max()
//
// The \c min() and \c max() functions can be used for a single vector or multiple vectors. If
// passed a single vector, the functions return the smallest and largest element of the given
// dense or sparse vector, respectively:
\code
blaze::StaticVector<int,4UL,rowVector> a{ -5, 2, 7, -4 };
min( a ); // Returns -5
max( a ); // Returns 7
\endcode
// In case the vector currently has a size of 0, both functions return 0. Additionally, in case
// a given sparse vector is not completely filled, the zero elements are taken into account. For
// example, the following compressed vector has only two non-zero elements. However, the minimum
// of this vector is 0:
\code
blaze::CompressedVector<int> b( 4UL, 2UL );
b[0] = 1;
b[2] = 3;
min( b ); // Returns 0
\endcode
// If passed two or more dense vectors, the \c min() and \c max() functions compute the
// componentwise minimum or maximum of the given vectors, respectively:
\code
blaze::StaticVector<int,4UL,rowVector> c{ -5, 1, -7, 4 };
blaze::StaticVector<int,4UL,rowVector> d{ -5, 3, 0, 2 };
min( a, c ); // Results in the vector ( -5, 1, -7, -4 )
max( a, c, d ); // Results in the vector ( -5, 3, 7, 4 )
\endcode
// Please note that sparse vectors can only be used in the unary \c min() and \c max() functions.
// Also note that all forms of the \c min() and \c max() functions can be used to compute the
// smallest and largest element of a vector expression:
\code
min( a + b + c ); // Returns -9, i.e. the smallest value of the resulting vector
max( a - b - c ); // Returns 11, i.e. the largest value of the resulting vector
min( a + c, c - d ); // Results in ( -10 -2 -7 0 )
max( a - c, c + d ); // Results in ( 0 4 14 6 )
\endcode
// \n \subsection vector_operators_abs abs()
//
// The \c abs() function can be used to compute the absolute values of each element of a vector.
// For instance, the following computation
\code
blaze::StaticVector<int,3UL,rowVector> a{ -1, 2, -3 };
blaze::StaticVector<int,3UL,rowVector> b( abs( a ) );
\endcode
// results in the vector
\f$ b = \left(\begin{array}{*{1}{c}}
1 \\
2 \\
3 \\
\end{array}\right)\f$
// \n \subsection vector_operations_rounding_functions floor() / ceil() / trunc() / round()
//
// The \c floor(), \c ceil(), \c trunc(), and \c round() functions can be used to round down/up
// each element of a vector, respectively:
\code
blaze::StaticVector<double,3UL,rowVector> a, b;
b = floor( a ); // Rounding down each element of the vector
b = ceil ( a ); // Rounding up each element of the vector
b = trunc( a ); // Truncating each element of the vector
b = round( a ); // Rounding each element of the vector
\endcode
// \n \subsection vector_operators_conj conj()
//
// The \c conj() function can be applied on a dense or sparse vector to compute the complex
// conjugate of each element of the vector:
\code
using blaze::StaticVector;
typedef std::complex<double> cplx;
// Creating the vector
// ( (-2,-1) )
// ( ( 1, 1) )
StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) };
// Computing the vector of complex conjugates
// ( (-2, 1) )
// ( ( 1,-1) )
StaticVector<cplx,2UL> b;
b = conj( a );
\endcode
// Additionally, vectors can be conjugated in-place via the \c conjugate() function:
\code
blaze::DynamicVector<cplx> c( 5UL );
conjugate( c ); // In-place conjugate operation.
c = conj( c ); // Same as above
\endcode
// \n \subsection vector_operators_real real()
//
// The \c real() function can be used on a dense or sparse vector to extract the real part of
// each element of the vector:
\code
using blaze::StaticVector;
typedef std::complex<double> cplx;
// Creating the vector
// ( (-2,-1) )
// ( ( 1, 1) )
StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) };
// Extracting the real part of each vector element
// ( -2 )
// ( 1 )
StaticVector<double,2UL> b;
b = real( a );
\endcode
// \n \subsection vector_operators_imag imag()
//
// The \c imag() function can be used on a dense or sparse vector to extract the imaginary part
// of each element of the vector:
\code
using blaze::StaticVector;
typedef std::complex<double> cplx;
// Creating the vector
// ( (-2,-1) )
// ( ( 1, 1) )
StaticVector<cplx,2UL> a{ cplx(-2.0,-1.0), cplx(1.0,1.0) };
// Extracting the imaginary part of each vector element
// ( -1 )
// ( 1 )
StaticVector<double,2UL> b;
b = imag( a );
\endcode
// \n \subsection vector_operations_sqrt sqrt() / invsqrt()
//
// Via the \c sqrt() and \c invsqrt() functions the (inverse) square root of each element of a
// vector can be computed:
\code
blaze::DynamicVector<double> a, b, c;
b = sqrt( a ); // Computes the square root of each element
c = invsqrt( a ); // Computes the inverse square root of each element
\endcode
// Note that in case of sparse vectors only the non-zero elements are taken into account!
//
//
// \n \subsection vector_operations_cbrt cbrt() / invcbrt()
//
// The \c cbrt() and \c invcbrt() functions can be used to compute the (inverse) cubic root
// of each element of a vector:
\code
blaze::HybridVector<double,3UL> a, b, c;
b = cbrt( a ); // Computes the cubic root of each element
c = invcbrt( a ); // Computes the inverse cubic root of each element
\endcode
// Note that in case of sparse vectors only the non-zero elements are taken into account!
//
//
// \n \subsection vector_operations_clamp clamp()
//
// The \c clamp() function can be used to restrict all elements of a vector to a specific range:
\code
blaze::DynamicVector<double> a, b;
b = clamp( a, -1.0, 1.0 ); // Restrict all elements to the range [-1..1]
\endcode
// Note that in case of sparse vectors only the non-zero elements are taken into account!
//
//
// \n \subsection vector_operations_pow pow()
//
// The \c pow() function can be used to compute the exponential value of each element of a vector:
\code
blaze::StaticVector<double,3UL> a, b;
b = pow( a, 1.2 ); // Computes the exponential value of each element
\endcode
// \n \subsection vector_operations_exp exp() / exp2() / exp10()
//
// \c exp(), \c exp2() and \c exp10() compute the base e/2/10 exponential of each element of a
// vector, respectively:
\code
blaze::DynamicVector<double> a, b;
b = exp( a ); // Computes the base e exponential of each element
b = exp2( a ); // Computes the base 2 exponential of each element
b = exp10( a ); // Computes the base 10 exponential of each element
\endcode
// Note that in case of sparse vectors only the non-zero elements are taken into account!
//
//
// \n \subsection vector_operations_log log() / log2() / log10()
//
// The \c log(), \c log2() and \c log10() functions can be used to compute the natural, binary
// and common logarithm of each element of a vector:
\code
blaze::StaticVector<double,3UL> a, b;
b = log( a ); // Computes the natural logarithm of each element
b = log2( a ); // Computes the binary logarithm of each element
b = log10( a ); // Computes the common logarithm of each element
\endcode
// \n \subsection vector_operations_trigonometric_functions sin() / cos() / tan() / asin() / acos() / atan()
//
// The following trigonometric functions are available for both dense and sparse vectors:
\code
blaze::DynamicVector<double> a, b;
b = sin( a ); // Computes the sine of each element of the vector
b = cos( a ); // Computes the cosine of each element of the vector
b = tan( a ); // Computes the tangent of each element of the vector
b = asin( a ); // Computes the inverse sine of each element of the vector
b = acos( a ); // Computes the inverse cosine of each element of the vector
b = atan( a ); // Computes the inverse tangent of each element of the vector
\endcode
// Note that in case of sparse vectors only the non-zero elements are taken into account!
//
//
// \n \subsection vector_operations_hyperbolic_functions sinh() / cosh() / tanh() / asinh() / acosh() / atanh()
//
// The following hyperbolic functions are available for both dense and sparse vectors:
\code
blaze::DynamicVector<double> a, b;
b = sinh( a ); // Computes the hyperbolic sine of each element of the vector
b = cosh( a ); // Computes the hyperbolic cosine of each element of the vector
b = tanh( a ); // Computes the hyperbolic tangent of each element of the vector
b = asinh( a ); // Computes the inverse hyperbolic sine of each element of the vector
b = acosh( a ); // Computes the inverse hyperbolic cosine of each element of the vector
b = atanh( a ); // Computes the inverse hyperbolic tangent of each element of the vector
\endcode
// Note that in case of sparse vectors only the non-zero elements are taken into account!
//
//
// \n \subsection vector_operations_erf erf() / erfc()
//
// The \c erf() and \c erfc() functions compute the (complementary) error function of each
// element of a vector:
\code
blaze::StaticVector<double,3UL,rowVector> a, b;
b = erf( a ); // Computes the error function of each element
b = erfc( a ); // Computes the complementary error function of each element
\endcode
// Note that in case of sparse vectors only the non-zero elements are taken into account!
//
//
// \n \subsection vector_operations_map map() / forEach()
//
// Via the unary and binary \c map() functions it is possible to execute componentwise custom
// operations on vectors. The unary \c map() function can be used to apply a custom operation
// on each element of a dense or sparse vector. For instance, the following example demonstrates
// a custom square root computation via a lambda:
\code
blaze::DynamicVector<double> a, b;
b = map( a, []( double d ) { return std::sqrt( d ); } );
\endcode
// The binary \c map() function can be used to apply an operation pairwise to the elements of
// two dense vectors. The following example demonstrates the merging of two vectors of double
// precision values into a vector of double precision complex numbers:
\code
blaze::DynamicVector<double> real{ 2.1, -4.2, 1.0, 0.6 };
blaze::DynamicVector<double> imag{ 0.3, 1.4, 2.9, -3.4 };
blaze::DynamicVector< complex<double> > cplx;
// Creating the vector
//    ( ( 2.1,  0.3) )
//    ( (-4.2,  1.4) )
//    ( ( 1.0,  2.9) )
//    ( ( 0.6, -3.4) )
cplx = map( real, imag, []( double r, double i ){ return complex<double>( r, i ); } );
\endcode
// Although the computation can be parallelized it is not vectorized and thus cannot perform at
// peak performance. However, it is also possible to create vectorized custom operations. See
// \ref custom_operations for a detailed overview of the possibilities of custom operations.
//
// Please note that unary custom operations on vectors have been introduced in \b Blaze 3.0 in
// form of the \c forEach() function. With the introduction of binary custom functions, the
// \c forEach() function has been renamed to \c map(). The \c forEach() function can still be
// used (even for binary custom operations), but the function might be deprecated in future
// releases of \b Blaze.
//
//
// \n Previous: \ref vector_types Next: \ref matrices
*/
//*************************************************************************************************
//**Matrices***************************************************************************************
/*!\page matrices Matrices
//
// \tableofcontents
//
//
// \n \section matrices_general General Concepts
// <hr>
//
// The \b Blaze library currently offers four dense matrix types (\ref matrix_types_static_matrix,
// \ref matrix_types_dynamic_matrix, \ref matrix_types_hybrid_matrix, and \ref matrix_types_custom_matrix)
// and one sparse matrix type (\ref matrix_types_compressed_matrix). All matrices can either be
// stored as row-major matrices or column-major matrices:
\code
using blaze::DynamicMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
// Setup of the 2x3 row-major dense matrix
//
// ( 1 2 3 )
// ( 4 5 6 )
//
DynamicMatrix<int,rowMajor> A{ { 1, 2, 3 },
{ 4, 5, 6 } };
// Setup of the 3x2 column-major dense matrix
//
// ( 1 4 )
// ( 2 5 )
// ( 3 6 )
//
DynamicMatrix<int,columnMajor> B{ { 1, 4 },
{ 2, 5 },
{ 3, 6 } };
\endcode
// Per default, all matrices in \b Blaze are row-major matrices:
\code
// Instantiation of a 3x3 row-major matrix
blaze::DynamicMatrix<int> C( 3UL, 3UL );
\endcode
// \n \section matrices_details Matrix Details
// <hr>
//
// - \ref matrix_types
// - \ref matrix_operations
//
//
// \n \section matrices_examples Examples
// <hr>
\code
using blaze::StaticMatrix;
using blaze::DynamicMatrix;
using blaze::CompressedMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
StaticMatrix<double,6UL,20UL> A; // Instantiation of a 6x20 row-major static matrix
CompressedMatrix<double,rowMajor> B; // Instantiation of a row-major compressed matrix
DynamicMatrix<double,columnMajor> C; // Instantiation of a column-major dynamic matrix
// ... Resizing and initialization
C = A * B;
\endcode
// \n Previous: \ref vector_operations Next: \ref matrix_types
*/
//*************************************************************************************************
//**Matrix Types***********************************************************************************
/*!\page matrix_types Matrix Types
//
// \tableofcontents
//
//
// \n \section matrix_types_static_matrix StaticMatrix
// <hr>
//
// The blaze::StaticMatrix class template is the representation of a fixed size matrix with
// statically allocated elements of arbitrary type. It can be included via the header file
\code
#include <blaze/math/StaticMatrix.h>
\endcode
// The type of the elements, the number of rows and columns, and the storage order of the matrix
// can be specified via the four template parameters:
\code
template< typename Type, size_t M, size_t N, bool SO >
class StaticMatrix;
\endcode
// - \c Type: specifies the type of the matrix elements. StaticMatrix can be used with any
// non-cv-qualified, non-reference element type.
// - \c M : specifies the total number of rows of the matrix.
// - \c N : specifies the total number of columns of the matrix. Note that it is expected
// that StaticMatrix is only used for tiny and small matrices.
// - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix.
// The default value is blaze::rowMajor.
//
// The blaze::StaticMatrix is perfectly suited for small to medium matrices whose dimensions are
// known at compile time:
\code
// Definition of a 3x4 integral row-major matrix
blaze::StaticMatrix<int,3UL,4UL> A;
// Definition of a 4x6 single precision row-major matrix
blaze::StaticMatrix<float,4UL,6UL,blaze::rowMajor> B;
// Definition of a 6x4 double precision column-major matrix
blaze::StaticMatrix<double,6UL,4UL,blaze::columnMajor> C;
\endcode
// \n \section matrix_types_dynamic_matrix DynamicMatrix
// <hr>
//
// The blaze::DynamicMatrix class template is the representation of an arbitrary sized matrix
// with \f$ M \cdot N \f$ dynamically allocated elements of arbitrary type. It can be included
// via the header file
\code
#include <blaze/math/DynamicMatrix.h>
\endcode
// The type of the elements and the storage order of the matrix can be specified via the two
// template parameters:
\code
template< typename Type, bool SO >
class DynamicMatrix;
\endcode
// - \c Type: specifies the type of the matrix elements. DynamicMatrix can be used with any
// non-cv-qualified, non-reference element type.
// - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix.
// The default value is blaze::rowMajor.
//
// The blaze::DynamicMatrix is the default choice for all kinds of dense matrices and the best
// choice for medium to large matrices. The number of rows and columns can be modified at runtime:
\code
// Definition of a 3x4 integral row-major matrix
blaze::DynamicMatrix<int> A( 3UL, 4UL );
// Definition of a 4x6 single precision row-major matrix
blaze::DynamicMatrix<float,blaze::rowMajor> B( 4UL, 6UL );
// Definition of a double precision column-major matrix with 0 rows and columns
blaze::DynamicMatrix<double,blaze::columnMajor> C;
\endcode
// \n \section matrix_types_hybrid_matrix HybridMatrix
// <hr>
//
// The HybridMatrix class template combines the flexibility of a dynamically sized matrix with
// the efficiency and performance of a fixed size matrix. It is implemented as a crossing between
// the blaze::StaticMatrix and the blaze::DynamicMatrix class templates: Similar to the static
// matrix it uses static stack memory instead of dynamically allocated memory and similar to the
// dynamic matrix it can be resized (within the extent of the static memory). It can be included
// via the header file
\code
#include <blaze/math/HybridMatrix.h>
\endcode
// The type of the elements, the maximum number of rows and columns and the storage order of the
// matrix can be specified via the four template parameters:
\code
template< typename Type, size_t M, size_t N, bool SO >
class HybridMatrix;
\endcode
// - Type: specifies the type of the matrix elements. HybridMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer element type.
// - M : specifies the maximum number of rows of the matrix.
// - N : specifies the maximum number of columns of the matrix. Note that it is expected
// that HybridMatrix is only used for tiny and small matrices.
// - SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix.
// The default value is blaze::rowMajor.
//
// The blaze::HybridMatrix is a suitable choice for small to medium matrices, whose dimensions
// are not known at compile time or not fixed at runtime, but whose maximum dimensions are known
// at compile time:
\code
// Definition of a 3x4 integral row-major matrix with maximum dimensions of 6x8
blaze::HybridMatrix<int,6UL,8UL> A( 3UL, 4UL );
// Definition of a 4x6 single precision row-major matrix with maximum dimensions of 12x16
blaze::HybridMatrix<float,12UL,16UL,blaze::rowMajor> B( 4UL, 6UL );
// Definition of a 0x0 double precision column-major matrix and maximum dimensions of 6x6
blaze::HybridMatrix<double,6UL,6UL,blaze::columnMajor> C;
\endcode
// \n \section matrix_types_custom_matrix CustomMatrix
// <hr>
//
// The blaze::CustomMatrix class template provides the functionality to represent an external
// array of elements of arbitrary type and a fixed size as a native \b Blaze dense matrix data
// structure. Thus in contrast to all other dense matrix types a custom matrix does not perform
// any kind of memory allocation by itself, but it is provided with an existing array of element
// during construction. A custom matrix can therefore be considered an alias to the existing
// array. It can be included via the header file
\code
#include <blaze/math/CustomMatrix.h>
\endcode
// The type of the elements, the properties of the given array of elements and the storage order
// of the matrix can be specified via the following four template parameters:
\code
template< typename Type, bool AF, bool PF, bool SO >
class CustomMatrix;
\endcode
// - Type: specifies the type of the matrix elements. blaze::CustomMatrix can be used with
// any non-cv-qualified, non-reference, non-pointer element type.
// - AF : specifies whether the represented, external arrays are properly aligned with
// respect to the available instruction set (SSE, AVX, ...) or not.
// - PF : specifies whether the represented, external arrays are properly padded with
// respect to the available instruction set (SSE, AVX, ...) or not.
// - SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix.
// The default value is blaze::rowMajor.
//
// The blaze::CustomMatrix is the right choice if any external array needs to be represented as
// a \b Blaze dense matrix data structure or if a custom memory allocation strategy needs to be
// realized:
\code
using blaze::CustomMatrix;
using blaze::aligned;
using blaze::unaligned;
using blaze::padded;
using blaze::unpadded;
// Definition of an unmanaged 3x4 custom matrix for unaligned, unpadded integer arrays
typedef CustomMatrix<int,unaligned,unpadded,rowMajor> UnalignedUnpadded;
std::vector<int> vec( 12UL );
UnalignedUnpadded A( &vec[0], 3UL, 4UL );
// Definition of a managed 5x6 custom matrix for unaligned but padded 'float' arrays
typedef CustomMatrix<float,unaligned,padded,columnMajor> UnalignedPadded;
UnalignedPadded B( new float[48], 5UL, 6UL, 8UL, blaze::ArrayDelete() );
// Definition of a managed 12x13 custom matrix for aligned, unpadded 'double' arrays
typedef CustomMatrix<double,aligned,unpadded,rowMajor> AlignedUnpadded;
AlignedUnpadded C( blaze::allocate<double>( 192UL ), 12UL, 13UL, 16UL, blaze::Deallocate() );
// Definition of a 7x14 custom matrix for aligned, padded 'complex<double>' arrays
typedef CustomMatrix<complex<double>,aligned,padded,columnMajor> AlignedPadded;
AlignedPadded D( blaze::allocate< complex<double> >( 112UL ), 7UL, 14UL, 16UL, blaze::Deallocate() );
\endcode
// In comparison with the remaining \b Blaze dense matrix types blaze::CustomMatrix has several
// special characteristics. All of these result from the fact that a custom matrix is not
// performing any kind of memory allocation, but instead is given an existing array of elements.
// The following sections discuss all of these characteristics:
//
// -# <b>\ref matrix_types_custom_matrix_memory_management</b>
// -# <b>\ref matrix_types_custom_matrix_copy_operations</b>
// -# <b>\ref matrix_types_custom_matrix_alignment</b>
// -# <b>\ref matrix_types_custom_matrix_padding</b>
//
// \n \subsection matrix_types_custom_matrix_memory_management Memory Management
//
// The blaze::CustomMatrix class template acts as an adaptor for an existing array of elements. As
// such it provides everything that is required to use the array just like a native \b Blaze dense
// matrix data structure. However, this flexibility comes with the price that the user of a custom
// matrix is responsible for the resource management.
//
// The following examples give an impression of several possible types of custom matrices:
\code
using blaze::CustomMatrix;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::unaligned;
using blaze::padded;
using blaze::unpadded;
// Definition of a 3x4 custom row-major matrix with unaligned, unpadded and externally
// managed integer array. Note that the std::vector must be guaranteed to outlive the
// custom matrix!
std::vector<int> vec( 12UL );
CustomMatrix<int,unaligned,unpadded> A( &vec[0], 3UL, 4UL );
// Definition of a custom 8x12 matrix for an aligned and padded integer array of
// capacity 128 (including 8 padding elements per row). Note that the std::unique_ptr
// must be guaranteed to outlive the custom matrix!
std::unique_ptr<int[],Deallocate> memory( allocate<int>( 128UL ) );
CustomMatrix<int,aligned,padded> B( memory.get(), 8UL, 12UL, 16UL );
\endcode
// \n \subsection matrix_types_custom_matrix_copy_operations Copy Operations
//
// As with all dense matrices it is possible to copy construct a custom matrix:
\code
using blaze::CustomMatrix;
using blaze::unaligned;
using blaze::unpadded;
typedef CustomMatrix<int,unaligned,unpadded> CustomType;
std::vector<int> vec( 6UL, 10 ); // Vector of 6 integers of the value 10
CustomType A( &vec[0], 2UL, 3UL ); // Represent the std::vector as Blaze dense matrix
A(0,1) = 20;                       // Also modifies the std::vector
CustomType B( A );                 // Creating a copy of matrix A
B(0,2) = 20;                       // Also affects matrix A and the std::vector
\endcode
// It is important to note that a custom matrix acts as a reference to the specified array. Thus
// the result of the copy constructor is a new custom matrix that is referencing and representing
// the same array as the original custom matrix.
//
// In contrast to copy construction, just as with references, copy assignment does not change
// which array is referenced by the custom matrices, but modifies the values of the array:
\code
std::vector<int> vec2( 6UL, 4 ); // Vector of 6 integers of the value 4
CustomType C( &vec2[0], 2UL, 3UL ); // Represent the std::vector as Blaze dense matrix
A = C; // Copy assignment: Set all values of matrix A and B to 4.
\endcode
// \n \subsection matrix_types_custom_matrix_alignment Alignment
//
// In case the custom matrix is specified as \c aligned the passed array must adhere to some
// alignment restrictions based on the alignment requirements of the used data type and the
// used instruction set (SSE, AVX, ...). The restriction applies to the first element of each
// row/column: In case of a row-major matrix the first element of each row must be properly
// aligned, in case of a column-major matrix the first element of each column must be properly
// aligned. For instance, if a row-major matrix is used and AVX is active the first element of
// each row must be 32-byte aligned:
\code
using blaze::CustomMatrix;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::padded;
using blaze::rowMajor;
// Allocation of 32-byte aligned memory
std::unique_ptr<int[],Deallocate> memory( allocate<int>( 40UL ) );
CustomMatrix<int,aligned,padded,rowMajor> A( memory.get(), 5UL, 6UL, 8UL );
\endcode
// In the example, the row-major matrix has six columns. However, since with AVX eight integer
// values are loaded together the matrix is padded with two additional elements. This guarantees
// that the first element of each row is 32-byte aligned. In case the alignment requirements are
// violated, a \c std::invalid_argument exception is thrown.
//
// \n \subsection matrix_types_custom_matrix_padding Padding
//
// Adding padding elements to the end of each row/column can have a significant impact on the
// performance. For instance, assuming that AVX is available, then two aligned, padded, 3x3 double
// precision matrices can be added via three SIMD addition operations:
\code
using blaze::CustomMatrix;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::padded;
typedef CustomMatrix<double,aligned,padded> CustomType;
std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 12UL ) );
std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 12UL ) );
std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 12UL ) );
// Creating padded custom 3x3 matrix with an additional padding element in each row
CustomType A( memory1.get(), 3UL, 3UL, 4UL );
CustomType B( memory2.get(), 3UL, 3UL, 4UL );
CustomType C( memory3.get(), 3UL, 3UL, 4UL );
// ... Initialization
C = A + B; // AVX-based matrix addition
\endcode
// In this example, maximum performance is possible. However, in case no padding elements are
// inserted a scalar addition has to be used:
\code
using blaze::CustomMatrix;
using blaze::Deallocate;
using blaze::allocate;
using blaze::aligned;
using blaze::unpadded;
typedef CustomMatrix<double,aligned,unpadded> CustomType;
std::unique_ptr<double[],Deallocate> memory1( allocate<double>( 9UL ) );
std::unique_ptr<double[],Deallocate> memory2( allocate<double>( 9UL ) );
std::unique_ptr<double[],Deallocate> memory3( allocate<double>( 9UL ) );
// Creating unpadded custom 3x3 matrix
CustomType A( memory1.get(), 3UL, 3UL );
CustomType B( memory2.get(), 3UL, 3UL );
CustomType C( memory3.get(), 3UL, 3UL );
// ... Initialization
C = A + B; // Scalar matrix addition
\endcode
// Note that the construction of padded and unpadded aligned matrices looks identical. However,
// in case of padded matrices, \b Blaze will zero initialize the padding element and use them
// in all computations in order to achieve maximum performance. In case of an unpadded matrix
// \b Blaze will ignore the elements with the downside that it is not possible to load a complete
// row to an AVX register, which makes it necessary to fall back to a scalar addition.
//
// The number of padding elements is required to be sufficient with respect to the available
// instruction set: In case of an aligned padded custom matrix the added padding elements must
// guarantee that the total number of elements in each row/column is a multiple of the SIMD
// vector width. In case of an unaligned padded matrix the number of padding elements can be
// greater or equal the number of padding elements of an aligned padded custom matrix. In case
// the padding is insufficient with respect to the available instruction set, a
// \c std::invalid_argument exception is thrown.
//
//
// \n \section matrix_types_compressed_matrix CompressedMatrix
// <hr>
//
// The blaze::CompressedMatrix class template is the representation of an arbitrary sized sparse
// matrix with \f$ M \cdot N \f$ dynamically allocated elements of arbitrary type. It can be
// included via the header file
\code
#include <blaze/math/CompressedMatrix.h>
\endcode
// The type of the elements and the storage order of the matrix can be specified via the two
// template parameters:
\code
template< typename Type, bool SO >
class CompressedMatrix;
\endcode
// - \c Type: specifies the type of the matrix elements. CompressedMatrix can be used with
// any non-cv-qualified, non-reference, non-pointer element type.
// - \c SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix.
// The default value is blaze::rowMajor.
//
// The blaze::CompressedMatrix is the right choice for all kinds of sparse matrices:
\code
// Definition of a 3x4 integral row-major matrix
blaze::CompressedMatrix<int> A( 3UL, 4UL );
// Definition of a 4x6 single precision row-major matrix
blaze::CompressedMatrix<float,blaze::rowMajor> B( 4UL, 6UL );
// Definition of a double precision column-major matrix with 0 rows and columns
blaze::CompressedMatrix<double,blaze::columnMajor> C;
\endcode
// \n \section matrix_types_identity_matrix IdentityMatrix
// <hr>
//
// The blaze::IdentityMatrix class template is the representation of an immutable, arbitrary
// sized identity matrix with \f$ N \cdot N \f$ elements of arbitrary type. It can be included
// via the header file
\code
#include <blaze/math/IdentityMatrix.h>
\endcode
// The type of the elements and the storage order of the matrix can be specified via the two
// template parameters:
\code
template< typename Type, bool SO >
class IdentityMatrix;
\endcode
// - Type: specifies the type of the matrix elements. IdentityMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer element type.
// - SO : specifies the storage order (blaze::rowMajor, blaze::columnMajor) of the matrix.
// The default value is blaze::rowMajor.
//
// The blaze::IdentityMatrix is the perfect choice to represent an identity matrix:
\code
// Definition of a 3x3 integral row-major identity matrix
blaze::IdentityMatrix<int> A( 3UL );
// Definition of a 6x6 single precision row-major identity matrix
blaze::IdentityMatrix<float,blaze::rowMajor> B( 6UL );
// Definition of a double precision column-major identity matrix with 0 rows and columns
blaze::IdentityMatrix<double,blaze::columnMajor> C;
\endcode
// \n Previous: \ref matrices Next: \ref matrix_operations
*/
//*************************************************************************************************
//**Matrix Operations******************************************************************************
/*!\page matrix_operations Matrix Operations
//
// \tableofcontents
//
//
// \n \section matrix_operations_constructors Constructors
// <hr>
//
// Matrices are just as easy and intuitive to create as vectors. Still, there are a few rules
// to be aware of:
// - In case the last template parameter (the storage order) is omitted, the matrix is per
// default stored in row-major order.
// - The elements of a \c StaticMatrix or \c HybridMatrix are default initialized (i.e. built-in
// data types are initialized to 0, class types are initialized via the default constructor).
// - Newly allocated elements of a \c DynamicMatrix or \c CompressedMatrix remain uninitialized
// if they are of built-in type and are default constructed if they are of class type.
//
// \n \subsection matrix_operations_default_construction Default Construction
\code
using blaze::StaticMatrix;
using blaze::DynamicMatrix;
using blaze::CompressedMatrix;
// All matrices can be default constructed. Whereas the size of
// a StaticMatrix is fixed via the second and third template
// parameter, the initial size of a constructed DynamicMatrix
// or CompressedMatrix is 0.
StaticMatrix<int,2UL,2UL> M1; // Instantiation of a 2x2 integer row-major
// matrix. All elements are initialized to 0.
DynamicMatrix<float> M2; // Instantiation of a single precision dynamic
// row-major matrix with 0 rows and 0 columns.
DynamicMatrix<double,columnMajor> M3; // Instantiation of a double precision dynamic
// column-major matrix with 0 rows and 0 columns.
CompressedMatrix<int> M4; // Instantiation of a compressed integer
// row-major matrix of size 0x0.
CompressedMatrix<double,columnMajor> M5; // Instantiation of a compressed double precision
// column-major matrix of size 0x0.
\endcode
// \n \subsection matrix_operations_size_construction Construction with Specific Size
//
// The \c DynamicMatrix, \c HybridMatrix, and \c CompressedMatrix classes offer a constructor
// that allows to immediately give the matrices a specific number of rows and columns:
\code
DynamicMatrix<int> M6( 5UL, 4UL ); // Instantiation of a 5x4 dynamic row-major
// matrix. The elements are not initialized.
HybridMatrix<double,5UL,9UL> M7( 3UL, 7UL ); // Instantiation of a 3x7 hybrid row-major
// matrix. The elements are not initialized.
CompressedMatrix<float,columnMajor> M8( 8UL, 6UL ); // Instantiation of an empty 8x6 compressed
// column-major matrix.
\endcode
// Note that dense matrices (in this case \c DynamicMatrix and \c HybridMatrix) immediately
// allocate enough capacity for all matrix elements. Sparse matrices on the other hand (in this
// example \c CompressedMatrix) merely acquire the size, but don't necessarily allocate memory.
//
//
// \n \subsection matrix_operations_initialization_constructors Initialization Constructors
//
// All dense matrix classes offer a constructor for a direct, homogeneous initialization of all
// matrix elements. In contrast, for sparse matrices the predicted number of non-zero elements
// can be specified.
\code
StaticMatrix<int,4UL,3UL,columnMajor> M9( 7 ); // Instantiation of a 4x3 integer column-major
// matrix. All elements are initialized to 7.
DynamicMatrix<float> M10( 2UL, 5UL, 2.0F ); // Instantiation of a 2x5 single precision row-major
// matrix. All elements are initialized to 2.0F.
CompressedMatrix<int> M11( 3UL, 4UL, 4 ); // Instantiation of a 3x4 integer row-major
// matrix with capacity for 4 non-zero elements.
\endcode
// \n \subsection matrix_operations_array_construction Array Construction
//
// Alternatively, all dense matrix classes offer a constructor for an initialization with a
// dynamic or static array. If the matrix is initialized from a dynamic array, the constructor
// expects the dimensions of values provided by the array as first and second argument, the
// array as third argument. In case of a static array, the fixed size of the array is used:
\code
const std::unique_ptr<double[]> array1( new double[6] );
// ... Initialization of the dynamic array
blaze::StaticMatrix<double,2UL,3UL> M12( 2UL, 3UL, array1.get() );
int array2[2][2] = { { 4, -5 }, { -6, 7 } };
blaze::StaticMatrix<int,2UL,2UL,rowMajor> M13( array2 );
\endcode
// \n \subsection matrix_operations_initializer_list_construction Initializer List Construction
//
// In addition, all dense matrix classes can be directly initialized by means of an initializer
// list:
\code
blaze::DynamicMatrix<float,columnMajor> M14{ { 3.1F, 6.4F },
{ -0.9F, -1.2F },
{ 4.8F, 0.6F } };
\endcode
// \n \subsection matrix_operations_copy_construction Copy Construction
//
// All dense and sparse matrices can be created as a copy of another dense or sparse matrix.
\code
StaticMatrix<int,5UL,4UL,rowMajor> M15( M6 ); // Instantiation of the dense row-major matrix M15
// as copy of the dense row-major matrix M6.
DynamicMatrix<float,columnMajor> M16( M8 ); // Instantiation of the dense column-major matrix M16
// as copy of the sparse column-major matrix M8.
CompressedMatrix<double,columnMajor> M17( M7 ); // Instantiation of the compressed column-major matrix
// M17 as copy of the dense row-major matrix M7.
CompressedMatrix<float,rowMajor> M18( M8 ); // Instantiation of the compressed row-major matrix
// M18 as copy of the compressed column-major matrix M8.
\endcode
// Note that it is not possible to create a \c StaticMatrix as a copy of a matrix with a different
// number of rows and/or columns:
\code
StaticMatrix<int,4UL,5UL,rowMajor> M19( M6 ); // Runtime error: Number of rows and columns
// does not match!
StaticMatrix<int,4UL,4UL,columnMajor> M20( M9 ); // Compile time error: Number of columns does
// not match!
\endcode
// \n \section matrix_operations_assignment Assignment
// <hr>
//
// There are several types of assignment to dense and sparse matrices:
// \ref matrix_operations_homogeneous_assignment, \ref matrix_operations_array_assignment,
// \ref matrix_operations_copy_assignment, and \ref matrix_operations_compound_assignment.
//
//
// \n \subsection matrix_operations_homogeneous_assignment Homogeneous Assignment
//
// It is possible to assign the same value to all elements of a dense matrix. All dense matrix
// classes provide an according assignment operator:
\code
blaze::StaticMatrix<int,3UL,2UL> M1;
blaze::DynamicMatrix<double> M2;
// Setting all integer elements of the StaticMatrix to 4
M1 = 4;
// Setting all double precision elements of the DynamicMatrix to 3.5
M2 = 3.5;
\endcode
// \n \subsection matrix_operations_array_assignment Array Assignment
//
// Dense matrices can also be assigned a static array:
\code
blaze::StaticMatrix<int,2UL,2UL,rowMajor> M1;
blaze::StaticMatrix<int,2UL,2UL,columnMajor> M2;
blaze::DynamicMatrix<double> M3;
int array1[2][2] = { { 1, 2 }, { 3, 4 } };
double array2[3][2] = { { 3.1, 6.4 }, { -0.9, -1.2 }, { 4.8, 0.6 } };
M1 = array1;
M2 = array1;
M3 = array2;
\endcode
// Note that the dimensions of the static array have to match the size of a \c StaticMatrix,
// whereas a \c DynamicMatrix is resized according to the array dimensions:
\f$ M3 = \left(\begin{array}{*{2}{c}}
3.1 & 6.4 \\
-0.9 & -1.2 \\
4.8 & 0.6 \\
\end{array}\right)\f$
// \n \subsection matrix_operations_initializer_list_assignment Initializer List Assignment
//
// Alternatively, it is possible to directly assign an initializer list to a dense matrix:
\code
blaze::DynamicMatrix<double> M;
M = { { 3.1, 6.4 }, { -0.9, -1.2 }, { 4.8, 0.6 } };
\endcode
// \n \subsection matrix_operations_copy_assignment Copy Assignment
//
// All kinds of matrices can be assigned to each other. The only restriction is that since a
// \c StaticMatrix cannot change its size, the assigned matrix must match both in the number of
// rows and in the number of columns.
\code
blaze::StaticMatrix<int,3UL,2UL,rowMajor> M1;
blaze::DynamicMatrix<int,rowMajor> M2( 3UL, 2UL );
blaze::DynamicMatrix<float,rowMajor> M3( 5UL, 2UL );
blaze::CompressedMatrix<int,rowMajor> M4( 3UL, 2UL );
blaze::CompressedMatrix<float,columnMajor> M5( 3UL, 2UL );
// ... Initialization of the matrices
M1 = M2; // OK: Assignment of a 3x2 dense row-major matrix to another 3x2 dense row-major matrix
M1 = M4; // OK: Assignment of a 3x2 sparse row-major matrix to a 3x2 dense row-major matrix
M1 = M3; // Runtime error: Cannot assign a 5x2 matrix to a 3x2 static matrix
M1 = M5; // OK: Assignment of a 3x2 sparse column-major matrix to a 3x2 dense row-major matrix
\endcode
// \n \subsection matrix_operations_compound_assignment Compound Assignment
//
// Compound assignment is also available for matrices: addition assignment, subtraction assignment,
// and multiplication assignment. In contrast to plain assignment, however, the number of rows
// and columns of the two operands have to match according to the arithmetic operation.
\code
blaze::StaticMatrix<int,2UL,3UL,rowMajor> M1;
blaze::DynamicMatrix<int,rowMajor> M2( 2UL, 3UL );
blaze::CompressedMatrix<float,columnMajor> M3( 2UL, 3UL );
blaze::CompressedMatrix<float,rowMajor> M4( 2UL, 4UL );
blaze::StaticMatrix<float,2UL,4UL,rowMajor> M5;
blaze::CompressedMatrix<float,rowMajor> M6( 3UL, 2UL );
// ... Initialization of the matrices
M1 += M2; // OK: Addition assignment between two row-major matrices of the same dimensions
M1 -= M3;  // OK: Subtraction assignment between a row-major and a column-major matrix
M1 += M4; // Runtime error: No compound assignment between matrices of different size
M1 -= M5; // Compilation error: No compound assignment between matrices of different size
M2 *= M6; // OK: Multiplication assignment between two row-major matrices
\endcode
// Note that the multiplication assignment potentially changes the number of columns of the
// target matrix:
\f$\left(\begin{array}{*{3}{c}}
2 & 0 & 1 \\
0 & 3 & 2 \\
\end{array}\right) \times
\left(\begin{array}{*{2}{c}}
4 & 0 \\
1 & 0 \\
0 & 3 \\
\end{array}\right) =
\left(\begin{array}{*{2}{c}}
8 & 3 \\
3 & 6 \\
\end{array}\right)\f$
// Since a \c StaticMatrix cannot change its size, only a square StaticMatrix can be used in a
// multiplication assignment with other square matrices of the same dimensions.
//
//
// \n \section matrix_operations_element_access Element Access
// <hr>
//
// The easiest way to access a specific dense or sparse matrix element is via the function call
// operator. The indices to access a matrix are zero-based:
\code
blaze::DynamicMatrix<int> M1( 4UL, 6UL );
M1(0,0) = 1;
M1(0,1) = 3;
// ...
blaze::CompressedMatrix<double> M2( 5UL, 3UL );
M2(0,2) = 4.1;
M2(1,1) = -6.3;
\endcode
// Since dense matrices allocate enough memory for all contained elements, using the function
// call operator on a dense matrix directly returns a reference to the accessed value. In case
// of a sparse matrix, if the accessed value is currently not contained in the matrix, the
// value is inserted into the matrix prior to returning a reference to the value, which can
// be much more expensive than the direct access to a dense matrix. Consider the following
// example:
\code
blaze::CompressedMatrix<int> M1( 4UL, 4UL );
for( size_t i=0UL; i<M1.rows(); ++i ) {
for( size_t j=0UL; j<M1.columns(); ++j ) {
... = M1(i,j);
}
}
\endcode
// Although the compressed matrix is only used for read access within the for loop, using the
// function call operator temporarily inserts 16 non-zero elements into the matrix. Therefore,
// all matrices (sparse as well as dense) offer an alternate way via the \c begin(), \c cbegin(),
// \c end() and \c cend() functions to traverse all contained elements by iterator. Note that
// it is not possible to traverse all elements of the matrix, but that it is only possible to
// traverse elements in a row/column-wise fashion. In case of a non-const matrix, \c begin() and
// \c end() return an \c Iterator, which allows a manipulation of the non-zero value, in case of
// a constant matrix or in case \c cbegin() or \c cend() are used a \c ConstIterator is returned:
\code
using blaze::CompressedMatrix;
CompressedMatrix<int,rowMajor> A( 4UL, 6UL );
// Traversing the matrix by Iterator
for( size_t i=0UL; i<A.rows(); ++i ) {
for( CompressedMatrix<int,rowMajor>::Iterator it=A.begin(i); it!=A.end(i); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the non-zero element.
}
}
// Traversing the matrix by ConstIterator
for( size_t i=0UL; i<A.rows(); ++i ) {
for( CompressedMatrix<int,rowMajor>::ConstIterator it=A.cbegin(i); it!=A.cend(i); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the non-zero element.
}
}
\endcode
// Note that \c begin(), \c cbegin(), \c end(), and \c cend() are also available as free functions:
\code
for( size_t i=0UL; i<A.rows(); ++i ) {
for( CompressedMatrix<int,rowMajor>::Iterator it=begin( A, i ); it!=end( A, i ); ++it ) {
// ...
}
}
for( size_t i=0UL; i<A.rows(); ++i ) {
for( CompressedMatrix<int,rowMajor>::ConstIterator it=cbegin( A, i ); it!=cend( A, i ); ++it ) {
// ...
}
}
\endcode
// \n \section matrix_operations_element_insertion Element Insertion
// <hr>
//
// Whereas a dense matrix always provides enough capacity to store all matrix elements, a sparse
// matrix only stores the non-zero elements. Therefore it is necessary to explicitly add elements
// to the matrix. The first possibility to add elements to a sparse matrix is the function call
// operator:
\code
using blaze::CompressedMatrix;
CompressedMatrix<int> M1( 3UL, 4UL );
M1(1,2) = 9;
\endcode
// In case the element at the given position is not yet contained in the sparse matrix, it is
// automatically inserted. Otherwise the old value is replaced by the new value 9. The operator
// returns a reference to the sparse matrix element.\n
// An alternative is the \c set() function: In case the element is not yet contained in the matrix
// the element is inserted, else the element's value is modified:
\code
// Insert or modify the value at position (2,0)
M1.set( 2, 0, 1 );
\endcode
// However, insertion of elements can be better controlled via the \c insert() function. In
// contrast to the function call operator and the \c set() function it emits an exception in case
// the element is already contained in the matrix. In order to check for this case, the \c find()
// function can be used:
\code
// In case the element at position (2,3) is not yet contained in the matrix it is inserted
// with a value of 4.
if( M1.find( 2, 3 ) == M1.end( 2 ) )
M1.insert( 2, 3, 4 );
\endcode
// Although the \c insert() function is very flexible, due to performance reasons it is not
// suited for the setup of large sparse matrices. A very efficient, yet also very low-level
// way to fill a sparse matrix is the \c append() function. It requires the sparse matrix to
// provide enough capacity to insert a new element in the specified row/column. Additionally,
// the index of the new element must be larger than the index of the previous element in the
// same row/column. Violating these conditions results in undefined behavior!
\code
M1.reserve( 0, 3 ); // Reserving space for three non-zero elements in row 0
M1.append( 0, 1, 2 ); // Appending the element 2 in row 0 at column index 1
M1.append( 0, 2, -4 ); // Appending the element -4 in row 0 at column index 2
// ...
\endcode
// The most efficient way to fill a sparse matrix with elements, however, is a combination of
// \c reserve(), \c append(), and the \c finalize() function:
\code
// Setup of the compressed row-major matrix
//
// ( 0 1 0 2 0 )
// A = ( 0 0 0 0 0 )
// ( 3 0 0 0 0 )
//
blaze::CompressedMatrix<int> M1( 3UL, 5UL );
M1.reserve( 3 ); // Reserving enough space for 3 non-zero elements
M1.append( 0, 1, 1 ); // Appending the value 1 in row 0 with column index 1
M1.append( 0, 3, 2 ); // Appending the value 2 in row 0 with column index 3
M1.finalize( 0 ); // Finalizing row 0
M1.finalize( 1 ); // Finalizing the empty row 1 to prepare row 2
M1.append( 2, 0, 3 ); // Appending the value 3 in row 2 with column index 0
M1.finalize( 2 ); // Finalizing row 2
\endcode
// \note The \c finalize() function has to be explicitly called for each row or column, even
// for empty ones!
// \note Although \c append() does not allocate new memory, it still invalidates all iterators
// returned by the \c end() functions!
//
//
// \n \section matrix_operations_non_modifying_operations Non-Modifying Operations
// <hr>
//
// \subsection matrix_operations_rows .rows()
//
// The current number of rows of a matrix can be acquired via the \c rows() member function:
\code
// Instantiating a dynamic matrix with 10 rows and 8 columns
blaze::DynamicMatrix<int> M1( 10UL, 8UL );
M1.rows(); // Returns 10
// Instantiating a compressed matrix with 8 rows and 12 columns
blaze::CompressedMatrix<double> M2( 8UL, 12UL );
M2.rows(); // Returns 8
\endcode
// Alternatively, the free functions \c rows() can be used to query the current number of rows of
// a matrix. In contrast to the member function, the free function can also be used to query the
// number of rows of a matrix expression:
\code
rows( M1 ); // Returns 10, i.e. has the same effect as the member function
rows( M2 ); // Returns 8, i.e. has the same effect as the member function
rows( M1 * M2 ); // Returns 10, i.e. the number of rows of the resulting matrix
\endcode
// \n \subsection matrix_operations_columns .columns()
//
// The current number of columns of a matrix can be acquired via the \c columns() member function:
\code
// Instantiating a dynamic matrix with 6 rows and 8 columns
blaze::DynamicMatrix<int> M1( 6UL, 8UL );
M1.columns(); // Returns 8
// Instantiating a compressed matrix with 8 rows and 7 columns
blaze::CompressedMatrix<double> M2( 8UL, 7UL );
M2.columns(); // Returns 7
\endcode
// There is also a free function \c columns() available, which can also be used to query the number
// of columns of a matrix expression:
\code
columns( M1 ); // Returns 8, i.e. has the same effect as the member function
columns( M2 ); // Returns 7, i.e. has the same effect as the member function
columns( M1 * M2 ); // Returns 7, i.e. the number of columns of the resulting matrix
\endcode
// \n \subsection matrix_operations_capacity .capacity()
//
// The \c capacity() member function returns the internal capacity of a dense or sparse matrix.
// Note that the capacity of a matrix doesn't have to be equal to the size of a matrix. In case of
// a dense matrix the capacity will always be greater or equal than the total number of elements
// of the matrix. In case of a sparse matrix, the capacity will usually be much less than the
// total number of elements.
\code
blaze::DynamicMatrix<float> M1( 5UL, 7UL );
blaze::StaticMatrix<float,7UL,4UL> M2;
M1.capacity(); // Returns at least 35
M2.capacity(); // Returns at least 28
\endcode
// There is also a free function \c capacity() available to query the capacity. However, please
// note that this function cannot be used to query the capacity of a matrix expression:
\code
capacity( M1 ); // Returns at least 35, i.e. has the same effect as the member function
capacity( M2 ); // Returns at least 28, i.e. has the same effect as the member function
capacity( M1 * M2 ); // Compilation error!
\endcode
// \n \subsection matrix_operations_nonzeros .nonZeros()
//
// For both dense and sparse matrices the current number of non-zero elements can be queried
// via the \c nonZeros() member function. In case of matrices there are two flavors of the
// \c nonZeros() function: One returns the total number of non-zero elements in the matrix,
// the second returns the number of non-zero elements in a specific row (in case of a row-major
// matrix) or column (in case of a column-major matrix). Sparse matrices directly return their
// number of non-zero elements, dense matrices traverse their elements and count the number of
// non-zero elements.
\code
blaze::DynamicMatrix<int,rowMajor> M1( 3UL, 5UL );
// ... Initializing the dense matrix
M1.nonZeros(); // Returns the total number of non-zero elements in the dense matrix
M1.nonZeros( 2 ); // Returns the number of non-zero elements in row 2
\endcode
\code
blaze::CompressedMatrix<double,columnMajor> M2( 4UL, 7UL );
// ... Initializing the sparse matrix
M2.nonZeros(); // Returns the total number of non-zero elements in the sparse matrix
M2.nonZeros( 3 ); // Returns the number of non-zero elements in column 3
\endcode
// The free \c nonZeros() function can also be used to query the number of non-zero elements in a
// matrix expression. However, the result is not the exact number of non-zero elements, but may be
// a rough estimation:
\code
nonZeros( M1 ); // Has the same effect as the member function
nonZeros( M1, 2 ); // Has the same effect as the member function
nonZeros( M2 ); // Has the same effect as the member function
nonZeros( M2, 3 ); // Has the same effect as the member function
nonZeros( M1 * M2 ); // Estimates the number of non-zero elements in the matrix expression
\endcode
// \n \subsection matrix_operations_isnan isnan()
//
// The \c isnan() function provides the means to check a dense or sparse matrix for not-a-number
// elements:
\code
blaze::DynamicMatrix<double> A( 3UL, 4UL );
// ... Initialization
if( isnan( A ) ) { ... }
\endcode
\code
blaze::CompressedMatrix<double> A( 3UL, 4UL );
// ... Initialization
if( isnan( A ) ) { ... }
\endcode
// If at least one element of the matrix is not-a-number, the function returns \c true, otherwise
// it returns \c false. Please note that this function only works for matrices with floating point
// elements. The attempt to use it for a matrix with a non-floating point element type results in
// a compile time error.
//
//
// \n \subsection matrix_operations_isdefault isDefault()
//
// The \c isDefault() function returns whether the given dense or sparse matrix is in default state:
\code
blaze::HybridMatrix<int,5UL,4UL> A;
// ... Resizing and initialization
if( isDefault( A ) ) { ... }
\endcode
// A matrix is in default state if it appears to just have been default constructed. All resizable
// matrices (\c HybridMatrix, \c DynamicMatrix, or \c CompressedMatrix) and \c CustomMatrix are in
// default state if their size is equal to zero. A non-resizable matrix (\c StaticMatrix and all
// submatrices) is in default state if all its elements are in default state. For instance, in case
// the matrix is instantiated for a built-in integral or floating point data type, the function
// returns \c true in case all matrix elements are 0 and \c false in case any matrix element is
// not 0.
//
//
// \n \subsection matrix_operations_isSquare isSquare()
//
// Whether a dense or sparse matrix is a square matrix (i.e. if the number of rows is equal to the
// number of columns) can be checked via the \c isSquare() function:
\code
blaze::DynamicMatrix<double> A;
// ... Resizing and initialization
if( isSquare( A ) ) { ... }
\endcode
// \n \subsection matrix_operations_issymmetric isSymmetric()
//
// Via the \c isSymmetric() function it is possible to check whether a dense or sparse matrix
// is symmetric:
\code
blaze::DynamicMatrix<float> A;
// ... Resizing and initialization
if( isSymmetric( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be symmetric!
//
//
// \n \subsection matrix_operations_isUniform isUniform()
//
// In order to check if all matrix elements are identical, the \c isUniform function can be used:
\code
blaze::DynamicMatrix<int> A;
// ... Resizing and initialization
if( isUniform( A ) ) { ... }
\endcode
// Note that in case of a sparse matrix the zero elements are also taken into account!
//
//
// \n \subsection matrix_operations_islower isLower()
//
// Via the \c isLower() function it is possible to check whether a dense or sparse matrix is
// lower triangular:
\code
blaze::DynamicMatrix<float> A;
// ... Resizing and initialization
if( isLower( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be lower triangular!
//
//
// \n \subsection matrix_operations_isunilower isUniLower()
//
// Via the \c isUniLower() function it is possible to check whether a dense or sparse matrix is
// lower unitriangular:
\code
blaze::DynamicMatrix<float> A;
// ... Resizing and initialization
if( isUniLower( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be lower unitriangular!
//
//
// \n \subsection matrix_operations_isstrictlylower isStrictlyLower()
//
// Via the \c isStrictlyLower() function it is possible to check whether a dense or sparse matrix
// is strictly lower triangular:
\code
blaze::DynamicMatrix<float> A;
// ... Resizing and initialization
if( isStrictlyLower( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be strictly lower triangular!
//
//
// \n \subsection matrix_operations_isUpper isUpper()
//
// Via the \c isUpper() function it is possible to check whether a dense or sparse matrix is
// upper triangular:
\code
blaze::DynamicMatrix<float> A;
// ... Resizing and initialization
if( isUpper( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be upper triangular!
//
//
// \n \subsection matrix_operations_isuniupper isUniUpper()
//
// Via the \c isUniUpper() function it is possible to check whether a dense or sparse matrix is
// upper unitriangular:
\code
blaze::DynamicMatrix<float> A;
// ... Resizing and initialization
if( isUniUpper( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be upper unitriangular!
//
//
// \n \subsection matrix_operations_isstrictlyupper isStrictlyUpper()
//
// Via the \c isStrictlyUpper() function it is possible to check whether a dense or sparse matrix
// is strictly upper triangular:
\code
blaze::DynamicMatrix<float> A;
// ... Resizing and initialization
if( isStrictlyUpper( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be strictly upper triangular!
//
//
// \n \subsection matrix_operations_isdiagonal isDiagonal()
//
// The \c isDiagonal() function checks if the given dense or sparse matrix is a diagonal matrix,
// i.e. if it has only elements on its diagonal and if the non-diagonal elements are default
// elements:
\code
blaze::CompressedMatrix<float> A;
// ... Resizing and initialization
if( isDiagonal( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be diagonal!
//
//
// \n \subsection matrix_operations_isidentity isIdentity()
//
// The \c isIdentity() function checks if the given dense or sparse matrix is an identity matrix,
// i.e. if all diagonal elements are 1 and all non-diagonal elements are 0:
\code
blaze::CompressedMatrix<float> A;
// ... Resizing and initialization
if( isIdentity( A ) ) { ... }
\endcode
// Note that non-square matrices are never considered to be identity matrices!
//
//
// \n \subsection matrix_operations_matrix_determinant det()
//
// The determinant of a square dense matrix can be computed by means of the \c det() function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
double d = det( A ); // Compute the determinant of A
\endcode
// In case the given dense matrix is not a square matrix, a \c std::invalid_argument exception is
// thrown.
//
// \note The \c det() function can only be used for dense matrices with \c float, \c double,
// \c complex<float> or \c complex<double> element type. The attempt to call the function with
// matrices of any other element type or with a sparse matrix results in a compile time error!
//
// \note The function is depending on LAPACK kernels. Thus the function can only be used if the
// fitting LAPACK library is available and linked to the executable. Otherwise a linker error
// will be created.
//
//
// \n \subsection matrix_operations_matrix_trans trans()
//
// Matrices can be transposed via the \c trans() function. Row-major matrices are transposed into
// a column-major matrix and vice versa:
\code
blaze::DynamicMatrix<int,rowMajor> M1( 5UL, 2UL );
blaze::CompressedMatrix<int,columnMajor> M2( 3UL, 7UL );
M1 = M2; // Assigning a column-major matrix to a row-major matrix
M1 = trans( M2 ); // Assigning the transpose of M2 (i.e. a row-major matrix) to M1
M1 += trans( M2 ); // Addition assignment of two row-major matrices
\endcode
// \n \subsection matrix_operations_ctrans ctrans()
//
// The conjugate transpose of a dense or sparse matrix (also called adjoint matrix, Hermitian
// conjugate, or transjugate) can be computed via the \c ctrans() function:
\code
blaze::DynamicMatrix< complex<float>, rowMajor > M1( 5UL, 2UL );
blaze::CompressedMatrix< complex<float>, columnMajor > M2( 2UL, 5UL );
M1 = ctrans( M2 ); // Compute the conjugate transpose matrix
\endcode
// Note that the \c ctrans() function has the same effect as manually applying the \c conj() and
// \c trans() function in any order:
\code
M1 = trans( conj( M2 ) ); // Computing the conjugate transpose matrix
M1 = conj( trans( M2 ) ); // Computing the conjugate transpose matrix
\endcode
// \n \subsection matrix_operations_matrix_evaluate eval() / evaluate()
//
// The \c evaluate() function forces an evaluation of the given matrix expression and enables
// an automatic deduction of the correct result type of an operation. The following code example
// demonstrates its intended use for the multiplication of a lower and a strictly lower dense
// matrix:
\code
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
using blaze::StrictlyLowerMatrix;
LowerMatrix< DynamicMatrix<double> > A;
StrictlyLowerMatrix< DynamicMatrix<double> > B;
// ... Resizing and initialization
auto C = evaluate( A * B );
\endcode
// In this scenario, the \c evaluate() function assists in deducing the exact result type of
// the operation via the \c auto keyword. Please note that if \c evaluate() is used in this
// way, no temporary matrix is created and no copy operation is performed. Instead, the result
// is directly written to the target matrix due to the return value optimization (RVO). However,
// if \c evaluate() is used in combination with an explicit target type, a temporary will be
// created and a copy operation will be performed if the used type differs from the type
// returned from the function:
\code
StrictlyLowerMatrix< DynamicMatrix<double> > D( A * B ); // No temporary & no copy operation
LowerMatrix< DynamicMatrix<double> > E( A * B ); // Temporary & copy operation
DynamicMatrix<double> F( A * B ); // Temporary & copy operation
D = evaluate( A * B ); // Temporary & copy operation
\endcode
// Sometimes it might be desirable to explicitly evaluate a sub-expression within a larger
// expression. However, please note that \c evaluate() is not intended to be used for this
// purpose. This task is more elegantly and efficiently handled by the \c eval() function:
\code
blaze::DynamicMatrix<double> A, B, C, D;
D = A + evaluate( B * C ); // Unnecessary creation of a temporary matrix
D = A + eval( B * C ); // No creation of a temporary matrix
\endcode
// In contrast to the \c evaluate() function, \c eval() can take the complete expression
// into account and therefore can guarantee the most efficient way to evaluate it (see also
// \ref intra_statement_optimization).
//
//
// \n \section matrix_operations_modifying_operations Modifying Operations
// <hr>
//
// \subsection matrix_operations_resize_reserve .resize() / .reserve()
//
// The dimensions of a \c StaticMatrix are fixed at compile time by the second and third template
// parameter and a \c CustomMatrix cannot be resized. In contrast, the number of rows and columns
// of \c DynamicMatrix, \c HybridMatrix, and \c CompressedMatrix can be changed at runtime:
\code
using blaze::DynamicMatrix;
using blaze::CompressedMatrix;
DynamicMatrix<int,rowMajor> M1;
CompressedMatrix<int,columnMajor> M2( 3UL, 2UL );
// Adapting the number of rows and columns via the resize() function. The (optional)
// third parameter specifies whether the existing elements should be preserved. Per
// default, the existing elements are preserved.
M1.resize( 2UL, 2UL ); // Resizing matrix M1 to 2x2 elements. Elements of built-in type
// remain uninitialized, elements of class type are default
// constructed.
M1.resize( 3UL, 1UL, false ); // Resizing M1 to 3x1 elements. The old elements are lost, the
// new elements are NOT initialized!
M2.resize( 5UL, 7UL, true ); // Resizing M2 to 5x7 elements. The old elements are preserved.
M2.resize( 3UL, 2UL, false ); // Resizing M2 to 3x2 elements. The old elements are lost.
\endcode
// Note that resizing a matrix invalidates all existing views (see e.g. \ref views_submatrices)
// on the matrix:
\code
typedef blaze::DynamicMatrix<int,rowMajor> MatrixType;
typedef blaze::Row<MatrixType> RowType;
MatrixType M1( 10UL, 20UL ); // Creating a 10x20 matrix
RowType row8 = row( M1, 8UL ); // Creating a view on the 8th row of the matrix
M1.resize( 6UL, 20UL ); // Resizing the matrix invalidates the view
\endcode
// When the internal capacity of a matrix is no longer sufficient, the allocation of a larger
// chunk of memory is triggered. In order to avoid frequent reallocations, the \c reserve()
// function can be used up front to set the internal capacity:
\code
blaze::DynamicMatrix<int> M1;
M1.reserve( 100 );
M1.rows(); // Returns 0
M1.capacity(); // Returns at least 100
\endcode
// Additionally it is possible to reserve memory in a specific row (for a row-major matrix) or
// column (for a column-major matrix):
\code
blaze::CompressedMatrix<int> M1( 4UL, 6UL );
M1.reserve( 1, 4 ); // Reserving enough space for four non-zero elements in row 1
\endcode
// \n \subsection matrix_operations_shrinkToFit .shrinkToFit()
//
// The internal capacity of matrices with dynamic memory is preserved in order to minimize the
// number of reallocations. For that reason, the \c resize() and \c reserve() functions can lead
// to memory overhead. The \c shrinkToFit() member function can be used to minimize the internal
// capacity:
\code
blaze::DynamicMatrix<int> M1( 100UL, 100UL ); // Create a 100x100 integer matrix
M1.resize( 10UL, 10UL ); // Resize to 10x10, but the capacity is preserved
M1.shrinkToFit(); // Remove the unused capacity
\endcode
// Please note that due to padding the capacity might not be reduced exactly to \c rows() times
// \c columns(). Please also note that in case a reallocation occurs, all iterators (including
// \c end() iterators), all pointers and references to elements of this matrix are invalidated.
//
//
// \subsection matrix_operations_reset_clear reset() / clear()
//
// In order to reset all elements of a dense or sparse matrix, the \c reset() function can be
// used. The number of rows and columns of the matrix are preserved:
\code
// Setting up a single precision row-major matrix, whose elements are initialized with 2.0F.
blaze::DynamicMatrix<float> M1( 4UL, 5UL, 2.0F );
// Resetting all elements to 0.0F.
reset( M1 ); // Resetting all elements
M1.rows(); // Returns 4: size and capacity remain unchanged
\endcode
// Alternatively, only a single row or column of the matrix can be reset:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> M1( 7UL, 6UL, 5 ); // Setup of a row-major matrix
blaze::DynamicMatrix<int,blaze::columnMajor> M2( 4UL, 5UL, 4 ); // Setup of a column-major matrix
reset( M1, 2UL ); // Resetting the 2nd row of the row-major matrix
reset( M2, 3UL ); // Resetting the 3rd column of the column-major matrix
\endcode
// In order to reset a row of a column-major matrix or a column of a row-major matrix, use a
// row or column view (see \ref views_rows and \ref views_columns).
//
// In order to return a matrix to its default state (i.e. the state of a default constructed
// matrix), the \c clear() function can be used:
\code
// Setting up a single precision row-major matrix, whose elements are initialized with 2.0F.
blaze::DynamicMatrix<float> M1( 4UL, 5UL, 2.0F );
// Resetting all elements to 0.0F.
clear( M1 ); // Resetting the entire matrix
M1.rows(); // Returns 0: size is reset, but capacity remains unchanged
\endcode
// \n \subsection matrix_operations_matrix_transpose transpose()
//
// In addition to the non-modifying \c trans() function, matrices can be transposed in-place via
// the \c transpose() function:
\code
blaze::DynamicMatrix<int,rowMajor> M( 5UL, 2UL );
transpose( M ); // In-place transpose operation.
M = trans( M ); // Same as above
\endcode
// Note however that the transpose operation fails if ...
//
// - ... the given matrix has a fixed size and is non-square;
// - ... the given matrix is a triangular matrix;
// - ... the given submatrix affects the restricted parts of a triangular matrix;
// - ... the given submatrix would cause non-deterministic results in a symmetric/Hermitian matrix.
//
//
// \n \subsection matrix_operations_ctranspose ctranspose()
//
// The \c ctranspose() function can be used to perform an in-place conjugate transpose operation:
\code
blaze::DynamicMatrix<int,rowMajor> M( 5UL, 2UL );
ctranspose( M ); // In-place conjugate transpose operation.
M = ctrans( M ); // Same as above
\endcode
// Note however that the conjugate transpose operation fails if ...
//
// - ... the given matrix has a fixed size and is non-square;
// - ... the given matrix is a triangular matrix;
// - ... the given submatrix affects the restricted parts of a triangular matrix;
// - ... the given submatrix would cause non-deterministic results in a symmetric/Hermitian matrix.
//
//
// \n \subsection matrix_operations_swap swap()
//
// Via the \c swap() function it is possible to completely swap the contents of two matrices
// of the same type:
\code
blaze::DynamicMatrix<int,blaze::rowMajor> M1( 10UL, 15UL );
blaze::DynamicMatrix<int,blaze::rowMajor> M2( 20UL, 10UL );
swap( M1, M2 ); // Swapping the contents of M1 and M2
\endcode
// \n \section matrix_operations_arithmetic_operations Arithmetic Operations
// <hr>
//
// \subsection matrix_operations_min_max min() / max()
//
// The \c min() and the \c max() functions return the smallest and largest element of the given
// dense or sparse matrix, respectively:
\code
using blaze::rowMajor;
blaze::StaticMatrix<int,2UL,3UL,rowMajor> A{ { -5, 2, 7 },
{ 4, 0, 1 } };
blaze::StaticMatrix<int,2UL,3UL,rowMajor> B{ { -5, 2, -7 },
{ -4, 0, -1 } };
min( A ); // Returns -5
min( B ); // Returns -7
max( A ); // Returns 7
max( B ); // Returns 2
\endcode
// In case the matrix currently has 0 rows or 0 columns, both functions return 0. Additionally, in
// case a given sparse matrix is not completely filled, the zero elements are taken into account.
// For example: the following compressed matrix has only 2 non-zero elements. However, the minimum
// of this matrix is 0:
\code
blaze::CompressedMatrix<int> C( 2UL, 3UL );
C(0,0) = 1;
C(0,2) = 3;
min( C ); // Returns 0
\endcode
// Also note that the \c min() and \c max() functions can be used to compute the smallest and
// largest element of a matrix expression:
\code
min( A + B + C ); // Returns -9, i.e. the smallest value of the resulting matrix
max( A - B - C ); // Returns 11, i.e. the largest value of the resulting matrix
\endcode
// \n \subsection matrix_operators_trace trace()
//
// The \c trace() function sums the diagonal elements of a square dense or sparse matrix:
\code
blaze::StaticMatrix<int,3UL,3UL> A{ { -1, 2, -3 }
, { -4, -5, 6 }
, { 7, -8, -9 } };
trace( A ); // Returns the sum of the diagonal elements, i.e. -15
\endcode
// In case the given matrix is not a square matrix, a \c std::invalid_argument exception is
// thrown.
//
//
// \n \subsection matrix_operators_abs abs()
//
// The \c abs() function can be used to compute the absolute values of each element of a matrix.
// For instance, the following computation
\code
blaze::StaticMatrix<int,2UL,3UL,rowMajor> A{ { -1, 2, -3 },
{ 4, -5, 6 } };
blaze::StaticMatrix<int,2UL,3UL,rowMajor> B( abs( A ) );
\endcode
// results in the matrix
\f$ B = \left(\begin{array}{*{3}{c}}
1 & 2 & 3 \\
4 & 5 & 6 \\
\end{array}\right)\f$
// \n \subsection matrix_operators_rounding_functions floor() / ceil() / trunc() / round()
//
// The \c floor(), \c ceil(), \c trunc(), and \c round() functions can be used to round down/up
// each element of a matrix, respectively:
\code
blaze::StaticMatrix<double,3UL,3UL> A, B;
B = floor( A ); // Rounding down each element of the matrix
B = ceil ( A ); // Rounding up each element of the matrix
B = trunc( A ); // Truncating each element of the matrix
B = round( A ); // Rounding each element of the matrix
\endcode
// \n \subsection matrix_operators_conj conj()
//
// The \c conj() function can be applied on a dense or sparse matrix to compute the complex
// conjugate of each element of the matrix:
\code
using blaze::StaticMatrix;
typedef std::complex<double> cplx;
// Creating the matrix
// ( (1,0) (-2,-1) )
// ( (1,1) ( 0, 1) )
StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) },
{ cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } };
// Computing the matrix of conjugate values
// ( (1, 0) (-2, 1) )
// ( (1,-1) ( 0,-1) )
StaticMatrix<cplx,2UL,2UL> B;
B = conj( A );
\endcode
// Additionally, matrices can be conjugated in-place via the \c conjugate() function:
\code
blaze::DynamicMatrix<cplx> C( 5UL, 2UL );
conjugate( C ); // In-place conjugate operation.
C = conj( C ); // Same as above
\endcode
// \n \subsection matrix_operators_real real()
//
// The \c real() function can be used on a dense or sparse matrix to extract the real part of
// each element of the matrix:
\code
using blaze::StaticMatrix;
typedef std::complex<double> cplx;
// Creating the matrix
// ( (1,0) (-2,-1) )
// ( (1,1) ( 0, 1) )
StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) },
{ cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } };
// Extracting the real part of each matrix element
// ( 1 -2 )
// ( 1 0 )
StaticMatrix<double,2UL,2UL> B;
B = real( A );
\endcode
// \n \subsection matrix_operators_imag imag()
//
// The \c imag() function can be used on a dense or sparse matrix to extract the imaginary part
// of each element of the matrix:
\code
using blaze::StaticMatrix;
typedef std::complex<double> cplx;
// Creating the matrix
// ( (1,0) (-2,-1) )
// ( (1,1) ( 0, 1) )
StaticMatrix<cplx,2UL,2UL> A{ { cplx( 1.0, 0.0 ), cplx( -2.0, -1.0 ) },
{ cplx( 1.0, 1.0 ), cplx( 0.0, 1.0 ) } };
// Extracting the imaginary part of each matrix element
// ( 0 -1 )
// ( 1 1 )
StaticMatrix<double,2UL,2UL> B;
B = imag( A );
\endcode
// \n \subsection matrix_operators_sqrt sqrt() / invsqrt()
//
// Via the \c sqrt() and \c invsqrt() functions the (inverse) square root of each element of a
// matrix can be computed:
\code
blaze::StaticMatrix<double,3UL,3UL> A, B, C;
B = sqrt( A ); // Computes the square root of each element
C = invsqrt( A ); // Computes the inverse square root of each element
\endcode
// Note that in case of sparse matrices only the non-zero elements are taken into account!
//
//
// \n \subsection matrix_operators_cbrt cbrt() / invcbrt()
//
// The \c cbrt() and \c invcbrt() functions can be used to compute the (inverse) cubic root
// of each element of a matrix:
\code
blaze::DynamicMatrix<double> A, B, C;
B = cbrt( A ); // Computes the cubic root of each element
C = invcbrt( A ); // Computes the inverse cubic root of each element
\endcode
// Note that in case of sparse matrices only the non-zero elements are taken into account!
//
//
// \n \subsection matrix_operators_clamp clamp()
//
// The \c clamp() function can be used to restrict all elements of a matrix to a specific range:
\code
blaze::DynamicMatrix<double> A, B;
B = clamp( A, -1.0, 1.0 ); // Restrict all elements to the range [-1..1]
\endcode
// Note that in case of sparse matrices only the non-zero elements are taken into account!
//
//
// \n \subsection matrix_operators_pow pow()
//
// The \c pow() function can be used to compute the exponential value of each element of a matrix:
\code
blaze::StaticMatrix<double,3UL,3UL> A, B;
B = pow( A, 1.2 ); // Computes the exponential value of each element
\endcode
// \n \subsection matrix_operators_exp exp()
//
// \c exp(), \c exp2() and \c exp10() compute the base e/2/10 exponential of each element of a
// matrix, respectively:
\code
blaze::HybridMatrix<double,3UL,3UL> A, B;
B = exp( A ); // Computes the base e exponential of each element
B = exp2( A ); // Computes the base 2 exponential of each element
B = exp10( A ); // Computes the base 10 exponential of each element
\endcode
// Note that in case of sparse matrices only the non-zero elements are taken into account!
//
//
// \n \subsection matrix_operators_log log() / log2() / log10()
//
// The \c log(), \c log2() and \c log10() functions can be used to compute the natural, binary
// and common logarithm of each element of a matrix:
\code
blaze::StaticMatrix<double,3UL,3UL> A, B;
B = log( A ); // Computes the natural logarithm of each element
B = log2( A ); // Computes the binary logarithm of each element
B = log10( A ); // Computes the common logarithm of each element
\endcode
// \n \subsection matrix_operators_trigonometric_functions sin() / cos() / tan() / asin() / acos() / atan()
//
// The following trigonometric functions are available for both dense and sparse matrices:
\code
blaze::DynamicMatrix<double> A, B;
B = sin( A ); // Computes the sine of each element of the matrix
B = cos( A ); // Computes the cosine of each element of the matrix
B = tan( A ); // Computes the tangent of each element of the matrix
B = asin( A ); // Computes the inverse sine of each element of the matrix
B = acos( A ); // Computes the inverse cosine of each element of the matrix
B = atan( A ); // Computes the inverse tangent of each element of the matrix
\endcode
// Note that in case of sparse matrices only the non-zero elements are taken into account!
//
//
// \n \subsection matrix_operators_hyperbolic_functions sinh() / cosh() / tanh() / asinh() / acosh() / atanh()
//
// The following hyperbolic functions are available for both dense and sparse matrices:
\code
blaze::DynamicMatrix<double> A, B;
B = sinh( A ); // Computes the hyperbolic sine of each element of the matrix
B = cosh( A ); // Computes the hyperbolic cosine of each element of the matrix
B = tanh( A ); // Computes the hyperbolic tangent of each element of the matrix
B = asinh( A ); // Computes the inverse hyperbolic sine of each element of the matrix
B = acosh( A ); // Computes the inverse hyperbolic cosine of each element of the matrix
B = atanh( A ); // Computes the inverse hyperbolic tangent of each element of the matrix
\endcode
// \n \subsection matrix_operators_erf erf() / erfc()
//
// The \c erf() and \c erfc() functions compute the (complementary) error function of each
// element of a matrix:
\code
blaze::StaticMatrix<double,3UL,3UL> A, B;
B = erf( A ); // Computes the error function of each element
B = erfc( A ); // Computes the complementary error function of each element
\endcode
// Note that in case of sparse matrices only the non-zero elements are taken into account!
//
//
// \n \subsection matrix_operations_map map() / forEach()
//
// Via the unary and binary \c map() functions it is possible to execute componentwise custom
// operations on matrices. The unary \c map() function can be used to apply a custom operation
// on each element of a dense or sparse matrix. For instance, the following example demonstrates
// a custom square root computation via a lambda:
\code
blaze::DynamicMatrix<double> A, B;
B = map( A, []( double d ) { return std::sqrt( d ); } );
\endcode
// The binary \c map() function can be used to apply an operation pairwise to the elements of
// two dense matrices. The following example demonstrates the merging of two matrices of double
// precision values into a matrix of double precision complex numbers:
\code
blaze::DynamicMatrix<double> real{ { 2.1, -4.2 }, { 1.0, 0.6 } };
blaze::DynamicMatrix<double> imag{ { 0.3, 1.4 }, { 2.9, -3.4 } };
blaze::DynamicMatrix< complex<double> > cplx;
// Creating the matrix
// ( (-2.1, 0.3) (-4.2, -1.4) )
// ( ( 1.0, 2.9) ( 0.6, -3.4) )
cplx = map( real, imag, []( double r, double i ){ return complex<double>( r, i ); } );
\endcode
// Although the computation can be parallelized it is not vectorized and thus cannot perform at
// peak performance. However, it is also possible to create vectorized custom operations. See
// \ref custom_operations for a detailed overview of the possibilities of custom operations.
//
// Please note that unary custom operations on vectors have been introduced in \b Blaze 3.0 in
// form of the \c forEach() function. With the introduction of binary custom functions, the
// \c forEach() function has been renamed to \c map(). The \c forEach() function can still be
// used (even for binary custom operations), but the function might be deprecated in future
// releases of \b Blaze.
//
//
// \n \section matrix_operations_declaration_operations Declaration Operations
// <hr>
//
// \subsection matrix_operations_declsym declsym()
//
// The \c declsym() operation can be used to explicitly declare any matrix or matrix expression
// as symmetric:
\code
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
B = declsym( A );
\endcode
// Any matrix or matrix expression that has been declared as symmetric via \c declsym() will
// gain all the benefits of a symmetric matrix, which range from reduced runtime checking to
// a considerable speed-up in computations:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
DynamicMatrix<double> A, B, C;
SymmetricMatrix< DynamicMatrix<double> > S;
// ... Resizing and initialization
isSymmetric( declsym( A ) ); // Will always return true without runtime effort
S = declsym( A ); // Omit any runtime check for symmetry
C = declsym( A * B ); // Declare the result of the matrix multiplication as symmetric,
// i.e. perform an optimized matrix multiplication
\endcode
// \warning The \c declsym() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information. Declaring a non-symmetric matrix or
// matrix expression as symmetric via the \c declsym() operation leads to undefined behavior
// (which can be violated invariants or wrong computation results)!
//
//
// \n \subsection matrix_operations_declherm declherm()
//
// The \c declherm() operation can be used to explicitly declare any matrix or matrix expression
// as Hermitian:
\code
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
B = declherm( A );
\endcode
// Any matrix or matrix expression that has been declared as Hermitian via \c declherm() will
// gain all the benefits of an Hermitian matrix, which range from reduced runtime checking to
// a considerable speed-up in computations:
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
DynamicMatrix<double> A, B, C;
HermitianMatrix< DynamicMatrix<double> > S;
// ... Resizing and initialization
isHermitian( declherm( A ) ); // Will always return true without runtime effort
S = declherm( A ); // Omit any runtime check for Hermitian symmetry
C = declherm( A * B ); // Declare the result of the matrix multiplication as Hermitian,
// i.e. perform an optimized matrix multiplication
\endcode
// \warning The \c declherm() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information. Declaring a non-Hermitian matrix or
// matrix expression as Hermitian via the \c declherm() operation leads to undefined behavior
// (which can be violated invariants or wrong computation results)!
//
//
// \n \subsection matrix_operations_decllow decllow()
//
// The \c decllow() operation can be used to explicitly declare any matrix or matrix expression
// as lower triangular:
\code
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
B = decllow( A );
\endcode
// Any matrix or matrix expression that has been declared as lower triangular via \c decllow()
// will gain all the benefits of a lower triangular matrix, which range from reduced runtime
// checking to a considerable speed-up in computations:
\code
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
DynamicMatrix<double> A, B, C;
LowerMatrix< DynamicMatrix<double> > L;
// ... Resizing and initialization
isLower( decllow( A ) ); // Will always return true without runtime effort
L = decllow( A ); // Omit any runtime check for A being a lower matrix
C = decllow( A * B ); // Declare the result of the matrix multiplication as lower triangular,
// i.e. perform an optimized matrix multiplication
\endcode
// \warning The \c decllow() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information. Declaring a non-lower matrix or
// matrix expression as lower triangular via the \c decllow() operation leads to undefined
// behavior (which can be violated invariants or wrong computation results)!
//
//
// \n \subsection matrix_operations_declupp declupp()
//
// The \c declupp() operation can be used to explicitly declare any matrix or matrix expression
// as upper triangular:
\code
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
B = declupp( A );
\endcode
// Any matrix or matrix expression that has been declared as upper triangular via \c declupp()
// will gain all the benefits of an upper triangular matrix, which range from reduced runtime
// checking to a considerable speed-up in computations:
\code
using blaze::DynamicMatrix;
using blaze::UpperMatrix;
DynamicMatrix<double> A, B, C;
UpperMatrix< DynamicMatrix<double> > U;
// ... Resizing and initialization
isUpper( declupp( A ) ); // Will always return true without runtime effort
   U = declupp( A );       // Omit any runtime check for A being an upper matrix
C = declupp( A * B ); // Declare the result of the matrix multiplication as upper triangular,
// i.e. perform an optimized matrix multiplication
\endcode
// \warning The \c declupp() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information. Declaring a non-upper matrix or
// matrix expression as upper triangular via the \c declupp() operation leads to undefined
// behavior (which can be violated invariants or wrong computation results)!
//
//
// \n \subsection matrix_operations_decldiag decldiag()
//
// The \c decldiag() operation can be used to explicitly declare any matrix or matrix expression
// as diagonal:
\code
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
B = decldiag( A );
\endcode
// Any matrix or matrix expression that has been declared as diagonal via \c decldiag() will
// gain all the benefits of a diagonal matrix, which range from reduced runtime checking to
// a considerable speed-up in computations:
\code
using blaze::DynamicMatrix;
using blaze::DiagonalMatrix;
DynamicMatrix<double> A, B, C;
DiagonalMatrix< DynamicMatrix<double> > D;
// ... Resizing and initialization
isDiagonal( decldiag( A ) ); // Will always return true without runtime effort
D = decldiag( A ); // Omit any runtime check for A being a diagonal matrix
C = decldiag( A * B ); // Declare the result of the matrix multiplication as diagonal,
// i.e. perform an optimized matrix multiplication
\endcode
// \warning The \c decldiag() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information. Declaring a non-diagonal matrix
// or matrix expression as diagonal via the \c decldiag() operation leads to undefined
// behavior (which can be violated invariants or wrong computation results)!
//
//
// \n \subsection matrix_operations_declid declid()
//
// The \c declid() operation can be used to explicitly declare any matrix or matrix expression
// as identity matrix:
\code
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
B = declid( A );
\endcode
// Any matrix or matrix expression that has been declared as identity matrix via \c declid() will
// gain all the benefits of an identity matrix, which range from reduced runtime checking to a
// considerable speed-up in computations:
\code
using blaze::DynamicMatrix;
using blaze::DiagonalMatrix;
DynamicMatrix<double> A, B, C;
DiagonalMatrix< DynamicMatrix<double> > D;
// ... Resizing and initialization
isIdentity( declid( A ) ); // Will always return true without runtime effort
D = declid( A ); // Omit any runtime check for A being a diagonal matrix
C = declid( A ) * B; // Declare the left operand of the matrix multiplication as an
// identity matrix, i.e. perform an optimized matrix multiplication
\endcode
// \warning The \c declid() operation has the semantics of a cast: The caller is completely
// responsible and the system trusts the given information. Declaring a non-identity matrix
// or matrix expression as identity matrix via the \c declid() operation leads to undefined
// behavior (which can be violated invariants or wrong computation results)!
//
//
// \n \section matrix_operations_matrix_inversion Matrix Inversion
// <hr>
//
// The inverse of a square dense matrix can be computed via the \c inv() function:
\code
blaze::DynamicMatrix<float,blaze::rowMajor> A, B;
// ... Resizing and initialization
B = inv( A ); // Compute the inverse of A
\endcode
// Alternatively, an in-place inversion of a dense matrix can be performed via the \c invert()
// function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
invert( A ); // In-place matrix inversion
\endcode
// Both the \c inv() and the \c invert() functions will automatically select the most suited matrix
// inversion algorithm depending on the size and type of the given matrix. For small matrices of
// up to 6x6, both functions use manually optimized kernels for maximum performance. For matrices
// larger than 6x6 the inversion is performed by means of the most suited matrix decomposition
// method: In case of a general matrix the LU decomposition is used, for symmetric matrices the
// LDLT decomposition is applied, for Hermitian matrices the LDLH decomposition is performed, and
// for triangular matrices the inverse is computed via a forward or back substitution.
//
// In case the type of the matrix does not provide additional compile time information about its
// structure (symmetric, lower, upper, diagonal, ...), the information can be provided manually
// when calling the \c invert() function:
\code
using blaze::asGeneral;
using blaze::asSymmetric;
using blaze::asHermitian;
using blaze::asLower;
using blaze::asUniLower;
using blaze::asUpper;
using blaze::asUniUpper;
using blaze::asDiagonal;
invert<asGeneral> ( A ); // In-place inversion of a general matrix
invert<asSymmetric>( A ); // In-place inversion of a symmetric matrix
invert<asHermitian>( A ); // In-place inversion of a Hermitian matrix
invert<asLower> ( A ); // In-place inversion of a lower triangular matrix
invert<asUniLower> ( A ); // In-place inversion of a lower unitriangular matrix
   invert<asUpper>    ( A );  // In-place inversion of an upper triangular matrix
   invert<asUniUpper> ( A );  // In-place inversion of an upper unitriangular matrix
invert<asDiagonal> ( A ); // In-place inversion of a diagonal matrix
\endcode
// Alternatively, via the \c invert() function it is possible to explicitly specify the inversion
// algorithm:
\code
using blaze::byLU;
using blaze::byLDLT;
using blaze::byLDLH;
using blaze::byLLH;
// In-place inversion of a general matrix by means of an LU decomposition
invert<byLU>( A );
// In-place inversion of a symmetric indefinite matrix by means of a Bunch-Kaufman decomposition
invert<byLDLT>( A );
// In-place inversion of a Hermitian indefinite matrix by means of a Bunch-Kaufman decomposition
invert<byLDLH>( A );
// In-place inversion of a positive definite matrix by means of a Cholesky decomposition
invert<byLLH>( A );
\endcode
// Whereas the inversion by means of an LU decomposition works for every general square matrix,
// the inversion by LDLT only works for symmetric indefinite matrices, the inversion by LDLH is
// restricted to Hermitian indefinite matrices and the Cholesky decomposition (LLH) only works
// for Hermitian positive definite matrices. Please note that it is in the responsibility of the
// function caller to guarantee that the selected algorithm is suited for the given matrix. In
// case this precondition is violated the result can be wrong and might not represent the inverse
// of the given matrix!
//
// For both the \c inv() and \c invert() functions the matrix inversion fails if ...
//
// - ... the given matrix is not a square matrix;
// - ... the given matrix is singular and not invertible.
//
// In all failure cases either a compilation error is created if the failure can be predicted at
// compile time or a \c std::invalid_argument exception is thrown.
//
// \note The matrix inversion can only be used for dense matrices with \c float, \c double,
// \c complex<float> or \c complex<double> element type. The attempt to call the function with
// matrices of any other element type or with a sparse matrix results in a compile time error!
//
// \note The functions invert the dense matrix by means of LAPACK kernels. Thus the functions can
// only be used if a fitting LAPACK library is available and linked to the executable. Otherwise
// a linker error will be created.
//
// \note It is not possible to use any kind of view on the expression object returned by the
// \c inv() function. Also, it is not possible to access individual elements via the function call
// operator on the expression object:
\code
row( inv( A ), 2UL ); // Compilation error: Views cannot be used on an inv() expression!
inv( A )(1,2); // Compilation error: It is not possible to access individual elements!
\endcode
// \note The inversion functions do not provide any exception safety guarantee, i.e. in case an
// exception is thrown the matrix may already have been modified.
//
//
// \n \section matrix_operations_decomposition Matrix Decomposition
// <hr>
//
// \note All decomposition functions can only be used for dense matrices with \c float, \c double,
// \c complex<float> or \c complex<double> element type. The attempt to call the function with
// matrices of any other element type or with a sparse matrix results in a compile time error!
//
// \note The functions decompose a dense matrix by means of LAPACK kernels. Thus the functions can
// only be used if a fitting LAPACK library is available and linked to the executable. Otherwise
// a linker error will be created.
//
// \subsection matrix_operations_decomposition_lu LU Decomposition
//
// The LU decomposition of a dense matrix can be computed via the \c lu() function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> L, U, P;
lu( A, L, U, P ); // LU decomposition of a row-major matrix
assert( A == L * U * P );
\endcode
\code
blaze::DynamicMatrix<double,blaze::columnMajor> A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::columnMajor> L, U, P;
lu( A, L, U, P ); // LU decomposition of a column-major matrix
assert( A == P * L * U );
\endcode
// The function works for both \c rowMajor and \c columnMajor matrices. Note, however, that the
// three matrices \c A, \c L and \c U are required to have the same storage order. Also, please
// note that the way the permutation matrix \c P needs to be applied differs between row-major and
// column-major matrices, since the algorithm uses column interchanges for row-major matrices and
// row interchanges for column-major matrices.
//
// Furthermore, \c lu() can be used with adaptors. For instance, the following example demonstrates
// the LU decomposition of a symmetric matrix into a lower and upper triangular matrix:
\code
blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A;
// ... Resizing and initialization
blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L;
blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > U;
blaze::DynamicMatrix<double,blaze::columnMajor> P;
lu( A, L, U, P ); // LU decomposition of A
\endcode
// \n \subsection matrix_operations_decomposition_llh Cholesky Decomposition
//
// The Cholesky (LLH) decomposition of a dense matrix can be computed via the \c llh() function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> L;
llh( A, L ); // LLH decomposition of a row-major matrix
assert( A == L * ctrans( L ) );
\endcode
// The function works for both \c rowMajor and \c columnMajor matrices and the two matrices \c A
// and \c L can have any storage order.
//
// Furthermore, \c llh() can be used with adaptors. For instance, the following example demonstrates
// the LLH decomposition of a symmetric matrix into a lower triangular matrix:
\code
blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A;
// ... Resizing and initialization
blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L;
llh( A, L ); // Cholesky decomposition of A
\endcode
// \n \subsection matrix_operations_decomposition_qr QR Decomposition
//
// The QR decomposition of a dense matrix can be computed via the \c qr() function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::columnMajor> Q;
blaze::DynamicMatrix<double,blaze::rowMajor> R;
qr( A, Q, R ); // QR decomposition of a row-major matrix
assert( A == Q * R );
\endcode
// The function works for both \c rowMajor and \c columnMajor matrices and the three matrices
// \c A, \c Q and \c R can have any storage order.
//
// Furthermore, \c qr() can be used with adaptors. For instance, the following example demonstrates
// the QR decomposition of a symmetric matrix into a general matrix and an upper triangular matrix:
\code
blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> Q;
blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > R;
qr( A, Q, R ); // QR decomposition of A
\endcode
// \n \subsection matrix_operations_decomposition_rq RQ Decomposition
//
// Similar to the QR decomposition, the RQ decomposition of a dense matrix can be computed via
// the \c rq() function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> R;
blaze::DynamicMatrix<double,blaze::columnMajor> Q;
rq( A, R, Q ); // RQ decomposition of a row-major matrix
assert( A == R * Q );
\endcode
// The function works for both \c rowMajor and \c columnMajor matrices and the three matrices
// \c A, \c R and \c Q can have any storage order.
//
// Also the \c rq() function can be used in combination with matrix adaptors. For instance, the
// following example demonstrates the RQ decomposition of an Hermitian matrix into a general
// matrix and an upper triangular matrix:
\code
blaze::HermitianMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > A;
// ... Resizing and initialization
blaze::UpperMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > R;
blaze::DynamicMatrix<complex<double>,blaze::rowMajor> Q;
rq( A, R, Q ); // RQ decomposition of A
\endcode
// \n \subsection matrix_operations_decomposition_ql QL Decomposition
//
// The QL decomposition of a dense matrix can be computed via the \c ql() function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> Q;
blaze::DynamicMatrix<double,blaze::columnMajor> L;
ql( A, Q, L ); // QL decomposition of a row-major matrix
assert( A == Q * L );
\endcode
// The function works for both \c rowMajor and \c columnMajor matrices and the three matrices
// \c A, \c Q and \c L can have any storage order.
//
// Also the \c ql() function can be used in combination with matrix adaptors. For instance, the
// following example demonstrates the QL decomposition of a symmetric matrix into a general
// matrix and a lower triangular matrix:
\code
blaze::SymmetricMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> Q;
blaze::LowerMatrix< blaze::DynamicMatrix<double,blaze::columnMajor> > L;
ql( A, Q, L ); // QL decomposition of A
\endcode
// \n \subsection matrix_operations_decomposition_lq LQ Decomposition
//
// The LQ decomposition of a dense matrix can be computed via the \c lq() function:
\code
blaze::DynamicMatrix<double,blaze::rowMajor> A;
// ... Resizing and initialization
blaze::DynamicMatrix<double,blaze::rowMajor> L;
blaze::DynamicMatrix<double,blaze::columnMajor> Q;
lq( A, L, Q ); // LQ decomposition of a row-major matrix
assert( A == L * Q );
\endcode
// The function works for both \c rowMajor and \c columnMajor matrices and the three matrices
// \c A, \c L and \c Q can have any storage order.
//
// Furthermore, \c lq() can be used with adaptors. For instance, the following example demonstrates
// the LQ decomposition of an Hermitian matrix into a lower triangular matrix and a general matrix:
\code
blaze::HermitianMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > A;
// ... Resizing and initialization
blaze::LowerMatrix< blaze::DynamicMatrix<complex<double>,blaze::columnMajor> > L;
blaze::DynamicMatrix<complex<double>,blaze::rowMajor> Q;
lq( A, L, Q ); // LQ decomposition of A
\endcode
// \n \section matrix_operations_eigenvalues Eigenvalues/Eigenvectors
// <hr>
//
// The eigenvalues and eigenvectors of a dense matrix can be computed via the \c eigen() functions:
\code
namespace blaze {
template< typename MT, bool SO, typename VT, bool TF >
void eigen( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w );
template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 >
void eigen( const DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& V );
} // namespace blaze
\endcode
// The first function computes only the eigenvalues of the given \a n-by-\a n matrix, the second
// function additionally computes the eigenvectors. The eigenvalues are returned in the given vector
// \a w and the eigenvectors are returned in the given matrix \a V, which are both resized to the
// correct dimensions (if possible and necessary).
//
// Depending on the given matrix type, the resulting eigenvalues are either of floating point
// or complex type: In case the given matrix is either a compile time symmetric matrix with
// floating point elements or an Hermitian matrix with complex elements, the resulting eigenvalues
// will be of floating point type and therefore the elements of the given eigenvalue vector are
// expected to be of floating point type. In all other cases they are expected to be of complex
// type. Please note that for complex eigenvalues no order of eigenvalues can be assumed, except
// that complex conjugate pairs of eigenvalues appear consecutively with the eigenvalue having
// the positive imaginary part first.
//
// In case \a A is a row-major matrix, the left eigenvectors are returned in the rows of \a V,
// in case \a A is a column-major matrix, the right eigenvectors are returned in the columns of
// \a V. In case the given matrix is a compile time symmetric matrix with floating point elements,
// the resulting eigenvectors will be of floating point type and therefore the elements of the
// given eigenvector matrix are expected to be of floating point type. In all other cases they
// are expected to be of complex type.
//
// The following examples give an impression of the computation of eigenvalues and eigenvectors
// for a general, a symmetric, and an Hermitian matrix:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::rowMajor;
using blaze::columnVector;
DynamicMatrix<double,rowMajor> A( 5UL, 5UL ); // The general matrix A
// ... Initialization
DynamicVector<complex<double>,columnVector> w( 5UL ); // The vector for the complex eigenvalues
DynamicMatrix<complex<double>,rowMajor> V( 5UL, 5UL ); // The matrix for the left eigenvectors
eigen( A, w, V );
\endcode
\code
using blaze::SymmetricMatrix;
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::rowMajor;
using blaze::columnVector;
SymmetricMatrix< DynamicMatrix<double,rowMajor> > A( 5UL, 5UL ); // The symmetric matrix A
// ... Initialization
DynamicVector<double,columnVector> w( 5UL ); // The vector for the real eigenvalues
DynamicMatrix<double,rowMajor> V( 5UL, 5UL ); // The matrix for the left eigenvectors
eigen( A, w, V );
\endcode
\code
using blaze::HermitianMatrix;
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::rowMajor;
using blaze::columnVector;
HermitianMatrix< DynamicMatrix<complex<double>,rowMajor> > A( 5UL, 5UL ); // The Hermitian matrix A
// ... Initialization
DynamicVector<double,columnVector> w( 5UL ); // The vector for the real eigenvalues
DynamicMatrix<complex<double>,rowMajor> V( 5UL, 5UL ); // The matrix for the left eigenvectors
eigen( A, w, V );
\endcode
// The functions fail if ...
//
// - ... the given matrix \a A is not a square matrix;
// - ... the given vector \a w is a fixed size vector and the size doesn't match;
// - ... the given matrix \a V is a fixed size matrix and the dimensions don't match;
// - ... the eigenvalue computation fails.
//
// In all failure cases an exception is thrown.
//
// \note All \c eigen() functions can only be used for dense matrices with \c float, \c double,
// \c complex<float> or \c complex<double> element type. The attempt to call the function with
// matrices of any other element type or with a sparse matrix results in a compile time error!
//
// \note The functions compute the eigenvalues and/or eigenvectors of a dense matrix by means of
// LAPACK kernels. Thus the functions can only be used if a fitting LAPACK library is available
// and linked to the executable. Otherwise a linker error will be created.
//
//
// \n \section matrix_operations_singularvalues Singular Values/Singular Vectors
// <hr>
//
// The singular value decomposition (SVD) of a dense matrix can be computed via the \c svd()
// functions:
\code
namespace blaze {
template< typename MT, bool SO, typename VT, bool TF >
void svd( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s );
template< typename MT1, bool SO, typename VT, bool TF, typename MT2, typename MT3 >
void svd( const DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V );
template< typename MT, bool SO, typename VT, bool TF, typename ST >
size_t svd( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s, ST low, ST upp );
template< typename MT1, bool SO, typename VT, bool TF, typename MT2, typename MT3, typename ST >
size_t svd( const DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, ST low, ST upp );
} // namespace blaze
\endcode
// The first and third function compute only singular values of the given general \a m-by-\a n
// matrix, the second and fourth function additionally compute singular vectors. The resulting
// singular values are returned in the given vector \a s, the left singular vectors are returned
// in the given matrix \a U, and the right singular vectors are returned in the matrix \a V. \a s,
// \a U, and \a V are resized to the correct dimensions (if possible and necessary).
//
// The third and fourth function allow for the specification of a subset of singular values and/or
// vectors. The number of singular values and vectors to be computed is specified by the lower
// bound \a low and the upper bound \a upp, which either form an integral or a floating point
// range.
//
// In case \a low and \a upp are of integral type, the function computes all singular values
// in the index range \f$[low..upp]\f$. The \a num resulting real and non-negative singular values
// are stored in descending order in the given vector \a s, which is either resized (if possible)
// or expected to be a \a num-dimensional vector. The resulting left singular vectors are stored
// in the given matrix \a U, which is either resized (if possible) or expected to be a
// \a m-by-\a num matrix. The resulting right singular vectors are stored in the given matrix \a V,
// which is either resized (if possible) or expected to be a \a num-by-\a n matrix.
//
// In case \a low and \a upp are of floating point type, the function computes all singular values
// in the half-open interval \f$(low..upp]\f$. The resulting real and non-negative singular values
// are stored in descending order in the given vector \a s, which is either resized (if possible)
// or expected to be a min(\a m,\a n)-dimensional vector. The resulting left singular vectors are
// stored in the given matrix \a U, which is either resized (if possible) or expected to be a
// \a m-by-min(\a m,\a n) matrix. The resulting right singular vectors are stored in the given
// matrix \a V, which is either resized (if possible) or expected to be a min(\a m,\a n)-by-\a n
// matrix.
//
// The functions fail if ...
//
// - ... the given matrix \a U is a fixed size matrix and the dimensions don't match;
// - ... the given vector \a s is a fixed size vector and the size doesn't match;
// - ... the given matrix \a V is a fixed size matrix and the dimensions don't match;
// - ... the given scalar values don't form a proper range;
// - ... the singular value decomposition fails.
//
// In all failure cases an exception is thrown.
//
// Examples:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::rowMajor;
using blaze::columnVector;
DynamicMatrix<double,rowMajor> A( 5UL, 8UL ); // The general matrix A
// ... Initialization
DynamicMatrix<double,rowMajor> U; // The matrix for the left singular vectors
DynamicVector<double,columnVector> s; // The vector for the singular values
DynamicMatrix<double,rowMajor> V; // The matrix for the right singular vectors
svd( A, U, s, V );
\endcode
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::rowMajor;
using blaze::columnVector;
DynamicMatrix<complex<double>,rowMajor> A( 5UL, 8UL ); // The general matrix A
// ... Initialization
DynamicMatrix<complex<double>,rowMajor> U; // The matrix for the left singular vectors
DynamicVector<double,columnVector> s; // The vector for the singular values
DynamicMatrix<complex<double>,rowMajor> V; // The matrix for the right singular vectors
svd( A, U, s, V, 0, 2 );
\endcode
// \note All \c svd() functions can only be used for dense matrices with \c float, \c double,
// \c complex<float> or \c complex<double> element type. The attempt to call the function with
// matrices of any other element type or with a sparse matrix results in a compile time error!
//
// \note The functions compute the singular values and/or singular vectors of a dense matrix by
// means of LAPACK kernels. Thus the functions can only be used if a fitting LAPACK library is
// available and linked to the executable. Otherwise a linker error will be created.
//
//
// \n Previous: \ref matrix_types Next: \ref adaptors
*/
//*************************************************************************************************
//**Adaptors***************************************************************************************
/*!\page adaptors Adaptors
//
// \tableofcontents
//
//
// \section adaptors_general General Concepts
// <hr>
//
// Adaptors act as wrappers around the general \ref matrix_types. They adapt the interface of the
// matrices such that certain invariants are preserved. Due to this adaptors can provide a compile
// time guarantee of certain properties, which can be exploited for optimized performance.
//
// The \b Blaze library provides a total of 9 different adaptors:
//
// <ul>
// <li> \ref adaptors_symmetric_matrices </li>
// <li> \ref adaptors_hermitian_matrices </li>
// <li> \ref adaptors_triangular_matrices
// <ul>
// <li> \ref adaptors_triangular_matrices "Lower Triangular Matrices"
// <ul>
// <li> \ref adaptors_triangular_matrices_lowermatrix </li>
// <li> \ref adaptors_triangular_matrices_unilowermatrix </li>
// <li> \ref adaptors_triangular_matrices_strictlylowermatrix </li>
// </ul>
// </li>
// <li> \ref adaptors_triangular_matrices "Upper Triangular Matrices"
// <ul>
// <li> \ref adaptors_triangular_matrices_uppermatrix </li>
// <li> \ref adaptors_triangular_matrices_uniuppermatrix </li>
// <li> \ref adaptors_triangular_matrices_strictlyuppermatrix </li>
// </ul>
// </li>
// <li> \ref adaptors_triangular_matrices "Diagonal Matrices"
// <ul>
// <li> \ref adaptors_triangular_matrices_diagonalmatrix </li>
// </ul>
// </li>
// </ul>
// </li>
// </ul>
//
// In combination with the general matrix types, \b Blaze provides a total of 40 different matrix
// types that make it possible to exactly adapt the type of matrix to every specific problem.
//
//
// \n \section adaptors_examples Examples
// <hr>
//
// The following code examples give an impression on the use of adaptors. The first example shows
// the multiplication between two lower matrices:
\code
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
LowerMatrix< DynamicMatrix<double,rowMajor> > A;
LowerMatrix< DynamicMatrix<double,columnMajor> > B;
DynamicMatrix<double,columnMajor> C;
// ... Resizing and initialization
C = A * B;
\endcode
// When multiplying two matrices, at least one of which is triangular, \b Blaze can exploit the
// fact that either the lower or upper part of the matrix contains only default elements and
// restrict the algorithm to the non-zero elements. Thus the adaptor provides a significant
// performance advantage in comparison to a general matrix multiplication, especially for large
// matrices.
//
// The second example shows the \c SymmetricMatrix adaptor in a row-major dense matrix/sparse
// vector multiplication:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::CompressedVector;
using blaze::rowMajor;
using blaze::columnVector;
SymmetricMatrix< DynamicMatrix<double,rowMajor> > A;
CompressedVector<double,columnVector> x;
DynamicVector<double,columnVector> y;
// ... Resizing and initialization
y = A * x;
\endcode
// In this example it is not intuitively apparent that using a row-major matrix is not the best
// possible choice in terms of performance since the computation cannot be vectorized. Choosing
// a column-major matrix instead, however, would enable a vectorized computation. Therefore
// \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and
// evaluates the multiplication as
\code
y = trans( A ) * x;
\endcode
// which significantly increases the performance.
//
// \n Previous: \ref matrix_operations Next: \ref adaptors_symmetric_matrices
*/
//*************************************************************************************************
//**Symmetric Matrices*****************************************************************************
/*!\page adaptors_symmetric_matrices Symmetric Matrices
//
// \tableofcontents
//
//
// \n \section adaptors_symmetric_matrices_general Symmetric Matrices
// <hr>
//
// In contrast to general matrices, which have no restriction in their number of rows and columns
// and whose elements can have any value, symmetric matrices provide the compile time guarantee
// to be square matrices with pair-wise identical values. Mathematically, this means that a
// symmetric matrix is always equal to its transpose (\f$ A = A^T \f$) and that all non-diagonal
// values have an identical counterpart (\f$ a_{ij} == a_{ji} \f$). This symmetry property can
// be exploited to provide higher efficiency and/or lower memory consumption. Within the \b Blaze
// library, symmetric matrices are realized by the \ref adaptors_symmetric_matrices_symmetricmatrix
// class template.
//
//
// \n \section adaptors_symmetric_matrices_symmetricmatrix SymmetricMatrix
// <hr>
//
// The SymmetricMatrix class template is an adapter for existing dense and sparse matrix types.
// It inherits the properties and the interface of the given matrix type \c MT and extends it
// by enforcing the additional invariant of symmetry (i.e. the matrix is always equal to its
// transpose \f$ A = A^T \f$). It can be included via the header file
\code
#include <blaze/math/SymmetricMatrix.h>
\endcode
// The type of the adapted matrix can be specified via template parameter:
\code
template< typename MT >
class SymmetricMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. SymmetricMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note
// that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or
// blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible symmetric matrices:
\code
using blaze::unaligned;
using blaze::unpadded;
using blaze::rowMajor;
using blaze::columnMajor;
// Definition of a 3x3 row-major dense symmetric matrix with static memory
blaze::SymmetricMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A;
// Definition of a resizable column-major dense symmetric matrix based on HybridMatrix
blaze::SymmetricMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B;
// Definition of a resizable row-major dense symmetric matrix based on DynamicMatrix
blaze::SymmetricMatrix< blaze::DynamicMatrix<double,rowMajor> > C;
// Definition of a fixed size row-major dense symmetric matrix based on CustomMatrix
blaze::SymmetricMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D;
// Definition of a compressed row-major single precision symmetric matrix
blaze::SymmetricMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > E;
\endcode
// The storage order of a symmetric matrix is depending on the storage order of the adapted matrix
// type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified as
// blaze::rowMajor), the symmetric matrix will also be a row-major matrix. Otherwise, if the
// adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the symmetric matrix
// will also be a column-major matrix.
//
//
// \n \section adaptors_symmetric_matrices_special_properties Special Properties of Symmetric Matrices
// <hr>
//
// A symmetric matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT.
// It also provides (nearly) the same interface as the underlying matrix type. However, there are
// some important exceptions resulting from the symmetry constraint:
//
// -# <b>\ref adaptors_symmetric_matrices_square</b>
// -# <b>\ref adaptors_symmetric_matrices_symmetry</b>
// -# <b>\ref adaptors_symmetric_matrices_initialization</b>
//
// \n \subsection adaptors_symmetric_matrices_square Symmetric Matrices Must Always be Square!
//
// In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix,
// or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and
// the \c extend() functions only expect a single parameter, which specifies both the number of
// rows and columns, instead of two (one for the number of rows and one for the number of columns):
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
using blaze::rowMajor;
// Default constructed, default initialized, row-major 3x3 symmetric dynamic matrix
SymmetricMatrix< DynamicMatrix<double,rowMajor> > A( 3 );
// Resizing the matrix to 5x5
A.resize( 5 );
// Extending the number of rows and columns by 2, resulting in a 7x7 matrix
A.extend( 2 );
\endcode
// In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number
// of rows and number of columns must be specified equally:
\code
using blaze::StaticMatrix;
using blaze::SymmetricMatrix;
using blaze::columnMajor;
// Correct setup of a fixed size column-major 3x3 symmetric static matrix
SymmetricMatrix< StaticMatrix<int,3UL,3UL,columnMajor> > A;
// Compilation error: the provided matrix type is not a square matrix type
SymmetricMatrix< StaticMatrix<int,3UL,4UL,columnMajor> > B;
\endcode
// \n \subsection adaptors_symmetric_matrices_symmetry The Symmetric Property is Always Enforced!
//
// This means that modifying the element \f$ a_{ij} \f$ of a symmetric matrix also modifies its
// counterpart element \f$ a_{ji} \f$. Also, it is only possible to assign matrices that are
// symmetric themselves:
\code
using blaze::CompressedMatrix;
using blaze::DynamicMatrix;
using blaze::StaticMatrix;
using blaze::SymmetricMatrix;
using blaze::rowMajor;
// Default constructed, row-major 3x3 symmetric compressed matrix
SymmetricMatrix< CompressedMatrix<double,rowMajor> > A( 3 );
// Initializing three elements via the function call operator
A(0,0) = 1.0; // Initialization of the diagonal element (0,0)
A(0,2) = 2.0; // Initialization of the elements (0,2) and (2,0)
// Inserting three more elements via the insert() function
A.insert( 1, 1, 3.0 ); // Inserting the diagonal element (1,1)
A.insert( 1, 2, 4.0 ); // Inserting the elements (1,2) and (2,1)
// Access via a non-const iterator
*A.begin(1UL) = 10.0; // Modifies both elements (1,0) and (0,1)
// Erasing elements via the erase() function
A.erase( 0, 0 ); // Erasing the diagonal element (0,0)
A.erase( 0, 2 ); // Erasing the elements (0,2) and (2,0)
// Construction from a symmetric dense matrix
StaticMatrix<double,3UL,3UL> B{ { 3.0, 8.0, -2.0 },
{ 8.0, 0.0, -1.0 },
{ -2.0, -1.0, 4.0 } };
SymmetricMatrix< DynamicMatrix<double,rowMajor> > C( B ); // OK
// Assignment of a non-symmetric dense matrix
StaticMatrix<double,3UL,3UL> D{ { 3.0, 7.0, -2.0 },
{ 8.0, 0.0, -1.0 },
{ -2.0, -1.0, 4.0 } };
C = D; // Throws an exception; symmetric invariant would be violated!
\endcode
// The same restriction also applies to the \c append() function for sparse matrices: Appending
// the element \f$ a_{ij} \f$ additionally inserts the element \f$ a_{ji} \f$ into the matrix.
// Despite the additional insertion, the \c append() function still provides the most efficient
// way to set up a symmetric sparse matrix. In order to achieve the maximum efficiency, the
// capacity of the individual rows/columns of the matrix should be specifically prepared with
// \c reserve() calls:
\code
using blaze::CompressedMatrix;
using blaze::SymmetricMatrix;
using blaze::rowMajor;
// Setup of the symmetric matrix
//
// ( 0 1 3 )
// A = ( 1 2 0 )
// ( 3 0 0 )
//
SymmetricMatrix< CompressedMatrix<double,rowMajor> > A( 3 );
A.reserve( 5 ); // Reserving enough space for 5 non-zero elements
A.reserve( 0, 2 ); // Reserving two non-zero elements in the first row
A.reserve( 1, 2 ); // Reserving two non-zero elements in the second row
A.reserve( 2, 1 ); // Reserving a single non-zero element in the third row
A.append( 0, 1, 1.0 ); // Appending the value 1 at position (0,1) and (1,0)
A.append( 1, 1, 2.0 ); // Appending the value 2 at position (1,1)
A.append( 2, 0, 3.0 ); // Appending the value 3 at position (2,0) and (0,2)
\endcode
// The symmetry property is also enforced for symmetric custom matrices: In case the given array
// of elements does not represent a symmetric matrix, a \c std::invalid_argument exception is
// thrown:
\code
using blaze::CustomMatrix;
using blaze::SymmetricMatrix;
using blaze::unaligned;
using blaze::unpadded;
using blaze::rowMajor;
typedef SymmetricMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> > CustomSymmetric;
// Creating a 3x3 symmetric custom matrix from a properly initialized array
double array[9] = { 1.0, 2.0, 4.0,
2.0, 3.0, 5.0,
4.0, 5.0, 6.0 };
CustomSymmetric A( array, 3UL ); // OK
// Attempt to create a second 3x3 symmetric custom matrix from an uninitialized array
CustomSymmetric B( new double[9UL], 3UL, blaze::ArrayDelete() ); // Throws an exception
\endcode
// Finally, the symmetry property is enforced for views (rows, columns, submatrices, ...) on the
// symmetric matrix. The following example demonstrates that modifying the elements of an entire
// row of the symmetric matrix also affects the counterpart elements in the according column of
// the matrix:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
// Setup of the symmetric matrix
//
// ( 0 1 0 2 )
// A = ( 1 3 4 0 )
// ( 0 4 0 5 )
// ( 2 0 5 0 )
//
SymmetricMatrix< DynamicMatrix<int> > A( 4 );
A(0,1) = 1;
A(0,3) = 2;
A(1,1) = 3;
A(1,2) = 4;
A(2,3) = 5;
// Setting all elements in the 1st row to 0 results in the matrix
//
// ( 0 0 0 2 )
// A = ( 0 0 0 0 )
// ( 0 0 0 5 )
// ( 2 0 5 0 )
//
row( A, 1 ) = 0;
\endcode
// The next example demonstrates the (compound) assignment to submatrices of symmetric matrices.
// Since the modification of element \f$ a_{ij} \f$ of a symmetric matrix also modifies the
// element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the symmetry
// of the symmetric matrix is preserved. Otherwise a \c std::invalid_argument exception is
// thrown:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
// Setup of two default 4x4 symmetric matrices
SymmetricMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 );
// Setup of the 3x2 dynamic matrix
//
// ( 1 2 )
// B = ( 3 4 )
// ( 5 6 )
//
DynamicMatrix<int> B{ { 1, 2 }, { 3, 4 }, { 5, 6 } };
// OK: Assigning B to a submatrix of A1 such that the symmetry can be preserved
//
// ( 0 0 1 2 )
// A1 = ( 0 0 3 4 )
// ( 1 3 5 6 )
// ( 2 4 6 0 )
//
submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK
// Error: Assigning B to a submatrix of A2 such that the symmetry cannot be preserved!
// The elements marked with X cannot be assigned unambiguously!
//
// ( 0 1 2 0 )
// A2 = ( 1 3 X 0 )
// ( 2 X 6 0 )
// ( 0 0 0 0 )
//
submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception!
\endcode
// \n \subsection adaptors_symmetric_matrices_initialization The Elements of a Dense Symmetric Matrix are Always Default Initialized!
//
// Although this results in a small loss of efficiency (especially in case all default values are
// overridden afterwards), this property is important since otherwise the symmetric property of
// dense symmetric matrices could not be guaranteed:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
// Uninitialized, 5x5 row-major dynamic matrix
DynamicMatrix<int,rowMajor> A( 5, 5 );
// Default initialized, 5x5 row-major symmetric dynamic matrix
SymmetricMatrix< DynamicMatrix<int,rowMajor> > B( 5 );
\endcode
// \n \section adaptors_symmetric_matrices_arithmetic_operations Arithmetic Operations
// <hr>
//
// A SymmetricMatrix matrix can participate in numerical operations in any way any other dense
// or sparse matrix can participate. It can also be combined with any other dense or sparse vector
// or matrix. The following code example gives an impression of the use of SymmetricMatrix within
// arithmetic operations:
\code
using blaze::SymmetricMatrix;
using blaze::DynamicMatrix;
using blaze::HybridMatrix;
using blaze::StaticMatrix;
using blaze::CompressedMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
DynamicMatrix<double,rowMajor> A( 3, 3 );
CompressedMatrix<double,rowMajor> B( 3, 3 );
SymmetricMatrix< DynamicMatrix<double,rowMajor> > C( 3 );
SymmetricMatrix< CompressedMatrix<double,rowMajor> > D( 3 );
SymmetricMatrix< HybridMatrix<float,3UL,3UL,rowMajor> > E;
SymmetricMatrix< StaticMatrix<float,3UL,3UL,columnMajor> > F;
E = A + B; // Matrix addition and assignment to a row-major symmetric matrix (includes runtime check)
F = C - D; // Matrix subtraction and assignment to a column-major symmetric matrix (only compile time check)
F = A * D; // Matrix multiplication between a dense and a sparse matrix (includes runtime check)
C *= 2.0; // In-place scaling of matrix C
E = 2.0 * B; // Scaling of matrix B (includes runtime check)
F = C * 2.0; // Scaling of matrix C (only compile time check)
E += A - B; // Addition assignment (includes runtime check)
F -= C + D; // Subtraction assignment (only compile time check)
F *= A * D; // Multiplication assignment (includes runtime check)
\endcode
// Note that it is possible to assign any kind of matrix to a symmetric matrix. In case the matrix
// to be assigned is not symmetric at compile time, a runtime check is performed.
//
//
// \n \section adaptors_symmetric_matrices_block_matrices Symmetric Block Matrices
// <hr>
//
// It is also possible to use symmetric block matrices:
\code
using blaze::CompressedMatrix;
using blaze::StaticMatrix;
using blaze::SymmetricMatrix;
// Definition of a 3x3 symmetric block matrix based on CompressedMatrix
SymmetricMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > A( 3 );
\endcode
// Also in this case, the SymmetricMatrix class template enforces the invariant of symmetry and
// guarantees that a modification of element \f$ a_{ij} \f$ of the adapted matrix is also
// applied to element \f$ a_{ji} \f$:
\code
// Inserting the elements (2,4) and (4,2)
A.insert( 2, 4, StaticMatrix<int,3UL,3UL>{ { 1, -4, 5 },
{ 6, 8, -3 },
{ 2, -1, 2 } } );
// Manipulating the elements (2,4) and (4,2)
A(2,4)(1,1) = -5;
\endcode
// For more information on block matrices, see the tutorial on \ref block_vectors_and_matrices.
//
//
// \n \section adaptors_symmetric_matrices_performance Performance Considerations
// <hr>
//
// When the symmetric property of a matrix is known beforehand, using the SymmetricMatrix adaptor
// instead of a general matrix can be a considerable performance advantage. The \b Blaze library
// tries to exploit the properties of symmetric matrices whenever possible. However, there are
// also situations when using a symmetric matrix introduces some overhead. The following examples
// demonstrate several situations where symmetric matrices can positively or negatively impact
// performance.
//
// \n \subsection adaptors_symmetric_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication
//
// When multiplying two matrices, at least one of which is symmetric, \b Blaze can exploit the fact
// that \f$ A = A^T \f$ and choose the fastest and most suited combination of storage orders for the
// multiplication. The following example demonstrates this by means of a dense matrix/sparse matrix
// multiplication:
\code
using blaze::CompressedMatrix;
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
SymmetricMatrix< DynamicMatrix<double,rowMajor> > A;
SymmetricMatrix< CompressedMatrix<double,columnMajor> > B;
DynamicMatrix<double,columnMajor> C;
// ... Resizing and initialization
C = A * B;
\endcode
// Intuitively, the chosen combination of a row-major and a column-major matrix is the most suited
// for maximum performance. However, \b Blaze evaluates the multiplication as
\code
C = A * trans( B );
\endcode
// which significantly increases the performance since in contrast to the original formulation the
// optimized form can be vectorized. Therefore, in the context of matrix multiplications, using the
// SymmetricMatrix adapter is obviously an advantage.
//
// \n \subsection adaptors_symmetric_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication
//
// A similar optimization is possible in case of matrix/vector multiplications:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::CompressedVector;
using blaze::SymmetricMatrix;
using blaze::rowMajor;
using blaze::columnVector;
SymmetricMatrix< DynamicMatrix<double,rowMajor> > A;
CompressedVector<double,columnVector> x;
DynamicVector<double,columnVector> y;
// ... Resizing and initialization
y = A * x;
\endcode
// In this example it is not intuitively apparent that using a row-major matrix is not the best
// possible choice in terms of performance since the computation cannot be vectorized. Choosing
// a column-major matrix instead, however, would enable a vectorized computation. Therefore
// \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and
// evaluates the multiplication as
\code
y = trans( A ) * x;
\endcode
// which also significantly increases the performance.
//
// \n \subsection adaptors_symmetric_matrices_views Positive Impact: Row/Column Views on Column/Row-Major Matrices
//
// Another example is the optimization of a row view on a column-major symmetric matrix:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
using blaze::Row;
using blaze::rowMajor;
using blaze::columnMajor;
typedef SymmetricMatrix< DynamicMatrix<double,columnMajor> > DynamicSymmetric;
DynamicSymmetric A( 10UL );
Row<DynamicSymmetric> row5 = row( A, 5UL );
\endcode
// Usually, a row view on a column-major matrix results in a considerable performance decrease in
// comparison to a row view on a row-major matrix due to the non-contiguous storage of the matrix
// elements. However, in case of symmetric matrices, \b Blaze instead uses the according column of
// the matrix, which provides the same performance as if the matrix would be row-major. Note that
// this also works for column views on row-major matrices, where \b Blaze can use the according
// row instead of a column in order to provide maximum performance.
//
// \n \subsection adaptors_symmetric_matrices_assignment Negative Impact: Assignment of a General Matrix
//
// In contrast to using a symmetric matrix on the right-hand side of an assignment (i.e. for read
// access), which introduces absolutely no performance penalty, using a symmetric matrix on the
// left-hand side of an assignment (i.e. for write access) may introduce additional overhead when
// it is assigned a general matrix, which is not symmetric at compile time:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
SymmetricMatrix< DynamicMatrix<double> > A, C;
DynamicMatrix<double> B;
B = A; // Only read-access to the symmetric matrix; no performance penalty
C = A; // Assignment of a symmetric matrix to another symmetric matrix; no runtime overhead
C = B; // Assignment of a general matrix to a symmetric matrix; some runtime overhead
\endcode
// When assigning a general, potentially not symmetric matrix to a symmetric matrix it is necessary
// to check whether the matrix is symmetric at runtime in order to guarantee the symmetry property
// of the symmetric matrix. In case it turns out to be symmetric, it is assigned as efficiently as
// possible, if it is not, an exception is thrown. In order to prevent this runtime overhead it is
// therefore generally advisable to assign symmetric matrices to other symmetric matrices.\n
// In this context it is especially noteworthy that in contrast to additions and subtractions the
// multiplication of two symmetric matrices does not necessarily result in another symmetric matrix:
\code
SymmetricMatrix< DynamicMatrix<double> > A, B, C;
C = A + B; // Results in a symmetric matrix; no runtime overhead
C = A - B; // Results in a symmetric matrix; no runtime overhead
C = A * B; // Is not guaranteed to result in a symmetric matrix; some runtime overhead
\endcode
// \n Previous: \ref adaptors Next: \ref adaptors_hermitian_matrices
*/
//*************************************************************************************************
//**Hermitian Matrices*****************************************************************************
/*!\page adaptors_hermitian_matrices Hermitian Matrices
//
// \tableofcontents
//
//
// \n \section adaptors_hermitian_matrices_general Hermitian Matrices
// <hr>
//
// In addition to symmetric matrices, \b Blaze also provides an adaptor for Hermitian matrices.
// Hermitian matrices provide the compile time guarantee to be square matrices with pair-wise
// conjugate complex values. Mathematically, this means that an Hermitian matrix is always equal
// to its conjugate transpose (\f$ A = \overline{A^T} \f$) and that all non-diagonal values have
// a complex conjugate counterpart (\f$ a_{ij} == \overline{a_{ji}} \f$). Within the \b Blaze
// library, Hermitian matrices are realized by the \ref adaptors_hermitian_matrices_hermitianmatrix
// class template.
//
//
// \n \section adaptors_hermitian_matrices_hermitianmatrix HermitianMatrix
// <hr>
//
// The HermitianMatrix class template is an adapter for existing dense and sparse matrix types.
// It inherits the properties and the interface of the given matrix type \c MT and extends it by
// enforcing the additional invariant of Hermitian symmetry (i.e. the matrix is always equal to
// its conjugate transpose \f$ A = \overline{A^T} \f$). It can be included via the header file
\code
#include <blaze/math/HermitianMatrix.h>
\endcode
// The type of the adapted matrix can be specified via template parameter:
\code
template< typename MT >
class HermitianMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. HermitianMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also,
// the given matrix type must have numeric element types (i.e. all integral types except \c bool,
// floating point and complex types). Note that the given matrix type must be either resizable (as
// for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as
// for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible Hermitian matrices:
\code
using blaze::unaligned;
using blaze::unpadded;
using blaze::rowMajor;
using blaze::columnMajor;
// Definition of a 3x3 row-major dense Hermitian matrix with static memory
blaze::HermitianMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A;
// Definition of a resizable column-major dense Hermitian matrix based on HybridMatrix
blaze::HermitianMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B;
// Definition of a resizable row-major dense Hermitian matrix based on DynamicMatrix
blaze::HermitianMatrix< blaze::DynamicMatrix<std::complex<double>,rowMajor> > C;
// Definition of a fixed size row-major dense Hermitian matrix based on CustomMatrix
blaze::HermitianMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D;
// Definition of a compressed row-major single precision complex Hermitian matrix
blaze::HermitianMatrix< blaze::CompressedMatrix<std::complex<float>,rowMajor> > E;
\endcode
// The storage order of a Hermitian matrix is depending on the storage order of the adapted matrix
// type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified as
// blaze::rowMajor), the Hermitian matrix will also be a row-major matrix. Otherwise, if the
// adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the Hermitian matrix
// will also be a column-major matrix.
//
//
// \n \section adaptors_hermitian_matrices_vs_symmetric_matrices Hermitian Matrices vs. Symmetric Matrices
//
// The blaze::HermitianMatrix adaptor and the blaze::SymmetricMatrix adaptor share several traits.
// However, there are a couple of differences, both from a mathematical point of view as well as
// from an implementation point of view.
//
// From a mathematical point of view, a matrix is called symmetric when it is equal to its
// transpose (\f$ A = A^T \f$) and it is called Hermitian when it is equal to its conjugate
// transpose (\f$ A = \overline{A^T} \f$). For matrices of real values, however, these two
// conditions coincide, which means that symmetric matrices of real values are also Hermitian
// and Hermitian matrices of real values are also symmetric.
//
// From an implementation point of view, \b Blaze restricts Hermitian matrices to numeric data
// types (i.e. all integral types except \c bool, floating point and complex types), whereas
// symmetric matrices can also be block matrices (i.e. can have vector or matrix elements).
// For built-in element types, the HermitianMatrix adaptor behaves exactly like the according
// SymmetricMatrix implementation. For complex element types, however, the Hermitian property
// is enforced (see also \ref adaptors_hermitian_matrices_hermitian).
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::HermitianMatrix;
using blaze::SymmetricMatrix;
// The following two matrices provide an identical experience (including performance)
HermitianMatrix< DynamicMatrix<double> > A; // Both Hermitian and symmetric
SymmetricMatrix< DynamicMatrix<double> > B; // Both Hermitian and symmetric
// The following two matrices will behave differently
HermitianMatrix< DynamicMatrix< complex<double> > > C; // Only Hermitian
SymmetricMatrix< DynamicMatrix< complex<double> > > D; // Only symmetric
// Hermitian block matrices are not allowed
HermitianMatrix< DynamicMatrix< DynamicVector<double> > > E; // Compilation error!
SymmetricMatrix< DynamicMatrix< DynamicVector<double> > > F; // Symmetric block matrix
\endcode
// \n \section adaptors_hermitian_matrices_special_properties Special Properties of Hermitian Matrices
// <hr>
//
// A Hermitian matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT.
// It also provides (nearly) the same interface as the underlying matrix type. However, there are
// some important exceptions resulting from the Hermitian symmetry constraint:
//
// -# <b>\ref adaptors_hermitian_matrices_square</b>
// -# <b>\ref adaptors_hermitian_matrices_hermitian</b>
// -# <b>\ref adaptors_hermitian_matrices_initialization</b>
//
// \n \subsection adaptors_hermitian_matrices_square Hermitian Matrices Must Always be Square!
//
// In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix,
// or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and
// the \c extend() functions only expect a single parameter, which specifies both the number of
// rows and columns, instead of two (one for the number of rows and one for the number of columns):
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
using blaze::rowMajor;
// Default constructed, default initialized, row-major 3x3 Hermitian dynamic matrix
HermitianMatrix< DynamicMatrix<std::complex<double>,rowMajor> > A( 3 );
// Resizing the matrix to 5x5
A.resize( 5 );
// Extending the number of rows and columns by 2, resulting in a 7x7 matrix
A.extend( 2 );
\endcode
// In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number
// of rows and number of columns must be specified equally:
\code
using blaze::StaticMatrix;
using blaze::HermitianMatrix;
using blaze::columnMajor;
// Correct setup of a fixed size column-major 3x3 Hermitian static matrix
HermitianMatrix< StaticMatrix<std::complex<float>,3UL,3UL,columnMajor> > A;
// Compilation error: the provided matrix type is not a square matrix type
HermitianMatrix< StaticMatrix<std::complex<float>,3UL,4UL,columnMajor> > B;
\endcode
// \n \subsection adaptors_hermitian_matrices_hermitian The Hermitian Property is Always Enforced!
//
// This means that the following properties of a Hermitian matrix are always guaranteed:
//
// - The diagonal elements are real numbers, i.e. the imaginary part is zero
// - Element \f$ a_{ij} \f$ is always the complex conjugate of element \f$ a_{ji} \f$
//
// Thus modifying the element \f$ a_{ij} \f$ of a Hermitian matrix also modifies its
// counterpart element \f$ a_{ji} \f$. Also, it is only possible to assign matrices that
// are Hermitian themselves:
\code
using blaze::CompressedMatrix;
using blaze::DynamicMatrix;
using blaze::StaticMatrix;
using blaze::HermitianMatrix;
using blaze::rowMajor;
typedef std::complex<double> cplx;
// Default constructed, row-major 3x3 Hermitian compressed matrix
HermitianMatrix< CompressedMatrix<cplx,rowMajor> > A( 3 );
// Initializing the matrix via the function call operator
//
// ( (1, 0) (0,0) (2,1) )
// ( (0, 0) (0,0) (0,0) )
// ( (2,-1) (0,0) (0,0) )
//
A(0,0) = cplx( 1.0, 0.0 ); // Initialization of the diagonal element (0,0)
A(0,2) = cplx( 2.0, 1.0 ); // Initialization of the elements (0,2) and (2,0)
// Inserting three more elements via the insert() function
//
// ( (1, 0) (0,0) (2, 1) )
// ( (0, 0) (2,0) (4,-2) )
// ( (2,-1) (4,2) (0, 0) )
//
A.insert( 1, 1, cplx( 2.0, 0.0 ) ); // Inserting the diagonal element (1,1)
A.insert( 1, 2, cplx( 4.0, -2.0 ) ); // Inserting the elements (1,2) and (2,1)
// Access via a non-const iterator
//
// ( (1, 0) (8,1) (2, 1) )
// ( (8,-1) (2,0) (4,-2) )
// ( (2,-1) (4,2) (0, 0) )
//
*A.begin(1UL) = cplx( 8.0, -1.0 ); // Modifies both elements (1,0) and (0,1)
// Erasing elements via the erase() function
//
// ( (0, 0) (8,1) (0, 0) )
// ( (8,-1) (2,0) (4,-2) )
// ( (0, 0) (4,2) (0, 0) )
//
A.erase( 0, 0 ); // Erasing the diagonal element (0,0)
A.erase( 0, 2 ); // Erasing the elements (0,2) and (2,0)
// Construction from a Hermitian dense matrix
StaticMatrix<cplx,3UL,3UL> B{ { cplx( 3.0, 0.0 ), cplx( 8.0, 2.0 ), cplx( -2.0, 2.0 ) },
{ cplx( 8.0, -2.0 ), cplx( 0.0, 0.0 ), cplx( -1.0, -1.0 ) },
{ cplx( -2.0, -2.0 ), cplx( -1.0, 1.0 ), cplx( 4.0, 0.0 ) } };
HermitianMatrix< DynamicMatrix<cplx,rowMajor> > C( B ); // OK
// Assignment of a non-Hermitian dense matrix
StaticMatrix<cplx,3UL,3UL> D{ { cplx( 3.0, 0.0 ), cplx( 7.0, 2.0 ), cplx( 3.0, 2.0 ) },
{ cplx( 8.0, 1.0 ), cplx( 0.0, 0.0 ), cplx( 6.0, 4.0 ) },
{ cplx( -2.0, 2.0 ), cplx( -1.0, 1.0 ), cplx( 4.0, 0.0 ) } };
C = D; // Throws an exception; Hermitian invariant would be violated!
\endcode
// The same restriction also applies to the \c append() function for sparse matrices: Appending
// the element \f$ a_{ij} \f$ additionally inserts the element \f$ a_{ji} \f$ into the matrix.
// Despite the additional insertion, the \c append() function still provides the most efficient
// way to set up a Hermitian sparse matrix. In order to achieve the maximum efficiency, the
// capacity of the individual rows/columns of the matrix should be specifically prepared with
// \c reserve() calls:
\code
using blaze::CompressedMatrix;
using blaze::HermitianMatrix;
using blaze::rowMajor;
typedef std::complex<double> cplx;
// Setup of the Hermitian matrix
//
// ( (0, 0) (1,2) (3,-4) )
// A = ( (1,-2) (2,0) (0, 0) )
// ( (3, 4) (0,0) (0, 0) )
//
HermitianMatrix< CompressedMatrix<cplx,rowMajor> > A( 3 );
A.reserve( 5 ); // Reserving enough space for 5 non-zero elements
A.reserve( 0, 2 ); // Reserving two non-zero elements in the first row
A.reserve( 1, 2 ); // Reserving two non-zero elements in the second row
A.reserve( 2, 1 ); // Reserving a single non-zero element in the third row
A.append( 0, 1, cplx( 1.0, 2.0 ) ); // Appending an element at position (0,1) and (1,0)
A.append( 1, 1, cplx( 2.0, 0.0 ) ); // Appending an element at position (1,1)
A.append( 2, 0, cplx( 3.0, 4.0 ) ); // Appending an element at position (2,0) and (0,2)
\endcode
// The Hermitian property is also enforced for Hermitian custom matrices: In case the given array
// of elements does not represent a Hermitian matrix, a \c std::invalid_argument exception is
// thrown:
\code
using blaze::CustomMatrix;
using blaze::HermitianMatrix;
using blaze::unaligned;
using blaze::unpadded;
using blaze::rowMajor;
typedef HermitianMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> > CustomHermitian;
// Creating a 3x3 Hermitian custom matrix from a properly initialized array
double array[9] = { 1.0, 2.0, 4.0,
2.0, 3.0, 5.0,
4.0, 5.0, 6.0 };
CustomHermitian A( array, 3UL ); // OK
// Attempt to create a second 3x3 Hermitian custom matrix from an uninitialized array
CustomHermitian B( new double[9UL], 3UL, blaze::ArrayDelete() ); // Throws an exception
\endcode
// Finally, the Hermitian property is enforced for views (rows, columns, submatrices, ...) on the
// Hermitian matrix. The following example demonstrates that modifying the elements of an entire
// row of the Hermitian matrix also affects the counterpart elements in the according column of
// the matrix:
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
typedef std::complex<double> cplx;
// Setup of the Hermitian matrix
//
// ( (0, 0) (1,-1) (0,0) (2, 1) )
// A = ( (1, 1) (3, 0) (4,2) (0, 0) )
// ( (0, 0) (4,-2) (0,0) (5,-3) )
// ( (2,-1) (0, 0) (5,3) (0, 0) )
//
HermitianMatrix< DynamicMatrix<cplx> > A( 4 );
A(0,1) = cplx( 1.0, -1.0 );
A(0,3) = cplx( 2.0, 1.0 );
A(1,1) = cplx( 3.0, 0.0 );
A(1,2) = cplx( 4.0, 2.0 );
A(2,3) = cplx( 5.0, 3.0 );
// Setting all elements in the 1st row to 0 results in the matrix
//
// ( (0, 0) (0,0) (0,0) (2, 1) )
// A = ( (0, 0) (0,0) (0,0) (0, 0) )
// ( (0, 0) (0,0) (0,0) (5,-3) )
// ( (2,-1) (0,0) (5,3) (0, 0) )
//
row( A, 1 ) = cplx( 0.0, 0.0 );
\endcode
// The next example demonstrates the (compound) assignment to submatrices of Hermitian matrices.
// Since the modification of element \f$ a_{ij} \f$ of a Hermitian matrix also modifies the
// element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the Hermitian
// symmetry of the matrix is preserved. Otherwise a \c std::invalid_argument exception is thrown:
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
typedef std::complex<double> cplx;
// Setup of two default 4x4 Hermitian matrices
HermitianMatrix< DynamicMatrix<cplx> > A1( 4 ), A2( 4 );
// Setup of the 3x2 dynamic matrix
//
// ( (1,-1) (2, 5) )
// B = ( (3, 0) (4,-6) )
// ( (5, 0) (6, 0) )
//
DynamicMatrix<cplx> B( 3UL, 2UL );
B(0,0) = cplx( 1.0, -1.0 );
B(0,1) = cplx( 2.0, 5.0 );
B(1,0) = cplx( 3.0, 0.0 );
B(1,1) = cplx( 4.0, -6.0 );
B(2,0) = cplx( 5.0, 0.0 );
B(2,1) = cplx( 6.0, 0.0 );
// OK: Assigning B to a submatrix of A1 such that the Hermitian property is preserved
//
// ( (0, 0) (0, 0) (1,-1) (2, 5) )
// A1 = ( (0, 0) (0, 0) (3, 0) (4,-6) )
// ( (1, 1) (3, 0) (5, 0) (6, 0) )
// ( (2,-5) (4, 6) (6, 0) (0, 0) )
//
submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK
// Error: Assigning B to a submatrix of A2 such that the Hermitian property isn't preserved!
// The elements marked with X cannot be assigned unambiguously!
//
// ( (0, 0) (1,-1) (2,5) (0,0) )
// A2 = ( (1, 1) (3, 0) (X,X) (0,0) )
// ( (2,-5) (X, X) (6,0) (0,0) )
// ( (0, 0) (0, 0) (0,0) (0,0) )
//
submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception!
\endcode
// \n \subsection adaptors_hermitian_matrices_initialization The Elements of a Dense Hermitian Matrix are Always Default Initialized!
//
// Although this results in a small loss of efficiency (especially in case all default values are
// overridden afterwards), this property is important since otherwise the Hermitian property of
// dense Hermitian matrices could not be guaranteed:
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
// Uninitialized, 5x5 row-major dynamic matrix
DynamicMatrix<int,rowMajor> A( 5, 5 );
// Default initialized, 5x5 row-major Hermitian dynamic matrix
HermitianMatrix< DynamicMatrix<int,rowMajor> > B( 5 );
\endcode
// \n \section adaptors_hermitian_matrices_arithmetic_operations Arithmetic Operations
// <hr>
//
// A HermitianMatrix can be used within all numerical operations in any way any other dense or
// sparse matrix can be used. It can also be combined with any other dense or sparse vector or
// matrix. The following code example gives an impression of the use of HermitianMatrix within
// arithmetic operations:
\code
using blaze::HermitianMatrix;
using blaze::DynamicMatrix;
using blaze::HybridMatrix;
using blaze::StaticMatrix;
using blaze::CompressedMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
typedef std::complex<float> cplx;
DynamicMatrix<cplx,rowMajor> A( 3, 3 );
CompressedMatrix<cplx,rowMajor> B( 3, 3 );
HermitianMatrix< DynamicMatrix<cplx,rowMajor> > C( 3 );
HermitianMatrix< CompressedMatrix<cplx,rowMajor> > D( 3 );
HermitianMatrix< HybridMatrix<cplx,3UL,3UL,rowMajor> > E;
HermitianMatrix< StaticMatrix<cplx,3UL,3UL,columnMajor> > F;
E = A + B; // Matrix addition and assignment to a row-major Hermitian matrix (includes runtime check)
F = C - D; // Matrix subtraction and assignment to a column-major Hermitian matrix (only compile time check)
F = A * D; // Matrix multiplication between a dense and a sparse matrix (includes runtime check)
C *= 2.0; // In-place scaling of matrix C
E = 2.0 * B; // Scaling of matrix B (includes runtime check)
F = C * 2.0; // Scaling of matrix C (only compile time check)
E += A - B; // Addition assignment (includes runtime check)
F -= C + D; // Subtraction assignment (only compile time check)
F *= A * D; // Multiplication assignment (includes runtime check)
\endcode
// Note that it is possible to assign any kind of matrix to a Hermitian matrix. In case the matrix
// to be assigned is not Hermitian at compile time, a runtime check is performed.
//
//
// \n \section adaptors_hermitian_matrices_performance Performance Considerations
// <hr>
//
// When the Hermitian property of a matrix is known beforehand, using the HermitianMatrix adaptor
// instead of a general matrix can be a considerable performance advantage. This is particularly
// true in case the Hermitian matrix is also symmetric (i.e. has built-in element types). The
// \b Blaze library tries to exploit the properties of Hermitian (symmetric) matrices whenever
// possible. However, there are also situations when using a Hermitian matrix introduces some
// overhead. The following examples demonstrate several situations where Hermitian matrices can
// positively or negatively impact performance.
//
// \n \subsection adaptors_hermitian_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication
//
// When multiplying two matrices, at least one of which is symmetric, \b Blaze can exploit the fact
// that \f$ A = A^T \f$ and choose the fastest and most suited combination of storage orders for the
// multiplication. The following example demonstrates this by means of a dense matrix/sparse matrix
// multiplication:
\code
using blaze::CompressedMatrix;
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
HermitianMatrix< DynamicMatrix<double,rowMajor> > A; // Both Hermitian and symmetric
HermitianMatrix< CompressedMatrix<double,columnMajor> > B; // Both Hermitian and symmetric
DynamicMatrix<double,columnMajor> C;
// ... Resizing and initialization
C = A * B;
\endcode
// Intuitively, the chosen combination of a row-major and a column-major matrix is the most suited
// for maximum performance. However, \b Blaze evaluates the multiplication as
\code
C = A * trans( B );
\endcode
// which significantly increases the performance since in contrast to the original formulation the
// optimized form can be vectorized. Therefore, in the context of matrix multiplications, using a
// symmetric matrix is obviously an advantage.
//
// \n \subsection adaptors_hermitian_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication
//
// A similar optimization is possible in case of matrix/vector multiplications:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::CompressedVector;
using blaze::HermitianMatrix;
using blaze::rowMajor;
using blaze::columnVector;
HermitianMatrix< DynamicMatrix<double,rowMajor> > A; // Hermitian and symmetric
CompressedVector<double,columnVector> x;
DynamicVector<double,columnVector> y;
// ... Resizing and initialization
y = A * x;
\endcode
// In this example it is not intuitively apparent that using a row-major matrix is not the best
// possible choice in terms of performance since the computation cannot be vectorized. Choosing
// a column-major matrix instead, however, would enable a vectorized computation. Therefore
// \b Blaze exploits the fact that \c A is symmetric, selects the best suited storage order and
// evaluates the multiplication as
\code
y = trans( A ) * x;
\endcode
// which also significantly increases the performance.
//
// \n \subsection adaptors_hermitian_matrices_views Positive Impact: Row/Column Views on Column/Row-Major Matrices
//
// Another example is the optimization of a row view on a column-major symmetric matrix:
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
using blaze::Row;
using blaze::rowMajor;
using blaze::columnMajor;
typedef HermitianMatrix< DynamicMatrix<double,columnMajor> > DynamicHermitian;
DynamicHermitian A( 10UL ); // Both Hermitian and symmetric
Row<DynamicHermitian> row5 = row( A, 5UL );
\endcode
// Usually, a row view on a column-major matrix results in a considerable performance decrease in
// comparison to a row view on a row-major matrix due to the non-contiguous storage of the matrix
// elements. However, in case of symmetric matrices, \b Blaze instead uses the according column of
// the matrix, which provides the same performance as if the matrix would be row-major. Note that
// this also works for column views on row-major matrices, where \b Blaze can use the according
// row instead of a column in order to provide maximum performance.
//
// \n \subsection adaptors_hermitian_matrices_assignment Negative Impact: Assignment of a General Matrix
//
// In contrast to using a Hermitian matrix on the right-hand side of an assignment (i.e. for read
// access), which introduces absolutely no performance penalty, using a Hermitian matrix on the
// left-hand side of an assignment (i.e. for write access) may introduce additional overhead when
// it is assigned a general matrix, which is not Hermitian at compile time:
\code
using blaze::DynamicMatrix;
using blaze::HermitianMatrix;
HermitianMatrix< DynamicMatrix< std::complex<double> > > A, C;
DynamicMatrix<double> B;
B = A; // Only read-access to the Hermitian matrix; no performance penalty
C = A; // Assignment of a Hermitian matrix to another Hermitian matrix; no runtime overhead
C = B; // Assignment of a general matrix to a Hermitian matrix; some runtime overhead
\endcode
// When assigning a general, potentially not Hermitian matrix to a Hermitian matrix it is necessary
// to check whether the matrix is Hermitian at runtime in order to guarantee the Hermitian property
// of the Hermitian matrix. In case it turns out to be Hermitian, it is assigned as efficiently as
// possible, if it is not, an exception is thrown. In order to prevent this runtime overhead it is
// therefore generally advisable to assign Hermitian matrices to other Hermitian matrices.\n
// In this context it is especially noteworthy that in contrast to additions and subtractions the
// multiplication of two Hermitian matrices does not necessarily result in another Hermitian matrix:
\code
HermitianMatrix< DynamicMatrix<double> > A, B, C;
C = A + B; // Results in a Hermitian matrix; no runtime overhead
C = A - B; // Results in a Hermitian matrix; no runtime overhead
C = A * B; // Is not guaranteed to result in a Hermitian matrix; some runtime overhead
\endcode
// \n Previous: \ref adaptors_symmetric_matrices Next: \ref adaptors_triangular_matrices
*/
//*************************************************************************************************
//**Triangular Matrices****************************************************************************
/*!\page adaptors_triangular_matrices Triangular Matrices
//
// \tableofcontents
//
//
// \n \section adaptors_triangular_matrices_general Triangular Matrices
// <hr>
//
// Triangular matrices come in three flavors: Lower triangular matrices provide the compile time
// guarantee to be square matrices and that the upper part of the matrix contains only default
// elements that cannot be modified. Upper triangular matrices on the other hand provide the
// compile time guarantee to be square and that the lower part of the matrix contains only fixed
// default elements. Finally, diagonal matrices provide the compile time guarantee to be square
// and that both the lower and upper part of the matrix contain only immutable default elements.
// These properties can be exploited to gain higher performance and/or to save memory. Within the
// \b Blaze library, several kinds of lower and upper triangular and diagonal matrices are realized
// by the following class templates:
//
// Lower triangular matrices:
// - <b>\ref adaptors_triangular_matrices_lowermatrix</b>
// - <b>\ref adaptors_triangular_matrices_unilowermatrix</b>
// - <b>\ref adaptors_triangular_matrices_strictlylowermatrix</b>
//
// Upper triangular matrices:
// - <b>\ref adaptors_triangular_matrices_uppermatrix</b>
// - <b>\ref adaptors_triangular_matrices_uniuppermatrix</b>
// - <b>\ref adaptors_triangular_matrices_strictlyuppermatrix</b>
//
// Diagonal matrices
// - <b>\ref adaptors_triangular_matrices_diagonalmatrix</b>
//
//
// \n \section adaptors_triangular_matrices_lowermatrix LowerMatrix
// <hr>
//
// The blaze::LowerMatrix class template is an adapter for existing dense and sparse matrix types.
// It inherits the properties and the interface of the given matrix type \c MT and extends it by
// enforcing the additional invariant that all matrix elements above the diagonal are 0 (lower
// triangular matrix):
\f[\left(\begin{array}{*{5}{c}}
l_{0,0} & 0 & 0 & \cdots & 0 \\
l_{1,0} & l_{1,1} & 0 & \cdots & 0 \\
l_{2,0} & l_{2,1} & l_{2,2} & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
l_{N,0} & l_{N,1} & l_{N,2} & \cdots & l_{N,N} \\
\end{array}\right).\f]
// It can be included via the header file
\code
#include <blaze/math/LowerMatrix.h>
\endcode
// The type of the adapted matrix can be specified via the first template parameter:
\code
template< typename MT >
class LowerMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. blaze::LowerMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note
// that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or
// blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible lower matrices:
\code
using blaze::unaligned;
using blaze::unpadded;
using blaze::rowMajor;
using blaze::columnMajor;
// Definition of a 3x3 row-major dense lower matrix with static memory
blaze::LowerMatrix< blaze::StaticMatrix<int,3UL,3UL,rowMajor> > A;
// Definition of a resizable column-major dense lower matrix based on HybridMatrix
blaze::LowerMatrix< blaze::HybridMatrix<float,4UL,4UL,columnMajor> > B;
// Definition of a resizable row-major dense lower matrix based on DynamicMatrix
blaze::LowerMatrix< blaze::DynamicMatrix<double,rowMajor> > C;
// Definition of a fixed size row-major dense lower matrix based on CustomMatrix
blaze::LowerMatrix< blaze::CustomMatrix<double,unaligned,unpadded,rowMajor> > D;
// Definition of a compressed row-major single precision lower matrix
blaze::LowerMatrix< blaze::CompressedMatrix<float,rowMajor> > E;
\endcode
// The storage order of a lower matrix is depending on the storage order of the adapted matrix
// type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified
// as blaze::rowMajor), the lower matrix will also be a row-major matrix. Otherwise, if the
// adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the lower matrix
// will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_unilowermatrix UniLowerMatrix
// <hr>
//
// The blaze::UniLowerMatrix class template is an adapter for existing dense and sparse matrix
// types. It inherits the properties and the interface of the given matrix type \c MT and extends
// it by enforcing the additional invariant that all diagonal matrix elements are 1 and all matrix
// elements above the diagonal are 0 (lower unitriangular matrix):
\f[\left(\begin{array}{*{5}{c}}
1 & 0 & 0 & \cdots & 0 \\
l_{1,0} & 1 & 0 & \cdots & 0 \\
l_{2,0} & l_{2,1} & 1 & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
l_{N,0} & l_{N,1} & l_{N,2} & \cdots & 1 \\
\end{array}\right).\f]
// It can be included via the header file
\code
#include <blaze/math/UniLowerMatrix.h>
\endcode
// The type of the adapted matrix can be specified via the first template parameter:
\code
template< typename MT >
class UniLowerMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. blaze::UniLowerMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also,
// the given matrix type must have numeric element types (i.e. all integral types except \c bool,
// floating point and complex types). Note that the given matrix type must be either resizable (as
// for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as
// for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible lower unitriangular matrices:
\code
// Definition of a 3x3 row-major dense unilower matrix with static memory
blaze::UniLowerMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;
// Definition of a resizable column-major dense unilower matrix based on HybridMatrix
blaze::UniLowerMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;
// Definition of a resizable row-major dense unilower matrix based on DynamicMatrix
blaze::UniLowerMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;
// Definition of a compressed row-major single precision unilower matrix
blaze::UniLowerMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
\endcode
// The storage order of a lower unitriangular matrix is depending on the storage order of the
// adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e.
// is specified as blaze::rowMajor), the unilower matrix will also be a row-major matrix.
// Otherwise if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor),
// the unilower matrix will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_strictlylowermatrix StrictlyLowerMatrix
// <hr>
//
// The blaze::StrictlyLowerMatrix class template is an adapter for existing dense and sparse matrix
// types. It inherits the properties and the interface of the given matrix type \c MT and extends
// it by enforcing the additional invariant that all diagonal matrix elements and all matrix
// elements above the diagonal are 0 (strictly lower triangular matrix):
\f[\left(\begin{array}{*{5}{c}}
0 & 0 & 0 & \cdots & 0 \\
l_{1,0} & 0 & 0 & \cdots & 0 \\
l_{2,0} & l_{2,1} & 0 & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
l_{N,0} & l_{N,1} & l_{N,2} & \cdots & 0 \\
\end{array}\right).\f]
// It can be included via the header file
\code
#include <blaze/math/StrictlyLowerMatrix.h>
\endcode
// The type of the adapted matrix can be specified via the first template parameter:
\code
template< typename MT >
class StrictlyLowerMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. blaze::StrictlyLowerMatrix can be used
// with any non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix
// type. Note that the given matrix type must be either resizable (as for instance
// blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as for instance
// blaze::StaticMatrix).
//
// The following examples give an impression of several possible strictly lower triangular matrices:
\code
// Definition of a 3x3 row-major dense strictly lower matrix with static memory
blaze::StrictlyLowerMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;
// Definition of a resizable column-major dense strictly lower matrix based on HybridMatrix
blaze::StrictlyLowerMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;
// Definition of a resizable row-major dense strictly lower matrix based on DynamicMatrix
blaze::StrictlyLowerMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;
// Definition of a compressed row-major single precision strictly lower matrix
blaze::StrictlyLowerMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
\endcode
// The storage order of a strictly lower triangular matrix is depending on the storage order of
// the adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e.
// is specified as blaze::rowMajor), the strictly lower matrix will also be a row-major matrix.
// Otherwise if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor),
// the strictly lower matrix will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_uppermatrix UpperMatrix
// <hr>
//
// The blaze::UpperMatrix class template is an adapter for existing dense and sparse matrix types.
// It inherits the properties and the interface of the given matrix type \c MT and extends it by
// enforcing the additional invariant that all matrix elements below the diagonal are 0 (upper
// triangular matrix):
\f[\left(\begin{array}{*{5}{c}}
u_{0,0} & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\
0 & u_{1,1} & u_{1,2} & \cdots & u_{1,N} \\
0 & 0 & u_{2,2} & \cdots & u_{2,N} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
0 & 0 & 0 & \cdots & u_{N,N} \\
\end{array}\right).\f]
// It can be included via the header file
\code
#include <blaze/math/UpperMatrix.h>
\endcode
// The type of the adapted matrix can be specified via the first template parameter:
\code
template< typename MT >
class UpperMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. blaze::UpperMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note
// that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or
// blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible upper matrices:
\code
// Definition of a 3x3 row-major dense upper matrix with static memory
blaze::UpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;
// Definition of a resizable column-major dense upper matrix based on HybridMatrix
blaze::UpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;
// Definition of a resizable row-major dense upper matrix based on DynamicMatrix
blaze::UpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;
// Definition of a compressed row-major single precision upper matrix
blaze::UpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
\endcode
// The storage order of an upper matrix is depending on the storage order of the adapted matrix
// type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified
// as blaze::rowMajor), the upper matrix will also be a row-major matrix. Otherwise, if the
// adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the upper matrix
// will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_uniuppermatrix UniUpperMatrix
// <hr>
//
// The blaze::UniUpperMatrix class template is an adapter for existing dense and sparse matrix
// types. It inherits the properties and the interface of the given matrix type \c MT and extends
// it by enforcing the additional invariant that all diagonal matrix elements are 1 and all matrix
// elements below the diagonal are 0 (upper unitriangular matrix):
\f[\left(\begin{array}{*{5}{c}}
1 & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\
0 & 1 & u_{1,2} & \cdots & u_{1,N} \\
0 & 0 & 1 & \cdots & u_{2,N} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
0 & 0 & 0 & \cdots & 1 \\
\end{array}\right).\f]
// It can be included via the header file
\code
#include <blaze/math/UniUpperMatrix.h>
\endcode
// The type of the adapted matrix can be specified via the first template parameter:
\code
template< typename MT >
class UniUpperMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. blaze::UniUpperMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Also,
// the given matrix type must have numeric element types (i.e. all integral types except \c bool,
// floating point and complex types). Note that the given matrix type must be either resizable (as
// for instance blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as
// for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible upper unitriangular matrices:
\code
// Definition of a 3x3 row-major dense uniupper matrix with static memory
blaze::UniUpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;
// Definition of a resizable column-major dense uniupper matrix based on HybridMatrix
blaze::UniUpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;
// Definition of a resizable row-major dense uniupper matrix based on DynamicMatrix
blaze::UniUpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;
// Definition of a compressed row-major single precision uniupper matrix
blaze::UniUpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
\endcode
// The storage order of an upper unitriangular matrix is depending on the storage order of the
// adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e.
// is specified as blaze::rowMajor), the uniupper matrix will also be a row-major matrix.
// Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor),
// the uniupper matrix will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_strictlyuppermatrix StrictlyUpperMatrix
// <hr>
//
// The blaze::StrictlyUpperMatrix class template is an adapter for existing dense and sparse matrix
// types. It inherits the properties and the interface of the given matrix type \c MT and extends
// it by enforcing the additional invariant that all diagonal matrix elements and all matrix
// elements below the diagonal are 0 (strictly upper triangular matrix):
\f[\left(\begin{array}{*{5}{c}}
0 & u_{0,1} & u_{0,2} & \cdots & u_{0,N} \\
0 & 0 & u_{1,2} & \cdots & u_{1,N} \\
0 & 0 & 0 & \cdots & u_{2,N} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
0 & 0 & 0 & \cdots & 0 \\
\end{array}\right).\f]
// It can be included via the header file
\code
#include <blaze/math/StrictlyUpperMatrix.h>
\endcode
// The type of the adapted matrix can be specified via the first template parameter:
\code
template< typename MT >
class StrictlyUpperMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. blaze::StrictlyUpperMatrix can be used
// with any non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix
// type. Note that the given matrix type must be either resizable (as for instance
// blaze::HybridMatrix or blaze::DynamicMatrix) or must be square at compile time (as for instance
// blaze::StaticMatrix).
//
// The following examples give an impression of several possible strictly upper triangular matrices:
\code
// Definition of a 3x3 row-major dense strictly upper matrix with static memory
blaze::StrictlyUpperMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;
// Definition of a resizable column-major dense strictly upper matrix based on HybridMatrix
blaze::StrictlyUpperMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;
// Definition of a resizable row-major dense strictly upper matrix based on DynamicMatrix
blaze::StrictlyUpperMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;
// Definition of a compressed row-major single precision strictly upper matrix
blaze::StrictlyUpperMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
\endcode
// The storage order of a strictly upper triangular matrix is depending on the storage order of
// the adapted matrix type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e.
// is specified as blaze::rowMajor), the strictly upper matrix will also be a row-major matrix.
// Otherwise, if the adapted matrix is column-major (i.e. is specified as blaze::columnMajor),
// the strictly upper matrix will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_diagonalmatrix DiagonalMatrix
// <hr>
//
// The blaze::DiagonalMatrix class template is an adapter for existing dense and sparse matrix
// types. It inherits the properties and the interface of the given matrix type \c MT and extends
// it by enforcing the additional invariant that all matrix elements above and below the diagonal
// are 0 (diagonal matrix):
\f[\left(\begin{array}{*{5}{c}}
l_{0,0} & 0 & 0 & \cdots & 0 \\
0 & l_{1,1} & 0 & \cdots & 0 \\
0 & 0 & l_{2,2} & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
0 & 0 & 0 & \cdots & l_{N,N} \\
\end{array}\right).\f]
// It can be included via the header file
\code
#include <blaze/math/DiagonalMatrix.h>
\endcode
// The type of the adapted matrix can be specified via the first template parameter:
\code
template< typename MT >
class DiagonalMatrix;
\endcode
// \c MT specifies the type of the matrix to be adapted. blaze::DiagonalMatrix can be used with any
// non-cv-qualified, non-reference, non-pointer, non-expression dense or sparse matrix type. Note
// that the given matrix type must be either resizable (as for instance blaze::HybridMatrix or
// blaze::DynamicMatrix) or must be square at compile time (as for instance blaze::StaticMatrix).
//
// The following examples give an impression of several possible diagonal matrices:
\code
// Definition of a 3x3 row-major dense diagonal matrix with static memory
blaze::DiagonalMatrix< blaze::StaticMatrix<int,3UL,3UL,blaze::rowMajor> > A;
// Definition of a resizable column-major dense diagonal matrix based on HybridMatrix
blaze::DiagonalMatrix< blaze::HybridMatrix<float,4UL,4UL,blaze::columnMajor> > B;
// Definition of a resizable row-major dense diagonal matrix based on DynamicMatrix
blaze::DiagonalMatrix< blaze::DynamicMatrix<double,blaze::rowMajor> > C;
// Definition of a compressed row-major single precision diagonal matrix
blaze::DiagonalMatrix< blaze::CompressedMatrix<float,blaze::rowMajor> > D;
\endcode
// The storage order of a diagonal matrix is depending on the storage order of the adapted matrix
// type \c MT. In case the adapted matrix is stored in a row-wise fashion (i.e. is specified
// as blaze::rowMajor), the diagonal matrix will also be a row-major matrix. Otherwise, if the
// adapted matrix is column-major (i.e. is specified as blaze::columnMajor), the diagonal matrix
// will also be a column-major matrix.
//
//
// \n \section adaptors_triangular_matrices_special_properties Special Properties of Triangular Matrices
// <hr>
//
// A triangular matrix is used exactly like a matrix of the underlying, adapted matrix type \c MT.
// It also provides (nearly) the same interface as the underlying matrix type. However, there are
// some important exceptions resulting from the triangular matrix constraint:
//
// -# <b>\ref adaptors_triangular_matrices_square</b>
// -# <b>\ref adaptors_triangular_matrices_triangular</b>
// -# <b>\ref adaptors_triangular_matrices_initialization</b>
// -# <b>\ref adaptors_triangular_matrices_storage</b>
// -# <b>\ref adaptors_triangular_matrices_scaling</b>
//
// \n \subsection adaptors_triangular_matrices_square Triangular Matrices Must Always be Square!
//
// In case a resizable matrix is used (as for instance blaze::HybridMatrix, blaze::DynamicMatrix,
// or blaze::CompressedMatrix), this means that the according constructors, the \c resize() and
// the \c extend() functions only expect a single parameter, which specifies both the number of
// rows and columns, instead of two (one for the number of rows and one for the number of columns):
\code
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
using blaze::rowMajor;
// Default constructed, default initialized, row-major 3x3 lower dynamic matrix
LowerMatrix< DynamicMatrix<double,rowMajor> > A( 3 );
// Resizing the matrix to 5x5
A.resize( 5 );
// Extending the number of rows and columns by 2, resulting in a 7x7 matrix
A.extend( 2 );
\endcode
// In case a matrix with a fixed size is used (as for instance blaze::StaticMatrix), the number
// of rows and number of columns must be specified equally:
\code
using blaze::StaticMatrix;
using blaze::LowerMatrix;
using blaze::columnMajor;
// Correct setup of a fixed size column-major 3x3 lower static matrix
LowerMatrix< StaticMatrix<int,3UL,3UL,columnMajor> > A;
// Compilation error: the provided matrix type is not a square matrix type
LowerMatrix< StaticMatrix<int,3UL,4UL,columnMajor> > B;
\endcode
// \n \subsection adaptors_triangular_matrices_triangular The Triangular Property is Always Enforced!
//
// This means that it is only allowed to modify elements in the lower part or the diagonal of
// a lower triangular matrix and in the upper part or the diagonal of an upper triangular matrix.
// Unitriangular and strictly triangular matrices are even more restrictive and don't allow the
// modification of diagonal elements. Also, triangular matrices can only be assigned matrices that
// don't violate their triangular property. The following example demonstrates this restriction
// by means of the blaze::LowerMatrix adaptor. For examples with other triangular matrix types
// see the according class documentations.
\code
using blaze::CompressedMatrix;
using blaze::DynamicMatrix;
using blaze::StaticMatrix;
using blaze::LowerMatrix;
using blaze::rowMajor;
typedef LowerMatrix< CompressedMatrix<double,rowMajor> > CompressedLower;
// Default constructed, row-major 3x3 lower compressed matrix
CompressedLower A( 3 );
// Initializing elements via the function call operator
A(0,0) = 1.0; // Initialization of the diagonal element (0,0)
A(2,0) = 2.0; // Initialization of the lower element (2,0)
A(1,2) = 9.0; // Throws an exception; invalid modification of upper element
// Inserting two more elements via the insert() function
A.insert( 1, 0, 3.0 ); // Inserting the lower element (1,0)
A.insert( 2, 1, 4.0 ); // Inserting the lower element (2,1)
A.insert( 0, 2, 9.0 ); // Throws an exception; invalid insertion of upper element
// Appending an element via the append() function
A.reserve( 1, 3 ); // Reserving enough capacity in row 1
A.append( 1, 1, 5.0 ); // Appending the diagonal element (1,1)
A.append( 1, 2, 9.0 ); // Throws an exception; appending an element in the upper part
// Access via a non-const iterator
CompressedLower::Iterator it = A.begin(1);
*it = 6.0; // Modifies the lower element (1,0)
++it;
*it = 9.0; // Modifies the diagonal element (1,1)
// Erasing elements via the erase() function
A.erase( 0, 0 ); // Erasing the diagonal element (0,0)
A.erase( 2, 0 ); // Erasing the lower element (2,0)
// Construction from a lower dense matrix
StaticMatrix<double,3UL,3UL> B{ { 3.0, 0.0, 0.0 },
{ 8.0, 0.0, 0.0 },
{ -2.0, -1.0, 4.0 } };
LowerMatrix< DynamicMatrix<double,rowMajor> > C( B ); // OK
// Assignment of a non-lower dense matrix
StaticMatrix<double,3UL,3UL> D{ { 3.0, 0.0, -2.0 },
{ 8.0, 0.0, 0.0 },
{ -2.0, -1.0, 4.0 } };
C = D; // Throws an exception; lower matrix invariant would be violated!
\endcode
// The triangular property is also enforced during the construction of triangular custom matrices:
// In case the given array of elements does not represent the according triangular matrix type, a
// \c std::invalid_argument exception is thrown:
\code
using blaze::CustomMatrix;
using blaze::LowerMatrix;
using blaze::unaligned;
using blaze::unpadded;
using blaze::rowMajor;
typedef LowerMatrix< CustomMatrix<double,unaligned,unpadded,rowMajor> > CustomLower;
// Creating a 3x3 lower custom matrix from a properly initialized array
double array[9] = { 1.0, 0.0, 0.0,
2.0, 3.0, 0.0,
4.0, 5.0, 6.0 };
CustomLower A( array, 3UL ); // OK
// Attempt to create a second 3x3 lower custom matrix from an uninitialized array
CustomLower B( new double[9UL], 3UL, blaze::ArrayDelete() ); // Throws an exception
\endcode
// Finally, the triangular matrix property is enforced for views (rows, columns, submatrices, ...)
// on the triangular matrix. The following example demonstrates that modifying the elements of an
// entire row and submatrix of a lower matrix only affects the lower and diagonal matrix elements.
// Again, this example uses blaze::LowerMatrix, for examples with other triangular matrix types
// see the according class documentations.
\code
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
// Setup of the lower matrix
//
// ( 0 0 0 0 )
// A = ( 1 2 0 0 )
// ( 0 3 0 0 )
// ( 4 0 5 0 )
//
LowerMatrix< DynamicMatrix<int> > A( 4 );
A(1,0) = 1;
A(1,1) = 2;
A(2,1) = 3;
A(3,0) = 4;
A(3,2) = 5;
// Setting the lower and diagonal elements in the 2nd row to 9 results in the matrix
//
// ( 0 0 0 0 )
// A = ( 1 2 0 0 )
// ( 9 9 9 0 )
// ( 4 0 5 0 )
//
row( A, 2 ) = 9;
// Setting the lower and diagonal elements in the 1st and 2nd column to 7 results in
//
// ( 0 0 0 0 )
// A = ( 1 7 0 0 )
// ( 9 7 7 0 )
// ( 4 7 7 0 )
//
submatrix( A, 0, 1, 4, 2 ) = 7;
\endcode
// The next example demonstrates the (compound) assignment to rows/columns and submatrices of
// triangular matrices. Since only lower/upper and potentially diagonal elements may be modified
// the matrix to be assigned must be structured such that the triangular matrix invariant of the
// matrix is preserved. Otherwise a \c std::invalid_argument exception is thrown:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::LowerMatrix;
using blaze::rowVector;
// Setup of two default 4x4 lower matrices
LowerMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 );
// Setup of a 4-dimensional vector
//
// v = ( 1 2 3 0 )
//
DynamicVector<int,rowVector> v{ 1, 2, 3, 0 };
// OK: Assigning v to the 2nd row of A1 preserves the lower matrix invariant
//
// ( 0 0 0 0 )
// A1 = ( 0 0 0 0 )
// ( 1 2 3 0 )
// ( 0 0 0 0 )
//
row( A1, 2 ) = v; // OK
// Error: Assigning v to the 1st row of A1 violates the lower matrix invariant! The element
// marked with X cannot be assigned and triggers an exception.
//
// ( 0 0 0 0 )
// A1 = ( 1 2 X 0 )
// ( 1 2 3 0 )
// ( 0 0 0 0 )
//
row( A1, 1 ) = v; // Assignment throws an exception!
// Setup of the 3x2 dynamic matrix
//
// ( 0 0 )
// B = ( 7 0 )
// ( 8 9 )
//
DynamicMatrix<int> B( 3UL, 2UL, 0 );
B(1,0) = 7;
B(2,0) = 8;
B(2,1) = 9;
// OK: Assigning B to a submatrix of A2 such that the lower matrix invariant can be preserved
//
// ( 0 0 0 0 )
// A2 = ( 0 7 0 0 )
// ( 0 8 9 0 )
// ( 0 0 0 0 )
//
submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // OK
// Error: Assigning B to a submatrix of A2 such that the lower matrix invariant cannot be
// preserved! The elements marked with X cannot be assigned without violating the invariant!
//
// ( 0 0 0 0 )
// A2 = ( 0 0 X 0 )
// ( 0 0 8 X )
// ( 0 0 0 0 )
//
submatrix( A2, 0UL, 2UL, 3UL, 2UL ) = B; // Assignment throws an exception!
\endcode
// \n \subsection adaptors_triangular_matrices_initialization The Elements of a Dense Triangular Matrix are Always Default Initialized!
//
// Although this results in a small loss of efficiency during the creation of a dense lower or
// upper matrix this initialization is important since otherwise the lower/upper matrix property
// of dense lower matrices would not be guaranteed:
\code
using blaze::DiagonalMatrix;
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
using blaze::UpperMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
// Uninitialized, 5x5 row-major dynamic matrix
DynamicMatrix<int,rowMajor> A( 5, 5 );
// 5x5 row-major lower dynamic matrix with default initialized upper matrix
LowerMatrix< DynamicMatrix<int,rowMajor> > B( 5 );
// 7x7 column-major upper dynamic matrix with default initialized lower matrix
UpperMatrix< DynamicMatrix<int,columnMajor> > C( 7 );
// 3x3 row-major diagonal dynamic matrix with default initialized lower and upper matrix
DiagonalMatrix< DynamicMatrix<int,rowMajor> > D( 3 );
\endcode
// \n \subsection adaptors_triangular_matrices_storage Dense Triangular Matrices Store All Elements!
//
// All dense triangular matrices store all \f$ N \times N \f$ elements, including the immutable
// elements in the lower or upper part, respectively. Therefore dense triangular matrices don't
// provide any kind of memory reduction! There are two main reasons for this: First, storing also
// the zero elements guarantees maximum performance for many algorithms that perform vectorized
// operations on the triangular matrices, which is especially true for small dense matrices.
// Second, conceptually all triangular adaptors merely restrict the interface to the matrix type
// \c MT and do not change the data layout or the underlying matrix type.
//
// This property matters most for diagonal matrices. In order to achieve the perfect combination
// of performance and memory consumption for a diagonal matrix it is recommended to use dense
// matrices for small diagonal matrices and sparse matrices for large diagonal matrices:
\code
// Recommendation 1: use dense matrices for small diagonal matrices
typedef blaze::DiagonalMatrix< blaze::StaticMatrix<float,3UL,3UL> > SmallDiagonalMatrix;
// Recommendation 2: use sparse matrices for large diagonal matrices
typedef blaze::DiagonalMatrix< blaze::CompressedMatrix<float> > LargeDiagonalMatrix;
\endcode
// \n \subsection adaptors_triangular_matrices_scaling Unitriangular Matrices Cannot Be Scaled!
//
// Since the diagonal elements of a unitriangular matrix have a fixed value of 1 it is not possible
// to self-scale such a matrix:
\code
using blaze::DynamicMatrix;
using blaze::UniLowerMatrix;
UniLowerMatrix< DynamicMatrix<int> > A( 4 );
A *= 2; // Compilation error; Scale operation is not available on a unilower matrix
A /= 2; // Compilation error; Scale operation is not available on a unilower matrix
A.scale( 2 ); // Compilation error; Scale function is not available on a unilower matrix
A = A * 2; // Throws an exception; Invalid assignment of non-unilower matrix
A = A / 2; // Throws an exception; Invalid assignment of non-unilower matrix
\endcode
// \n \section adaptors_triangular_matrices_arithmetic_operations Arithmetic Operations
// <hr>
//
// A lower and upper triangular matrix can participate in numerical operations in any way any other
// dense or sparse matrix can participate. It can also be combined with any other dense or sparse
// vector or matrix. The following code example gives an impression of the use of blaze::LowerMatrix
// within arithmetic operations:
\code
using blaze::LowerMatrix;
using blaze::DynamicMatrix;
using blaze::HybridMatrix;
using blaze::StaticMatrix;
using blaze::CompressedMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
DynamicMatrix<double,rowMajor> A( 3, 3 );
CompressedMatrix<double,rowMajor> B( 3, 3 );
LowerMatrix< DynamicMatrix<double,rowMajor> > C( 3 );
LowerMatrix< CompressedMatrix<double,rowMajor> > D( 3 );
LowerMatrix< HybridMatrix<float,3UL,3UL,rowMajor> > E;
LowerMatrix< StaticMatrix<float,3UL,3UL,columnMajor> > F;
E = A + B; // Matrix addition and assignment to a row-major lower matrix (includes runtime check)
F = C - D; // Matrix subtraction and assignment to a column-major lower matrix (only compile time check)
F = A * D; // Matrix multiplication between a dense and a sparse matrix (includes runtime check)
C *= 2.0; // In-place scaling of matrix C
E = 2.0 * B; // Scaling of matrix B (includes runtime check)
F = C * 2.0; // Scaling of matrix C (only compile time check)
E += A - B; // Addition assignment (includes runtime check)
F -= C + D; // Subtraction assignment (only compile time check)
F *= A * D; // Multiplication assignment (includes runtime check)
\endcode
// Note that it is possible to assign any kind of matrix to a triangular matrix. In case the
// matrix to be assigned does not satisfy the invariants of the triangular matrix at compile
// time, a runtime check is performed. Also note that upper triangular, diagonal, unitriangular
// and strictly triangular matrix types can be used in the same way, but may pose some additional
// restrictions (see the according class documentations).
//
//
// \n \section adaptors_triangular_matrices_block_matrices Triangular Block Matrices
// <hr>
//
// It is also possible to use triangular block matrices:
\code
using blaze::CompressedMatrix;
using blaze::DynamicMatrix;
using blaze::StaticMatrix;
using blaze::LowerMatrix;
using blaze::UpperMatrix;
// Definition of a 5x5 lower block matrix based on DynamicMatrix
LowerMatrix< DynamicMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 );
// Definition of a 7x7 upper block matrix based on CompressedMatrix
UpperMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > B( 7 );
\endcode
// Also in this case the triangular matrix invariant is enforced, i.e. it is not possible to
// manipulate elements in the upper part (lower triangular matrix) or the lower part (upper
// triangular matrix) of the matrix:
\code
const StaticMatrix<int,3UL,3UL> C{ { 1, -4, 5 },
{ 6, 8, -3 },
{ 2, -1, 2 } };
A(2,4)(1,1) = -5; // Invalid manipulation of upper matrix element; Results in an exception
B.insert( 4, 2, C ); // Invalid insertion of the elements (4,2); Results in an exception
\endcode
// Note that unitriangular matrices are restricted to numeric element types and therefore cannot
// be used for block matrices:
\code
using blaze::CompressedMatrix;
using blaze::DynamicMatrix;
using blaze::StaticMatrix;
using blaze::UniLowerMatrix;
using blaze::UniUpperMatrix;
// Compilation error: lower unitriangular matrices are restricted to numeric element types
UniLowerMatrix< DynamicMatrix< StaticMatrix<int,3UL,3UL> > > A( 5 );
// Compilation error: upper unitriangular matrices are restricted to numeric element types
UniUpperMatrix< CompressedMatrix< StaticMatrix<int,3UL,3UL> > > B( 7 );
\endcode
// For more information on block matrices, see the tutorial on \ref block_vectors_and_matrices.
//
//
// \n \section adaptors_triangular_matrices_performance Performance Considerations
// <hr>
//
// The \b Blaze library tries to exploit the properties of lower and upper triangular matrices
// whenever and wherever possible. Therefore using triangular matrices instead of a general
// matrices can result in a considerable performance improvement. However, there are also
// situations when using a triangular matrix introduces some overhead. The following examples
// demonstrate several common situations where triangular matrices can positively or negatively
// impact performance.
//
// \n \subsection adaptors_triangular_matrices_matrix_matrix_multiplication Positive Impact: Matrix/Matrix Multiplication
//
// When multiplying two matrices, at least one of which is triangular, \b Blaze can exploit the
// fact that either the lower or upper part of the matrix contains only default elements and
// restrict the algorithm to the non-zero elements. The following example demonstrates this by
// means of a dense matrix/dense matrix multiplication with lower triangular matrices:
\code
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
using blaze::rowMajor;
using blaze::columnMajor;
LowerMatrix< DynamicMatrix<double,rowMajor> > A;
LowerMatrix< DynamicMatrix<double,columnMajor> > B;
DynamicMatrix<double,columnMajor> C;
// ... Resizing and initialization
C = A * B;
\endcode
// In comparison to a general matrix multiplication, the performance advantage is significant,
// especially for large matrices. Therefore it is highly recommended to use the blaze::LowerMatrix
// and blaze::UpperMatrix adaptors when a matrix is known to be lower or upper triangular,
// respectively. Note however that the performance advantage is most pronounced for dense matrices
// and much less so for sparse matrices.
//
// \n \subsection adaptors_triangular_matrices_matrix_vector_multiplication Positive Impact: Matrix/Vector Multiplication
//
// A similar performance improvement can be gained when using a triangular matrix in a matrix/vector
// multiplication:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::LowerMatrix;
using blaze::rowMajor;
using blaze::columnVector;
LowerMatrix< DynamicMatrix<double,rowMajor> > A;
DynamicVector<double,columnVector> x, y;
// ... Resizing and initialization
y = A * x;
\endcode
// In this example, \b Blaze also exploits the structure of the matrix and approx. halves the
// runtime of the multiplication. Also in case of matrix/vector multiplications the performance
// improvement is most pronounced for dense matrices and much less so for sparse matrices.
//
// \n \subsection adaptors_triangular_matrices_assignment Negative Impact: Assignment of a General Matrix
//
// In contrast to using a triangular matrix on the right-hand side of an assignment (i.e. for
// read access), which introduces absolutely no performance penalty, using a triangular matrix
// on the left-hand side of an assignment (i.e. for write access) may introduce additional
// overhead when it is assigned a general matrix, which is not triangular at compile time:
\code
using blaze::DynamicMatrix;
using blaze::LowerMatrix;
LowerMatrix< DynamicMatrix<double> > A, C;
DynamicMatrix<double> B;
B = A; // Only read-access to the lower matrix; no performance penalty
C = A; // Assignment of a lower matrix to another lower matrix; no runtime overhead
C = B; // Assignment of a general matrix to a lower matrix; some runtime overhead
\endcode
// When assigning a general (potentially not lower triangular) matrix to a lower matrix or a
// general (potentially not upper triangular) matrix to an upper matrix it is necessary to check
// whether the matrix is lower or upper at runtime in order to guarantee the triangular property
// of the matrix. In case it turns out to be lower or upper, respectively, it is assigned as
// efficiently as possible, if it is not, an exception is thrown. In order to prevent this runtime
// overhead it is therefore generally advisable to assign lower or upper triangular matrices to
// other lower or upper triangular matrices.\n
// In this context it is especially noteworthy that the addition, subtraction, and multiplication
// of two triangular matrices of the same structure always results in another triangular matrix:
\code
LowerMatrix< DynamicMatrix<double> > A, B, C;
C = A + B; // Results in a lower matrix; no runtime overhead
C = A - B; // Results in a lower matrix; no runtime overhead
C = A * B; // Results in a lower matrix; no runtime overhead
\endcode
\code
UpperMatrix< DynamicMatrix<double> > A, B, C;
C = A + B; // Results in an upper matrix; no runtime overhead
C = A - B; // Results in an upper matrix; no runtime overhead
C = A * B; // Results in an upper matrix; no runtime overhead
\endcode
// \n Previous: \ref adaptors_hermitian_matrices Next: \ref views
*/
//*************************************************************************************************
//**Views******************************************************************************************
/*!\page views Views
//
// \tableofcontents
//
//
// \section views_general General Concepts
// <hr>
//
// Views represent parts of a vector or matrix, such as a subvector, a submatrix, or a specific
// row or column of a matrix. As such, views act as a reference to a specific part of a vector
// or matrix. This reference is valid and can be used in every way as any other vector or matrix
// can be used as long as the referenced vector or matrix is not resized or entirely destroyed.
// Views also act as alias to the elements of the vector or matrix: Changes made to the elements
// (e.g. modifying values, inserting or erasing elements) via the view are immediately visible in
// the vector or matrix and changes made via the vector or matrix are immediately visible in the
// view.
//
// The \b Blaze library provides the following views on vectors and matrices:
//
// Vector views:
// - \ref views_subvectors
//
// Matrix views:
// - \ref views_submatrices
// - \ref views_rows
// - \ref views_columns
//
//
// \n \section views_examples Examples
\code
using blaze::DynamicMatrix;
using blaze::StaticVector;
using blaze::rowVector;
// Setup of the 3x5 row-major matrix
//
// ( 1 0 -2 3 0 )
// ( 0 2 5 -1 -1 )
// ( 1 0 0 2 1 )
//
DynamicMatrix<int> A{ { 1, 0, -2, 3, 0 },
{ 0, 2, 5, -1, -1 },
{ 1, 0, 0, 2, 1 } };
// Setup of the 2-dimensional row vector
//
// ( 18 19 )
//
StaticVector<int,2UL,rowVector> vec{ 18, 19 };
// Assigning to the elements (1,2) and (1,3) via a subvector of a row
//
// ( 1 0 -2 3 0 )
// ( 0 2 18 19 -1 )
// ( 1 0 0 2 1 )
//
subvector( row( A, 1UL ), 2UL, 2UL ) = vec;
\endcode
// \n Previous: \ref adaptors_triangular_matrices Next: \ref views_subvectors
*/
//*************************************************************************************************
//**Subvectors*************************************************************************************
/*!\page views_subvectors Subvectors
//
// \tableofcontents
//
//
// Subvectors provide views on a specific part of a dense or sparse vector. As such, subvectors
// act as a reference to a specific range within a vector. This reference is valid and can be
// used in every way any other dense or sparse vector can be used as long as the vector containing
// the subvector is not resized or entirely destroyed. The subvector also acts as an alias to the
// vector elements in the specified range: Changes made to the elements (e.g. modifying values,
// inserting or erasing elements) are immediately visible in the vector and changes made via the
// vector are immediately visible in the subvector.
//
//
// \n \section views_subvectors_class The Subvector Class Template
// <hr>
//
// The blaze::Subvector class template represents a view on a specific subvector of a dense or
// sparse vector primitive. It can be included via the header file
\code
#include <blaze/math/Subvector.h>
\endcode
// The type of the vector is specified via two template parameters:
\code
template< typename VT, bool AF >
class Subvector;
\endcode
// - \c VT: specifies the type of the vector primitive. Subvector can be used with every vector
// primitive or view, but does not work with any vector expression type.
// - \c AF: the alignment flag specifies whether the subvector is aligned (blaze::aligned) or
// unaligned (blaze::unaligned). The default value is blaze::unaligned.
//
//
// \n \section views_subvectors_setup Setup of Subvectors
// <hr>
//
// A view on a dense or sparse subvector can be created very conveniently via the \c subvector()
// function. This view can be treated as any other vector, i.e. it can be assigned to, it can
// be copied from, and it can be used in arithmetic operations. A subvector created from a row
// vector can be used as any other row vector, a subvector created from a column vector can be
// used as any other column vector. The view can also be used on both sides of an assignment:
// The subvector can either be used as an alias to grant write access to a specific subvector
// of a vector primitive on the left-hand side of an assignment or to grant read-access to a
// specific subvector of a vector primitive or expression on the right-hand side of an assignment.
// The following example demonstrates this in detail:
\code
typedef blaze::DynamicVector<double,blaze::rowVector> DenseVectorType;
typedef blaze::CompressedVector<int,blaze::rowVector> SparseVectorType;
DenseVectorType d1, d2;
SparseVectorType s1, s2;
// ... Resizing and initialization
// Creating a view on the first ten elements of the dense vector d1
blaze::Subvector<DenseVectorType> dsv = subvector( d1, 0UL, 10UL );
// Creating a view on the second ten elements of the sparse vector s1
blaze::Subvector<SparseVectorType> ssv = subvector( s1, 10UL, 10UL );
// Creating a view on the addition of d2 and s2
dsv = subvector( d2 + s2, 5UL, 10UL );
// Creating a view on the multiplication of d2 and s2
ssv = subvector( d2 * s2, 2UL, 10UL );
\endcode
// The \c subvector() function can be used on any dense or sparse vector, including expressions,
// as demonstrated in the example. Note however that a blaze::Subvector can only be instantiated
// with a dense or sparse vector primitive, i.e. with types that can be written, and not with an
// expression type.
//
//
// \n \section views_subvectors_common_operations Common Operations
// <hr>
//
// A subvector view can be used like any other dense or sparse vector. For instance, the current
// number of elements can be obtained via the \c size() function, the current capacity via the
// \c capacity() function, and the number of non-zero elements via the \c nonZeros() function.
// However, since subvectors are references to a specific range of a vector, several operations
// are not possible on views, such as resizing and swapping. The following example shows this by
// means of a dense subvector view:
\code
typedef blaze::DynamicVector<int,blaze::rowVector> VectorType;
typedef blaze::Subvector<VectorType> SubvectorType;
VectorType v( 42UL );
// ... Resizing and initialization
// Creating a view on the range [5..15] of vector v
SubvectorType sv = subvector( v, 5UL, 10UL );
sv.size(); // Returns the number of elements in the subvector
sv.capacity(); // Returns the capacity of the subvector
sv.nonZeros(); // Returns the number of non-zero elements contained in the subvector
sv.resize( 84UL ); // Compilation error: Cannot resize a subvector of a vector
SubvectorType sv2 = subvector( v, 15UL, 10UL );
swap( sv, sv2 ); // Compilation error: Swap operation not allowed
\endcode
// \n \section views_subvectors_element_access Element Access
// <hr>
//
// The elements of a subvector can be directly accessed via the subscript operator:
\code
typedef blaze::DynamicVector<double,blaze::rowVector> VectorType;
VectorType v;
// ... Resizing and initialization
// Creating an 8-dimensional subvector, starting from index 4
blaze::Subvector<VectorType> sv = subvector( v, 4UL, 8UL );
// Setting the 1st element of the subvector, which corresponds to
// the element at index 5 in vector v
sv[1] = 2.0;
\endcode
\code
typedef blaze::CompressedVector<double,blaze::rowVector> VectorType;
VectorType v;
// ... Resizing and initialization
// Creating an 8-dimensional subvector, starting from index 4
blaze::Subvector<VectorType> sv = subvector( v, 4UL, 8UL );
// Setting the 1st element of the subvector, which corresponds to
// the element at index 5 in vector v
sv[1] = 2.0;
\endcode
// The numbering of the subvector elements is
\f[\left(\begin{array}{*{5}{c}}
0 & 1 & 2 & \cdots & N-1 \\
\end{array}\right),\f]
// where N is the specified size of the subvector. Alternatively, the elements of a subvector can
// be traversed via iterators. Just as with vectors, in case of non-const subvectors, \c begin()
// and \c end() return an Iterator, which allows a manipulation of the non-zero values, in case
// of constant subvectors a ConstIterator is returned:
\code
typedef blaze::DynamicVector<int,blaze::rowVector> VectorType;
typedef blaze::Subvector<VectorType> SubvectorType;
VectorType v( 256UL );
// ... Resizing and initialization
// Creating a reference to a specific subvector of the dense vector v
SubvectorType sv = subvector( v, 16UL, 64UL );
for( SubvectorType::Iterator it=sv.begin(); it!=sv.end(); ++it ) {
*it = ...; // OK: Write access to the dense subvector value.
... = *it; // OK: Read access to the dense subvector value.
}
for( SubvectorType::ConstIterator it=sv.begin(); it!=sv.end(); ++it ) {
*it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid.
... = *it; // OK: Read access to the dense subvector value.
}
\endcode
\code
typedef blaze::CompressedVector<int,blaze::rowVector> VectorType;
typedef blaze::Subvector<VectorType> SubvectorType;
VectorType v( 256UL );
// ... Resizing and initialization
// Creating a reference to a specific subvector of the sparse vector v
SubvectorType sv = subvector( v, 16UL, 64UL );
for( SubvectorType::Iterator it=sv.begin(); it!=sv.end(); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
for( SubvectorType::ConstIterator it=sv.begin(); it!=sv.end(); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
\endcode
// \n \section views_subvectors_element_insertion Element Insertion
// <hr>
//
// Inserting/accessing elements in a sparse subvector can be done by several alternative functions.
// The following example demonstrates all options:
\code
typedef blaze::CompressedVector<double,blaze::rowVector> VectorType;
VectorType v( 256UL ); // Non-initialized vector of size 256
typedef blaze::Subvector<VectorType> SubvectorType;
SubvectorType sv( subvector( v, 10UL, 60UL ) ); // View on the range [10..69] of v
// The subscript operator provides access to all possible elements of the sparse subvector,
// including the zero elements. In case the subscript operator is used to access an element
// that is currently not stored in the sparse subvector, the element is inserted into the
// subvector.
sv[42] = 2.0;
// The second operation for inserting elements is the set() function. In case the element
// is not contained in the vector it is inserted into the vector, if it is already contained
// in the vector its value is modified.
sv.set( 45UL, -1.2 );
// An alternative for inserting elements into the subvector is the insert() function. However,
// it inserts the element only in case the element is not already contained in the subvector.
sv.insert( 50UL, 3.7 );
// Just as in case of vectors, elements can also be inserted via the append() function. In
// case of subvectors, append() also requires that the appended element's index is strictly
// larger than the currently largest non-zero index of the subvector and that the subvector's
// capacity is large enough to hold the new element. Note however that due to the nature of
// a subvector, which may be an alias to the middle of a sparse vector, the append() function
// does not work as efficiently for a subvector as it does for a vector.
sv.reserve( 10UL );
sv.append( 51UL, -2.1 );
\endcode
// \n \section views_subvectors_arithmetic_operations Arithmetic Operations
// <hr>
//
// Both dense and sparse subvectors can be used in all arithmetic operations that any other dense
// or sparse vector can be used in. The following example gives an impression of the use of dense
// subvectors within arithmetic operations. All operations (addition, subtraction, multiplication,
// scaling, ...) can be performed on all possible combinations of dense and sparse subvectors with
// fitting element types:
\code
typedef blaze::DynamicVector<double,blaze::rowVector> DenseVectorType;
typedef blaze::CompressedVector<double,blaze::rowVector> SparseVectorType;
DenseVectorType d1, d2, d3;
SparseVectorType s1, s2;
// ... Resizing and initialization
typedef blaze::DynamicMatrix<double,blaze::rowMajor> DenseMatrixType;
DenseMatrixType A;
typedef blaze::Subvector<DenseVectorType> SubvectorType;
SubvectorType dsv( subvector( d1, 0UL, 10UL ) ); // View on the range [0..9] of vector d1
dsv = d2; // Dense vector initialization of the range [0..9]
subvector( d1, 10UL, 10UL ) = s1; // Sparse vector initialization of the range [10..19]
d3 = dsv + d2; // Dense vector/dense vector addition
s2 = s1 + subvector( d1, 10UL, 10UL ); // Sparse vector/dense vector addition
d2 = dsv * subvector( d1, 20UL, 10UL ); // Component-wise vector multiplication
subvector( d1, 3UL, 4UL ) *= 2.0; // In-place scaling of the range [3..6]
d2 = subvector( d1, 7UL, 3UL ) * 2.0; // Scaling of the range [7..9]
d2 = 2.0 * subvector( d1, 7UL, 3UL ); // Scaling of the range [7..9]
subvector( d1, 0UL , 10UL ) += d2; // Addition assignment
subvector( d1, 10UL, 10UL ) -= s2; // Subtraction assignment
subvector( d1, 20UL, 10UL ) *= dsv; // Multiplication assignment
double scalar = subvector( d1, 5UL, 10UL ) * trans( s1 ); // Scalar/dot/inner product between two vectors
A = trans( s1 ) * subvector( d1, 4UL, 16UL ); // Outer product between two vectors
\endcode
// \n \section views_aligned_subvectors Aligned Subvectors
// <hr>
//
// Usually subvectors can be defined anywhere within a vector. They may start at any position and
// may have an arbitrary size (only restricted by the size of the underlying vector). However, in
// contrast to vectors themselves, which are always properly aligned in memory and therefore can
// provide maximum performance, this means that subvectors in general have to be considered to be
// unaligned. This can be made explicit by the blaze::unaligned flag:
\code
using blaze::unaligned;
typedef blaze::DynamicVector<double,blaze::rowVector> DenseVectorType;
DenseVectorType x;
// ... Resizing and initialization
// Identical creations of an unaligned subvector in the range [8..23]
blaze::Subvector<DenseVectorType> sv1 = subvector ( x, 8UL, 16UL );
blaze::Subvector<DenseVectorType> sv2 = subvector<unaligned>( x, 8UL, 16UL );
blaze::Subvector<DenseVectorType,unaligned> sv3 = subvector ( x, 8UL, 16UL );
blaze::Subvector<DenseVectorType,unaligned> sv4 = subvector<unaligned>( x, 8UL, 16UL );
\endcode
// All of these calls to the \c subvector() function are identical. Whether the alignment flag is
// explicitly specified or not, it always returns an unaligned subvector. Whereas this may provide
// full flexibility in the creation of subvectors, this might result in performance disadvantages
// in comparison to vector primitives (even in case the specified subvector could be aligned).
// Whereas vector primitives are guaranteed to be properly aligned and therefore provide maximum
// performance in all operations, a general view on a vector might not be properly aligned. This
// may cause a performance penalty on some platforms and/or for some operations.
//
// However, it is also possible to create aligned subvectors. Aligned subvectors are identical to
// unaligned subvectors in all aspects, except that they may pose additional alignment restrictions
// and therefore have less flexibility during creation, but don't suffer from performance penalties
// and provide the same performance as the underlying vector. Aligned subvectors are created by
// explicitly specifying the blaze::aligned flag:
\code
using blaze::aligned;
// Creating an aligned dense subvector in the range [8..23]
blaze::Subvector<DenseVectorType,aligned> sv = subvector<aligned>( x, 8UL, 16UL );
\endcode
// The alignment restrictions refer to system dependent address restrictions for the used element
// type and the available vectorization mode (SSE, AVX, ...). In order to be properly aligned the
// first element of the subvector must be aligned. The following source code gives some examples
// for a double precision dynamic vector, assuming that AVX is available, which packs 4 \c double
// values into a SIMD vector:
\code
using blaze::aligned;
using blaze::columnVector;
typedef blaze::DynamicVector<double,columnVector> VectorType;
typedef blaze::Subvector<VectorType,aligned> SubvectorType;
VectorType d( 17UL );
// ... Resizing and initialization
// OK: Starts at the beginning, i.e. the first element is aligned
SubvectorType dsv1 = subvector<aligned>( d, 0UL, 13UL );
// OK: Start index is a multiple of 4, i.e. the first element is aligned
SubvectorType dsv2 = subvector<aligned>( d, 4UL, 7UL );
// OK: The start index is a multiple of 4 and the subvector includes the last element
SubvectorType dsv3 = subvector<aligned>( d, 8UL, 9UL );
// Error: Start index is not a multiple of 4, i.e. the first element is not aligned
SubvectorType dsv4 = subvector<aligned>( d, 5UL, 8UL );
\endcode
// Note that the discussed alignment restrictions are only valid for aligned dense subvectors.
// In contrast, aligned sparse subvectors at this time don't pose any additional restrictions.
// Therefore aligned and unaligned sparse subvectors are truly fully identical. Still, in case
// the blaze::aligned flag is specified during setup, an aligned subvector is created:
\code
using blaze::aligned;
typedef blaze::CompressedVector<double,blaze::rowVector> SparseVectorType;
SparseVectorType x;
// ... Resizing and initialization
// Creating an aligned subvector in the range [8..23]
blaze::Subvector<SparseVectorType,aligned> sv = subvector<aligned>( x, 8UL, 16UL );
\endcode
// \n \section views_subvectors_on_subvectors Subvectors on Subvectors
// <hr>
//
// It is also possible to create a subvector view on another subvector. In this context it is
// important to remember that the type returned by the \c subvector() function is the same type
// as the type of the given subvector, not a nested subvector type, since the view on a subvector
// is just another view on the underlying vector:
\code
typedef blaze::DynamicVector<double,blaze::rowVector> VectorType;
typedef blaze::Subvector<VectorType> SubvectorType;
VectorType d1;
// ... Resizing and initialization
// Creating a subvector view on the dense vector d1
SubvectorType sv1 = subvector( d1, 5UL, 10UL );
// Creating a subvector view on the dense subvector sv1
SubvectorType sv2 = subvector( sv1, 1UL, 5UL );
\endcode
// \n Previous: \ref views Next: \ref views_submatrices
*/
//*************************************************************************************************
//**Submatrices************************************************************************************
/*!\page views_submatrices Submatrices
//
// \tableofcontents
//
//
// Submatrices provide views on a specific part of a dense or sparse matrix just as subvectors
// provide views on specific parts of vectors. As such, submatrices act as a reference to a
// specific block within a matrix. This reference is valid and can be used in every way any
// other dense or sparse matrix can be used as long as the matrix containing the submatrix is
// not resized or entirely destroyed. The submatrix also acts as an alias to the matrix elements
// in the specified block: Changes made to the elements (e.g. modifying values, inserting or
// erasing elements) are immediately visible in the matrix and changes made via the matrix are
// immediately visible in the submatrix.
//
//
// \n \section views_submatrices_class The Submatrix Class Template
// <hr>
//
// The blaze::Submatrix class template represents a view on a specific submatrix of a dense or
// sparse matrix primitive. It can be included via the header file
\code
#include <blaze/math/Submatrix.h>
\endcode
// The type of the matrix is specified via two template parameters:
\code
template< typename MT, bool AF >
class Submatrix;
\endcode
// - \c MT: specifies the type of the matrix primitive. Submatrix can be used with every matrix
// primitive, but does not work with any matrix expression type.
// - \c AF: the alignment flag specifies whether the submatrix is aligned (blaze::aligned) or
// unaligned (blaze::unaligned). The default value is blaze::unaligned.
//
//
// \n \section views_submatrices_setup Setup of Submatrices
// <hr>
//
// A view on a submatrix can be created very conveniently via the \c submatrix() function.
// This view can be treated as any other matrix, i.e. it can be assigned to, it can be copied
// from, and it can be used in arithmetic operations. A submatrix created from a row-major
// matrix will itself be a row-major matrix, a submatrix created from a column-major matrix
// will be a column-major matrix. The view can also be used on both sides of an assignment:
// The submatrix can either be used as an alias to grant write access to a specific submatrix
// of a matrix primitive on the left-hand side of an assignment or to grant read-access to
// a specific submatrix of a matrix primitive or expression on the right-hand side of an
// assignment. The following example demonstrates this in detail:
\code
typedef blaze::DynamicMatrix<double,blaze::rowMajor> DenseMatrixType;
typedef blaze::CompressedMatrix<int,blaze::columnMajor> SparseMatrixType;
DenseMatrixType D1, D2;
SparseMatrixType S1, S2;
// ... Resizing and initialization
// Creating a view on the first 8x16 block of the dense matrix D1
blaze::Submatrix<DenseMatrixType> dsm = submatrix( D1, 0UL, 0UL, 8UL, 16UL );
// Creating a view on the second 8x16 block of the sparse matrix S1
blaze::Submatrix<SparseMatrixType> ssm = submatrix( S1, 0UL, 16UL, 8UL, 16UL );
// Creating a view on the addition of D2 and S2
dsm = submatrix( D2 + S2, 5UL, 10UL, 8UL, 16UL );
// Creating a view on the multiplication of D2 and S2
ssm = submatrix( D2 * S2, 7UL, 13UL, 8UL, 16UL );
\endcode
// \n \section views_submatrices_common_operations Common Operations
// <hr>
//
// The current size of the matrix, i.e. the number of rows or columns can be obtained via the
// \c rows() and \c columns() functions, the current total capacity via the \c capacity() function,
// and the number of non-zero elements via the \c nonZeros() function. However, since submatrices
// are views on a specific submatrix of a matrix, several operations are not possible on views,
// such as resizing and swapping:
\code
typedef blaze::DynamicMatrix<int,blaze::rowMajor> MatrixType;
typedef blaze::Submatrix<MatrixType> SubmatrixType;
MatrixType A;
// ... Resizing and initialization
// Creating a view on an 8x12 submatrix of matrix A
SubmatrixType sm = submatrix( A, 0UL, 0UL, 8UL, 12UL );
sm.rows(); // Returns the number of rows of the submatrix
sm.columns(); // Returns the number of columns of the submatrix
sm.capacity(); // Returns the capacity of the submatrix
sm.nonZeros(); // Returns the number of non-zero elements contained in the submatrix
sm.resize( 10UL, 8UL ); // Compilation error: Cannot resize a submatrix of a matrix
SubmatrixType sm2 = submatrix( A, 8UL, 0UL, 12UL, 8UL );
swap( sm, sm2 ); // Compilation error: Swap operation not allowed
\endcode
// \n \section views_submatrices_element_access Element Access
// <hr>
//
// The elements of a submatrix can be directly accessed with the function call operator:
\code
typedef blaze::DynamicMatrix<double,blaze::rowMajor> MatrixType;
MatrixType A;
// ... Resizing and initialization
// Creating a 8x8 submatrix, starting from position (4,4)
blaze::Submatrix<MatrixType> sm = submatrix( A, 4UL, 4UL, 8UL, 8UL );
// Setting the element (0,0) of the submatrix, which corresponds to
// the element at position (4,4) in matrix A
sm(0,0) = 2.0;
\endcode
\code
typedef blaze::CompressedMatrix<double,blaze::rowMajor> MatrixType;
MatrixType A;
// ... Resizing and initialization
// Creating a 8x8 submatrix, starting from position (4,4)
blaze::Submatrix<MatrixType> sm = submatrix( A, 4UL, 4UL, 8UL, 8UL );
// Setting the element (0,0) of the submatrix, which corresponds to
// the element at position (4,4) in matrix A
sm(0,0) = 2.0;
\endcode
// Alternatively, the elements of a submatrix can be traversed via (const) iterators. Just as
// with matrices, in case of non-const submatrices, \c begin() and \c end() return an Iterator,
// which allows a manipulation of the non-zero values, in case of constant submatrices a
// ConstIterator is returned:
\code
typedef blaze::DynamicMatrix<int,blaze::rowMajor> MatrixType;
typedef blaze::Submatrix<MatrixType> SubmatrixType;
MatrixType A( 256UL, 512UL );
// ... Resizing and initialization
// Creating a reference to a specific submatrix of the dense matrix A
SubmatrixType sm = submatrix( A, 16UL, 16UL, 64UL, 128UL );
// Traversing the elements of the 0th row via iterators to non-const elements
for( SubmatrixType::Iterator it=sm.begin(0); it!=sm.end(0); ++it ) {
*it = ...; // OK: Write access to the dense submatrix value.
... = *it; // OK: Read access to the dense submatrix value.
}
// Traversing the elements of the 1st row via iterators to const elements
for( SubmatrixType::ConstIterator it=sm.begin(1); it!=sm.end(1); ++it ) {
*it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid.
... = *it; // OK: Read access to the dense submatrix value.
}
\endcode
\code
typedef blaze::CompressedMatrix<int,blaze::rowMajor> MatrixType;
typedef blaze::Submatrix<MatrixType> SubmatrixType;
MatrixType A( 256UL, 512UL );
// ... Resizing and initialization
// Creating a reference to a specific submatrix of the sparse matrix A
SubmatrixType sm = submatrix( A, 16UL, 16UL, 64UL, 128UL );
// Traversing the elements of the 0th row via iterators to non-const elements
for( SubmatrixType::Iterator it=sm.begin(0); it!=sm.end(0); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
// Traversing the elements of the 1st row via iterators to const elements
for( SubmatrixType::ConstIterator it=sm.begin(1); it!=sm.end(1); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
\endcode
// \n \section views_submatrices_element_insertion Element Insertion
// <hr>
//
// Inserting/accessing elements in a sparse submatrix can be done by several alternative functions.
// The following example demonstrates all options:
\code
typedef blaze::CompressedMatrix<double,blaze::rowMajor> MatrixType;
MatrixType A( 256UL, 512UL ); // Non-initialized matrix of size 256x512
typedef blaze::Submatrix<MatrixType> SubmatrixType;
SubmatrixType sm = submatrix( A, 10UL, 10UL, 16UL, 16UL ); // View on a 16x16 submatrix of A
// The function call operator provides access to all possible elements of the sparse submatrix,
// including the zero elements. In case the subscript operator is used to access an element
// that is currently not stored in the sparse submatrix, the element is inserted into the
// submatrix.
sm(2,4) = 2.0;
// The second operation for inserting elements is the set() function. In case the element is
// not contained in the submatrix it is inserted into the submatrix, if it is already contained
// in the submatrix its value is modified.
sm.set( 2UL, 5UL, -1.2 );
// An alternative for inserting elements into the submatrix is the insert() function. However,
// it inserts the element only in case the element is not already contained in the submatrix.
sm.insert( 2UL, 6UL, 3.7 );
// Just as in case of sparse matrices, elements can also be inserted via the append() function.
// In case of submatrices, append() also requires that the appended element's index is strictly
// larger than the currently largest non-zero index in the according row or column of the
// submatrix and that the according row's or column's capacity is large enough to hold the new
// element. Note however that due to the nature of a submatrix, which may be an alias to the
// middle of a sparse matrix, the append() function does not work as efficiently for a
// submatrix as it does for a matrix.
sm.reserve( 2UL, 10UL );
sm.append( 2UL, 10UL, -2.1 );
\endcode
// \n \section views_submatrices_arithmetic_operations Arithmetic Operations
// <hr>
//
// Both dense and sparse submatrices can be used in all arithmetic operations that any other dense
// or sparse matrix can be used in. The following example gives an impression of the use of dense
// submatrices within arithmetic operations. All operations (addition, subtraction, multiplication,
// scaling, ...) can be performed on all possible combinations of dense and sparse matrices with
// fitting element types:
\code
typedef blaze::DynamicMatrix<double,blaze::rowMajor> DenseMatrixType;
typedef blaze::CompressedMatrix<double,blaze::rowMajor> SparseMatrixType;
DenseMatrixType D1, D2, D3;
SparseMatrixType S1, S2;
typedef blaze::CompressedVector<double,blaze::columnVector> SparseVectorType;
SparseVectorType a, b;
// ... Resizing and initialization
typedef Submatrix<DenseMatrixType> SubmatrixType;
SubmatrixType sm = submatrix( D1, 0UL, 0UL, 8UL, 8UL ); // View on the 8x8 submatrix of matrix D1
// starting from row 0 and column 0
submatrix( D1, 0UL, 8UL, 8UL, 8UL ) = D2; // Dense matrix initialization of the 8x8 submatrix
// starting in row 0 and column 8
sm = S1; // Sparse matrix initialization of the second 8x8 submatrix
D3 = sm + D2; // Dense matrix/dense matrix addition
S2 = S1 - submatrix( D1, 8UL, 0UL, 8UL, 8UL ); // Sparse matrix/dense matrix subtraction
D2 = sm * submatrix( D1, 8UL, 8UL, 8UL, 8UL ); // Dense matrix/dense matrix multiplication
submatrix( D1, 8UL, 0UL, 8UL, 8UL ) *= 2.0; // In-place scaling of a submatrix of D1
D2 = submatrix( D1, 8UL, 8UL, 8UL, 8UL ) * 2.0; // Scaling of a submatrix of D1
D2 = 2.0 * sm; // Scaling of a submatrix of D1
submatrix( D1, 0UL, 8UL, 8UL, 8UL ) += D2; // Addition assignment
submatrix( D1, 8UL, 0UL, 8UL, 8UL ) -= S1; // Subtraction assignment
submatrix( D1, 8UL, 8UL, 8UL, 8UL ) *= sm; // Multiplication assignment
a = submatrix( D1, 4UL, 4UL, 8UL, 8UL ) * b; // Dense matrix/sparse vector multiplication
\endcode
// \n \section views_aligned_submatrices Aligned Submatrices
// <hr>
//
// Usually submatrices can be defined anywhere within a matrix. They may start at any position and
// may have an arbitrary extension (only restricted by the extension of the underlying matrix).
// However, in contrast to matrices themselves, which are always properly aligned in memory and
// therefore can provide maximum performance, this means that submatrices in general have to be
// considered to be unaligned. This can be made explicit by the blaze::unaligned flag:
\code
using blaze::unaligned;
typedef blaze::DynamicMatrix<double,blaze::rowMajor> DenseMatrixType;
DenseMatrixType A;
// ... Resizing and initialization
// Identical creations of an unaligned submatrix of size 8x8, starting in row 0 and column 0
blaze::Submatrix<DenseMatrixType> sm1 = submatrix ( A, 0UL, 0UL, 8UL, 8UL );
blaze::Submatrix<DenseMatrixType> sm2 = submatrix<unaligned>( A, 0UL, 0UL, 8UL, 8UL );
blaze::Submatrix<DenseMatrixType,unaligned> sm3 = submatrix ( A, 0UL, 0UL, 8UL, 8UL );
blaze::Submatrix<DenseMatrixType,unaligned> sm4 = submatrix<unaligned>( A, 0UL, 0UL, 8UL, 8UL );
\endcode
// All of these calls to the \c submatrix() function are identical. Whether the alignment flag is
// explicitly specified or not, it always returns an unaligned submatrix. Whereas this may provide
// full flexibility in the creation of submatrices, this might result in performance disadvantages
// in comparison to matrix primitives (even in case the specified submatrix could be aligned).
// Whereas matrix primitives are guaranteed to be properly aligned and therefore provide maximum
// performance in all operations, a general view on a matrix might not be properly aligned. This
// may cause a performance penalty on some platforms and/or for some operations.
//
// However, it is also possible to create aligned submatrices. Aligned submatrices are identical to
// unaligned submatrices in all aspects, except that they may pose additional alignment restrictions
// and therefore have less flexibility during creation, but don't suffer from performance penalties
// and provide the same performance as the underlying matrix. Aligned submatrices are created by
// explicitly specifying the blaze::aligned flag:
\code
using blaze::aligned;
// Creating an aligned submatrix of size 8x8, starting in row 0 and column 0
blaze::Submatrix<DenseMatrixType,aligned> sm = submatrix<aligned>( A, 0UL, 0UL, 8UL, 8UL );
\endcode
// The alignment restrictions refer to system dependent address restrictions for the used element
// type and the available vectorization mode (SSE, AVX, ...). In order to be properly aligned the
// first element of each row/column of the submatrix must be aligned. The following source code
// gives some examples for a double precision row-major dynamic matrix, assuming that padding is
// enabled and that AVX is available, which packs 4 \c double values into a SIMD vector:
\code
using blaze::aligned;
using blaze::rowMajor;
typedef blaze::DynamicMatrix<double,rowMajor> MatrixType;
typedef blaze::Submatrix<MatrixType,aligned> SubmatrixType;
MatrixType D( 13UL, 17UL );
// ... Resizing and initialization
// OK: Starts at position (0,0), i.e. the first element of each row is aligned (due to padding)
SubmatrixType dsm1 = submatrix<aligned>( D, 0UL, 0UL, 7UL, 11UL );
// OK: First column is a multiple of 4, i.e. the first element of each row is aligned (due to padding)
SubmatrixType dsm2 = submatrix<aligned>( D, 3UL, 12UL, 8UL, 16UL );
// OK: First column is a multiple of 4 and the submatrix includes the last row and column
SubmatrixType dsm3 = submatrix<aligned>( D, 4UL, 0UL, 9UL, 17UL );
// Error: First column is not a multiple of 4, i.e. the first element is not aligned
SubmatrixType dsm4 = submatrix<aligned>( D, 2UL, 3UL, 12UL, 12UL );
\endcode
// Note that the discussed alignment restrictions are only valid for aligned dense submatrices.
// In contrast, aligned sparse submatrices at this time don't pose any additional restrictions.
// Therefore aligned and unaligned sparse submatrices are truly fully identical. Still, in case
// the blaze::aligned flag is specified during setup, an aligned submatrix is created:
\code
using blaze::aligned;
typedef blaze::CompressedMatrix<double,blaze::rowMajor> SparseMatrixType;
SparseMatrixType A;
// ... Resizing and initialization
// Creating an aligned submatrix of size 8x8, starting in row 0 and column 0
blaze::Submatrix<SparseMatrixType,aligned> sm = submatrix<aligned>( A, 0UL, 0UL, 8UL, 8UL );
\endcode
// \n \section views_submatrices_on_submatrices Submatrices on Submatrices
// <hr>
//
// It is also possible to create a submatrix view on another submatrix. In this context it is
// important to remember that the type returned by the \c submatrix() function is the same type
// as the type of the given submatrix, since the view on a submatrix is just another view on the
// underlying matrix:
\code
typedef blaze::DynamicMatrix<double,blaze::rowMajor> MatrixType;
typedef blaze::Submatrix<MatrixType> SubmatrixType;
MatrixType D1;
// ... Resizing and initialization
// Creating a submatrix view on the dense matrix D1
SubmatrixType sm1 = submatrix( D1, 4UL, 4UL, 8UL, 16UL );
// Creating a submatrix view on the dense submatrix sm1
SubmatrixType sm2 = submatrix( sm1, 1UL, 1UL, 4UL, 8UL );
\endcode
// \n \section views_submatrices_on_symmetric_matrices Submatrices on Symmetric Matrices
//
// Submatrices can also be created on symmetric matrices (see the \c SymmetricMatrix class template):
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
using blaze::Submatrix;
typedef SymmetricMatrix< DynamicMatrix<int> > SymmetricDynamicType;
typedef Submatrix< SymmetricDynamicType > SubmatrixType;
// Setup of a 16x16 symmetric matrix
SymmetricDynamicType A( 16UL );
// Creating a dense submatrix of size 8x12, starting in row 2 and column 4
SubmatrixType sm = submatrix( A, 2UL, 4UL, 8UL, 12UL );
\endcode
// It is important to note, however, that (compound) assignments to such submatrices have a
// special restriction: The symmetry of the underlying symmetric matrix must not be broken!
// Since the modification of element \f$ a_{ij} \f$ of a symmetric matrix also modifies the
// element \f$ a_{ji} \f$, the matrix to be assigned must be structured such that the symmetry
// of the symmetric matrix is preserved. Otherwise a \c std::invalid_argument exception is
// thrown:
\code
using blaze::DynamicMatrix;
using blaze::SymmetricMatrix;
// Setup of two default 4x4 symmetric matrices
SymmetricMatrix< DynamicMatrix<int> > A1( 4 ), A2( 4 );
// Setup of the 3x2 dynamic matrix
//
// ( 1 2 )
// B = ( 3 4 )
// ( 5 6 )
//
DynamicMatrix<int> B{ { 1, 2 }, { 3, 4 }, { 5, 6 } };
// OK: Assigning B to a submatrix of A1 such that the symmetry can be preserved
//
// ( 0 0 1 2 )
// A1 = ( 0 0 3 4 )
// ( 1 3 5 6 )
// ( 2 4 6 0 )
//
submatrix( A1, 0UL, 2UL, 3UL, 2UL ) = B; // OK
// Error: Assigning B to a submatrix of A2 such that the symmetry cannot be preserved!
// The elements marked with X cannot be assigned unambiguously!
//
// ( 0 1 2 0 )
// A2 = ( 1 3 X 0 )
// ( 2 X 6 0 )
// ( 0 0 0 0 )
//
submatrix( A2, 0UL, 1UL, 3UL, 2UL ) = B; // Assignment throws an exception!
\endcode
// \n Previous: \ref views_subvectors Next: \ref views_rows
*/
//*************************************************************************************************
//**Rows*******************************************************************************************
/*!\page views_rows Rows
//
// \tableofcontents
//
//
// Rows provide views on a specific row of a dense or sparse matrix. As such, rows act as a
// reference to a specific row. This reference is valid and can be used in every way any other
// row vector can be used as long as the matrix containing the row is not resized or entirely
// destroyed. The row also acts as an alias to the row elements: Changes made to the elements
// (e.g. modifying values, inserting or erasing elements) are immediately visible in the matrix
// and changes made via the matrix are immediately visible in the row.
//
//
// \n \section views_rows_class The Row Class Template
// <hr>
//
// The blaze::Row class template represents a reference to a specific row of a dense or sparse
// matrix primitive. It can be included via the header file
\code
#include <blaze/math/Row.h>
\endcode
// The type of the matrix is specified via template parameter:
\code
template< typename MT >
class Row;
\endcode
// \c MT specifies the type of the matrix primitive. Row can be used with every matrix primitive,
// but does not work with any matrix expression type.
//
//
// \n \section views_rows_setup Setup of Rows
// <hr>
//
// A reference to a dense or sparse row can be created very conveniently via the \c row() function.
// This reference can be treated as any other row vector, i.e. it can be assigned to, it can be
// copied from, and it can be used in arithmetic operations. The reference can also be used on
// both sides of an assignment: The row can either be used as an alias to grant write access to a
// specific row of a matrix primitive on the left-hand side of an assignment or to grant read-access
// to a specific row of a matrix primitive or expression on the right-hand side of an assignment.
// The following two examples demonstrate this for dense and sparse matrices:
\code
typedef blaze::DynamicVector<double,rowVector> DenseVectorType;
typedef blaze::CompressedVector<double,rowVector> SparseVectorType;
typedef blaze::DynamicMatrix<double,rowMajor> DenseMatrixType;
typedef blaze::CompressedMatrix<double,rowMajor> SparseMatrixType;
DenseVectorType x;
SparseVectorType y;
DenseMatrixType A, B;
SparseMatrixType C, D;
// ... Resizing and initialization
// Setting the 2nd row of matrix A to x
blaze::Row<DenseMatrixType> row2 = row( A, 2UL );
row2 = x;
// Setting the 3rd row of matrix B to y
row( B, 3UL ) = y;
// Setting x to the 4th row of the result of the matrix multiplication
x = row( A * B, 4UL );
// Setting y to the 2nd row of the result of the sparse matrix multiplication
y = row( C * D, 2UL );
\endcode
// The \c row() function can be used on any dense or sparse matrix, including expressions, as
// illustrated by the source code example. However, rows cannot be instantiated for expression
// types, but only for matrix primitives, i.e. for matrix types that offer write access.
//
//
// \n \section views_rows_common_operations Common Operations
// <hr>
//
// A row view can be used like any other row vector. For instance, the current number of elements
// can be obtained via the \c size() function, the current capacity via the \c capacity() function,
// and the number of non-zero elements via the \c nonZeros() function. However, since rows are
// references to specific rows of a matrix, several operations are not possible on views, such
// as resizing and swapping. The following example shows this by means of a dense row view:
\code
typedef blaze::DynamicMatrix<int,rowMajor> MatrixType;
typedef blaze::Row<MatrixType> RowType;
MatrixType A( 42UL, 42UL );
// ... Resizing and initialization
// Creating a reference to the 2nd row of matrix A
RowType row2 = row( A, 2UL );
row2.size(); // Returns the number of elements in the row
row2.capacity(); // Returns the capacity of the row
row2.nonZeros(); // Returns the number of non-zero elements contained in the row
row2.resize( 84UL ); // Compilation error: Cannot resize a single row of a matrix
RowType row3 = row( A, 3UL );
swap( row2, row3 ); // Compilation error: Swap operation not allowed
\endcode
// \n \section views_rows_element_access Element Access
// <hr>
//
// The elements of the row can be directly accessed with the subscript operator. The numbering
// of the row elements is
\f[\left(\begin{array}{*{5}{c}}
0 & 1 & 2 & \cdots & N-1 \\
\end{array}\right),\f]
// where N is the number of columns of the referenced matrix. Alternatively, the elements of
// a row can be traversed via iterators. Just as with vectors, in case of non-const rows,
// \c begin() and \c end() return an Iterator, which allows a manipulation of the non-zero
// value, in case of a constant row a ConstIterator is returned:
\code
typedef blaze::DynamicMatrix<int,rowMajor> MatrixType;
typedef blaze::Row<MatrixType> RowType;
MatrixType A( 128UL, 256UL );
// ... Resizing and initialization
// Creating a reference to the 31st row of matrix A
RowType row31 = row( A, 31UL );
for( RowType::Iterator it=row31.begin(); it!=row31.end(); ++it ) {
*it = ...; // OK: Write access to the dense row value.
... = *it; // OK: Read access to the dense row value.
}
for( RowType::ConstIterator it=row31.begin(); it!=row31.end(); ++it ) {
*it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid.
... = *it; // OK: Read access to the dense row value.
}
\endcode
\code
typedef blaze::CompressedMatrix<int,rowMajor> MatrixType;
typedef blaze::Row<MatrixType> RowType;
MatrixType A( 128UL, 256UL );
// ... Resizing and initialization
// Creating a reference to the 31st row of matrix A
RowType row31 = row( A, 31UL );
for( RowType::Iterator it=row31.begin(); it!=row31.end(); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
for( RowType::ConstIterator it=row31.begin(); it!=row31.end(); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
\endcode
// \n \section views_rows_element_insertion Element Insertion
// <hr>
//
// Inserting/accessing elements in a sparse row can be done by several alternative functions.
// The following example demonstrates all options:
\code
typedef blaze::CompressedMatrix<double,blaze::rowMajor> MatrixType;
MatrixType A( 10UL, 100UL ); // Non-initialized 10x100 matrix
typedef blaze::Row<MatrixType> RowType;
RowType row0( row( A, 0UL ) ); // Reference to the 0th row of A
// The subscript operator provides access to all possible elements of the sparse row,
// including the zero elements. In case the subscript operator is used to access an element
// that is currently not stored in the sparse row, the element is inserted into the row.
row0[42] = 2.0;
// The second operation for inserting elements is the set() function. In case the element
// is not contained in the row it is inserted into the row, if it is already contained in
// the row its value is modified.
row0.set( 45UL, -1.2 );
// An alternative for inserting elements into the row is the insert() function. However,
// it inserts the element only in case the element is not already contained in the row.
row0.insert( 50UL, 3.7 );
// A very efficient way to add new elements to a sparse row is the append() function.
// Note that append() requires that the appended element's index is strictly larger than
// the currently largest non-zero index of the row and that the row's capacity is large
// enough to hold the new element.
row0.reserve( 10UL );
row0.append( 51UL, -2.1 );
\endcode
// \n \section views_rows_arithmetic_operations Arithmetic Operations
// <hr>
//
// Both dense and sparse rows can be used in all arithmetic operations that any other dense or
// sparse row vector can be used in. The following example gives an impression of the use of
// dense rows within arithmetic operations. All operations (addition, subtraction, multiplication,
// scaling, ...) can be performed on all possible combinations of dense and sparse rows with
// fitting element types:
\code
blaze::DynamicVector<double,blaze::rowVector> a( 2UL, 2.0 ), b;
blaze::CompressedVector<double,blaze::rowVector> c( 2UL );
c[1] = 3.0;
typedef blaze::DynamicMatrix<double,blaze::rowMajor> DenseMatrix;
DenseMatrix A( 4UL, 2UL ); // Non-initialized 4x2 matrix
typedef blaze::Row<DenseMatrix> RowType;
RowType row0( row( A, 0UL ) ); // Reference to the 0th row of A
row0[0] = 0.0; // Manual initialization of the 0th row of A
row0[1] = 0.0;
row( A, 1UL ) = 1.0; // Homogeneous initialization of the 1st row of A
row( A, 2UL ) = a; // Dense vector initialization of the 2nd row of A
row( A, 3UL ) = c; // Sparse vector initialization of the 3rd row of A
b = row0 + a; // Dense vector/dense vector addition
b = c + row( A, 1UL ); // Sparse vector/dense vector addition
b = row0 * row( A, 2UL ); // Component-wise vector multiplication
row( A, 1UL ) *= 2.0; // In-place scaling of the 1st row
b = row( A, 1UL ) * 2.0; // Scaling of the 1st row
b = 2.0 * row( A, 1UL ); // Scaling of the 1st row
row( A, 2UL ) += a; // Addition assignment
row( A, 2UL ) -= c; // Subtraction assignment
row( A, 2UL ) *= row( A, 0UL ); // Multiplication assignment
double scalar = row( A, 1UL ) * trans( c ); // Scalar/dot/inner product between two vectors
A = trans( c ) * row( A, 1UL ); // Outer product between two vectors
\endcode
// \n \section views_rows_non_fitting_storage_order Views on Matrices with Non-Fitting Storage Order
// <hr>
//
// Especially noteworthy is that row views can be created for both row-major and column-major
// matrices. Whereas the interface of a row-major matrix only allows to traverse a row directly
// and the interface of a column-major matrix only allows to traverse a column, via views it is
// possible to traverse a row of a column-major matrix or a column of a row-major matrix. For
// instance:
\code
typedef blaze::CompressedMatrix<int,columnMajor> MatrixType;
typedef blaze::Row<MatrixType> RowType;
MatrixType A( 64UL, 32UL );
// ... Resizing and initialization
   // Creating a reference to the 1st row of a column-major matrix A
RowType row1 = row( A, 1UL );
for( RowType::Iterator it=row1.begin(); it!=row1.end(); ++it ) {
// ...
}
\endcode
// However, please note that creating a row view on a matrix stored in a column-major fashion
// can result in a considerable performance decrease in comparison to a view on a matrix with
// a fitting storage orientation. This is due to the non-contiguous storage of the matrix
// elements. Therefore care has to be taken in the choice of the most suitable storage order:
\code
// Setup of two column-major matrices
CompressedMatrix<double,columnMajor> A( 128UL, 128UL );
CompressedMatrix<double,columnMajor> B( 128UL, 128UL );
// ... Resizing and initialization
// The computation of the 15th row of the multiplication between A and B ...
CompressedVector<double,rowVector> x = row( A * B, 15UL );
// ... is essentially the same as the following computation, which multiplies
// the 15th row of the column-major matrix A with B.
CompressedVector<double,rowVector> x = row( A, 15UL ) * B;
\endcode
// Although \b Blaze performs the resulting vector/matrix multiplication as efficiently as
// possible, using a row-major storage order for matrix A would result in a more efficient
// evaluation.
//
// \n Previous: \ref views_submatrices Next: \ref views_columns
*/
//*************************************************************************************************
//**Columns****************************************************************************************
/*!\page views_columns Columns
//
// \tableofcontents
//
//
// Just as rows provide a view on a specific row of a matrix, columns provide views on a specific
// column of a dense or sparse matrix. As such, columns act as a reference to a specific column.
// This reference is valid and can be used in every way any other column vector can be used as long
// as the matrix containing the column is not resized or entirely destroyed. Changes made to the
// elements (e.g. modifying values, inserting or erasing elements) are immediately visible in the
// matrix and changes made via the matrix are immediately visible in the column.
//
//
// \n \section views_columns_class The Column Class Template
// <hr>
//
// The blaze::Column class template represents a reference to a specific column of a dense or
// sparse matrix primitive. It can be included via the header file
\code
#include <blaze/math/Column.h>
\endcode
// The type of the matrix is specified via template parameter:
\code
template< typename MT >
class Column;
\endcode
// \c MT specifies the type of the matrix primitive. Column can be used with every matrix
// primitive, but does not work with any matrix expression type.
//
//
// \n \section views_colums_setup Setup of Columns
// <hr>
//
// Similar to the setup of a row, a reference to a dense or sparse column can be created very
// conveniently via the \c column() function. This reference can be treated as any other column
// vector, i.e. it can be assigned to, copied from, and be used in arithmetic operations. The
// column can either be used as an alias to grant write access to a specific column of a matrix
// primitive on the left-hand side of an assignment or to grant read-access to a specific column
// of a matrix primitive or expression on the right-hand side of an assignment. The following
// two examples demonstrate this for dense and sparse matrices:
\code
typedef blaze::DynamicVector<double,columnVector> DenseVectorType;
typedef blaze::CompressedVector<double,columnVector> SparseVectorType;
typedef blaze::DynamicMatrix<double,columnMajor> DenseMatrixType;
typedef blaze::CompressedMatrix<double,columnMajor> SparseMatrixType;
DenseVectorType x;
SparseVectorType y;
DenseMatrixType A, B;
SparseMatrixType C, D;
// ... Resizing and initialization
// Setting the 1st column of matrix A to x
blaze::Column<DenseMatrixType> col1 = column( A, 1UL );
col1 = x;
// Setting the 4th column of matrix B to y
column( B, 4UL ) = y;
// Setting x to the 2nd column of the result of the matrix multiplication
x = column( A * B, 2UL );
// Setting y to the 2nd column of the result of the sparse matrix multiplication
y = column( C * D, 2UL );
\endcode
// The \c column() function can be used on any dense or sparse matrix, including expressions, as
// illustrated by the source code example. However, columns cannot be instantiated for expression
// types, but only for matrix primitives, respectively, i.e. for matrix types that offer write
// access.
//
//
// \n \section views_columns_common_operations Common Operations
// <hr>
//
// A column view can be used like any other column vector. For instance, the current number of
// elements can be obtained via the \c size() function, the current capacity via the \c capacity()
// function, and the number of non-zero elements via the \c nonZeros() function. However, since
// columns are references to specific columns of a matrix, several operations are not possible on
// views, such as resizing and swapping. The following example shows this by means of a dense
// column view:
\code
typedef blaze::DynamicMatrix<int,columnMajor> MatrixType;
typedef blaze::Column<MatrixType> ColumnType;
MatrixType A( 42UL, 42UL );
// ... Resizing and initialization
// Creating a reference to the 2nd column of matrix A
ColumnType col2 = column( A, 2UL );
col2.size(); // Returns the number of elements in the column
col2.capacity(); // Returns the capacity of the column
col2.nonZeros(); // Returns the number of non-zero elements contained in the column
col2.resize( 84UL ); // Compilation error: Cannot resize a single column of a matrix
ColumnType col3 = column( A, 3UL );
swap( col2, col3 ); // Compilation error: Swap operation not allowed
\endcode
// \n \section views_columns_element_access Element Access
// <hr>
//
// The elements of the column can be directly accessed with the subscript operator. The numbering
// of the column elements is
\f[\left(\begin{array}{*{5}{c}}
0 & 1 & 2 & \cdots & N-1 \\
\end{array}\right),\f]
// where N is the number of rows of the referenced matrix. Alternatively, the elements of
// a column can be traversed via iterators. Just as with vectors, in case of non-const columns,
// \c begin() and \c end() return an Iterator, which allows a manipulation of the non-zero
// value, in case of a constant column a ConstIterator is returned:
\code
typedef blaze::DynamicMatrix<int,columnMajor> MatrixType;
typedef blaze::Column<MatrixType> ColumnType;
MatrixType A( 128UL, 256UL );
// ... Resizing and initialization
// Creating a reference to the 31st column of matrix A
ColumnType col31 = column( A, 31UL );
for( ColumnType::Iterator it=col31.begin(); it!=col31.end(); ++it ) {
      *it = ...;  // OK: Write access to the dense column value.
... = *it; // OK: Read access to the dense column value.
}
for( ColumnType::ConstIterator it=col31.begin(); it!=col31.end(); ++it ) {
*it = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid.
... = *it; // OK: Read access to the dense column value.
}
\endcode
\code
typedef blaze::CompressedMatrix<int,columnMajor> MatrixType;
typedef blaze::Column<MatrixType> ColumnType;
MatrixType A( 128UL, 256UL );
// ... Resizing and initialization
// Creating a reference to the 31st column of matrix A
ColumnType col31 = column( A, 31UL );
for( ColumnType::Iterator it=col31.begin(); it!=col31.end(); ++it ) {
it->value() = ...; // OK: Write access to the value of the non-zero element.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
   for( ColumnType::ConstIterator it=col31.begin(); it!=col31.end(); ++it ) {
it->value() = ...; // Compilation error: Assignment to the value via a ConstIterator is invalid.
... = it->value(); // OK: Read access to the value of the non-zero element.
it->index() = ...; // Compilation error: The index of a non-zero element cannot be changed.
... = it->index(); // OK: Read access to the index of the sparse element.
}
\endcode
// \n \section views_columns_element_insertion Element Insertion
// <hr>
//
// Inserting/accessing elements in a sparse column can be done by several alternative functions.
// The following example demonstrates all options:
\code
typedef blaze::CompressedMatrix<double,blaze::columnMajor> MatrixType;
   MatrixType A( 100UL, 10UL );  // Non-initialized 100x10 matrix
typedef blaze::Column<MatrixType> ColumnType;
ColumnType col0( column( A, 0UL ) ); // Reference to the 0th column of A
// The subscript operator provides access to all possible elements of the sparse column,
// including the zero elements. In case the subscript operator is used to access an element
// that is currently not stored in the sparse column, the element is inserted into the column.
col0[42] = 2.0;
// The second operation for inserting elements is the set() function. In case the element
// is not contained in the column it is inserted into the column, if it is already contained
// in the column its value is modified.
col0.set( 45UL, -1.2 );
// An alternative for inserting elements into the column is the insert() function. However,
// it inserts the element only in case the element is not already contained in the column.
col0.insert( 50UL, 3.7 );
// A very efficient way to add new elements to a sparse column is the append() function.
// Note that append() requires that the appended element's index is strictly larger than
// the currently largest non-zero index of the column and that the column's capacity is
// large enough to hold the new element.
col0.reserve( 10UL );
col0.append( 51UL, -2.1 );
\endcode
// \n \section views_columns_arithmetic_operations Arithmetic Operations
// <hr>
//
// Both dense and sparse columns can be used in all arithmetic operations that any other dense or
// sparse column vector can be used in. The following example gives an impression of the use of
// dense columns within arithmetic operations. All operations (addition, subtraction, multiplication,
// scaling, ...) can be performed on all possible combinations of dense and sparse columns with
// fitting element types:
\code
blaze::DynamicVector<double,blaze::columnVector> a( 2UL, 2.0 ), b;
blaze::CompressedVector<double,blaze::columnVector> c( 2UL );
c[1] = 3.0;
typedef blaze::DynamicMatrix<double,blaze::columnMajor> MatrixType;
MatrixType A( 2UL, 4UL ); // Non-initialized 2x4 matrix
   typedef blaze::Column<MatrixType>  ColumnType;
ColumnType col0( column( A, 0UL ) ); // Reference to the 0th column of A
col0[0] = 0.0; // Manual initialization of the 0th column of A
col0[1] = 0.0;
column( A, 1UL ) = 1.0; // Homogeneous initialization of the 1st column of A
column( A, 2UL ) = a; // Dense vector initialization of the 2nd column of A
column( A, 3UL ) = c; // Sparse vector initialization of the 3rd column of A
b = col0 + a; // Dense vector/dense vector addition
b = c + column( A, 1UL ); // Sparse vector/dense vector addition
b = col0 * column( A, 2UL ); // Component-wise vector multiplication
column( A, 1UL ) *= 2.0; // In-place scaling of the 1st column
b = column( A, 1UL ) * 2.0; // Scaling of the 1st column
b = 2.0 * column( A, 1UL ); // Scaling of the 1st column
column( A, 2UL ) += a; // Addition assignment
column( A, 2UL ) -= c; // Subtraction assignment
column( A, 2UL ) *= column( A, 0UL ); // Multiplication assignment
double scalar = trans( c ) * column( A, 1UL ); // Scalar/dot/inner product between two vectors
A = column( A, 1UL ) * trans( c ); // Outer product between two vectors
\endcode
// \n \section views_columns_non_fitting_storage_order Views on Matrices with Non-Fitting Storage Order
// <hr>
//
// Especially noteworthy is that column views can be created for both row-major and column-major
// matrices. Whereas the interface of a row-major matrix only allows to traverse a row directly
// and the interface of a column-major matrix only allows to traverse a column, via views it is
// possible to traverse a row of a column-major matrix or a column of a row-major matrix. For
// instance:
\code
typedef blaze::CompressedMatrix<int,rowMajor> MatrixType;
typedef blaze::Column<MatrixType> ColumnType;
MatrixType A( 64UL, 32UL );
// ... Resizing and initialization
   // Creating a reference to the 1st column of a row-major matrix A
ColumnType col1 = column( A, 1UL );
for( ColumnType::Iterator it=col1.begin(); it!=col1.end(); ++it ) {
// ...
}
\endcode
// However, please note that creating a column view on a matrix stored in a row-major fashion
// can result in a considerable performance decrease in comparison to a view on a matrix with
// a fitting storage orientation. This is due to the non-contiguous storage of the matrix
// elements. Therefore care has to be taken in the choice of the most suitable storage order:
\code
// Setup of two row-major matrices
CompressedMatrix<double,rowMajor> A( 128UL, 128UL );
CompressedMatrix<double,rowMajor> B( 128UL, 128UL );
// ... Resizing and initialization
// The computation of the 15th column of the multiplication between A and B ...
CompressedVector<double,columnVector> x = column( A * B, 15UL );
// ... is essentially the same as the following computation, which multiplies
// the 15th column of the row-major matrix B with A.
CompressedVector<double,columnVector> x = A * column( B, 15UL );
\endcode
// Although \b Blaze performs the resulting matrix/vector multiplication as efficiently as
// possible, using a column-major storage order for matrix B would result in a more efficient
// evaluation.
//
// \n Previous: \ref views_rows Next: \ref arithmetic_operations
*/
//*************************************************************************************************
//**Arithmetic Operations**************************************************************************
/*!\page arithmetic_operations Arithmetic Operations
//
// \tableofcontents
//
//
// \b Blaze provides the following arithmetic operations for vectors and matrices:
//
// <ul>
// <li> \ref addition </li>
// <li> \ref subtraction </li>
// <li> \ref scalar_multiplication </li>
// <li> \ref vector_vector_multiplication
// <ul>
// <li> \ref componentwise_multiplication </li>
// <li> \ref inner_product </li>
// <li> \ref outer_product </li>
// <li> \ref cross_product </li>
// </ul>
// </li>
// <li> \ref vector_vector_division </li>
// <li> \ref matrix_vector_multiplication </li>
// <li> \ref matrix_matrix_multiplication </li>
// </ul>
//
// \n Previous: \ref views_columns Next: \ref addition
*/
//*************************************************************************************************
//**Addition***************************************************************************************
/*!\page addition Addition
//
// The addition of vectors and matrices is as intuitive as the addition of scalar values. For both
// the vector addition as well as the matrix addition the addition operator can be used. It even
// enables the addition of dense and sparse vectors as well as the addition of dense and sparse
// matrices:
\code
blaze::DynamicVector<int> v1( 5UL ), v3;
blaze::CompressedVector<float> v2( 5UL );
// ... Initializing the vectors
   v3 = v1 + v2;  // Addition of two column vectors of different data type
\endcode
\code
blaze::DynamicMatrix<float,rowMajor> M1( 7UL, 3UL );
blaze::CompressedMatrix<size_t,columnMajor> M2( 7UL, 3UL ), M3;
// ... Initializing the matrices
M3 = M1 + M2; // Addition of a row-major and a column-major matrix of different data type
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. Also note that in case of vectors it is only possible to
// add vectors with the same transpose flag:
\code
blaze::DynamicVector<int,columnVector> v1( 5UL );
blaze::CompressedVector<float,rowVector> v2( 5UL );
v1 + v2; // Compilation error: Cannot add a column vector and a row vector
v1 + trans( v2 ); // OK: Addition of two column vectors
\endcode
// In case of matrices, however, it is possible to add row-major and column-major matrices. Note,
// however, that for performance reasons the addition of two matrices with the same storage order
// is preferable. The same argument holds for the element type: In case two vectors or matrices
// with the same element type are added, the performance can be much higher due to vectorization
// of the operation.
\code
blaze::DynamicVector<double>v1( 100UL ), v2( 100UL ), v3;
// ... Initialization of the vectors
v3 = v1 + v2; // Vectorized addition of two double precision vectors
\endcode
\code
blaze::DynamicMatrix<float> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3;
// ... Initialization of the matrices
M3 = M1 + M2; // Vectorized addition of two row-major, single precision dense matrices
\endcode
// \n Previous: \ref arithmetic_operations Next: \ref subtraction
*/
//*************************************************************************************************
//**Subtraction************************************************************************************
/*!\page subtraction Subtraction
//
// The subtraction of vectors and matrices works exactly as intuitive as the addition, but with
// the subtraction operator. For both the vector subtraction as well as the matrix subtraction
// the subtraction operator can be used. It also enables the subtraction of dense and sparse
// vectors as well as the subtraction of dense and sparse matrices:
\code
blaze::DynamicVector<int> v1( 5UL ), v3;
blaze::CompressedVector<float> v2( 5UL );
// ... Initializing the vectors
   v3 = v1 - v2;  // Subtraction of two column vectors of different data type
blaze::DynamicMatrix<float,rowMajor> M1( 7UL, 3UL );
blaze::CompressedMatrix<size_t,columnMajor> M2( 7UL, 3UL ), M3;
// ... Initializing the matrices
M3 = M1 - M2; // Subtraction of a row-major and a column-major matrix of different data type
\endcode
// Note that it is necessary that both operands have exactly the same dimensions. Violating this
// precondition results in an exception. Also note that in case of vectors it is only possible to
// subtract vectors with the same transpose flag:
\code
blaze::DynamicVector<int,columnVector> v1( 5UL );
blaze::CompressedVector<float,rowVector> v2( 5UL );
v1 - v2; // Compilation error: Cannot subtract a row vector from a column vector
v1 - trans( v2 ); // OK: Subtraction of two column vectors
\endcode
// In case of matrices, however, it is possible to subtract row-major and column-major matrices.
// Note, however, that for performance reasons the subtraction of two matrices with the same
// storage order is preferable. The same argument holds for the element type: In case two vectors
// or matrices with the same element type are subtracted, the performance can be much higher due
// to vectorization
// of the operation.
\code
blaze::DynamicVector<double>v1( 100UL ), v2( 100UL ), v3;
// ... Initialization of the vectors
v3 = v1 - v2; // Vectorized subtraction of two double precision vectors
blaze::DynamicMatrix<float> M1( 50UL, 70UL ), M2( 50UL, 70UL ), M3;
// ... Initialization of the matrices
M3 = M1 - M2; // Vectorized subtraction of two row-major, single precision dense matrices
\endcode
// \n Previous: \ref addition Next: \ref scalar_multiplication
*/
//*************************************************************************************************
//**Scalar Multiplication**************************************************************************
/*!\page scalar_multiplication Scalar Multiplication
//
// The scalar multiplication is the multiplication of a scalar value with a vector or a matrix.
// In \b Blaze it is possible to use all built-in/fundamental data types except bool as scalar
// values. Additionally, it is possible to use std::complex values with the same built-in data
// types as element type.
\code
blaze::StaticVector<int,3UL> v1{ 1, 2, 3 };
blaze::DynamicVector<double> v2 = v1 * 1.2;
blaze::CompressedVector<float> v3 = -0.3F * v1;
\endcode
\code
blaze::StaticMatrix<int,3UL,2UL> M1{ { 1, 2 }, { 3, 4 }, { 5, 6 } };
blaze::DynamicMatrix<double> M2 = M1 * 1.2;
blaze::CompressedMatrix<float> M3 = -0.3F * M1;
\endcode
// Vectors and matrices cannot be used as scalar values for scalar multiplications (see the
// following example). However, each vector and matrix provides the \c scale() function, which
// can be used to scale a vector or matrix element-wise with arbitrary scalar data types:
\code
blaze::CompressedMatrix< blaze::StaticMatrix<int,3UL,3UL> > M1;
blaze::StaticMatrix<int,3UL,3UL> scalar;
M1 * scalar; // No scalar multiplication, but matrix/matrix multiplication
M1.scale( scalar ); // Scalar multiplication
\endcode
// \n Previous: \ref subtraction Next: \ref componentwise_multiplication
*/
//*************************************************************************************************
//**Vector/Vector Multiplication*******************************************************************
/*!\page vector_vector_multiplication Vector/Vector Multiplication
//
// \n \section componentwise_multiplication Componentwise Multiplication
// <hr>
//
// Multiplying two vectors with the same transpose flag (i.e. either blaze::columnVector or
// blaze::rowVector) via the multiplication operator results in a componentwise multiplication
// of the two vectors:
\code
using blaze::DynamicVector;
using blaze::CompressedVector;
CompressedVector<int,columnVector> v1( 17UL );
DynamicVector<int,columnVector> v2( 17UL );
StaticVector<double,10UL,rowVector> v3;
DynamicVector<double,rowVector> v4( 10UL );
// ... Initialization of the vectors
CompressedVector<int,columnVector> v5( v1 * v2 ); // Componentwise multiplication of a sparse and
// a dense column vector. The result is a sparse
// column vector.
DynamicVector<double,rowVector> v6( v3 * v4 ); // Componentwise multiplication of two dense row
// vectors. The result is a dense row vector.
\endcode
// \n \section inner_product Inner Product / Scalar Product / Dot Product
// <hr>
//
// The multiplication between a row vector and a column vector results in an inner product between
// the two vectors:
\code
blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 };
blaze::DynamicVector<int,columnVector> v2{ -1, 3, -2 };
int result = v1 * v2; // Results in the value 15
\endcode
// The \c trans() function can be used to transpose a vector as necessary:
\code
blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 };
blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 };
int result = v1 * trans( v2 ); // Also results in the value 15
\endcode
// Alternatively, either the \c inner() function, the \c dot() function or the comma operator can
// be used for any combination of vectors (row or column vectors) to perform an inner product:
\code
blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 };
blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 };
// All alternatives for the inner product between a column vector and a row vector
int result1 = trans( v1 ) * trans( v2 );
int result2 = inner( v1, v2 );
int result3 = dot( v1, v2 );
int result4 = (v1,v2);
\endcode
// When using the comma operator, please note the brackets embracing the inner product expression.
// Due to the low precedence of the comma operator (lower even than the assignment operator) these
// brackets are strictly required for a correct evaluation of the inner product.
//
//
// \n \section outer_product Outer Product
// <hr>
//
// The multiplication between a column vector and a row vector results in the outer product of
// the two vectors:
\code
blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 };
blaze::DynamicVector<int,rowVector> v2{ -1, 3, -2 };
StaticMatrix<int,3UL,3UL> M1 = v1 * v2;
\endcode
// The \c trans() function can be used to transpose a vector as necessary:
\code
blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 };
blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 };
   StaticMatrix<int,3UL,3UL> M1 = trans( v1 ) * v2;
\endcode
// Alternatively, the \c outer() function can be used for any combination of vectors (row or column
// vectors) to perform an outer product:
\code
blaze::StaticVector<int,3UL,rowVector> v1{ 2, 5, -1 };
blaze::StaticVector<int,3UL,rowVector> v2{ -1, 3, -2 };
StaticMatrix<int,3UL,3UL> M1 = outer( v1, v2 ); // Outer product between two row vectors
\endcode
// \n \section cross_product Cross Product
// <hr>
//
// Two vectors with the same transpose flag can be multiplied via the cross product. The cross
// product between two vectors \f$ a \f$ and \f$ b \f$ is defined as
\f[
\left(\begin{array}{*{1}{c}}
c_0 \\
c_1 \\
c_2 \\
\end{array}\right)
=
\left(\begin{array}{*{1}{c}}
a_1 b_2 - a_2 b_1 \\
a_2 b_0 - a_0 b_2 \\
a_0 b_1 - a_1 b_0 \\
\end{array}\right).
\f]
// Due to the absence of a \f$ \times \f$ operator in the C++ language, the cross product is
// realized via the \c cross() function. Alternatively, the modulo operator (i.e. \c operator%)
// can be used in case infix notation is required:
\code
blaze::StaticVector<int,3UL,columnVector> v1{ 2, 5, -1 };
blaze::DynamicVector<int,columnVector> v2{ -1, 3, -2 };
blaze::StaticVector<int,3UL,columnVector> v3( cross( v1, v2 ) );
blaze::StaticVector<int,3UL,columnVector> v4( v1 % v2 );
\endcode
// Please note that the cross product is restricted to three dimensional (dense and sparse)
// column vectors.
//
// \n Previous: \ref scalar_multiplication Next: \ref vector_vector_division
*/
//*************************************************************************************************
//**Vector/Vector Division*************************************************************************
/*!\page vector_vector_division Vector/Vector Division
//
// \n \section componentwise_division Componentwise Division
// <hr>
//
// Dividing a vector by a dense vector with the same transpose flag (i.e. either blaze::columnVector
// or blaze::rowVector) via the division operator results in a componentwise division:
\code
using blaze::DynamicVector;
using blaze::CompressedVector;
CompressedVector<int,columnVector> v1( 17UL );
DynamicVector<int,columnVector> v2( 17UL );
StaticVector<double,10UL,rowVector> v3;
DynamicVector<double,rowVector> v4( 10UL );
// ... Initialization of the vectors
CompressedVector<int,columnVector> v5( v1 / v2 ); // Componentwise division of a sparse and a
// dense column vector. The result is a sparse
// column vector.
DynamicVector<double,rowVector> v6( v3 / v4 ); // Componentwise division of two dense row
// vectors. The result is a dense row vector.
\endcode
// Note that all values of the divisor must be non-zero and that no checks are performed to assert
// this precondition!
//
// \n Previous: \ref vector_vector_multiplication Next: \ref matrix_vector_multiplication
*/
//*************************************************************************************************
//**Matrix/Vector Multiplication*******************************************************************
/*!\page matrix_vector_multiplication Matrix/Vector Multiplication
//
// In \b Blaze matrix/vector multiplications can be as intuitively formulated as in mathematical
// textbooks. Just as in textbooks there are two different multiplications between a matrix and
// a vector: a matrix/column vector multiplication and a row vector/matrix multiplication:
\code
using blaze::StaticVector;
using blaze::DynamicVector;
using blaze::DynamicMatrix;
DynamicMatrix<int> M1( 39UL, 12UL );
StaticVector<int,12UL,columnVector> v1;
// ... Initialization of the matrix and the vector
DynamicVector<int,columnVector> v2 = M1 * v1; // Matrix/column vector multiplication
DynamicVector<int,rowVector> v3 = trans( v1 ) * M1; // Row vector/matrix multiplication
\endcode
// Note that the storage order of the matrix poses no restrictions on the operation. Also note,
// that the highest performance for a multiplication between a dense matrix and a dense vector can
// be achieved if both the matrix and the vector have the same scalar element type.
//
// \n Previous: \ref vector_vector_division Next: \ref matrix_matrix_multiplication
*/
//*************************************************************************************************
//**Matrix/Matrix Multiplication*******************************************************************
/*!\page matrix_matrix_multiplication Matrix/Matrix Multiplication
//
// \n \section schur_product Componentwise Multiplication / Schur Product
// <hr>
//
// Multiplying two matrices with the same dimensions (i.e. the same number of rows and columns)
// via the modulo operator results in a componentwise multiplication (Schur product) of the two
// matrices:
\code
using blaze::DynamicMatrix;
using blaze::CompressedMatrix;
DynamicMatrix<double> M1( 28UL, 35UL );
CompressedMatrix<float> M2( 28UL, 35UL );
// ... Initialization of the matrices
DynamicMatrix<double> M3 = M1 % M2;
\endcode
// \n \section matrix_product Matrix Product
// <hr>
//
// The matrix/matrix product can be formulated exactly as in mathematical textbooks:
\code
using blaze::DynamicMatrix;
using blaze::CompressedMatrix;
DynamicMatrix<double> M1( 45UL, 85UL );
CompressedMatrix<float> M2( 85UL, 37UL );
// ... Initialization of the matrices
DynamicMatrix<double> M3 = M1 * M2;
\endcode
// The storage order of the two matrices poses no restrictions on the operation, all variations
// are possible. It is also possible to multiply two matrices with different element type, as
// long as the element types themselves can be multiplied and added. Note however that the
// highest performance for a multiplication between two matrices can be expected for two
// matrices with the same scalar element type.
//
// In case the resulting matrix is known to be symmetric, Hermitian, lower triangular, upper
// triangular, or diagonal, the computation can be optimized by explicitly declaring the
// multiplication as symmetric, Hermitian, lower triangular, upper triangular, or diagonal by
// means of the \ref matrix_operations_declaration_operations :
\code
using blaze::DynamicMatrix;
DynamicMatrix<double> M1, M2, M3;
// ... Initialization of the square matrices
M3 = declsym ( M1 * M2 ); // Declare the result of the matrix multiplication as symmetric
M3 = declherm( M1 * M2 ); // Declare the result of the matrix multiplication as Hermitian
M3 = decllow ( M1 * M2 ); // Declare the result of the matrix multiplication as lower triangular
M3 = declupp ( M1 * M2 ); // Declare the result of the matrix multiplication as upper triangular
M3 = decldiag( M1 * M2 ); // Declare the result of the matrix multiplication as diagonal
\endcode
// Using a declaration operation on a multiplication expression can speed up the computation
// by a factor of 2. Note however that the caller of the according declaration operation takes
// full responsibility for the correctness of the declaration. Falsely declaring a multiplication
// as symmetric, Hermitian, lower triangular, upper triangular, or diagonal leads to undefined
// behavior!
//
// \n Previous: \ref matrix_vector_multiplication Next: \ref shared_memory_parallelization
*/
//*************************************************************************************************
//**Shared Memory Parallelization******************************************************************
/*!\page shared_memory_parallelization Shared Memory Parallelization
//
// For all possible operations \b Blaze tries to achieve maximum performance on a single CPU
// core. However, today's CPUs are not single core anymore, but provide several (homogeneous
// or heterogeneous) compute cores. In order to fully exploit the performance potential of a
// multicore CPU, computations have to be parallelized across all available cores of a CPU.
// For this purpose, \b Blaze provides three different shared memory parallelization techniques:
//
// - \ref openmp_parallelization
// - \ref cpp_threads_parallelization
// - \ref boost_threads_parallelization
//
// When any of the shared memory parallelization techniques is activated, all arithmetic
// operations on dense vectors and matrices (including additions, subtractions, multiplications,
// divisions, and all componentwise arithmetic operations) and most operations on sparse vectors
// and matrices are automatically run in parallel. However, in addition, \b Blaze provides means
// to enforce the serial execution of specific operations:
//
// - \ref serial_execution
//
// \n Previous: \ref matrix_matrix_multiplication Next: \ref openmp_parallelization
*/
//*************************************************************************************************
//**OpenMP Parallelization*************************************************************************
/*!\page openmp_parallelization OpenMP Parallelization
//
// \tableofcontents
//
//
// \n \section openmp_setup OpenMP Setup
// <hr>
//
// To enable the OpenMP-based parallelization, all that needs to be done is to explicitly specify
// the use of OpenMP on the command line:
\code
-fopenmp // GNU C++ compiler
-openmp // Intel C++ compiler
/openmp // Visual Studio
\endcode
// This simple action will cause the \b Blaze library to automatically try to run all operations
// in parallel with the specified number of threads.
//
// As common for OpenMP, the number of threads can be specified either via an environment variable
\code
export OMP_NUM_THREADS=4 // Unix systems
set OMP_NUM_THREADS=4 // Windows systems
\endcode
// or via an explicit call to the \c omp_set_num_threads() function:
\code
omp_set_num_threads( 4 );
\endcode
// Alternatively, the number of threads can also be specified via the \c setNumThreads() function
// provided by the \b Blaze library:
\code
blaze::setNumThreads( 4 );
\endcode
// Please note that the \b Blaze library does not limit the available number of threads. Therefore
// it is in YOUR responsibility to choose an appropriate number of threads. The best performance,
// though, can be expected if the specified number of threads matches the available number of
// cores.
//
// In order to query the number of threads used for the parallelization of operations, the
// \c getNumThreads() function can be used:
\code
const size_t threads = blaze::getNumThreads();
\endcode
// In the context of OpenMP, the function returns the maximum number of threads OpenMP will use
// within a parallel region and is therefore equivalent to the \c omp_get_max_threads() function.
//
//
// \n \section openmp_configuration OpenMP Configuration
// <hr>
//
// Note that \b Blaze is not unconditionally running an operation in parallel. In case \b Blaze
// deems the parallel execution as counterproductive for the overall performance, the operation
// is executed serially. One of the main reasons for not executing an operation in parallel is
// the size of the operands. For instance, a vector addition is only executed in parallel if the
// size of both vector operands exceeds a certain threshold. Otherwise, the performance could
// seriously decrease due to the overhead caused by the thread setup. However, in order to be
// able to adjust the \b Blaze library to a specific system, it is possible to configure these
// thresholds manually. All shared memory thresholds are contained within the configuration file
// <tt>./blaze/config/Thresholds.h</tt>.
//
// Please note that these thresholds are highly sensitive to the used system architecture and
// the shared memory parallelization technique (see also \ref cpp_threads_parallelization and
// \ref boost_threads_parallelization). Therefore the default values cannot guarantee maximum
// performance for all possible situations and configurations. They merely provide a reasonable
// standard for the current CPU generation.
//
//
// \n \section openmp_first_touch First Touch Policy
// <hr>
//
// So far the \b Blaze library does not (yet) automatically initialize dynamic memory according
// to the first touch principle. Consider for instance the following vector triad example:
\code
using blaze::columnVector;
const size_t N( 1000000UL );
blaze::DynamicVector<double,columnVector> a( N ), b( N ), c( N ), d( N );
// Initialization of the vectors b, c, and d
for( size_t i=0UL; i<N; ++i ) {
b[i] = rand<double>();
c[i] = rand<double>();
d[i] = rand<double>();
}
// Performing a vector triad
a = b + c * d;
\endcode
// If this code, which is prototypical for many OpenMP applications that have not been optimized
// for ccNUMA architectures, is run across several locality domains (LD), it will not scale
// beyond the maximum performance achievable on a single LD if the working set does not fit into
// the cache. This is because the initialization loop is executed by a single thread, writing to
// \c b, \c c, and \c d for the first time. Hence, all memory pages belonging to those arrays will
// be mapped into a single LD.
//
// As mentioned above, this problem can be solved by performing vector initialization in parallel:
\code
// ...
// Initialization of the vectors b, c, and d
#pragma omp parallel for
for( size_t i=0UL; i<N; ++i ) {
b[i] = rand<double>();
c[i] = rand<double>();
d[i] = rand<double>();
}
// ...
\endcode
// This simple modification makes a huge difference on ccNUMA in memory-bound situations (as for
// instance in all BLAS level 1 operations and partially BLAS level 2 operations). Therefore, in
// order to achieve the maximum possible performance, it is imperative to initialize the memory
// according to the later use of the data structures.
//
//
// \n \section openmp_limitations Limitations of the OpenMP Parallelization
// <hr>
//
// There are a few important limitations to the current \b Blaze OpenMP parallelization. The first
// one involves the explicit use of an OpenMP parallel region (see \ref openmp_parallel), the
// other one the OpenMP \c sections directive (see \ref openmp_sections).
//
//
// \n \subsection openmp_parallel The Parallel Directive
//
// In OpenMP threads are explicitly spawned via an OpenMP parallel directive:
\code
// Serial region, executed by a single thread
#pragma omp parallel
{
// Parallel region, executed by the specified number of threads
}
// Serial region, executed by a single thread
\endcode
// Conceptually, the specified number of threads (see \ref openmp_setup) is created every time a
// parallel directive is encountered. Therefore, from a performance point of view, it seems to be
// beneficial to use a single OpenMP parallel directive for several operations:
\code
blaze::DynamicVector<double> x, y1, y2;
blaze::DynamicMatrix<double> A, B;
#pragma omp parallel
{
y1 = A * x;
y2 = B * x;
}
\endcode
// Unfortunately, this optimization approach is not allowed within the \b Blaze library. More
// explicitly, it is not allowed to put an operation into a parallel region. The reason is that
// the entire code contained within a parallel region is executed by all threads. Although this
// appears to just comprise the contained computations, a computation (or more specifically the
// assignment of an expression to a vector or matrix) can contain additional logic that must not
// be handled by multiple threads (as for instance memory allocations, setup of temporaries, etc.).
// Therefore it is not possible to manually start a parallel region for several operations, but
// \b Blaze will spawn threads automatically, depending on the specifics of the operation at hand
// and the given operands.
//
// \n \subsection openmp_sections The Sections Directive
//
// OpenMP provides several work-sharing constructs to distribute work among threads. One of these
// constructs is the \c sections directive:
\code
blaze::DynamicVector<double> x, y1, y2;
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
#pragma omp sections
{
#pragma omp section
y1 = A * x;
#pragma omp section
y2 = B * x;
}
\endcode
// In this example, two threads are used to compute two distinct matrix/vector multiplications
// concurrently. Thereby each of the \c sections is executed by exactly one thread.
//
// Unfortunately \b Blaze does not support concurrent parallel computations and therefore this
// approach does not work with any of the \b Blaze parallelization techniques. All techniques
// (including the C++11 and Boost thread parallelizations; see \ref cpp_threads_parallelization
// and \ref boost_threads_parallelization) are optimized for the parallel computation of an
// operation within a single thread of execution. This means that \b Blaze tries to use all
// available threads to compute the result of a single operation as efficiently as possible.
// Therefore, for this special case, it is advisable to disable all \b Blaze parallelizations
// and to let \b Blaze compute all operations within a \c sections directive in serial. This can
// be done by either completely disabling the \b Blaze parallelization (see \ref serial_execution)
// or by selectively serializing all operations within a \c sections directive via the \c serial()
// function:
\code
blaze::DynamicVector<double> x, y1, y2;
blaze::DynamicMatrix<double> A, B;
// ... Resizing and initialization
#pragma omp sections
{
#pragma omp section
y1 = serial( A * x );
#pragma omp section
y2 = serial( B * x );
}
\endcode
// Please note that the use of the \c BLAZE_SERIAL_SECTION (see also \ref serial_execution) does
// NOT work in this context!
//
// \n Previous: \ref shared_memory_parallelization Next: \ref cpp_threads_parallelization
*/
//*************************************************************************************************
//**C++11 Thread Parallelization*******************************************************************
/*!\page cpp_threads_parallelization C++11 Thread Parallelization
//
// \tableofcontents
//
//
// In addition to the OpenMP-based shared memory parallelization, starting with \b Blaze 2.1,
// \b Blaze also provides a shared memory parallelization based on C++11 threads.
//
//
// \n \section cpp_threads_setup C++11 Thread Setup
// <hr>
//
// In order to enable the C++11 thread-based parallelization, first the according C++11-specific
// compiler flags have to be used and second the \c BLAZE_USE_CPP_THREADS command line argument
// has to be explicitly specified. For instance, in case of the GNU C++ and Clang compilers the
// compiler flags have to be extended by
\code
... -std=c++11 -DBLAZE_USE_CPP_THREADS ...
\endcode
// This simple action will cause the \b Blaze library to automatically try to run all operations
// in parallel with the specified number of C++11 threads. Note that in case both OpenMP and C++11
// threads are enabled on the command line, the OpenMP-based parallelization has priority and
// is preferred.
//
// The number of threads can be either specified via the environment variable \c BLAZE_NUM_THREADS
\code
export BLAZE_NUM_THREADS=4 // Unix systems
set BLAZE_NUM_THREADS=4 // Windows systems
\endcode
// or alternatively via the \c setNumThreads() function provided by the \b Blaze library:
\code
blaze::setNumThreads( 4 );
\endcode
// Please note that the \b Blaze library does not limit the available number of threads. Therefore
// it is in YOUR responsibility to choose an appropriate number of threads. The best performance,
// though, can be expected if the specified number of threads matches the available number of
// cores.
//
// In order to query the number of threads used for the parallelization of operations, the
// \c getNumThreads() function can be used:
\code
const size_t threads = blaze::getNumThreads();
\endcode
// In the context of C++11 threads, the function will return the previously specified number of
// threads.
//
//
// \n \section cpp_threads_configuration C++11 Thread Configuration
// <hr>
//
// As in case of the OpenMP-based parallelization \b Blaze is not unconditionally running an
// operation in parallel. In case \b Blaze deems the parallel execution as counterproductive for
// the overall performance, the operation is executed serially. One of the main reasons for not
// executing an operation in parallel is the size of the operands. For instance, a vector addition
// is only executed in parallel if the size of both vector operands exceeds a certain threshold.
// Otherwise, the performance could seriously decrease due to the overhead caused by the thread
// setup. However, in order to be able to adjust the \b Blaze library to a specific system, it
// is possible to configure these thresholds manually. All thresholds are contained within the
// configuration file <tt>./blaze/config/Thresholds.h</tt>.
//
// Please note that these thresholds are highly sensitive to the used system architecture and
// the shared memory parallelization technique. Therefore the default values cannot guarantee
// maximum performance for all possible situations and configurations. They merely provide a
// reasonable standard for the current CPU generation. Also note that the provided defaults
// have been determined using the OpenMP parallelization and require individual adaption for
// the C++11 thread parallelization.
//
//
// \n \section cpp_threads_known_issues Known Issues
// <hr>
//
// There is a known issue in Visual Studio 2012 and 2013 that may cause C++11 threads to hang
// if their destructor is executed after the \c main() function:
//
// http://connect.microsoft.com/VisualStudio/feedback/details/747145
//
// Unfortunately, the C++11 parallelization of the \b Blaze library is affected from this bug.
// In order to circumvent this problem, \b Blaze provides the \c shutDownThreads() function,
// which can be used to manually destroy all threads at the end of the \c main() function:
\code
int main()
{
// ... Using the C++11 thread parallelization of Blaze
shutDownThreads();
}
\endcode
// Please note that this function may only be used at the end of the \c main() function. After
// this function no further computation may be executed! Also note that this function has an
// effect for Visual Studio compilers only and doesn't need to be used with any other compiler.
//
// \n Previous: \ref openmp_parallelization Next: \ref boost_threads_parallelization
*/
//*************************************************************************************************
//**Boost Thread Parallelization*******************************************************************
/*!\page boost_threads_parallelization Boost Thread Parallelization
//
// \tableofcontents
//
//
// The third available shared memory parallelization provided with \b Blaze is based on Boost
// threads.
//
//
// \n \section boost_threads_setup Boost Thread Setup
// <hr>
//
// In order to enable the Boost thread-based parallelization, two steps have to be taken: First,
// the \c BLAZE_USE_BOOST_THREADS command line argument has to be explicitly specified during
// compilation:
\code
... -DBLAZE_USE_BOOST_THREADS ...
\endcode
// Second, the according Boost libraries have to be linked. These two simple actions will cause
// the \b Blaze library to automatically try to run all operations in parallel with the specified
// number of Boost threads. Note that the OpenMP-based and C++11 thread-based parallelizations
// have priority, i.e. are preferred in case either is enabled in combination with the Boost
// thread parallelization.
//
// The number of threads can be either specified via the environment variable \c BLAZE_NUM_THREADS
\code
export BLAZE_NUM_THREADS=4 // Unix systems
set BLAZE_NUM_THREADS=4 // Windows systems
\endcode
// or alternatively via the \c setNumThreads() function provided by the \b Blaze library:
\code
blaze::setNumThreads( 4 );
\endcode
// Please note that the \b Blaze library does not limit the available number of threads. Therefore
// it is in YOUR responsibility to choose an appropriate number of threads. The best performance,
// though, can be expected if the specified number of threads matches the available number of
// cores.
//
// In order to query the number of threads used for the parallelization of operations, the
// \c getNumThreads() function can be used:
\code
const size_t threads = blaze::getNumThreads();
\endcode
// In the context of Boost threads, the function will return the previously specified number of
// threads.
//
//
// \n \section boost_threads_configuration Boost Thread Configuration
// <hr>
//
// As in case of the other shared memory parallelizations \b Blaze is not unconditionally running
// an operation in parallel (see \ref openmp_parallelization or \ref cpp_threads_parallelization).
// All thresholds related to the Boost thread parallelization are also contained within the
// configuration file <tt>./blaze/config/Thresholds.h</tt>.
//
// Please note that these thresholds are highly sensitive to the used system architecture and
// the shared memory parallelization technique. Therefore the default values cannot guarantee
// maximum performance for all possible situations and configurations. They merely provide a
// reasonable standard for the current CPU generation. Also note that the provided defaults
// have been determined using the OpenMP parallelization and require individual adaption for
// the Boost thread parallelization.
//
// \n Previous: \ref cpp_threads_parallelization Next: \ref serial_execution
*/
//*************************************************************************************************
//**Serial Execution*******************************************************************************
/*!\page serial_execution Serial Execution
//
// Sometimes it may be necessary to enforce the serial execution of specific operations. For this
// purpose, the \b Blaze library offers three possible options: the serialization of a single
// expression via the \c serial() function, the serialization of a block of expressions via the
// \c BLAZE_SERIAL_SECTION, and the general deactivation of the parallel execution.
//
//
// \n \section serial_execution_serial_expression Option 1: Serialization of a Single Expression
// <hr>
//
// The first option is the serialization of a specific operation via the \c serial() function:
\code
blaze::DynamicMatrix<double> A, B, C;
// ... Resizing and initialization
C = serial( A + B );
\endcode
// \c serial() enforces the serial evaluation of the enclosed expression. It can be used on any
// kind of dense or sparse vector or matrix expression.
//
//
// \n \section serial_execution_serial_section Option 2: Serialization of Multiple Expressions
// <hr>
//
// The second option is the temporary and local enforcement of a serial execution via the
// \c BLAZE_SERIAL_SECTION:
\code
using blaze::rowMajor;
using blaze::columnVector;
blaze::DynamicMatrix<double,rowMajor> A;
blaze::DynamicVector<double,columnVector> b, c, d, x, y, z;
// ... Resizing and initialization
// Parallel execution
// If possible and beneficial for performance the following operation is executed in parallel.
x = A * b;
// Serial execution
// All operations executed within the serial section are guaranteed to be executed in
// serial (even if a parallel execution would be possible and/or beneficial).
BLAZE_SERIAL_SECTION
{
y = A * c;
z = A * d;
}
// Parallel execution continued
// ...
\endcode
// Within the scope of the \c BLAZE_SERIAL_SECTION, all operations are guaranteed to run in serial.
// Outside the scope of the serial section, all operations are run in parallel (if beneficial for
// the performance).
//
// Note that the \c BLAZE_SERIAL_SECTION must only be used within a single thread of execution.
// The use of the serial section within several concurrent threads will result in undefined behavior!
//
//
// \n \section serial_execution_deactivate_parallelism Option 3: Deactivation of Parallel Execution
// <hr>
//
// The third option is the general deactivation of the parallel execution (even in case OpenMP is
// enabled on the command line). This can be achieved via the \c BLAZE_USE_SHARED_MEMORY_PARALLELIZATION
// switch in the <tt>./blaze/config/SMP.h</tt> configuration file:
\code
#define BLAZE_USE_SHARED_MEMORY_PARALLELIZATION 1
\endcode
// In case the \c BLAZE_USE_SHARED_MEMORY_PARALLELIZATION switch is set to 0, the shared memory
// parallelization is deactivated altogether.
//
// \n Previous: \ref boost_threads_parallelization Next: \ref serialization
*/
//*************************************************************************************************
//**Serialization**********************************************************************************
/*!\page serialization Serialization
//
// Sometimes it is necessary to store vector and/or matrices on disk, for instance for storing
// results or for sharing specific setups with other people. The \b Blaze math serialization
// module provides the according functionality to create platform independent, portable, binary
// representations of vectors and matrices that can be used to store the \b Blaze data structures
// without loss of precision and to reliably transfer them from one machine to another.
//
// The following two pages explain how to serialize vectors and matrices:
//
// - \ref vector_serialization
// - \ref matrix_serialization
//
// \n Previous: \ref serial_execution Next: \ref vector_serialization
*/
//*************************************************************************************************
//**Vector Serialization***************************************************************************
/*!\page vector_serialization Vector Serialization
//
// The following example demonstrates the (de-)serialization of dense and sparse vectors:
\code
using blaze::columnVector;
using blaze::rowVector;
// Serialization of both vectors
{
blaze::StaticVector<double,5UL,rowVector> d;
blaze::CompressedVector<int,columnVector> s;
// ... Resizing and initialization
// Creating an archive that writes into the file "vectors.blaze"
blaze::Archive<std::ofstream> archive( "vectors.blaze" );
// Serialization of both vectors into the same archive. Note that d lies before s!
archive << d << s;
}
// Reconstitution of both vectors
{
blaze::DynamicVector<double,rowVector> d1;
blaze::DynamicVector<int,rowVector> d2;
// Creating an archive that reads from the file "vectors.blaze"
blaze::Archive<std::ifstream> archive( "vectors.blaze" );
// Reconstituting the former d vector into d1. Note that it is possible to reconstitute
// the vector into a different kind of vector (StaticVector -> DynamicVector), but that
// the type of elements has to be the same.
archive >> d1;
// Reconstituting the former s vector into d2. Note that it is even possible to reconstitute
// a sparse vector as a dense vector (also the reverse is possible) and that a column vector
// can be reconstituted as row vector (and vice versa). Note however that also in this case
// the type of elements is the same!
archive >> d2;
}
\endcode
// The (de-)serialization of vectors is not restricted to vectors of built-in data type, but can
// also be used for vectors with vector or matrix element type:
\code
// Serialization
{
blaze::CompressedVector< blaze::DynamicVector< blaze::complex<double> > > vec;
// ... Resizing and initialization
// Creating an archive that writes into the file "vector.blaze"
blaze::Archive<std::ofstream> archive( "vector.blaze" );
// Serialization of the vector into the archive
archive << vec;
}
// Deserialization
{
blaze::CompressedVector< blaze::DynamicVector< blaze::complex<double> > > vec;
// Creating an archive that reads from the file "vector.blaze"
blaze::Archive<std::ifstream> archive( "vector.blaze" );
// Reconstitution of the vector from the archive
archive >> vec;
}
\endcode
// As the examples demonstrate, the vector serialization offers an enormous flexibility. However,
// several actions result in errors:
//
// - vectors cannot be reconstituted as matrices (and vice versa)
// - the element type of the serialized and reconstituted vector must match, which means
// that on the source and destination platform the general type (signed/unsigned integral
// or floating point) and the size of the type must be exactly the same
// - when reconstituting a \c StaticVector, its size must match the size of the serialized vector
//
// In case an error is encountered during (de-)serialization, a \c std::runtime_error is
// thrown.
//
// \n Previous: \ref serialization Next: \ref matrix_serialization
*/
//*************************************************************************************************
//**Matrix Serialization***************************************************************************
/*!\page matrix_serialization Matrix Serialization
//
// The serialization of matrices works in the same manner as the serialization of vectors. The
// following example demonstrates the (de-)serialization of dense and sparse matrices:
\code
using blaze::rowMajor;
using blaze::columnMajor;
// Serialization of both matrices
{
blaze::StaticMatrix<double,3UL,5UL,rowMajor> D;
blaze::CompressedMatrix<int,columnMajor> S;
// ... Resizing and initialization
// Creating an archive that writes into the file "matrices.blaze"
blaze::Archive<std::ofstream> archive( "matrices.blaze" );
// Serialization of both matrices into the same archive. Note that D lies before S!
archive << D << S;
}
// Reconstitution of both matrices
{
blaze::DynamicMatrix<double,rowMajor> D1;
blaze::DynamicMatrix<int,rowMajor> D2;
// Creating an archive that reads from the file "matrices.blaze"
blaze::Archive<std::ifstream> archive( "matrices.blaze" );
// Reconstituting the former D matrix into D1. Note that it is possible to reconstitute
// the matrix into a different kind of matrix (StaticMatrix -> DynamicMatrix), but that
// the type of elements has to be the same.
archive >> D1;
// Reconstituting the former S matrix into D2. Note that it is even possible to reconstitute
// a sparse matrix as a dense matrix (also the reverse is possible) and that a column-major
// matrix can be reconstituted as row-major matrix (and vice versa). Note however that also
// in this case the type of elements is the same!
archive >> D2;
}
\endcode
// Note that also in case of matrices it is possible to (de-)serialize matrices with vector or
// matrix elements:
\code
// Serialization
{
blaze::CompressedMatrix< blaze::DynamicMatrix< blaze::complex<double> > > mat;
// ... Resizing and initialization
// Creating an archive that writes into the file "matrix.blaze"
blaze::Archive<std::ofstream> archive( "matrix.blaze" );
// Serialization of the matrix into the archive
archive << mat;
}
// Deserialization
{
blaze::CompressedMatrix< blaze::DynamicMatrix< blaze::complex<double> > > mat;
// Creating an archive that reads from the file "matrix.blaze"
blaze::Archive<std::ifstream> archive( "matrix.blaze" );
// Reconstitution of the matrix from the archive
archive >> mat;
}
\endcode
// Note that just as the vector serialization, the matrix serialization is restricted by a
// few important rules:
//
// - matrices cannot be reconstituted as vectors (and vice versa)
// - the element type of the serialized and reconstituted matrix must match, which means
// that on the source and destination platform the general type (signed/unsigned integral
// or floating point) and the size of the type must be exactly the same
// - when reconstituting a \c StaticMatrix, the number of rows and columns must match those
// of the serialized matrix
//
// In case an error is encountered during (de-)serialization, a \c std::runtime_error is
// thrown.
//
// \n Previous: \ref vector_serialization Next: \ref customization \n
*/
//*************************************************************************************************
//**Customization**********************************************************************************
/*!\page customization Customization
//
// Although \b Blaze tries to work out of the box for every possible setting, still it may be
// necessary to adapt the library to specific requirements. The following three pages explain
// how to customize the \b Blaze library to your own needs:
//
// - \ref configuration_files
// - \ref vector_and_matrix_customization
// - \ref error_reporting_customization
//
// \n Previous: \ref matrix_serialization Next: \ref configuration_files
*/
//*************************************************************************************************
//**Configuration Files****************************************************************************
/*!\page configuration_files Configuration Files
//
// \tableofcontents
//
//
// Sometimes it is necessary to adapt \b Blaze to specific requirements. For this purpose
// \b Blaze provides several configuration files in the <tt>./blaze/config/</tt> subdirectory,
// which provide ample opportunity to customize internal settings, behavior, and thresholds.
// This chapter explains the most important of these configuration files. For a complete
// overview of all customization opportunities, please go to the configuration files in the
// <tt>./blaze/config/</tt> subdirectory or see the complete \b Blaze documentation.
//
//
// \n \section transpose_flag Default Vector Storage
// <hr>
//
// The \b Blaze default is that all vectors are created as column vectors (if not specified
// explicitly):
\code
blaze::StaticVector<double,3UL> x; // Creates a 3-dimensional static column vector
\endcode
// The header file <tt>./blaze/config/TransposeFlag.h</tt> allows the configuration of the default
// vector storage (i.e. the default transpose flag) of all vectors within the \b Blaze library.
// The default transpose flag is specified via the \c BLAZE_DEFAULT_TRANSPOSE_FLAG macro:
\code
#define BLAZE_DEFAULT_TRANSPOSE_FLAG blaze::columnVector
\endcode
// Alternatively the default transpose flag can be specified via command line or by defining this
// symbol manually before including any \b Blaze header file:
\code
#define BLAZE_DEFAULT_TRANSPOSE_FLAG blaze::columnVector
#include <blaze/Blaze.h>
\endcode
// Valid settings for \c BLAZE_DEFAULT_TRANSPOSE_FLAG are blaze::rowVector and blaze::columnVector.
//
//
// \n \section storage_order Default Matrix Storage
// <hr>
//
// Matrices are by default created as row-major matrices:
\code
blaze::StaticMatrix<double,3UL,3UL> A; // Creates a 3x3 row-major matrix
\endcode
// The header file <tt>./blaze/config/StorageOrder.h</tt> allows the configuration of the default
// matrix storage order. Via the \c BLAZE_DEFAULT_STORAGE_ORDER macro the default storage order
// for all matrices of the \b Blaze library can be specified.
\code
#define BLAZE_DEFAULT_STORAGE_ORDER blaze::rowMajor
\endcode
// Alternatively the default storage order can be specified via command line or by defining this
// symbol manually before including any \b Blaze header file:
\code
#define BLAZE_DEFAULT_STORAGE_ORDER blaze::rowMajor
#include <blaze/Blaze.h>
\endcode
// Valid settings for \c BLAZE_DEFAULT_STORAGE_ORDER are blaze::rowMajor and blaze::columnMajor.
//
//
// \n \section blas_mode BLAS Mode
// <hr>
//
// In order to achieve maximum performance for multiplications with dense matrices, \b Blaze can
// be configured to use a BLAS library. Via the following compilation switch in the configuration
// file <tt>./blaze/config/BLAS.h</tt> BLAS can be enabled:
\code
#define BLAZE_BLAS_MODE 1
\endcode
// In case the selected BLAS library provides parallel execution, the \c BLAZE_BLAS_IS_PARALLEL
// switch should be activated to prevent \b Blaze from parallelizing on its own:
\code
#define BLAZE_BLAS_IS_PARALLEL 1
\endcode
// Alternatively, both settings can be specified via command line or by defining the symbols
// manually before including any \b Blaze header file:
\code
#define BLAZE_BLAS_MODE 1
#define BLAZE_BLAS_IS_PARALLEL 1
#include <blaze/Blaze.h>
\endcode
// In case no BLAS library is available, \b Blaze will still work and will not be reduced in
// functionality, but performance may be limited.
//
//
// \n \section cache_size Cache Size
// <hr>
//
// The optimization of several \b Blaze compute kernels depends on the cache size of the target
// architecture. By default, \b Blaze assumes a cache size of 3 MiByte. However, for optimal
// speed the exact cache size of the system should be provided via the \c BLAZE_CACHE_SIZE value
// <tt>./blaze/config/CacheSize.h</tt> configuration file:
\code
#define BLAZE_CACHE_SIZE 3145728UL
\endcode
// The cache size can also be specified via command line or by defining this symbol manually
// before including any \b Blaze header file:
\code
#define BLAZE_CACHE_SIZE 3145728UL
#include <blaze/Blaze.h>
\endcode
//
//
// \n \section vectorization Vectorization
// <hr>
//
// In order to achieve maximum performance and to exploit the compute power of a target platform
// the \b Blaze library attempts to vectorize all linear algebra operations by SSE, AVX, and/or
// AVX-512 intrinsics, depending on which instruction set is available. However, it is possible
// to disable the vectorization entirely by the compile time switch in the configuration file
// <tt>./blaze/config/Vectorization.h</tt>:
\code
#define BLAZE_USE_VECTORIZATION 1
\endcode
// It is also possible to (de-)activate vectorization via command line or by defining this symbol
// manually before including any \b Blaze header file:
\code
#define BLAZE_USE_VECTORIZATION 1
#include <blaze/Blaze.h>
\endcode
// In case the switch is set to 1, vectorization is enabled and the \b Blaze library is allowed
// to use intrinsics to speed up computations. In case the switch is set to 0, vectorization is
// disabled entirely and the \b Blaze library chooses default, non-vectorized functionality for
// the operations. Note that deactivating the vectorization may pose a severe performance
// limitation for a large number of operations!
//
//
// \n \section thresholds Thresholds
// <hr>
//
// For many computations \b Blaze distinguishes between small and large vectors and matrices.
// This separation is especially important for the parallel execution of computations, since
// the use of several threads only pays off for sufficiently large vectors and matrices.
// Additionally, it also enables \b Blaze to select kernels that are optimized for a specific
// size.
//
// In order to distinguish between small and large data structures \b Blaze provides several
// thresholds that can be adapted to the characteristics of the target platform. For instance,
// the \c DMATDVECMULT_THRESHOLD specifies the threshold between the application of the custom
// \b Blaze kernels for small dense matrix/dense vector multiplications and the BLAS kernels
// for large multiplications. All thresholds, including the thresholds for the OpenMP- and
// thread-based parallelization, are contained within the configuration file
// <tt>./blaze/config/Thresholds.h</tt>.
//
//
// \n \section padding Padding
// <hr>
//
// By default the \b Blaze library uses padding for all dense vectors and matrices in order to
// achieve maximum performance in all operations. Due to padding, the proper alignment of data
// elements can be guaranteed and the need for remainder loops is minimized. However, on the
// downside padding introduces an additional memory overhead, which can be large depending on
// the used data type.
//
// The configuration file <tt>./blaze/config/Optimizations.h</tt> provides a compile time switch
// that can be used to (de-)activate padding:
\code
#define BLAZE_USE_PADDING 1
\endcode
// Alternatively it is possible to (de-)activate padding via command line or by defining this
// symbol manually before including any \b Blaze header file:
\code
#define BLAZE_USE_PADDING 1
#include <blaze/Blaze.h>
\endcode
// If \c BLAZE_USE_PADDING is set to 1 padding is enabled for all dense vectors and matrices, if
// it is set to 0 padding is disabled. Note however that disabling padding can considerably reduce
// the performance of all dense vector and matrix operations!
//
//
// \n \section streaming Streaming (Non-Temporal Stores)
// <hr>
//
// For vectors and matrices that don't fit into the cache anymore non-temporal stores can provide
// a significant performance advantage of about 20%. However, this advantage is only in effect in
// case the memory bandwidth of the target architecture is maxed out. If the target architecture's
// memory bandwidth cannot be exhausted the use of non-temporal stores can decrease performance
// instead of increasing it.
//
// The configuration file <tt>./blaze/config/Optimizations.h</tt> provides a compile time switch
// that can be used to (de-)activate streaming:
\code
#define BLAZE_USE_STREAMING 1
\endcode
// Alternatively streaming can be (de-)activated via command line or by defining this symbol
// manually before including any \b Blaze header file:
\code
#define BLAZE_USE_STREAMING 1
#include <blaze/Blaze.h>
\endcode
// If \c BLAZE_USE_STREAMING is set to 1 streaming is enabled, if it is set to 0 streaming is
// disabled. It is recommended to consult the target architecture's white papers to decide whether
// streaming is beneficial or hurtful for performance.
//
//
// \n Previous: \ref customization Next: \ref vector_and_matrix_customization \n
*/
//*************************************************************************************************
//**Customization of Vectors and Matrices**********************************************************
/*!\page vector_and_matrix_customization Customization of Vectors and Matrices
//
// \tableofcontents
//
//
// \n \section custom_data_members Custom Data Members
// <hr>
//
// So far the \b Blaze library does not provide a lot of flexibility to customize the data
// members of existing \ref vector_types and \ref matrix_types. However, to some extent it is
// possible to customize vectors and matrices by inheritance. The following example gives an
// impression on how to create a simple variation of \ref matrix_types_custom_matrix, which
// automatically takes care of acquiring and releasing custom memory.
\code
template< typename Type // Data type of the matrix
, bool SO = defaultStorageOrder > // Storage order
class MyCustomMatrix
: public CustomMatrix< Type, unaligned, unpadded, SO >
{
public:
explicit inline MyCustomMatrix( size_t m, size_t n )
: CustomMatrix<Type,unaligned,unpadded,SO>()
, array_( new Type[m*n] )
{
this->reset( array_.get(), m, n );
}
private:
std::unique_ptr<Type[]> array_;
};
\endcode
// Please note that this is a simplified example with the intent to show the general approach.
// The number of constructors, the memory acquisition, and the kind of memory management can of
// course be adapted to specific requirements. Also, please note that since none of the \b Blaze
// vectors and matrices have virtual destructors polymorphic destruction cannot be used.
//
//
// \n \section custom_operations Custom Operations
// <hr>
//
// There are two approaches to extend \b Blaze with custom operations. First, the \c map()
// functions provide the possibility to execute componentwise custom operations on vectors and
// matrices. Second, it is possible to add customized free functions.
//
// \n \subsection custom_operations_map The map() Functions
//
// Via the unary and binary \c map() functions it is possible to execute componentwise custom
// operations on vectors and matrices. The unary \c map() function can be used to apply a custom
// operation on each single element of a dense vector or matrix or each non-zero element of a
// sparse vector or matrix. For instance, the following example demonstrates a custom square
// root computation on a dense matrix:
\code
blaze::DynamicMatrix<double> A, B;
B = map( A, []( double d ) { return std::sqrt( d ); } );
\endcode
// The binary \c map() function can be used to apply an operation pairwise to the elements of
// two dense vectors or two dense matrices. The following example demonstrates the merging of
// two matrices of double precision values into a matrix of double precision complex numbers:
\code
blaze::DynamicMatrix<double> real{ { 2.1, -4.2 }, { 1.0, 0.6 } };
blaze::DynamicMatrix<double> imag{ { 0.3, 1.4 }, { 2.9, -3.4 } };
blaze::DynamicMatrix< complex<double> > cplx;
// Creating the matrix
//    ( ( 2.1, 0.3) (-4.2,  1.4) )
// ( ( 1.0, 2.9) ( 0.6, -3.4) )
   cplx = map( real, imag, []( double r, double i ){ return complex<double>( r, i ); } );
\endcode
// These examples demonstrate the most convenient way of defining a unary custom operation by
// passing a lambda to the \c map() function. Alternatively, it is possible to pass a custom
// functor:
\code
struct Sqrt
{
double operator()( double a ) const
{
return std::sqrt( a );
}
};
B = map( A, Sqrt() );
\endcode
// In order for the functor to work in a call to \c map() it must define a function call operator,
// which accepts arguments of the type of the according vector or matrix elements.
//
// Although the operation is automatically parallelized depending on the size of the vector or
// matrix, no automatic vectorization is possible. In order to enable vectorization, a \c load()
// function can be added to the functor, which handles the vectorized computation. Depending on
// the data type this function is passed one of the following \b Blaze SIMD data types:
//
// <ul>
// <li>SIMD data types for fundamental data types
// <ul>
// <li>\c blaze::SIMDint8: Packed SIMD type for 8-bit signed integral data types</li>
// <li>\c blaze::SIMDuint8: Packed SIMD type for 8-bit unsigned integral data types</li>
// <li>\c blaze::SIMDint16: Packed SIMD type for 16-bit signed integral data types</li>
// <li>\c blaze::SIMDuint16: Packed SIMD type for 16-bit unsigned integral data types</li>
// <li>\c blaze::SIMDint32: Packed SIMD type for 32-bit signed integral data types</li>
// <li>\c blaze::SIMDuint32: Packed SIMD type for 32-bit unsigned integral data types</li>
// <li>\c blaze::SIMDint64: Packed SIMD type for 64-bit signed integral data types</li>
// <li>\c blaze::SIMDuint64: Packed SIMD type for 64-bit unsigned integral data types</li>
// <li>\c blaze::SIMDfloat: Packed SIMD type for single precision floating point data</li>
// <li>\c blaze::SIMDdouble: Packed SIMD type for double precision floating point data</li>
// </ul>
// </li>
// <li>SIMD data types for complex data types
// <ul>
// <li>\c blaze::SIMDcint8: Packed SIMD type for complex 8-bit signed integral data types</li>
// <li>\c blaze::SIMDcuint8: Packed SIMD type for complex 8-bit unsigned integral data types</li>
// <li>\c blaze::SIMDcint16: Packed SIMD type for complex 16-bit signed integral data types</li>
// <li>\c blaze::SIMDcuint16: Packed SIMD type for complex 16-bit unsigned integral data types</li>
// <li>\c blaze::SIMDcint32: Packed SIMD type for complex 32-bit signed integral data types</li>
// <li>\c blaze::SIMDcuint32: Packed SIMD type for complex 32-bit unsigned integral data types</li>
// <li>\c blaze::SIMDcint64: Packed SIMD type for complex 64-bit signed integral data types</li>
// <li>\c blaze::SIMDcuint64: Packed SIMD type for complex 64-bit unsigned integral data types</li>
// <li>\c blaze::SIMDcfloat: Packed SIMD type for complex single precision floating point data</li>
// <li>\c blaze::SIMDcdouble: Packed SIMD type for complex double precision floating point data</li>
// </ul>
// </li>
// </ul>
//
// All SIMD types provide the \c value data member for a direct access to the underlying intrinsic
// data element. In the following example, this intrinsic element is passed to the AVX function
// \c _mm256_sqrt_pd():
\code
struct Sqrt
{
double operator()( double a ) const
{
return std::sqrt( a );
}
SIMDdouble load( const SIMDdouble& a ) const
{
return _mm256_sqrt_pd( a.value );
}
};
\endcode
// In this example, whenever vectorization is generally applicable, the \c load() function is
// called instead of the function call operator for as long as the number of remaining elements
// is larger-or-equal to the width of the packed SIMD type. In all other cases (which also
// includes peel-off and remainder loops) the scalar operation is used.
//
// Please note that this example has two drawbacks: First, it will only compile in case the
// intrinsic \c _mm256_sqrt_pd() function is available (i.e. when AVX is active). Second, the
// availability of AVX is not taken into account. The first drawback can be alleviated by making
// the \c load() function a function template. The second drawback can be dealt with by adding a
// \c simdEnabled() function template to the functor:
\code
struct Sqrt
{
double operator()( double a ) const
{
return std::sqrt( a );
}
template< typename T >
T load( const T& a ) const
{
return _mm256_sqrt_pd( a.value );
}
template< typename T >
static constexpr bool simdEnabled() {
#if defined(__AVX__)
return true;
#else
return false;
#endif
}
};
\endcode
// The \c simdEnabled() function must be a \c static, \c constexpr function and must return whether
// or not vectorization is available for the given data type \c T. In case the function returns
// \c true, the \c load() function is used for a vectorized evaluation, in case the function
// returns \c false, \c load() is not called.
//
// Note that this is a simplified example that is only working when used for dense vectors and
// matrices with double precision floating point elements. The following code shows the complete
// implementation of the according functor that is used within the \b Blaze library. The \b Blaze
// \c Sqrt functor is working for all data types that are providing a square root operation:
\code
namespace blaze {
struct Sqrt
{
template< typename T >
BLAZE_ALWAYS_INLINE auto operator()( const T& a ) const
{
return sqrt( a );
}
template< typename T >
static constexpr bool simdEnabled() { return HasSIMDSqrt<T>::value; }
template< typename T >
BLAZE_ALWAYS_INLINE auto load( const T& a ) const
{
BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T );
return sqrt( a );
}
};
} // namespace blaze
\endcode
// The same approach can be taken for binary custom operations. The following code demonstrates
// the \c Min functor of the \b Blaze library, which is working for all data types that provide
// a \c min() operation:
\code
struct Min
{
explicit inline Min()
{}
template< typename T1, typename T2 >
BLAZE_ALWAYS_INLINE decltype(auto) operator()( const T1& a, const T2& b ) const
{
return min( a, b );
}
template< typename T1, typename T2 >
static constexpr bool simdEnabled() { return HasSIMDMin<T1,T2>::value; }
template< typename T1, typename T2 >
BLAZE_ALWAYS_INLINE decltype(auto) load( const T1& a, const T2& b ) const
{
BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T1 );
BLAZE_CONSTRAINT_MUST_BE_SIMD_PACK( T2 );
return min( a, b );
}
};
\endcode
// For more information on the available \b Blaze SIMD data types and functions, please see the
// SIMD module in the complete \b Blaze documentation.
//
// \n \subsection custom_operations_free_functions Free Functions
//
// In order to extend \b Blaze with new functionality it is possible to add free functions. Free
// functions can be used either as wrappers around calls to the map() function or to implement
// general, non-componentwise operations. The following two examples will demonstrate both ideas.
//
// The first example shows the \c setToZero() function, which resets a sparse matrix to zero
// without affecting the sparsity pattern. It is implemented as a convenience wrapper around
// the map() function:
\code
template< typename MT // Type of the sparse matrix
, bool SO > // Storage order
void setToZero( blaze::SparseMatrix<MT,SO>& mat )
{
(~mat) = blaze::map( ~mat, []( int ){ return 0; } );
}
\endcode
// The blaze::SparseMatrix class template is the base class for all kinds of sparse matrices and
// provides an abstraction from the actual type \c MT of the sparse matrix. However, due to the
// <a href="https://en.wikipedia.org/wiki/Curiously_recurring_template_pattern">Curiously Recurring Template Pattern (CRTP)</a>
// it also enables a conversion back to the actual type. This downcast is performed via the tilde
// operator (i.e. \c operator~()). The template parameter \c SO represents the storage order
// (blaze::rowMajor or blaze::columnMajor) of the matrix.
//
// The second example shows the \c countZeros() function, which counts the number of values, which
// are exactly zero, in a dense, row-major matrix:
\code
template< typename MT >
size_t countZeros( blaze::DenseMatrix<MT,rowMajor>& mat )
{
const size_t M( (~mat).rows() );
const size_t N( (~mat).columns() );
size_t count( 0UL );
for( size_t i=0UL; i<M; ++i ) {
for( size_t j=0UL; j<N; ++j ) {
if( blaze::isDefault<strict>( (~mat)(i,j) ) )
++count;
}
}
return count;
}
\endcode
// The blaze::DenseMatrix class template is the base class for all kinds of dense matrices. Again,
// it is possible to perform the conversion to the actual type via the tilde operator.
//
// The following two listings show the declarations of all vector and matrix base classes, which
// can be used for custom free functions:
\code
template< typename VT // Concrete type of the dense or sparse vector
, bool TF > // Transpose flag (blaze::columnVector or blaze::rowVector)
class Vector;
template< typename VT // Concrete type of the dense vector
, bool TF > // Transpose flag (blaze::columnVector or blaze::rowVector)
class DenseVector;
template< typename VT // Concrete type of the sparse vector
, bool TF > // Transpose flag (blaze::columnVector or blaze::rowVector)
class SparseVector;
\endcode
\code
template< typename MT // Concrete type of the dense or sparse matrix
, bool SO > // Storage order (blaze::rowMajor or blaze::columnMajor)
class Matrix;
template< typename MT // Concrete type of the dense matrix
, bool SO > // Storage order (blaze::rowMajor or blaze::columnMajor)
class DenseMatrix;
template< typename MT // Concrete type of the sparse matrix
, bool SO > // Storage order (blaze::rowMajor or blaze::columnMajor)
class SparseMatrix;
\endcode
// \n \section custom_data_types Custom Data Types
// <hr>
//
// The \b Blaze library tries hard to make the use of custom data types as convenient, easy and
// intuitive as possible. However, unfortunately it is not possible to meet the requirements of
// all possible data types. Thus it might be necessary to provide \b Blaze with some additional
// information about the data type. The following sections give an overview of the necessary steps
// to enable the use of the hypothetical custom data type \c custom::double_t for vector and
// matrix operations. For example:
\code
blaze::DynamicVector<custom::double_t> a, b, c;
// ... Resizing and initialization
c = a + b;
\endcode
// The \b Blaze library assumes that the \c custom::double_t data type provides \c operator+()
// for additions, \c operator-() for subtractions, \c operator*() for multiplications and
// \c operator/() for divisions. If any of these functions is missing it is necessary to implement
// the operator to perform the according operation. For this example we assume that the custom
// data type provides the four following functions instead of operators:
\code
namespace custom {
   double_t add ( const double_t& a, const double_t& b );
   double_t sub ( const double_t& a, const double_t& b );
   double_t mult( const double_t& a, const double_t& b );
   double_t div ( const double_t& a, const double_t& b );
} // namespace custom
\endcode
// The following implementations will satisfy the requirements of the \b Blaze library:
\code
inline custom::double_t operator+( const custom::double_t& a, const custom::double_t& b )
{
return add( a, b );
}
inline custom::double_t operator-( const custom::double_t& a, const custom::double_t& b )
{
return sub( a, b );
}
inline custom::double_t operator*( const custom::double_t& a, const custom::double_t& b )
{
return mult( a, b );
}
inline custom::double_t operator/( const custom::double_t& a, const custom::double_t& b )
{
return div( a, b );
}
\endcode
// \b Blaze will use all the information provided with these functions (for instance the return
// type) to properly handle the operations. In the rare case that the return type cannot be
// automatically determined from the operator it might be additionally necessary to provide a
// specialization of the following four \b Blaze class templates:
\code
namespace blaze {
template<>
struct AddTrait<custom::double_t,custom::double_t> {
typedef custom::double_t Type;
};
template<>
struct SubTrait<custom::double_t,custom::double_t> {
typedef custom::double_t Type;
};
template<>
struct MultTrait<custom::double_t,custom::double_t> {
typedef custom::double_t Type;
};
template<>
struct DivTrait<custom::double_t,custom::double_t> {
typedef custom::double_t Type;
};
} // namespace blaze
\endcode
// The same steps are necessary if several custom data types need to be combined (as for instance
// \c custom::double_t and \c custom::float_t). Note that in this case both permutations need to
// be taken into account:
\code
custom::double_t operator+( const custom::double_t& a, const custom::float_t& b );
custom::double_t operator+( const custom::float_t& a, const custom::double_t& b );
// ...
\endcode
// Please note that only built-in data types apply for vectorization and thus custom data types
// cannot achieve maximum performance!
//
//
// \n Previous: \ref configuration_files  Next: \ref error_reporting_customization \n
*/
//*************************************************************************************************
//**Customization of the Error Reporting Mechanism*************************************************
/*!\page error_reporting_customization Customization of the Error Reporting Mechanism
//
// \tableofcontents
//
//
// \n \section error_reporting_background Background
// <hr>
//
// The default way of \b Blaze to report errors of any kind is to throw a standard exception.
// However, although in general this approach works well, in certain environments and under
// special circumstances exceptions may not be the mechanism of choice and a different error
// reporting mechanism may be desirable. For this reason, \b Blaze provides several macros,
// which enable the customization of the error reporting mechanism. Via these macros it is
// possible to replace the standard exceptions by some other exception type or a completely
// different approach to report errors.
//
//
// \n \section error_reporting_general_customization Customization of the Reporting Mechanism
// <hr>
//
// In some cases it might be necessary to adapt the entire error reporting mechanism and to
// replace it by some other means to signal failure. The primary macro for this purpose is the
// \c BLAZE_THROW macro:
\code
#define BLAZE_THROW( EXCEPTION ) \
throw EXCEPTION
\endcode
// This macro represents the default mechanism of the \b Blaze library to report errors of any
// kind. In order to customize the error reporting mechanism all that needs to be done is to
// define the macro prior to including any \b Blaze header file. This will cause the \b Blaze
// specific mechanism to be overridden. The following example demonstrates this by replacing
// exceptions by a call to a \c log() function and a direct call to abort:
\code
#define BLAZE_THROW( EXCEPTION ) \
log( "..." ); \
abort()
#include <blaze/Blaze.h>
\endcode
// Doing this will trigger a call to \c log() and an abort instead of throwing an exception
// whenever an error (such as an invalid argument) is detected.
//
// \note It is possible to execute several statements instead of executing a single statement to
// throw an exception. Also note that it is recommended to define the macro such that a subsequent
// semicolon is required!
//
// \warning This macro is provided with the intention to assist in adapting \b Blaze to special
// conditions and environments. However, the customization of the error reporting mechanism via
// this macro can have a significant effect on the library. Thus be advised to use the macro
// with due care!
//
//
// \n \section error_reporting_exception_customization Customization of the Type of Exceptions
// <hr>
//
// In addition to the customization of the entire error reporting mechanism it is also possible
// to customize the type of exceptions being thrown. This can be achieved by customizing any
// number of the following macros:
\code
#define BLAZE_THROW_BAD_ALLOC \
BLAZE_THROW( std::bad_alloc() )
#define BLAZE_THROW_LOGIC_ERROR( MESSAGE ) \
BLAZE_THROW( std::logic_error( MESSAGE ) )
#define BLAZE_THROW_INVALID_ARGUMENT( MESSAGE ) \
BLAZE_THROW( std::invalid_argument( MESSAGE ) )
#define BLAZE_THROW_LENGTH_ERROR( MESSAGE ) \
BLAZE_THROW( std::length_error( MESSAGE ) )
#define BLAZE_THROW_OUT_OF_RANGE( MESSAGE ) \
BLAZE_THROW( std::out_of_range( MESSAGE ) )
#define BLAZE_THROW_RUNTIME_ERROR( MESSAGE ) \
BLAZE_THROW( std::runtime_error( MESSAGE ) )
\endcode
// In order to customize the type of exception the according macro has to be defined prior to
// including any \b Blaze header file. This will override the \b Blaze default behavior. The
// following example demonstrates this by replacing \c std::invalid_argument by a custom
// exception type:
\code
class InvalidArgument
{
public:
InvalidArgument();
explicit InvalidArgument( const std::string& message );
// ...
};
#define BLAZE_THROW_INVALID_ARGUMENT( MESSAGE ) \
BLAZE_THROW( InvalidArgument( MESSAGE ) )
#include <blaze/Blaze.h>
\endcode
// By manually defining the macro, an \c InvalidArgument exception is thrown instead of a
// \c std::invalid_argument exception. Note that it is recommended to define the macro such
// that a subsequent semicolon is required!
//
// \warning These macros are provided with the intention to assist in adapting \b Blaze to
// special conditions and environments. However, the customization of the type of an exception
// via this macro may have an effect on the library. Thus be advised to use the macro with due
// care!
//
//
// \n \section error_reporting_special_errors Customization of Special Errors
// <hr>
//
// Last but not least it is possible to customize the error reporting for special kinds of errors.
// This can be achieved by customizing any number of the following macros:
\code
#define BLAZE_THROW_DIVISION_BY_ZERO( MESSAGE ) \
BLAZE_THROW_RUNTIME_ERROR( MESSAGE )
#define BLAZE_THROW_LAPACK_ERROR( MESSAGE ) \
BLAZE_THROW_RUNTIME_ERROR( MESSAGE )
\endcode
// As explained in the previous sections, in order to customize the handling of special errors
// the according macro has to be defined prior to including any \b Blaze header file. This will
// override the \b Blaze default behavior.
//
//
// \n Previous: \ref vector_and_matrix_customization Next: \ref blas_functions \n
*/
//*************************************************************************************************
//**BLAS Functions*********************************************************************************
/*!\page blas_functions BLAS Functions
//
// \tableofcontents
//
//
// For vector/vector, matrix/vector and matrix/matrix multiplications with large dense matrices
// \b Blaze relies on the efficiency of BLAS libraries. For this purpose, \b Blaze implements
// several convenient C++ wrapper functions for several BLAS functions. The following sections
// give a complete overview of all available BLAS level 1, 2 and 3 functions.
//
//
// \n \section blas_level_1 BLAS Level 1
// <hr>
//
// \subsection blas_level_1_dotu Dot Product (dotu)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// dot product of two dense vectors (\c sdot(), \c ddot(), \c cdotu_sub(), and \c zdotu_sub()):
\code
namespace blaze {
float dotu( int n, const float* x, int incX, const float* y, int incY );
double dotu( int n, const double* x, int incX, const double* y, int incY );
complex<float> dotu( int n, const complex<float>* x, int incX,
const complex<float>* y, int incY );
complex<double> dotu( int n, const complex<double>* x, int incX,
const complex<double>* y, int incY );
template< typename VT1, bool TF1, typename VT2, bool TF2 >
ElementType_<VT1> dotu( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y );
} // namespace blaze
\endcode
// \subsection blas_level_1_dotc Complex Conjugate Dot Product (dotc)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// complex conjugate dot product of two dense vectors (\c sdot(), \c ddot(), \c cdotc_sub(),
// and \c zdotc_sub()):
\code
namespace blaze {
float dotc( int n, const float* x, int incX, const float* y, int incY );
double dotc( int n, const double* x, int incX, const double* y, int incY );
complex<float> dotc( int n, const complex<float>* x, int incX,
const complex<float>* y, int incY );
complex<double> dotc( int n, const complex<double>* x, int incX,
const complex<double>* y, int incY );
template< typename VT1, bool TF1, typename VT2, bool TF2 >
ElementType_<VT1> dotc( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y );
} // namespace blaze
\endcode
// \subsection blas_level_1_axpy Axpy Product (axpy)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// axpy product of two dense vectors (\c saxpy(), \c daxpy(), \c caxpy(), and \c zaxpy()):
\code
namespace blaze {
void axpy( int n, float alpha, const float* x, int incX, float* y, int incY );
void axpy( int n, double alpha, const double* x, int incX, double* y, int incY );
void axpy( int n, complex<float> alpha, const complex<float>* x,
int incX, complex<float>* y, int incY );
void axpy( int n, complex<double> alpha, const complex<double>* x,
int incX, complex<double>* y, int incY );
template< typename VT1, bool TF1, typename VT2, bool TF2, typename ST >
void axpy( const DenseVector<VT1,TF1>& x, const DenseVector<VT2,TF2>& y, ST alpha );
} // namespace blaze
\endcode
// \n \section blas_level_2 BLAS Level 2
// <hr>
//
// \subsection blas_level_2_gemv General Matrix/Vector Multiplication (gemv)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// general matrix/vector multiplication (\c sgemv(), \c dgemv(), \c cgemv(), and \c zgemv()):
\code
namespace blaze {
void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, float alpha,
const float* A, int lda, const float* x, int incX,
float beta, float* y, int incY );
void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, double alpha,
const double* A, int lda, const double* x, int incX,
double beta, double* y, int incY );
void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, complex<float> alpha,
const complex<float>* A, int lda, const complex<float>* x, int incX,
complex<float> beta, complex<float>* y, int incY );
void gemv( CBLAS_ORDER layout, CBLAS_TRANSPOSE transA, int m, int n, complex<double> alpha,
const complex<double>* A, int lda, const complex<double>* x, int incX,
complex<double> beta, complex<double>* y, int incY );
template< typename VT1, typename MT1, bool SO, typename VT2, typename ST >
void gemv( DenseVector<VT1,false>& y, const DenseMatrix<MT1,SO>& A,
const DenseVector<VT2,false>& x, ST alpha, ST beta );
template< typename VT1, typename VT2, typename MT1, bool SO, typename ST >
void gemv( DenseVector<VT1,true>& y, const DenseVector<VT2,true>& x,
const DenseMatrix<MT1,SO>& A, ST alpha, ST beta );
} // namespace blaze
\endcode
// \n \subsection blas_level_2_trmv Triangular Matrix/Vector Multiplication (trmv)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// matrix/vector multiplication with a triangular matrix (\c strmv(), \c dtrmv(), \c ctrmv(),
// and \c ztrmv()):
\code
namespace blaze {
void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag,
int n, const float* A, int lda, float* x, int incX );
void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag,
int n, const double* A, int lda, double* x, int incX );
void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag,
int n, const complex<float>* A, int lda, complex<float>* x, int incX );
void trmv( CBLAS_ORDER order, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA, CBLAS_DIAG diag,
int n, const complex<double>* A, int lda, complex<double>* x, int incX );
template< typename VT, typename MT, bool SO >
void trmv( DenseVector<VT,false>& x, const DenseMatrix<MT,SO>& A, CBLAS_UPLO uplo );
template< typename VT, typename MT, bool SO >
void trmv( DenseVector<VT,true>& x, const DenseMatrix<MT,SO>& A, CBLAS_UPLO uplo );
} // namespace blaze
\endcode
// \n \section blas_level_3 BLAS Level 3
// <hr>
//
// \subsection blas_level_3_gemm General Matrix/Matrix Multiplication (gemm)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// general matrix/matrix multiplication (\c sgemm(), \c dgemm(), \c cgemm(), and \c zgemm()):
\code
namespace blaze {
void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB,
int m, int n, int k, float alpha, const float* A, int lda,
const float* B, int ldb, float beta, float* C, int ldc );
   void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB,
              int m, int n, int k, double alpha, const double* A, int lda,
              const double* B, int ldb, double beta, double* C, int ldc );
   void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB,
              int m, int n, int k, complex<float> alpha, const complex<float>* A, int lda,
              const complex<float>* B, int ldb, complex<float> beta, complex<float>* C, int ldc );
   void gemm( CBLAS_ORDER order, CBLAS_TRANSPOSE transA, CBLAS_TRANSPOSE transB,
              int m, int n, int k, complex<double> alpha, const complex<double>* A, int lda,
              const complex<double>* B, int ldb, complex<double> beta, complex<double>* C, int ldc );
template< typename MT1, bool SO1, typename MT2, bool SO2, typename MT3, bool SO3, typename ST >
void gemm( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A,
const DenseMatrix<MT3,SO3>& B, ST alpha, ST beta );
} // namespace blaze
\endcode
// \n \subsection blas_level_3_trmm Triangular Matrix/Matrix Multiplication (trmm)
//
// The following wrapper functions provide a generic interface for the BLAS functions for the
// matrix/matrix multiplication with a triangular matrix (\c strmm(), \c dtrmm(), \c ctrmm(), and
// \c ztrmm()):
\code
namespace blaze {
void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, int m, int n, float alpha, const float* A,
int lda, float* B, int ldb );
void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, int m, int n, double alpha, const double* A,
int lda, double* B, int ldb );
void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, int m, int n, complex<float> alpha, const complex<float>* A,
int lda, complex<float>* B, int ldb );
void trmm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, int m, int n, complex<double> alpha, const complex<double>* A,
int lda, complex<double>* B, int ldb );
template< typename MT1, bool SO1, typename MT2, bool SO2, typename ST >
void trmm( DenseMatrix<MT1,SO1>& B, const DenseMatrix<MT2,SO2>& A,
CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha );
} // namespace blaze
\endcode
// \n \subsection blas_level_3_trsm Triangular System Solver (trsm)
//
// The following wrapper functions provide a generic interface for the BLAS functions for solving
// a triangular system of equations (\c strsm(), \c dtrsm(), \c ctrsm(), and \c ztrsm()):
\code
namespace blaze {
void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, int m, int n, float alpha, const float* A,
int lda, float* B, int ldb );
void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, int m, int n, double alpha, const double* A,
int lda, double* B, int ldb );
void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, int m, int n, complex<float> alpha, const complex<float>* A,
int lda, complex<float>* B, int ldb );
void trsm( CBLAS_ORDER order, CBLAS_SIDE side, CBLAS_UPLO uplo, CBLAS_TRANSPOSE transA,
CBLAS_DIAG diag, int m, int n, complex<double> alpha, const complex<double>* A,
int lda, complex<double>* B, int ldb );
template< typename MT, bool SO, typename VT, bool TF, typename ST >
void trsm( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b,
CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha );
template< typename MT1, bool SO1, typename MT2, bool SO2, typename ST >
void trsm( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B,
CBLAS_SIDE side, CBLAS_UPLO uplo, ST alpha );
} // namespace blaze
\endcode
// \n Previous: \ref error_reporting_customization Next: \ref lapack_functions \n
*/
//*************************************************************************************************
//**LAPACK Functions*******************************************************************************
/*!\page lapack_functions LAPACK Functions
//
// \tableofcontents
//
//
// \n \section lapack_introction Introduction
// <hr>
//
// The \b Blaze library makes extensive use of the LAPACK functionality for various compute tasks
// (including the decomposition, inversion and the computation of the determinant of dense matrices).
// For this purpose, \b Blaze implements several convenient C++ wrapper functions for all required
// LAPACK functions. The following sections give a complete overview of all available LAPACK wrapper
// functions. For more details on the individual LAPACK functions see the \b Blaze function
// documentation or the LAPACK online documentation browser:
//
// http://www.netlib.org/lapack/explore-html/
//
// Most of the wrapper functions are implemented as thin wrappers around LAPACK functions. They
// provide the parameters of the original LAPACK functions and thus provide maximum flexibility:
\code
constexpr size_t N( 100UL );
blaze::DynamicMatrix<double,blaze::columnMajor> A( N, N );
// ... Initializing the matrix
const int m ( numeric_cast<int>( A.rows() ) ); // == N
const int n ( numeric_cast<int>( A.columns() ) ); // == N
const int lda ( numeric_cast<int>( A.spacing() ) ); // >= N
const int lwork( n*lda );
const std::unique_ptr<int[]> ipiv( new int[N] ); // No initialization required
   const std::unique_ptr<double[]> work( new double[lwork] );  // No initialization required
int info( 0 );
getrf( m, n, A.data(), lda, ipiv.get(), &info ); // Reports failure via 'info'
getri( n, A.data(), lda, ipiv.get(), work.get(), lwork, &info ); // Reports failure via 'info'
\endcode
// Additionally, \b Blaze provides wrappers that provide a higher level of abstraction. These
// wrappers provide a maximum of convenience:
\code
constexpr size_t N( 100UL );
blaze::DynamicMatrix<double,blaze::columnMajor> A( N, N );
// ... Initializing the matrix
const std::unique_ptr<int[]> ipiv( new int[N] ); // No initialization required
getrf( A, ipiv.get() ); // Cannot fail
getri( A, ipiv.get() ); // Reports failure via exception
\endcode
// \note All functions only work for general, non-adapted matrices with \c float, \c double,
// \c complex<float>, or \c complex<double> element type. The attempt to call the function with
// adaptors or matrices of any other element type results in a compile time error!
//
// \note All functions can only be used if a fitting LAPACK library is available and linked to
// the final executable. Otherwise a call to this function will result in a linker error.
//
// \note For performance reasons all functions do only provide the basic exception safety guarantee,
// i.e. in case an exception is thrown the given matrix may already have been modified.
//
//
// \n \section lapack_decomposition Matrix Decomposition
// <hr>
//
// The following functions decompose/factorize the given dense matrix. Based on this decomposition
// the matrix can be inverted or used to solve a linear system of equations.
//
//
// \n \subsection lapack_lu_decomposition LU Decomposition
//
// The following functions provide an interface for the LAPACK functions \c sgetrf(), \c dgetrf(),
// \c cgetrf(), and \c zgetrf(), which compute the LU decomposition for the given general matrix:
\code
namespace blaze {
void getrf( int m, int n, float* A, int lda, int* ipiv, int* info );
void getrf( int m, int n, double* A, int lda, int* ipiv, int* info );
void getrf( int m, int n, complex<float>* A, int lda, int* ipiv, int* info );
void getrf( int m, int n, complex<double>* A, int lda, int* ipiv, int* info );
template< typename MT, bool SO >
void getrf( DenseMatrix<MT,SO>& A, int* ipiv );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = P \cdot L \cdot U, \f]\n
// where \c P is a permutation matrix, \c L is a lower unitriangular matrix, and \c U is an upper
// triangular matrix. The resulting decomposition is stored within \a A: In case of a column-major
// matrix, \c L is stored in the lower part of \a A and \c U is stored in the upper part. The unit
// diagonal elements of \c L are not stored. In case \a A is a row-major matrix the result is
// transposed.
//
// \note The LU decomposition will never fail, even for singular matrices. However, in case of a
// singular matrix the resulting decomposition cannot be used for a matrix inversion or solving
// a linear system of equations.
//
//
// \n \subsection lapack_ldlt_decomposition LDLT Decomposition
//
// The following functions provide an interface for the LAPACK functions \c ssytrf(), \c dsytrf(),
// \c csytrf(), and \c zsytrf(), which compute the LDLT (Bunch-Kaufman) decomposition for the given
// symmetric indefinite matrix:
\code
namespace blaze {
void sytrf( char uplo, int n, float* A, int lda, int* ipiv, float* work, int lwork, int* info );
void sytrf( char uplo, int n, double* A, int lda, int* ipiv, double* work, int lwork, int* info );
void sytrf( char uplo, int n, complex<float>* A, int lda, int* ipiv, complex<float>* work, int lwork, int* info );
void sytrf( char uplo, int n, complex<double>* A, int lda, int* ipiv, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void sytrf( DenseMatrix<MT,SO>& A, char uplo, int* ipiv );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = U D U^{T} \texttt{ (if uplo = 'U'), or }
A = L D L^{T} \texttt{ (if uplo = 'L'), } \f]
// where \c U (or \c L) is a product of permutation and unit upper (lower) triangular matrices,
// and \c D is symmetric and block diagonal with 1-by-1 and 2-by-2 diagonal blocks. The resulting
// decomposition is stored within \a A: In case \a uplo is set to \c 'L' the result is stored in
// the lower part of the matrix and the upper part remains untouched, in case \a uplo is set to
// \c 'U' the result is stored in the upper part and the lower part remains untouched.
//
// \note The Bunch-Kaufman decomposition will never fail, even for singular matrices. However, in
// case of a singular matrix the resulting decomposition cannot be used for a matrix inversion or
// solving a linear system of equations.
//
//
// \n \subsection lapack_ldlh_decomposition LDLH Decomposition
//
// The following functions provide an interface for the LAPACK functions \c chetrf() and \c zhetrf(),
// which compute the LDLH (Bunch-Kaufman) decomposition for the given Hermitian indefinite matrix:
\code
namespace blaze {
void hetrf( char uplo, int n, complex<float>* A, int lda, int* ipiv, complex<float>* work, int lwork, int* info );
void hetrf( char uplo, int n, complex<double>* A, int lda, int* ipiv, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void hetrf( DenseMatrix<MT,SO>& A, char uplo, int* ipiv );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = U D U^{H} \texttt{ (if uplo = 'U'), or }
A = L D L^{H} \texttt{ (if uplo = 'L'), } \f]
// where \c U (or \c L) is a product of permutation and unit upper (lower) triangular matrices,
// and \c D is Hermitian and block diagonal with 1-by-1 and 2-by-2 diagonal blocks. The resulting
// decomposition is stored within \a A: In case \a uplo is set to \c 'L' the result is stored in
// the lower part of the matrix and the upper part remains untouched, in case \a uplo is set to
// \c 'U' the result is stored in the upper part and the lower part remains untouched.
//
// \note The Bunch-Kaufman decomposition will never fail, even for singular matrices. However, in
// case of a singular matrix the resulting decomposition cannot be used for a matrix inversion or
// solving a linear system of equations.
//
//
// \n \subsection lapack_llh_decomposition Cholesky Decomposition
//
// The following functions provide an interface for the LAPACK functions \c spotrf(), \c dpotrf(),
// \c cpotrf(), and \c zpotrf(), which compute the Cholesky (LLH) decomposition for the given
// positive definite matrix:
\code
namespace blaze {
void potrf( char uplo, int n, float* A, int lda, int* info );
void potrf( char uplo, int n, double* A, int lda, int* info );
void potrf( char uplo, int n, complex<float>* A, int lda, int* info );
void potrf( char uplo, int n, complex<double>* A, int lda, int* info );
template< typename MT, bool SO >
void potrf( DenseMatrix<MT,SO>& A, char uplo );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = U^{T} U \texttt{ (if uplo = 'U'), or }
A = L L^{T} \texttt{ (if uplo = 'L'), } \f]
// where \c U is an upper triangular matrix and \c L is a lower triangular matrix. The Cholesky
// decomposition fails if the given matrix \a A is not a positive definite matrix. In this case
// a \a std::invalid_argument exception is thrown.
//
//
// \n \subsection lapack_qr_decomposition QR Decomposition
//
// The following functions provide an interface for the LAPACK functions \c sgeqrf(), \c dgeqrf(),
// \c cgeqrf(), and \c zgeqrf(), which compute the QR decomposition of the given general matrix:
\code
namespace blaze {
void geqrf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info );
void geqrf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info );
void geqrf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info );
void geqrf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void geqrf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = Q \cdot R, \f]
// where the \c Q is represented as a product of elementary reflectors
\f[ Q = H(1) H(2) . . . H(k) \texttt{, with k = min(m,n).} \f]
// Each H(i) has the form
\f[ H(i) = I - tau \cdot v \cdot v^T, \f]
// where \c tau is a real scalar, and \c v is a real vector with <tt>v(0:i-1) = 0</tt> and
// <tt>v(i) = 1</tt>. <tt>v(i+1:m)</tt> is stored on exit in <tt>A(i+1:m,i)</tt>, and \c tau
// in \c tau(i). Thus on exit the elements on and above the diagonal of the matrix contain the
// min(\a m,\a n)-by-\a n upper trapezoidal matrix \c R (\c R is upper triangular if \a m >= \a n);
// the elements below the diagonal, with the array \c tau, represent the orthogonal matrix \c Q as
// a product of min(\a m,\a n) elementary reflectors.
//
// The following functions provide an interface for the LAPACK functions \c sorgqr(), \c dorgqr(),
// \c cungqr(), and \c zungqr(), which reconstruct the \c Q matrix from a QR decomposition:
\code
namespace blaze {
void orgqr( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info );
void orgqr( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info );
void ungqr( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info );
void ungqr( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void orgqr( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
template< typename MT, bool SO >
void ungqr( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The following functions provide an interface for the LAPACK functions \c sormqr(), \c dormqr(),
// \c cunmqr(), and \c zunmqr(), which can be used to multiply a matrix with the \c Q matrix from
// a QR decomposition:
\code
namespace blaze {
void ormqr( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info );
void ormqr( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info );
void unmqr( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info );
void unmqr( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void ormqr( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau );
template< typename MT1, bool SO, typename MT2 >
void unmqr( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau );
} // namespace blaze
\endcode
// \n \subsection lapack_rq_decomposition RQ Decomposition
//
// The following functions provide an interface for the LAPACK functions \c sgerqf(), \c dgerqf(),
// \c cgerqf(), and \c zgerqf(), which compute the RQ decomposition of the given general matrix:
\code
namespace blaze {
void gerqf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info );
void gerqf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info );
void gerqf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info );
void gerqf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void gerqf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = R \cdot Q, \f]
// where the \c Q is represented as a product of elementary reflectors
\f[ Q = H(1) H(2) . . . H(k) \texttt{, with k = min(m,n).} \f]
// Each H(i) has the form
\f[ H(i) = I - tau \cdot v \cdot v^T, \f]
// where \c tau is a real scalar, and \c v is a real vector with <tt>v(n-k+i+1:n) = 0</tt> and
// <tt>v(n-k+i) = 1</tt>. <tt>v(1:n-k+i-1)</tt> is stored on exit in <tt>A(m-k+i,1:n-k+i-1)</tt>,
// and \c tau in \c tau(i). Thus in case \a m <= \a n, the upper triangle of the subarray
// <tt>A(1:m,n-m+1:n)</tt> contains the \a m-by-\a m upper triangular matrix \c R and in case
// \a m >= \a n, the elements on and above the (\a m-\a n)-th subdiagonal contain the \a m-by-\a n
// upper trapezoidal matrix \c R; the remaining elements in combination with the array \c tau
// represent the orthogonal matrix \c Q as a product of min(\a m,\a n) elementary reflectors.
//
// The following functions provide an interface for the LAPACK functions \c sorgrq(), \c dorgrq(),
// \c cungrq(), and \c zungrq(), which reconstruct the \c Q matrix from an RQ decomposition:
\code
namespace blaze {
void orgrq( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info );
void orgrq( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info );
void ungrq( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info );
void ungrq( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void orgrq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
template< typename MT, bool SO >
void ungrq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The following functions provide an interface for the LAPACK functions \c sormrq(), \c dormrq(),
// \c cunmrq(), and \c zunmrq(), which can be used to multiply a matrix with the \c Q matrix from
// an RQ decomposition:
\code
namespace blaze {
void ormrq( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info );
void ormrq( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info );
void unmrq( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info );
void unmrq( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void ormrq( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau );
template< typename MT1, bool SO, typename MT2 >
void unmrq( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau );
} // namespace blaze
\endcode
// \n \subsection lapack_ql_decomposition QL Decomposition
//
// The following functions provide an interface for the LAPACK functions \c sgeqlf(), \c dgeqlf(),
// \c cgeqlf(), and \c zgeqlf(), which compute the QL decomposition of the given general matrix:
\code
namespace blaze {
void geqlf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info );
void geqlf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info );
void geqlf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info );
void geqlf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void geqlf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = Q \cdot L, \f]
// where the \c Q is represented as a product of elementary reflectors
\f[ Q = H(k) . . . H(2) H(1) \texttt{, with k = min(m,n).} \f]
// Each H(i) has the form
\f[ H(i) = I - tau \cdot v \cdot v^T, \f]
// where \c tau is a real scalar, and \c v is a real vector with <tt>v(m-k+i+1:m) = 0</tt> and
// <tt>v(m-k+i) = 1</tt>. <tt>v(1:m-k+i-1)</tt> is stored on exit in <tt>A(1:m-k+i-1,n-k+i)</tt>,
// and \c tau in \c tau(i). Thus in case \a m >= \a n, the lower triangle of the subarray
// A(m-n+1:m,1:n) contains the \a n-by-\a n lower triangular matrix \c L and in case \a m <= \a n,
// the elements on and below the (\a n-\a m)-th subdiagonal contain the \a m-by-\a n lower
// trapezoidal matrix \c L; the remaining elements in combination with the array \c tau represent
// the orthogonal matrix \c Q as a product of min(\a m,\a n) elementary reflectors.
//
// The following functions provide an interface for the LAPACK functions \c sorgql(), \c dorgql(),
// \c cungql(), and \c zungql(), which reconstruct the \c Q matrix from a QL decomposition:
\code
namespace blaze {
void orgql( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info );
void orgql( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info );
void ungql( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info );
void ungql( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void orgql( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
template< typename MT, bool SO >
void ungql( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The following functions provide an interface for the LAPACK functions \c sormql(), \c dormql(),
// \c cunmql(), and \c zunmql(), which can be used to multiply a matrix with the \c Q matrix from
// a QL decomposition:
\code
namespace blaze {
void ormql( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info );
void ormql( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info );
void unmql( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info );
void unmql( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void ormql( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau );
template< typename MT1, bool SO, typename MT2 >
void unmql( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau );
} // namespace blaze
\endcode
// \n \subsection lapack_lq_decomposition LQ Decomposition
//
// The following functions provide an interface for the LAPACK functions \c sgelqf(), \c dgelqf(),
// \c cgelqf(), and \c zgelqf(), which compute the LQ decomposition of the given general matrix:
\code
namespace blaze {
void gelqf( int m, int n, float* A, int lda, float* tau, float* work, int lwork, int* info );
void gelqf( int m, int n, double* A, int lda, double* tau, double* work, int lwork, int* info );
void gelqf( int m, int n, complex<float>* A, int lda, complex<float>* tau, complex<float>* work, int lwork, int* info );
void gelqf( int m, int n, complex<double>* A, int lda, complex<double>* tau, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void gelqf( DenseMatrix<MT,SO>& A, typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The decomposition has the form
\f[ A = L \cdot Q, \f]
// where the \c Q is represented as a product of elementary reflectors
\f[ Q = H(k) . . . H(2) H(1) \texttt{, with k = min(m,n).} \f]
// Each H(i) has the form
\f[ H(i) = I - tau \cdot v \cdot v^T, \f]
// where \c tau is a real scalar, and \c v is a real vector with <tt>v(0:i-1) = 0</tt> and
// <tt>v(i) = 1</tt>. <tt>v(i+1:n)</tt> is stored on exit in <tt>A(i,i+1:n)</tt>, and \c tau
// in \c tau(i). Thus on exit the elements on and below the diagonal of the matrix contain the
// \a m-by-min(\a m,\a n) lower trapezoidal matrix \c L (\c L is lower triangular if \a m <= \a n);
// the elements above the diagonal, with the array \c tau, represent the orthogonal matrix \c Q
// as a product of min(\a m,\a n) elementary reflectors.
//
// The following functions provide an interface for the LAPACK functions \c sorglq(), \c dorglq(),
// \c cunglq(), and \c zunglq(), which reconstruct the \c Q matrix from an LQ decomposition:
\code
namespace blaze {
void orglq( int m, int n, int k, float* A, int lda, const float* tau, float* work, int lwork, int* info );
void orglq( int m, int n, int k, double* A, int lda, const double* tau, double* work, int lwork, int* info );
void unglq( int m, int n, int k, complex<float>* A, int lda, const complex<float>* tau, complex<float>* work, int lwork, int* info );
void unglq( int m, int n, int k, complex<double>* A, int lda, const complex<double>* tau, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void orglq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
template< typename MT, bool SO >
void unglq( DenseMatrix<MT,SO>& A, const typename MT::ElementType* tau );
} // namespace blaze
\endcode
// The following functions provide an interface for the LAPACK functions \c sormlq(), \c dormlq(),
// \c cunmlq(), and \c zunmlq(), which can be used to multiply a matrix with the \c Q matrix from
// an LQ decomposition:
\code
namespace blaze {
void ormlq( char side, char trans, int m, int n, int k, const float* A, int lda, const float* tau, float* C, int ldc, float* work, int lwork, int* info );
void ormlq( char side, char trans, int m, int n, int k, const double* A, int lda, const double* tau, double* C, int ldc, double* work, int lwork, int* info );
void unmlq( char side, char trans, int m, int n, int k, const complex<float>* A, int lda, const complex<float>* tau, complex<float>* C, int ldc, complex<float>* work, int lwork, int* info );
void unmlq( char side, char trans, int m, int n, int k, const complex<double>* A, int lda, const complex<double>* tau, complex<double>* C, int ldc, complex<double>* work, int lwork, int* info );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void ormlq( DenseMatrix<MT1,SO1>& C, const DenseMatrix<MT2,SO2>& A, char side, char trans, const ElementType_<MT2>* tau );
template< typename MT1, bool SO, typename MT2 >
void unmlq( DenseMatrix<MT1,SO>& C, DenseMatrix<MT2,SO>& A, char side, char trans, ElementType_<MT2>* tau );
} // namespace blaze
\endcode
// \n \section lapack_inversion Matrix Inversion
// <hr>
//
// Given a matrix that has already been decomposed, the following functions can be used to invert
// the matrix in-place.
//
//
// \n \subsection lapack_lu_inversion LU-based Inversion
//
// The following functions provide an interface for the LAPACK functions \c sgetri(), \c dgetri(),
// \c cgetri(), and \c zgetri(), which invert a general matrix that has already been decomposed by
// an \ref lapack_lu_decomposition :
\code
namespace blaze {
void getri( int n, float* A, int lda, const int* ipiv, float* work, int lwork, int* info );
void getri( int n, double* A, int lda, const int* ipiv, double* work, int lwork, int* info );
void getri( int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int lwork, int* info );
void getri( int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO >
void getri( DenseMatrix<MT,SO>& A, const int* ipiv );
} // namespace blaze
\endcode
// The functions fail if ...
//
// - ... the given matrix is not a square matrix;
// - ... the given matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the fifth function throws a
// \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_ldlt_inversion LDLT-based Inversion
//
// The following functions provide an interface for the LAPACK functions \c ssytri(), \c dsytri(),
// \c csytri(), and \c zsytri(), which invert a symmetric indefinite matrix that has already been
// decomposed by an \ref lapack_ldlt_decomposition :
\code
namespace blaze {
void sytri( char uplo, int n, float* A, int lda, const int* ipiv, float* work, int* info );
void sytri( char uplo, int n, double* A, int lda, const int* ipiv, double* work, int* info );
void sytri( char uplo, int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int* info );
void sytri( char uplo, int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int* info );
template< typename MT, bool SO >
void sytri( DenseMatrix<MT,SO>& A, char uplo, const int* ipiv );
} // namespace blaze
\endcode
// The functions fail if ...
//
// - ... the given matrix is not a square matrix;
// - ... the given matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the fifth function throws a
// \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_ldlh_inversion LDLH-based Inversion
//
// The following functions provide an interface for the LAPACK functions \c chetri() and
// \c zhetri(), which invert an Hermitian indefinite matrix that has already been decomposed by
// an \ref lapack_ldlh_decomposition :
\code
namespace blaze {
void hetri( char uplo, int n, complex<float>* A, int lda, const int* ipiv, complex<float>* work, int* info );
void hetri( char uplo, int n, complex<double>* A, int lda, const int* ipiv, complex<double>* work, int* info );
template< typename MT, bool SO >
void hetri( DenseMatrix<MT,SO>& A, char uplo, const int* ipiv );
} // namespace blaze
\endcode
// The functions fail if ...
//
// - ... the given matrix is not a square matrix;
// - ... the given matrix is singular and not invertible.
//
// The first two functions report failure via the \c info argument, the third function throws a
// \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_llh_inversion Cholesky-based Inversion
//
// The following functions provide an interface for the LAPACK functions \c spotri(), \c dpotri(),
// \c cpotri(), and \c zpotri(), which invert a positive definite matrix that has already been
// decomposed by an \ref lapack_llh_decomposition :
\code
namespace blaze {
void potri( char uplo, int n, float* A, int lda, int* info );
void potri( char uplo, int n, double* A, int lda, int* info );
void potri( char uplo, int n, complex<float>* A, int lda, int* info );
void potri( char uplo, int n, complex<double>* A, int lda, int* info );
template< typename MT, bool SO >
void potri( DenseMatrix<MT,SO>& A, char uplo );
} // namespace blaze
\endcode
// The functions fail if ...
//
// - ... the given matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the given matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the fifth function throws a
// \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_triangular_inversion Inversion of Triangular Matrices
//
// The following functions provide an interface for the LAPACK functions \c strtri(), \c dtrtri(),
// \c ctrtri(), and \c ztrtri(), which invert the given triangular matrix in-place:
\code
namespace blaze {
void trtri( char uplo, char diag, int n, float* A, int lda, int* info );
void trtri( char uplo, char diag, int n, double* A, int lda, int* info );
void trtri( char uplo, char diag, int n, complex<float>* A, int lda, int* info );
void trtri( char uplo, char diag, int n, complex<double>* A, int lda, int* info );
template< typename MT, bool SO >
void trtri( DenseMatrix<MT,SO>& A, char uplo, char diag );
} // namespace blaze
\endcode
// The functions fail if ...
//
// - ... the given matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the given \a diag argument is neither 'U' nor 'N';
// - ... the given matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the fifth function throws a
// \a std::invalid_argument exception in case of an error.
//
//
// \n \section lapack_substitution Substitution
// <hr>
//
// Given a matrix that has already been decomposed the following functions can be used to perform
// the forward/backward substitution step to compute the solution to a system of linear equations.
// Note that depending on the storage order of the system matrix and the given right-hand side the
// functions solve different equation systems:
//
// Single right-hand side:
// - \f$ A *x=b \f$ if \a A is column-major
// - \f$ A^T*x=b \f$ if \a A is row-major
//
// Multiple right-hand sides:
// - \f$ A *X =B \f$ if both \a A and \a B are column-major
// - \f$ A^T*X =B \f$ if \a A is row-major and \a B is column-major
// - \f$ A *X^T=B^T \f$ if \a A is column-major and \a B is row-major
// - \f$ A^T*X^T=B^T \f$ if both \a A and \a B are row-major
//
// In this context the general system matrix \a A is a n-by-n matrix that has already been
// factorized by the according decomposition function, \a x and \a b are n-dimensional vectors
// and \a X and \a B are either row-major m-by-n matrices or column-major n-by-m matrices.
//
//
// \n \subsection lapack_lu_substitution LU-based Substitution
//
// The following functions provide an interface for the LAPACK functions \c sgetrs(), \c dgetrs(),
// \c cgetrs(), and \c zgetrs(), which perform the substitution step for a general matrix that has
// already been decomposed by an \ref lapack_lu_decomposition :
\code
namespace blaze {
void getrs( char trans, int n, int nrhs, const float* A, int lda, const int* ipiv, float* B, int ldb, int* info );
void getrs( char trans, int n, int nrhs, const double* A, int lda, const int* ipiv, double* B, int ldb, int* info );
void getrs( char trans, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info );
void getrs( char trans, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void getrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char trans, const int* ipiv );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void getrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char trans, const int* ipiv );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s)
// of the linear system of equations. The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a trans argument is neither 'N' nor 'T' nor 'C';
// - ... the sizes of the two given matrices do not match.
//
// The first four functions report failure via the \c info argument, the last two functions throw
// a \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_ldlt_substitution LDLT-based Substitution
//
// The following functions provide an interface for the LAPACK functions \c ssytrs(), \c dsytrs(),
// \c csytrs(), and \c zsytrs(), which perform the substitution step for a symmetric indefinite
// matrix that has already been decomposed by an \ref lapack_ldlt_decomposition :
\code
namespace blaze {
void sytrs( char uplo, int n, int nrhs, const float* A, int lda, const int* ipiv, float* B, int ldb, int* info );
void sytrs( char uplo, int n, int nrhs, const double* A, int lda, const int* ipiv, double* B, int ldb, int* info );
void sytrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info );
void sytrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void sytrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, const int* ipiv );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void sytrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, const int* ipiv );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s)
// of the linear system of equations. The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the sizes of the two given matrices do not match.
//
// The first four functions report failure via the \c info argument, the last two functions throw
// a \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_ldlh_substitution LDLH-based Substitution
//
// The following functions provide an interface for the LAPACK functions \c chetrs(), and \c zhetrs(),
// which perform the substitution step for an Hermitian indefinite matrix that has already been
// decomposed by an \ref lapack_ldlh_decomposition :
\code
namespace blaze {
void hetrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, const int* ipiv, complex<float>* B, int ldb, int* info );
void hetrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, const int* ipiv, complex<double>* B, int ldb, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void hetrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, const int* ipiv );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void hetrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, const int* ipiv );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s)
// of the linear system of equations. The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the sizes of the two given matrices do not match.
//
// The first two functions report failure via the \c info argument, the last two functions throw
// a \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_llh_substitution Cholesky-based Substitution
//
// The following functions provide an interface for the LAPACK functions \c spotrs(), \c dpotrs(),
// \c cpotrs(), and \c zpotrs(), which perform the substitution step for a positive definite matrix
// that has already been decomposed by an \ref lapack_llh_decomposition :
\code
namespace blaze {
void potrs( char uplo, int n, int nrhs, const float* A, int lda, float* B, int ldb, int* info );
void potrs( char uplo, int n, int nrhs, const double* A, int lda, double* B, int ldb, int* info );
void potrs( char uplo, int n, int nrhs, const complex<float>* A, int lda, complex<float>* B, int ldb, int* info );
void potrs( char uplo, int n, int nrhs, const complex<double>* A, int lda, complex<double>* B, int ldb, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void potrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void potrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s)
// of the linear system of equations. The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the sizes of the two given matrices do not match.
//
// The first four functions report failure via the \c info argument, the last two functions throw
// a \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_triangular_substitution Substitution for Triangular Matrices
//
// The following functions provide an interface for the LAPACK functions \c strtrs(), \c dtrtrs(),
// \c ctrtrs(), and \c ztrtrs(), which perform the substitution step for a triangular matrix:
\code
namespace blaze {
void trtrs( char uplo, char trans, char diag, int n, int nrhs, const float* A, int lda, float* B, int ldb, int* info );
void trtrs( char uplo, char trans, char diag, int n, int nrhs, const double* A, int lda, double* B, int ldb, int* info );
void trtrs( char uplo, char trans, char diag, int n, int nrhs, const complex<float>* A, int lda, complex<float>* B, int ldb, int* info );
void trtrs( char uplo, char trans, char diag, int n, int nrhs, const complex<double>* A, int lda, complex<double>* B, int ldb, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void trtrs( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, char trans, char diag );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void trtrs( const DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, char trans, char diag );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the solution(s)
// of the linear system of equations. The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the given \a trans argument is neither 'N' nor 'T' nor 'C';
// - ... the given \a diag argument is neither 'U' nor 'N';
// - ... the sizes of the two given matrices do not match.
//
// The first four functions report failure via the \c info argument, the last two functions throw
// a \a std::invalid_argument exception in case of an error.
//
//
// \n \section lapack_linear_system_solver Linear System Solver
// <hr>
//
// The following functions represent compound functions that perform both the decomposition step
// as well as the substitution step to compute the solution to a system of linear equations. Note
// that depending on the storage order of the system matrix and the given right-hand side the
// functions solve different equation systems:
//
// Single right-hand side:
// - \f$ A *x=b \f$ if \a A is column-major
// - \f$ A^T*x=b \f$ if \a A is row-major
//
// Multiple right-hand sides:
// - \f$ A *X =B \f$ if both \a A and \a B are column-major
// - \f$ A^T*X =B \f$ if \a A is row-major and \a B is column-major
// - \f$ A *X^T=B^T \f$ if \a A is column-major and \a B is row-major
// - \f$ A^T*X^T=B^T \f$ if both \a A and \a B are row-major
//
// In this context the general system matrix \a A is a n-by-n matrix that has already been
// factorized by the according decomposition function, \a x and \a b are n-dimensional vectors
// and \a X and \a B are either row-major m-by-n matrices or column-major n-by-m matrices.
//
//
// \subsection lapack_lu_linear_system_solver LU-based Linear System Solver
//
// The following functions provide an interface for the LAPACK functions \c sgesv(), \c dgesv(),
// \c cgesv(), and \c zgesv(), which combine an \ref lapack_lu_decomposition and the according
// \ref lapack_lu_substitution :
\code
namespace blaze {
void gesv( int n, int nrhs, float* A, int lda, int* ipiv, float* B, int ldb, int* info );
void gesv( int n, int nrhs, double* A, int lda, int* ipiv, double* B, int ldb, int* info );
void gesv( int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, int* info );
void gesv( int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void gesv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, int* ipiv );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void gesv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, int* ipiv );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the
// solution(s) of the linear system of equations and \a A has been decomposed by means of an
// \ref lapack_lu_decomposition.
//
// The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given system matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the last two functions throw a
// \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_ldlt_linear_system_solver LDLT-based Linear System Solver
//
// The following functions provide an interface for the LAPACK functions \c ssysv(), \c dsysv(),
// \c csysv(), and \c zsysv(), which combine an \ref lapack_ldlt_decomposition and the according
// \ref lapack_ldlt_substitution :
\code
namespace blaze {
void sysv( char uplo, int n, int nrhs, float* A, int lda, int* ipiv, float* B, int ldb, float* work, int lwork, int* info );
void sysv( char uplo, int n, int nrhs, double* A, int lda, int* ipiv, double* B, int ldb, double* work, int lwork, int* info );
void sysv( char uplo, int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, complex<float>* work, int lwork, int* info );
void sysv( char uplo, int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void sysv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, int* ipiv );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void sysv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, int* ipiv );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the
// solution(s) of the linear system of equations and \a A has been decomposed by means of an
// \ref lapack_ldlt_decomposition.
//
// The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the sizes of the two given matrices do not match;
// - ... the given system matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the last two functions throw a
// \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_ldlh_linear_system_solver LDLH-based Linear System Solver
//
// The following functions provide an interface for the LAPACK functions \c chesv() and
// \c zhesv(), which combine an \ref lapack_ldlh_decomposition and the according
// \ref lapack_ldlh_substitution :
\code
namespace blaze {
void hesv( char uplo, int n, int nrhs, complex<float>* A, int lda, int* ipiv, complex<float>* B, int ldb, complex<float>* work, int lwork, int* info );
void hesv( char uplo, int n, int nrhs, complex<double>* A, int lda, int* ipiv, complex<double>* B, int ldb, complex<double>* work, int lwork, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void hesv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, int* ipiv );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void hesv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo, int* ipiv );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the
// solution(s) of the linear system of equations and \a A has been decomposed by means of an
// \ref lapack_ldlh_decomposition.
//
// The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the sizes of the two given matrices do not match;
// - ... the given system matrix is singular and not invertible.
//
// The first two functions report failure via the \c info argument, the last two functions throw a
// \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_llh_linear_system_solver Cholesky-based Linear System Solver
//
// The following functions provide an interface for the LAPACK functions \c sposv(), \c dposv(),
// \c cposv(), and \c zposv(), which combine an \ref lapack_llh_decomposition and the according
// \ref lapack_llh_substitution :
\code
namespace blaze {
void posv( char uplo, int n, int nrhs, float* A, int lda, float* B, int ldb, int* info );
void posv( char uplo, int n, int nrhs, double* A, int lda, double* B, int ldb, int* info );
void posv( char uplo, int n, int nrhs, complex<float>* A, int lda, complex<float>* B, int ldb, int* info );
void posv( char uplo, int n, int nrhs, complex<double>* A, int lda, complex<double>* B, int ldb, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void posv( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo );
template< typename MT1, bool SO1, typename MT2, bool SO2 >
void posv( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& B, char uplo );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b or the matrix \a B contain the
// solution(s) of the linear system of equations and \a A has been decomposed by means of an
// \ref lapack_llh_decomposition.
//
// The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the sizes of the two given matrices do not match;
// - ... the given system matrix is singular and not invertible.
//
// The first four functions report failure via the \c info argument, the last two functions throw a
// \a std::invalid_argument exception in case of an error.
//
//
// \n \subsection lapack_triangular_linear_system_solver Linear System Solver for Triangular Matrices
//
// The following functions provide an interface for the LAPACK functions \c strsv(), \c dtrsv(),
// \c ctrsv(), and \c ztrsv():
\code
namespace blaze {
void trsv( char uplo, char trans, char diag, int n, const float* A, int lda, float* x, int incX );
void trsv( char uplo, char trans, char diag, int n, const double* A, int lda, double* x, int incX );
void trsv( char uplo, char trans, char diag, int n, const complex<float>* A, int lda, complex<float>* x, int incX );
void trsv( char uplo, char trans, char diag, int n, const complex<double>* A, int lda, complex<double>* x, int incX );
template< typename MT, bool SO, typename VT, bool TF >
void trsv( const DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& b, char uplo, char trans, char diag );
} // namespace blaze
\endcode
// If the function exits successfully, the vector \a b contains the solution of the linear
// system of equations.
//
// The functions fail if ...
//
// - ... the given system matrix is not a square matrix;
// - ... the given \a uplo argument is neither 'L' nor 'U';
// - ... the given \a trans argument is neither 'N' nor 'T' nor 'C';
// - ... the given \a diag argument is neither 'U' nor 'N'.
//
// The last function throws a \a std::invalid_argument exception in case of an error. Note that
// none of the functions does perform any test for singularity or near-singularity. Such tests
// must be performed prior to calling this function!
//
//
// \n \section lapack_eigenvalues Eigenvalues/Eigenvectors
//
// \subsection lapack_eigenvalues_general General Matrices
//
// The following functions provide an interface for the LAPACK functions \c sgeev(), \c dgeev(),
// \c cgeev(), and \c zgeev(), which compute the eigenvalues and optionally the eigenvectors of
// the given general matrix:
\code
namespace blaze {
void geev( char jobvl, char jobvr, int n, float* A, int lda, float* wr, float* wi, float* VL, int ldvl, float* VR, int ldvr, float* work, int lwork, int* info );
void geev( char jobvl, char jobvr, int n, double* A, int lda, double* wr, double* wi, double* VL, int ldvl, double* VR, int ldvr, double* work, int lwork, int* info );
void geev( char jobvl, char jobvr, int n, complex<float>* A, int lda, complex<float>* w, complex<float>* VL, int ldvl, complex<float>* VR, int ldvr, complex<float>* work, int lwork, float* rwork, int* info );
void geev( char jobvl, char jobvr, int n, complex<double>* A, int lda, complex<double>* w, complex<double>* VL, int ldvl, complex<double>* VR, int ldvr, complex<double>* work, int lwork, double* rwork, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void geev( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w );
template< typename MT1, bool SO1, typename MT2, bool SO2, typename VT, bool TF >
void geev( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& VL, DenseVector<VT,TF>& w );
template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 >
void geev( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& VR );
template< typename MT1, bool SO1, typename MT2, bool SO2, typename VT, bool TF, typename MT3, bool SO3 >
void geev( DenseMatrix<MT1,SO1>& A, DenseMatrix<MT2,SO2>& VL, DenseVector<VT,TF>& w, DenseMatrix<MT3,SO3>& VR );
} // namespace blaze
\endcode
// The complex eigenvalues of the given matrix \a A are returned in the given vector \a w.
// Please note that no order of eigenvalues can be assumed, except that complex conjugate pairs
// of eigenvalues appear consecutively with the eigenvalue having the positive imaginary part
// first.
//
// If \a VR is provided as an argument, the right eigenvectors are returned in the rows of \a VR
// in case \a VR is a row-major matrix and in the columns of \a VR in case \a VR is a column-major
// matrix. The right eigenvector \f$v[j]\f$ of \a A satisfies
\f[ A * v[j] = lambda[j] * v[j], \f]
// where \f$lambda[j]\f$ is its eigenvalue.
//
// If \a VL is provided as an argument, the left eigenvectors are returned in the rows of \a VL
// in case \a VL is a row-major matrix and in the columns of \a VL in case \a VL is a column-major
// matrix. The left eigenvector \f$u[j]\f$ of \a A satisfies
\f[ u[j]^{H} * A = lambda[j] * u[j]^{H}, \f]
// where \f$u[j]^{H}\f$ denotes the conjugate transpose of \f$u[j]\f$.
//
// \a w, \a VL, and \a VR are resized to the correct dimensions (if possible and necessary). The
// functions fail if ...
//
// - ... the given matrix \a A is not a square matrix;
// - ... the given matrix \a VL is a fixed size matrix and the dimensions don't match;
// - ... the given vector \a w is a fixed size vector and the size doesn't match;
// - ... the given matrix \a VR is a fixed size matrix and the dimensions don't match;
// - ... the eigenvalue computation fails.
//
// The first four functions report failure via the \c info argument, the last four functions throw
// an exception in case of an error.
//
//
// \n \subsection lapack_eigenvalues_symmetric Symmetric Matrices
//
// The following functions provide an interface for the LAPACK functions \c ssyev() and \c dsyev(),
// which compute the eigenvalues and eigenvectors of the given symmetric matrix:
\code
namespace blaze {
void syev( char jobz, char uplo, int n, float* A, int lda, float* w, float* work, int lwork, int* info );
void syev( char jobz, char uplo, int n, double* A, int lda, double* w, double* work, int lwork, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void syev( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo );
} // namespace blaze
\endcode
// Alternatively, the following functions can be used, which provide an interface to the LAPACK
// functions \c ssyevd() and \c dsyevd(). In contrast to the \c syev() functions they use a
// divide-and-conquer strategy for the computation of the left and right eigenvectors:
\code
namespace blaze {
void syevd( char jobz, char uplo, int n, float* A, int lda, float* w, float* work, int lwork, int* iwork, int liwork, int* info );
void syevd( char jobz, char uplo, int n, double* A, int lda, double* w, double* work, int lwork, int* iwork, int liwork, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void syevd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo );
} // namespace blaze
\endcode
// The real eigenvalues are returned in ascending order in the given vector \a w. \a w is resized
// to the correct size (if possible and necessary). In case \a A is a row-major matrix, the left
// eigenvectors are returned in the rows of \a A, in case \a A is a column-major matrix, the right
// eigenvectors are returned in the columns of \a A.
//
// The functions fail if ...
//
// - ... the given matrix \a A is not a square matrix;
// - ... the given vector \a w is a fixed size vector and the size doesn't match;
// - ... the given \a jobz argument is neither \c 'V' nor \c 'N';
// - ... the given \a uplo argument is neither \c 'L' nor \c 'U';
// - ... the eigenvalue computation fails.
//
// The first two functions report failure via the \c info argument, the last function throws an
// exception in case of an error.
//
// Via the following functions, which wrap the LAPACK functions \c ssyevx() and \c dsyevx(), it
// is possible to compute a subset of eigenvalues and/or eigenvectors of a symmetric matrix:
\code
namespace blaze {
void syevx( char jobz, char range, char uplo, int n, float* A, int lda, float vl, float vu, int il, int iu, float abstol, int* m, float* w, float* Z, int ldz, float* work, int lwork, int* iwork, int* ifail, int* info );
void syevx( char jobz, char range, char uplo, int n, double* A, int lda, double vl, double vu, int il, int iu, double abstol, int* m, double* w, double* Z, int ldz, double* work, int lwork, int* iwork, int* ifail, int* info );
template< typename MT, bool SO, typename VT, bool TF >
size_t syevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo );
template< typename MT, bool SO, typename VT, bool TF, typename ST >
size_t syevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo, ST low, ST upp );
template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 >
size_t syevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo );
template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2, typename ST >
size_t syevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo, ST low, ST upp );
} // namespace blaze
\endcode
// The number of eigenvalues to be computed is specified by the lower bound \c low and the upper
// bound \c upp, which either form an integral or a floating point range.
//
// In case \a low and \a upp are of integral type, the function computes all eigenvalues in the
// index range \f$[low..upp]\f$. The \a num resulting real eigenvalues are stored in ascending
// order in the given vector \a w, which is either resized (if possible) or expected to be a
// \a num-dimensional vector. The eigenvectors are returned in the rows of \a Z in case \a Z is
// row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix. \a Z is
// resized (if possible) or expected to be a \a num-by-\a n row-major matrix or a \a n-by-\a num
// column-major matrix.
//
// In case \a low and \a upp are of floating point type, the function computes all eigenvalues
// in the half-open interval \f$(low..upp]\f$. The resulting real eigenvalues are stored in
// ascending order in the given vector \a w, which is either resized (if possible) or expected
// to be an \a n-dimensional vector. The eigenvectors are returned in the rows of \a Z in case
// \a Z is a row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix.
// \a Z is resized (if possible) or expected to be a \a n-by-\a n matrix.
//
// The functions fail if ...
//
// - ... the given matrix \a A is not a square matrix;
// - ... the given vector \a w is a fixed size vector and the size doesn't match;
// - ... the given matrix \a Z is a fixed size matrix and the dimensions don't match;
// - ... the given \a uplo argument is neither \c 'L' nor \c 'U';
// - ... the eigenvalue computation fails.
//
// The first two functions report failure via the \c info argument, the last four functions throw
// an exception in case of an error.
//
//
// \n \subsection lapack_eigenvalues_hermitian Hermitian Matrices
//
// The following functions provide an interface for the LAPACK functions \c cheev() and \c zheev(),
// which compute the eigenvalues and eigenvectors of the given Hermitian matrix:
\code
namespace blaze {
void heev( char jobz, char uplo, int n, complex<float>* A, int lda, float* w, complex<float>* work, int lwork, float* rwork, int* info );
   void heev( char jobz, char uplo, int n, complex<double>* A, int lda, double* w, complex<double>* work, int lwork, double* rwork, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void heev( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo );
} // namespace blaze
\endcode
// Alternatively, the following functions can be used, which provide an interface to the LAPACK
// functions \c cheevd() and \c zheevd(). In contrast to the \c heev() functions they use a
// divide-and-conquer strategy for the computation of the left and right eigenvectors:
\code
namespace blaze {
   void heevd( char jobz, char uplo, int n, complex<float>* A, int lda, float* w, complex<float>* work, int lwork, float* rwork, int lrwork, int* iwork, int* liwork, int* info );
void heevd( char jobz, char uplo, int n, complex<double>* A, int lda, double* w, complex<double>* work, int lwork, double* rwork, int lrwork, int* iwork, int* liwork, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void heevd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char jobz, char uplo );
} // namespace blaze
\endcode
// The real eigenvalues are returned in ascending order in the given vector \a w. \a w is resized
// to the correct size (if possible and necessary). In case \a A is a row-major matrix, the left
// eigenvectors are returned in the rows of \a A, in case \a A is a column-major matrix, the right
// eigenvectors are returned in the columns of \a A.
//
// The functions fail if ...
//
// - ... the given matrix \a A is not a square matrix;
// - ... the given vector \a w is a fixed size vector and the size doesn't match;
// - ... the given \a jobz argument is neither \c 'V' nor \c 'N';
// - ... the given \a uplo argument is neither \c 'L' nor \c 'U';
// - ... the eigenvalue computation fails.
//
// The first two functions report failure via the \c info argument, the last function throws an
// exception in case of an error.
//
// Via the following functions, which wrap the LAPACK functions \c cheevx() and \c zheevx(), it
// is possible to compute a subset of eigenvalues and/or eigenvectors of an Hermitian matrix:
\code
namespace blaze {
void heevx( char jobz, char range, char uplo, int n, complex<float>* A, int lda, float vl, float vu, int il, int iu, float abstol, int* m, float* w, complex<float>* Z, int ldz, complex<float>* work, int lwork, float* rwork, int* iwork, int* ifail, int* info );
void heevx( char jobz, char range, char uplo, int n, complex<double>* A, int lda, double vl, double vu, int il, int iu, double abstol, int* m, double* w, complex<double>* Z, int ldz, complex<double>* work, int lwork, double* rwork, int* iwork, int* ifail, int* info );
template< typename MT, bool SO, typename VT, bool TF >
size_t heevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo );
template< typename MT, bool SO, typename VT, bool TF, typename ST >
size_t heevx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& w, char uplo, ST low, ST upp );
template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2 >
size_t heevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo );
template< typename MT1, bool SO1, typename VT, bool TF, typename MT2, bool SO2, typename ST >
size_t heevx( DenseMatrix<MT1,SO1>& A, DenseVector<VT,TF>& w, DenseMatrix<MT2,SO2>& Z, char uplo, ST low, ST upp );
} // namespace blaze
\endcode
// The number of eigenvalues to be computed is specified by the lower bound \c low and the upper
// bound \c upp, which either form an integral or a floating point range.
//
// In case \a low and \a upp are of integral type, the function computes all eigenvalues in the
// index range \f$[low..upp]\f$. The \a num resulting real eigenvalues are stored in ascending
// order in the given vector \a w, which is either resized (if possible) or expected to be a
// \a num-dimensional vector. The eigenvectors are returned in the rows of \a Z in case \a Z is
// a row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix. \a Z is
// resized (if possible) or expected to be a \a num-by-\a n row-major matrix or a \a n-by-\a num
// column-major matrix.
//
// In case \a low and \a upp are of floating point type, the function computes all eigenvalues
// in the half-open interval \f$(low..upp]\f$. The resulting real eigenvalues are stored in
// ascending order in the given vector \a w, which is either resized (if possible) or expected
// to be an \a n-dimensional vector. The eigenvectors are returned in the rows of \a Z in case
// \a Z is a row-major matrix and in the columns of \a Z in case \a Z is a column-major matrix.
// \a Z is resized (if possible) or expected to be a \a n-by-\a n matrix.
//
// The functions fail if ...
//
// - ... the given matrix \a A is not a square matrix;
// - ... the given vector \a w is a fixed size vector and the size doesn't match;
// - ... the given matrix \a Z is a fixed size matrix and the dimensions don't match;
// - ... the given \a uplo argument is neither \c 'L' nor \c 'U';
// - ... the eigenvalue computation fails.
//
// The first two functions report failure via the \c info argument, the last four functions throw
// an exception in case of an error.
//
//
// \n \section lapack_singular_values Singular Values/Singular Vectors
//
// The following functions provide an interface for the LAPACK functions \c sgesvd(), \c dgesvd(),
// \c cgesvd(), and \c zgesvd(), which perform a singular value decomposition (SVD) on the given
// general matrix:
\code
namespace blaze {
void gesvd( char jobu, char jobv, int m, int n, float* A, int lda, float* s, float* U, int ldu, float* V, int ldv, float* work, int lwork, int* info );
void gesvd( char jobu, char jobv, int m, int n, double* A, int lda, double* s, double* U, int ldu, double* V, int ldv, double* work, int lwork, int* info );
void gesvd( char jobu, char jobv, int m, int n, complex<float>* A, int lda, float* s, complex<float>* U, int ldu, complex<float>* V, int ldv, complex<float>* work, int lwork, float* rwork, int* info );
void gesvd( char jobu, char jobv, int m, int n, complex<double>* A, int lda, double* s, complex<double>* U, int ldu, complex<double>* V, int ldv, complex<double>* work, int lwork, double* rwork, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void gesvd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s, char jobu, char jobv );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF >
void gesvd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, char jobu, char jobv );
template< typename MT1, bool SO, typename VT, bool TF, typename MT2 >
void gesvd( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V, char jobu, char jobv );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3 >
void gesvd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, char jobu, char jobv );
} // namespace blaze
\endcode
// Alternatively, the following functions can be used, which provide an interface to the LAPACK
// functions \c sgesdd(), \c dgesdd(), \c cgesdd(), and \c zgesdd(). In contrast to the \c gesvd()
// functions they compute the singular value decomposition (SVD) of the given general matrix by
// applying a divide-and-conquer strategy for the computation of the left and right singular
// vectors:
\code
namespace blaze {
void gesdd( char jobz, int m, int n, float* A, int lda, float* s, float* U, int ldu, float* V, int ldv, float* work, int lwork, int* iwork, int* info );
void gesdd( char jobz, int m, int n, double* A, int lda, double* s, double* U, int ldu, double* V, int ldv, double* work, int lwork, int* iwork, int* info );
void gesdd( char jobz, int m, int n, complex<float>* A, int lda, float* s, complex<float>* U, int ldu, complex<float>* V, int ldv, complex<float>* work, int lwork, float* rwork, int* iwork, int* info );
void gesdd( char jobz, int m, int n, complex<double>* A, int lda, double* s, complex<double>* U, int ldu, complex<double>* V, int ldv, complex<double>* work, int lwork, double* rwork, int* iwork, int* info );
template< typename MT, bool SO, typename VT, bool TF >
void gesdd( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF >
void gesdd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, char jobz );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF >
void gesdd( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V, char jobz );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3 >
void gesdd( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, char jobz );
} // namespace blaze
\endcode
// The resulting decomposition has the form
\f[ A = U \cdot S \cdot V, \f]
// where \a S is a \a m-by-\a n matrix, which is zero except for its min(\a m,\a n) diagonal
// elements, \a U is an \a m-by-\a m orthogonal matrix, and \a V is a \a n-by-\a n orthogonal
// matrix. The diagonal elements of \a S are the singular values of \a A, the first min(\a m,\a n)
// columns of \a U and rows of \a V are the left and right singular vectors of \a A, respectively.
//
// The resulting min(\a m,\a n) real and non-negative singular values are returned in descending
// order in the vector \a s, which is resized to the correct size (if possible and necessary).
//
// Via the following functions, which wrap the LAPACK functions \c sgesvdx(), \c dgesvdx(),
// \c cgesvdx(), and \c zgesvdx(), it is possible to compute a subset of singular values and/or
// vectors:
\code
namespace blaze {
void gesvdx( char jobu, char jobv, char range, int m, int n, float* A, int lda, float vl, float vu, int il, int iu, int* ns, float* s, float* U, int ldu, float* V, int ldv, float* work, int lwork, int* iwork, int* info );
void gesvdx( char jobu, char jobv, char range, int m, int n, double* A, int lda, double vl, double vu, int il, int iu, int* ns, double* s, double* U, int ldu, double* V, int ldv, double* work, int lwork, int* iwork, int* info );
void gesvdx( char jobu, char jobv, char range, int m, int n, complex<float>* A, int lda, float vl, float vu, int il, int iu, int* ns, float* s, complex<float>* U, int ldu, complex<float>* V, int ldv, complex<float>* work, int lwork, float* rwork, int* iwork, int* info );
void gesvdx( char jobu, char jobv, char range, int m, int n, complex<double>* A, int lda, double vl, double vu, int il, int iu, int* ns, double* s, complex<double>* U, int ldu, complex<double>* V, int ldv, complex<double>* work, int lwork, double* rwork, int* iwork, int* info );
template< typename MT, bool SO, typename VT, bool TF >
size_t gesvdx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s );
template< typename MT, bool SO, typename VT, bool TF, typename ST >
size_t gesvdx( DenseMatrix<MT,SO>& A, DenseVector<VT,TF>& s, ST low, ST upp );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF >
size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename ST >
size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, ST low, ST upp );
template< typename MT1, bool SO, typename VT, bool TF, typename MT2 >
size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V );
template< typename MT1, bool SO, typename VT, bool TF, typename MT2, typename ST >
size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseVector<VT,TF>& s, DenseMatrix<MT2,SO>& V, ST low, ST upp );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3 >
size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V );
template< typename MT1, bool SO, typename MT2, typename VT, bool TF, typename MT3, typename ST >
size_t gesvdx( DenseMatrix<MT1,SO>& A, DenseMatrix<MT2,SO>& U, DenseVector<VT,TF>& s, DenseMatrix<MT3,SO>& V, ST low, ST upp );
} // namespace blaze
\endcode
// The number of singular values to be computed is specified by the lower bound \a low and the
// upper bound \a upp, which either form an integral or a floating point range.
//
// In case \a low and \a upp are of integral type, the function computes all singular values
// in the index range \f$[low..upp]\f$. The \a num resulting real and non-negative singular values
// are stored in descending order in the given vector \a s, which is either resized (if possible)
// or expected to be a \a num-dimensional vector. The resulting left singular vectors are stored
// in the given matrix \a U, which is either resized (if possible) or expected to be a
// \a m-by-\a num matrix. The resulting right singular vectors are stored in the given matrix \a V,
// which is either resized (if possible) or expected to be a \a num-by-\a n matrix.
//
// In case \a low and \a upp are of floating point type, the function computes all singular values
// in the half-open interval \f$(low..upp]\f$. The resulting real and non-negative singular values
// are stored in descending order in the given vector \a s, which is either resized (if possible)
// or expected to be a min(\a m,\a n)-dimensional vector. The resulting left singular vectors are
// stored in the given matrix \a U, which is either resized (if possible) or expected to be a
// \a m-by-min(\a m,\a n) matrix. The resulting right singular vectors are stored in the given
// matrix \a V, which is either resized (if possible) or expected to be a min(\a m,\a n)-by-\a n
// matrix.
//
// The functions fail if ...
//
// - ... the given matrix \a U is a fixed size matrix and the dimensions don't match;
// - ... the given vector \a s is a fixed size vector and the size doesn't match;
// - ... the given matrix \a V is a fixed size matrix and the dimensions don't match;
// - ... the given scalar values don't form a proper range;
// - ... the singular value decomposition fails.
//
// The first four functions report failure via the \c info argument, the remaining functions throw
// an exception in case of an error.
//
//
// \n Previous: \ref blas_functions Next: \ref block_vectors_and_matrices \n
*/
//*************************************************************************************************
//**Block Vectors and Matrices*********************************************************************
/*!\page block_vectors_and_matrices Block Vectors and Matrices
//
// \tableofcontents
//
//
// \n \section block_vectors_and_matrices_general General Concepts
// <hr>
//
// In addition to fundamental element types, the \b Blaze library supports vectors and matrices
// with non-fundamental element type. For instance, it is possible to define block matrices by
// using a matrix type as the element type:
\code
using blaze::DynamicMatrix;
using blaze::DynamicVector;
using blaze::rowMajor;
using blaze::columnVector;
DynamicMatrix< DynamicMatrix<double,rowMajor>, rowMajor > A;
DynamicVector< DynamicVector<double,columnVector >, columnVector > x, y;
// ... Resizing and initialization
y = A * x;
\endcode
// The matrix/vector multiplication in this example runs fully parallel and uses vectorization
// for every inner matrix/vector multiplication and vector addition.
//
//
// \n \section block_vectors_and_matrices_pitfalls Pitfalls
// <hr>
//
// The only thing to keep in mind when using non-fundamental element types is that all operations
// between the elements have to be well defined. More specifically, the size of vector and matrix
// elements has to match. The attempt to combine two non-matching elements results in either a
// compilation error (in case of statically sized elements) or an exception (for dynamically sized
// elements):
\code
DynamicVector< StaticVector<int,2UL> > a;
DynamicVector< StaticVector<int,3UL> > b;
DynamicVector< DynamicVector<int> > c( a + b ); // Compilation error: element size doesn't match
\endcode
// Therefore please don't forget that dynamically sized elements (e.g. \c blaze::DynamicVector,
// \c blaze::HybridVector, \c blaze::DynamicMatrix, \c blaze::HybridMatrix, ...) need to be sized
// accordingly upfront.
//
//
// \n \section block_vectors_and_matrices_example Example
// <hr>
//
// The following example demonstrates a complete multiplication between a statically sized block
// matrix and block vector:
\code
// ( ( 1 1 ) ( 2 2 ) ) ( ( 1 ) ) ( ( 10 ) )
// ( ( 1 1 ) ( 2 2 ) ) ( ( 1 ) ) ( ( 10 ) )
// ( ) * ( ) = ( )
// ( ( 3 3 ) ( 4 4 ) ) ( ( 2 ) ) ( ( 22 ) )
// ( ( 3 3 ) ( 4 4 ) ) ( ( 2 ) ) ( ( 22 ) )
typedef StaticMatrix<int,2UL,2UL,rowMajor> M2x2;
typedef StaticVector<int,2UL,columnVector> V2;
   DynamicMatrix<M2x2,rowMajor> A{ { M2x2(1), M2x2(2) },
                                   { M2x2(3), M2x2(4) } };
DynamicVector<V2,columnVector> x{ V2(1), V2(2) };
DynamicVector<V2,columnVector> y( A * x );
\endcode
// \n Previous: \ref lapack_functions Next: \ref intra_statement_optimization \n
*/
//*************************************************************************************************
//**Intra-Statement Optimization*******************************************************************
/*!\page intra_statement_optimization Intra-Statement Optimization
//
// One of the prime features of the \b Blaze library is the automatic intra-statement optimization.
// In order to optimize the overall performance of every single statement \b Blaze attempts to
// rearrange the operands based on their types. For instance, the following addition of dense and
// sparse vectors
\code
blaze::DynamicVector<double> d1, d2, d3;
blaze::CompressedVector<double> s1;
// ... Resizing and initialization
d3 = d1 + s1 + d2;
\endcode
// is automatically rearranged and evaluated as
\code
// ...
d3 = d1 + d2 + s1; // <- Note that s1 and d2 have been rearranged
\endcode
// This order of operands is highly favorable for the overall performance since the addition of
// the two dense vectors \c d1 and \c d2 can be handled much more efficiently in a vectorized
// fashion.
//
// This intra-statement optimization can have a tremendous effect on the performance of a statement.
// Consider for instance the following computation:
\code
blaze::DynamicMatrix<double> A, B;
blaze::DynamicVector<double> x, y;
// ... Resizing and initialization
y = A * B * x;
\endcode
// Since multiplications are evaluated from left to right, this statement would result in a
// matrix/matrix multiplication, followed by a matrix/vector multiplication. However, if the
// right subexpression is evaluated first, the performance can be dramatically improved since the
// matrix/matrix multiplication can be avoided in favor of a second matrix/vector multiplication.
// The \b Blaze library exploits this by automatically restructuring the expression such that the
// right multiplication is evaluated first:
\code
// ...
y = A * ( B * x );
\endcode
// Note however that although this intra-statement optimization may result in a measurable or
// even significant performance improvement, this behavior may be undesirable for several reasons,
// for instance because of numerical stability. Therefore, in case the order of evaluation matters,
// the best solution is to be explicit and to separate a statement into several statements:
\code
blaze::DynamicVector<double> d1, d2, d3;
blaze::CompressedVector<double> s1;
// ... Resizing and initialization
d3 = d1 + s1; // Compute the dense vector/sparse vector addition first ...
d3 += d2; // ... and afterwards add the second dense vector
\endcode
\code
// ...
blaze::DynamicMatrix<double> A, B, C;
blaze::DynamicVector<double> x, y;
// ... Resizing and initialization
C = A * B; // Compute the left-hand side matrix-matrix multiplication first ...
y = C * x; // ... before the right-hand side matrix-vector multiplication
\endcode
// Alternatively, it is also possible to use the \c eval() function to fix the order of evaluation:
\code
blaze::DynamicVector<double> d1, d2, d3;
blaze::CompressedVector<double> s1;
// ... Resizing and initialization
d3 = d1 + eval( s1 + d2 );
\endcode
\code
blaze::DynamicMatrix<double> A, B;
blaze::DynamicVector<double> x, y;
// ... Resizing and initialization
y = eval( A * B ) * x;
\endcode
// \n Previous: \ref block_vectors_and_matrices
*/
//*************************************************************************************************
#endif
|
dsposv.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zcposv.c, mixed zc -> ds, Fri Sep 28 17:38:17 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "core_lapack.h"
#include <math.h>
#include <omp.h>
#include <stdbool.h>
/***************************************************************************//**
*
* @ingroup plasma_posv
*
* Computes the solution to a system of linear equations A * X = B, where A is
* an n-by-n symmetric positive definite matrix and X and B are n-by-nrhs matrices.
*
 * plasma_dsposv first factorizes the matrix using plasma_spotrf and uses
 * this factorization within an iterative refinement procedure to produce a
 * solution with double precision normwise backward error quality (see below). If
 * the approach fails the method falls back to a double precision factorization and
 * solve.
*
 * The iterative refinement is not going to be a winning strategy if
 * the ratio of single precision performance over double precision
 * performance is too small. A reasonable strategy should take the number
 * of right-hand sides and the size of the matrix into account. This might
 * be done with a call to ILAENV in the future. Up to now, we always try
 * iterative refinement.
*
* The iterative refinement process is stopped if iter > itermax or
* for all the RHS we have: Rnorm < sqrt(n)*Xnorm*Anorm*eps, where:
*
* - iter is the number of the current iteration in the iterative refinement
* process
* - Rnorm is the Infinity-norm of the residual
* - Xnorm is the Infinity-norm of the solution
* - Anorm is the Infinity-operator-norm of the matrix A
* - eps is the machine epsilon returned by DLAMCH('Epsilon').
 * The value of itermax is fixed to 30.
*
*******************************************************************************
*
* @param[in] uplo
* Specifies whether the matrix A is upper or lower triangular:
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] n
* The number of linear equations, i.e., the order of the matrix A.
* n >= 0.
*
* @param[in] nrhs
* The number of right hand sides, i.e., the number of columns of the
* matrix B. nrhs >= 0.
*
* @param[in,out] pA
* The n-by-n symmetric positive definite coefficient matrix A.
* If uplo = PlasmaUpper, the leading n-by-n upper triangular part of
* A contains the upper triangular part of the matrix A, and the
* strictly lower triangular part of A is not referenced.
* If uplo = PlasmaLower, the leading n-by-n lower triangular part of
* A contains the lower triangular part of the matrix A, and the
* strictly upper triangular part of A is not referenced.
* On exit, contains the lower Cholesky factor matrix L,
* if uplo == PlasmaLower and upper Cholesky factor (L^T),
* otherwise.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,n).
*
* @param[in] pB
* The n-by-nrhs matrix of right hand side matrix B.
* This matrix remains unchanged.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,n).
*
* @param[out] pX
* If return value = 0, the n-by-nrhs solution matrix X.
*
* @param[in] ldx
* The leading dimension of the array X. ldx >= max(1,n).
*
* @param[out] iter
* The number of the iterations in the iterative refinement
* process, needed for the convergence. If failed, it is set
* to be -(1+itermax), where itermax = 30.
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
*
*******************************************************************************
*
* @sa plasma_omp_dsposv
* @sa plasma_dsposv
* @sa plasma_zposv
*
******************************************************************************/
int plasma_dsposv(plasma_enum_t uplo, int n, int nrhs,
double *pA, int lda,
double *pB, int ldb,
double *pX, int ldx, int *iter)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_error("PLASMA not initialized");
return PlasmaErrorNotInitialized;
}
// Check input arguments.
if (uplo != PlasmaUpper && uplo != PlasmaLower) {
plasma_error("illegal value of uplo");
return -1;
}
if (n < 0) {
plasma_error("illegal value of n");
return -2;
}
if (nrhs < 0) {
plasma_error("illegal value of nrhs");
return -3;
}
if (lda < imax(1, n)) {
plasma_error("illegal value of lda");
return -5;
}
if (ldb < imax(1, n)) {
plasma_error("illegal value of ldb");
return -7;
}
if (ldx < imax(1, n)) {
plasma_error("illegal value of ldx");
return -9;
}
// quick return
*iter = 0;
if (imin(n, nrhs) == 0)
return PlasmaSuccess;
// Tune parameters.
if (plasma->tuning)
plasma_tune_potrf(plasma, PlasmaRealFloat, n);
// Set tiling parameters.
int nb = plasma->nb;
// Create tile matrices.
plasma_desc_t A;
plasma_desc_t B;
plasma_desc_t X;
int retval;
retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
n, n, 0, 0, n, n, &A);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
return retval;
}
retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
n, nrhs, 0, 0, n, nrhs, &B);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&A);
return retval;
}
retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
n, nrhs, 0, 0, n, nrhs, &X);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&A);
plasma_desc_destroy(&B);
return retval;
}
// Create additional tile matrices.
plasma_desc_t R, As, Xs;
retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
B.m, B.n, 0, 0, B.m, B.n, &R);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&A);
plasma_desc_destroy(&B);
plasma_desc_destroy(&X);
return retval;
}
retval = plasma_desc_general_create(PlasmaRealFloat, nb, nb,
A.m, A.n, 0, 0, A.m, A.n, &As);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&A);
plasma_desc_destroy(&B);
plasma_desc_destroy(&X);
plasma_desc_destroy(&R);
return retval;
}
retval = plasma_desc_general_create(PlasmaRealFloat, nb, nb,
X.m, X.n, 0, 0, X.m, X.n, &Xs);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&A);
plasma_desc_destroy(&B);
plasma_desc_destroy(&X);
plasma_desc_destroy(&R);
plasma_desc_destroy(&As);
return retval;
}
// Allocate tiled workspace for Infinity norm calculations.
size_t lwork = imax((size_t)A.nt*A.n+A.n, (size_t)X.mt*X.n+(size_t)R.mt*R.n);
double *work = (double*)malloc(((size_t)lwork)*sizeof(double));
double *Rnorm = (double*)malloc(((size_t)R.n)*sizeof(double));
double *Xnorm = (double*)malloc(((size_t)X.n)*sizeof(double));
// Initialize sequence.
plasma_sequence_t sequence;
retval = plasma_sequence_init(&sequence);
// Initialize request.
plasma_request_t request;
retval = plasma_request_init(&request);
// asynchronous block
#pragma omp parallel
#pragma omp master
{
// Translate matrices to tile layout.
plasma_omp_dge2desc(pA, lda, A, &sequence, &request);
plasma_omp_dge2desc(pB, ldb, B, &sequence, &request);
// Call tile async function.
plasma_omp_dsposv(uplo, A, B, X, As, Xs, R, work, Rnorm, Xnorm,
iter, &sequence, &request);
// Translate back to LAPACK layout.
plasma_omp_ddesc2ge(X, pX, ldx, &sequence, &request);
}
// implicit synchronization
// Free matrices in tile layout.
plasma_desc_destroy(&A);
plasma_desc_destroy(&B);
plasma_desc_destroy(&X);
plasma_desc_destroy(&R);
plasma_desc_destroy(&As);
plasma_desc_destroy(&Xs);
free(work);
free(Rnorm);
free(Xnorm);
// Return status.
int status = sequence.status;
return status;
}
// Returns true when every column satisfies the convergence criterion
// Rnorm[i] <= Xnorm[i] * cte; bails out on the first failing column.
static bool conv(double *Rnorm, double *Xnorm, int n, double cte)
{
    for (int col = 0; col < n; col++) {
        if (Rnorm[col] > Xnorm[col]*cte)
            return false;
    }
    return true;
}
/***************************************************************************//**
*
* @ingroup plasma_posv
*
* Solves a symmetric positive definite system using iterative refinement
* with the Cholesky factor computed using plasma_spotrf.
* Non-blocking tile version of plasma_dsposv().
* Operates on matrices stored by tiles.
* All matrices are passed through descriptors.
* All dimensions are taken from the descriptors.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] uplo
* Specifies whether the matrix A is upper or lower triangular:
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] A
* Descriptor of matrix A.
*
* @param[in] B
* Descriptor of matrix B.
*
* @param[in,out] X
* Descriptor of matrix X.
*
* @param[out] As
 *          Descriptor of auxiliary matrix A in single precision.
*
* @param[out] Xs
 *          Descriptor of auxiliary matrix X in single precision.
*
* @param[out] R
* Descriptor of auxiliary remainder matrix R.
*
* @param[out] work
* Workspace needed to compute infinity norm of the matrix A.
*
* @param[out] Rnorm
 *          Workspace needed to store the max value in each of the residual vectors.
*
* @param[out] Xnorm
 *          Workspace needed to store the max value in each of the current solution
* vectors.
*
* @param[out] iter
* The number of the iterations in the iterative refinement
* process, needed for the convergence. If failed, it is set
* to be -(1+itermax), where itermax = 30.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes).
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PLASMA_SUCCESS (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_dsposv
* @sa plasma_omp_dsposv
* @sa plasma_omp_zposv
*
******************************************************************************/
void plasma_omp_dsposv(plasma_enum_t uplo,
                       plasma_desc_t A, plasma_desc_t B, plasma_desc_t X,
                       plasma_desc_t As, plasma_desc_t Xs, plasma_desc_t R,
                       double *work, double *Rnorm, double *Xnorm, int *iter,
                       plasma_sequence_t *sequence,
                       plasma_request_t *request)
{
    const int itermax = 30;
    const double zmone = -1.0;
    const double zone = 1.0;
    *iter = 0;
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // Validate sequence and request before anything else: every other
    // error path below hands them to plasma_request_fail(), so they must
    // be checked first (previously these checks came after the descriptor
    // checks, which could pass a NULL sequence/request to the failure path).
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // Check input arguments.
    if (uplo != PlasmaUpper && uplo != PlasmaLower) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(X) != PlasmaSuccess) {
        plasma_error("invalid X");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(As) != PlasmaSuccess) {
        plasma_error("invalid As");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(Xs) != PlasmaSuccess) {
        plasma_error("invalid Xs");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(R) != PlasmaSuccess) {
        plasma_error("invalid R");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // quick return
    if (A.n == 0 || B.n == 0)
        return;
    // Workspace layout for the columnwise max reductions (damax):
    // the first X.mt*X.n entries are used for X, the remainder for R.
    double *workX = work;
    double *workR = &work[X.mt*X.n];
    // Compute some constants.
    double cte;
    double eps = LAPACKE_dlamch_work('E');
    double Anorm;
    plasma_pdlansy(PlasmaInfNorm, uplo, A, work, &Anorm, sequence, request);
    // Convert B from double to single precision, store result in Xs.
    plasma_pdlag2s(B, Xs, sequence, request);
    // Convert A from double to single precision, store result in As.
    // TODO: need dlat2s
    plasma_pdlag2s(A, As, sequence, request);
    // Compute the Cholesky factorization of As.
    plasma_pspotrf(uplo, As, sequence, request);
    // Solve the system As * Xs = Bs (two triangular solves).
    plasma_pstrsm(PlasmaLeft, uplo,
                  uplo == PlasmaUpper ? PlasmaConjTrans : PlasmaNoTrans,
                  PlasmaNonUnit, 1.0, As, Xs, sequence, request);
    plasma_pstrsm(PlasmaLeft, uplo,
                  uplo == PlasmaUpper ? PlasmaNoTrans : PlasmaConjTrans,
                  PlasmaNonUnit, 1.0, As, Xs, sequence, request);
    // Convert Xs to double precision.
    plasma_pslag2d(Xs, X, sequence, request);
    // Compute R = B - A * X.
    plasma_pdlacpy(PlasmaGeneral, PlasmaNoTrans, B, R, sequence, request);
    plasma_pdsymm(PlasmaLeft, uplo, zmone, A, X, zone, R, sequence, request);
    // Check whether the nrhs normwise backward error satisfies the
    // stopping criterion. If yes, set iter=0 and return.
    plasma_pdamax(PlasmaColumnwise, X, workX, Xnorm, sequence, request);
    plasma_pdamax(PlasmaColumnwise, R, workR, Rnorm, sequence, request);
    #pragma omp taskwait
    {
        // cte: convergence threshold scaled by the norm of A and machine eps.
        cte = Anorm * eps * sqrt((double)A.n);
        if (conv(Rnorm, Xnorm, R.n, cte)) {
            *iter = 0;
            return;
        }
    }
    // iterative refinement
    for (int iiter = 0; iiter < itermax; iiter++) {
        // Convert R from double to single precision, store result in Xs.
        plasma_pdlag2s(R, Xs, sequence, request);
        // Solve the system As * Xs = Rs.
        plasma_pstrsm(PlasmaLeft, uplo,
                      uplo == PlasmaUpper ? PlasmaConjTrans : PlasmaNoTrans,
                      PlasmaNonUnit, 1.0, As, Xs, sequence, request);
        plasma_pstrsm(PlasmaLeft, uplo,
                      uplo == PlasmaUpper ? PlasmaNoTrans : PlasmaConjTrans,
                      PlasmaNonUnit, 1.0, As, Xs, sequence, request);
        // Convert Xs back to double precision and update the current iterate.
        plasma_pslag2d(Xs, R, sequence, request);
        plasma_pdgeadd(PlasmaNoTrans, zone, R, zone, X, sequence, request);
        // Compute R = B - A * X.
        plasma_pdlacpy(PlasmaGeneral, PlasmaNoTrans, B, R, sequence, request);
        plasma_pdsymm(PlasmaLeft, uplo, zmone, A, X, zone, R,
                      sequence, request);
        // Check whether nrhs normwise backward error satisfies the
        // stopping criterion. If yes, set iter = iiter > 0 and return.
        plasma_pdamax(PlasmaColumnwise, X, workX, Xnorm, sequence, request);
        plasma_pdamax(PlasmaColumnwise, R, workR, Rnorm, sequence, request);
        #pragma omp taskwait
        {
            if (conv(Rnorm, Xnorm, R.n, cte)) {
                *iter = iiter+1;
                return;
            }
        }
    }
    // If we are at this place of the code, this is because we have performed
    // iter = itermax iterations and never satisfied the stopping criterion,
    // set up the iter flag accordingly and follow up with double precision
    // routine.
    *iter = -itermax - 1;
    // Compute Cholesky factorization of A.
    plasma_pdpotrf(uplo, A, sequence, request);
    // Solve the system A * X = B.
    plasma_pdlacpy(PlasmaGeneral, PlasmaNoTrans, B, X, sequence, request);
    plasma_pdtrsm(PlasmaLeft, uplo,
                  uplo == PlasmaUpper ? PlasmaConjTrans : PlasmaNoTrans,
                  PlasmaNonUnit, 1.0, A, X, sequence, request);
    plasma_pdtrsm(PlasmaLeft, uplo,
                  uplo == PlasmaUpper ? PlasmaNoTrans : PlasmaConjTrans,
                  PlasmaNonUnit, 1.0, A, X, sequence, request);
}
|
matrix.c |
#include "matrix.h"
/*
* matrix.c
*
* Copyright (c) 2014, Rafat Hussain
* License : BSD 3-Clause
* See COPYRIGHT for more details
*/
// Pairs a pointer to an array element with its original index so qsort
// can produce an index permutation (see compare/sort1d below).
typedef struct {
    double* a; // pointer into the array being sorted
    int b;     // original index of *a
} vipair;
double macheps() {
    /* Machine epsilon, found dynamically: halve until adding to 1.0 no
     * longer changes it, then step back one halving. */
    double eps = 1.0;
    do {
        eps *= 0.5;
    } while (eps + 1.0 > 1.0);
    return eps * 2.0;
}
int compare (const void* ind1, const void* ind2)
{
if (*((vipair *)ind1)->a > *((vipair *)ind2)->a)
return -1;
else if (*((vipair *)ind1)->a < *((vipair *)ind2)->a)
return 1;
else
return 0;
}
void sort1d(double* v,int N, int* pos)
{
    /* Fill pos[0..N-1] with the indices of v in descending value order
     * (per compare); v itself is left untouched. No-op for N <= 0. */
    vipair* tagged;
    int k;
    if (N <= 0)
        return;
    tagged = (vipair*) malloc(sizeof(vipair) * N);
    for (k = 0; k < N; ++k) {
        tagged[k].a = v + k;
        tagged[k].b = k;
    }
    qsort(tagged, N, sizeof(vipair), compare);
    for (k = 0; k < N; ++k)
        pos[k] = tagged[k].b;
    free(tagged);
}
double array_max_abs(double *array,int N) {
    /* Largest absolute value among array[0..N-1]; 0.0 for empty input. */
    double best = 0.0;
    int k;
    for (k = 0; k < N; ++k) {
        double mag = fabs(array[k]);
        if (mag > best)
            best = mag;
    }
    return best;
}
double array_max(double *array,int N) {
    /* Maximum element of array[0..N-1]; caller must pass N >= 1. */
    double best = array[0];
    int k;
    for (k = 1; k < N; ++k)
        if (array[k] > best)
            best = array[k];
    return best;
}
double array_min(double *array,int N) {
    /* Minimum element of array[0..N-1]; caller must pass N >= 1. */
    double best = array[0];
    int k;
    for (k = 1; k < N; ++k)
        if (array[k] < best)
            best = array[k];
    return best;
}
void dtranspose(double *sig, int rows, int cols,double *col) {
    /* Transpose the rows x cols matrix sig into col (cols x rows),
     * traversing anti-diagonal by anti-diagonal rather than row by row.
     * i indexes the diagonal (-rows+1 .. cols-1) and ud tracks how many
     * elements the current diagonal holds, capped at min(rows, cols). */
    int max,ud,i,k;
    if (rows >= cols) {
        max = cols;  /* max = min(rows, cols): longest possible diagonal */
    } else {
        max = rows;
    }
    ud = 0;
    for (i= -rows + 1; i < cols; i++) {
        if (i <= 0) {
            /* Diagonals starting in column 0: length grows by one per step. */
            ud++;
            if (ud >= max)
                ud = max;
            for (k = 0; k < ud; k++) {
                col[k*rows+k-i] = sig[(k-i)*cols+k];
            }
        } else {
            /* Diagonals starting in row 0: length shrinks once the
             * lower-right corner has been passed. */
            if (i - cols + rows > 0) {
                ud--;
                if (ud >= max)
                    ud = max;
            }
            for (k = 0; k < ud; k++) {
                col[(k+i)*rows+k] = sig[k*cols+k+i];
            }
        }
    }
}
void stranspose(double *sig, int rows, int cols,double *col) {
    /* Simple transpose: col (cols x rows) = sig^T (sig is rows x cols),
     * parallelized over rows when OpenMP is enabled. */
    int r,c;
    #pragma omp parallel for private(r,c)
    for (r = 0; r < rows; r++) {
        for (c = 0; c < cols; c++) {
            col[c * rows + r] = sig[r * cols + c];
        }
    }
}
void rtranspose(double *m, int rows, int cols,double *n, int r, int c) {
    /* Cache-oblivious recursive transpose: write m^T into n, where m is a
     * rows x cols view with row stride c and n has row stride r.
     * Recursively splits the larger dimension until a tile fits within
     * BLOCKSIZE in both dimensions, then transposes it directly. */
    register int i,j;
    int rm,cm;
    int rm1,cm1,rm2,cm2;
    int block;
    block = (int) BLOCKSIZE;
    if (rows <= block && cols <= block) {
        /* Base case: direct element-by-element transpose of the tile. */
        for (i = 0; i < rows; ++i) {
            for (j = 0; j < cols; ++j) {
                n[i+j*r] = m[j+i*c];
            }
        }
    } else if (cols >= rows) {
        /* Split columns; the right half of m becomes lower rows of n. */
        rm = rows;
        cm1 = (int) ceil((double) cols/2.0);
        cm2 = cols - cm1;
        rtranspose(m,rm,cm1,n,r,c);
        rtranspose(m+cm1,rm,cm2,n+cm1*r,r,c);
    } else if (rows > cols) {
        /* Split rows; the lower half of m becomes right columns of n. */
        rm1 = (int) ceil((double) rows/2.0);
        rm2 = rows - rm1;
        cm = cols;
        rtranspose(m,rm1,cm,n,r,c);
        rtranspose(m+rm1*c,rm2,cm,n+rm1,r,c);
    }
}
void ctranspose(double *sig, int rows, int cols,double *col) {
    /* Transpose dispatcher: blocked recursion when either dimension
     * reaches BLOCKSIZE, otherwise the simple row-major sweep. */
    int threshold = (int) BLOCKSIZE;
    if (rows >= threshold || cols >= threshold) {
        rtranspose(sig, rows, cols, col, rows, cols);
    } else {
        stranspose(sig, rows, cols, col);
    }
}
void mtranspose(double *sig, int rows, int cols,double *col) {
    /* Public transpose entry point: only very large matrices (both
     * dimensions >= 16*BLOCKSIZE) take the blocked path. */
    int big = (int) BLOCKSIZE * 16;
    if (rows >= big && cols >= big) {
        ctranspose(sig, rows, cols, col);
    } else {
        stranspose(sig, rows, cols, col);
    }
}
void mdisplay(double *A, int row, int col) {
    /* Pretty-print A (row x col, row-major) to stdout, one matrix row per
     * line, bracketed by R<i> markers. */
    int r, c;
    printf("\n MATRIX Order : %d X %d \n \n",row,col);
    for (r = 0; r < row; r++) {
        printf("R%d: ",r);
        for (c = 0; c < col; c++) {
            printf("%g ",A[r * col + c]);
        }
        printf(":R%d \n",r);
    }
}
void madd(double* A, double* B, double* C,int rows,int cols) {
    /* C = A + B, elementwise; all three matrices are rows x cols. */
    int total = rows * cols;
    int k;
    #pragma omp parallel for
    for (k = 0; k < total; ++k) {
        C[k] = A[k] + B[k];
    }
}
void msub(double* A, double* B, double* C,int rows,int cols) {
    /* C = A - B, elementwise; all three matrices are rows x cols. */
    int total = rows * cols;
    int k;
    #pragma omp parallel for
    for (k = 0; k < total; ++k) {
        C[k] = A[k] - B[k];
    }
}
void scale(double *A, int rows, int cols, double alpha) {
    /* A = alpha * A, in place; A is rows x cols. */
    int total = rows * cols;
    int k;
    #pragma omp parallel for
    for (k = 0; k < total; ++k) {
        A[k] = alpha * A[k];
    }
}
void nmult(double* A, double* B, double* C,int ra,int ca, int cb) {
    /* Naive dense multiply: C = A * B, with A ra x ca, B ca x cb and
     * C ra x cb, all row-major. */
    int r,c,k;
    int inner = ca;
    #pragma omp parallel for private(r,c,k)
    for (r = 0; r < ra; ++r) {
        for (c = 0; c < cb; ++c) {
            double acc = 0.;
            for (k = 0; k < inner; ++k) {
                acc += A[r * inner + k] * B[k * cb + c];
            }
            C[r * cb + c] = acc;
        }
    }
}
void tmult(double* A, double* B, double* C,int ra,int ca, int cb) {
    /* C = A * B (A ra x ca, B ca x cb), computed against an explicit
     * transpose of B so the inner loop reads both operands with unit
     * stride. */
    int r,c,k;
    int inner = ca;
    double *Btr = (double*) malloc(sizeof(double) * ca * cb);
    mtranspose(B, ca, cb, Btr);
    #pragma omp parallel for private(r,c,k)
    for (r = 0; r < ra; ++r) {
        for (c = 0; c < cb; ++c) {
            double acc = 0.;
            for (k = 0; k < inner; ++k) {
                acc += A[r * inner + k] * Btr[c * inner + k];
            }
            C[r * cb + c] = acc;
        }
    }
    free(Btr);
}
void recmult(double* A, double* B, double* C,int m,int n, int p,int sA,int sB, int sC) {
    /* Divide-and-conquer multiply-accumulate: C += A * B, where A is
     * m x n, B is n x p and C is m x p. The caller must pre-zero C.
     * NOTE(review): the stride parameters are confusingly named -- as
     * called from rmult (sA=m, sB=n, sC=p), sB acts as the row stride of
     * A and sC as the row stride of both B and C, while sA is merely
     * passed through. Verify before reusing this function elsewhere. */
    int m2,n2,p2;
    register int i,j,k;
    int u,v,t;
    if (m + n + p <= CUTOFF) {
        /* Base case: naive triple loop, accumulating into C. */
        for (i = 0; i < m; ++i) {
            for (j = 0; j < p; ++j) {
                v = i * sB;
                u = i * sC;
                t = j + u;
                for (k = 0; k < n;++k) {
                    C[t] += A[k + v] * B[j + k * sC];
                }
            }
        }
    } else if (m >= n && m >= p) {
        /* Split the row dimension of A and C. */
        m2 = (int) ceil((double) m / 2.0);
        recmult(A,B,C,m2,n,p,sA,sB,sC);
        recmult(A + m2*sB,B,C + m2*sC,m-m2,n,p,sA,sB,sC);
    } else if (n >= m && n >= p) {
        /* Split the shared inner dimension; both halves accumulate into C. */
        n2 = (int) ceil((double) n / 2.0);
        recmult(A,B,C,m,n2,p,sA,sB,sC);
        recmult(A+n2,B+n2*sC,C,m,n-n2,p,sA,sB,sC);
    } else if (p >= m && p >= n) {
        /* Split the column dimension of B and C. */
        p2 = (int) ceil((double) p / 2.0);
        recmult(A,B,C,m,n,p2,sA,sB,sC);
        recmult(A,B+p2,C+p2,m,n,p-p2,sA,sB,sC);
    }
}
void rmult(double* A, double* B, double* C,int m,int n, int p) {
    /* C = A * B (m x n times n x p) via the divide-and-conquer kernel;
     * C is zeroed first because recmult accumulates into it. */
    int k;
    int total = m * p;
    for (k = 0; k < total; ++k) {
        C[k] = 0.;
    }
    recmult(A, B, C, m, n, p, m, n, p);
}
int findrec(int *a, int *b, int *c) {
    /* Round *a, *b, *c up so that repeated halving (with ceil at each
     * level) reaches the CUTOFF base case exactly; returns the number of
     * halvings (recursion depth) required. */
    double da = (double) *a;
    double db = (double) *b;
    double dc = (double) *c;
    double mul = 1.;
    int depth = 0;
    while (da + db + dc > (double) CUTOFF) {
        depth++;
        mul *= 2;
        da = ceil(da/2.);
        db = ceil(db/2.);
        dc = ceil(dc/2.);
    }
    *a = (int) da * mul;
    *b = (int) db * mul;
    *c = (int) dc * mul;
    return depth;
}
void add_zero_pad(double *X, int rows, int cols, int zrow, int zcol,double *Y) {
    /* Copy X (rows x cols) into the top-left corner of Y, which is
     * (rows+zrow) x (cols+zcol); zero-fill the extra right columns and
     * bottom rows. */
    int R = rows + zrow;
    int C = cols + zcol;
    int i, j;
    for (i = 0; i < R; ++i) {
        for (j = 0; j < C; ++j) {
            if (i < rows && j < cols)
                Y[i * C + j] = X[i * cols + j];
            else
                Y[i * C + j] = 0.;
        }
    }
}
void remove_zero_pad(double *Y, int rows, int cols, int zrow, int zcol,double *Z) {
    /* Extract the top-left (rows-zrow) x (cols-zcol) corner of Y
     * (rows x cols) into Z -- the inverse of add_zero_pad. */
    int R = rows - zrow;
    int C = cols - zcol;
    int i, j;
    for (i = 0; i < R; ++i)
        for (j = 0; j < C; ++j)
            Z[i * C + j] = Y[i * cols + j];
}
void madd_stride(double* A, double* B, double* C,int rows,int cols,int sA,int sB,int sC) {
    /* C = A + B on rows x cols submatrices whose rows are laid out with
     * independent strides sA, sB, sC. */
    int i, j;
    for (i = 0; i < rows; ++i) {
        const double *ra = A + i * sA;
        const double *rb = B + i * sB;
        double *rc = C + i * sC;
        for (j = 0; j < cols; j++) {
            rc[j] = ra[j] + rb[j];
        }
    }
}
void msub_stride(double* A, double* B, double* C,int rows,int cols,int sA,int sB,int sC) {
    /* C = A - B on rows x cols submatrices whose rows are laid out with
     * independent strides sA, sB, sC. */
    int i, j;
    for (i = 0; i < rows; ++i) {
        const double *ra = A + i * sA;
        const double *rb = B + i * sB;
        double *rc = C + i * sC;
        for (j = 0; j < cols; j++) {
            rc[j] = ra[j] - rb[j];
        }
    }
}
void rmadd_stride(double* A, double* B, double* C,int rows,int cols,int p,int sA,int sB,int sC) {
    /* Recursive blocked elementwise add, C = A + B on rows x cols blocks;
     * p only feeds the CUTOFF recursion test.
     * NOTE(review): the recursive calls advance A by sB and B/C by sC,
     * which does not match the parameter names (nor the base case, which
     * strides A by sA). This function is not called anywhere in this
     * file; verify the stride arithmetic before relying on the recursive
     * path. */
    int i,j,u,v,w;
    if (rows + cols + p <= CUTOFF) {
        /* Base case: direct strided add. */
        for (i = 0; i < rows; ++i) {
            u = i * sC;
            v = i * sA;
            w = i * sB;
            for(j = 0; j < cols;j++) {
                C[j + u] = A[j + v] + B[j + w];
            }
        }
    } else {
        rows/=2;cols/=2;p/=2;
        rmadd_stride(A,B,C,rows,cols,p,sA,sB,sC);
        rmadd_stride(A + cols,B + cols,C + cols,rows,cols,p,sA,sB,sC);
        rmadd_stride(A + rows *sB,B + rows *sC,C + rows *sC,rows,cols,p,sA,sB,sC);
        rmadd_stride(A + rows *sB + cols,B + rows *sC + cols,C + rows *sC + cols,rows,cols,p,sA,sB,sC);
    }
}
void rmsub_stride(double* A, double* B, double* C,int rows,int cols,int p,int sA,int sB,int sC) {
    /* Recursive blocked elementwise subtract, C = A - B on rows x cols
     * blocks; p only feeds the CUTOFF recursion test.
     * NOTE(review): same stride-naming inconsistency as rmadd_stride
     * (recursion strides A by sB and B/C by sC, base case strides A by
     * sA); only referenced from commented-out code in srecmult. */
    int i,j,u,v,w;
    if (rows + cols + p <= CUTOFF) {
        /* Base case: direct strided subtract. */
        for (i = 0; i < rows; ++i) {
            u = i * sC;
            v = i * sA;
            w = i * sB;
            for(j = 0; j < cols;j++) {
                C[j + u] = A[j + v] - B[j + w];
            }
        }
    } else {
        rows/=2;cols/=2;p/=2;
        rmsub_stride(A,B,C,rows,cols,p,sA,sB,sC);
        rmsub_stride(A + cols,B + cols,C + cols,rows,cols,p,sA,sB,sC);
        rmsub_stride(A + rows *sB,B + rows *sC,C + rows *sC,rows,cols,p,sA,sB,sC);
        rmsub_stride(A + rows *sB + cols,B + rows *sC + cols,C + rows *sC + cols,rows,cols,p,sA,sB,sC);
    }
}
void srecmult(double* A, double* B, double* C,int m,int n, int p,int sA,int sB, int sC) {
    /* Strassen recursive multiply: C = A * B, where A is m x n (row
     * stride sA), B is n x p (stride sB) and C is m x p (stride sC).
     * Dimensions are assumed to halve cleanly down to the CUTOFF base
     * case -- smult/findrec pad inputs to guarantee this.
     * See http://en.wikipedia.org/wiki/Strassen_algorithm for the
     * m1..m7 product definitions. */
    register int i,j,k;
    int u,v,t;
    double sum;
    double *A1,*B1;
    double *a11,*a12,*a21,*a22;
    double *b11,*b12,*b21,*b22;
    double *c11,*c12,*c21,*c22;
    double *m1,*m2,*m3,*m4,*m5,*m6,*m7;
    int sm1,sm2,sm3,sm4,sm5,sm6,sm7;
    int sA1,sB1;
    if (m + n + p <= CUTOFF) {
        /* Base case: naive triple loop (overwrites C, no accumulation). */
        for (i = 0; i < m; ++i) {
            for (j = 0; j < p; ++j) {
                v = i * sA;
                u = i * sC;
                t = j + u;
                sum = 0.;
                for (k = 0; k < n;++k) {
                    sum += A[k + v] * B[j + k * sB];
                }
                C[t] = sum;
            }
        }
    } else {
        m/=2;n/=2;p/=2;
        /* Quadrant pointers into the operands (halved dimensions). */
        // A size mXn, C size mXp
        a11 = A;
        a12 = A + n;
        a21 = A + m * sA;
        a22 = A + n + m * sA;
        //B size nXp
        b11 = B;
        b12 = B + p;
        b21 = B + n * sB;
        b22 = B + p + n * sB;
        //C size mXp
        c11 = C;
        c12 = C + p;
        c21 = C + m * sC;
        c22 = C + p + m * sC;
        /* m1, m3, m4 need their own m x p buffers; m2, m5, m6, m7 are
         * computed directly into quadrants of C to save memory. */
        m1 = (double*) malloc(sizeof(double) *m * p);
        sm1 = p;
        m3 = (double*) malloc(sizeof(double) *m * p);
        sm3 = p;
        m4 = (double*) malloc(sizeof(double) *m * p);
        sm4 = p;
        m2 = c21;
        sm2 = sC;
        m5 = c12;
        sm5 = sC;
        m6 = c22;
        sm6 = sC;
        m7 = c11;
        sm7 = sC;
        //m1 = (a11 + a22) * (b11 + b22)
        sA1 = n;
        sB1 = p;
        A1 = (double*) malloc(sizeof(double) * m * n);
        B1 = (double*) malloc(sizeof(double) * n * p);
        madd_stride(a11,a22,A1,m,n,sA,sA,sA1);
        madd_stride(b11,b22,B1,n,p,sB,sB,sB1);
        srecmult(A1,B1,m1,m,n,p,sA1,sB1,sm1);
        free(A1);
        free(B1);
        //m2 = (a21 + a22) * b11
        A1 = (double*) malloc(sizeof(double) * m * n);
        madd_stride(a21,a22,A1,m,n,sA,sA,sA1);
        srecmult(A1,b11,m2,m,n,p,sA1,sB,sm2);
        free(A1);
        //m3 = a11 * (b12 - b22)
        B1 = (double*) malloc(sizeof(double) * n * p);
        msub_stride(b12,b22,B1,n,p,sB,sB,sB1);
        srecmult(a11,B1,m3,m,n,p,sA,sB1,sm3);
        free(B1);
        //m4 = a22 * (b21 - b11)
        B1 = (double*) malloc(sizeof(double) * n * p);
        msub_stride(b21,b11,B1,n,p,sB,sB,sB1);
        srecmult(a22,B1,m4,m,n,p,sA,sB1,sm4);
        free(B1);
        //m5 = (a11 + a12) * b22
        A1 = (double*) malloc(sizeof(double) * m * n);
        madd_stride(a11,a12,A1,m,n,sA,sA,sA1);
        srecmult(A1,b22,m5,m,n,p,sA1,sB,sm5);
        free(A1);
        //m6 = (a21 - a11) * (b11 + b12)
        A1 = (double*) malloc(sizeof(double) * m * n);
        B1 = (double*) malloc(sizeof(double) * n * p);
        msub_stride(a21,a11,A1,m,n,sA,sA,sA1);
        madd_stride(b11,b12,B1,n,p,sB,sB,sB1);
        srecmult(A1,B1,m6,m,n,p,sA1,sB1,sm6);
        free(A1);
        free(B1);
        //m7 = (a12 - a22) * (b21 + b22)
        A1 = (double*) malloc(sizeof(double) * m * n);
        B1 = (double*) malloc(sizeof(double) * n * p);
        msub_stride(a12,a22,A1,m,n,sA,sA,sA1);
        madd_stride(b21,b22,B1,n,p,sB,sB,sB1);
        srecmult(A1,B1,m7,m,n,p,sA1,sB1,sm7);
        free(A1);
        free(B1);
        // c11 = m1 + m4 - m5 + m7 (accumulated in place, m7 aliases c11)
        A1 = (double*) malloc(sizeof(double) * m * p);
        sA1 = p;
        madd_stride(m1,m7,m7,m,p,sm1,sm7,sm7);
        msub_stride(m4,m5,A1,m,p,sm4,sm5,sA1);
        madd_stride(m7,A1,m7,m,p,sm7,sA1,sm7);
        free(A1);
        // c22 = m1 - m2 + m3 + m6 (m6 aliases c22)
        A1 = (double*) malloc(sizeof(double) * m * p);
        sA1 = p;
        madd_stride(m1,m6,m6,m,p,sm1,sm6,sm6);
        msub_stride(m3,m2,A1,m,p,sm3,sm2,sA1);
        madd_stride(m6,A1,m6,m,p,sm6,sA1,sm6);
        free(A1);
        //c12 = m3 + m5 (m5 aliases c12)
        madd_stride(m3,m5,m5,m,p,sm3,sm5,sm5);
        //c21 = m2 + m4 (m2 aliases c21)
        madd_stride(m4,m2,m2,m,p,sm4,sm2,sm2);
        free(m1);
        free(m3);
        free(m4);
    }
}
void smult(double* A, double* B, double* C,int m,int n, int p) {
    /* C = A * B (m x n times n x p) via Strassen multiplication:
     * the inputs are zero-padded up to dimensions that halve cleanly
     * down to the CUTOFF base case (findrec), multiplied with srecmult,
     * and the result is cropped back into C.
     * Fix: removed the (a/2)*(c/2) buffer P, which was allocated and
     * freed without ever being used, and the unused findrec return. */
    int a = m;
    int b = n;
    int c = p;
    double *X, *Y, *Z;
    findrec(&a, &b, &c);
    X = (double*) malloc(sizeof(double) * a * b);
    Y = (double*) malloc(sizeof(double) * b * c);
    Z = (double*) malloc(sizeof(double) * a * c);
    add_zero_pad(A, m, n, a - m, b - n, X);
    add_zero_pad(B, n, p, b - n, c - p, Y);
    srecmult(X, Y, Z, a, b, c, b, c, c);
    remove_zero_pad(Z, a, c, a - m, c - p, C);
    free(X);
    free(Y);
    free(Z);
}
void mmult(double* A, double* B, double* C,int m,int n, int p) {
    /* Multiply dispatcher: small problems go to the naive kernel,
     * larger ones to the Strassen path. */
    if (m + n + p <= CUTOFF / 2)
        nmult(A, B, C, m, n, p);
    else
        smult(A, B, C, m, n, p);
}
static int pludecomp(double *A,int N,int *ipiv) {
    /* In-place LU factorization of A (N x N, row-major) with partial
     * pivoting: on return the strict lower triangle holds the L
     * multipliers (unit diagonal implied) and the upper triangle holds
     * U. ipiv[k] records which original row ended up in row k.
     * Always returns 0. */
    int k,j,l,c1,c2,mind,tempi;
    double ld,mult,mval,temp;
    for(k=0;k < N;++k)
        ipiv[k] = k;
    for(k = 0; k < N-1; ++k) {
        // Select the pivot: entry of largest magnitude in column k, at
        // or below the diagonal.
        mval = fabs(A[k*N + k]);
        mind = k;
        for (j=k+1; j < N;++j) {
            if (mval < fabs(A[j*N + k])) {
                // BUG FIX: track the magnitude, not the signed value.
                // Storing a negative candidate (old code: mval = A[j*N+k])
                // made every later comparison succeed, selecting a
                // smaller-magnitude pivot row.
                mval = fabs(A[j*N + k]);
                mind = j;
            }
        }
        if ( mind != k) {
            // Swap rows k and mind, recording the permutation.
            c1 = k *N;
            c2 = mind * N;
            tempi = ipiv[mind];
            ipiv[mind] = ipiv[k];
            ipiv[k] = tempi;
            for (j = 0; j < N;j++) {
                temp = A[c1 + j];
                *(A + c1 + j) = *(A + c2 + j);
                *(A + c2 + j) = temp;
            }
        }
        // Eliminate below the diagonal; the multipliers are stored in
        // place. A zero pivot leaves the column untouched.
        c2 = k*N;
        ld = A[c2 + k];
        if (ld != 0.) {
            for (j = k+1; j < N; ++j) {
                c1 = j*N;
                mult = A[c1+k] /= ld;
                for(l = k+1; l < N; ++l) {
                    A[c1+l] -= mult * A[c2 + l];
                }
            }
        }
    }
    return 0;
}
// Public wrapper: LU-factorize A (N x N, row-major) in place with
// partial pivoting; the row permutation is recorded in ipiv.
void ludecomp(double *A,int N,int *ipiv) {
    pludecomp(A,N,ipiv);
}
void linsolve(double *A,int N,double *b,int *ipiv,double *x) {
    /* Solve A x = b, where A holds the in-place LU factorization and
     * ipiv the row permutation produced by ludecomp:
     *   1. forward-substitute L y = P b (L has implicit unit diagonal),
     *   2. back-substitute   U x = y.
     * Exits the process if U has a zero diagonal entry (singular system). */
    int row, col;
    double acc;
    double *y = (double*) malloc(sizeof(double) * N);
    for (row = 0; row < N; ++row) {
        y[row] = 0.;
        x[row] = 0.;
        if (A[row * N + row] == 0.) {
            printf("The Matrix system does not have a unique solution");
            exit(1);
        }
    }
    // Forward substitution against the permuted right-hand side.
    y[0] = b[ipiv[0]];
    for (row = 1; row < N; ++row) {
        acc = 0.;
        for (col = 0; col < row; ++col) {
            acc += y[col] * A[row * N + col];
        }
        y[row] = b[ipiv[row]] - acc;
    }
    // Back substitution.
    x[N - 1] = y[N - 1] / A[N * N - 1];
    for (row = N - 2; row >= 0; row--) {
        acc = 0.;
        for (col = row + 1; col < N; col++) {
            acc += A[row * N + col] * x[col];
        }
        x[row] = (y[row] - acc) / A[row * N + row];
    }
    free(y);
}
void minverse(double *A,int N,int *ipiv,double *inv) {
    /* Column-by-column inverse: solve A x = e_j for each unit vector e_j
     * and write x into column j of inv (N x N). A and ipiv must hold the
     * factorization produced by ludecomp. */
    int j, k, dst;
    double *e = (double*) malloc(sizeof(double) * N);
    double *sol = (double*) malloc(sizeof(double) * N);
    for (k = 0; k < N; ++k) {
        e[k] = 0.;
        sol[k] = 0.;
    }
    for (j = 0; j < N; ++j) {
        e[j] = 1.;
        linsolve(A, N, e, ipiv, sol);
        dst = j;
        for (k = 0; k < N; ++k) {
            inv[dst] = sol[k];
            dst += N;
        }
        e[j] = 0.;
    }
    free(sol);
    free(e);
}
void eye(double *mat,int N) {
    /* Write the N x N identity matrix into mat (row-major). */
    int r, c;
    for (r = 0; r < N; ++r) {
        for (c = 0; c < N; ++c) {
            mat[r * N + c] = (r == c) ? 1. : 0.;
        }
    }
}
static double house_1(double*x,int N,double *v) {
    /* Householder vector of x (length N), after Golub & Van Loan
     * Algorithm 5.1.1: fills v (normalized so v[0] == 1) and returns
     * beta such that (I - beta*v*v^T)*x annihilates x[1..N-1]. */
    double beta,mu,temp;
    double *sigma;
    int i;
    sigma = (double*) malloc(sizeof(double) * 1);
    if (N > 1) {
        // sigma = x[1..N-1]^T * x[1..N-1], via a 1x(N-1) by (N-1)x1 multiply.
        mmult(x+1,x+1,sigma,1,N-1,1);
    } else {
        sigma[0] = 0.0;
    }
    v[0] =1.;
    for (i = 1; i < N;++i) {
        v[i] = x[i];
    }
    if (sigma[0] == 0. && x[0] >= 0.) {
        // x is already a non-negative multiple of e1: identity reflector.
        beta = 0.;
    } else if (sigma[0] == 0. && x[0] < 0.) {
        beta = -2.;
    }else {
        mu = sqrt(sigma[0] + x[0] * x[0]);   // mu = ||x||_2
        if (x[0] <= 0.) {
            v[0] = x[0] - mu;
        } else {
            // Equivalent form that avoids cancellation when x[0] > 0.
            v[0] = - sigma[0] / (x[0] + mu);
        }
        temp = v[0];
        beta = (2.0 * v[0] * v[0]) /(sigma[0] + v[0] * v[0]);
        for (i = 0; i < N;++i) {
            v[i] /= temp;   // scale so v[0] == 1
        }
    }
    free(sigma);
    return beta;
}
double house_2(double*x,int N,double *v) {
    /* Alternative Householder construction: v = unit vector along
     * (x + sign(x[0]) * ||x|| * e1); beta is the constant 2 because v is
     * normalized to unit length. Kept alongside house_1 but not selected
     * by house(). */
    double sgn,beta,sc;
    double *sigma,*e;
    int i;
    sigma = (double*) malloc(sizeof(double) * 1);
    e = (double*) malloc(sizeof(double) * N);
    beta = 2.0;
    sgn = 1.0;
    // sigma = ||x||_2 (dot product via mmult, then sqrt).
    mmult(x,x,sigma,1,N,1);
    sigma[0] = sqrt(sigma[0]);
    e[0] =1.;
    for (i = 1; i < N;++i) {
        e[i] = 0.;
    }
    if (x[0] > 0.) {
        sgn = 1.0;
    } else if (x[0] < 0.) {
        sgn = -1.0;
    } else if (x[0] == 0.) {
        sgn = 0.;
    }
    // e1 scaled by sign(x[0]) * ||x||.
    sc = sigma[0] * sgn;
    e[0] *= sc;
    for(i = 0; i < N;++i) {
        v[i] = e[i] + x[i];
    }
    // Normalize v to unit 2-norm.
    mmult(v,v,sigma,1,N,1);
    sigma[0] = sqrt(sigma[0]);
    for(i = 0; i < N;++i) {
        v[i] = v[i] / sigma[0];
    }
    free(sigma);
    free(e);
    return beta;
}
double house(double*x,int N,double *v) {
    /* Householder vector of x (length N): fills v and returns beta such
     * that (I - beta*v*v^T)*x is zero below its first entry. Currently
     * delegates to the house_1 construction. */
    return house_1(x, N, v);
}
void housemat(double *v, int N,double beta,double *mat) {
    /* mat = I - beta * v * v^T : the explicit N x N Householder reflector
     * for the vector v and scalar beta from house(). */
    double *outer = (double*) malloc(sizeof(double) * N * N);
    eye(mat, N);
    mmult(v, v, outer, N, 1, N);
    scale(outer, N, N, beta);
    msub(mat, outer, mat, N, N);
    free(outer);
}
void qrdecomp(double *A, int M, int N,double *bvec) {
    /* Householder QR of A (M x N, row-major, M >= N), in place:
     * on return the upper triangle of A holds R, the entries below the
     * diagonal of column j hold the tail of the j-th Householder vector
     * (the leading 1 is implicit), and bvec[j] holds the matching beta.
     * AT and w are scratch: the trailing submatrix is copied into AT in
     * a transposed layout so the rank-1 update can be formed with mmult. */
    int j,i,k,u,t;
    double *x,*v,*AT,*w;
    double beta;
    if (M < N) {
        printf("M should be greater than or equal to N");
        exit(1);
    }
    x = (double*) malloc(sizeof(double) * M);
    v = (double*) malloc(sizeof(double) * M);
    AT = (double*) malloc(sizeof(double) * M * N);
    w = (double*) malloc(sizeof(double) * M * M);
    for(j = 0; j < N;++j) {
        // Householder vector annihilating A[j+1..M-1, j].
        for(i=j;i < M;++i) {
            x[i-j] = A[i*N+j];
        }
        beta = house(x,M-j,v);
        bvec[j] = beta;
        // AT = trailing block A[j:M, j:N] in transposed (column-major) layout.
        for (i=j; i < M; i++) {
            t = i * N;
            u = 0;
            for (k=j; k < N; k++) {
                AT[u+i-j] = A[k+t];
                u+=(M-j);
            }
        }
        // Rank-1 update: trailing block -= beta * v * (v^T * block).
        mmult(AT,v,w,N-j,M-j,1);
        scale(w,N-j,1,beta);
        mmult(v,w,AT,M-j,1,N-j);
        for (i=j; i < M; i++) {
            t = i *N;
            for (k=j; k < N; k++) {
                A[t+k] -= AT[(i-j)*(N-j) + k - j];
            }
        }
        // Store the Householder vector tail below the diagonal.
        if (j < M) {
            for(i=j+1;i < M;++i) {
                A[i*N+j] = v[i-j];
            }
        }
    }
    free(x);
    free(v);
    free(AT);
    free(w);
}
void getQR(double *A,int M,int N,double *bvec,double *Q, double *R) {
    /* Recover explicit Q and R from the factored form produced by
     * qrdecomp (A holds R in its upper triangle and the Householder
     * vector tails below the diagonal, bvec the betas): R (N x N) is
     * read off the upper triangle, then Q (M rows, row stride N) is
     * built by applying the reflectors to the identity in reverse
     * order (backward accumulation). */
    int i,j,k,t,u;
    double *x,*v,*AT,*w;
    x = (double*) malloc(sizeof(double) * M);
    v = (double*) malloc(sizeof(double) * M);
    AT = (double*) malloc(sizeof(double) * M * N);
    w = (double*) malloc(sizeof(double) * M * M);
    // R = upper triangle of A.
    for(i = 0; i < N;++i) {
        t = i *N;
        for(j = 0; j < N;++j) {
            if (i > j) {
                R[t+j] = 0.;
            } else {
                R[t+j] = A[t+j];
            }
        }
    }
    // Q starts as the identity (M x N slab).
    for(i = 0; i < M;++i) {
        t = i *N;
        for(j = 0; j < N;++j) {
            if (i == j) {
                Q[t+j] = 1.;
            } else {
                Q[t+j] = 0.;
            }
        }
    }
    // Apply reflectors H_{N-1} ... H_0 to Q, last column first.
    for(j = N-1; j >= 0;--j) {
        // Rebuild v from the stored tail; the leading entry is 1.
        v[0] = 1.;
        for(i=j+1;i < M;++i) {
            v[i-j] = A[i*N+j];
        }
        // Transposed copy of the trailing block of Q, as in qrdecomp.
        for (i=j; i < M; i++) {
            t = i * N;
            u = 0;
            for (k=j; k < N; k++) {
                AT[u+i-j] = Q[k+t];
                u+=(M-j);
            }
        }
        // Q[j:, j:] -= bvec[j] * v * (v^T * Q[j:, j:]).
        mmult(AT,v,w,N-j,M-j,1);
        scale(w,N-j,1,bvec[j]);
        mmult(v,w,AT,M-j,1,N-j);
        for (i=j; i < M; i++) {
            t = i *N;
            for (k=j; k < N; k++) {
                Q[t+k] -= AT[(i-j)*(N-j) + k - j];
            }
        }
    }
    free(x);
    free(v);
    free(AT);
    free(w);
}
void hessenberg(double *A,int N) {
    /* Reduce A (N x N, row-major) to upper Hessenberg form in place by
     * Householder similarity transforms (cf. Golub & Van Loan Alg. 7.4.2):
     * for each column k, a reflector annihilates the entries below the
     * subdiagonal, and is applied from the left and then from the right
     * so eigenvalues are preserved. */
    int k,i,j,t,u;
    double *x,*v,*AT,*w;
    double beta;
    x = (double*) malloc(sizeof(double) * N);
    v = (double*) malloc(sizeof(double) * N);
    AT = (double*) malloc(sizeof(double) * N * N);
    w = (double*) malloc(sizeof(double) * N);
    for (k = 0; k < N-2;++k) {
        // Householder vector for A[k+1..N-1, k].
        for(i=k + 1;i < N;++i) {
            x[i-k-1] = A[i*N+k];
        }
        beta = house(x,N-k-1,v);
        // Left update on rows k+1..N-1, columns k..N-1, staged through a
        // transposed scratch copy in AT so mmult can form the rank-1 term.
        for (i=k+1; i < N; i++) {
            t = i * N;
            u = 0;
            for (j=k; j < N; j++) {
                AT[u+i-k-1] = A[j+t];
                u+=(N-k-1);
            }
        }
        mmult(AT,v,w,N-k,N-k-1,1);
        scale(w,N-k,1,beta);
        mmult(v,w,AT,N-k-1,1,N-k);
        for (i=k+1; i < N; i++) {
            t = i * N;
            for (j=k; j < N; j++) {
                A[t+j] -= AT[(i-k-1)*(N-k) + j - k];
            }
        }
        // Right update on all rows, columns k+1..N-1.
        for (i=0; i < N; i++) {
            t = i * N;
            u = i * (N-k-1);
            for (j=k+1; j < N; j++) {
                AT[u+j-k-1] = A[t+j];
            }
        }
        mmult(AT,v,w,N,N-k-1,1);
        scale(w,N,1,beta);
        mmult(w,v,AT,N,1,N-k-1);
        for (i=0; i < N; i++) {
            t = i * N;
            u = i * (N-k-1);
            for (j=k+1; j < N; j++) {
                A[t+j] -= AT[u+j-k-1];
            }
        }
    }
    free(x);
    free(v);
    free(AT);
    free(w);
}
void francisQR(double *A,int N) {
    /* One Francis double-implicit-shift QR sweep on an unreduced upper
     * Hessenberg matrix A (N x N, row-major), in place.
     * Reference: Algorithm 7.5.1, Golub & Van Loan, Matrix Computations,
     * 3rd Edition. The shift pair comes from the trailing 2x2 block; a
     * 3x1 Householder "bulge" is chased down the matrix, finishing with
     * a 2x1 reflector in the bottom-right corner.
     * Fix: the N <= 2 guard now runs before any work. Previously it sat
     * after four mallocs (leaking them on early return) and after the
     * shift-vector computation, which reads A[2*N+1] -- out of bounds
     * for a 2x2 input. */
    int n,k,q,r,t,u,i,j;
    double s,t2,beta;
    double *x,*v,*AT,*w;
    int NN;
    if (N <= 2) {
        return;
    }
    x = (double*) malloc(sizeof(double) * 3);
    v = (double*) malloc(sizeof(double) * 3);
    AT = (double*) malloc(sizeof(double) * 3 * N);
    w = (double*) malloc(sizeof(double) * N);
    n = N-1;
    NN = N*N;
    // Double shift from the trailing 2x2 block: s = trace, t2 = determinant.
    s = A[NN-1] + A[NN-N-2];
    t2 = A[NN-1] * A[NN-N-2] - A[NN-2] * A[NN-N-1];
    // First column of (A - shift1*I)(A - shift2*I), using the Hessenberg
    // structure (only the top-left entries contribute).
    x[0] = A[0]*A[0] + A[1]*A[N] - s*A[0] + t2;
    x[1] = A[N]*(A[0] + A[N+1] - s);
    x[2] = A[N] * A[N+N+1];
    for (k = -1; k < N - 3;++k) {
        // Reflector that chases the 3x1 bulge sitting at column k+1.
        beta = house(x,3,v);
        if (k > 0) {
            q = k;
        } else {
            q = 0;
        }
        // Left update: rows k+1..k+3, columns q..N-1 (AT holds a
        // transposed copy so the rank-1 term can be formed with mmult).
        for (i=k+1; i < k+4; i++) {
            t = i * N;
            u = 0;
            for (j=q; j < N; j++) {
                AT[u+i-k-1] = A[j+t];
                u+=3;
            }
        }
        mmult(AT,v,w,N-q,3,1);
        scale(w,N-q,1,beta);
        mmult(v,w,AT,3,1,N-q);
        for (i=k+1; i < k+4; i++) {
            t = i * N;
            for (j=q; j < N; j++) {
                A[t+j] -= AT[(i-k-1)*(N-q) + j - q];
            }
        }
        // Right update: rows 0..r-1, columns k+1..k+3.
        if (k+4 >= n) {
            r = N;
        } else {
            r = k+4+1;
        }
        for (i=0; i < r; i++) {
            t = i * N;
            u = i * 3;
            for (j=k+1; j < k+4; j++) {
                AT[u+j-k-1] = A[t+j];
            }
        }
        mmult(AT,v,w,r,3,1);
        scale(w,r,1,beta);
        mmult(w,v,AT,r,1,3);
        for (i=0; i < r; i++) {
            t = i * N;
            u = i * 3;
            for (j=k+1; j < k+4; j++) {
                A[t+j] -= AT[u+j-k-1];
            }
        }
        // Next bulge column.
        x[0] = A[N*(k+2) + k+1];
        x[1] = A[N*(k+3) + k+1];
        if (k < n-3) {
            x[2] = A[N*(k+4) + k+1];
        }
    }
    // Final 2x1 reflector on the bottom-right corner.
    beta = house(x,2,v);
    for (i=n-1; i < N; i++) {
        t = i * N;
        u = 0;
        for (j=n-2; j < N; j++) {
            AT[u+i-n+1] = A[j+t];
            u+=2;
        }
    }
    mmult(AT,v,w,3,2,1);
    scale(w,3,1,beta);
    mmult(v,w,AT,2,1,3);
    for (i=n-1; i < N; i++) {
        t = i * N;
        for (j=n-2; j < N; j++) {
            A[t+j] -= AT[(i-n+1)*3 + j - n + 2];
        }
    }
    for (i=0; i < N; i++) {
        t = i * N;
        u = i * 2;
        for (j=n-1; j < N; j++) {
            AT[u+j-n+1] = A[t+j];
        }
    }
    mmult(AT,v,w,N,2,1);
    scale(w,N,1,beta);
    mmult(w,v,AT,N,1,2);
    for (i=0; i < N; i++) {
        t = i * N;
        u = i * 2;
        for (j=n-1; j < N; j++) {
            A[t+j] -= AT[u+j-n+1];
        }
    }
    free(x);
    free(v);
    free(AT);
    free(w);
}
void eig22(double *A, int stride,double *eigre,double *eigim) {
    /* Eigenvalues of the 2x2 block at A (row stride `stride`), written to
     * eigre/eigim[0..1]. A Jacobi-like rotation first symmetrizes the
     * off-diagonal structure; then either a complex-conjugate pair
     * (at12*at21 < 0) or two real eigenvalues are read off. */
    int N;
    double a11,a12,a21,a22,c,s,c2,s2,cs,t1,t,t2,at11,at12,at21,at22;
    N = stride;
    a11 = A[0];
    a12 = A[1];
    a21 = A[N];
    a22 = A[N+1];
    // Rotation angle that equalizes the two diagonal entries.
    if ( (a12 + a21) == 0) {
        c = 1./sqrt(2.0);
        s = c;
    } else {
        t1 = (a11 - a22) / (a12 + a21);
        t = t1 /(1. + sqrt(1+t1*t1));
        c = 1./sqrt(1 + t*t);
        s = c*t;
    }
    c2 = c*c;
    s2 = s*s;
    cs = c*s;
    // Rotated block: at11 == at22 by construction.
    at11 = c2 * a11 + s2 * a22 - cs * (a12 + a21);
    at12 = c2 * a12 - s2 * a21 + cs * (a11 - a22);
    at21 = c2 * a21 - s2 * a12 + cs * (a11 - a22);
    at22 = c2 * a22 + s2 * a11 + cs * (a12 + a21);
    // Default: complex pair at11 +/- i*sqrt(-at12*at21).
    eigre[0] = eigre[1] = at11;
    eigim[0] = sqrt(-at12 * at21);
    eigim[1] = -sqrt(-at12 * at21);
    if ( at12*at21 >= 0) {
        // Real eigenvalues: a second rotation decouples the block.
        if (at12 == 0) {
            c = 0;
            s = 1;
            c2 = 0;
            s2 = 1;
            cs = 0;
        } else {
            t = sqrt(at21/at12);
            t2 = t * t;
            cs = t/(1+t2);
            // NOTE(review): c2 = (1+t2) looks like it should be 1/(1+t2)
            // (cf. the analogous code path in eig()); c2 and s2 are never
            // used after this point, so the discrepancy has no effect.
            c2 = (1+t2);
            s2 = t2 /(1+t2);
        }
        eigim[0] = eigim[1] = 0.0;
        eigre[0] = at11 - cs * (at12 + at21);
        eigre[1] = at11 + cs * (at12 + at21);
    }
}
int francis_iter(double *A, int N, double *H) {
    /* Unsymmetric QR iteration driver: copies A (N x N) into H, reduces H
     * to upper Hessenberg form, then repeatedly applies Francis QR sweeps
     * to the trailing unreduced diagonal block H[q..p, q..p], deflating p
     * whenever a subdiagonal entry is (or becomes) zero, and zeroing
     * subdiagonal entries that pass the relative TOL test.
     * Returns 1 on convergence, 0 if 30*N sweeps are exhausted. */
    int success,brkpoint;
    int i,j,it,p,q,t,u;
    double *temp;
    success = 0;
    brkpoint = 30 * N;   // iteration budget
    it = 0;
    p = N - 1;
    temp = (double*) malloc(sizeof(double) * N * N);
    for(i = 0; i < N*N;++i) {
        H[i] = A[i];
    }
    hessenberg(H,N);
    while (p > 1 && it < brkpoint) {
        // Deflate: shrink p past already-decoupled 1x1 / 2x2 blocks.
        while (p > 1 && (H[N*p + p-1] == 0 || H[N*(p-1) + p-2] == 0)) {
            if (H[N*p + p-1] == 0) {
                p--;
            } else if (H[N*(p-1) + p-2] == 0) {
                p=p-2;
            }
        }
        if (p > 0) {
            // q..p delimits the trailing unreduced Hessenberg block.
            q = p-1;
            while (q > 0 && fabs(H[N*q + q-1]) != 0) {
                q--;
            }
            // Copy the active block out, sweep it, and copy it back.
            for (i=q; i <= p; i++) {
                t = i * N;
                u = (i-q) * (p-q+1);
                for (j=q; j <= p; j++) {
                    temp[u+j-q] = H[t+j];
                }
            }
            francisQR(temp,p-q+1);
            for (i=q; i <= p; i++) {
                t = i * N;
                u = (i-q) * (p-q+1);
                for (j=q; j <= p; j++) {
                    H[t+j] = temp[u+j-q];
                }
            }
            // Zero negligible subdiagonals (relative criterion).
            for(i = q; i <= p-1;++i) {
                if ( fabs(H[(i+1)*N+i]) <= TOL * (fabs(H[i*N+i]) + fabs(H[(i+1)*N+i+1]) ) ) {
                    H[(i+1)*N+i] = 0.;
                }
            }
            it++;
        }
    }
    if (it == brkpoint) {
        success = 0;
    } else {
        success = 1;
    }
    free(temp);
    return success;
}
static void eig2t(double *A, int stride) {
    /* Apply one Jacobi-style rotation, in place, to the 2x2 block at A
     * (row stride `stride`) so that its two diagonal entries become equal;
     * eig() then reads the eigenvalues off the rotated block. */
    int N = stride;
    double a = A[0], b = A[1], c = A[N], d = A[N+1];
    double co, si;
    if (b + c == 0) {
        // Symmetric-off-diagonal case: fixed 45-degree rotation.
        co = 1./sqrt(2.0);
        si = co;
    } else {
        double tau = (a - d) / (b + c);
        double tn = tau /(1. + sqrt(1+tau*tau));
        co = 1./sqrt(1 + tn*tn);
        si = co*tn;
    }
    {
        double cc = co*co, ss = si*si, cs = co*si;
        A[0]   = cc * a + ss * d - cs * (b + c);
        A[1]   = cc * b - ss * c + cs * (a - d);
        A[N]   = cc * c - ss * b + cs * (a - d);
        A[N+1] = cc * d + ss * a + cs * (b + c);
    }
}
void eig(double *A,int N,double *eigre,double *eigim) {
    /* Eigenvalues of a general real N x N matrix A (A is not modified):
     * francis_iter produces the quasi-triangular form H; each remaining
     * coupled 2x2 diagonal block is rotated with eig2t; then eigenvalues
     * are read off -- complex-conjugate pairs where the block stays
     * coupled with a negative off-diagonal product, real values
     * otherwise. Real parts go to eigre[0..N-1], imaginary to eigim. */
    int i,t,u,n;
    double *H;
    double t1,t2,cs;
    H = (double*) malloc(sizeof(double) * N * N);
    n = N - 1;
    francis_iter(A,N,H);
    // Equalize the diagonal of every still-coupled 2x2 block.
    i = 0;
    while (i < n) {
        u = i * N;
        t = (i+1)*N;
        if (H[t+i] != 0.) {
            eig2t(H+u+i,N);
            i = i +2;
        } else {
            i++;
        }
    }
    // Extract the eigenvalues block by block.
    i = 0;
    while (i < n) {
        u = i * N;
        t = (i+1)*N;
        if (H[t+i] != 0.) {
            if (H[u+i+1] * H[t+i] < 0.) {
                // Complex pair: at11 +/- i*sqrt(-at12*at21).
                eigre[i] = H[u+i];
                eigre[i+1] = H[t+i+1];
                eigim[i] = sqrt(-H[u+i+1] * H[t+i]);
                eigim[i+1] = -sqrt(-H[u+i+1] * H[t+i]);
            } else {
                // Real pair: decouple with one more rotation.
                if (H[u+i+1] == 0.) {
                    cs = 0.;
                } else {
                    t1 = sqrt(H[t+i]/H[u+i+1]);
                    t2 = t1 * t1;
                    cs = t1/(1+t2);
                }
                eigre[i] = H[u+i] - cs * (H[u+i+1] + H[t+i]);
                eigre[i+1] = H[u+i] + cs * (H[u+i+1] + H[t+i]);
                eigim[i] = 0.;
                eigim[i+1] = 0.;
            }
            i= i + 2;
        } else {
            // Uncoupled 1x1 block: real eigenvalue on the diagonal.
            eigre[i] = H[u+i];
            eigim[i] = 0.;
            i++;
        }
    }
    // A trailing 1x1 block, if the loop stopped one short of the corner.
    if (i == n) {
        eigre[i] = H[N*N - 1];
        eigim[i] = 0.;
    }
    free(H);
}
static int rcholu(double *A,int N, int stride, double *U22) {
    /* Recursive in-place Cholesky (upper, A = U^T U) of the leading
     * N x N block of A, whose rows are `stride` apart. U22 is
     * caller-provided scratch of at least (N-1)^2 doubles.
     * Returns 0 on success, -1 on a negative pivot.
     * NOTE(review): for N > 1 only A[0] < 0 is rejected, so an exactly
     * zero pivot divides by zero -- inputs are assumed strictly positive
     * definite. */
    int sc;
    int j,i,u,w;
    double u11;
    if (N == 1) {
        if (A[0] > 0) {
            A[0] = sqrt(A[0]);
            return 0;
        } else {
            return -1;
        }
    } else {
        if (A[0] < 0) {
            return -1;
        }
        // First row of U: u11 = sqrt(a11); row tail scaled by 1/u11.
        u11 = sqrt(A[0]);
        A[0] = u11;
        for (j = 1; j < N;++j) {
            A[j] /= u11;
        }
        // Schur complement: subtract the outer product of the scaled row
        // from the trailing (N-1) x (N-1) block (upper triangle only).
        mmult(A+1,A+1,U22,N-1,1,N-1);
        for (i = 0; i < N-1; ++i) {
            u = stride + 1+ i * stride;
            w = i * (N-1);
            for(j = i; j < N-1;j++) {
                A[j + u] -= U22[j + w];
            }
        }
        sc = rcholu(A+stride+1,N-1,stride,U22);
        if (sc == -1) {
            return -1;
        }
    }
    return sc;
}
static int rbcholu(double *A,int N, int stride, double *UB, double *UT) {
    /* Blocked recursive Cholesky (upper) of the leading N x N block of A
     * (row stride `stride`). UB is scratch for the BLOCKSIZE x BLOCKSIZE
     * panel factorization; UT doubles as the transposed-panel buffer and
     * later as the Schur-complement scratch. Returns 0 on success, -1 if
     * a non-positive-definite panel is met. */
    int bs,bb,i,j,Nb,t,k,u,v,w,sc;
    double *b,*x,*U12,*U12T;
    double sum;
    bs = (int) BLOCKSIZE;
    bb = bs*bs;
    if (N <= BLOCKSIZE) {
        // Small enough: fall back to the unblocked recursion.
        sc = rcholu(A,N,stride,UB);
        if (sc == -1) {
            return -1;
        }
    } else {
        Nb = N - bs;
        x = (double*) malloc(sizeof(double) * bs);
        b = (double*) malloc(sizeof(double) * bs);
        U12T = (double*) malloc(sizeof(double) * Nb * bs);
        U12 = (double*) malloc(sizeof(double) * Nb * bs);
        rcholu(A,bs,stride,UB); // U11: factor the leading panel
        // UT = transposed copy of the top bs x N strip of A.
        for (i =0; i < bs;++i) {
            t = i *stride;
            u = 0;
            for(j = 0; j < N;++j) {
                UT[u+i] = A[j+t];
                u += bs;
            }
        }
        // Solve U11^T * U12 = A12 column by column (forward substitution)
        // and write U12 back into A; U12T keeps a compact copy.
        for(k = 0; k < Nb;++k) {
            u = k * bs;
            for(i = 0; i < bs;++i) {
                b[i] = UT[bb+u+i];
                x[i] = 0.;
            }
            for (i = 0; i < bs;++i) {
                t = i*bs;
                sum = 0;
                for (j = 0; j < i;++j) {
                    sum += UT[t+j] * x[j];
                }
                x[i] = (b[i] - sum) / UT[t+i];
            }
            v = bs + k;
            for(i = 0; i < bs;++i) {
                A[v] = x[i];
                U12T[u+i] = x[i];
                v += stride;
            }
        }
        // Schur complement: A22 -= U12^T * U12 (upper triangle only).
        mtranspose(U12T,Nb,bs,U12);
        mmult(U12T,U12,UT,Nb,bs,Nb);
        free(U12T);
        free(U12);
        free(b);
        free(x);
        for (i = 0; i < Nb; ++i) {
            u = bs * stride + bs + i * stride;
            w = i * Nb;
            for(j = i; j < Nb;j++) {
                A[j + u] -= UT[j + w];
            }
        }
        // Recurse on the trailing block.
        sc = rbcholu(A + bs * stride + bs,Nb,stride,UB,UT);
        if (sc == -1) {
            return -1;
        }
    }
    return sc;
}
int cholu(double *A, int N) {
    /* Cholesky factorization A = U^T U (upper triangular U), in place;
     * returns 0 on success, -1 if A is not positive definite. The strict
     * lower triangle is zeroed afterwards. */
    int r, c, status;
    double *scratch = (double*) malloc(sizeof(double) * N * N);
    status = rcholu(A, N, N, scratch);
    for (r = 0; r < N; ++r) {
        for (c = 0; c < r; ++c) {
            A[r * N + c] = 0.;
        }
    }
    free(scratch);
    return status;
}
int bcholu(double *A, int N) {
    /* Blocked Cholesky factorization A = U^T U, in place; returns 0 on
     * success, -1 on a non-positive-definite input. The strict lower
     * triangle is zeroed afterwards. */
    int r, c, status;
    int bs = (int) BLOCKSIZE;
    double *UT = (double*) malloc(sizeof(double) * N * N);
    double *UB = (double*) malloc(sizeof(double) * bs * bs);
    status = rbcholu(A, N, N, UB, UT);
    for (r = 0; r < N; ++r) {
        for (c = 0; c < r; ++c) {
            A[r * N + c] = 0.;
        }
    }
    free(UB);
    free(UT);
    return status;
}
/* Cholesky entry point: use the unblocked routine for matrices up to
 * BLOCKSIZE, the blocked one otherwise.
 * Returns 0 on success, -1 if A is not positive definite. */
int chol(double *A, int N) {
	return (N <= (int) BLOCKSIZE) ? cholu(A, N) : bcholu(A, N);
}
/* Recursive LDL'-style elimination step (no pivoting): divide the first row
 * of the N x N leading submatrix by the pivot d1 = A[0], then downdate the
 * trailing block by d1 * u12' * u12 and recurse.
 * NOTE: assumes every pivot is nonzero -- a zero pivot divides by zero.
 * U22 is caller scratch of at least (N-1)*(N-1) doubles. */
static void rchold(double *A,int N, int stride, double *U22) {
	int row, col, base, off;
	double pivot;

	if (N == 1) {
		return;
	}
	pivot = A[0];
	/* u12 = a12 / d1 */
	for (col = 1; col < N; ++col) {
		A[col] /= pivot;
	}
	/* U22 = d1 * (u12' * u12) */
	mmult(A + 1, A + 1, U22, N - 1, 1, N - 1);
	scale(U22, N - 1, N - 1, pivot);
	/* A22 -= U22 (upper triangle only) */
	for (row = 0; row < N - 1; ++row) {
		base = stride + 1 + row * stride;
		off = row * (N - 1);
		for (col = row; col < N - 1; ++col) {
			A[col + base] -= U22[col + off];
		}
	}
	rchold(A + stride + 1, N - 1, stride, U22);
}
/* LDL'-style decomposition driver: factor A (N x N, row-major) in place via
 * rchold and clear the strict lower triangle. */
void chold(double *A, int N) {
	double *scratch;
	int row, col;

	scratch = (double*) malloc(sizeof(double) * N * N);
	rchold(A, N, N, scratch);
	free(scratch);

	/* Zero out everything below the diagonal. */
	for (row = 1; row < N; ++row) {
		for (col = 0; col < row; ++col) {
			A[row * N + col] = 0.;
		}
	}
}
/* Reorder the singular values q (via sort1d) and permute the columns of
 * U (M x N) and V (N x N) to match the new order. */
void svd_sort(double *U,int M,int N,double *V,double *q) {
	/*
	 * Pavel Sakov's CSA SVD sort routine is used with some minor
	 * modifications. See The License below
	 */
	/*
	 * Copyright (C) 2000-2008 Pavel Sakov and CSIRO
	 Redistribution and use of material from the package `csa', with or without
	 modification, are permitted provided that the following conditions are
	 met:
	 1. Redistributions of material must retain the above copyright notice, this
	 list of conditions and the following disclaimer.
	 2. The names of the authors may not be used to endorse or promote products
	 derived from this software without specific prior written permission.
	 THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR IMPLIED
	 WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
	 MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
	 EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
	 EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
	 OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
	 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
	 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
	 IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
	 OF SUCH DAMAGE.
	 */
	int i,j;
	double *UT,*VT,*qq;
	int *pos;
	UT = (double*) malloc(sizeof(double) * N * M);
	VT = (double*) malloc(sizeof(double) * N * N);
	qq = (double*) malloc(sizeof(double) * N);
	pos = (int*) malloc(sizeof(int) * N);
	/* Snapshot q, U and V; the permuted results are written back in place. */
	for(i = 0;i < N;++i) {
		qq[i] = q[i];
	}
	for(i = 0;i < M*N;++i) {
		UT[i] = U[i];
	}
	for(i = 0;i < N*N;++i) {
		VT[i] = V[i];
	}
	//mtranspose(U,M,N,UT);
	//mtranspose(V,N,N,VT);
	/* NOTE(review): assumes sort1d sorts q into the desired order and fills
	 * pos[i] with the original index of sorted entry i -- confirm against
	 * its definition. */
	sort1d(q,N,pos);
	/* Apply the same column permutation to U and V. */
	for(i = 0; i < N;++i) {
		q[i] = qq[pos[i]];
		for (j = 0; j < M;++j) {
			U[j*N+i] = UT[j*N+pos[i]];
		}
		for (j = 0; j < N;++j) {
			V[j*N+i] = VT[j*N+pos[i]];
		}
	}
	free(UT);
	free(VT);
	free(qq);
	free(pos);
}
/* Singular value decomposition A = U * diag(q) * V' for an M x N matrix
 * with M >= N. U is M x N, V is N x N, q is N x 1 (sorted by svd_sort).
 * A is copied; it is not modified. */
int svd(double *A,int M,int N,double *U,double *V,double *q) {
	int i,j,k,l,t,t2,ierr,cancel,iter,l1;
	double eps,g,x,s,temp,f,h,c,y,z,scale;
	double *e;
	/*
	THIS SUBROUTINE IS THE MODIFIED C TRANSLATION OF THE
	EISPACK FORTRAN TRANSLATION OF THE ALGOL PROCEDURE SVD,
	NUM. MATH. 14, 403-420(1970) BY GOLUB AND REINSCH.
	HANDBOOK FOR AUTO. COMP., VOL II-LINEAR ALGEBRA, 134-151(1971).
	*/
	/*
	 * U = MXN
	 * V - NXN
	 * Q - NX1
	 */
	/*
	 * The program return error codes
	 *
	 * Code 0 if the computation is successful
	 * Code -1 If M < N . Transpose the matrix such that rows > columns and try again
	 * Code 15 if maximum iterations are reached without achieving convergence. Increase SVDMAXITER value
	 * in matrix.h header file. Default Value is 50
	 *
	 */
	if (M < N) {
		printf("Rows (M) should be greater than Columns (N) \n");
		printf("Retry By Transposing the Input Matrix");
		return -1;
	}
	e = (double*) malloc(sizeof(double) * N);
	ierr = 0;
	eps = macheps();
	g = scale = x = 0.0;
	for(i = 0; i < M*N;++i) {
		U[i] = A[i];
	}
	/* Householder bidiagonalization: q[] gets the diagonal, e[] the
	 * superdiagonal. Note e[0] is always 0 (g and scale start at 0), which
	 * the cancellation test below relies on. */
	for(i = 0; i < N;++i) {
		l = i+1;
		e[i] = scale * g;
		g = 0.0;
		s = 0.0;
		scale = 0.0;
		if (i < M) {
			/* Column transformation: eliminate below-diagonal of column i. */
			for(k = i; k < M;++k) {
				scale += fabs(U[k*N+i]);
			}
			if (scale != 0.0) {
				for(k = i; k < M;++k) {
					t = k * N;
					U[t+i] /= scale;
					temp = U[t+i];
					s += temp*temp;
				}
				f = U[i*N+i];
				g = (f < 0) ? sqrt(s) : -sqrt(s);
				h = f * g - s;
				U[i*N+i] = f - g;
				if (i < N - 1) {
					for(j = l; j < N;++j) {
						s = 0.0;
						for(k = i; k < M;++k) {
							t = k * N;
							s += U[t+i]*U[t+j];
						}
						f = s / h;
						for(k = i; k < M;++k) {
							t = k * N;
							U[t+j] += f * U[t+i];
						}
					}
				}
				for(k = i; k < M;++k) {
					t = k * N;
					U[t+i] *= scale;
				}
			}
		}
		q[i] = scale * g;
		g = 0.0;
		s = 0.0;
		scale = 0.0;
		if (i < M && i != N - 1) {
			t = i *N;
			/* Row transformation: eliminate to the right of the
			 * superdiagonal of row i. The scale sum must run over the N
			 * columns of the row, not M (EISPACK loops k = l..n); the old
			 * `k < M` bound read entries past row i's columns and could
			 * make scale nonzero for an all-zero row, dividing by h == 0
			 * below. */
			for(k = l; k < N;++k) {
				scale += fabs(U[t+k]);
			}
			if (scale != 0.0) {
				for(k = l; k < N;++k) {
					U[t+k] /= scale;
					temp = U[t+k];
					s = s + temp*temp;
				}
				f = U[t+l];
				g = (f < 0) ? sqrt(s) : -sqrt(s);
				h = f * g - s;
				U[t+l] = f - g;
				for(k = l;k < N;++k) {
					e[k] = U[t+k] / h;
				}
				for (j = l; j < M; j++) {
					s = 0.0;
					t2 = j * N;
					for (k = l; k < N; k++) {
						s += U[t2+k] * U[t+k];
					}
					for (k = l; k < N; k++) {
						U[t2+k] += s * e[k];
					}
				}
				for (k = l; k < N; k++)
					U[t+k] *= scale;
			}
		}
		/* x accumulates the largest |q[i]| + |e[i]|, used to scale eps. */
		temp = fabs(q[i]) + fabs(e[i]);
		if (x < temp) {
			x = temp;
		}
	}
	//Accumulating Right Hand Transformations
	for(i = N - 1;i >= 0;--i) {
		t = i * N;
		if (i < N - 1) {
			if (g != 0.0) {
				h = U[t+i+1] * g;
				for(j = l;j < N;++j) {
					V[j*N+i] = U[t+j] / h;
				}
				for(j = l;j < N;++j) {
					s = 0.0;
					for(k = l; k < N;++k) {
						s += U[t+k] * V[k*N+j];
					}
					for(k = l; k < N;++k) {
						V[k*N+j] += (s * V[k*N+i]);
					}
				}
			}
			for(j = l; j < N;++j) {
				V[t+j] = V[j*N+i] = 0.0;
			}
		}
		V[t+i] = 1.0;
		g = e[i];
		l = i;
	}
	//Accumulating Left Hand Transformations
	for(i = N - 1;i >= 0;--i) {
		t = i * N;
		l = i+1;
		g = q[i];
		if (i < N - 1) {
			for(j = l;j < N;++j) {
				U[t+j] = 0.0;
			}
		}
		if (g != 0.0) {
			if (i != N - 1) {
				//h = U[t+i] * g;
				for(j = l;j < N;++j) {
					s = 0.0;
					for(k = l; k < M;++k) {
						s += (U[k*N+i] * U[k*N+j]);
					}
					f = (s / U[t+i]) / g;
					for(k = i; k < M;++k) {
						U[k*N+j] += (f * U[k*N+i]);
					}
				}
			}
			for(j = i; j < M;++j) {
				U[j*N+i] = U[j*N+i] / g;
			}
		} else {
			for(j = i; j < M;++j) {
				U[j*N+i] = 0.0;
			}
		}
		U[t+i] += 1.0;
	}
	/* Diagonalize the bidiagonal form by implicit-shift QR sweeps. */
	eps = eps * x;
	for(k = N - 1; k >= 0; --k) {
		iter = 0;
		while(1) {
			iter++;
			if (iter > SVDMAXITER) {
				printf("Convergence Not Achieved \n");
				free(e);	/* was leaked on this path */
				return 15;
			}
			cancel = 1;
			for(l = k; l >= 0; --l) {
				if (fabs(e[l]) <= eps) {
					cancel = 0; //test f convergence
					break;
				}
				/* Safe for l > 0 only; at l == 0, e[0] == 0 always triggers
				 * the break above, so q[l-1] is never read out of bounds. */
				if (fabs(q[l-1]) <= eps) {
					//Cancel
					break;
				}
			}
			if (cancel) {
				/* Cancellation of e[l] if l > 0. */
				c = 0.0;
				s = 1.0;
				l1 = l - 1;
				for(i = l; i <= k;++i) {
					f = s*e[i];
					e[i] *= c;
					if (fabs(f) <= eps) {
						break;
					}
					g = q[i];
					h = q[i] = hypot(f,g);
					c = g/h;
					s = -f/h;
					for(j = 0; j < M;++j) {
						t = j * N;
						y = U[t+l1];
						z = U[t+i];
						U[t+l1] = y * c + z * s;
						U[t+i] = z * c - y * s;
					}
				}
			}
			z = q[k];
			if (l != k) {
				/* Shift from bottom 2x2 minor. */
				x = q[l];
				y = q[k-1];
				g = e[k-1];
				h = e[k];
				f = 0.5 * (((g + z) / h) * ((g - z) / y) + y / h - h / y);
				g = hypot(f,1.0);
				if (f < 0.0) {
					temp = f - g;
				} else {
					temp = f+g;
				}
				f = x - (z / x) * z + (h / x) * (y / temp - h);
				//Next QR Transformation
				c = s = 1.0;
				for(i = l+1; i <= k;++i) {
					g = e[i];
					y = q[i];
					h = s * g;
					g = c * g;
					e[i-1] = z = hypot(f,h);
					c = f / z;
					s = h / z;
					f = x * c + g * s;
					g = g * c - x * s;
					h = y * s;
					y *= c;
					for(j = 0; j < N;++j) {
						t = j * N;
						x = V[t+i-1];
						z = V[t+i];
						V[t+i-1] = x * c + z * s;
						V[t+i] = z * c - x * s;
					}
					q[i-1] = z = hypot(f,h);
					if (z != 0.0) {
						c = f / z;
						s = h / z;
					}
					f = c * g + s * y;
					x = c * y - s * g;
					for(j = 0; j < M;++j) {
						t = j * N;
						y = U[t+i-1];
						z = U[t+i];
						U[t+i-1] = y * c + z * s;
						U[t+i] = z * c - y * s;
					}
				}
				e[l] = 0.0;
				e[k] = f;
				q[k] = x;
			} else {
				//convergence: force the singular value non-negative
				if (z < 0.0) {
					q[k] = -z;
					for (j = 0; j < N; j++) {
						t = j *N;
						V[t+k] = -V[t+k];
					}
				}
				break;
			}
		}
	}
	svd_sort(U,M,N,V,q);
	free(e);
	return ierr;
}
/* Numerical rank via SVD: count singular values above
 * tol = q_max * max(M,N) * macheps. Assumes M >= N (the public rank()
 * wrapper transposes when needed). Returns the rank, or -1 on SVD failure. */
static int rank_c(double *A, int M,int N) {
	int i,rnk,ret;
	double eps,tol,szmax,qmax;
	double *U,*V,*q;
	U = (double*) malloc(sizeof(double) * M*N);
	V = (double*) malloc(sizeof(double) * N*N);
	q = (double*) malloc(sizeof(double) * N);
	eps = macheps();
	rnk = 0;
	szmax = (M < N) ? (double) N : (double) M;
	ret = svd(A,M,N,U,V,q);
	if ( ret != 0) {
		printf("Failed to Compute SVD");
		/* free the work arrays on the error path too (was a leak) */
		free(U);
		free(V);
		free(q);
		return -1;
	}
	qmax = q[0]; /* singular values come back sorted, largest first */
	tol = qmax*szmax *eps;
	for(i = 0; i < N;++i) {
		if (q[i] > tol) {
			rnk++;
		}
	}
	free(U);
	free(V);
	free(q);
	return rnk;
}
/* Rank of an arbitrary M x N matrix. A is not modified: when M < N the
 * input is transposed into a temporary so the SVD sees rows >= columns.
 * The temporary is only allocated when actually needed (previously it was
 * allocated unconditionally). Returns the rank, or -1 on SVD failure. */
int rank(double *A, int M,int N) {
	int rnk;
	double *AT;
	if (M < N) {
		AT = (double*) malloc(sizeof(double) * M*N);
		mtranspose(A,M,N,AT);
		rnk = rank_c(AT,N,M);
		free(AT);
	} else {
		rnk = rank_c(A,M,N);
	}
	return rnk;
}
|
wave3d.c |
#ifndef TAPENADE
#include <math.h>
#endif
#define Max(x,y) fmax(x,y)
#define Min(x,y) fmin(x,y)
#define Heaviside(x) ((x>=0)?1.0:0.0)
#define u(x,xx,xxx) u[x][xx][xxx]
#define c(x,xx,xxx) c[x][xx][xxx]
#define u_1(x,xx,xxx) u_1[x][xx][xxx]
#define u_2(x,xx,xxx) u_2[x][xx][xxx]
/* One explicit time step of a 3D 7-point wave stencil on the interior of an
 * n x n x n grid:
 *   u += c * D * laplacian(u_1) + 2*u_1 - u_2
 * where u_1/u_2 are the two previous time levels and c is the per-cell
 * coefficient field. Boundary planes (index 0 and n-1) are left untouched. */
void wave3d(double* u_vec, double* c_vec, double* u_1_vec, double* u_2_vec, double D, int n) {
  double (*u)[n][n] = (double (*)[n][n]) u_vec;
  double (*c)[n][n] = (double (*)[n][n]) c_vec;
  double (*u_1)[n][n] = (double (*)[n][n]) u_1_vec;
  double (*u_2)[n][n] = (double (*)[n][n]) u_2_vec;
  int i, j, k;
  #pragma omp parallel for private(k,j,i)
  for (i = 1; i < n - 1; i++) {
    for (j = 1; j < n - 1; j++) {
      for (k = 1; k < n - 1; k++) {
        /* summand order kept exactly as before for bit-identical results */
        u[i][j][k] += D*(-6*u_1[i][j][k] + u_1[i][j][k - 1] + u_1[i][j][k + 1] + u_1[i][j - 1][k] + u_1[i][j + 1][k] + u_1[i - 1][j][k] + u_1[i + 1][j][k])*c[i][j][k] + 2.0*u_1[i][j][k] - u_2[i][j][k];
      }
    }
  }
}
|
GB_unop__lgamma_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__lgamma_fp64_fp64
// op(A') function: GB_unop_tran__lgamma_fp64_fp64
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = lgamma (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = lgamma (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = lgamma (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LGAMMA || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator, Cx [p] = lgamma (Ax [p])
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__lgamma_fp64_fp64
(
    double *Cx,         // Cx and Ax may be aliased
    const double *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // the double -> double cast is a no-op, so apply the op directly
        Cx [p] = lgamma (Ax [p]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator.
// The transpose loop itself lives in the shared template GB_unop_transpose.c,
// which expands via the GB_* macros defined above (GB_OP here is lgamma).
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__lgamma_fp64_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
argon2_fmt_plug.c | /*
* This software is Copyright (c) 2016 Agnieszka Bielec <bielecagnieszka8 at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* merged argon2d and argon2i into a single format file. JimF.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_argon2;
#elif FMT_REGISTERS_H
john_register_one(&fmt_argon2);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "argon2.h"
#include "argon2_core.h"
#include "argon2_encoding.h"
#include "memdbg.h"
#define FORMAT_LABEL "argon2"
#define FORMAT_NAME ""
#define FORMAT_TAG_d "$argon2d$"
#define FORMAT_TAG_i "$argon2i$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG_d)-1)
#if defined(__XOP__)
#define ALGORITHM_NAME "Blake2 XOP"
#elif defined(__AVX__)
#define ALGORITHM_NAME "Blake2 AVX"
#elif defined(__SSSE3__)
#define ALGORITHM_NAME "Blake2 SSSE3"
#elif defined(__SSE2__)
#define ALGORITHM_NAME "Blake2 SSE2"
#else
#define ALGORITHM_NAME "Blake2"
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 100 //only in john
#define BINARY_SIZE 256 //only in john
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_SIZE 64 //only in john
#define SALT_ALIGN sizeof(uint32_t)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define OMP_SCALE 16
#ifdef _OPENMP
#define THREAD_NUMBER omp_get_thread_num()
#else
#define THREAD_NUMBER 1
#endif
/* Self-test vectors: argon2d and argon2i encoded hashes covering several
 * (m, t, p) cost settings and digest lengths. */
static struct fmt_tests tests[] = {
	{"$argon2d$v=19$m=4096,t=3,p=1$ZGFtYWdlX2RvbmU$w9w3s5/zV8+PcAZlJhnTCOE+vBkZssmZf6jOq3dKv50","password"},
	{"$argon2i$v=19$m=4096,t=3,p=1$ZGFtYWdlX2RvbmU$N59QwnpxDQZRj1/cO6bqm408dD6Z2Z9LKYpwFJSPVKA","password"},
	{"$argon2d$v=19$m=4096,t=3,p=1$c2hvcnRfc2FsdA$zMrTcOAOUje6UqObRVh84Pe1K6gumcDqqGzRM0ILzYmj","sacrificed"},
	{"$argon2i$v=19$m=4096,t=3,p=1$c2hvcnRfc2FsdA$1l4kAwUdAApoCbFH7ghBEf7bsdrOQzE4axIJ3PV0Ncrd","sacrificed"},
	{"$argon2d$v=19$m=16384,t=3,p=1$c2hvcnRfc2FsdA$TLSTPihIo+5F67Y1vJdfWdB9","blessed_dead"},
	{"$argon2i$v=19$m=16384,t=3,p=1$c2hvcnRfc2FsdA$vvjDVog22A5x9eljmB+2yC8y","blessed_dead"},
	{"$argon2d$v=19$m=16384,t=4,p=3$YW5vdGhlcl9zYWx0$yw93eMxC8REPAwbQ0e/q43jR9+RI9HI/DHP75uzm7tQfjU734oaI3dzcMWjYjHzVQD+J4+MG+7oyD8dN/PtnmPCZs+UZ67E+rkXJ/wTvY4WgXgAdGtJRrAGxhy4rD7d5G+dCpqhrog","death_dying"},
	{"$argon2i$v=19$m=16384,t=4,p=3$YW5vdGhlcl9zYWx0$K7unxwO5aeuZCpnIJ06FMCRKod3eRg8oIRzQrK3E6mGbyqlTvvl47jeDWq/5drF1COJkEF9Ty7FWXJZHa+vqlf2YZGp/4qSlAvKmdtJ/6JZU32iQItzMRwcfujHE+PBjbL5uz4966A","death_dying"},
	{NULL}
};
/* Parsed salt blob: cost parameters, raw salt bytes and the Argon2 variant.
 * This whole struct is the format's "salt" as seen by the cracker core. */
struct argon2_salt {
	uint32_t t_cost, m_cost, lanes;	/* iterations, memory blocks, parallelism */
	uint32_t hash_size;		/* digest length in bytes */
	uint32_t salt_length;
	char salt[SALT_SIZE];
	argon2_type type;		/* Argon2_d or Argon2_i */
};
static struct argon2_salt saved_salt;
static region_t * memory;		/* per-thread Argon2 block memory */
static void **pseudo_rands;		/* per-thread segment scratch buffers */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *saved_len;
static int threads;
static size_t saved_mem_size;		/* current size of each region */
static uint32_t saved_segment_length;
static unsigned char (*crypted)[BINARY_SIZE];
static void *get_salt(char *ciphertext);
/* One-time format initialization: scale the key counts for OpenMP and
 * allocate per-candidate key/hash buffers plus one scratch region and one
 * pseudo-rand buffer per thread. Regions start empty; set_salt() grows them
 * on demand to fit the current salt's memory cost. */
static void init(struct fmt_main *self)
{
	int i;
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();
	threads=omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#else
	threads=1;
#endif
	saved_key =
		mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key));
	crypted = mem_calloc(self->params.max_keys_per_crypt, (BINARY_SIZE));
	saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(int));
	memory=mem_calloc(threads, sizeof(region_t));
	pseudo_rands=mem_calloc(threads,sizeof(void*));
	for (i=0;i<threads;i++)
	{
		init_region_t(&memory[i]);
		pseudo_rands[i]=NULL;
	}
	/* zero sizes force (re)allocation on the first set_salt() call */
	saved_mem_size=0;
	saved_segment_length=0;
}
/* Tear-down: release every per-thread scratch buffer, then the shared
 * key/length/hash arrays allocated by init(). */
static void done(void)
{
	int t;

	for (t = 0; t < threads; t++) {
		free_region_t(&memory[t]);
		MEM_FREE(pseudo_rands[t]);
	}
	MEM_FREE(memory);
	MEM_FREE(pseudo_rands);
	MEM_FREE(saved_len);
	MEM_FREE(crypted);
	MEM_FREE(saved_key);
}
/* Pretty-print a byte count scaled to B/kB/MB/GB/TB (clamped at TB).
 * The parameter is renamed so it no longer shadows the file-scope `memory`
 * region array, and the unit loop is bounded so it can never index past the
 * end of the unit table for absurdly large inputs. */
static void print_memory(double bytes)
{
	char units[]="\0kMGT";
	int i=0;
	while(bytes>=1024 && i<4)
	{
		bytes/=1024;
		i++;
	}
	printf("memory per hash : %.2lf %cB\n",bytes,units[i]);
}
/* Session start hook: at higher verbosity, report how much Argon2 block
 * memory one hash will need -- scanning the built-in test vectors in
 * benchmark mode (db == NULL) or the loaded salts otherwise.
 * NOTE(review): `printed` is never set to 1, so the guard only suppresses
 * output via the verbosity check -- confirm whether this is intentional. */
static void reset(struct db_main *db)
{
	static int printed=0;
	if (!printed && options.verbosity > VERB_LEGACY)
	{
		int i;
		uint32_t m_cost, prev_m_cost;
		m_cost=prev_m_cost=0;
		if (!db) {
			for (i = 0; tests[i].ciphertext; i++)
			{
				struct argon2_salt *salt;
				salt=get_salt(tests[i].ciphertext);
				m_cost = MAX(m_cost, salt->m_cost);
				if (i==0)
				{
					printf("\n");
					prev_m_cost=m_cost;
					print_memory(sizeof(block)*m_cost);
				}
			}
			/* print the maximum only when it differs from the first vector */
			if (prev_m_cost!=m_cost)
			{
				printf("max ");
				print_memory(sizeof(block)*m_cost);
			}
		} else {
			struct db_salt *salts = db->salts;
			while (salts != NULL) {
				struct argon2_salt * salt=salts->salt;
				m_cost = MAX(m_cost, salt->m_cost);
				salts = salts->next;
			}
			printf("\n");
			print_memory(sizeof(block)*m_cost);
		}
	}
}
/* Prepare an argon2_context for decoding: point out/salt at function-static
 * buffers sized for the largest supported digest/salt. All contexts set up
 * here share those buffers, so this path is not thread-safe (it is only used
 * by the single-threaded parse/valid/get_* helpers). */
static void ctx_init(argon2_context *ctx)
{
	//size_t maxadlen = ctx->adlen;
	//size_t maxsaltlen = ctx->saltlen;
	//size_t maxoutlen = ctx->outlen;
	static uint8_t out[BINARY_SIZE];
	static uint8_t salt[SALT_SIZE];
	ctx->adlen=0;
	ctx->saltlen=SALT_SIZE;		/* capacity; decode sets the actual length */
	ctx->outlen=BINARY_SIZE;
	ctx->out=out;
	ctx->salt=salt;
}
static int valid(char *ciphertext, struct fmt_main *self)
{
argon2_context ctx;
int res;
ctx_init(&ctx);
if (!strncmp(ciphertext, FORMAT_TAG_d, FORMAT_TAG_LEN))
res=argon2_decode_string(&ctx, ciphertext, Argon2_d);
else if (!strncmp(ciphertext, FORMAT_TAG_i, FORMAT_TAG_LEN))
res=argon2_decode_string(&ctx, ciphertext, Argon2_i);
else
return 0;
if (res!=ARGON2_OK || ctx.outlen < 8)
return 0;
return 1;
}
/* Store one candidate password (NUL-safe bounded copy) and its length. */
static void set_key(char *key, int index)
{
	saved_len[index] = strnzcpyn(saved_key[index], key, sizeof(*saved_key));
}
/* Return the stored candidate password for this index. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Decode the digest from an encoded hash into a static buffer, zero-padded
 * to BINARY_SIZE. valid() has already vetted the tag and format. */
static void *get_binary(char *ciphertext)
{
	static char out[BINARY_SIZE];
	argon2_context ctx;
	ctx_init(&ctx);
	if (!strncmp(ciphertext, FORMAT_TAG_d, FORMAT_TAG_LEN))
		argon2_decode_string(&ctx, ciphertext, Argon2_d);
	else
		argon2_decode_string(&ctx, ciphertext, Argon2_i);
	/* pad with zeros so fixed-size binary compares are well defined */
	memset(out, 0, BINARY_SIZE);
	memcpy(out, ctx.out, ctx.outlen);
	return out;
}
/* Parse the salt, cost parameters (t, m, lanes), digest size and variant
 * out of an encoded hash into a static argon2_salt blob. */
static void *get_salt(char *ciphertext)
{
	static struct argon2_salt salt;
	argon2_context ctx;
	/* memset so padding bytes compare equal across identical salts */
	memset(&salt,0,sizeof(salt));
	ctx_init(&ctx);
	if (!strncmp(ciphertext, FORMAT_TAG_d, FORMAT_TAG_LEN)) {
		argon2_decode_string(&ctx, ciphertext, Argon2_d);
		salt.type = Argon2_d;
	} else {
		argon2_decode_string(&ctx, ciphertext, Argon2_i);
		salt.type = Argon2_i;
	}
	salt.salt_length = ctx.saltlen;
	salt.m_cost = ctx.m_cost;
	salt.t_cost = ctx.t_cost;
	salt.lanes = ctx.lanes;
	salt.hash_size = ctx.outlen;
	memcpy(salt.salt, ctx.salt, ctx.saltlen);
	return (void *)&salt;
}
/* Make `salt` current and grow the per-thread scratch buffers when this
 * salt needs more Argon2 block memory or a longer pseudo-rand segment than
 * any salt seen so far. Buffers only ever grow, so they are reused across
 * salts without reallocating on every switch. */
static void set_salt(void *salt)
{
	uint32_t i;
	size_t mem_size;
	uint32_t segment_length, memory_blocks;
	memcpy(&saved_salt,salt,sizeof(struct argon2_salt));
	mem_size=sizeof(block)*saved_salt.m_cost;
	memory_blocks = saved_salt.m_cost;
	/* Argon2 requires at least 2 * SYNC_POINTS blocks per lane */
	if (memory_blocks < 2 * ARGON2_SYNC_POINTS * saved_salt.lanes) {
		memory_blocks = 2 * ARGON2_SYNC_POINTS * saved_salt.lanes;
	}
	segment_length = memory_blocks / (saved_salt.lanes * ARGON2_SYNC_POINTS);
	if (mem_size>saved_mem_size)
	{
		if (saved_mem_size>0)
			for (i=0;i<threads;i++)
				free_region_t(&memory[i]);
		for (i=0;i<threads;i++)
			alloc_region_t(&memory[i],mem_size);
		saved_mem_size=mem_size;
	}
	if (segment_length>saved_segment_length)
	{
		if (saved_segment_length>0)
			for (i=0;i<threads;i++)
				MEM_FREE(pseudo_rands[i]);
		for (i=0;i<threads;i++)
			pseudo_rands[i]=mem_calloc(sizeof(uint64_t), segment_length);
		saved_segment_length=segment_length;
	}
}
/* Return 1 if any computed hash in the batch matches `binary` over the
 * current salt's digest length, 0 otherwise. */
static int cmp_all(void *binary, int count)
{
	int idx;

	for (idx = 0; idx < count; idx++)
		if (memcmp(binary, crypted[idx], saved_salt.hash_size) == 0)
			return 1;
	return 0;
}
/* Compare one computed hash against `binary` over the digest length. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypted[index], saved_salt.hash_size);
}
/* Nothing further to verify: cmp_one already compared the full digest. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Hash every queued candidate with the current salt. Each OpenMP thread
 * uses its own preallocated block region and pseudo-rand buffer (indexed by
 * thread id), so loop iterations are independent. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int i;
	const int count = *pcount;
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (i = 0; i < count; i++) {
		argon2_hash(saved_salt.t_cost, saved_salt.m_cost, saved_salt.lanes, saved_key[i], saved_len[i], saved_salt.salt,
		    saved_salt.salt_length, crypted[i], saved_salt.hash_size, 0, 0, saved_salt.type, ARGON2_VERSION_NUMBER, memory[THREAD_NUMBER%threads].aligned, pseudo_rands[THREAD_NUMBER%threads]);
	}
	return count;
}
#define COMMON_GET_HASH_VAR crypted
#include "common-get-hash.h"
/* Fold the salt bytes into a bucket index in [0, SALT_HASH_SIZE) for the
 * cracker core's salt table. */
static int salt_hash(void *_salt)
{
	struct argon2_salt *s = (struct argon2_salt*)_salt;
	unsigned int h = 0;
	unsigned int i;

	for (i = 0; i < s->salt_length; i++) {
		h = (h << 1) + (unsigned char)s->salt[i];
		if (h >> SALT_HASH_LOG) {
			h ^= h >> SALT_HASH_LOG;
			h &= (SALT_HASH_SIZE - 1);
		}
	}
	h ^= h >> SALT_HASH_LOG;
	h &= (SALT_HASH_SIZE - 1);
	return h;
}
#if FMT_MAIN_VERSION > 11
/* Tunable-cost reporters, one per entry in params.tunable_cost_name. */
static unsigned int tunable_cost_t(void *_salt)
{
	struct argon2_salt *salt=(struct argon2_salt *)_salt;
	return salt->t_cost;	/* time cost (iterations) */
}
static unsigned int tunable_cost_m(void *_salt)
{
	struct argon2_salt *salt=(struct argon2_salt *)_salt;
	return salt->m_cost;	/* memory cost (blocks) */
}
static unsigned int tunable_cost_p(void *_salt)
{
	struct argon2_salt *salt=(struct argon2_salt *)_salt;
	return salt->lanes;	/* parallelism (lanes) */
}
static unsigned int tunable_cost_type(void *_salt)
{
	struct argon2_salt *salt=(struct argon2_salt *)_salt;
	return (int)salt->type;	/* 0 = Argon2d, 1 = Argon2i */
}
#endif
/* John the Ripper format descriptor: wires the argon2 parameters and
 * callbacks defined above into the cracker core. */
struct fmt_main fmt_argon2 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		sizeof(struct argon2_salt),
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
		FMT_OMP |
#endif
		FMT_CASE | FMT_8_BIT,
		{
			"t",
			"m",
			"p",
			"type [0:Argon2d 1:Argon2i]"
		},
		{0},
		tests
	}, {
		init,
		done,
		reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{
			tunable_cost_t,
			tunable_cost_m,
			tunable_cost_p,
			tunable_cost_type,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for struct timeval values.
 * NOTE: *y is used as scratch and may be modified (as in the classic glibc
 * manual example this is adapted from).
 * Returns 1 if the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  long carry;

  /* Borrow from the seconds field so x->tv_usec - y->tv_usec >= 0. */
  if (x->tv_usec < y->tv_usec) {
    carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carry;
    y->tv_sec += carry;
  }
  /* Normalize in the other direction when the usec gap exceeds 1 second. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* tv_usec is now certainly non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 32;
tile_size[3] = 32;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,12);t1++) {
lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24));
ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(3*t1-7,8)),ceild(24*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(12*t1+Ny+21,32)),floord(24*t2+Ny+20,32)),floord(24*t1-24*t2+Nz+Ny+19,32));t3++) {
for (t4=max(max(max(0,ceild(3*t1-7,8)),ceild(24*t2-Nz-28,32)),ceild(32*t3-Ny-28,32));t4<=min(min(min(min(floord(Nt+Nx-4,32),floord(12*t1+Nx+21,32)),floord(24*t2+Nx+20,32)),floord(32*t3+Nx+28,32)),floord(24*t1-24*t2+Nz+Nx+19,32));t4++) {
for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),32*t3-Ny+2),32*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),32*t3+30),32*t4+30),24*t1-24*t2+Nz+21);t5++) {
for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) {
lbv=max(32*t4,t5+1);
ubv=min(32*t4+31,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
ast-dump-openmp-distribute.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) { // single loop: distribute binds to the one for-stmt
#pragma omp distribute
  for (int i = 0; i < x; i++)
    ;
}
void test_two(int x, int y) { // nested loops, no collapse: only the outer loop is associated
#pragma omp distribute
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}
void test_three(int x, int y) { // collapse(1) behaves like no collapse clause
#pragma omp distribute collapse(1)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}
void test_four(int x, int y) { // collapse(2): both loops are associated
#pragma omp distribute collapse(2)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}
void test_five(int x, int y, int z) { // collapse(2) of a triple nest: innermost loop is the body
#pragma omp distribute collapse(2)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      for (int i = 0; i < z; i++)
        ;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-distribute.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPDistributeDirective {{.*}} <line:4:9, col:23>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute.c:4:9) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1>
// CHECK-NEXT: | `-OMPDistributeDirective {{.*}} <line:10:9, col:23>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute.c:10:9) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1>
// CHECK-NEXT: | `-OMPDistributeDirective {{.*}} <line:17:9, col:35>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:24, col:34>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:33> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:33> 'int' 1
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute.c:17:9) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1>
// CHECK-NEXT: | `-OMPDistributeDirective {{.*}} <line:24:9, col:35>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:24, col:34>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:33> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:33> 'int' 2
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute.c:24:9) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1>
// CHECK-NEXT: `-OMPDistributeDirective {{.*}} <line:31:9, col:35>
// CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:24, col:34>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:33> 'int'
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:33> 'int' 2
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute.c:31:9) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
GB_binop__pair_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pair_uint16)
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pair_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__pair_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pair_uint16)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = 1
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
;
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = 1 ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PAIR || GxB_NO_UINT16 || GxB_NO_PAIR_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with no accumulator, where all three matrices are dense (see the
// banner above).  For the PAIR operator, GB_BINOP sets every cij = 1, so the
// included template fills C with 1s regardless of the values in A and B.
GrB_Info GB (_Cdense_ewise3_noaccum__pair_uint16)
(
    GrB_Matrix C,               // output matrix (dense)
    const GrB_Matrix A,         // first input (dense)
    const GrB_Matrix B,         // second input (dense)
    const int nthreads          // number of threads to use
)
{
    // GB_DISABLE is set when GxB_NO_PAIR / GxB_NO_UINT16 / GxB_NO_PAIR_UINT16
    // compile-time controls disable this kernel; the caller then falls back
    // to the generic (non-hard-coded) implementation.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C (see banner
// above).  The actual loop lives in the included subassign template; the
// B_ek_slicing / B_ntasks / B_nthreads arguments describe how the entries of
// B have been partitioned into parallel tasks (presumably by GB_ek_slice —
// TODO(review): confirm against the caller).
GrB_Info GB (_Cdense_accumB__pair_uint16)
(
    GrB_Matrix C,               // dense input/output matrix, C += B
    const GrB_Matrix B,         // matrix accumulated into C
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into the dense matrix C (see banner above).
// The scalar arrives as a typeless GB_void* and is unpacked as uint16_t.
GrB_Info GB (_Cdense_accumb__pair_uint16)
(
    GrB_Matrix C,               // dense input/output matrix, C += b
    const GB_void *p_bwork,     // typeless pointer to the uint16_t scalar b
    const int nthreads          // number of threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the inner block above always returns.
    // Left as-is: this file is auto-generated (see header) and must not be
    // hand-edited; the duplicate return is harmless.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B (see banner above), with the
// PAIR operator (every computed entry is 1, per GB_BINOP).  All real work
// happens inside the included GB_add_template; this wrapper only declares the
// per-matrix slicing workspaces and frees them afterwards.
GrB_Info GB (_AaddB__pair_uint16)
(
    GrB_Matrix C,               // output matrix
    const int C_sparsity,       // sparsity structure selected for C
    const GrB_Matrix M,         // optional mask (C<M> / C<!M> forms)
    const bool Mask_struct,     // presumably: use M structurally — confirm vs caller
    const bool Mask_comp,       // presumably: mask is complemented — confirm vs caller
    const GrB_Matrix A,         // first input
    const GrB_Matrix B,         // second input
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,    // parallel task descriptors
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Workspaces for slicing M, A, and B into parallel chunks; released by
    // GB_FREE_WORK below.
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
level.c | // RUN: %compile-run-and-check
#include <omp.h>
#include <stdio.h>
const int MaxThreads = 1024;
const int NumThreads = 64;
int main(int argc, char *argv[]) {
  // Exercises omp_get_level / omp_get_active_level /
  // omp_get_ancestor_thread_num / omp_get_team_size inside a target region at
  // nesting depths 0 (target), 1 (active parallel), and 2 (serialized
  // parallel).  Results are mapped back and verified on the host; the
  // FileCheck directives in the comments below match the printf output, so
  // every printf format string must stay exactly as written.
  int level = -1, activeLevel = -1;
  // The expected value is -1, initialize to different value.
  int ancestorTNumNeg = 1, teamSizeNeg = 1;
  int ancestorTNum0 = -1, teamSize0 = -1;
  // The expected value is -1, initialize to different value.
  int ancestorTNum1 = 1, teamSize1 = 1;
  // Per-thread accumulators, filled on the device and checked on the host.
  int check1[MaxThreads];
  int check2[MaxThreads];
  int check3[MaxThreads];
  int check4[MaxThreads];
  for (int i = 0; i < MaxThreads; i++) {
    check1[i] = check2[i] = check3[i] = check4[i] = 0;
  }
#pragma omp target map(level, activeLevel, ancestorTNumNeg, teamSizeNeg) \
    map(ancestorTNum0, teamSize0, ancestorTNum1, teamSize1) \
    map(check1[:], check2[:], check3[:], check4[:])
  {
    level = omp_get_level();
    activeLevel = omp_get_active_level();
    // Expected to return -1.
    ancestorTNumNeg = omp_get_ancestor_thread_num(-1);
    teamSizeNeg = omp_get_team_size(-1);
    // Expected to return 0 and 1.
    ancestorTNum0 = omp_get_ancestor_thread_num(0);
    teamSize0 = omp_get_team_size(0);
    // Expected to return -1 because the requested level is larger than
    // the nest level.
    ancestorTNum1 = omp_get_ancestor_thread_num(1);
    teamSize1 = omp_get_team_size(1);
    // Expecting active parallel region.
#pragma omp parallel num_threads(NumThreads)
    {
      int id = omp_get_thread_num();
      // Multiply return value of omp_get_level by 5 to avoid that this test
      // passes if both API calls return wrong values.
      check1[id] += omp_get_level() * 5 + omp_get_active_level();
      // Expected to return 0 and 1.
      check2[id] += omp_get_ancestor_thread_num(0) + 5 * omp_get_team_size(0);
      // Expected to return the current thread num.
      check2[id] += (omp_get_ancestor_thread_num(1) - id);
      // Expected to return the current number of threads.
      check2[id] += 3 * omp_get_team_size(1);
      // Expected to return -1, see above.
      check2[id] += omp_get_ancestor_thread_num(2) + omp_get_team_size(2);
      // Expecting serialized parallel region.
#pragma omp parallel
      {
#pragma omp atomic
        check3[id] += omp_get_level() * 5 + omp_get_active_level();
        // Expected to return 0 and 1.
        int check4Inc = omp_get_ancestor_thread_num(0) + 5 * omp_get_team_size(0);
        // Expected to return the parent thread num.
        check4Inc += (omp_get_ancestor_thread_num(1) - id);
        // Expected to return the number of threads in the active parallel region.
        check4Inc += 3 * omp_get_team_size(1);
        // Expected to return 0 and 1.
        check4Inc += omp_get_ancestor_thread_num(2) + 3 * omp_get_team_size(2);
        // Expected to return -1, see above.
        check4Inc += omp_get_ancestor_thread_num(3) + omp_get_team_size(3);
#pragma omp atomic
        check4[id] += check4Inc;
      }
    }
  }
  // CHECK: target: level = 0, activeLevel = 0
  printf("target: level = %d, activeLevel = %d\n", level, activeLevel);
  // CHECK: level = -1: ancestorTNum = -1, teamSize = -1
  printf("level = -1: ancestorTNum = %d, teamSize = %d\n", ancestorTNumNeg, teamSizeNeg);
  // CHECK: level = 0: ancestorTNum = 0, teamSize = 1
  printf("level = 0: ancestorTNum = %d, teamSize = %d\n", ancestorTNum0, teamSize0);
  // CHECK: level = 1: ancestorTNum = -1, teamSize = -1
  printf("level = 1: ancestorTNum = %d, teamSize = %d\n", ancestorTNum1, teamSize1);
  // CHECK-NOT: invalid
  for (int i = 0; i < MaxThreads; i++) {
    // Check active parallel region:
    // omp_get_level() = 1, omp_get_active_level() = 1
    const int Expected1 = 6;
    if (i < NumThreads) {
      if (check1[i] != Expected1) {
        printf("invalid: check1[%d] should be %d, is %d\n", i, Expected1, check1[i]);
      }
    } else if (check1[i] != 0) {
      printf("invalid: check1[%d] should be 0, is %d\n", i, check1[i]);
    }
    // 5 * 1 + 3 * 64 - 1 - 1 (see above)
    const int Expected2 = 195;
    if (i < NumThreads) {
      if (check2[i] != Expected2) {
        printf("invalid: check2[%d] should be %d, is %d\n", i, Expected2, check2[i]);
      }
    } else if (check2[i] != 0) {
      printf("invalid: check2[%d] should be 0, is %d\n", i, check2[i]);
    }
    // Check serialized parallel region:
    // omp_get_level() = 2, omp_get_active_level() = 1
    const int Expected3 = 11;
    if (i < NumThreads) {
      if (check3[i] != Expected3) {
        printf("invalid: check3[%d] should be %d, is %d\n", i, Expected3, check3[i]);
      }
    } else if (check3[i] != 0) {
      printf("invalid: check3[%d] should be 0, is %d\n", i, check3[i]);
    }
    // 5 * 1 + 3 * 64 + 3 * 1 - 1 - 1 (see above)
    const int Expected4 = 198;
    if (i < NumThreads) {
      if (check4[i] != Expected4) {
        printf("invalid: check4[%d] should be %d, is %d\n", i, Expected4, check4[i]);
      }
    } else if (check4[i] != 0) {
      printf("invalid: check4[%d] should be 0, is %d\n", i, check4[i]);
    }
  }
  return 0;
}
|
GB_unaryop__minv_uint32_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint32_uint64
// op(A') function: GB_tran__minv_uint32_uint64
// C type: uint32_t
// A type: uint64_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 32)
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 32) ;
// casting
#define GB_CASTING(z, x) \
uint32_t z = (uint32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT32 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply z = GB_IMINV_UNSIGNED (x, 32) — the unsigned
// integer "multiplicative inverse" (see GB.h for its exact definition,
// including the x == 0 case — assumption, verify there) — to every entry,
// typecasting from uint64_t to uint32_t.
// NOTE(review): auto-generated file (see header); comments only, no code edits.
GrB_Info GB_unop__minv_uint32_uint64
(
    uint32_t *restrict Cx,          // output array, size anz
    const uint64_t *restrict Ax,    // input array, size anz
    int64_t anz,                    // number of entries in Ax and Cx
    int nthreads                    // number of OpenMP threads to use
)
{
#if GB_DISABLE
    // this operator/type combination was disabled at compile time
    // (see GB_DISABLE above); the caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
#else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;     // Cx [p] = op ((uint32_t) Ax [p])
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast uint64_t to uint32_t, and apply
// the MINV unary operator.  The actual loop lives in the code template
// GB_unaryop_transpose.c, specialized through the GB_* macros defined above.
GrB_Info GB_tran__minv_uint32_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,   // per-slice row counts (used by the
                                    // transpose template — assumption, verify)
    GBI_single_iterator Iter,       // iterator over the slices of A
    const int64_t *restrict A_slice,
    int naslice                     // number of slices of A
)
{
#if GB_DISABLE
    // disabled at compile time; caller uses the generic transpose
    return (GrB_NO_VALUE) ;
#else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
utils.h | #include "fstream"
#include "iostream"
#include <faiss/utils/random.h>
#include <faiss/utils/Heap.h>
#include <queue>
#include <chrono>
#include <string.h>
#include <fstream>
#include <sys/resource.h>
#include <sys/stat.h>
#include <dirent.h>
namespace bslib{
using idx_t = int64_t;
// Simple stopwatch built on std::chrono::steady_clock.
// getTimeConsumption() reports elapsed microseconds; the *_usage helpers
// divide by 1e6 to report seconds.
struct time_recorder{
    std::chrono::steady_clock::time_point start_time;
    public:
    // Start timing at construction.
    time_recorder() : start_time(std::chrono::steady_clock::now()) {}

    // Microseconds elapsed since construction or the last reset().
    float getTimeConsumption(){
        const auto now = std::chrono::steady_clock::now();
        const auto elapsed_us = std::chrono::duration_cast<std::chrono::microseconds>(now - start_time);
        return elapsed_us.count();
    }

    // Restart the stopwatch.
    void reset(){
        start_time = std::chrono::steady_clock::now();
    }

    // Append "<s> The time usage: <seconds> s" to the record stream.
    void record_time_usage(std::ofstream & output_record, std::string s){
        output_record << s << " The time usage: " << getTimeConsumption() / 1000000 << " s " << std::endl;
    }

    // Print "<s> The time usage: <seconds> s" to stdout.
    void print_time_usage(std::string s){
        std::cout << s << " The time usage: " << getTimeConsumption() / 1000000 << " s "<< std::endl;
    }

    // Elapsed time in seconds.
    float get_time_usage(){
        return getTimeConsumption() / 1000000;
    }
};
// Reports process memory statistics obtained from getrusage(2).
// Fields printed: ru_ixrss / ru_isrss / ru_idrss / ru_maxrss
// (on Linux only ru_maxrss is maintained; the others read 0 — verify per OS).
struct memory_recorder{
    public:
    // Append the current memory usage, tagged with s, to the record stream.
    void record_memory_usage(std::ofstream & output_record, std::string s){
        rusage usage;
        getrusage(RUSAGE_SELF, &usage);
        output_record << s << " The memory usage: " << usage.ru_ixrss << " KB / " << usage.ru_isrss << " KB / " << usage.ru_idrss << " KB / " << usage.ru_maxrss << " KB " << std::endl;
    }

    // Print the current memory usage, tagged with s, to stdout.
    void print_memory_usage(std::string s){
        rusage usage;
        getrusage(RUSAGE_SELF, &usage);
        std::cout << s << " The memory usage: " << usage.ru_ixrss << " / " << usage.ru_isrss << " / " << usage.ru_idrss << " / " << usage.ru_maxrss << std::endl;
    }
};
// Reports recall@k results together with the nprobe search parameter.
struct recall_recorder{
    public:
    // Print recall@k for a query batch, then the nprobe used, to stdout.
    void print_recall_performance(size_t n_query, float recall, size_t recall_k, std::string mode, const size_t nprobe){
        std::cout << "The recall@ " << recall_k << " for " << n_query << " queries in " << mode << " mode is: " << recall << std::endl
                  << "The search parameters is: " << "The nprobe is: " << nprobe << std::endl;
    }

    // Same report, appended to the record stream.
    void record_recall_performance(std::ofstream & output_record, size_t n_query, float recall, size_t recall_k, std::string mode, const size_t nprobe){
        output_record << "The recall@" << recall_k << " for " << n_query << " queries in " << mode << " mode is: " << recall << std::endl
                      << "The search parameters is " << "The nprobe is: " << nprobe << std::endl;
    }
};
// Print two sample vectors from a flat row-major buffer: the first vector
// and the last one (index dataset_size - 1), for a quick sanity check.
template<typename T>
void CheckResult(T * data, const size_t dimension, size_t dataset_size = 2){
    std::cout << "Printing sample (2 vectors) of the dataset " << std::endl;
    const T * first_vec = data;
    for (size_t d = 0; d < dimension; d++){
        std::cout << first_vec[d] << " ";
    }
    std::cout << std::endl << std::endl;
    const T * last_vec = data + (dataset_size - 1) * dimension;
    for (size_t d = 0; d < dimension; d++){
        std::cout << last_vec[d] << " ";
    }
    std::cout << std::endl << std::endl;
}
// Load n vectors from a *vecs-format stream: each record is a uint32
// dimension header followed by `dimension` values of type T.
// Exits the process if a stored header disagrees with `dimension`.
template<typename T>
void readXvec(std::ifstream & in, T * data, const size_t dimension, const size_t n,
bool CheckFlag = false, bool ShowProcess = false){
    if (ShowProcess)
        std::cout << "Loading data with " << n << " vectors in " << dimension << std::endl;
    const size_t report_interval = n / 10;
    for (size_t i = 0; i < n; i++){
        uint32_t stored_dim = 0;
        in.read(reinterpret_cast<char *>(&stored_dim), sizeof(uint32_t));
        // every record must carry the dimension the caller expects
        if (stored_dim != dimension){
            std::cout << stored_dim << " " << dimension << " dimension error \n";
            exit(1);
        }
        in.read(reinterpret_cast<char *>(data + i * stored_dim), stored_dim * sizeof(T));
        if (ShowProcess && report_interval != 0 && i % report_interval == 0)
            std::cout << "[Finished loading " << i << " / " << n << "]" << std::endl;
    }
    if (CheckFlag)
        CheckResult<T>(data, dimension, n);
}
// Derive the number of *vecs records in an open stream from its byte size:
// each record is a uint32 header plus `dimension` values of type T.
// NOTE: leaves the stream positioned at end-of-file, as before.
template<typename T>
uint32_t GetXvecSize(std::ifstream & in, const size_t dimension){
    in.seekg(0, std::ios::end);
    const size_t FileSize = (size_t) in.tellg();
    std::cout << "The file size is " << FileSize / 1000 << " KB " << std::endl;
    const size_t record_bytes = dimension * sizeof(T) + sizeof(uint32_t);
    size_t DataSize = (unsigned) (FileSize / record_bytes);
    std::cout << "The data size is " << DataSize << std::endl;
    return DataSize;
}
// Load n vectors stored as type T from a *vecs-format stream and widen
// every component to float.  Exits on a dimension-header mismatch.
template<typename T>
void readXvecFvec(std::ifstream & in, float * data, const size_t dimension, const size_t n = 1,
bool CheckFlag = false, bool ShowProcess = false){
    if (ShowProcess)
        std::cout << "Loading data with " << n << " vectors in " << dimension << std::endl;
    std::vector<T> scratch(dimension);
    const size_t report_interval = n / 10;
    for (size_t i = 0; i < n; i++){
        uint32_t stored_dim = 0;
        in.read(reinterpret_cast<char *>(&stored_dim), sizeof(uint32_t));
        if (stored_dim != dimension) {
            std::cout << stored_dim << " " << dimension << " dimension error \n";
            exit(1);
        }
        in.read(reinterpret_cast<char *>(scratch.data()), stored_dim * sizeof(T));
        for (size_t j = 0; j < dimension; j++){
            data[i * stored_dim + j] = 1.0 * scratch[j];
        }
        if (ShowProcess && report_interval != 0 && i % report_interval == 0)
            std::cout << "[Finished loading " << i << " / " << n << "]" << std::endl;
    }
    if (CheckFlag)
        CheckResult<float>(data, dimension, n);
}
// Write n vectors in *vecs format: each record is a uint32 dimension
// header followed by the vector payload of type T.
template<typename T>
void writeXvec(std::ofstream & out, T * data, const size_t dimension, const size_t n = 1,
bool ShowProcess = false){
    const uint32_t dim = dimension;
    const size_t report_interval = 1000;
    for (size_t i = 0; i < n; i++){
        out.write(reinterpret_cast<const char *>(&dim), sizeof(uint32_t));
        out.write(reinterpret_cast<const char *>(data + i * dim), dim * sizeof(T));
        if (ShowProcess && i % report_interval == 0)
            std::cout << "[Finished writing " << i << " / " << n << "]" << std::endl;
    }
}
// True when FilePath names a file that can be opened for reading.
inline bool exists(const std::string FilePath){
    return std::ifstream(FilePath).good();
}
// Copy sub_n vectors, chosen via a fixed-seed (1234) random permutation of
// [0, n), from x into output.  Deterministic across runs.
template<typename T>
void RandomSubset(const T * x, T * output, size_t dimension, size_t n, size_t sub_n){
    const long seed = 1234;
    std::vector<int> perm(n);
    faiss::rand_perm(perm.data(), n, seed);
    for (size_t i = 0; i < sub_n; i++){
        const T * src = x + perm[i] * dimension;
        memcpy(output + i * dimension, src, sizeof(T) * dimension);
    }
}
// Convenience wrapper: print s on its own line (flushes via endl).
inline void PrintMessage(std::string s){
    std::cout << s << std::endl;
}
// Create FilePath as a directory (owner read/write/execute) if it does not
// already exist.
// Fix: the original leaked the DIR handle returned by opendir() whenever the
// directory already existed; close it instead of discarding it.
inline void PrepareFolder(const char * FilePath){
    DIR * dir = opendir(FilePath);
    if (dir == NULL)
        mkdir(FilePath, S_IRWXU); // owner has the right to read, write and execute
    else
        closedir(dir);
}
// Map each group id into the range [0, hash_size) by modulo.
// Entries are independent, so the loop is parallelized with OpenMP.
template<typename T>
inline void HashMapping(size_t n, const T * group_ids, T * hash_ids, size_t hash_size){
#pragma omp parallel for
    for (size_t idx = 0; idx < n; idx++){
        hash_ids[idx] = group_ids[idx] % hash_size;
    }
}
// Current local time as "YYYY M D H M S" (space separated, no zero padding).
// Fix: std::localtime returns a pointer to shared static storage and is not
// thread-safe; use the POSIX reentrant localtime_r with a local buffer
// (this header is already POSIX-only: it uses <sys/resource.h>, <dirent.h>).
inline std::string GetNowTime() {
    time_t setTime;
    time(&setTime);
    tm tm_buf;
    tm* ptm = localtime_r(&setTime, &tm_buf);
    std::string time = std::to_string(ptm->tm_year + 1900)
    + " "
    + std::to_string(ptm->tm_mon + 1)
    + " "
    + std::to_string(ptm->tm_mday)
    + " "
    + std::to_string(ptm->tm_hour) + " "
    + std::to_string(ptm->tm_min) + " "
    + std::to_string(ptm->tm_sec);
    return time;
}
/**
 *
 * This is the function for computing the distance between a query and a
 * quantized base vector, using the per-query precomputed lookup table
 * (as the name pq_L2sqr indicates, the result is a squared L2 distance,
 * not an inner product).
 *
 * Input:
 * code: the PQ code of the base vector
 *
 * Output:
 * the distance value between the query and the quantized base vector
 *
 **/
float pq_L2sqr(const uint8_t *code, const float * precomputed_table, size_t code_size, size_t ksub);
/**
*
 * This is the function for keeping the k best results out of m result values
*
* Input:
* m: the total number of result pairs
* k: the number of result pairs that to be kept
* all_dists: the origin results of dists size: m
* all_labels: the origin label results size: m
* sub_dists: the kept result of dists size: k
* sub_labels: the kept result of labels size: k
*
**/
void keep_k_min(const size_t m, const size_t k, const float * all_dists, const idx_t * all_labels, float * sub_dists, idx_t * sub_labels);
void keep_k_min_alpha(const size_t m, const size_t k, const float * all_dists, const idx_t * all_labels, const float * all_alphas,
float * sub_dists, idx_t * sub_labels, float * sub_alphas);
}
|
column_matrix.h | /*!
* Copyright 2017 by Contributors
* \file column_matrix.h
* \brief Utility for fast column-wise access
* \author Philip Cho
*/
#ifndef XGBOOST_COMMON_COLUMN_MATRIX_H_
#define XGBOOST_COMMON_COLUMN_MATRIX_H_
#include <limits>
#include <vector>
#include <memory>
#include "hist_util.h"
namespace xgboost {
namespace common {
class ColumnMatrix;
/*! \brief column type */
enum ColumnType {
kDenseColumn,
kSparseColumn
};
/*! \brief a column storage, to be used with ApplySplit. Note that each
bin id is stored as index[i] + index_base.
Different types of column index for each column allow
to reduce the memory usage. */
template <typename BinIdxType>
class Column {
 public:
  // Read-only view over one feature's bin indices. Stored values are
  // per-feature bin ids; the global bin id is stored value + index_base.
  Column(ColumnType type, common::Span<const BinIdxType> index, const uint32_t index_base)
      : type_(type),
        index_(index),
        index_base_(index_base) {}

  virtual ~Column() = default;

  // Global bin id of the idx-th stored element.
  uint32_t GetGlobalBinIdx(size_t idx) const {
    return static_cast<uint32_t>(index_[idx]) + index_base_;
  }

  // Per-feature (local) bin id of the idx-th stored element.
  BinIdxType GetFeatureBinIdx(size_t idx) const { return index_[idx]; }

  // Smallest global bin id belonging to this feature.
  uint32_t GetBaseIdx() const { return index_base_; }

  // Raw view of the per-feature bin ids.
  common::Span<const BinIdxType> GetFeatureBinIdxPtr() const { return index_; }

  // kDenseColumn or kSparseColumn.
  ColumnType GetType() const { return type_; }

  /* returns number of elements in column */
  size_t Size() const { return index_.size(); }

 private:
  /* type of column */
  ColumnType type_;
  /* bin indexes in range [0, max_bins - 1] */
  common::Span<const BinIdxType> index_;
  /* bin index offset for specific feature */
  const uint32_t index_base_;
};
template <typename BinIdxType>
class SparseColumn: public Column<BinIdxType> {
 public:
  // Sparse storage: bin ids plus a parallel array with the row index of
  // each stored entry.
  SparseColumn(ColumnType type, common::Span<const BinIdxType> index,
               uint32_t index_base, common::Span<const size_t> row_ind)
      : Column<BinIdxType>(type, index, index_base),
        row_ind_(row_ind) {}

  // Raw pointer to the row-index array (parallel to the bin ids).
  const size_t* GetRowData() const { return row_ind_.data(); }

  // Row that the idx-th stored entry belongs to (unchecked access,
  // deliberately bypassing Span's operator[]).
  size_t GetRowIdx(size_t idx) const {
    return row_ind_.data()[idx];
  }

 private:
  /* indexes of rows */
  common::Span<const size_t> row_ind_;
};
template <typename BinIdxType>
class DenseColumn: public Column<BinIdxType> {
 public:
  // Dense storage: one slot per row. missing_flags (shared with the owning
  // ColumnMatrix) marks rows that carry no value for this feature.
  DenseColumn(ColumnType type, common::Span<const BinIdxType> index,
              uint32_t index_base, const std::vector<bool>& missing_flags,
              size_t feature_offset)
      : Column<BinIdxType>(type, index, index_base),
        missing_flags_(missing_flags),
        feature_offset_(feature_offset) {}

  // True when row idx holds no value for this feature.
  bool IsMissing(size_t idx) const { return missing_flags_[feature_offset_ + idx]; }

 private:
  /* flags for missing values in dense columns; indexed globally by
     feature_offset_ + row id */
  const std::vector<bool>& missing_flags_;
  size_t feature_offset_;
};
/*! \brief a collection of columns, with support for construction from
GHistIndexMatrix. */
class ColumnMatrix {
 public:
  // get number of features
  inline bst_uint GetNumFeature() const {
    return static_cast<bst_uint>(type_.size());
  }

  // construct column matrix from GHistIndexMatrix.
  // A feature whose fraction of present entries is below sparse_threshold is
  // stored sparsely (bin id + row id); all others densely (one slot per row).
  inline void Init(const GHistIndexMatrix& gmat,
                   double sparse_threshold) {
    const int32_t nfeature = static_cast<int32_t>(gmat.cut.Ptrs().size() - 1);
    const size_t nrow = gmat.row_ptr.size() - 1;
    // identify type of each column
    feature_counts_.resize(nfeature);
    type_.resize(nfeature);
    std::fill(feature_counts_.begin(), feature_counts_.end(), 0);
    uint32_t max_val = std::numeric_limits<uint32_t>::max();
    // no single feature may own more than uint32::max bins
    for (int32_t fid = 0; fid < nfeature; ++fid) {
      CHECK_LE(gmat.cut.Ptrs()[fid + 1] - gmat.cut.Ptrs()[fid], max_val);
    }
    bool all_dense = gmat.IsDense();
    gmat.GetFeatureCounts(&feature_counts_[0]);
    // classify features by how many entries are actually present
    for (int32_t fid = 0; fid < nfeature; ++fid) {
      if (static_cast<double>(feature_counts_[fid])
          < sparse_threshold * nrow) {
        type_[fid] = kSparseColumn;
        all_dense = false;
      } else {
        type_[fid] = kDenseColumn;
      }
    }
    // want to compute storage boundary for each feature
    // using variants of prefix sum scan:
    // dense features reserve nrow slots, sparse ones only their entry count
    feature_offsets_.resize(nfeature + 1);
    size_t accum_index_ = 0;
    feature_offsets_[0] = accum_index_;
    for (int32_t fid = 1; fid < nfeature + 1; ++fid) {
      if (type_[fid - 1] == kDenseColumn) {
        accum_index_ += static_cast<size_t>(nrow);
      } else {
        accum_index_ += feature_counts_[fid - 1];
      }
      feature_offsets_[fid] = accum_index_;
    }
    SetTypeSize(gmat.max_num_bins);
    // index_ is a raw byte buffer; every entry takes bins_type_size_ bytes
    index_.resize(feature_offsets_[nfeature] * bins_type_size_, 0);
    if (!all_dense) {
      row_ind_.resize(feature_offsets_[nfeature]);
    }
    // store least bin id for each feature.
    // NOTE: non-owning pointer into gmat.cut — gmat must outlive this object.
    index_base_ = const_cast<uint32_t*>(gmat.cut.Ptrs().data());
    const bool noMissingValues = NoMissingValues(gmat.row_ptr[nrow], nrow, nfeature);
    any_missing_ = !noMissingValues;
    if (noMissingValues) {
      missing_flags_.resize(feature_offsets_[nfeature], false);
    } else {
      // start as all-missing; SetIndex*/SetIndexAllDense clear present slots
      missing_flags_.resize(feature_offsets_[nfeature], true);
    }
    // pre-fill index_ for dense columns
    if (all_dense) {
      BinTypeSize gmat_bin_size = gmat.index.GetBinTypeSize();
      if (gmat_bin_size == kUint8BinsTypeSize) {
        SetIndexAllDense(gmat.index.data<uint8_t>(), gmat, nrow, nfeature, noMissingValues);
      } else if (gmat_bin_size == kUint16BinsTypeSize) {
        SetIndexAllDense(gmat.index.data<uint16_t>(), gmat, nrow, nfeature, noMissingValues);
      } else {
        CHECK_EQ(gmat_bin_size, kUint32BinsTypeSize);
        SetIndexAllDense(gmat.index.data<uint32_t>(), gmat, nrow, nfeature, noMissingValues);
      }
      /* For sparse DMatrix gmat.index.getBinTypeSize() returns always kUint32BinsTypeSize
         but for ColumnMatrix we still have a chance to reduce the memory consumption */
    } else {
      if (bins_type_size_ == kUint8BinsTypeSize) {
        SetIndex<uint8_t>(gmat.index.data<uint32_t>(), gmat, nfeature);
      } else if (bins_type_size_ == kUint16BinsTypeSize) {
        SetIndex<uint16_t>(gmat.index.data<uint32_t>(), gmat, nfeature);
      } else {
        CHECK_EQ(bins_type_size_, kUint32BinsTypeSize);
        SetIndex<uint32_t>(gmat.index.data<uint32_t>(), gmat, nfeature);
      }
    }
  }

  /* Set the number of bytes based on numeric limit of maximum number of bins
     provided by user: 1, 2 or 4 bytes per stored bin id. */
  void SetTypeSize(size_t max_num_bins) {
    if ( (max_num_bins - 1) <= static_cast<int>(std::numeric_limits<uint8_t>::max()) ) {
      bins_type_size_ = kUint8BinsTypeSize;
    } else if ((max_num_bins - 1) <= static_cast<int>(std::numeric_limits<uint16_t>::max())) {
      bins_type_size_ = kUint16BinsTypeSize;
    } else {
      bins_type_size_ = kUint32BinsTypeSize;
    }
  }

  /* Fetch an individual column. This code should be used with type switch
     to determine type of bin id's: sizeof(BinIdxType) must equal the stored
     element width chosen by SetTypeSize. */
  template <typename BinIdxType>
  std::unique_ptr<const Column<BinIdxType> > GetColumn(unsigned fid) const {
    CHECK_EQ(sizeof(BinIdxType), bins_type_size_);
    const size_t feature_offset = feature_offsets_[fid];  // to get right place for certain feature
    const size_t column_size = feature_offsets_[fid + 1] - feature_offset;
    common::Span<const BinIdxType> bin_index = { reinterpret_cast<const BinIdxType*>(
                                                 &index_[feature_offset * bins_type_size_]),
                                                 column_size };
    std::unique_ptr<const Column<BinIdxType> > res;
    if (type_[fid] == ColumnType::kDenseColumn) {
      res.reset(new DenseColumn<BinIdxType>(type_[fid], bin_index, index_base_[fid],
                                            missing_flags_, feature_offset));
    } else {
      res.reset(new SparseColumn<BinIdxType>(type_[fid], bin_index, index_base_[fid],
                                             {&row_ind_[feature_offset], column_size}));
    }
    return res;
  }

  // All-dense fast path: scatter gmat's row-major bin ids into per-feature
  // column order.  `index` holds GLOBAL bin ids here (no index_base_
  // subtraction — assumption based on the dense layout, verify vs SetIndex).
  template<typename T>
  inline void SetIndexAllDense(T* index, const GHistIndexMatrix& gmat, const size_t nrow,
                               const size_t nfeature, const bool noMissingValues) {
    T* local_index = reinterpret_cast<T*>(&index_[0]);

    /* missing values make sense only for column with type kDenseColumn,
       and if no missing values were observed it could be handled much faster. */
    if (noMissingValues) {
      // every row has exactly nfeature entries -> embarrassingly parallel
#pragma omp parallel for num_threads(omp_get_max_threads())
      for (omp_ulong rid = 0; rid < nrow; ++rid) {
        const size_t ibegin = rid*nfeature;
        const size_t iend = (rid+1)*nfeature;
        size_t j = 0;
        for (size_t i = ibegin; i < iend; ++i, ++j) {
          const size_t idx = feature_offsets_[j];
          local_index[idx + rid] = index[i];
        }
      }
    } else {
      /* to handle rows in all batches, sum of all batch sizes equal to gmat.row_ptr.size() - 1 */
      size_t rbegin = 0;
      for (const auto &batch : gmat.p_fmat->GetBatches<SparsePage>()) {
        const xgboost::Entry* data_ptr = batch.data.HostVector().data();
        const std::vector<bst_row_t>& offset_vec = batch.offset.HostVector();
        const size_t batch_size = batch.Size();
        CHECK_LT(batch_size, offset_vec.size());
        for (size_t rid = 0; rid < batch_size; ++rid) {
          const size_t size = offset_vec[rid + 1] - offset_vec[rid];
          SparsePage::Inst inst = {data_ptr + offset_vec[rid], size};
          const size_t ibegin = gmat.row_ptr[rbegin + rid];
          const size_t iend = gmat.row_ptr[rbegin + rid + 1];
          CHECK_EQ(ibegin + inst.size(), iend);
          size_t j = 0;
          size_t fid = 0;
          for (size_t i = ibegin; i < iend; ++i, ++j) {
            fid = inst[j].index;
            const size_t idx = feature_offsets_[fid];
            /* rbegin allows to store indexes from specific SparsePage batch */
            local_index[idx + rbegin + rid] = index[i];
            missing_flags_[idx + rbegin + rid] = false;
          }
        }
        rbegin += batch.Size();
      }
    }
  }

  // Mixed dense/sparse path: walk every SparsePage batch, convert the GLOBAL
  // bin id to a per-feature id (bin_id - index_base_[fid]) and append it to
  // the feature's column; sparse columns also record the row id.
  template<typename T>
  inline void SetIndex(uint32_t* index, const GHistIndexMatrix& gmat,
                       const size_t nfeature) {
    // running fill count per sparse feature
    std::vector<size_t> num_nonzeros;
    num_nonzeros.resize(nfeature);
    std::fill(num_nonzeros.begin(), num_nonzeros.end(), 0);

    T* local_index = reinterpret_cast<T*>(&index_[0]);
    size_t rbegin = 0;  // global row id of the first row in the current batch
    for (const auto &batch : gmat.p_fmat->GetBatches<SparsePage>()) {
      const xgboost::Entry* data_ptr = batch.data.HostVector().data();
      const std::vector<bst_row_t>& offset_vec = batch.offset.HostVector();
      const size_t batch_size = batch.Size();
      CHECK_LT(batch_size, offset_vec.size());
      for (size_t rid = 0; rid < batch_size; ++rid) {
        const size_t ibegin = gmat.row_ptr[rbegin + rid];
        const size_t iend = gmat.row_ptr[rbegin + rid + 1];
        size_t fid = 0;
        const size_t size = offset_vec[rid + 1] - offset_vec[rid];
        SparsePage::Inst inst = {data_ptr + offset_vec[rid], size};
        CHECK_EQ(ibegin + inst.size(), iend);
        size_t j = 0;
        for (size_t i = ibegin; i < iend; ++i, ++j) {
          const uint32_t bin_id = index[i];
          fid = inst[j].index;
          if (type_[fid] == kDenseColumn) {
            T* begin = &local_index[feature_offsets_[fid]];
            begin[rid + rbegin] = bin_id - index_base_[fid];
            missing_flags_[feature_offsets_[fid] + rid + rbegin] = false;
          } else {
            T* begin = &local_index[feature_offsets_[fid]];
            begin[num_nonzeros[fid]] = bin_id - index_base_[fid];
            row_ind_[feature_offsets_[fid] + num_nonzeros[fid]] = rid + rbegin;
            ++num_nonzeros[fid];
          }
        }
      }
      rbegin += batch.Size();
    }
  }

  // Element width (1/2/4 bytes) chosen by SetTypeSize.
  BinTypeSize GetTypeSize() const {
    return bins_type_size_;
  }

  // This is just an utility function: a fully dense matrix has exactly
  // n_features entries per row.
  bool NoMissingValues(const size_t n_elements,
                       const size_t n_row, const size_t n_features) {
    return n_elements == n_features * n_row;
  }

  // And this returns part of state: whether any entry was missing at Init.
  bool AnyMissing() const {
    return any_missing_;
  }

 private:
  // raw byte storage for all columns' bin ids (bins_type_size_ bytes each)
  std::vector<uint8_t> index_;
  // number of present entries per feature
  std::vector<size_t> feature_counts_;
  std::vector<ColumnType> type_;
  // row id per stored entry (sparse columns only)
  std::vector<size_t> row_ind_;
  /* indicate where each column's index and row_ind is stored. */
  std::vector<size_t> feature_offsets_;
  // index_base_[fid]: least bin id for feature fid
  // (non-owning pointer into gmat.cut — see Init)
  uint32_t* index_base_;
  // per-slot missing markers, indexed like index_
  std::vector<bool> missing_flags_;
  BinTypeSize bins_type_size_;
  bool any_missing_;
};
} // namespace common
} // namespace xgboost
#endif // XGBOOST_COMMON_COLUMN_MATRIX_H_
|
target-29.c | #include <omp.h>
#include <stdlib.h>
struct S { char p[64]; int a; int b[2]; long c[4]; int *d; char q[64]; };
/* Exercise OpenMP mapping of individual struct members: scalars (s.a),
   whole arrays (s.b), array sections (s.c[1:2]) and pointer-based sections
   with a negative lower bound (s.d[-2:3]), across target, target data,
   target update and target enter/exit data constructs.  Aborts on any
   mismatch between host and device views.  */
__attribute__((noinline, noclone)) void
foo (struct S s)
{
  int d = omp_get_default_device ();
  int id = omp_get_initial_device ();
  int sep = 1;
  /* Fall back to the initial (host) device if the default device is
     unavailable.  */
  if (d < 0 || d >= omp_get_num_devices ())
    d = id;
  int err;
  /* sep is mapped to: the device-side write below only reaches the host
     copy when host and device share memory — so sep stays 1 exactly when
     the memories are separate, gating the presence checks below.  */
  #pragma omp target map(tofrom: s.a, s.b, s.c[1:2], s.d[-2:3]) map(to: sep) map(from: err)
  {
    /* Verify the initial values set up in main() arrived, then overwrite.  */
    err = s.a != 11 || s.b[0] != 12 || s.b[1] != 13;
    err |= s.c[1] != 15 || s.c[2] != 16 || s.d[-2] != 18 || s.d[-1] != 19 || s.d[0] != 20;
    s.a = 35; s.b[0] = 36; s.b[1] = 37;
    s.c[1] = 38; s.c[2] = 39; s.d[-2] = 40; s.d[-1] = 41; s.d[0] = 42;
    sep = 0;
  }
  if (err) abort ();
  /* tofrom mapping must have copied the device-side updates back.  */
  err = s.a != 35 || s.b[0] != 36 || s.b[1] != 37;
  err |= s.c[1] != 38 || s.c[2] != 39 || s.d[-2] != 40 || s.d[-1] != 41 || s.d[0] != 42;
  if (err) abort ();
  s.a = 50; s.b[0] = 49; s.b[1] = 48;
  s.c[1] = 47; s.c[2] = 46; s.d[-2] = 45; s.d[-1] = 44; s.d[0] = 43;
  /* After the region ends nothing may remain mapped (separate memory only).  */
  if (sep
      && (omp_target_is_present (&s.a, d)
	  || omp_target_is_present (s.b, d)
	  || omp_target_is_present (&s.c[1], d)
	  || omp_target_is_present (s.d, d)
	  || omp_target_is_present (&s.d[-2], d)))
    abort ();
  /* target data: alloc-only mapping; data travels via target update.  */
  #pragma omp target data map(alloc: s.a, s.b, s.c[1:2], s.d[-2:3])
  {
    if (!omp_target_is_present (&s.a, d)
	|| !omp_target_is_present (s.b, d)
	|| !omp_target_is_present (&s.c[1], d)
	|| !omp_target_is_present (s.d, d)
	|| !omp_target_is_present (&s.d[-2], d))
      abort ();
    #pragma omp target update to(s.a, s.b, s.c[1:2], s.d[-2:3])
    #pragma omp target map(alloc: s.a, s.b, s.c[1:2], s.d[-2:3]) map(from: err)
    {
      /* Values pushed by the update-to must be visible on the device.  */
      err = s.a != 50 || s.b[0] != 49 || s.b[1] != 48;
      err |= s.c[1] != 47 || s.c[2] != 46 || s.d[-2] != 45 || s.d[-1] != 44 || s.d[0] != 43;
      s.a = 17; s.b[0] = 18; s.b[1] = 19;
      s.c[1] = 20; s.c[2] = 21; s.d[-2] = 22; s.d[-1] = 23; s.d[0] = 24;
    }
    #pragma omp target update from(s.a, s.b, s.c[1:2], s.d[-2:3])
  }
  if (sep
      && (omp_target_is_present (&s.a, d)
	  || omp_target_is_present (s.b, d)
	  || omp_target_is_present (&s.c[1], d)
	  || omp_target_is_present (s.d, d)
	  || omp_target_is_present (&s.d[-2], d)))
    abort ();
  if (err) abort ();
  /* update-from must have pulled the device-side writes back to the host.  */
  err = s.a != 17 || s.b[0] != 18 || s.b[1] != 19;
  err |= s.c[1] != 20 || s.c[2] != 21 || s.d[-2] != 22 || s.d[-1] != 23 || s.d[0] != 24;
  if (err) abort ();
  s.a = 33; s.b[0] = 34; s.b[1] = 35;
  s.c[1] = 36; s.c[2] = 37; s.d[-2] = 38; s.d[-1] = 39; s.d[0] = 40;
  /* Unstructured mapping: first enter data raises the reference count to 1.  */
  #pragma omp target enter data map(alloc: s.a, s.b, s.c[1:2], s.d[-2:3])
  if (!omp_target_is_present (&s.a, d)
      || !omp_target_is_present (s.b, d)
      || !omp_target_is_present (&s.c[1], d)
      || !omp_target_is_present (s.d, d)
      || !omp_target_is_present (&s.d[-2], d))
    abort ();
  /* Second enter data (refcount -> 2); `always` forces the host->device copy
     even though the data is already present.  */
  #pragma omp target enter data map(always, to: s.a, s.b, s.c[1:2], s.d[-2:3])
  #pragma omp target map(alloc: s.a, s.b, s.c[1:2], s.d[-2:3]) map(from: err)
  {
    err = s.a != 33 || s.b[0] != 34 || s.b[1] != 35;
    err |= s.c[1] != 36 || s.c[2] != 37 || s.d[-2] != 38 || s.d[-1] != 39 || s.d[0] != 40;
    s.a = 49; s.b[0] = 48; s.b[1] = 47;
    s.c[1] = 46; s.c[2] = 45; s.d[-2] = 44; s.d[-1] = 43; s.d[0] = 42;
  }
  /* First exit data (refcount 2 -> 1): `always, from` copies back but the
     data must still be present afterwards.  */
  #pragma omp target exit data map(always, from: s.a, s.b, s.c[1:2], s.d[-2:3])
  if (!omp_target_is_present (&s.a, d)
      || !omp_target_is_present (s.b, d)
      || !omp_target_is_present (&s.c[1], d)
      || !omp_target_is_present (s.d, d)
      || !omp_target_is_present (&s.d[-2], d))
    abort ();
  /* Second exit data (refcount 1 -> 0): release removes the mapping.  */
  #pragma omp target exit data map(release: s.a, s.b, s.c[1:2], s.d[-2:3])
  if (sep
      && (omp_target_is_present (&s.a, d)
	  || omp_target_is_present (s.b, d)
	  || omp_target_is_present (&s.c[1], d)
	  || omp_target_is_present (s.d, d)
	  || omp_target_is_present (&s.d[-2], d)))
    abort ();
  if (err) abort ();
  err = s.a != 49 || s.b[0] != 48 || s.b[1] != 47;
  err |= s.c[1] != 46 || s.c[2] != 45 || s.d[-2] != 44 || s.d[-1] != 43 || s.d[0] != 42;
  if (err) abort ();
}
int
main ()
{
  /* Backing storage for s.d: the pointer is aimed at backing + 2, so the
     negative offsets d[-2] and d[-1] used in foo() stay in bounds.  */
  int backing[3] = { 18, 19, 20 };
  struct S s = { {}, 11, { 12, 13 }, { 14, 15, 16, 17 }, backing + 2, {} };
  foo (s);
  return 0;
}
|
dropout_op.h | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <cstring>
#include <random>
#include <string>
#include <algorithm>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/generator.h"
#include "paddle/fluid/framework/op_registry.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename T, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
template <typename T, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
// CPU forward kernel for dropout.
// Training (is_test == false): each element is zeroed with probability
// dropout_prob; kept elements are either scaled by 1/(1-p)
// ("upscale_in_train") or passed through ("downgrade_in_infer").  The 0/1
// keep decision per element is written to the "Mask" output.
// Inference (is_test == true): no masking; output is X ("upscale_in_train")
// or X*(1-p) otherwise.
template <typename DeviceContext, typename T>
class CPUDropoutKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* x = context.Input<Tensor>("X");
    // optional runtime seed tensor; overrides the attribute seed when given
    auto* seed =
        context.HasInput("Seed") ? context.Input<Tensor>("Seed") : nullptr;
    auto* y = context.Output<Tensor>("Out");
    const auto* x_data = x->data<T>();
    auto* y_data = y->mutable_data<T>(context.GetPlace());
    float dropout_prob = context.Attr<float>("dropout_prob");

    auto& dropout_implementation =
        context.Attr<std::string>("dropout_implementation");
    bool upscale_in_train = (dropout_implementation == "upscale_in_train");
    if (!context.Attr<bool>("is_test")) {
      auto* mask = context.Output<Tensor>("Mask");
      auto* mask_data = mask->mutable_data<uint8_t>(context.GetPlace());
      size_t size = framework::product(mask->dims());

      // Special case when dropout_prob is 1.0: everything is dropped
      if (dropout_prob == 1.0f) {
        std::memset(y_data, 0, size * sizeof(*y_data));        // NOLINT
        std::memset(mask_data, 0, size * sizeof(*mask_data));  // NOLINT
        return;
      }
      // std::minstd_rand engine;
      // NOTE: fixed seed should only be used in unittest or for debug.
      // Guarantee to use random seed in training.
      int seed_data = 0;
      if (seed) {
        seed_data = *(seed->data<int>());
      } else {
        seed_data =
            context.Attr<bool>("fix_seed") ? context.Attr<int>("seed") : 0;
      }
      auto engine = framework::GetCPURandomEngine(seed_data);

      std::uniform_real_distribution<float> dist(0, 1);

      for (size_t i = 0; i < size; ++i) {
        if (dist(*engine) < dropout_prob) {
          // dropped: zero both the output and the mask entry
          mask_data[i] = 0;
          y_data[i] = 0;
        } else {
          mask_data[i] = 1;
          if (upscale_in_train) {
            // rescale so the expected value matches inference
            y_data[i] = x_data[i] / static_cast<T>(1.0f - dropout_prob);
          } else {
            y_data[i] = x_data[i];
          }
        }
      }
    } else {
      if (upscale_in_train) {
        // inference with upscale_in_train: identity copy
        const auto* X_data = x->data<T>();
        auto* Y_data = y->mutable_data<T>(context.GetPlace());
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
        for (int i = 0; i < x->numel(); i++) {
          Y_data[i] = X_data[i];
        }
      } else {
        // inference with downgrade_in_infer: scale by the keep probability
        auto X = EigenMatrix<T>::Reshape(*x, 1);
        auto Y = EigenMatrix<T>::Reshape(*y, 1);
        auto& place =
            *context.template device_context<DeviceContext>().eigen_device();
        Y.device(place) = X * static_cast<T>(1.0f - dropout_prob);
      }
    }
  }
};
// Backward kernel for dropout: dX = dY masked/scaled to mirror the forward
// pass.  With the saved mask M (0/1 per element):
//   upscale_in_train:      dX = dY * M / (1 - p)   (dX = 0 when p == 1)
//   downgrade_in_infer:    dX = dY * M
// In test mode there is no mask, so the whole gradient is scaled instead.
template <typename DeviceContext, typename T>
class DropoutGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* grad_x = context.Output<Tensor>(framework::GradVarName("X"));
    auto* grad_y = context.Input<Tensor>(framework::GradVarName("Out"));
    auto* mask = context.Input<Tensor>("Mask");
    grad_x->mutable_data<T>(context.GetPlace());
    auto dX = EigenVector<T>::Flatten(*grad_x);
    auto dY = EigenVector<T>::Flatten(*grad_y);

    auto& place =
        *context.template device_context<DeviceContext>().eigen_device();
    auto& dropout_implementation =
        context.Attr<std::string>("dropout_implementation");
    if (context.Attr<bool>("is_test") == true) {
      // no mask was produced in test mode: apply the inference scaling
      if (dropout_implementation == "upscale_in_train") {
        dX.device(place) = static_cast<T>(1) * dY;
      } else {
        float dropout_prob = context.Attr<float>("dropout_prob");
        dX.device(place) = dY * static_cast<T>(1.0f - dropout_prob);
      }
    } else {
      auto M = EigenVector<uint8_t>::Flatten(*mask);
      if (dropout_implementation == "upscale_in_train") {
        float dropout_prob = context.Attr<float>("dropout_prob");
        if (dropout_prob == 1.0f) {
          // forward zeroed everything; gradient is identically zero
          dX.device(place) = static_cast<T>(0) * dY;
        } else {
          dX.device(place) =
              dY * M.cast<T>() / static_cast<T>(1.0f - dropout_prob);
        }
      } else {
        dX.device(place) = dY * M.cast<T>();
      }
    }
  }
};
} // namespace operators
} // namespace paddle
|
resize.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% RRRR EEEEE SSSSS IIIII ZZZZZ EEEEE %
% R R E SS I ZZ E %
% RRRR EEE SSS I ZZZ EEE %
% R R E SS I ZZ E %
% R R EEEEE SSSSS IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Image Resize Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/draw.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/magick.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resize-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/version.h"
#if defined(MAGICKCORE_LQR_DELEGATE)
#include <lqr.h>
#endif
/*
Typedef declarations.
*/
struct _ResizeFilter
{
  double
    (*filter)(const double,const ResizeFilter *),   /* weighting function */
    (*window)(const double,const ResizeFilter *),   /* windowing function */
    support,        /* filter region of support - the filter support limit */
    window_support, /* window support, usually equal to support (expert only) */
    scale,          /* dimension scaling to fit window support (usually 1.0) */
    blur,           /* x-scale (blur-sharpen) */
    coefficient[7]; /* cubic coefficients for BC-cubic filters */
  ResizeWeightingFunctionType
    filterWeightingType,   /* identifies the weighting function in use */
    windowWeightingType;   /* identifies the windowing function in use */
  size_t
    signature;             /* structure validity marker */
};
/*
  Forward declarations.
*/
static double
I0(double x),
BesselOrderOne(double),
Sinc(const double, const ResizeFilter *),
SincFast(const double, const ResizeFilter *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F i l t e r F u n c t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% These are the various filter and windowing functions that are provided.
%
% They are internal to this module only. See AcquireResizeFilterInfo() for
% details of the access to these functions, via the GetResizeFilterSupport()
% and GetResizeFilterWeight() API interface.
%
% The individual filter functions have this format...
%
%      static double FilterName(const double x,const ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
% o x: the distance from the sampling point generally in the range of 0 to
%      support.  The GetResizeFilterWeight() ensures this is a positive value.
%
% o resize_filter: current filter information. This allows function to
% access support, and possibly other pre-calculated information defining
% the functions.
%
*/
static double Blackman(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Blackman: 2nd order cosine windowing function:
      0.42 + 0.5 cos(pi x) + 0.08 cos(2pi x)
    Rewritten (by Chantal Racette and Nicolas Robidoux) via the identity
    cos(2t) = 2 cos(t)^2 - 1 so only one trig call and five flops are needed:
      0.34 + cos(pi x)*(0.5 + 0.16*cos(pi x)).
  */
  const double cos_pix=cos((double) (MagickPI*x));
  magick_unreferenced(resize_filter);
  return(0.34+cos_pix*(0.5+cos_pix*0.16));
}
static double Bohman(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Bohman: 2nd order cosine windowing function:
      (1-x) cos(pi x) + sin(pi x)/pi.
    One trig call, one sqrt, and seven flops (Nicolas Robidoux): since the
    support of Bohman is 1.0, sin(pi x) >= 0 on the whole domain, so it may
    be recovered as sqrt(1 - cos(pi x)^2).
  */
  const double c=cos((double) (MagickPI*x));
  const double s=sqrt(1.0-c*c);
  magick_unreferenced(resize_filter);
  return((1.0-x)*c+(1.0/MagickPI)*s);
}
static double Box(const double magick_unused(x),
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(x);
  magick_unreferenced(resize_filter);
  /*
    Box filter: constant weighting (every tap weighs the same).  The result
    is deliberately NOT clipped to the support range; resize point sampling
    relies on being able to request points beyond the nominal 0.0 support.
  */
  return(1.0);
}
static double Cosine(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Cosine window function: cos((pi/2)*x).
  */
  return((double) cos((double) (MagickPI2*x)));
}
static double CubicBC(const double x,const ResizeFilter *resize_filter)
{
  /*
    Cubic filter defined by pre-computed B,C coefficients:
      Mitchell-Netravali  B = 1/3 C = 1/3  "Balanced" cubic spline filter
      Catmull-Rom         B = 0   C = 1/2  Interpolatory, exact on linears
      Spline              B = 1   C = 0    B-Spline Gaussian approximation
      Hermite             B = 0   C = 0    B-Spline interpolator
    See Mitchell and Netravali, "Reconstruction Filters in Computer
    Graphics", Computer Graphics 22(4), August 1988.
    AcquireResizeFilter() derives the coefficients from B,C as:
      coeff[0] = ( 6 - 2*B        )/6    (P0; P1 is always 0)
      coeff[1] = (-18 +12*B + 6*C )/6    (P2)
      coeff[2] = ( 12 - 9*B - 6*C )/6    (P3)
      coeff[3] = (      8*B +24*C )/6    (Q0)
      coeff[4] = (    -12*B -48*C )/6    (Q1)
      coeff[5] = (      6*B +30*C )/6    (Q2)
      coeff[6] = (    - 1*B - 6*C )/6    (Q3)
    which define the piecewise polynomial:
      P0 + P1*x + P2*x^2 + P3*x^3    0 <= x < 1
      Q0 + Q1*x + Q2*x^2 + Q3*x^3    1 <= x < 2
    continuous in both value and slope at the join.
  */
  const double *coeff=resize_filter->coefficient;
  if (x < 1.0)
    return(coeff[0]+x*(x*(coeff[1]+x*coeff[2])));
  if (x < 2.0)
    return(coeff[3]+x*(coeff[4]+x*(coeff[5]+x*coeff[6])));
  return(0.0);
}
static double Gaussian(const double x,const ResizeFilter *resize_filter)
{
  /*
    Gaussian with sigma = 1/2 by default (user adjustable):
      exp(-(x^2)/(2*sigma^2))
    normalized by 1/(sqrt(2*PI)*sigma^2) in 1D or 1/(PI*sigma^2) radially --
    but the normalization multiplier is irrelevant when the Gaussian is
    used as a resize filter, so it is omitted here.  The pre-computed
    coefficients (set up by AcquireResizeFilter(), per the original notes)
    are:
      coefficient[0] = sigma                       (informative only)
      coefficient[1] = 1/(2*sigma^2)               (the only one used)
      coefficient[2] = 1/(sqrt(2*PI)*sigma^2)      (informative only)
    Keeping sigma independent of the blur/support settings permits special
    'small sigma' Gaussians without the filter 'missing' pixels when the
    support gets too small.
  */
  const double gamma=resize_filter->coefficient[1];
  return(exp((double) (-gamma*x*x)));
}
static double Hann(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Hann: raised-cosine window function:
      0.5 + 0.5*cos(pi*x).
  */
  const double cos_pix=cos((double) (MagickPI*x));
  magick_unreferenced(resize_filter);
  return(0.5+0.5*cos_pix);
}
static double Hamming(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  /*
    Hamming: offset-cosine window function:
      0.54 + 0.46*cos(pi*x).
  */
  const double cos_pix=cos((double) (MagickPI*x));
  magick_unreferenced(resize_filter);
  return(0.54+0.46*cos_pix);
}
static double Jinc(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Jinc(x) = BesselJ1(pi x)/x -- the cylindrical analogue of Sinc().
    See Pratt, "Digital Image Processing", p.97, and
    http://mathworld.wolfram.com/JincFunction.html.  Paul Heckbert's
    original "zoom" program called this "Bessel", but "Jinc" is the more
    accurate name.
  */
  if (x == 0.0)
    return(0.5*MagickPI);  /* limiting value of J1(pi x)/x as x -> 0 */
  return(BesselOrderOne(MagickPI*x)/x);
}
static double Kaiser(const double x,const ResizeFilter *resize_filter)
{
  /*
    Kaiser (Bessel) windowing function:
      I0(beta*sqrt(1-x^2)) / I0(0)
    coefficient[0] holds beta, a free parameter from 5 to 8 (default 6.5,
    often expressed as alpha*PI).  coefficient[1] is a normalization factor;
    it is not strictly required, but without it the window has a large value
    at x=0, making comparison with other windowing functions difficult.
  */
  const double beta=resize_filter->coefficient[0];
  const double normalization=resize_filter->coefficient[1];
  return(normalization*I0(beta*sqrt((double) (1.0-x*x))));
}
static double Lagrange(const double x,const ResizeFilter *resize_filter)
{
  double
    weight;
  ssize_t
    j,
    piece,
    pieces;
  /*
    Lagrange piecewise polynomial fit of Sinc.  The 'order' of the Lagrange
    function follows from the overall support of the filter: a support of 2
    yields a Lagrange-4 (piecewise cubic).  'piece' selects which segment of
    the piecewise polynomial x falls in.  See "Survey: Interpolation
    Methods", IEEE Transactions on Medical Imaging, Vol 18, No 11,
    November 1999, p1049-1075 -- Equation 27 on p1064.
  */
  if (x > resize_filter->support)
    return(0.0);
  pieces=(ssize_t) (2.0*resize_filter->window_support);  /* number of pieces */
  piece=(ssize_t) (resize_filter->window_support+x);
  weight=1.0f;
  for (j=0; j < pieces; j++)
  {
    if (j == piece)
      continue;
    weight*=(piece-j-x)/(piece-j);
  }
  return(weight);
}
static double Quadratic(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Quadratic (2nd order) B-Spline approximation of a Gaussian.
  */
  if (x < 0.5)
    return(0.75-x*x);
  if (x < 1.5)
    {
      const double t=x-1.5;
      return(0.5*t*t);
    }
  return(0.0);
}
static double Sinc(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Scaled sinc(x) computed with a direct trig call:
      sinc(x) == sin(pi x)/(pi x),  with sinc(0) == 1.
  */
  if (x == 0.0)
    return((double) 1.0);
  {
    const double pix=(double) (MagickPI*x);
    return(sin((double) pix)/pix);
  }
}
static double SincFast(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Approximations of the sinc function sin(pi x)/(pi x) over the interval
    [-4,4] constructed by Nicolas Robidoux and Chantal Racette with funding
    from the Natural Sciences and Engineering Research Council of Canada.
    Although the approximations are polynomials (for low order of
    approximation) and quotients of polynomials (for higher order of
    approximation) and consequently are similar in form to Taylor polynomials /
    Pade approximants, the approximations are computed with a completely
    different technique.
    Summary: These approximations are "the best" in terms of bang (accuracy)
    for the buck (flops). More specifically: Among the polynomial quotients
    that can be computed using a fixed number of flops (with a given "+ - * /
    budget"), the chosen polynomial quotient is the one closest to the
    approximated function with respect to maximum absolute relative error over
    the given interval.
    The Remez algorithm, as implemented in the boost library's minimax package,
    is the key to the construction: http://www.boost.org/doc/libs/1_36_0/libs/
    math/doc/sf_and_dist/html/math_toolkit/backgrounders/remez.html
    If outside of the interval of approximation, use the standard trig formula.
  */
  if (x > 4.0)
    {
      /* Beyond the fitted interval: fall back to the exact formula. */
      const double alpha=(double) (MagickPI*x);
      return(sin((double) alpha)/alpha);
    }
  {
    /*
      The approximations only depend on x^2 (sinc is an even function).
      Each branch factors out the known zeros at x = 1, 2, 3, 4 via the
      (xx-1)(xx-4)(xx-9)(xx-16) product, with accuracy matched to the
      configured quantum depth.
    */
    const double xx = x*x;
#if MAGICKCORE_QUANTUM_DEPTH <= 8
    /*
      Maximum absolute relative error 6.3e-6 < 1/2^17.
    */
    const double c0 = 0.173610016489197553621906385078711564924e-2L;
    const double c1 = -0.384186115075660162081071290162149315834e-3L;
    const double c2 = 0.393684603287860108352720146121813443561e-4L;
    const double c3 = -0.248947210682259168029030370205389323899e-5L;
    const double c4 = 0.107791837839662283066379987646635416692e-6L;
    const double c5 = -0.324874073895735800961260474028013982211e-8L;
    const double c6 = 0.628155216606695311524920882748052490116e-10L;
    const double c7 = -0.586110644039348333520104379959307242711e-12L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#elif MAGICKCORE_QUANTUM_DEPTH <= 16
    /*
      Max. abs. rel. error 2.2e-8 < 1/2^25.
    */
    const double c0 = 0.173611107357320220183368594093166520811e-2L;
    const double c1 = -0.384240921114946632192116762889211361285e-3L;
    const double c2 = 0.394201182359318128221229891724947048771e-4L;
    const double c3 = -0.250963301609117217660068889165550534856e-5L;
    const double c4 = 0.111902032818095784414237782071368805120e-6L;
    const double c5 = -0.372895101408779549368465614321137048875e-8L;
    const double c6 = 0.957694196677572570319816780188718518330e-10L;
    const double c7 = -0.187208577776590710853865174371617338991e-11L;
    const double c8 = 0.253524321426864752676094495396308636823e-13L;
    const double c9 = -0.177084805010701112639035485248501049364e-15L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*(c7+xx*(c8+xx*c9))))))));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#else
    /*
      Max. abs. rel. error 1.2e-12 < 1/2^39.
      Higher depths use a rational approximation p/q for extra accuracy.
    */
    const double c0 = 0.173611111110910715186413700076827593074e-2L;
    const double c1 = -0.289105544717893415815859968653611245425e-3L;
    const double c2 = 0.206952161241815727624413291940849294025e-4L;
    const double c3 = -0.834446180169727178193268528095341741698e-6L;
    const double c4 = 0.207010104171026718629622453275917944941e-7L;
    const double c5 = -0.319724784938507108101517564300855542655e-9L;
    const double c6 = 0.288101675249103266147006509214934493930e-11L;
    const double c7 = -0.118218971804934245819960233886876537953e-13L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    const double d0 = 1.0L;
    const double d1 = 0.547981619622284827495856984100563583948e-1L;
    const double d2 = 0.134226268835357312626304688047086921806e-2L;
    const double d3 = 0.178994697503371051002463656833597608689e-4L;
    const double d4 = 0.114633394140438168641246022557689759090e-6L;
    const double q = d0+xx*(d1+xx*(d2+xx*(d3+xx*d4)));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)/q*p);
#endif
  }
}
static double Triangle(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Linear (1st order) B-Spline: bilinear interpolation, the 1D Tent
    filter, or a Bartlett 2D cone filter.  Also used as the Bartlett
    windowing function for Sinc().
  */
  return(x < 1.0 ? 1.0-x : 0.0);
}
static double Welch(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Welch parabolic windowing filter: 1 - x^2 inside the unit support.
  */
  return(x < 1.0 ? 1.0-x*x : 0.0);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e R e s i z e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireResizeFilter() allocates the ResizeFilter structure. Choose from
% these filters:
%
% FIR (Finite impulse Response) Filters
% Box Triangle Quadratic
% Spline Hermite Catrom
% Mitchell
%
% IIR (Infinite impulse Response) Filters
% Gaussian Sinc Jinc (Bessel)
%
% Windowed Sinc/Jinc Filters
% Blackman Bohman Lanczos
% Hann Hamming Cosine
% Kaiser Welch Parzen
% Bartlett
%
% Special Purpose Filters
% Cubic SincFast LanczosSharp Lanczos2 Lanczos2Sharp
% Robidoux RobidouxSharp
%
% The users "-filter" selection is used to lookup the default 'expert'
% settings for that filter from a internal table. However any provided
% 'expert' settings (see below) may override this selection.
%
% FIR filters are used as is, and are limited to that filters support window
% (unless over-ridden). 'Gaussian' while classed as an IIR filter, is also
% simply clipped by its support size (currently 1.5 or approximately 3*sigma
% as recommended by many references)
%
%  The special 'cylindrical' filter flag will promote the default 4-lobed
% Windowed Sinc filter to a 3-lobed Windowed Jinc equivalent, which is better
% suited to this style of image resampling. This typically happens when using
% such a filter for images distortions.
%
% SPECIFIC FILTERS:
%
% Directly requesting 'Sinc', 'Jinc' function as a filter will force the use
% of function without any windowing, or promotion for cylindrical usage. This
% is not recommended, except by image processing experts, especially as part
% of expert option filter function selection.
%
% Two forms of the 'Sinc' function are available: Sinc and SincFast. Sinc is
% computed using the traditional sin(pi*x)/(pi*x); it is selected if the user
% specifically specifies the use of a Sinc filter. SincFast uses highly
% accurate (and fast) polynomial (low Q) and rational (high Q) approximations,
% and will be used by default in most cases.
%
% The Lanczos filter is a special 3-lobed Sinc-windowed Sinc filter (promoted
% to Jinc-windowed Jinc for cylindrical (Elliptical Weighted Average) use).
% The Sinc version is the most popular windowed filter.
%
% LanczosSharp is a slightly sharpened (blur=0.9812505644269356 < 1) form of
% the Lanczos filter, specifically designed for EWA distortion (as a
% Jinc-Jinc); it can also be used as a slightly sharper orthogonal Lanczos
% (Sinc-Sinc) filter. The chosen blur value comes as close as possible to
% satisfying the following condition without changing the character of the
% corresponding EWA filter:
%
% 'No-Op' Vertical and Horizontal Line Preservation Condition: Images with
% only vertical or horizontal features are preserved when performing 'no-op"
% with EWA distortion.
%
% The Lanczos2 and Lanczos2Sharp filters are 2-lobe versions of the Lanczos
% filters. The 'sharp' version uses a blur factor of 0.9549963639785485,
% again chosen because the resulting EWA filter comes as close as possible to
% satisfying the above condition.
%
% Robidoux is another filter tuned for EWA. It is the Keys cubic filter
% defined by B=(228 - 108 sqrt(2))/199. Robidoux satisfies the "'No-Op'
% Vertical and Horizontal Line Preservation Condition" exactly, and it
% moderately blurs high frequency 'pixel-hash' patterns under no-op. It turns
% out to be close to both Mitchell and Lanczos2Sharp. For example, its first
% crossing is at (36 sqrt(2) + 123)/(72 sqrt(2) + 47), almost the same as the
% first crossing of Mitchell and Lanczos2Sharp.
%
% RodidouxSharp is a slightly sharper version of Rodidoux, some believe it
% is too sharp. It is designed to minimize the maximum possible change in
% a pixel value which is at one of the extremes (e.g., 0 or 255) under no-op
% conditions. Amazingly Mitchell falls roughly between Rodidoux and
% RodidouxSharp, though this seems to have been pure coincidence.
%
% 'EXPERT' OPTIONS:
%
% These artifact "defines" are not recommended for production use without
% expert knowledge of resampling, filtering, and the effects they have on the
% resulting resampled (resized or distorted) image.
%
% They can be used to override any and all filter default, and it is
% recommended you make good use of "filter:verbose" to make sure that the
% overall effect of your selection (before and after) is as expected.
%
% "filter:verbose" controls whether to output the exact results of the
% filter selections made, as well as plotting data for graphing the
% resulting filter over the filters support range.
%
% "filter:filter" select the main function associated with this filter
% name, as the weighting function of the filter. This can be used to
% set a windowing function as a weighting function, for special
% purposes, such as graphing.
%
% If a "filter:window" operation has not been provided, a 'Box'
% windowing function will be set to denote that no windowing function is
% being used.
%
% "filter:window" Select this windowing function for the filter. While any
% filter could be used as a windowing function, using the 'first lobe' of
% that filter over the whole support window, using a non-windowing
%      function is not advisable.  If no weighting filter function is specified
% a 'SincFast' filter is used.
%
% "filter:lobes" Number of lobes to use for the Sinc/Jinc filter. This a
% simpler method of setting filter support size that will correctly
% handle the Sinc/Jinc switch for an operators filtering requirements.
% Only integers should be given.
%
% "filter:support" Set the support size for filtering to the size given.
% This not recommended for Sinc/Jinc windowed filters (lobes should be
% used instead). This will override any 'filter:lobes' option.
%
% "filter:win-support" Scale windowing function to this size instead. This
%      causes the windowing (or self-windowing Lagrange filter) to act as if
%      the support window is much larger than what is actually supplied to
% to the calling operator. The filter however is still clipped to the
% real support size given, by the support range supplied to the caller.
% If unset this will equal the normal filter support size.
%
% "filter:blur" Scale the filter and support window by this amount. A value
% of > 1 will generally result in a more blurred image with more ringing
% effects, while a value <1 will sharpen the resulting image with more
% aliasing effects.
%
% "filter:sigma" The sigma value to use for the Gaussian filter only.
% Defaults to '1/2'. Using a different sigma effectively provides a
% method of using the filter as a 'blur' convolution. Particularly when
% using it for Distort.
%
% "filter:b"
% "filter:c" Override the preset B,C values for a Cubic filter.
%      If only one of these is given it is assumed to be a 'Keys' type of
% filter such that B+2C=1, where Keys 'alpha' value = C.
%
% Examples:
%
% Set a true un-windowed Sinc filter with 10 lobes (very slow):
% -define filter:filter=Sinc
% -define filter:lobes=8
%
% Set an 8 lobe Lanczos (Sinc or Jinc) filter:
% -filter Lanczos
% -define filter:lobes=8
%
% The format of the AcquireResizeFilter method is:
%
% ResizeFilter *AcquireResizeFilter(const Image *image,
% const FilterType filter_type,const MagickBooleanType cylindrical,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filter: the filter type, defining a preset filter, window and support.
% The artifact settings listed above will override those selections.
%
% o blur: blur the filter by this amount, use 1.0 if unknown. Image
% artifact "filter:blur" will override this API call usage, including any
% internal change (such as for cylindrical usage).
%
% o radial: use a 1D orthogonal filter (Sinc) or 2D cylindrical (radial)
% filter (Jinc).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate ResizeFilter *AcquireResizeFilter(const Image *image,
const FilterType filter,const MagickBooleanType cylindrical,
ExceptionInfo *exception)
{
const char
*artifact;
FilterType
filter_type,
window_type;
double
B,
C,
value;
register ResizeFilter
*resize_filter;
/*
Table Mapping given Filter, into Weighting and Windowing functions.
A 'Box' windowing function means its a simble non-windowed filter.
An 'SincFast' filter function could be upgraded to a 'Jinc' filter if a
"cylindrical" is requested, unless a 'Sinc' or 'SincFast' filter was
specifically requested by the user.
WARNING: The order of this table must match the order of the FilterType
enumeration specified in "resample.h", or the filter names will not match
the filter being setup.
You can check filter setups with the "filter:verbose" expert setting.
*/
static struct
{
FilterType
filter,
window;
} const mapping[SentinelFilter] =
{
{ UndefinedFilter, BoxFilter }, /* Undefined (default to Box) */
{ PointFilter, BoxFilter }, /* SPECIAL: Nearest neighbour */
{ BoxFilter, BoxFilter }, /* Box averaging filter */
{ TriangleFilter, BoxFilter }, /* Linear interpolation filter */
{ HermiteFilter, BoxFilter }, /* Hermite interpolation filter */
{ SincFastFilter, HannFilter }, /* Hann -- cosine-sinc */
{ SincFastFilter, HammingFilter }, /* Hamming -- '' variation */
{ SincFastFilter, BlackmanFilter }, /* Blackman -- 2*cosine-sinc */
{ GaussianFilter, BoxFilter }, /* Gaussian blur filter */
{ QuadraticFilter, BoxFilter }, /* Quadratic Gaussian approx */
{ CubicFilter, BoxFilter }, /* General Cubic Filter, Spline */
{ CatromFilter, BoxFilter }, /* Cubic-Keys interpolator */
{ MitchellFilter, BoxFilter }, /* 'Ideal' Cubic-Keys filter */
{ JincFilter, BoxFilter }, /* Raw 3-lobed Jinc function */
{ SincFilter, BoxFilter }, /* Raw 4-lobed Sinc function */
{ SincFastFilter, BoxFilter }, /* Raw fast sinc ("Pade"-type) */
{ SincFastFilter, KaiserFilter }, /* Kaiser -- square root-sinc */
{ LanczosFilter, WelchFilter }, /* Welch -- parabolic (3 lobe) */
{ SincFastFilter, CubicFilter }, /* Parzen -- cubic-sinc */
{ SincFastFilter, BohmanFilter }, /* Bohman -- 2*cosine-sinc */
{ SincFastFilter, TriangleFilter }, /* Bartlett -- triangle-sinc */
{ LagrangeFilter, BoxFilter }, /* Lagrange self-windowing */
{ LanczosFilter, LanczosFilter }, /* Lanczos Sinc-Sinc filters */
{ LanczosSharpFilter, LanczosSharpFilter }, /* | these require */
{ Lanczos2Filter, Lanczos2Filter }, /* | special handling */
{ Lanczos2SharpFilter, Lanczos2SharpFilter },
{ RobidouxFilter, BoxFilter }, /* Cubic Keys tuned for EWA */
{ RobidouxSharpFilter, BoxFilter }, /* Sharper Cubic Keys for EWA */
{ LanczosFilter, CosineFilter }, /* Cosine window (3 lobes) */
{ SplineFilter, BoxFilter }, /* Spline Cubic Filter */
{ LanczosRadiusFilter, LanczosFilter }, /* Lanczos with integer radius */
};
/*
Table mapping the filter/window from the above table to an actual function.
The default support size for that filter as a weighting function, the range
to scale with to use that function as a sinc windowing function, (typ 1.0).
Note that the filter_type -> function is 1 to 1 except for Sinc(),
SincFast(), and CubicBC() functions, which may have multiple filter to
function associations.
See "filter:verbose" handling below for the function -> filter mapping.
*/
static struct
{
double
(*function)(const double,const ResizeFilter*),
support, /* Default lobes/support size of the weighting filter. */
scale, /* Support when function used as a windowing function
Typically equal to the location of the first zero crossing. */
B,C; /* BC-spline coefficients, ignored if not a CubicBC filter. */
ResizeWeightingFunctionType weightingFunctionType;
} const filters[SentinelFilter] =
{
/* .--- support window (if used as a Weighting Function)
| .--- first crossing (if used as a Windowing Function)
| | .--- B value for Cubic Function
| | | .---- C value for Cubic Function
| | | | */
{ Box, 0.5, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Undefined (default to Box) */
{ Box, 0.0, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Point (special handling) */
{ Box, 0.5, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Box */
{ Triangle, 1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Triangle */
{ CubicBC, 1.0, 1.0, 0.0, 0.0, CubicBCWeightingFunction }, /* Hermite (cubic B=C=0) */
{ Hann, 1.0, 1.0, 0.0, 0.0, HannWeightingFunction }, /* Hann, cosine window */
{ Hamming, 1.0, 1.0, 0.0, 0.0, HammingWeightingFunction }, /* Hamming, '' variation */
{ Blackman, 1.0, 1.0, 0.0, 0.0, BlackmanWeightingFunction }, /* Blackman, 2*cosine window */
{ Gaussian, 2.0, 1.5, 0.0, 0.0, GaussianWeightingFunction }, /* Gaussian */
{ Quadratic, 1.5, 1.5, 0.0, 0.0, QuadraticWeightingFunction },/* Quadratic gaussian */
{ CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* General Cubic Filter */
{ CubicBC, 2.0, 1.0, 0.0, 0.5, CubicBCWeightingFunction }, /* Catmull-Rom (B=0,C=1/2) */
{ CubicBC, 2.0, 8.0/7.0, 1./3., 1./3., CubicBCWeightingFunction }, /* Mitchell (B=C=1/3) */
{ Jinc, 3.0, 1.2196698912665045, 0.0, 0.0, JincWeightingFunction }, /* Raw 3-lobed Jinc */
{ Sinc, 4.0, 1.0, 0.0, 0.0, SincWeightingFunction }, /* Raw 4-lobed Sinc */
{ SincFast, 4.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Raw fast sinc ("Pade"-type) */
{ Kaiser, 1.0, 1.0, 0.0, 0.0, KaiserWeightingFunction }, /* Kaiser (square root window) */
{ Welch, 1.0, 1.0, 0.0, 0.0, WelchWeightingFunction }, /* Welch (parabolic window) */
{ CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* Parzen (B-Spline window) */
{ Bohman, 1.0, 1.0, 0.0, 0.0, BohmanWeightingFunction }, /* Bohman, 2*Cosine window */
{ Triangle, 1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Bartlett (triangle window) */
{ Lagrange, 2.0, 1.0, 0.0, 0.0, LagrangeWeightingFunction }, /* Lagrange sinc approximation */
{ SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 3-lobed Sinc-Sinc */
{ SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Sharpened */
{ SincFast, 2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 2-lobed */
{ SincFast, 2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos2, sharpened */
/* Robidoux: Keys cubic close to Lanczos2D sharpened */
{ CubicBC, 2.0, 1.1685777620836932,
0.37821575509399867, 0.31089212245300067, CubicBCWeightingFunction },
/* RobidouxSharp: Sharper version of Robidoux */
{ CubicBC, 2.0, 1.105822933719019,
0.2620145123990142, 0.3689927438004929, CubicBCWeightingFunction },
{ Cosine, 1.0, 1.0, 0.0, 0.0, CosineWeightingFunction }, /* Low level cosine window */
{ CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* Cubic B-Spline (B=1,C=0) */
{ SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Interger Radius */
};
/*
The known zero crossings of the Jinc() or more accurately the Jinc(x*PI)
function being used as a filter. It is used by the "filter:lobes" expert
setting and for 'lobes' for Jinc functions in the previous table. This way
users do not have to deal with the highly irrational lobe sizes of the Jinc
filter.
Values taken from
http://cose.math.bas.bg/webMathematica/webComputing/BesselZeros.jsp
using Jv-function with v=1, then dividing by PI.
*/
static double
jinc_zeros[16] =
{
1.2196698912665045,
2.2331305943815286,
3.2383154841662362,
4.2410628637960699,
5.2427643768701817,
6.2439216898644877,
7.2447598687199570,
8.2453949139520427,
9.2458926849494673,
10.246293348754916,
11.246622794877883,
12.246898461138105,
13.247132522181061,
14.247333735806849,
15.247508563037300,
16.247661874700962
};
/*
Allocate resize filter.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(UndefinedFilter < filter && filter < SentinelFilter);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
resize_filter=(ResizeFilter *) AcquireMagickMemory(sizeof(*resize_filter));
if (resize_filter == (ResizeFilter *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) ResetMagickMemory(resize_filter,0,sizeof(*resize_filter));
/*
Defaults for the requested filter.
*/
filter_type=mapping[filter].filter;
window_type=mapping[filter].window;
resize_filter->blur=1.0;
/* Promote 1D Windowed Sinc Filters to a 2D Windowed Jinc filters */
if ( cylindrical != MagickFalse && (filter_type == SincFastFilter) &&
(filter != SincFastFilter))
filter_type=JincFilter; /* 1D Windowed Sinc => 2D Windowed Jinc filters */
/* Expert filter setting override */
artifact=GetImageArtifact(image,"filter:filter");
if (IsStringTrue(artifact) != MagickFalse)
{
ssize_t
option;
option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
if ((UndefinedFilter < option) && (option < SentinelFilter))
{ /* Raw filter request - no window function. */
filter_type=(FilterType) option;
window_type=BoxFilter;
}
/* Filter override with a specific window function. */
artifact=GetImageArtifact(image,"filter:window");
if (artifact != (const char *) NULL)
{
option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
if ((UndefinedFilter < option) && (option < SentinelFilter))
window_type=(FilterType) option;
}
}
else
{
/* Window specified, but no filter function? Assume Sinc/Jinc. */
artifact=GetImageArtifact(image,"filter:window");
if (artifact != (const char *) NULL)
{
ssize_t
option;
option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
if ((UndefinedFilter < option) && (option < SentinelFilter))
{
filter_type= cylindrical != MagickFalse ? JincFilter
: SincFastFilter;
window_type=(FilterType) option;
}
}
}
/* Assign the real functions to use for the filters selected. */
resize_filter->filter=filters[filter_type].function;
resize_filter->support=filters[filter_type].support;
resize_filter->filterWeightingType=filters[filter_type].weightingFunctionType;
resize_filter->window=filters[window_type].function;
resize_filter->windowWeightingType=filters[window_type].weightingFunctionType;
resize_filter->scale=filters[window_type].scale;
resize_filter->signature=MagickCoreSignature;
/* Filter Modifications for orthogonal/cylindrical usage */
if (cylindrical != MagickFalse)
switch (filter_type)
{
case BoxFilter:
/* Support for Cylindrical Box should be sqrt(2)/2 */
resize_filter->support=(double) MagickSQ1_2;
break;
case LanczosFilter:
case LanczosSharpFilter:
case Lanczos2Filter:
case Lanczos2SharpFilter:
case LanczosRadiusFilter:
resize_filter->filter=filters[JincFilter].function;
resize_filter->window=filters[JincFilter].function;
resize_filter->scale=filters[JincFilter].scale;
/* number of lobes (support window size) remain unchanged */
break;
default:
break;
}
  /* Global Sharpening (regardless of orthogonal/cylindrical) */
switch (filter_type)
{
case LanczosSharpFilter:
resize_filter->blur *= 0.9812505644269356;
break;
case Lanczos2SharpFilter:
resize_filter->blur *= 0.9549963639785485;
break;
/* case LanczosRadius: blur adjust is done after lobes */
default:
break;
}
/*
Expert Option Modifications.
*/
/* User Gaussian Sigma Override - no support change */
if ((resize_filter->filter == Gaussian) ||
(resize_filter->window == Gaussian) ) {
value=0.5; /* guassian sigma default, half pixel */
artifact=GetImageArtifact(image,"filter:sigma");
if (artifact != (const char *) NULL)
value=StringToDouble(artifact,(char **) NULL);
/* Define coefficents for Gaussian */
resize_filter->coefficient[0]=value; /* note sigma too */
resize_filter->coefficient[1]=PerceptibleReciprocal(2.0*value*value); /* sigma scaling */
resize_filter->coefficient[2]=PerceptibleReciprocal(Magick2PI*value*value);
/* normalization - not actually needed or used! */
if ( value > 0.5 )
resize_filter->support *= 2*value; /* increase support linearly */
}
/* User Kaiser Alpha Override - no support change */
if ((resize_filter->filter == Kaiser) ||
(resize_filter->window == Kaiser) ) {
value=6.5; /* default beta value for Kaiser bessel windowing function */
artifact=GetImageArtifact(image,"filter:alpha"); /* FUTURE: depreciate */
if (artifact != (const char *) NULL)
value=StringToDouble(artifact,(char **) NULL);
artifact=GetImageArtifact(image,"filter:kaiser-beta");
if (artifact != (const char *) NULL)
value=StringToDouble(artifact,(char **) NULL);
artifact=GetImageArtifact(image,"filter:kaiser-alpha");
if (artifact != (const char *) NULL)
value=StringToDouble(artifact,(char **) NULL)*MagickPI;
/* Define coefficents for Kaiser Windowing Function */
resize_filter->coefficient[0]=value; /* alpha */
resize_filter->coefficient[1]=PerceptibleReciprocal(I0(value));
/* normalization */
}
/* Support Overrides */
artifact=GetImageArtifact(image,"filter:lobes");
if (artifact != (const char *) NULL)
{
ssize_t
lobes;
lobes=(ssize_t) StringToLong(artifact);
if (lobes < 1)
lobes=1;
resize_filter->support=(double) lobes;
}
if (resize_filter->filter == Jinc)
{
/*
Convert a Jinc function lobes value to a real support value.
*/
if (resize_filter->support > 16)
resize_filter->support=jinc_zeros[15]; /* largest entry in table */
else
resize_filter->support=jinc_zeros[((long) resize_filter->support)-1];
      /*
        Blur this filter so support is an integer value (lobes dependent).
      */
if (filter_type == LanczosRadiusFilter)
resize_filter->blur*=floor(resize_filter->support)/
resize_filter->support;
}
/*
Expert blur override.
*/
artifact=GetImageArtifact(image,"filter:blur");
if (artifact != (const char *) NULL)
resize_filter->blur*=StringToDouble(artifact,(char **) NULL);
if (resize_filter->blur < MagickEpsilon)
resize_filter->blur=(double) MagickEpsilon;
/*
Expert override of the support setting.
*/
artifact=GetImageArtifact(image,"filter:support");
if (artifact != (const char *) NULL)
resize_filter->support=fabs(StringToDouble(artifact,(char **) NULL));
/*
Scale windowing function separately to the support 'clipping' window
that calling operator is planning to actually use. (Expert override)
*/
resize_filter->window_support=resize_filter->support; /* default */
artifact=GetImageArtifact(image,"filter:win-support");
if (artifact != (const char *) NULL)
resize_filter->window_support=fabs(StringToDouble(artifact,(char **) NULL));
/*
Adjust window function scaling to match windowing support for weighting
function. This avoids a division on every filter call.
*/
resize_filter->scale/=resize_filter->window_support;
/*
* Set Cubic Spline B,C values, calculate Cubic coefficients.
*/
B=0.0;
C=0.0;
if ((resize_filter->filter == CubicBC) ||
(resize_filter->window == CubicBC) )
{
B=filters[filter_type].B;
C=filters[filter_type].C;
if (filters[window_type].function == CubicBC)
{
B=filters[window_type].B;
C=filters[window_type].C;
}
artifact=GetImageArtifact(image,"filter:b");
if (artifact != (const char *) NULL)
{
B=StringToDouble(artifact,(char **) NULL);
C=(1.0-B)/2.0; /* Calculate C to get a Keys cubic filter. */
artifact=GetImageArtifact(image,"filter:c"); /* user C override */
if (artifact != (const char *) NULL)
C=StringToDouble(artifact,(char **) NULL);
}
else
{
artifact=GetImageArtifact(image,"filter:c");
if (artifact != (const char *) NULL)
{
C=StringToDouble(artifact,(char **) NULL);
B=1.0-2.0*C; /* Calculate B to get a Keys cubic filter. */
}
}
{
const double
twoB = B+B;
/*
Convert B,C values into Cubic Coefficents. See CubicBC().
*/
resize_filter->coefficient[0]=1.0-(1.0/3.0)*B;
resize_filter->coefficient[1]=-3.0+twoB+C;
resize_filter->coefficient[2]=2.0-1.5*B-C;
resize_filter->coefficient[3]=(4.0/3.0)*B+4.0*C;
resize_filter->coefficient[4]=-8.0*C-twoB;
resize_filter->coefficient[5]=B+5.0*C;
resize_filter->coefficient[6]=(-1.0/6.0)*B-C;
}
}
/*
Expert Option Request for verbose details of the resulting filter.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp master
{
#endif
if (IsStringTrue(GetImageArtifact(image,"filter:verbose")) != MagickFalse)
{
double
support,
x;
      /*
        Set the weighting function properly when the weighting function
        may not exactly match the filter of the same name.  EG: a Point
        filter really uses a Box weighting function with a different
        support than is typically used.
      */
if (resize_filter->filter == Box) filter_type=BoxFilter;
if (resize_filter->filter == Sinc) filter_type=SincFilter;
if (resize_filter->filter == SincFast) filter_type=SincFastFilter;
if (resize_filter->filter == Jinc) filter_type=JincFilter;
if (resize_filter->filter == CubicBC) filter_type=CubicFilter;
if (resize_filter->window == Box) window_type=BoxFilter;
if (resize_filter->window == Sinc) window_type=SincFilter;
if (resize_filter->window == SincFast) window_type=SincFastFilter;
if (resize_filter->window == Jinc) window_type=JincFilter;
if (resize_filter->window == CubicBC) window_type=CubicFilter;
/*
Report Filter Details.
*/
support=GetResizeFilterSupport(resize_filter); /* practical_support */
(void) FormatLocaleFile(stdout,
"# Resampling Filter (for graphing)\n#\n");
(void) FormatLocaleFile(stdout,"# filter = %s\n",
CommandOptionToMnemonic(MagickFilterOptions,filter_type));
(void) FormatLocaleFile(stdout,"# window = %s\n",
CommandOptionToMnemonic(MagickFilterOptions,window_type));
(void) FormatLocaleFile(stdout,"# support = %.*g\n",
GetMagickPrecision(),(double) resize_filter->support);
(void) FormatLocaleFile(stdout,"# window-support = %.*g\n",
GetMagickPrecision(),(double) resize_filter->window_support);
(void) FormatLocaleFile(stdout,"# scale-blur = %.*g\n",
GetMagickPrecision(),(double)resize_filter->blur);
if ((filter_type == GaussianFilter) || (window_type == GaussianFilter))
(void) FormatLocaleFile(stdout,"# gaussian-sigma = %.*g\n",
GetMagickPrecision(),(double)resize_filter->coefficient[0]);
if ( filter_type == KaiserFilter || window_type == KaiserFilter )
(void) FormatLocaleFile(stdout,"# kaiser-beta = %.*g\n",
GetMagickPrecision(),(double)resize_filter->coefficient[0]);
(void) FormatLocaleFile(stdout,"# practical-support = %.*g\n",
GetMagickPrecision(), (double)support);
if ( filter_type == CubicFilter || window_type == CubicFilter )
(void) FormatLocaleFile(stdout,"# B,C = %.*g,%.*g\n",
GetMagickPrecision(),(double)B, GetMagickPrecision(),(double)C);
(void) FormatLocaleFile(stdout,"\n");
/*
Output values of resulting filter graph -- for graphing filter result.
*/
for (x=0.0; x <= support; x+=0.01f)
(void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",x,
GetMagickPrecision(),(double)
GetResizeFilterWeight(resize_filter,x));
/*
A final value so gnuplot can graph the 'stop' properly.
*/
(void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",support,
GetMagickPrecision(),0.0);
}
/* Output the above once only for each image - remove setting */
(void) DeleteImageArtifact((Image *) image,"filter:verbose");
#if defined(MAGICKCORE_OPENMP_SUPPORT)
}
#endif
return(resize_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveResizeImage() adaptively resize image with pixel resampling.
%
%  This is a shortcut function for a fast interpolative resize using mesh
% interpolation. It works well for small resizes of less than +/- 50%
% of the original image size. For larger resizing on images a full
% filtered and slower resize function should be used instead.
%
% The format of the AdaptiveResizeImage method is:
%
% Image *AdaptiveResizeImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  /*
    Shortcut for a fast interpolative resize using mesh interpolation;
    simply delegates to InterpolativeResizeImage() with the mesh method.
  */
  return(InterpolativeResizeImage(image,columns,rows,MeshInterpolatePixel,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ B e s s e l O r d e r O n e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BesselOrderOne() computes the Bessel function of x of the first kind of
%  order 1.  This is used to create the Jinc() filter function below.
%
% Reduce x to |x| since j1(x)= -j1(-x), and for x in (0,8]
%
% j1(x) = x*j1(x);
%
% For x in (8,inf)
%
% j1(x) = sqrt(2/(pi*x))*(p1(x)*cos(x1)-q1(x)*sin(x1))
%
% where x1 = x-3*pi/4. Compute sin(x1) and cos(x1) as follow:
%
% cos(x1) = cos(x)cos(3pi/4)+sin(x)sin(3pi/4)
% = 1/sqrt(2) * (sin(x) - cos(x))
% sin(x1) = sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
% = -1/sqrt(2) * (sin(x) + cos(x))
%
% The format of the BesselOrderOne method is:
%
% double BesselOrderOne(double x)
%
% A description of each parameter follows:
%
% o x: double value.
%
*/
#undef I0
static double I0(double x)
{
  /*
    Modified Bessel function of the first kind, order zero: the power
    series sum_{k>=0} (x^2/4)^k/(k!)^2, accumulated until the next term
    drops below MagickEpsilon.
  */
  double
    sum,
    term,
    y;

  ssize_t
    k;

  y=x*x/4.0;
  sum=1.0;
  term=y;
  k=2;
  while (term > MagickEpsilon)
  {
    sum+=term;
    term*=y/((double) k*k);
    k++;
  }
  return(sum);
}
#undef J1
static double J1(double x)
{
  /*
    Rational minimax approximation to j1(x)/x for x in (0,8]: two
    degree-8 polynomials in x^2, each evaluated by Horner's rule from
    the highest coefficient down.
  */
  static const double
    Pone[] =
    {
      0.581199354001606143928050809e+21,
      -0.6672106568924916298020941484e+20,
      0.2316433580634002297931815435e+19,
      -0.3588817569910106050743641413e+17,
      0.2908795263834775409737601689e+15,
      -0.1322983480332126453125473247e+13,
      0.3413234182301700539091292655e+10,
      -0.4695753530642995859767162166e+7,
      0.270112271089232341485679099e+4
    },
    Qone[] =
    {
      0.11623987080032122878585294e+22,
      0.1185770712190320999837113348e+20,
      0.6092061398917521746105196863e+17,
      0.2081661221307607351240184229e+15,
      0.5243710262167649715406728642e+12,
      0.1013863514358673989967045588e+10,
      0.1501793594998585505921097578e+7,
      0.1606931573481487801970916749e+4,
      0.1e+1
    };

  double
    denominator,
    numerator;

  ssize_t
    k;

  numerator=Pone[8];
  denominator=Qone[8];
  for (k=7; k >= 0; k--)
  {
    numerator=numerator*x*x+Pone[k];
    denominator=denominator*x*x+Qone[k];
  }
  return(numerator/denominator);
}
#undef P1
static double P1(double x)
{
  /*
    Asymptotic amplitude term "P" of the large-argument (x > 8) Bessel
    expansion: a rational function in (8/x)^2 evaluated by Horner's
    rule; tends to 1 as x grows.
  */
  static const double
    Pone[] =
    {
      0.352246649133679798341724373e+5,
      0.62758845247161281269005675e+5,
      0.313539631109159574238669888e+5,
      0.49854832060594338434500455e+4,
      0.2111529182853962382105718e+3,
      0.12571716929145341558495e+1
    },
    Qone[] =
    {
      0.352246649133679798068390431e+5,
      0.626943469593560511888833731e+5,
      0.312404063819041039923015703e+5,
      0.4930396490181088979386097e+4,
      0.2030775189134759322293574e+3,
      0.1e+1
    };

  double
    denominator,
    numerator;

  ssize_t
    k;

  numerator=Pone[5];
  denominator=Qone[5];
  for (k=4; k >= 0; k--)
  {
    numerator=numerator*(8.0/x)*(8.0/x)+Pone[k];
    denominator=denominator*(8.0/x)*(8.0/x)+Qone[k];
  }
  return(numerator/denominator);
}
#undef Q1
static double Q1(double x)
{
  /*
    Asymptotic phase term "Q" (scaled by x/8) of the large-argument
    (x > 8) Bessel expansion: a rational function in (8/x)^2 evaluated
    by Horner's rule; tends to 3/64 as x grows.
  */
  static const double
    Pone[] =
    {
      0.3511751914303552822533318e+3,
      0.7210391804904475039280863e+3,
      0.4259873011654442389886993e+3,
      0.831898957673850827325226e+2,
      0.45681716295512267064405e+1,
      0.3532840052740123642735e-1
    },
    Qone[] =
    {
      0.74917374171809127714519505e+4,
      0.154141773392650970499848051e+5,
      0.91522317015169922705904727e+4,
      0.18111867005523513506724158e+4,
      0.1038187585462133728776636e+3,
      0.1e+1
    };

  double
    denominator,
    numerator;

  ssize_t
    k;

  numerator=Pone[5];
  denominator=Qone[5];
  for (k=4; k >= 0; k--)
  {
    numerator=numerator*(8.0/x)*(8.0/x)+Pone[k];
    denominator=denominator*(8.0/x)*(8.0/x)+Qone[k];
  }
  return(numerator/denominator);
}
static double BesselOrderOne(double x)
{
  /*
    Bessel function of the first kind, order one.  Uses the odd
    symmetry j1(-x) == -j1(x): evaluate at |x|, restore the sign.  For
    |x| < 8 use x*J1(x) (J1 approximates j1(x)/x); otherwise use the
    asymptotic form with amplitude P1 and phase Q1 terms.
  */
  double
    signed_x,
    value;

  if (x == 0.0)
    return(0.0);
  signed_x=x;
  if (x < 0.0)
    x=(-x);
  if (x < 8.0)
    return(signed_x*J1(x));
  value=sqrt((double) (2.0/(MagickPI*x)))*(P1(x)*(1.0/sqrt(2.0)*(sin((double) x)-
    cos((double) x)))-8.0/x*Q1(x)*(-1.0/sqrt(2.0)*(sin((double) x)+
    cos((double) x))));
  if (signed_x < 0.0)
    value=(-value);
  return(value);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y R e s i z e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyResizeFilter() destroy the resize filter.
%
% The format of the DestroyResizeFilter method is:
%
% ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
% o resize_filter: the resize filter.
%
*/
MagickPrivate ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
{
  /*
    Invalidate the signature before releasing the struct so a stale
    pointer trips the assertions above on any later use.
  */
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  resize_filter->signature=(~MagickCoreSignature);
  return((ResizeFilter *) RelinquishMagickMemory(resize_filter));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t R e s i z e F i l t e r S u p p o r t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetResizeFilterSupport() return the current support window size for this
% filter. Note that this may have been enlarged by filter:blur factor.
%
% The format of the GetResizeFilterSupport method is:
%
% double GetResizeFilterSupport(const ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
% o filter: Image filter to use.
%
*/
/*
  Return the filter's internal coefficient array (Gaussian sigma terms,
  Kaiser alpha, cubic B,C polynomial terms, etc.) as a bare double
  pointer.  Note the cast drops const: callers get a direct view of the
  filter's internal storage.
*/
MagickPrivate double *GetResizeFilterCoefficient(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return((double *) resize_filter->coefficient);
}
/*
  Return the blur factor applied to x offsets before filter evaluation
  (see GetResizeFilterWeight()).
*/
MagickPrivate double GetResizeFilterBlur(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->blur);
}
/*
  Return the windowing-function scale factor (pre-divided by the window
  support when the filter was acquired).
*/
MagickPrivate double GetResizeFilterScale(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->scale);
}
/*
  Return the support (clipping extent) of the windowing function, which
  may differ from the filter support via the "filter:win-support"
  expert setting.
*/
MagickPrivate double GetResizeFilterWindowSupport(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->window_support);
}
/*
  Return the enum identifying which weighting function the filter
  itself uses (e.g. for acceleration paths that special-case it).
*/
MagickPrivate ResizeWeightingFunctionType GetResizeFilterWeightingType(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->filterWeightingType);
}
/*
  Return the enum identifying which weighting function the filter's
  window uses.
*/
MagickPrivate ResizeWeightingFunctionType GetResizeFilterWindowWeightingType(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->windowWeightingType);
}
/*
  Return the practical support window: the nominal support expanded by
  the filter's blur factor.
*/
MagickPrivate double GetResizeFilterSupport(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->support*resize_filter->blur);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t R e s i z e F i l t e r W e i g h t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetResizeFilterWeight evaluates the specified resize filter at the point x
%  which usually lies between zero and the filter's current 'support' and
% returns the weight of the filter function at that point.
%
% The format of the GetResizeFilterWeight method is:
%
% double GetResizeFilterWeight(const ResizeFilter *resize_filter,
% const double x)
%
% A description of each parameter follows:
%
% o filter: the filter type.
%
% o x: the point.
%
*/
MagickPrivate double GetResizeFilterWeight(const ResizeFilter *resize_filter,
  const double x)
{
  double
    blurred_x,
    window_weight;

  /*
    Evaluate filter(|x|/blur) modulated by the windowing function at
    the same (scale-adjusted) offset.
  */
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  blurred_x=fabs((double) x)/resize_filter->blur;
  if ((resize_filter->window_support < MagickEpsilon) ||
      (resize_filter->window == Box))
    window_weight=1.0;  /* Point or Box filter -- avoid division by zero */
  else
    window_weight=resize_filter->window(blurred_x*resize_filter->scale,
      resize_filter);
  return(window_weight*resize_filter->filter(blurred_x,resize_filter));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p o l a t i v e R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpolativeResizeImage() resizes an image using the specified
% interpolation method.
%
% The format of the InterpolativeResizeImage method is:
%
% Image *InterpolativeResizeImage(const Image *image,const size_t columns,
% const size_t rows,const PixelInterpolateMethod method,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *InterpolativeResizeImage(const Image *image,
  const size_t columns,const size_t rows,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
#define InterpolativeResizeImageTag "Resize/Image"

  CacheView
    *image_view,
    *resize_view;

  Image
    *resize_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    scale;

  ssize_t
    y;

  /*
    Interpolatively resize image: each destination pixel is mapped back
    to a fractional source coordinate and sampled with the requested
    pixel interpolation method.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  /* Same geometry requested: a plain clone suffices. */
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (resize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(resize_image,DirectClass,exception) == MagickFalse)
    {
      resize_image=DestroyImage(resize_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
  /* Destination-to-source pixel ratio along each axis. */
  scale.x=(double) image->columns/resize_image->columns;
  scale.y=(double) image->rows/resize_image->rows;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,resize_image,resize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) resize_image->rows; y++)
  {
    PointInfo
      offset;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      continue;
    /* Pixel-center mapping: dest center y+0.5 scaled back, minus 0.5. */
    offset.y=((double) y+0.5)*scale.y-0.5;
    for (x=0; x < (ssize_t) resize_image->columns; x++)
    {
      register ssize_t
        i;

      if (GetPixelWriteMask(resize_image,q) == 0)
        {
          q+=GetPixelChannels(resize_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          resize_traits,
          traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        resize_traits=GetPixelChannelTraits(resize_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (resize_traits == UndefinedPixelTrait))
          continue;
        offset.x=((double) x+0.5)*scale.x-0.5;
        /* NOTE(review): InterpolatePixelChannels is invoked once per
           defined channel here; it appears to interpolate at the same
           (offset.x,offset.y) each time -- presumably redundant work,
           verify against its definition before restructuring. */
        status=InterpolatePixelChannels(image,image_view,resize_image,method,
          offset.x,offset.y,q,exception);
      }
      q+=GetPixelChannels(resize_image);
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress updates across OpenMP threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_InterpolativeResizeImage)
#endif
        proceed=SetImageProgress(image,InterpolativeResizeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    resize_image=DestroyImage(resize_image);
  return(resize_image);
}
#if defined(MAGICKCORE_LQR_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i q u i d R e s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LiquidRescaleImage() rescales image with seam carving.
%
% The format of the LiquidRescaleImage method is:
%
% Image *LiquidRescaleImage(const Image *image,const size_t columns,
% const size_t rows,const double delta_x,const double rigidity,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the rescaled image.
%
% o rows: the number of rows in the rescaled image.
%
% o delta_x: maximum seam transversal step (0 means straight seams).
%
% o rigidity: introduce a bias for non-straight seams (typically 0).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LiquidRescaleImage(const Image *image,const size_t columns,
  const size_t rows,const double delta_x,const double rigidity,
  ExceptionInfo *exception)
{
#define LiquidRescaleImageTag "Rescale/Image"

  CacheView
    *image_view,
    *rescale_view;

  gfloat
    *packet,
    *pixels;

  Image
    *rescale_image;

  int
    x_offset,
    y_offset;

  LqrCarver
    *carver;

  LqrRetVal
    lqr_status;

  MagickBooleanType
    status;

  MemoryInfo
    *pixel_info;

  register gfloat
    *q;

  ssize_t
    y;

  /*
    Liquid rescale image: seam-carving resize via the liblqr delegate.
    Pixels are exported to a normalized float buffer, carved, then the
    carver's scan is written back into a fresh image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  /* Tiny targets: seam carving is not meaningful; fall back to resize. */
  if ((columns <= 2) || (rows <= 2))
    return(ResizeImage(image,columns,rows,image->filter,exception));
  pixel_info=AcquireVirtualMemory(image->columns,image->rows*
    GetPixelChannels(image)*sizeof(*pixels));
  if (pixel_info == (MemoryInfo *) NULL)
    return((Image *) NULL);
  pixels=(gfloat *) GetVirtualMemoryBlob(pixel_info);
  status=MagickTrue;
  q=pixels;
  image_view=AcquireVirtualCacheView(image,exception);
  /* Export all pixels as floats in [0,1] (QuantumScale normalizes). */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
        *q++=QuantumScale*p[i];
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  carver=lqr_carver_new_ext(pixels,(int) image->columns,(int) image->rows,
    (int) GetPixelChannels(image),LQR_COLDEPTH_32F);
  if (carver == (LqrCarver *) NULL)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  lqr_carver_set_preserve_input_image(carver);
  /* NOTE(review): the init status is overwritten by the resize status
     and both are then discarded via (void) lqr_status. */
  lqr_status=lqr_carver_init(carver,(int) delta_x,rigidity);
  lqr_status=lqr_carver_resize(carver,(int) columns,(int) rows);
  (void) lqr_status;
  /* Carved result may not be exactly columns x rows; ask the carver. */
  rescale_image=CloneImage(image,lqr_carver_get_width(carver),
    lqr_carver_get_height(carver),MagickTrue,exception);
  if (rescale_image == (Image *) NULL)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(rescale_image,DirectClass,exception) == MagickFalse)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      rescale_image=DestroyImage(rescale_image);
      return((Image *) NULL);
    }
  rescale_view=AcquireAuthenticCacheView(rescale_image,exception);
  (void) lqr_carver_scan_reset(carver);
  /* Write back pixel-by-pixel as the carver scan yields each packet. */
  while (lqr_carver_scan_ext(carver,&x_offset,&y_offset,(void **) &packet) != 0)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      i;

    q=QueueCacheViewAuthenticPixels(rescale_view,x_offset,y_offset,1,1,
      exception);
    if (q == (Quantum *) NULL)
      break;
    for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
    {
      PixelChannel
        channel;

      PixelTrait
        rescale_traits,
        traits;

      channel=GetPixelChannelChannel(image,i);
      traits=GetPixelChannelTraits(image,channel);
      rescale_traits=GetPixelChannelTraits(rescale_image,channel);
      if ((traits == UndefinedPixelTrait) ||
          (rescale_traits == UndefinedPixelTrait))
        continue;
      /* Denormalize float back to the Quantum range, clamped. */
      SetPixelChannel(rescale_image,channel,ClampToQuantum(QuantumRange*
        packet[i]),q);
    }
    if (SyncCacheViewAuthenticPixels(rescale_view,exception) == MagickFalse)
      break;
  }
  rescale_view=DestroyCacheView(rescale_view);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  lqr_carver_destroy(carver);
  return(rescale_image);
}
#else
/*
  Stub used when ImageMagick is built without the liblqr delegate:
  raises a missing-delegate exception and returns no image.
*/
MagickExport Image *LiquidRescaleImage(const Image *image,
  const size_t magick_unused(columns),const size_t magick_unused(rows),
  const double magick_unused(delta_x),const double magick_unused(rigidity),
  ExceptionInfo *exception)
{
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  (void) ThrowMagickException(exception,GetMagickModule(),MissingDelegateError,
    "DelegateLibrarySupportNotBuiltIn","'%s' (LQR)",image->filename);
  return((Image *) NULL);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g n i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagnifyImage() doubles the size of the image with a pixel art scaling
% algorithm.
%
% The format of the MagnifyImage method is:
%
% Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  MagnifyImage() doubles the image in both dimensions with an EPX/Scale2x-
  style pixel-art rule: each source pixel expands to a 2x2 destination block
  whose corners are selectively cloned from matching neighbors instead of
  being interpolated.
*/
MagickExport Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
{
#define MagnifyImageTag "Magnify/Image"

  CacheView
    *image_view,
    *magnify_view;

  Image
    *magnify_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize magnified image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  magnify_image=CloneImage(image,2*image->columns,2*image->rows,MagickTrue,
    exception);
  if (magnify_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Magnify image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  magnify_view=AcquireAuthenticCacheView(magnify_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,magnify_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Queue two destination rows at once: source row y maps to magnified
      rows 2*y and 2*y+1.
    */
    q=QueueCacheViewAuthenticPixels(magnify_view,0,2*y,magnify_image->columns,2,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      Magnify this row of pixels.
    */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        intensity[9];

      register const Quantum
        *magick_restrict p;

      register Quantum
        *magick_restrict r;

      register ssize_t
        i;

      size_t
        channels;

      /*
        Fetch the 3x3 neighborhood centered on (x,y); the virtual cache view
        supplies out-of-bounds border values.
      */
      p=GetCacheViewVirtualPixels(image_view,x-1,y-1,3,3,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      channels=GetPixelChannels(image);
      for (i=0; i < 9; i++)
        intensity[i]=GetPixelIntensity(image,p+i*channels);
      r=q;
      /*
        In the 3x3 fetch, index 4 is the center pixel, 1/7 the vertical
        neighbors, 3/5 the horizontal neighbors.  If either opposing pair
        matches in intensity, treat the area as flat and replicate the
        center into the whole 2x2 block.
      */
      if ((fabs(intensity[1]-intensity[7]) < MagickEpsilon) ||
          (fabs(intensity[3]-intensity[5]) < MagickEpsilon))
        {
          /*
            Clone center pixel.
          */
          for (i=0; i < (ssize_t) channels; i++)
            r[i]=p[4*channels+i];
          r+=GetPixelChannels(magnify_image);
          for (i=0; i < (ssize_t) channels; i++)
            r[i]=p[4*channels+i];
          /*
            Jump r down to the second destination row of the 2x2 block.
          */
          r+=GetPixelChannels(magnify_image)*(magnify_image->columns-1);
          for (i=0; i < (ssize_t) channels; i++)
            r[i]=p[4*channels+i];
          r+=GetPixelChannels(magnify_image);
          for (i=0; i < (ssize_t) channels; i++)
            r[i]=p[4*channels+i];
        }
      else
        {
          /*
            Selectively clone pixel: each corner of the 2x2 block copies the
            adjacent diagonal neighbor when its two bordering neighbors match
            in intensity, otherwise it copies the center pixel.
          */
          if (fabs(intensity[1]-intensity[3]) < MagickEpsilon)
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[3*channels+i];
          else
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[4*channels+i];
          r+=GetPixelChannels(magnify_image);
          if (fabs(intensity[1]-intensity[5]) < MagickEpsilon)
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[5*channels+i];
          else
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[4*channels+i];
          r+=GetPixelChannels(magnify_image)*(magnify_image->columns-1);
          if (fabs(intensity[3]-intensity[7]) < MagickEpsilon)
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[3*channels+i];
          else
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[4*channels+i];
          r+=GetPixelChannels(magnify_image);
          if (fabs(intensity[5]-intensity[7]) < MagickEpsilon)
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[5*channels+i];
          else
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[4*channels+i];
        }
      /*
        Advance q past the 2 destination pixels just written in this row.
      */
      q+=2*GetPixelChannels(magnify_image);
    }
    if (SyncCacheViewAuthenticPixels(magnify_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_MagnifyImage)
#endif
        proceed=SetImageProgress(image,MagnifyImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  magnify_view=DestroyCacheView(magnify_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    magnify_image=DestroyImage(magnify_image);
  return(magnify_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M i n i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MinifyImage() is a convenience method that scales an image proportionally to
% half its size.
%
% The format of the MinifyImage method is:
%
% Image *MinifyImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MinifyImage(const Image *image,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: proportionally scale the image to half its width
    and height with the spline filter.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  return(ResizeImage(image,image->columns/2,image->rows/2,SplineFilter,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s a m p l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResampleImage() resizes an image in terms of its pixel size, so that when
% displayed at the given resolution it will be the same size in terms of
% real world units as the original image at the original resolution.
%
% The format of the ResampleImage method is:
%
% Image *ResampleImage(Image *image,const double x_resolution,
% const double y_resolution,const FilterType filter,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be resized to fit the given resolution.
%
% o x_resolution: the new image x resolution.
%
% o y_resolution: the new image y resolution.
%
% o filter: Image filter to use.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ResampleImage(const Image *image,const double x_resolution,
  const double y_resolution,const FilterType filter,ExceptionInfo *exception)
{
#define ResampleImageTag "Resample/Image"

  double
    x_density,
    y_density;

  Image
    *resample_image;

  size_t
    height,
    width;

  /*
    Initialize sampled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Compute pixel geometry so the result covers the same real-world area at
    the requested resolution; an unset source resolution defaults to 72 DPI.
  */
  x_density=image->resolution.x == 0.0 ? 72.0 : image->resolution.x;
  y_density=image->resolution.y == 0.0 ? 72.0 : image->resolution.y;
  width=(size_t) (x_resolution*image->columns/x_density+0.5);
  height=(size_t) (y_resolution*image->rows/y_density+0.5);
  resample_image=ResizeImage(image,width,height,filter,exception);
  if (resample_image != (Image *) NULL)
    {
      /*
        Stamp the new resolution onto the result.
      */
      resample_image->resolution.x=x_resolution;
      resample_image->resolution.y=y_resolution;
    }
  return(resample_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResizeImage() scales an image to the desired dimensions, using the given
% filter (see AcquireFilterInfo()).
%
% If an undefined filter is given the filter defaults to Mitchell for a
% colormapped image, an image with a matte channel, or if the image is
% enlarged. Otherwise the filter defaults to a Lanczos.
%
% ResizeImage() was inspired by Paul Heckbert's "zoom" program.
%
% The format of the ResizeImage method is:
%
% Image *ResizeImage(Image *image,const size_t columns,const size_t rows,
% const FilterType filter,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o filter: Image filter to use.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  One filter tap: a source pixel index and the filter weight applied to it
  when accumulating a destination pixel.
*/
typedef struct _ContributionInfo
{
  double
    weight;  /* (possibly normalized) filter weight for this tap */

  ssize_t
    pixel;   /* source column (horizontal pass) or row (vertical pass) */
} ContributionInfo;
static ContributionInfo **DestroyContributionThreadSet(
  ContributionInfo **contribution)
{
  /*
    Release each per-thread contribution buffer, then the table itself;
    the value of RelinquishMagickMemory() is returned so callers can clear
    their pointer in one statement.
  */
  register ssize_t
    n;

  assert(contribution != (ContributionInfo **) NULL);
  for (n=0; n < (ssize_t) GetMagickResourceLimit(ThreadResource); n++)
  {
    if (contribution[n] == (ContributionInfo *) NULL)
      continue;
    contribution[n]=(ContributionInfo *) RelinquishAlignedMemory(
      contribution[n]);
  }
  return((ContributionInfo **) RelinquishMagickMemory(contribution));
}
static ContributionInfo **AcquireContributionThreadSet(const size_t count)
{
  /*
    Allocate one aligned buffer of `count` ContributionInfo entries per
    worker thread.  On any failure the partially built set is torn down and
    NULL is returned.
  */
  ContributionInfo
    **contribution;

  register ssize_t
    n;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  contribution=(ContributionInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*contribution));
  if (contribution == (ContributionInfo **) NULL)
    return((ContributionInfo **) NULL);
  /*
    Zero the table first so a partial tear-down only frees valid pointers.
  */
  (void) ResetMagickMemory(contribution,0,number_threads*sizeof(*contribution));
  for (n=0; n < (ssize_t) number_threads; n++)
  {
    contribution[n]=(ContributionInfo *) MagickAssumeAligned(
      AcquireAlignedMemory(count,sizeof(**contribution)));
    if (contribution[n] == (ContributionInfo *) NULL)
      return(DestroyContributionThreadSet(contribution));
  }
  return(contribution);
}
/*
  HorizontalFilter() resizes in the x direction only: each destination
  column of resize_image is a weighted sum of nearby source columns, with
  weights taken from resize_filter.  The shared progress counter *offset is
  advanced once per destination column toward span.
*/
static MagickBooleanType HorizontalFilter(const ResizeFilter *resize_filter,
  const Image *image,Image *resize_image,const double x_factor,
  const MagickSizeType span,MagickOffsetType *offset,ExceptionInfo *exception)
{
#define ResizeImageTag "Resize/Image"

  CacheView
    *image_view,
    *resize_view;

  ClassType
    storage_class;

  ContributionInfo
    **magick_restrict contributions;

  MagickBooleanType
    status;

  double
    scale,
    support;

  ssize_t
    x;

  /*
    Apply filter to resize horizontally from image to resize image.
    When shrinking (x_factor < 1) the filter window is widened by the
    inverse shrink factor so every source pixel still contributes.
  */
  scale=MagickMax(1.0/x_factor+MagickEpsilon,1.0);
  support=scale*GetResizeFilterSupport(resize_filter);
  storage_class=support > 0.5 ? DirectClass : image->storage_class;
  if (SetImageStorageClass(resize_image,storage_class,exception) == MagickFalse)
    return(MagickFalse);
  if (support < 0.5)
    {
      /*
        Support too small even for nearest neighbour: Reduce to point sampling.
      */
      support=(double) 0.5;
      scale=1.0;
    }
  /*
    One scratch tap buffer per worker thread, sized for the widest window.
  */
  contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
  if (contributions == (ContributionInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  scale=PerceptibleReciprocal(scale);
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,resize_image,resize_image->columns,1)
#endif
  for (x=0; x < (ssize_t) resize_image->columns; x++)
  {
    const int
      id = GetOpenMPThreadId();

    double
      bisect,
      density;

    register const Quantum
      *magick_restrict p;

    register ContributionInfo
      *magick_restrict contribution;

    register Quantum
      *magick_restrict q;

    register ssize_t
      y;

    ssize_t
      n,
      start,
      stop;

    if (status == MagickFalse)
      continue;
    /*
      Map destination column x back to its center in source coordinates and
      clamp the filter window [start,stop) to the source image.
    */
    bisect=(double) (x+0.5)/x_factor+MagickEpsilon;
    start=(ssize_t) MagickMax(bisect-support+0.5,0.0);
    stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->columns);
    density=0.0;
    contribution=contributions[id];
    /*
      Build the filter taps (source column + weight) for this column.
    */
    for (n=0; n < (stop-start); n++)
    {
      contribution[n].pixel=start+n;
      contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
        ((double) (start+n)-bisect+0.5));
      density+=contribution[n].weight;
    }
    /*
      Degenerate window with no contributing source pixels: skip column.
    */
    if (n == 0)
      continue;
    if ((density != 0.0) && (density != 1.0))
      {
        register ssize_t
          i;

        /*
          Normalize.
        */
        density=PerceptibleReciprocal(density);
        for (i=0; i < n; i++)
          contribution[i].weight*=density;
      }
    /*
      Read the contributing source columns for every row; write one full
      destination column.
    */
    p=GetCacheViewVirtualPixels(image_view,contribution[0].pixel,0,(size_t)
      (contribution[n-1].pixel-contribution[0].pixel+1),image->rows,exception);
    q=QueueCacheViewAuthenticPixels(resize_view,x,0,1,resize_image->rows,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (y=0; y < (ssize_t) resize_image->rows; y++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          resize_traits,
          traits;

        register ssize_t
          j;

        ssize_t
          k;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        resize_traits=GetPixelChannelTraits(resize_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (resize_traits == UndefinedPixelTrait))
          continue;
        /*
          Copy-only channels and write-masked pixels take the tap nearest
          the window center unfiltered.
        */
        if (((resize_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(resize_image,q) == 0))
          {
            j=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double)
              stop-1.0)+0.5);
            k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
              (contribution[j-start].pixel-contribution[0].pixel);
            SetPixelChannel(resize_image,channel,p[k*GetPixelChannels(image)+i],
              q);
            continue;
          }
        pixel=0.0;
        if ((resize_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (j=0; j < n; j++)
            {
              k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
                (contribution[j].pixel-contribution[0].pixel);
              alpha=contribution[j].weight;
              pixel+=alpha*p[k*GetPixelChannels(image)+i];
            }
            SetPixelChannel(resize_image,channel,ClampToQuantum(pixel),q);
            continue;
          }
        /*
          Alpha blending: weight each tap by its source alpha, then
          renormalize by the accumulated alpha-weight (gamma) so transparent
          pixels do not pollute the color.
        */
        gamma=0.0;
        for (j=0; j < n; j++)
        {
          k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
            (contribution[j].pixel-contribution[0].pixel);
          alpha=contribution[j].weight*QuantumScale*
            GetPixelAlpha(image,p+k*GetPixelChannels(image));
          pixel+=alpha*p[k*GetPixelChannels(image)+i];
          gamma+=alpha;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(resize_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(resize_image);
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_HorizontalFilter)
#endif
        proceed=SetImageProgress(image,ResizeImageTag,(*offset)++,span);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  contributions=DestroyContributionThreadSet(contributions);
  return(status);
}
/*
  VerticalFilter() is the y-direction counterpart of HorizontalFilter():
  each destination row of resize_image is a weighted sum of nearby source
  rows, with weights taken from resize_filter.  *offset advances once per
  destination row toward span.
*/
static MagickBooleanType VerticalFilter(const ResizeFilter *resize_filter,
  const Image *image,Image *resize_image,const double y_factor,
  const MagickSizeType span,MagickOffsetType *offset,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *resize_view;

  ClassType
    storage_class;

  ContributionInfo
    **magick_restrict contributions;

  double
    scale,
    support;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Apply filter to resize vertically from image to resize image.
    When shrinking (y_factor < 1) the filter window is widened by the
    inverse shrink factor so every source pixel still contributes.
  */
  scale=MagickMax(1.0/y_factor+MagickEpsilon,1.0);
  support=scale*GetResizeFilterSupport(resize_filter);
  storage_class=support > 0.5 ? DirectClass : image->storage_class;
  if (SetImageStorageClass(resize_image,storage_class,exception) == MagickFalse)
    return(MagickFalse);
  if (support < 0.5)
    {
      /*
        Support too small even for nearest neighbour: Reduce to point sampling.
      */
      support=(double) 0.5;
      scale=1.0;
    }
  /*
    One scratch tap buffer per worker thread, sized for the widest window.
  */
  contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
  if (contributions == (ContributionInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  scale=PerceptibleReciprocal(scale);
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,resize_image,resize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) resize_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    double
      bisect,
      density;

    register const Quantum
      *magick_restrict p;

    register ContributionInfo
      *magick_restrict contribution;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      n,
      start,
      stop;

    if (status == MagickFalse)
      continue;
    /*
      Map destination row y back to its center in source coordinates and
      clamp the filter window [start,stop) to the source image.
    */
    bisect=(double) (y+0.5)/y_factor+MagickEpsilon;
    start=(ssize_t) MagickMax(bisect-support+0.5,0.0);
    stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->rows);
    density=0.0;
    contribution=contributions[id];
    /*
      Build the filter taps (source row + weight) for this row.
    */
    for (n=0; n < (stop-start); n++)
    {
      contribution[n].pixel=start+n;
      contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
        ((double) (start+n)-bisect+0.5));
      density+=contribution[n].weight;
    }
    /*
      Degenerate window with no contributing source pixels: skip row.
    */
    if (n == 0)
      continue;
    if ((density != 0.0) && (density != 1.0))
      {
        register ssize_t
          i;

        /*
          Normalize.
        */
        density=PerceptibleReciprocal(density);
        for (i=0; i < n; i++)
          contribution[i].weight*=density;
      }
    /*
      Read the contributing source rows across every column; write one full
      destination row.
    */
    p=GetCacheViewVirtualPixels(image_view,0,contribution[0].pixel,
      image->columns,(size_t) (contribution[n-1].pixel-contribution[0].pixel+1),
      exception);
    q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) resize_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          resize_traits,
          traits;

        register ssize_t
          j;

        ssize_t
          k;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        resize_traits=GetPixelChannelTraits(resize_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (resize_traits == UndefinedPixelTrait))
          continue;
        /*
          Copy-only channels and write-masked pixels take the tap nearest
          the window center unfiltered.
        */
        if (((resize_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(resize_image,q) == 0))
          {
            j=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double)
              stop-1.0)+0.5);
            k=(ssize_t) ((contribution[j-start].pixel-contribution[0].pixel)*
              image->columns+x);
            SetPixelChannel(resize_image,channel,p[k*GetPixelChannels(image)+i],
              q);
            continue;
          }
        pixel=0.0;
        if ((resize_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (j=0; j < n; j++)
            {
              k=(ssize_t) ((contribution[j].pixel-contribution[0].pixel)*
                image->columns+x);
              alpha=contribution[j].weight;
              pixel+=alpha*p[k*GetPixelChannels(image)+i];
            }
            SetPixelChannel(resize_image,channel,ClampToQuantum(pixel),q);
            continue;
          }
        /*
          Alpha blending: weight each tap by its source alpha, then
          renormalize by the accumulated alpha-weight (gamma) so transparent
          pixels do not pollute the color.
        */
        gamma=0.0;
        for (j=0; j < n; j++)
        {
          k=(ssize_t) ((contribution[j].pixel-contribution[0].pixel)*
            image->columns+x);
          alpha=contribution[j].weight*QuantumScale*GetPixelAlpha(image,p+k*
            GetPixelChannels(image));
          pixel+=alpha*p[k*GetPixelChannels(image)+i];
          gamma+=alpha;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(resize_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(resize_image);
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_VerticalFilter)
#endif
        proceed=SetImageProgress(image,ResizeImageTag,(*offset)++,span);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  contributions=DestroyContributionThreadSet(contributions);
  return(status);
}
MagickExport Image *ResizeImage(const Image *image,const size_t columns,
  const size_t rows,const FilterType filter,ExceptionInfo *exception)
{
  double
    x_factor,
    y_factor;

  FilterType
    selected_filter;

  Image
    *resize_image,
    *staging_image;

  MagickOffsetType
    progress;

  MagickSizeType
    span;

  MagickStatusType
    status;

  ResizeFilter
    *resize_filter;

  /*
    Acquire resize image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows) &&
      (filter == UndefinedFilter))
    return(CloneImage(image,0,0,MagickTrue,exception));
  /*
    Acquire resize filter.  With no explicit filter: identity geometry uses
    point sampling; colormapped, alpha, or enlarged images use Mitchell;
    everything else uses Lanczos.
  */
  x_factor=(double) columns/(double) image->columns;
  y_factor=(double) rows/(double) image->rows;
  if (filter != UndefinedFilter)
    selected_filter=filter;
  else
    if ((x_factor == 1.0) && (y_factor == 1.0))
      selected_filter=PointFilter;
    else
      if ((image->storage_class == PseudoClass) ||
          (image->alpha_trait != UndefinedPixelTrait) ||
          ((x_factor*y_factor) > 1.0))
        selected_filter=MitchellFilter;
      else
        selected_filter=LanczosFilter;
  resize_filter=AcquireResizeFilter(image,selected_filter,MagickFalse,
    exception);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  resize_image=AccelerateResizeImage(image,columns,rows,resize_filter,
    exception);
  if (resize_image != (Image *) NULL)
    {
      resize_filter=DestroyResizeFilter(resize_filter);
      return(resize_image);
    }
#endif
  resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (resize_image == (Image *) NULL)
    {
      resize_filter=DestroyResizeFilter(resize_filter);
      return(resize_image);
    }
  /*
    Resize in two separable passes through a staging image, running the pass
    with the larger scale factor first.
  */
  progress=0;
  if (x_factor > y_factor)
    {
      staging_image=CloneImage(image,columns,image->rows,MagickTrue,exception);
      if (staging_image == (Image *) NULL)
        {
          resize_filter=DestroyResizeFilter(resize_filter);
          return(DestroyImage(resize_image));
        }
      span=(MagickSizeType) (staging_image->columns+rows);
      status=HorizontalFilter(resize_filter,image,staging_image,x_factor,span,
        &progress,exception);
      status&=VerticalFilter(resize_filter,staging_image,resize_image,y_factor,
        span,&progress,exception);
    }
  else
    {
      staging_image=CloneImage(image,image->columns,rows,MagickTrue,exception);
      if (staging_image == (Image *) NULL)
        {
          resize_filter=DestroyResizeFilter(resize_filter);
          return(DestroyImage(resize_image));
        }
      span=(MagickSizeType) (staging_image->rows+columns);
      status=VerticalFilter(resize_filter,image,staging_image,y_factor,span,
        &progress,exception);
      status&=HorizontalFilter(resize_filter,staging_image,resize_image,
        x_factor,span,&progress,exception);
    }
  /*
    Free resources.
  */
  staging_image=DestroyImage(staging_image);
  resize_filter=DestroyResizeFilter(resize_filter);
  if (status == MagickFalse)
    {
      resize_image=DestroyImage(resize_image);
      return((Image *) NULL);
    }
  resize_image->type=image->type;
  return(resize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S a m p l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SampleImage() scales an image to the desired dimensions with pixel
% sampling. Unlike other scaling methods, this method does not introduce
% any additional color into the scaled image.
%
% The format of the SampleImage method is:
%
% Image *SampleImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the sampled image.
%
% o rows: the number of rows in the sampled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SampleImage() scales with nearest-pixel sampling: every destination pixel
  is copied verbatim from one source pixel, so no new colors are introduced.

  Fix: the "sample:offset" artifact was parsed twice in a row with the first
  ParseGeometry() result discarded; the redundant call is removed.
*/
MagickExport Image *SampleImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define SampleImageTag "Sample/Image"

  CacheView
    *image_view,
    *sample_view;

  Image
    *sample_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    sample_offset;

  register ssize_t
    x;

  ssize_t
    *x_offset,
    y;

  /*
    Initialize sampled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  sample_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (sample_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Set the sampling offset, default is in the mid-point of sample regions.
  */
  sample_offset.x=sample_offset.y=0.5-MagickEpsilon;
  {
    const char
      *value;

    value=GetImageArtifact(image,"sample:offset");
    if (value != (char *) NULL)
      {
        GeometryInfo
          geometry_info;

        MagickStatusType
          flags;

        /*
          Offsets are given as percentages of the sample region; a single
          value applies to both axes unless a sigma is supplied.
        */
        flags=ParseGeometry(value,&geometry_info);
        sample_offset.x=sample_offset.y=geometry_info.rho/100.0-MagickEpsilon;
        if ((flags & SigmaValue) != 0)
          sample_offset.y=geometry_info.sigma/100.0-MagickEpsilon;
      }
  }
  /*
    Allocate scan line buffer and column offset buffers.
  */
  x_offset=(ssize_t *) AcquireQuantumMemory((size_t) sample_image->columns,
    sizeof(*x_offset));
  if (x_offset == (ssize_t *) NULL)
    {
      sample_image=DestroyImage(sample_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Precompute the source column for each destination column.
  */
  for (x=0; x < (ssize_t) sample_image->columns; x++)
    x_offset[x]=(ssize_t) ((((double) x+sample_offset.x)*image->columns)/
      sample_image->columns);
  /*
    Sample each row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  sample_view=AcquireAuthenticCacheView(sample_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,sample_image,1,1)
#endif
  for (y=0; y < (ssize_t) sample_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      y_offset;

    if (status == MagickFalse)
      continue;
    /*
      Pick the source row for this destination row.
    */
    y_offset=(ssize_t) ((((double) y+sample_offset.y)*image->rows)/
      sample_image->rows);
    p=GetCacheViewVirtualPixels(image_view,0,y_offset,image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(sample_view,0,y,sample_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Sample each column.
    */
    for (x=0; x < (ssize_t) sample_image->columns; x++)
    {
      register ssize_t
        i;

      if (GetPixelWriteMask(sample_image,q) == 0)
        {
          q+=GetPixelChannels(sample_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(sample_image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          sample_traits,
          traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        sample_traits=GetPixelChannelTraits(sample_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (sample_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(sample_image,channel,p[x_offset[x]*GetPixelChannels(
          image)+i],q);
      }
      q+=GetPixelChannels(sample_image);
    }
    if (SyncCacheViewAuthenticPixels(sample_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SampleImage)
#endif
        proceed=SetImageProgress(image,SampleImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  sample_view=DestroyCacheView(sample_view);
  x_offset=(ssize_t *) RelinquishMagickMemory(x_offset);
  sample_image->type=image->type;
  if (status == MagickFalse)
    sample_image=DestroyImage(sample_image);
  return(sample_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleImage() changes the size of an image to the given dimensions.
%
% The format of the ScaleImage method is:
%
% Image *ScaleImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ScaleImage(const Image *image,const size_t columns,
const size_t rows,ExceptionInfo *exception)
{
#define ScaleImageTag "Scale/Image"
CacheView
*image_view,
*scale_view;
double
alpha,
pixel[CompositePixelChannel],
*scale_scanline,
*scanline,
*x_vector,
*y_vector;
Image
*scale_image;
MagickBooleanType
next_column,
next_row,
proceed,
status;
PixelChannel
channel;
PixelTrait
scale_traits,
traits;
PointInfo
scale,
span;
register ssize_t
i;
ssize_t
n,
number_rows,
y;
/*
Initialize scaled image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if ((columns == 0) || (rows == 0))
ThrowImageException(ImageError,"NegativeOrZeroImageSize");
if ((columns == image->columns) && (rows == image->rows))
return(CloneImage(image,0,0,MagickTrue,exception));
scale_image=CloneImage(image,columns,rows,MagickTrue,exception);
if (scale_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(scale_image,DirectClass,exception) == MagickFalse)
{
scale_image=DestroyImage(scale_image);
return((Image *) NULL);
}
/*
Allocate memory.
*/
x_vector=(double *) AcquireQuantumMemory((size_t) image->columns,
GetPixelChannels(image)*sizeof(*x_vector));
scanline=x_vector;
if (image->rows != scale_image->rows)
scanline=(double *) AcquireQuantumMemory((size_t) image->columns,
GetPixelChannels(image)*sizeof(*scanline));
scale_scanline=(double *) AcquireQuantumMemory((size_t)
scale_image->columns,GetPixelChannels(image)*sizeof(*scale_scanline));
y_vector=(double *) AcquireQuantumMemory((size_t) image->columns,
GetPixelChannels(image)*sizeof(*y_vector));
if ((scanline == (double *) NULL) ||
(scale_scanline == (double *) NULL) ||
(x_vector == (double *) NULL) ||
(y_vector == (double *) NULL))
{
scale_image=DestroyImage(scale_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Scale image.
*/
number_rows=0;
next_row=MagickTrue;
span.y=1.0;
scale.y=(double) scale_image->rows/(double) image->rows;
(void) ResetMagickMemory(y_vector,0,(size_t) GetPixelChannels(image)*
image->columns*sizeof(*y_vector));
n=0;
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
scale_view=AcquireAuthenticCacheView(scale_image,exception);
for (y=0; y < (ssize_t) scale_image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
break;
q=QueueCacheViewAuthenticPixels(scale_view,0,y,scale_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
break;
}
alpha=1.0;
if (scale_image->rows == image->rows)
{
/*
Read a new scanline.
*/
p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1,
exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelWriteMask(image,p) == 0)
{
p+=GetPixelChannels(image);
continue;
}
if (image->alpha_trait != UndefinedPixelTrait)
alpha=QuantumScale*GetPixelAlpha(image,p);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if ((traits & BlendPixelTrait) == 0)
{
x_vector[x*GetPixelChannels(image)+i]=(double) p[i];
continue;
}
x_vector[x*GetPixelChannels(image)+i]=alpha*p[i];
}
p+=GetPixelChannels(image);
}
}
else
{
/*
Scale Y direction.
*/
while (scale.y < span.y)
{
if ((next_row != MagickFalse) &&
(number_rows < (ssize_t) image->rows))
{
/*
Read a new scanline.
*/
p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1,
exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelWriteMask(image,p) == 0)
{
p+=GetPixelChannels(image);
continue;
}
if (image->alpha_trait != UndefinedPixelTrait)
alpha=QuantumScale*GetPixelAlpha(image,p);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if ((traits & BlendPixelTrait) == 0)
{
x_vector[x*GetPixelChannels(image)+i]=(double) p[i];
continue;
}
x_vector[x*GetPixelChannels(image)+i]=alpha*p[i];
}
p+=GetPixelChannels(image);
}
number_rows++;
}
for (x=0; x < (ssize_t) image->columns; x++)
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
y_vector[x*GetPixelChannels(image)+i]+=scale.y*
x_vector[x*GetPixelChannels(image)+i];
span.y-=scale.y;
scale.y=(double) scale_image->rows/(double) image->rows;
next_row=MagickTrue;
}
if ((next_row != MagickFalse) && (number_rows < (ssize_t) image->rows))
{
/*
Read a new scanline.
*/
p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1,
exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelWriteMask(image,p) == 0)
{
p+=GetPixelChannels(image);
continue;
}
if (image->alpha_trait != UndefinedPixelTrait)
alpha=QuantumScale*GetPixelAlpha(image,p);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if ((traits & BlendPixelTrait) == 0)
{
x_vector[x*GetPixelChannels(image)+i]=(double) p[i];
continue;
}
x_vector[x*GetPixelChannels(image)+i]=alpha*p[i];
}
p+=GetPixelChannels(image);
}
number_rows++;
next_row=MagickFalse;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
pixel[i]=y_vector[x*GetPixelChannels(image)+i]+span.y*
x_vector[x*GetPixelChannels(image)+i];
scanline[x*GetPixelChannels(image)+i]=pixel[i];
y_vector[x*GetPixelChannels(image)+i]=0.0;
}
}
scale.y-=span.y;
if (scale.y <= 0)
{
scale.y=(double) scale_image->rows/(double) image->rows;
next_row=MagickTrue;
}
span.y=1.0;
}
if (scale_image->columns == image->columns)
{
/*
Transfer scanline to scaled image.
*/
for (x=0; x < (ssize_t) scale_image->columns; x++)
{
if (GetPixelWriteMask(scale_image,q) == 0)
{
q+=GetPixelChannels(scale_image);
continue;
}
if (image->alpha_trait != UndefinedPixelTrait)
{
alpha=QuantumScale*scanline[x*GetPixelChannels(image)+
GetPixelChannelOffset(image,AlphaPixelChannel)];
alpha=PerceptibleReciprocal(alpha);
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
scale_traits=GetPixelChannelTraits(scale_image,channel);
if ((traits == UndefinedPixelTrait) ||
(scale_traits == UndefinedPixelTrait))
continue;
if ((traits & BlendPixelTrait) == 0)
{
SetPixelChannel(scale_image,channel,ClampToQuantum(
scanline[x*GetPixelChannels(image)+i]),q);
continue;
}
SetPixelChannel(scale_image,channel,ClampToQuantum(alpha*scanline[
x*GetPixelChannels(image)+i]),q);
}
q+=GetPixelChannels(scale_image);
}
}
else
{
ssize_t
t;
/*
Scale X direction.
*/
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
pixel[i]=0.0;
next_column=MagickFalse;
span.x=1.0;
t=0;
for (x=0; x < (ssize_t) image->columns; x++)
{
scale.x=(double) scale_image->columns/(double) image->columns;
while (scale.x >= span.x)
{
if (next_column != MagickFalse)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
pixel[i]=0.0;
t++;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
pixel[i]+=span.x*scanline[x*GetPixelChannels(image)+i];
scale_scanline[t*GetPixelChannels(image)+i]=pixel[i];
}
scale.x-=span.x;
span.x=1.0;
next_column=MagickTrue;
}
if (scale.x > 0)
{
if (next_column != MagickFalse)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
pixel[i]=0.0;
next_column=MagickFalse;
t++;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
pixel[i]+=scale.x*scanline[x*GetPixelChannels(image)+i];
span.x-=scale.x;
}
}
if (span.x > 0)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
pixel[i]+=span.x*scanline[(x-1)*GetPixelChannels(image)+i];
}
if ((next_column == MagickFalse) &&
(t < (ssize_t) scale_image->columns))
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
scale_scanline[t*GetPixelChannels(image)+i]=pixel[i];
/*
Transfer scanline to scaled image.
*/
for (x=0; x < (ssize_t) scale_image->columns; x++)
{
if (GetPixelWriteMask(scale_image,q) == 0)
{
q+=GetPixelChannels(scale_image);
continue;
}
if (image->alpha_trait != UndefinedPixelTrait)
{
alpha=QuantumScale*scale_scanline[x*GetPixelChannels(image)+
GetPixelChannelOffset(image,AlphaPixelChannel)];
alpha=PerceptibleReciprocal(alpha);
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
scale_traits=GetPixelChannelTraits(scale_image,channel);
if ((traits == UndefinedPixelTrait) ||
(scale_traits == UndefinedPixelTrait))
continue;
if ((traits & BlendPixelTrait) == 0)
{
SetPixelChannel(scale_image,channel,ClampToQuantum(
scale_scanline[x*GetPixelChannels(image)+i]),q);
continue;
}
SetPixelChannel(scale_image,channel,ClampToQuantum(alpha*
scale_scanline[x*GetPixelChannels(image)+i]),q);
}
q+=GetPixelChannels(scale_image);
}
}
if (SyncCacheViewAuthenticPixels(scale_view,exception) == MagickFalse)
{
status=MagickFalse;
break;
}
proceed=SetImageProgress(image,ScaleImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
{
status=MagickFalse;
break;
}
}
scale_view=DestroyCacheView(scale_view);
image_view=DestroyCacheView(image_view);
/*
Free allocated memory.
*/
y_vector=(double *) RelinquishMagickMemory(y_vector);
scale_scanline=(double *) RelinquishMagickMemory(scale_scanline);
if (scale_image->rows != image->rows)
scanline=(double *) RelinquishMagickMemory(scanline);
x_vector=(double *) RelinquishMagickMemory(x_vector);
scale_image->type=image->type;
if (status == MagickFalse)
scale_image=DestroyImage(scale_image);
return(scale_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T h u m b n a i l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ThumbnailImage() changes the size of an image to the given dimensions and
% removes any associated profiles. The goal is to produce small low cost
% thumbnail images suited for display on the Web.
%
% The format of the ThumbnailImage method is:
%
% Image *ThumbnailImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ThumbnailImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define SampleFactor 5

  char
    *url,
    value[MagickPathExtent];

  const char
    *name;

  Image
    *thumbnail_image;

  double
    x_factor,
    y_factor;

  struct stat
    attributes;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Choose the resize strategy: resize directly for modest shrink factors or
    tiny targets, otherwise pre-sample to SampleFactor times the target size
    to cut the cost of the final high-quality resize.
  */
  x_factor=(double) columns/(double) image->columns;
  y_factor=(double) rows/(double) image->rows;
  if ((x_factor*y_factor) > 0.1)
    thumbnail_image=ResizeImage(image,columns,rows,image->filter,exception);
  else
    if (((SampleFactor*columns) < 128) || ((SampleFactor*rows) < 128))
      thumbnail_image=ResizeImage(image,columns,rows,image->filter,exception);
    else
      {
        Image
          *sample_image;

        sample_image=SampleImage(image,SampleFactor*columns,SampleFactor*rows,
          exception);
        if (sample_image == (Image *) NULL)
          return((Image *) NULL);
        thumbnail_image=ResizeImage(sample_image,columns,rows,image->filter,
          exception);
        sample_image=DestroyImage(sample_image);
      }
  if (thumbnail_image == (Image *) NULL)
    return(thumbnail_image);
  (void) ParseAbsoluteGeometry("0x0+0+0",&thumbnail_image->page);
  if (thumbnail_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(thumbnail_image,OpaqueAlphaChannel,exception);
  thumbnail_image->depth=8;
  thumbnail_image->interlace=NoInterlace;
  /*
    Strip all profiles except color profiles.
  */
  ResetImageProfileIterator(thumbnail_image);
  for (name=GetNextImageProfile(thumbnail_image); name != (const char *) NULL; )
  {
    if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
      {
        /*
          Deleting invalidates the iterator; restart the scan.
        */
        (void) DeleteImageProfile(thumbnail_image,name);
        ResetImageProfileIterator(thumbnail_image);
      }
    name=GetNextImageProfile(thumbnail_image);
  }
  (void) DeleteImageProperty(thumbnail_image,"comment");
  (void) CopyMagickString(value,image->magick_filename,MagickPathExtent);
  if (strstr(image->magick_filename,"//") == (char *) NULL)
    (void) FormatLocaleString(value,MagickPathExtent,"file://%s",
      image->magick_filename);
  (void) SetImageProperty(thumbnail_image,"Thumb::URI",value,exception);
  (void) CopyMagickString(value,image->magick_filename,MagickPathExtent);
  if ( GetPathAttributes(image->filename,&attributes) != MagickFalse )
    {
      (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
        attributes.st_mtime);
      (void) SetImageProperty(thumbnail_image,"Thumb::MTime",value,exception);
    }
  /*
    Fix: the unconditional FormatLocaleString() of attributes.st_mtime that
    used to follow here was removed -- when GetPathAttributes() fails,
    'attributes' is uninitialized, so reading st_mtime was undefined
    behavior; the formatted value was dead anyway (immediately overwritten
    by FormatMagickSize() below).
  */
  (void) FormatMagickSize(GetBlobSize(image),MagickFalse,"B",MagickPathExtent,
    value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Size",value,exception);
  (void) FormatLocaleString(value,MagickPathExtent,"image/%s",image->magick);
  LocaleLower(value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Mimetype",value,exception);
  url=GetMagickHomeURL();
  (void) SetImageProperty(thumbnail_image,"software",url,exception);
  url=DestroyString(url);
  (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
    image->magick_columns);
  (void) SetImageProperty(thumbnail_image,"Thumb::Image::Width",value,
    exception);
  (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
    image->magick_rows);
  (void) SetImageProperty(thumbnail_image,"Thumb::Image::Height",value,
    exception);
  (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
    GetImageListLength(image));
  (void) SetImageProperty(thumbnail_image,"Thumb::Document::Pages",value,
    exception);
  return(thumbnail_image);
}
|
gost_fmt_plug.c | /*
* GOST 3411 cracker patch for JtR. Hacked together during
* May of 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>,
* Sergey V. <sftp.mtuci at gmail com>, and JimF
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
* Sergey V. <sftp.mtuci at gmail com>, and JimF
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*
* Input Format => user:gost-hash;
* user:$gost$gost-hash;
* user:$gost-cp$gost-cryptopro-hash;
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_gost;
#elif FMT_REGISTERS_H
john_register_one(&fmt_gost);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "gost.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 512 // tuned K8-dual HT
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "gost"
#define FORMAT_NAME "GOST R 34.11-94"
#define FORMAT_TAG "$gost$"
#define TAG_LENGTH 6
#define FORMAT_TAG_CP "$gost-cp$"
#define TAG_CP_LENGTH 9
#if !defined(USE_GCC_ASM_IA32) && defined(USE_GCC_ASM_X64)
#define ALGORITHM_NAME "64/64"
#else
#define ALGORITHM_NAME "32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define CIPHERTEXT_LENGTH 64
#define BINARY_SIZE 32
#define SALT_SIZE 1
#define SALT_ALIGN 1
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static struct fmt_tests gost_tests[] = {
{"ce85b99cc46752fffee35cab9a7b0278abb4c2d2055cff685af4912c49490f8d", ""},
{"d42c539e367c66e9c88a801f6649349c21871b4344c6a573f849fdce62f314dd", "a"},
{FORMAT_TAG "ce85b99cc46752fffee35cab9a7b0278abb4c2d2055cff685af4912c49490f8d", ""},
{FORMAT_TAG "d42c539e367c66e9c88a801f6649349c21871b4344c6a573f849fdce62f314dd", "a"},
{FORMAT_TAG "ad4434ecb18f2c99b60cbe59ec3d2469582b65273f48de72db2fde16a4889a4d", "message digest"},
{FORMAT_TAG "0886f91e7fcaff65eb2635a1a4c9f203003e0ce5ea74b72fc6462cc72649694e",
"This is very very long pass phrase for test gost hash function."},
{FORMAT_TAG_CP "981e5f3ca30c841487830f84fb433e13ac1101569b9c13584ac483234cd656c0", ""},
{FORMAT_TAG_CP "e74c52dd282183bf37af0079c9f78055715a103f17e3133ceff1aacf2f403011", "a"},
{FORMAT_TAG_CP "bc6041dd2aa401ebfa6e9886734174febdb4729aa972d60f549ac39b29721ba0", "message digest"},
{FORMAT_TAG_CP "5394adfacb65a9ac5781c3080b244c955a9bf03befd51582c3850b8935f80762",
"This is very very long pass phrase for test gost hash function."},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[8];
static int is_cryptopro; /* non 0 for CryptoPro hashes */
/*
 * One-time format setup: build the GOST lookup tables and allocate the
 * candidate-key and digest buffers, sized for the (possibly OpenMP-scaled)
 * maximum number of keys handled per crypt_all() call.
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int threads = omp_get_max_threads();

	/* Give each thread OMP_SCALE keys per batch to amortize fork/join. */
	self->params.min_keys_per_crypt *= threads;
	threads *= OMP_SCALE;
	self->params.max_keys_per_crypt *= threads;
#endif
	gost_init_table();
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
}
/* Release the buffers allocated in init(). MEM_FREE NULLs the pointers. */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}
/*
 * Accept a bare 64-hex-digit hash, or one prefixed by $gost$ / $gost-cp$.
 * Returns 1 when the remainder is exactly CIPHERTEXT_LENGTH hex characters.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *start, *end;

	start = ciphertext;
	if (!strncmp(start, FORMAT_TAG, TAG_LENGTH))
		start += TAG_LENGTH;
	else if (!strncmp(start, FORMAT_TAG_CP, TAG_CP_LENGTH))
		start += TAG_CP_LENGTH;

	/* atoi16 maps non-hex characters to 0x7F. */
	end = start;
	while (atoi16[ARCH_INDEX(*end)] != 0x7F)
		end++;

	return *end == 0 && end - start == CIPHERTEXT_LENGTH;
}
/*
 * Canonicalize a ciphertext: ensure it carries a tag ($gost$ by default,
 * $gost-cp$ preserved) and lower-case the hex digits. Assumes valid() has
 * already accepted the string, so the hash part is exactly
 * CIPHERTEXT_LENGTH characters followed by NUL.
 */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[TAG_CP_LENGTH + CIPHERTEXT_LENGTH + 1];
	char *dst;

	if (!strncmp(ciphertext, FORMAT_TAG_CP, TAG_CP_LENGTH)) {
		/* CryptoPro variant keeps its own tag. */
		strcpy(out, FORMAT_TAG_CP);
		dst = out + TAG_CP_LENGTH;
		ciphertext += TAG_CP_LENGTH;
	} else {
		strcpy(out, FORMAT_TAG);
		dst = out + TAG_LENGTH;
		if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
			ciphertext += TAG_LENGTH;
	}
	/* +1 copies the terminating NUL along with the hash. */
	memcpy(dst, ciphertext, CIPHERTEXT_LENGTH + 1);
	strlwr(dst);
	return out;
}
/*
 * The "salt" is a single flag byte: 0 for plain GOST ($gost$), 1 for the
 * CryptoPro S-box variant. Returned storage is static (SALT_SIZE == 1).
 */
static void *get_salt(char *ciphertext)
{
	static char flavor;

	flavor = strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) ? 1 : 0;
	return &flavor;
}
/* Select hash flavor for subsequent crypt_all() calls: 0 = GOST, 1 = CryptoPro. */
static void set_salt(void *salt)
{
	is_cryptopro = *(char*)salt;
}
/*
 * Decode the hex hash (after its tag) into BINARY_SIZE raw bytes.
 * The output buffer is static, allocated once on first use.
 */
static void *get_binary(char *ciphertext)
{
	static unsigned char *out;
	char *p;
	int i;

	if (!out)
		out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);

	p = ciphertext;
	p += strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) ? TAG_CP_LENGTH
	                                                 : TAG_LENGTH;

	for (i = 0; i < BINARY_SIZE; i++, p += 2)
		out[i] = (atoi16[ARCH_INDEX(p[0])] << 4) |
		          atoi16[ARCH_INDEX(p[1])];

	return out;
}
/*
 * Partial-hash accessors for the cracker's hash tables: each returns the
 * low PH_MASK_N bits of the first 32-bit word of the computed digest.
 */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
/*
 * Hash all 'count' queued candidate keys into crypt_out[].
 *
 * Fix: the for-loop was inside the #ifdef _OPENMP block, so non-OpenMP
 * builds executed the body exactly once (index 0) and left the remaining
 * count-1 digests stale. Only the pragma is conditional now, matching the
 * usual JtR pattern.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
	{
		gost_ctx ctx;

		/* is_cryptopro is set per-salt by set_salt(). */
		if (is_cryptopro)
			john_gost_cryptopro_init(&ctx);
		else
			john_gost_init(&ctx);

		john_gost_update(&ctx, (const unsigned char*)saved_key[index],
		                 strlen(saved_key[index]));
		john_gost_final(&ctx, (unsigned char *)crypt_out[index]);
	}
	return count;
}
/*
 * Fast first-stage comparison: does any computed digest's first 32-bit
 * word match the target binary's first word?
 */
static int cmp_all(void *binary, int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (*(ARCH_WORD_32*)binary == crypt_out[i][0])
			return 1;
	return 0;
}
/* Full comparison of one candidate's digest against the target binary. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* cmp_one() already compared all BINARY_SIZE bytes; nothing left to verify. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
static void set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Return the stored (possibly truncated) candidate plaintext. */
static char *get_key(int index)
{
	return saved_key[index];
}
/*
 * Format descriptor registered with the JtR core.
 * Field order follows struct fmt_main / fmt_params / fmt_methods.
 */
struct fmt_main fmt_gost = {
	{
		/* fmt_params */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_SPLIT_UNIFIES_CASE,
		{ NULL },
		gost_tests
	}, {
		/* fmt_methods */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
pr80809-3.c | /* PR middle-end/80809 */
/* { dg-do run } */
/*
 * Build a VLA v[x] = {0..x-1}, then spawn 16 OpenMP tasks, each with its own
 * firstprivate copies of z and v; task i sets z to i, adds it into v and
 * accumulates v into shared w[i]. Verifies w[i] == (x-1)*x/2 + x*i.
 */
__attribute__((noinline, noclone)) void
foo (int x)
{
  int i, v[x], w[16];
  for (i = 0; i < x; i++)
    v[i] = i;
  for (i = 0; i < 16; i++)
    w[i] = 0;
#pragma omp parallel
#pragma omp single
  {
    int z[x];
    /* Fix: zero the whole VLA; the previous loop body wrote z[0] = 0 on
       every pass, ignoring the index and leaving z[1..x-1] uninitialized
       at the firstprivate(z) capture below.  */
    for (i = 0; i < x; i++)
      z[i] = 0;
    for (i = 0; i < 16; i++)
#pragma omp task firstprivate (z) firstprivate (v)
      {
	int j;
	for (j = 0; j < x; j++)
	  z[j] = i;
	for (j = 0; j < x; j++)
	  v[j] += z[j];
	for (j = 0; j < x; j++)
	  w[i] += v[j];
      }
  }
  for (i = 0; i < 16; i++)
    if (w[i] != (x - 1) * x / 2 + x * i)
      __builtin_abort ();
}
/* Driver: exercise foo with a small, medium and large VLA size. */
int
main ()
{
  foo (4);
  foo (27);
  foo (196);
  return 0;
}
|
mult_impl_basic.h | #ifndef _MULT_IMPL_BASIC_H
#define _MULT_IMPL_BASIC_H
#define DO_REORDER //Testing on my laptop indicated its better to reorder the matrices to improve cache usage
//Implementations for meson field contractions
template<typename mf_Policies,
         template <typename> class lA2AfieldL, template <typename> class lA2AfieldR,
         template <typename> class rA2AfieldL, template <typename> class rA2AfieldR
         >
class _mult_impl{ //necessary to avoid an annoying ambigous overload when mesonfield friends mult
public:
  //Matrix product of meson field pairs
  //out(t1,t4) = l(t1,t2) * r(t3,t4) (The stored timeslices are only used to unpack TimePackedIndex so it doesn't matter if t2 and t3 are thrown away; their indices are contracted over hence the times are not needed)
  //
  //node_local == true: every node computes the full product and the final
  //cross-node reduction (out.nodeSum()) is skipped; otherwise the ni*nk
  //output elements are split over nodes via getNodeWork() and summed at the end.
  static void mult(A2AmesonField<mf_Policies,lA2AfieldL,rA2AfieldR> &out, const A2AmesonField<mf_Policies,lA2AfieldL,lA2AfieldR> &l, const A2AmesonField<mf_Policies,rA2AfieldL,rA2AfieldR> &r, const bool node_local){
    typedef typename mf_Policies::ScalarComplexType ScalarComplexType;

    //out must not alias both inputs at once
    assert( (void*)&out != (void*)&l || (void*)&out != (void*)&r );

    //The contracted index pair (l columns, r rows) must live in the same space
    if(! l.getColParams().paramsEqual( r.getRowParams() ) ){
      if(!UniqueID()){
        printf("mult(): Illegal matrix product: underlying vector parameters must match\n"); fflush(stdout);
        std::cout << "left-column: " << l.getColParams().print() << "\n";
        std::cout << "right-row: " << r.getRowParams().print() << "\n";
        std::cout.flush();
      }
      exit(-1);
    }

    out.setup(l.getRowParams(),r.getColParams(), l.tl, r.tr ); //zeroes output, so safe to re-use

    int ni = l.getNrows();
    int nk = r.getNcols();

    //Distribute the ni*nk output elements across nodes (all local if node_local)
    int work = ni*nk;
    int node_work, node_off; bool do_work;
    getNodeWork(work,node_work,node_off,do_work,node_local);

    typedef typename A2AmesonField<mf_Policies,lA2AfieldL,lA2AfieldR>::RightDilutionType LeftDilutionType;
    typedef typename A2AmesonField<mf_Policies,rA2AfieldL,rA2AfieldR>::LeftDilutionType RightDilutionType;

    ModeContractionIndices<LeftDilutionType,RightDilutionType> j_ind2(l.getColParams());

    if(do_work){
      Float time = -dclock();

      modeIndexSet lmodeparams; lmodeparams.time = l.tr;
      modeIndexSet rmodeparams; rmodeparams.time = r.tl;

      int nj = j_ind2.getNindices(lmodeparams,rmodeparams);

      //complex mult re = re*re - im*im, im = re*im + im*re //6 flops
      //complex add 2 flops
      Float flops_total = Float(ni)*Float(nk)*Float(nj)*8.;

      //Map the contracted index j to the packed column index of l / row index of r
      int jlmap[nj], jrmap[nj];
      for(int j = 0; j < nj; j++)
        j_ind2.getBothIndices(jlmap[j],jrmap[j],j,lmodeparams,rmodeparams);

# ifndef DO_REORDER
      //Direct triple loop over the packed indices
#pragma omp parallel for
      for(int ik = node_off; ik < node_off + node_work; ++ik){
        int i = ik % ni;
        int k = ik / ni;
        for(int j = 0; j < nj; j++)
          out(i,k) += l(i,jlmap[j]) * r(jrmap[j],k);
      }
# else
      //Reorder so the contracted index runs contiguously (better cache usage)
      A2AmesonField<mf_Policies,lA2AfieldL,lA2AfieldR> lreord;
      l.colReorder(lreord,jlmap,nj);
      A2AmesonField<mf_Policies,rA2AfieldL,rA2AfieldR> rreord;
      r.rowReorder(rreord,jrmap,nj);

      //A2AmesonField<mf_Policies,rA2AfieldR,rA2AfieldL> rreord_T;
      //rreord.transpose(rreord_T); //more efficient memory access

      static const int lcol_stride = 1;
      int rrow_stride = rreord.getNcols();

#pragma omp parallel for
      for(int ik = node_off; ik < node_off + node_work; ++ik){
        int i = ik % ni;
        int k = ik / ni;

        ScalarComplexType const* lbase = &lreord(i,0);
        ScalarComplexType const* rbase = &rreord(0,k);

        //std::complex<mf_Complex> const* rbase = &rreord_T(k,0);

        //Dot product along the reordered contracted index
        for(int j = 0; j < nj; ++j){
          out(i,k) += (*lbase)*(*rbase);
          lbase += lcol_stride;
          rbase += rrow_stride;
          //++lbase;
          //++rbase;
        }
      }
# endif

      time += dclock();
      Float flops_per_sec = flops_total/time;
      if(!UniqueID()) printf("node mult flops/s %g (time %f total flops %g)\n",flops_per_sec,time,flops_total);
    }
    if(!node_local) out.nodeSum();
  }
};
#endif
|
Ransac.h | /**
* Copyright (c) 2021 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#pragma once
#include "saiga/core/util/Thread/omp.h"
#include "saiga/vision/VisionTypes.h"
namespace Saiga
{
// This seed is used for all ransac classes.
// You can change this in your application for example to:
// ransacSeed = std::chrono::system_clock::to_time_t(std::chrono::system_clock::now());
SAIGA_VISION_API extern uint64_t ransacRandomSeed;
struct RansacParameters
{
    // Hard upper bound on RANSAC iterations; must be set to a positive
    // value before RansacBase::init() (asserted there).
    int maxIterations = -1;

    // compared to the value which is returned from computeResidual.
    // usually you want to return the squared norm
    // NOTE(review): intentionally has no default -- callers must assign it.
    double residualThreshold;

    // expected maximum number of N
    // internal data structures will be reserved to this size
    int reserveN = 0;

    // Number of omp threads in that group
    // Note: must equal the size of the omp team that later calls
    // RansacBase::compute() (asserted there).
    int threads = 1;
};
// CRTP base class for RANSAC estimators. Derived must provide:
//   bool computeModel(const Subset&, Model&)   - fit a model to a minimal sample
//   <residual> computeResidual(const Model&, j) - residual of point j under model
// compute() is designed to be called from inside an omp parallel region with
// exactly params.threads threads (see the asserts below).
template <typename Derived, typename Model, int ModelSize>
class RansacBase
{
   public:
    // Pre-allocate all per-iteration storage and seed one RNG per thread.
    // Must be called outside any parallel region.
    void init(const RansacParameters& _params)
    {
        params = _params;
        SAIGA_ASSERT(params.maxIterations > 0);
        SAIGA_ASSERT(OMP::getNumThreads() == 1);
        numInliers.resize(params.maxIterations);
        models.resize(params.maxIterations);

        residuals.resize(params.maxIterations);
        for (auto&& r : residuals) r.reserve(params.reserveN);

        inliers.resize(params.maxIterations);
        for (auto&& r : inliers) r.reserve(params.reserveN);

        SAIGA_ASSERT(params.threads >= 1);
        generators.resize(params.threads);
        threadLocalBestModel.resize(params.threads);
        for (int i = 0; i < params.threads; ++i)
        {
            // Deterministic but distinct seed per thread.
            generators[i].seed(ransacRandomSeed + 6643838879UL * i);
        }
    }

    const RansacParameters& Params() const { return params; }

   protected:
    // indices of subset
    using Subset = std::array<int, ModelSize>;

    RansacBase() {}
    RansacBase(const RansacParameters& _params) { init(_params); }

    // Run all maxIterations trials (work-shared over the calling omp team)
    // and return the index of the iteration with the most inliers.
    int compute(int _N)
    {
        SAIGA_ASSERT(params.maxIterations > 0);
        SAIGA_ASSERT(OMP::getNumThreads() == params.threads);

        int tid = OMP::getThreadNum();

        // compute random sample subsets
        std::uniform_int_distribution<int> dis(0, _N - 1);

        auto& gen       = generators[tid];
        auto& bestModel = threadLocalBestModel[tid]();
        bestModel       = {0, 0};

#pragma omp for
        for (int it = 0; it < params.maxIterations; ++it)
        {
            auto& model     = models[it];
            auto& inlier    = inliers[it];
            auto& residual  = residuals[it];
            auto& numInlier = numInliers[it];

            numInlier = 0;
            residual.resize(_N);
            inlier.resize(_N);

            // Draw a minimal sample. Indices may repeat; degenerate samples
            // are expected to be rejected by computeModel returning false.
            Subset set;
            for (auto j : Range(0, ModelSize))
            {
                auto idx = dis(gen);
                set[j]   = idx;
            }

            if (!derived().computeModel(set, model)) continue;

            // Score the model over all points.
            for (int j = 0; j < _N; ++j)
            {
                residual[j] = derived().computeResidual(model, j);
                bool inl    = residual[j] < params.residualThreshold;
                inlier[j]   = inl;
                numInlier += inl;
            }

            if (numInlier > bestModel.first)
            {
                bestModel.first  = numInlier;
                bestModel.second = it;
            }
        }

#pragma omp single
        {
            // Serial reduction over the per-thread winners; the implicit
            // barrier of 'omp single' publishes bestIdx to the whole team.
            N             = _N;
            bestIdx       = 0;
            int bestCount = 0;
            for (int th = 0; th < params.threads; ++th)
            {
                auto&& thbestModel = threadLocalBestModel[th]();
                auto inl           = thbestModel.first;
                auto it            = thbestModel.second;
                // std::cout << "th best " << th << " " << it << " " << inl << std::endl;
                if (inl > bestCount)
                {
                    bestCount = inl;
                    bestIdx   = it;
                }
            }
        }
        return bestIdx;
    }

    // total number of sample points
    int N;
    RansacParameters params;
    AlignedVector<std::vector<double>> residuals;
    AlignedVector<std::vector<char>> inliers;
    AlignedVector<int> numInliers;
    AlignedVector<Model> models;

    // make sure we don't run into false sharing
    AlignedVector<AlignedStruct<std::pair<int, int>, SAIGA_CACHE_LINE_SIZE>> threadLocalBestModel;

    // each thread has one generator
    std::vector<std::mt19937> generators;

    int bestIdx;

   private:
    Derived& derived() { return *static_cast<Derived*>(this); }
};
// Estimate how many RANSAC iterations are needed to draw at least one
// outlier-free minimal sample with the requested success probability.
//
// input_N:       total number of correspondences available.
// probability:   desired probability of at least one all-inlier sample (e.g. 0.99).
// minInliers:    expected inlier count among input_N (epsilon = minInliers / input_N).
// maxIterations: hard upper bound on the result.
// modelPoints:   size of the minimal sample set; defaults to 3, the value the
//                previous hard-coded implementation used (backward compatible).
//
// Returns a value clamped to [1, maxIterations].
inline int RansacIterationsFromProbability(int input_N, double probability, int minInliers, int maxIterations,
                                           int modelPoints = 3)
{
    // Adjust parameters according to number of correspondences.
    float epsilon = (float)minInliers / input_N;

    // Set RANSAC iterations according to probability, epsilon, and max iterations.
    int nIterations;
    if (minInliers >= input_N)
        nIterations = 1;  // every sample is an inlier; a single draw suffices
    else if (probability >= 1.0)
        nIterations = maxIterations;  // log(0) would overflow the int cast; saturate
    else
        nIterations = ceil(log(1 - probability) / log(1 - pow(epsilon, modelPoints)));

    nIterations = std::max(1, std::min(nIterations, maxIterations));
    return nIterations;
}
// double inlierProb = 0.7;
// double successProb = 0.999;
// double k = log(1 - successProb) / log(1 - pow(inlierProb, 5));
// std::cout << k << std::endl;
} // namespace Saiga
|
omp_hash_set.h | #ifndef omp_hash_set_H_
#define omp_hash_set_H_
#include <array>
#include <functional>
#include <memory>
#include <vector>
#include "omp.h"
// A high performance concurrent hash map based on OpenMP.
// Concurrent hash set: chained buckets guarded by a fixed pool of segment
// locks (bucket_id % n_segments selects the lock).
template <class K, class H = std::hash<K>>
class omp_hash_set {
 public:
  omp_hash_set();

  ~omp_hash_set();

  // Set the number of buckets in the container to be at least the specified value.
  void reserve(const size_t n_buckets) {
    const size_t n_rehashing_buckets = get_n_rehashing_buckets(n_buckets);
    rehash(n_rehashing_buckets);
  };

  // Return the number of buckets.
  size_t get_n_buckets() const { return n_buckets; };

  // Return the current load factor (the ratio between the number of keys and buckets).
  double get_load_factor() const { return static_cast<double>(n_keys) / n_buckets; }

  // Return the max load factor beyond which an automatic rehashing will occur.
  double get_max_load_factor() const { return max_load_factor; }

  // Set the max load factor beyond which an automatic rehashing will occur.
  void set_max_load_factor(const double max_load_factor) {
    this->max_load_factor = max_load_factor;
  }

  // Return the number of keys.
  size_t get_n_keys() const { return n_keys; }

  // Set the specified key.
  void add(const K& key);

  // Remove the specified key.
  void remove(const K& key);

  // Test if the specified key exists.
  bool has(const K& key);

  // Return the reduced value of the mapped values of all the keys.
  // If no key exists, return the default value.
  template <class W>
  W map_reduce(
      const std::function<W(const K&)>& mapper,
      const std::function<void(W&, const W&)>& reducer,
      const W& default_value);

  // Apply the handler to all the keys.
  void apply(const std::function<void(const K&)>& handler);

  // Clear all keys.
  void clear();

 private:
  size_t n_keys;

  size_t n_buckets;

  double max_load_factor;

  size_t n_threads;

  // The entire hash map is divided into several segments (depends on how many threads).
  // Each segment can be locked and accessed independently in parallel.
  size_t n_segments;

  H hasher;

  std::vector<omp_lock_t> segment_locks;

  // For parallel rehashing (Require omp_set_nested(1)).
  std::vector<omp_lock_t> rehashing_segment_locks;

  constexpr static size_t N_INITIAL_BUCKETS = 11;

  constexpr static size_t N_SEGMENTS_PER_THREAD = 7;

  constexpr static double DEFAULT_MAX_LOAD_FACTOR = 1.0;

  // Singly-linked chain node for one bucket.
  struct hash_node {
    K key;
    std::unique_ptr<hash_node> next;
    hash_node(const K& key) : key(key){};
  };

  std::vector<std::unique_ptr<hash_node>> buckets;

  // Set the number of buckets to be at least the number of current keys times max load factor.
  void rehash() { reserve(n_keys / max_load_factor); }

  void rehash(const size_t n_rehashing_buckets);

  // Get the number of hash buckets to use.
  // This number shall be larger than or equal to the specified number.
  size_t get_n_rehashing_buckets(const size_t n_buckets) const;

  // Apply node_handler to the hash node which has the specific key.
  // If the key does not exist, apply to the unassociated node from the corresponding bucket.
  void hash_node_apply(
      const K& key, const std::function<void(std::unique_ptr<hash_node>&)>& node_handler);

  // Apply node_handler to all the hash nodes.
  void hash_node_apply(const std::function<void(std::unique_ptr<hash_node>&)>& node_handler);

  // Recursively find the node with the specified key on the list starting from the node specified.
  // Then apply the specified handler to that node.
  // If the key does not exist, apply the handler to the unassociated node at the end of the list.
  void hash_node_apply_recursive(
      std::unique_ptr<hash_node>& node,
      const K& key,
      const std::function<void(std::unique_ptr<hash_node>&)>& node_handler);

  // Recursively apply the handler to each node on the list from the node specified (post-order).
  void hash_node_apply_recursive(
      std::unique_ptr<hash_node>& node,
      const std::function<void(std::unique_ptr<hash_node>&)>& node_handler);

  void lock_all_segments();

  void unlock_all_segments();
};
// Construct an empty set with the initial bucket count and one OpenMP lock
// per segment; the segment count scales with the available threads.
template <class K, class H>
omp_hash_set<K, H>::omp_hash_set() {
  n_keys = 0;
  n_buckets = N_INITIAL_BUCKETS;
  buckets.resize(n_buckets);
  max_load_factor = DEFAULT_MAX_LOAD_FACTOR;
  n_threads = omp_get_max_threads();
  n_segments = n_threads * N_SEGMENTS_PER_THREAD;
  segment_locks.resize(n_segments);
  rehashing_segment_locks.resize(n_segments);
  for (auto& lock : segment_locks) omp_init_lock(&lock);
  for (auto& lock : rehashing_segment_locks) omp_init_lock(&lock);
}
// Destructor: drop all keys, then release every OpenMP lock.
template <class K, class H>
omp_hash_set<K, H>::~omp_hash_set() {
  clear();
  for (auto& lock : segment_locks) omp_destroy_lock(&lock);
  for (auto& lock : rehashing_segment_locks) omp_destroy_lock(&lock);
}
// Grow the bucket array to n_rehashing_buckets and redistribute every node.
// All segments stay locked for the duration, so no concurrent mutation can
// observe the intermediate state; the redistribution itself runs in parallel
// under the dedicated rehashing locks (requires omp_set_nested(1)).
template <class K, class H>
void omp_hash_set<K, H>::rehash(const size_t n_rehashing_buckets) {
  lock_all_segments();

  // No decrease in the number of buckets.
  if (n_buckets >= n_rehashing_buckets) {
    unlock_all_segments();
    return;
  }

  // Rehash.
  std::vector<std::unique_ptr<hash_node>> rehashing_buckets(n_rehashing_buckets);
  const auto& node_handler = [&](std::unique_ptr<hash_node>& node) {
    const auto& rehashing_node_handler = [&](std::unique_ptr<hash_node>& rehashing_node) {
      // Steal the node from the old table and sever its old chain link.
      rehashing_node = std::move(node);
      rehashing_node->next.reset();
    };
    const K& key = node->key;
    const size_t hash_value = hasher(key);
    const size_t bucket_id = hash_value % n_rehashing_buckets;
    const size_t segment_id = bucket_id % n_segments;
    auto& lock = rehashing_segment_locks[segment_id];
    omp_set_lock(&lock);
    hash_node_apply_recursive(rehashing_buckets[bucket_id], key, rehashing_node_handler);
    omp_unset_lock(&lock);
  };
#pragma omp parallel for
  for (size_t i = 0; i < n_buckets; i++) {
    hash_node_apply_recursive(buckets[i], node_handler);
  }
  buckets = std::move(rehashing_buckets);
  n_buckets = n_rehashing_buckets;
  unlock_all_segments();
}
// Returns a number that is greater than or equal to n_buckets_in.
// That number is either a prime number itself, or a product of a few of the
// listed primes, which keeps modulo-based bucket selection well distributed.
template <class K, class H>
size_t omp_hash_set<K, H>::get_n_rehashing_buckets(const size_t n_buckets_in) const {
  // Fix: 15858 (even, not prime) replaced by the prime 15859 both in the
  // table and as DIVISION_FACTOR, so the "product of primes" invariant
  // documented above actually holds.
  constexpr size_t PRIME_NUMBERS[] = {11,    17,    29,    47,    79,    127,   211,
                                      337,   547,   887,   1433,  2311,  3739,  6053,
                                      9791,  15859, 25667, 41539, 67213, 104729};
  constexpr size_t N_PRIME_NUMBERS = sizeof(PRIME_NUMBERS) / sizeof(size_t);
  constexpr size_t LAST_PRIME_NUMBER = PRIME_NUMBERS[N_PRIME_NUMBERS - 1];
  constexpr size_t DIVISION_FACTOR = 15859;
  size_t remaining_factor = n_buckets_in;
  size_t n_rehashing_buckets = 1;
  // Peel off up to three DIVISION_FACTOR factors for very large requests.
  for (size_t i = 0; i < 3; i++) {
    if (remaining_factor > LAST_PRIME_NUMBER) {
      remaining_factor /= DIVISION_FACTOR;
      n_rehashing_buckets *= DIVISION_FACTOR;
    }
  }
  if (remaining_factor > LAST_PRIME_NUMBER) throw std::invalid_argument("n_buckets too large");
  // Binary search for the smallest listed prime >= remaining_factor.
  size_t left = 0, right = N_PRIME_NUMBERS - 1;
  while (left < right) {
    size_t mid = (left + right) / 2;
    if (PRIME_NUMBERS[mid] < remaining_factor) {
      left = mid + 1;
    } else {
      right = mid;
    }
  }
  n_rehashing_buckets *= PRIME_NUMBERS[left];
  return n_rehashing_buckets;
}
// Insert key if absent (no-op when already present). Thread-safe: the bucket
// chain is mutated under its segment lock inside hash_node_apply, and the
// key count is bumped atomically.
template <class K, class H>
void omp_hash_set<K, H>::add(const K& key) {
  const auto& node_handler = [&](std::unique_ptr<hash_node>& node) {
    // A null node means the handler reached the empty tail slot: key absent.
    if (!node) {
      node.reset(new hash_node(key));
#pragma omp atomic
      n_keys++;
    }
  };
  hash_node_apply(key, node_handler);
  // NOTE(review): this load-factor check reads n_keys/n_buckets without a
  // lock; racing adds may trigger an extra (harmless) rehash attempt.
  if (n_keys >= n_buckets * max_load_factor) rehash();
}
template <class K, class H>
void omp_hash_set<K, H>::remove(const K& key) {
  // Unlinks the node holding key, if any.  The handler receives the owning
  // pointer of the matching node, so splicing in its successor removes it.
  const auto& unlink_if_present = [&](std::unique_ptr<hash_node>& node) {
    if (!node) return;  // Key was not in the set.
    node = std::move(node->next);
#pragma omp atomic
    n_keys--;
  };
  hash_node_apply(key, unlink_if_present);
}
template <class K, class H>
bool omp_hash_set<K, H>::has(const K& key) {
  // Returns true iff key is currently stored.  The handler is invoked with
  // either the matching node or the null chain tail.
  bool found = false;
  const auto& record_presence = [&](const std::unique_ptr<hash_node>& node) {
    found = found || node != nullptr;
  };
  hash_node_apply(key, record_presence);
  return found;
}
template <class K, class H>
template <class W>
W omp_hash_set<K, H>::map_reduce(
    const std::function<W(const K&)>& mapper,
    const std::function<void(W&, const W&)>& reducer,
    const W& default_value) {
  // Maps every key and reduces into one value per thread first, so the
  // parallel traversal needs no synchronization; the per-thread partial
  // results are then folded together sequentially.
  std::vector<W> partials(n_threads, default_value);
  const auto& accumulate = [&](std::unique_ptr<hash_node>& node) {
    const size_t tid = omp_get_thread_num();
    reducer(partials[tid], mapper(node->key));
  };
  hash_node_apply(accumulate);
  W result = default_value;
  for (const auto& partial : partials) reducer(result, partial);
  return result;
}
template <class K, class H>
void omp_hash_set<K, H>::apply(const std::function<void(const K&)>& handler) {
  // Invokes handler on every stored key via the full parallel traversal.
  const auto& visit_key = [&](std::unique_ptr<hash_node>& node) {
    handler(node->key);
  };
  hash_node_apply(visit_key);
}
template <class K, class H>
void omp_hash_set<K, H>::clear() {
  // Removes every key and shrinks the table back to its initial size.
  lock_all_segments();
#pragma omp parallel for
  for (size_t i = 0; i < n_buckets; i++) {
    buckets[i].reset();
  }
  buckets.resize(N_INITIAL_BUCKETS);
  // BUG FIX: n_buckets must shrink together with the bucket vector.  The
  // original left n_buckets at its pre-clear value, so a later
  // hash_node_apply() computed `hash % n_buckets` against the old (larger)
  // size and indexed past the end of `buckets`.
  n_buckets = N_INITIAL_BUCKETS;
  // (The former second reset loop was removed: every slot surviving the
  // resize was already reset above, and slots added by resize are null.)
  n_keys = 0;
  unlock_all_segments();
}
template <class K, class H>
void omp_hash_set<K, H>::hash_node_apply(
    const K& key, const std::function<void(std::unique_ptr<hash_node>&)>& node_handler) {
  // Applies node_handler to key's chain slot under the owning segment lock.
  // n_buckets is snapshotted before locking; if a concurrent rehash changed
  // it in between, the computed bucket id is stale, so release and retry.
  const size_t hash_value = hasher(key);
  for (;;) {
    const size_t n_buckets_snapshot = n_buckets;
    const size_t bucket_id = hash_value % n_buckets_snapshot;
    auto& lock = segment_locks[bucket_id % n_segments];
    omp_set_lock(&lock);
    if (n_buckets_snapshot == n_buckets) {
      hash_node_apply_recursive(buckets[bucket_id], key, node_handler);
      omp_unset_lock(&lock);
      return;
    }
    // Stale snapshot: the table was rehashed while we waited for the lock.
    omp_unset_lock(&lock);
  }
}
template <class K, class H>
void omp_hash_set<K, H>::hash_node_apply(
    const std::function<void(std::unique_ptr<hash_node>&)>& node_handler) {
  // Visits every node in every bucket; all segment locks are held for the
  // whole traversal so no resize or concurrent mutation can interleave.
  lock_all_segments();
  // For a good hash function, a static schedule shall provide both a good
  // balance and speed.
#pragma omp parallel for
  for (size_t bucket_id = 0; bucket_id < n_buckets; bucket_id++) {
    hash_node_apply_recursive(buckets[bucket_id], node_handler);
  }
  unlock_all_segments();
}
template <class K, class H>
void omp_hash_set<K, H>::hash_node_apply_recursive(
    std::unique_ptr<hash_node>& node,
    const K& key,
    const std::function<void(std::unique_ptr<hash_node>&)>& node_handler) {
  // Walks the chain to the owning pointer of the node whose key matches, or
  // to the null tail if the key is absent, and hands that slot to the
  // handler.  Iterative form of the original tail recursion; it visits
  // exactly the same slot, so handlers may still insert or unlink through it.
  std::unique_ptr<hash_node>* slot = &node;
  while (*slot != nullptr && !((*slot)->key == key)) {
    slot = &(*slot)->next;
  }
  node_handler(*slot);
}
template <class K, class H>
void omp_hash_set<K, H>::hash_node_apply_recursive(
    std::unique_ptr<hash_node>& node,
    const std::function<void(std::unique_ptr<hash_node>&)>& node_handler) {
  // Post-order traversal: handle the tail of the chain before the head so
  // a handler that detaches the current node (rehashing) cannot drop the
  // not-yet-visited remainder of the chain.
  if (!node) return;
  hash_node_apply_recursive(node->next, node_handler);
  node_handler(node);
}
template <class K, class H>
void omp_hash_set<K, H>::lock_all_segments() {
  // Acquires every segment lock, in container order so that all global
  // lockers contend in the same sequence; pairs with unlock_all_segments().
  for (auto& segment_lock : segment_locks) {
    omp_set_lock(&segment_lock);
  }
}
template <class K, class H>
void omp_hash_set<K, H>::unlock_all_segments() {
  // Releases every segment lock previously taken by lock_all_segments().
  for (auto& segment_lock : segment_locks) {
    omp_unset_lock(&segment_lock);
  }
}
#endif |
rt_dpltmg.c | #include "runtime.h"
/* Dispatch one dpltmg (test-matrix generation) tile to the runtime PLASMA
   was configured with.
     Quark path: enqueue through the QUARK wrapper with the given task flags.
     OmpSs path: spawn a task whose output is the lda*n block at A.
   mtxtype  matrix kind forwarded to CORE_dpltmg
   m, n     tile dimensions; lda = leading dimension of A
   gM, gN   global matrix dimensions; m0, n0 = tile origin offsets
   seed     RNG seed (tiles are generated independently of schedule order)
   NOTE(review): if plasma->runtime matches neither constant the tile is
   silently left ungenerated -- confirm no other runtimes are possible.  */
void RT_CORE_dpltmg( Quark *quark, Quark_Task_Flags *task_flags,
                     PLASMA_enum mtxtype, int m, int n, double *A, int lda,
                     int gM, int gN, int m0, int n0, unsigned long long int seed )
{
    plasma_context_t *plasma;
    plasma = plasma_context_self();
    if (plasma->runtime == PLASMA_QUARK) {
        QUARK_CORE_dpltmg(quark, task_flags,
                          mtxtype, m, n, A, lda,
                          gM, gN, m0, n0, seed );
    } else if (plasma->runtime == PLASMA_OMPSS) {
        /* The OmpSs pragmas bind to the single statement that follows;
           [lda*n]A declares the whole tile as task output.  */
        #pragma omp target device (smp) copy_deps
        #pragma omp task out([lda*n]A) label(dpltmg)
        CORE_dpltmg_rt(mtxtype, m, n, A, lda, gM, gN, m0, n0, seed);
    }
}
/* Plain-function task body wrapping CORE_dpltmg for the OmpSs path above.
   NOTE(review): mtxtype is `int` here but `PLASMA_enum` at the call site --
   presumably PLASMA_enum is int-compatible; confirm against plasma headers.  */
void CORE_dpltmg_rt(int mtxtype, int m, int n, double *A, int lda, int gM, int gN, int m0, int n0, unsigned long long int seed)
{
    CORE_dpltmg(mtxtype, m, n, A, lda, gM, gN, m0, n0, seed);
}
|
cxx-pretty-print.c | /* Implementation of subroutines for the GNU C++ pretty-printer.
Copyright (C) 2003-2020 Free Software Foundation, Inc.
Contributed by Gabriel Dos Reis <gdr@integrable-solutions.net>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "cp-tree.h"
#include "cxx-pretty-print.h"
#include "tree-pretty-print.h"
static void pp_cxx_unqualified_id (cxx_pretty_printer *, tree);
static void pp_cxx_nested_name_specifier (cxx_pretty_printer *, tree);
static void pp_cxx_qualified_id (cxx_pretty_printer *, tree);
static void pp_cxx_template_argument_list (cxx_pretty_printer *, tree);
static void pp_cxx_type_specifier_seq (cxx_pretty_printer *, tree);
static void pp_cxx_ptr_operator (cxx_pretty_printer *, tree);
static void pp_cxx_parameter_declaration_clause (cxx_pretty_printer *, tree);
static void pp_cxx_template_parameter (cxx_pretty_printer *, tree);
static void pp_cxx_cast_expression (cxx_pretty_printer *, tree);
static void pp_cxx_typeid_expression (cxx_pretty_printer *, tree);
static void pp_cxx_unary_left_fold_expression (cxx_pretty_printer *, tree);
static void pp_cxx_unary_right_fold_expression (cxx_pretty_printer *, tree);
static void pp_cxx_binary_fold_expression (cxx_pretty_printer *, tree);
static void pp_cxx_concept_definition (cxx_pretty_printer *, tree);
/* Emit character C, first inserting a separating space if the text buffer
   already ends with that same character, so that e.g. two closing template
   brackets come out as "> >" rather than ">>".  */
static inline void
pp_cxx_nonconsecutive_character (cxx_pretty_printer *pp, int c)
{
  const char *last = pp_last_position_in_text (pp);
  const bool would_merge = last != NULL && *last == c;
  if (would_merge)
    pp_cxx_whitespace (pp);
  pp_character (pp, c);
  pp->padding = pp_none;
}
#define pp_cxx_expression_list(PP, T) \
pp_c_expression_list (PP, T)
#define pp_cxx_space_for_pointer_operator(PP, T) \
pp_c_space_for_pointer_operator (PP, T)
#define pp_cxx_init_declarator(PP, T) \
pp_c_init_declarator (PP, T)
#define pp_cxx_call_argument_list(PP, T) \
pp_c_call_argument_list (PP, T)
/* Print the scope-resolution token "::" and suppress trailing padding so
   the next token abuts it.  */
void
pp_cxx_colon_colon (cxx_pretty_printer *pp)
{
  pp_colon_colon (pp);
  pp->padding = pp_none;
}
/* Open a template-argument list: print '<', spaced away from a preceding
   '<' so consecutive brackets do not merge.  */
void
pp_cxx_begin_template_argument_list (cxx_pretty_printer *pp)
{
  pp_cxx_nonconsecutive_character (pp, '<');
}
/* Close a template-argument list: print '>', spaced away from a preceding
   '>' so nested lists do not produce the ">>" token.  */
void
pp_cxx_end_template_argument_list (cxx_pretty_printer *pp)
{
  pp_cxx_nonconsecutive_character (pp, '>');
}
/* Print separator character C followed by a space, then suppress any extra
   padding before the next token.  */
void
pp_cxx_separate_with (cxx_pretty_printer *pp, int c)
{
  pp_separate_with (pp, c);
  pp->padding = pp_none;
}
/* Expressions. */
/* conversion-function-id:
operator conversion-type-id
conversion-type-id:
type-specifier-seq conversion-declarator(opt)
conversion-declarator:
ptr-operator conversion-declarator(opt) */
/* Print the conversion-function-id for T: the keyword "operator" followed
   by the type-specifier-seq of the destination type.  */
static inline void
pp_cxx_conversion_function_id (cxx_pretty_printer *pp, tree t)
{
  pp_cxx_ws_string (pp, "operator");
  pp_cxx_type_specifier_seq (pp, TREE_TYPE (t));
}
/* Print template-id T: operand 0 is the template name, operand 1 the
   argument list, bracketed with non-merging '<' and '>'.  */
static inline void
pp_cxx_template_id (cxx_pretty_printer *pp, tree t)
{
  pp_cxx_unqualified_id (pp, TREE_OPERAND (t, 0));
  pp_cxx_begin_template_argument_list (pp);
  pp_cxx_template_argument_list (pp, TREE_OPERAND (t, 1));
  pp_cxx_end_template_argument_list (pp);
}
/* Prints the unqualified part of the id-expression T.
unqualified-id:
identifier
operator-function-id
conversion-function-id
~ class-name
template-id */
static void
pp_cxx_unqualified_id (cxx_pretty_printer *pp, tree t)
{
  enum tree_code code = TREE_CODE (t);
  switch (code)
    {
    case RESULT_DECL:
      pp->translate_string ("<return-value>");
      break;
    case OVERLOAD:
      /* Print an overload set by its first member's name.  */
      t = OVL_FIRST (t);
      /* FALLTHRU */
    case VAR_DECL:
    case PARM_DECL:
    case CONST_DECL:
    case TYPE_DECL:
    case FUNCTION_DECL:
    case NAMESPACE_DECL:
    case FIELD_DECL:
    case LABEL_DECL:
    case USING_DECL:
    case TEMPLATE_DECL:
      /* All declarations reduce to their DECL_NAME identifier.  */
      t = DECL_NAME (t);
      /* FALLTHRU */
    case IDENTIFIER_NODE:
      if (t == NULL)
	pp->translate_string ("<unnamed>");
      else if (IDENTIFIER_CONV_OP_P (t))
	pp_cxx_conversion_function_id (pp, t);
      else
	pp_cxx_tree_identifier (pp, t);
      break;
    case TEMPLATE_ID_EXPR:
      pp_cxx_template_id (pp, t);
      break;
    case BASELINK:
      pp_cxx_unqualified_id (pp, BASELINK_FUNCTIONS (t));
      break;
    case RECORD_TYPE:
    case UNION_TYPE:
    case ENUMERAL_TYPE:
    case TYPENAME_TYPE:
    case UNBOUND_CLASS_TEMPLATE:
      /* Print the type's name; for a primary template instance also print
	 its innermost template arguments.  */
      pp_cxx_unqualified_id (pp, TYPE_NAME (t));
      if (tree ti = TYPE_TEMPLATE_INFO_MAYBE_ALIAS (t))
	if (PRIMARY_TEMPLATE_P (TI_TEMPLATE (ti)))
	  {
	    pp_cxx_begin_template_argument_list (pp);
	    tree args = INNERMOST_TEMPLATE_ARGS (TI_ARGS (ti));
	    pp_cxx_template_argument_list (pp, args);
	    pp_cxx_end_template_argument_list (pp);
	  }
      break;
    case BIT_NOT_EXPR:
      /* Destructor name: '~' followed by the class name.  */
      pp_cxx_complement (pp);
      pp_cxx_unqualified_id (pp, TREE_OPERAND (t, 0));
      break;
    case TEMPLATE_TYPE_PARM:
    case TEMPLATE_TEMPLATE_PARM:
      if (template_placeholder_p (t))
	{
	  /* CTAD placeholder: print "Template<...auto...>".  */
	  t = TREE_TYPE (CLASS_PLACEHOLDER_TEMPLATE (t));
	  pp_cxx_unqualified_id (pp, TYPE_IDENTIFIER (t));
	  pp_string (pp, "<...auto...>");
	}
      else if (TYPE_IDENTIFIER (t))
	pp_cxx_unqualified_id (pp, TYPE_IDENTIFIER (t));
      else
	pp_cxx_canonical_template_parameter (pp, t);
      break;
    case TEMPLATE_PARM_INDEX:
      pp_cxx_unqualified_id (pp, TEMPLATE_PARM_DECL (t));
      break;
    case BOUND_TEMPLATE_TEMPLATE_PARM:
      pp_cxx_cv_qualifier_seq (pp, t);
      pp_cxx_unqualified_id (pp, TYPE_IDENTIFIER (t));
      pp_cxx_begin_template_argument_list (pp);
      pp_cxx_template_argument_list (pp, TYPE_TI_ARGS (t));
      pp_cxx_end_template_argument_list (pp);
      break;
    default:
      pp_unsupported_tree (pp, t);
      break;
    }
}
/* Pretty-print out the token sequence ":: template" in template codes
where it is needed to "inline declare" the (following) member as
a template. This situation arises when SCOPE of T is dependent
on template parameters. */
/* Emit the "template" keyword before a member template-id whose enclosing
   SCOPE is a dependent type, where the disambiguating keyword is required.  */
static inline void
pp_cxx_template_keyword_if_needed (cxx_pretty_printer *pp, tree scope, tree t)
{
  if (TREE_CODE (t) != TEMPLATE_ID_EXPR)
    return;
  if (TYPE_P (scope) && dependent_type_p (scope))
    pp_cxx_ws_string (pp, "template");
}
/* nested-name-specifier:
class-or-namespace-name :: nested-name-specifier(opt)
class-or-namespace-name :: template nested-name-specifier */
/* Recursively print the nested-name-specifier leading up to T, emitting
   "Outer::Inner::" style qualification and stopping at file scope or at
   the pretty-printer's current enclosing scope.  */
static void
pp_cxx_nested_name_specifier (cxx_pretty_printer *pp, tree t)
{
  /* FIXME: When diagnosing references to concepts (especially as types?)
     we end up adding too many '::' to the name. This is partially due
     to the fact that pp->enclosing_namespace is null.  */
  if (t == global_namespace)
    {
      pp_cxx_colon_colon (pp);
    }
  else if (!SCOPE_FILE_SCOPE_P (t) && t != pp->enclosing_scope)
    {
      /* Print the containing scope first, then T itself and a trailing "::".  */
      tree scope = get_containing_scope (t);
      pp_cxx_nested_name_specifier (pp, scope);
      pp_cxx_template_keyword_if_needed (pp, scope, t);
      pp_cxx_unqualified_id (pp, t);
      pp_cxx_colon_colon (pp);
    }
}
/* qualified-id:
nested-name-specifier template(opt) unqualified-id */
/* Print T as a qualified-id: its nested-name-specifier followed by the
   unqualified name.  */
static void
pp_cxx_qualified_id (cxx_pretty_printer *pp, tree t)
{
  switch (TREE_CODE (t))
    {
      /* A pointer-to-member is always qualified. */
    case PTRMEM_CST:
      pp_cxx_nested_name_specifier (pp, PTRMEM_CST_CLASS (t));
      pp_cxx_unqualified_id (pp, PTRMEM_CST_MEMBER (t));
      break;
      /* In Standard C++, functions cannot possibly be used as
	 nested-name-specifiers. However, there are situations where
	 is "makes sense" to output the surrounding function name for the
	 purpose of emphasizing on the scope kind. Just printing the
	 function name might not be sufficient as it may be overloaded; so,
	 we decorate the function with its signature too.
	 FIXME: This is probably the wrong pretty-printing for conversion
	 functions and some function templates. */
    case OVERLOAD:
      t = OVL_FIRST (t);
      /* FALLTHRU */
    case FUNCTION_DECL:
      if (DECL_FUNCTION_MEMBER_P (t))
	pp_cxx_nested_name_specifier (pp, DECL_CONTEXT (t));
      /* A constructor is displayed by its class name.  */
      pp_cxx_unqualified_id
	(pp, DECL_CONSTRUCTOR_P (t) ? DECL_CONTEXT (t) : t);
      pp_cxx_parameter_declaration_clause (pp, TREE_TYPE (t));
      break;
    case OFFSET_REF:
    case SCOPE_REF:
      /* Operand 0 is the scope, operand 1 the member.  */
      pp_cxx_nested_name_specifier (pp, TREE_OPERAND (t, 0));
      pp_cxx_unqualified_id (pp, TREE_OPERAND (t, 1));
      break;
    default:
      {
	tree scope = get_containing_scope (t);
	if (scope != pp->enclosing_scope)
	  {
	    pp_cxx_nested_name_specifier (pp, scope);
	    pp_cxx_template_keyword_if_needed (pp, scope, t);
	  }
	pp_cxx_unqualified_id (pp, t);
      }
      break;
    }
}
/* Given a value e of ENUMERAL_TYPE:
Print out the first ENUMERATOR id with value e, if one is found,
(including nested names but excluding the enum name if unscoped)
else print out the value as a C-style cast (type-id)value. */
/* Print enumeral value E by the name of the first enumerator with that
   value, qualified by its enclosing scope (the enum name itself only when
   scoped); if no enumerator matches, fall back to "(type)value".  */
static void
pp_cxx_enumeration_constant (cxx_pretty_printer *pp, tree e)
{
  tree type = TREE_TYPE (e);
  tree value = NULL_TREE;
  /* Find the name of this constant.  The v3 (mangling-oriented) flag
     suppresses the lookup entirely.  */
  if ((pp->flags & pp_c_flag_gnu_v3) == 0)
    for (value = TYPE_VALUES (type); value != NULL_TREE;
	 value = TREE_CHAIN (value))
      if (tree_int_cst_equal (DECL_INITIAL (TREE_VALUE (value)), e))
	break;
  if (value != NULL_TREE)
    {
      /* For an unscoped enum, qualify with the enum's *containing* scope,
	 not the enum name, matching how the enumerator is referenced.  */
      if (!ENUM_IS_SCOPED (type))
	type = get_containing_scope (type);
      pp_cxx_nested_name_specifier (pp, type);
      pp->id_expression (TREE_PURPOSE (value));
    }
  else
    {
      /* Value must have been cast. */
      pp_c_type_cast (pp, type);
      pp_c_integer_constant (pp, e);
    }
}
/* Print literal constant T, handling the C++-specific cases (parenthesized
   string literals, nullptr, enumerators) before deferring to the C printer.  */
void
cxx_pretty_printer::constant (tree t)
{
  switch (TREE_CODE (t))
    {
    case STRING_CST:
      {
	/* Preserve parentheses the user wrote around the literal.  */
	const bool in_parens = PAREN_STRING_LITERAL_P (t);
	if (in_parens)
	  pp_cxx_left_paren (this);
	c_pretty_printer::constant (t);
	if (in_parens)
	  pp_cxx_right_paren (this);
      }
      break;
    case INTEGER_CST:
      if (NULLPTR_TYPE_P (TREE_TYPE (t)))
	{
	  pp_string (this, "nullptr");
	  break;
	}
      else if (TREE_CODE (TREE_TYPE (t)) == ENUMERAL_TYPE)
	{
	  pp_cxx_enumeration_constant (this, t);
	  break;
	}
      /* fall through. */
    default:
      c_pretty_printer::constant (t);
      break;
    }
}
/* id-expression:
unqualified-id
qualified-id */
/* Print T as a qualified-id when it is a declaration with an enclosing
   context, otherwise as an unqualified-id; overload sets are represented
   by their first member.  */
void
cxx_pretty_printer::id_expression (tree t)
{
  if (TREE_CODE (t) == OVERLOAD)
    t = OVL_FIRST (t);
  const bool scoped = DECL_P (t) && DECL_CONTEXT (t);
  if (scoped)
    pp_cxx_qualified_id (this, t);
  else
    pp_cxx_unqualified_id (this, t);
}
/* user-defined literal:
literal ud-suffix */
/* Print user-defined literal T: the literal value immediately followed by
   its ud-suffix identifier.  */
void
pp_cxx_userdef_literal (cxx_pretty_printer *pp, tree t)
{
  pp->constant (USERDEF_LITERAL_VALUE (t));
  pp->id_expression (USERDEF_LITERAL_SUFFIX_ID (t));
}
/* primary-expression:
literal
this
:: identifier
:: operator-function-id
:: qualifier-id
( expression )
id-expression
GNU Extensions:
__builtin_va_arg ( assignment-expression , type-id )
__builtin_offsetof ( type-id, offsetof-expression )
__builtin_addressof ( expression )
__has_nothrow_assign ( type-id )
__has_nothrow_constructor ( type-id )
__has_nothrow_copy ( type-id )
__has_trivial_assign ( type-id )
__has_trivial_constructor ( type-id )
__has_trivial_copy ( type-id )
__has_unique_object_representations ( type-id )
__has_trivial_destructor ( type-id )
__has_virtual_destructor ( type-id )
__is_abstract ( type-id )
__is_base_of ( type-id , type-id )
__is_class ( type-id )
__is_empty ( type-id )
__is_enum ( type-id )
__is_literal_type ( type-id )
__is_pod ( type-id )
__is_polymorphic ( type-id )
__is_std_layout ( type-id )
__is_trivial ( type-id )
__is_union ( type-id ) */
/* Print T as a primary-expression, dispatching on tree code and falling
   back to the C printer for codes not special to C++.  */
void
cxx_pretty_printer::primary_expression (tree t)
{
  switch (TREE_CODE (t))
    {
    case VOID_CST:
    case INTEGER_CST:
    case REAL_CST:
    case COMPLEX_CST:
    case STRING_CST:
      constant (t);
      break;
    case USERDEF_LITERAL:
      pp_cxx_userdef_literal (this, t);
      break;
    case BASELINK:
      t = BASELINK_FUNCTIONS (t);
      /* FALLTHRU */
    case VAR_DECL:
    case PARM_DECL:
    case FIELD_DECL:
    case FUNCTION_DECL:
    case OVERLOAD:
    case CONST_DECL:
    case TEMPLATE_DECL:
      id_expression (t);
      break;
    case RESULT_DECL:
    case TEMPLATE_TYPE_PARM:
    case TEMPLATE_TEMPLATE_PARM:
    case TEMPLATE_PARM_INDEX:
      pp_cxx_unqualified_id (this, t);
      break;
    case STMT_EXPR:
      /* GNU statement-expression: "( { ... } )".  */
      pp_cxx_left_paren (this);
      statement (STMT_EXPR_STMT (t));
      pp_cxx_right_paren (this);
      break;
    case TRAIT_EXPR:
      pp_cxx_trait_expression (this, t);
      break;
    case VA_ARG_EXPR:
      pp_cxx_va_arg_expression (this, t);
      break;
    case OFFSETOF_EXPR:
      pp_cxx_offsetof_expression (this, t);
      break;
    case ADDRESSOF_EXPR:
      pp_cxx_addressof_expression (this, t);
      break;
    case REQUIRES_EXPR:
      pp_cxx_requires_expr (this, t);
      break;
    default:
      c_pretty_printer::primary_expression (t);
      break;
    }
}
/* postfix-expression:
primary-expression
postfix-expression [ expression ]
postfix-expression ( expression-list(opt) )
simple-type-specifier ( expression-list(opt) )
typename ::(opt) nested-name-specifier identifier ( expression-list(opt) )
typename ::(opt) nested-name-specifier template(opt)
template-id ( expression-list(opt) )
postfix-expression . template(opt) ::(opt) id-expression
postfix-expression -> template(opt) ::(opt) id-expression
postfix-expression . pseudo-destructor-name
postfix-expression -> pseudo-destructor-name
postfix-expression ++
postfix-expression --
dynamic_cast < type-id > ( expression )
static_cast < type-id > ( expression )
reinterpret_cast < type-id > ( expression )
const_cast < type-id > ( expression )
typeid ( expression )
typeid ( type-id ) */
/* Print T as a postfix-expression: calls (including member calls with their
   implicit object argument recovered), named casts, typeid, pseudo-dtor
   and overloaded-arrow expressions.  */
void
cxx_pretty_printer::postfix_expression (tree t)
{
  enum tree_code code = TREE_CODE (t);
  switch (code)
    {
    case AGGR_INIT_EXPR:
    case CALL_EXPR:
      {
	tree fun = cp_get_callee (t);
	tree saved_scope = enclosing_scope;
	bool skipfirst = false;
	tree arg;
	if (TREE_CODE (fun) == ADDR_EXPR)
	  fun = TREE_OPERAND (fun, 0);
	/* In templates, where there is no way to tell whether a given
	   call uses an actual member function.  So the parser builds
	   FUN as a COMPONENT_REF or a plain IDENTIFIER_NODE until
	   instantiation time.  */
	if (TREE_CODE (fun) != FUNCTION_DECL)
	  ;
	else if (DECL_NONSTATIC_MEMBER_FUNCTION_P (fun))
	  {
	    /* Recover the object argument (arg 0, or the slot for a
	       constructor-style AGGR_INIT) and print "obj." or "ptr->".  */
	    tree object = (code == AGGR_INIT_EXPR
			   ? (AGGR_INIT_VIA_CTOR_P (t)
			      ? AGGR_INIT_EXPR_SLOT (t)
			      : AGGR_INIT_EXPR_ARG (t, 0))
			   : CALL_EXPR_ARG (t, 0));
	    while (TREE_CODE (object) == NOP_EXPR)
	      object = TREE_OPERAND (object, 0);
	    if (TREE_CODE (object) == ADDR_EXPR)
	      object = TREE_OPERAND (object, 0);
	    if (!TYPE_PTR_P (TREE_TYPE (object)))
	      {
		postfix_expression (object);
		pp_cxx_dot (this);
	      }
	    else
	      {
		postfix_expression (object);
		pp_cxx_arrow (this);
	      }
	    /* The object argument has been printed; don't repeat it in the
	       argument list below.  */
	    skipfirst = true;
	    enclosing_scope = strip_pointer_operator (TREE_TYPE (object));
	  }
	postfix_expression (fun);
	enclosing_scope = saved_scope;
	pp_cxx_left_paren (this);
	if (code == AGGR_INIT_EXPR)
	  {
	    aggr_init_expr_arg_iterator iter;
	    FOR_EACH_AGGR_INIT_EXPR_ARG (arg, iter, t)
	      {
		if (skipfirst)
		  skipfirst = false;
		else
		  {
		    expression (arg);
		    if (more_aggr_init_expr_args_p (&iter))
		      pp_cxx_separate_with (this, ',');
		  }
	      }
	  }
	else
	  {
	    call_expr_arg_iterator iter;
	    FOR_EACH_CALL_EXPR_ARG (arg, iter, t)
	      {
		if (skipfirst)
		  skipfirst = false;
		else
		  {
		    expression (arg);
		    if (more_call_expr_args_p (&iter))
		      pp_cxx_separate_with (this, ',');
		  }
	      }
	  }
	pp_cxx_right_paren (this);
      }
      if (code == AGGR_INIT_EXPR && AGGR_INIT_VIA_CTOR_P (t))
	{
	  /* Also show the slot initialized by the constructor call.  */
	  pp_cxx_separate_with (this, ',');
	  postfix_expression (AGGR_INIT_EXPR_SLOT (t));
	}
      break;
    case BASELINK:
    case VAR_DECL:
    case PARM_DECL:
    case FIELD_DECL:
    case FUNCTION_DECL:
    case OVERLOAD:
    case CONST_DECL:
    case TEMPLATE_DECL:
    case RESULT_DECL:
      primary_expression (t);
      break;
    case DYNAMIC_CAST_EXPR:
    case STATIC_CAST_EXPR:
    case REINTERPRET_CAST_EXPR:
    case CONST_CAST_EXPR:
      if (code == DYNAMIC_CAST_EXPR)
	pp_cxx_ws_string (this, "dynamic_cast");
      else if (code == STATIC_CAST_EXPR)
	pp_cxx_ws_string (this, "static_cast");
      else if (code == REINTERPRET_CAST_EXPR)
	pp_cxx_ws_string (this, "reinterpret_cast");
      else
	pp_cxx_ws_string (this, "const_cast");
      pp_cxx_begin_template_argument_list (this);
      type_id (TREE_TYPE (t));
      pp_cxx_end_template_argument_list (this);
      pp_left_paren (this);
      expression (TREE_OPERAND (t, 0));
      pp_right_paren (this);
      break;
    case EMPTY_CLASS_EXPR:
      type_id (TREE_TYPE (t));
      pp_left_paren (this);
      pp_right_paren (this);
      break;
    case TYPEID_EXPR:
      pp_cxx_typeid_expression (this, t);
      break;
    case PSEUDO_DTOR_EXPR:
      /* "obj.Scope::~Type" -- operand 1 (the scope) may be absent.  */
      postfix_expression (TREE_OPERAND (t, 0));
      pp_cxx_dot (this);
      if (TREE_OPERAND (t, 1))
	{
	  pp_cxx_qualified_id (this, TREE_OPERAND (t, 1));
	  pp_cxx_colon_colon (this);
	}
      pp_complement (this);
      pp_cxx_unqualified_id (this, TREE_OPERAND (t, 2));
      break;
    case ARROW_EXPR:
      postfix_expression (TREE_OPERAND (t, 0));
      pp_cxx_arrow (this);
      break;
    default:
      c_pretty_printer::postfix_expression (t);
      break;
    }
}
/* new-expression:
::(opt) new new-placement(opt) new-type-id new-initializer(opt)
::(opt) new new-placement(opt) ( type-id ) new-initializer(opt)
new-placement:
( expression-list )
new-type-id:
type-specifier-seq new-declarator(opt)
new-declarator:
ptr-operator new-declarator(opt)
direct-new-declarator
direct-new-declarator
[ expression ]
direct-new-declarator [ constant-expression ]
new-initializer:
( expression-list(opt) ) */
/* Print T as a new-expression: optional "::", "new", optional placement
   arguments (operand 0), the new-type-id (operand 1) and the optional
   initializer (operand 2).  */
static void
pp_cxx_new_expression (cxx_pretty_printer *pp, tree t)
{
  enum tree_code code = TREE_CODE (t);
  tree type = TREE_OPERAND (t, 1);
  tree init = TREE_OPERAND (t, 2);
  switch (code)
    {
    case NEW_EXPR:
    case VEC_NEW_EXPR:
      if (NEW_EXPR_USE_GLOBAL (t))
	pp_cxx_colon_colon (pp);
      pp_cxx_ws_string (pp, "new");
      if (TREE_OPERAND (t, 0))
	{
	  /* Placement arguments.  */
	  pp_cxx_call_argument_list (pp, TREE_OPERAND (t, 0));
	  pp_space (pp);
	}
      /* An array new stores the element count as an ARRAY_REF; rebuild the
	 corresponding array type (index range 0 .. count-1) for printing.  */
      if (TREE_CODE (type) == ARRAY_REF)
	type = build_cplus_array_type
	  (TREE_OPERAND (type, 0),
	   build_index_type (fold_build2_loc (input_location,
					      MINUS_EXPR, integer_type_node,
					      TREE_OPERAND (type, 1),
					      integer_one_node)));
      pp->type_id (type);
      if (init)
	{
	  pp_left_paren (pp);
	  if (TREE_CODE (init) == TREE_LIST)
	    pp_c_expression_list (pp, init);
	  else if (init == void_node)
	    ;			/* OK, empty initializer list. */
	  else
	    pp->expression (init);
	  pp_right_paren (pp);
	}
      break;
    default:
      pp_unsupported_tree (pp, t);
    }
}
/* delete-expression:
::(opt) delete cast-expression
::(opt) delete [ ] cast-expression */
/* Print T as a delete-expression: optional "::", "delete", "[ ]" for the
   array form, then the operand as a cast-expression.  */
static void
pp_cxx_delete_expression (cxx_pretty_printer *pp, tree t)
{
  enum tree_code code = TREE_CODE (t);
  switch (code)
    {
    case DELETE_EXPR:
    case VEC_DELETE_EXPR:
      if (DELETE_EXPR_USE_GLOBAL (t))
	pp_cxx_colon_colon (pp);
      pp_cxx_ws_string (pp, "delete");
      pp_space (pp);
      if (code == VEC_DELETE_EXPR
	  || DELETE_EXPR_USE_VEC (t))
	{
	  pp_left_bracket (pp);
	  pp_right_bracket (pp);
	  pp_space (pp);
	}
      pp_c_cast_expression (pp, TREE_OPERAND (t, 0));
      break;
    default:
      pp_unsupported_tree (pp, t);
    }
}
/* unary-expression:
postfix-expression
++ cast-expression
-- cast-expression
unary-operator cast-expression
sizeof unary-expression
sizeof ( type-id )
sizeof ... ( identifier )
new-expression
delete-expression
unary-operator: one of
* & + - !
GNU extensions:
__alignof__ unary-expression
__alignof__ ( type-id ) */
/* Print T as a unary-expression, covering the C++-specific forms (new,
   delete, sizeof..., noexcept, @encode, unary plus) and deferring the rest
   to the C printer.  */
void
cxx_pretty_printer::unary_expression (tree t)
{
  enum tree_code code = TREE_CODE (t);
  switch (code)
    {
    case NEW_EXPR:
    case VEC_NEW_EXPR:
      pp_cxx_new_expression (this, t);
      break;
    case DELETE_EXPR:
    case VEC_DELETE_EXPR:
      pp_cxx_delete_expression (this, t);
      break;
    case SIZEOF_EXPR:
      if (PACK_EXPANSION_P (TREE_OPERAND (t, 0)))
	{
	  /* "sizeof ... ( pack )".  */
	  pp_cxx_ws_string (this, "sizeof");
	  pp_cxx_ws_string (this, "...");
	  pp_cxx_whitespace (this);
	  pp_cxx_left_paren (this);
	  if (TYPE_P (TREE_OPERAND (t, 0)))
	    type_id (TREE_OPERAND (t, 0));
	  else
	    unary_expression (TREE_OPERAND (t, 0));
	  pp_cxx_right_paren (this);
	  break;
	}
      /* Fall through  */
    case ALIGNOF_EXPR:
      pp_cxx_ws_string (this, code == SIZEOF_EXPR ? "sizeof" : "__alignof__");
      pp_cxx_whitespace (this);
      if (TREE_CODE (t) == SIZEOF_EXPR && SIZEOF_EXPR_TYPE_P (t))
	{
	  /* "sizeof ( type )" written over an expression operand.  */
	  pp_cxx_left_paren (this);
	  type_id (TREE_TYPE (TREE_OPERAND (t, 0)));
	  pp_cxx_right_paren (this);
	}
      else if (TYPE_P (TREE_OPERAND (t, 0)))
	{
	  pp_cxx_left_paren (this);
	  type_id (TREE_OPERAND (t, 0));
	  pp_cxx_right_paren (this);
	}
      else
	unary_expression (TREE_OPERAND (t, 0));
      break;
    case AT_ENCODE_EXPR:
      /* Objective-C++ @encode.  */
      pp_cxx_ws_string (this, "@encode");
      pp_cxx_whitespace (this);
      pp_cxx_left_paren (this);
      type_id (TREE_OPERAND (t, 0));
      pp_cxx_right_paren (this);
      break;
    case NOEXCEPT_EXPR:
      pp_cxx_ws_string (this, "noexcept");
      pp_cxx_whitespace (this);
      pp_cxx_left_paren (this);
      expression (TREE_OPERAND (t, 0));
      pp_cxx_right_paren (this);
      break;
    case UNARY_PLUS_EXPR:
      pp_plus (this);
      pp_cxx_cast_expression (this, TREE_OPERAND (t, 0));
      break;
    default:
      c_pretty_printer::unary_expression (t);
      break;
    }
}
/* cast-expression:
unary-expression
( type-id ) cast-expression */
/* Print T as a cast-expression.  Functional casts and implicit conversions
   are rendered "type (args)"; everything else defers to the C printer's
   "(type) expr" handling.  */
static void
pp_cxx_cast_expression (cxx_pretty_printer *pp, tree t)
{
  const enum tree_code code = TREE_CODE (t);
  if (code == CAST_EXPR || code == IMPLICIT_CONV_EXPR)
    {
      pp->type_id (TREE_TYPE (t));
      pp_cxx_call_argument_list (pp, TREE_OPERAND (t, 0));
    }
  else
    pp_c_cast_expression (pp, t);
}
/* pm-expression:
cast-expression
pm-expression .* cast-expression
pm-expression ->* cast-expression */
/* Print T as a pm-expression: "obj .* member" or "ptr ->* member".  */
static void
pp_cxx_pm_expression (cxx_pretty_printer *pp, tree t)
{
  switch (TREE_CODE (t))
    {
      /* Handle unfortunate OFFSET_REF overloading here.  */
    case OFFSET_REF:
      /* A type operand 0 means this OFFSET_REF is really a qualified-id.  */
      if (TYPE_P (TREE_OPERAND (t, 0)))
	{
	  pp_cxx_qualified_id (pp, t);
	  break;
	}
      /* Fall through.  */
    case MEMBER_REF:
    case DOTSTAR_EXPR:
      pp_cxx_pm_expression (pp, TREE_OPERAND (t, 0));
      /* MEMBER_REF is the pointer form ("->*"), the others the object
	 form (".*").  */
      if (TREE_CODE (t) == MEMBER_REF)
	pp_cxx_arrow (pp);
      else
	pp_cxx_dot (pp);
      pp_star(pp);
      pp_cxx_cast_expression (pp, TREE_OPERAND (t, 1));
      break;
    default:
      pp_cxx_cast_expression (pp, t);
      break;
    }
}
/* multiplicative-expression:
pm-expression
multiplicative-expression * pm-expression
multiplicative-expression / pm-expression
multiplicative-expression % pm-expression */
/* Print E as a multiplicative-expression, choosing '*', '/' or '%' from
   the tree code; left operand recurses, right operand is a pm-expression.  */
void
cxx_pretty_printer::multiplicative_expression (tree e)
{
  enum tree_code code = TREE_CODE (e);
  switch (code)
    {
    case MULT_EXPR:
    case TRUNC_DIV_EXPR:
    case TRUNC_MOD_EXPR:
    case EXACT_DIV_EXPR:
    case RDIV_EXPR:
      multiplicative_expression (TREE_OPERAND (e, 0));
      pp_space (this);
      if (code == MULT_EXPR)
	pp_star (this);
      else if (code != TRUNC_MOD_EXPR)
	/* All division variants print as '/'.  */
	pp_slash (this);
      else
	pp_modulo (this);
      pp_space (this);
      pp_cxx_pm_expression (this, TREE_OPERAND (e, 1));
      break;
    default:
      pp_cxx_pm_expression (this, e);
      break;
    }
}
/* conditional-expression:
logical-or-expression
logical-or-expression ? expression : assignment-expression */
/* Print E as a conditional-expression:
     logical-or-expression ? expression : assignment-expression  */
void
cxx_pretty_printer::conditional_expression (tree e)
{
  if (TREE_CODE (e) == COND_EXPR)
    {
      pp_c_logical_or_expression (this, TREE_OPERAND (e, 0));
      pp_space (this);
      pp_question (this);
      pp_space (this);
      expression (TREE_OPERAND (e, 1));
      pp_space (this);
      /* Fix: the ':' separating the two arms was never printed, producing
	 "a ? b c" instead of "a ? b : c".  */
      pp_colon (this);
      pp_space (this);
      assignment_expression (TREE_OPERAND (e, 2));
    }
  else
    pp_c_logical_or_expression (this, e);
}
/* Pretty-print a compound assignment operator token as indicated by T. */
/* Pretty-print a compound assignment operator token as indicated by T.
   Unrecognized codes fall back to the raw tree-code name, which is not
   valid C++ but keeps diagnostics readable.  */
static void
pp_cxx_assignment_operator (cxx_pretty_printer *pp, tree t)
{
  const char *op;
  switch (TREE_CODE (t))
    {
    case NOP_EXPR:
      op = "=";
      break;
    case PLUS_EXPR:
      op = "+=";
      break;
    case MINUS_EXPR:
      op = "-=";
      break;
    case TRUNC_DIV_EXPR:
      op = "/=";
      break;
    case TRUNC_MOD_EXPR:
      op = "%=";
      break;
    default:
      op = get_tree_code_name (TREE_CODE (t));
      break;
    }
  pp_cxx_ws_string (pp, op);
}
/* assignment-expression:
conditional-expression
logical-or-expression assignment-operator assignment-expression
throw-expression
throw-expression:
throw assignment-expression(opt)
assignment-operator: one of
= *= /= %= += -= >>= <<= &= ^= |= */
/* Print E as an assignment-expression: plain "=" for MODIFY/INIT, "throw"
   with optional operand, or a compound operator for MODOP; anything else
   is a conditional-expression.  */
void
cxx_pretty_printer::assignment_expression (tree e)
{
  switch (TREE_CODE (e))
    {
    case MODIFY_EXPR:
    case INIT_EXPR:
      pp_c_logical_or_expression (this, TREE_OPERAND (e, 0));
      pp_space (this);
      pp_equal (this);
      pp_space (this);
      assignment_expression (TREE_OPERAND (e, 1));
      break;
    case THROW_EXPR:
      pp_cxx_ws_string (this, "throw");
      if (TREE_OPERAND (e, 0))
	assignment_expression (TREE_OPERAND (e, 0));
      break;
    case MODOP_EXPR:
      /* Operand 1 encodes which compound operator ("+=", "-=", ...).  */
      pp_c_logical_or_expression (this, TREE_OPERAND (e, 0));
      pp_cxx_assignment_operator (this, TREE_OPERAND (e, 1));
      assignment_expression (TREE_OPERAND (e, 2));
      break;
    default:
      conditional_expression (e);
      break;
    }
}
/* Top-level expression printer: dispatch T to the most specific grammar
   production its tree code belongs to, deferring unknown codes to the C
   pretty-printer.  */
void
cxx_pretty_printer::expression (tree t)
{
  switch (TREE_CODE (t))
    {
    case STRING_CST:
    case VOID_CST:
    case INTEGER_CST:
    case REAL_CST:
    case COMPLEX_CST:
      constant (t);
      break;
    case USERDEF_LITERAL:
      pp_cxx_userdef_literal (this, t);
      break;
    case RESULT_DECL:
      pp_cxx_unqualified_id (this, t);
      break;
#if 0
    case OFFSET_REF:
#endif
    case SCOPE_REF:
    case PTRMEM_CST:
      pp_cxx_qualified_id (this, t);
      break;
    case OVERLOAD:
      t = OVL_FIRST (t);
      /* FALLTHRU */
    case VAR_DECL:
    case PARM_DECL:
    case FIELD_DECL:
    case CONST_DECL:
    case FUNCTION_DECL:
    case BASELINK:
    case TEMPLATE_DECL:
    case TEMPLATE_TYPE_PARM:
    case TEMPLATE_PARM_INDEX:
    case TEMPLATE_TEMPLATE_PARM:
    case STMT_EXPR:
    case REQUIRES_EXPR:
      primary_expression (t);
      break;
    case CALL_EXPR:
    case DYNAMIC_CAST_EXPR:
    case STATIC_CAST_EXPR:
    case REINTERPRET_CAST_EXPR:
    case CONST_CAST_EXPR:
#if 0
    case MEMBER_REF:
#endif
    case EMPTY_CLASS_EXPR:
    case TYPEID_EXPR:
    case PSEUDO_DTOR_EXPR:
    case AGGR_INIT_EXPR:
    case ARROW_EXPR:
      postfix_expression (t);
      break;
    case NEW_EXPR:
    case VEC_NEW_EXPR:
      pp_cxx_new_expression (this, t);
      break;
    case DELETE_EXPR:
    case VEC_DELETE_EXPR:
      pp_cxx_delete_expression (this, t);
      break;
    case SIZEOF_EXPR:
    case ALIGNOF_EXPR:
    case NOEXCEPT_EXPR:
    case UNARY_PLUS_EXPR:
      unary_expression (t);
      break;
    case CAST_EXPR:
    case IMPLICIT_CONV_EXPR:
      pp_cxx_cast_expression (this, t);
      break;
    case OFFSET_REF:
    case MEMBER_REF:
    case DOTSTAR_EXPR:
      pp_cxx_pm_expression (this, t);
      break;
    case MULT_EXPR:
    case TRUNC_DIV_EXPR:
    case TRUNC_MOD_EXPR:
    case EXACT_DIV_EXPR:
    case RDIV_EXPR:
      multiplicative_expression (t);
      break;
    case COND_EXPR:
      conditional_expression (t);
      break;
    case MODIFY_EXPR:
    case INIT_EXPR:
    case THROW_EXPR:
    case MODOP_EXPR:
      assignment_expression (t);
      break;
    case NON_DEPENDENT_EXPR:
    case MUST_NOT_THROW_EXPR:
      /* Transparent wrappers: print the wrapped expression.  */
      expression (TREE_OPERAND (t, 0));
      break;
    case EXPR_PACK_EXPANSION:
      expression (PACK_EXPANSION_PATTERN (t));
      pp_cxx_ws_string (this, "...");
      break;
    case UNARY_LEFT_FOLD_EXPR:
      pp_cxx_unary_left_fold_expression (this, t);
      break;
    case UNARY_RIGHT_FOLD_EXPR:
      pp_cxx_unary_right_fold_expression (this, t);
      break;
    case BINARY_LEFT_FOLD_EXPR:
    case BINARY_RIGHT_FOLD_EXPR:
      pp_cxx_binary_fold_expression (this, t);
      break;
    case TEMPLATE_ID_EXPR:
      pp_cxx_template_id (this, t);
      break;
    case NONTYPE_ARGUMENT_PACK:
      {
	/* Print the pack's elements as a braced, comma-separated list.  */
	tree args = ARGUMENT_PACK_ARGS (t);
	int i, len = TREE_VEC_LENGTH (args);
	pp_cxx_left_brace (this);
	for (i = 0; i < len; ++i)
	  {
	    if (i > 0)
	      pp_cxx_separate_with (this, ',');
	    expression (TREE_VEC_ELT (args, i));
	  }
	pp_cxx_right_brace (this);
      }
      break;
    case LAMBDA_EXPR:
      pp_cxx_ws_string (this, "<lambda>");
      break;
    case TRAIT_EXPR:
      pp_cxx_trait_expression (this, t);
      break;
    case ATOMIC_CONSTR:
    case CHECK_CONSTR:
    case CONJ_CONSTR:
    case DISJ_CONSTR:
      pp_cxx_constraint (this, t);
      break;
    case PAREN_EXPR:
      pp_cxx_left_paren (this);
      expression (TREE_OPERAND (t, 0));
      pp_cxx_right_paren (this);
      break;
    default:
      c_pretty_printer::expression (t);
      break;
    }
}
/* Declarations. */
/* function-specifier:
inline
virtual
explicit */
/* Print the function-specifier of T: "virtual", "explicit" (for a
   non-converting constructor), or whatever the C printer emits.  */
void
cxx_pretty_printer::function_specifier (tree t)
{
  switch (TREE_CODE (t))
    {
    case FUNCTION_DECL:
      if (DECL_VIRTUAL_P (t))
	pp_cxx_ws_string (this, "virtual");
      else if (DECL_CONSTRUCTOR_P (t) && DECL_NONCONVERTING_P (t))
	pp_cxx_ws_string (this, "explicit");
      else
	c_pretty_printer::function_specifier (t);
      /* Falls into the empty default; nothing further to print.  */
    default:
      break;
    }
}
/* decl-specifier-seq:
decl-specifier-seq(opt) decl-specifier
decl-specifier:
storage-class-specifier
type-specifier
function-specifier
friend
typedef */
/* Print the decl-specifier-seq of T: storage class and type for data
   declarations, "typedef" for TYPE_DECLs, and specifier handling for the
   constructor/conversion/member-function cases.  */
void
cxx_pretty_printer::declaration_specifiers (tree t)
{
  switch (TREE_CODE (t))
    {
    case VAR_DECL:
    case PARM_DECL:
    case CONST_DECL:
    case FIELD_DECL:
      storage_class_specifier (t);
      declaration_specifiers (TREE_TYPE (t));
      break;
    case TYPE_DECL:
      pp_cxx_ws_string (this, "typedef");
      declaration_specifiers (TREE_TYPE (t));
      break;
    case FUNCTION_DECL:
      /* Constructors don't have return types.  And conversion functions
	 do not have a type-specifier in their return types.  */
      if (DECL_CONSTRUCTOR_P (t) || DECL_CONV_FN_P (t))
	function_specifier (t);
      else if (DECL_NONSTATIC_MEMBER_FUNCTION_P (t))
	/* Member function: print the return type of the METHOD_TYPE.  */
	declaration_specifiers (TREE_TYPE (TREE_TYPE (t)));
      else
	c_pretty_printer::declaration_specifiers (t);
      break;
    default:
      c_pretty_printer::declaration_specifiers (t);
      break;
    }
}
/* simple-type-specifier:
::(opt) nested-name-specifier(opt) type-name
::(opt) nested-name-specifier(opt) template(opt) template-id
decltype-specifier
char
wchar_t
bool
short
int
long
signed
unsigned
float
double
void */
void
cxx_pretty_printer::simple_type_specifier (tree t)
{
  switch (TREE_CODE (t))
    {
    case RECORD_TYPE:
    case UNION_TYPE:
    case ENUMERAL_TYPE:
      /* Class/union/enum types print as a (possibly qualified) name.  */
      pp_cxx_qualified_id (this, t);
      break;

    case TEMPLATE_TYPE_PARM:
    case TEMPLATE_TEMPLATE_PARM:
    case TEMPLATE_PARM_INDEX:
    case BOUND_TEMPLATE_TEMPLATE_PARM:
      pp_cxx_unqualified_id (this, t);
      /* A constrained auto/placeholder gets its "[requires ...]" note.  */
      if (tree c = PLACEHOLDER_TYPE_CONSTRAINTS (t))
	pp_cxx_constrained_type_spec (this, c);
      break;

    case TYPENAME_TYPE:
      /* Dependent name: "typename Scope::Name".  */
      pp_cxx_ws_string (this, "typename");
      pp_cxx_nested_name_specifier (this, TYPE_CONTEXT (t));
      pp_cxx_unqualified_id (this, TYPENAME_TYPE_FULLNAME (t));
      break;

    case DECLTYPE_TYPE:
      /* "decltype(expr)".  */
      pp_cxx_ws_string (this, "decltype");
      pp_cxx_left_paren (this);
      this->expression (DECLTYPE_TYPE_EXPR (t));
      pp_cxx_right_paren (this);
      break;

    default:
      c_pretty_printer::simple_type_specifier (t);
      break;
    }
}
/* type-specifier-seq:
type-specifier type-specifier-seq(opt)
type-specifier:
simple-type-specifier
class-specifier
enum-specifier
elaborated-type-specifier
cv-qualifier */
static void
pp_cxx_type_specifier_seq (cxx_pretty_printer *pp, tree t)
{
  switch (TREE_CODE (t))
    {
    case TEMPLATE_DECL:
    case TEMPLATE_TYPE_PARM:
    case TEMPLATE_TEMPLATE_PARM:
    case TYPE_DECL:
    case BOUND_TEMPLATE_TEMPLATE_PARM:
    case DECLTYPE_TYPE:
      /* cv-qualifiers first, then the bare type name.  */
      pp_cxx_cv_qualifier_seq (pp, t);
      pp->simple_type_specifier (t);
      break;

    case METHOD_TYPE:
      /* Return type, then "Class::" for the member-function pointer.  */
      pp_cxx_type_specifier_seq (pp, TREE_TYPE (t));
      pp_cxx_space_for_pointer_operator (pp, TREE_TYPE (t));
      pp_cxx_nested_name_specifier (pp, TYPE_METHOD_BASETYPE (t));
      break;

    case RECORD_TYPE:
      if (TYPE_PTRMEMFUNC_P (t))
	{
	  /* Pointer-to-member-function is represented as a RECORD_TYPE;
	     print the function's return type and the ptr-operator.  */
	  tree pfm = TYPE_PTRMEMFUNC_FN_TYPE (t);
	  pp->declaration_specifiers (TREE_TYPE (TREE_TYPE (pfm)));
	  pp_cxx_whitespace (pp);
	  pp_cxx_ptr_operator (pp, t);
	  break;
	}
      /* fall through */
    case OFFSET_TYPE:
      if (TYPE_PTRDATAMEM_P (t))
	{
	  /* Pointer to data member: pointee type, then "Class::*".  */
	  pp_cxx_type_specifier_seq (pp, TREE_TYPE (t));
	  pp_cxx_whitespace (pp);
	  pp_cxx_ptr_operator (pp, t);
	  break;
	}
      /* fall through */
    default:
      /* Constructors have no printable specifier-qualifier list.  */
      if (!(TREE_CODE (t) == FUNCTION_DECL && DECL_CONSTRUCTOR_P (t)))
	pp_c_specifier_qualifier_list (pp, t);
    }
}
/* ptr-operator:
* cv-qualifier-seq(opt)
&
::(opt) nested-name-specifier * cv-qualifier-seq(opt) */
static void
pp_cxx_ptr_operator (cxx_pretty_printer *pp, tree t)
{
  /* Accept either a type or a declaration; normalize to the type.  */
  if (!TYPE_P (t) && TREE_CODE (t) != TYPE_DECL)
    t = TREE_TYPE (t);
  switch (TREE_CODE (t))
    {
    case REFERENCE_TYPE:
    case POINTER_TYPE:
      /* Print inner ptr-operators first, outermost last.  */
      if (TYPE_PTR_OR_PTRMEM_P (TREE_TYPE (t)))
	pp_cxx_ptr_operator (pp, TREE_TYPE (t));
      pp_c_attributes_display (pp, TYPE_ATTRIBUTES (TREE_TYPE (t)));
      if (TYPE_PTR_P (t))
	{
	  pp_star (pp);
	  pp_cxx_cv_qualifier_seq (pp, t);
	}
      else
	pp_ampersand (pp);
      break;

    case RECORD_TYPE:
      if (TYPE_PTRMEMFUNC_P (t))
	{
	  /* "(Class::*" — the matching ')' is emitted by the caller
	     (see cxx_pretty_printer::abstract_declarator).  */
	  pp_cxx_left_paren (pp);
	  pp_cxx_nested_name_specifier (pp, TYPE_PTRMEMFUNC_OBJECT_TYPE (t));
	  pp_star (pp);
	  break;
	}
      /* FALLTHRU */
    case OFFSET_TYPE:
      if (TYPE_PTRMEM_P (t))
	{
	  /* "Class::*" — parenthesized only for pointer-to-array member.  */
	  if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE)
	    pp_cxx_left_paren (pp);
	  pp_cxx_nested_name_specifier (pp, TYPE_PTRMEM_CLASS_TYPE (t));
	  pp_star (pp);
	  pp_cxx_cv_qualifier_seq (pp, t);
	  break;
	}
      /* fall through.  */
    default:
      pp_unsupported_tree (pp, t);
      break;
    }
}
/* Return the class type of the implicit `this' parameter of the
   member function MF.  */
static inline tree
pp_cxx_implicit_parameter_type (tree mf)
{
  tree method_type = TREE_TYPE (mf);
  return class_of_this_parm (method_type);
}
/*
parameter-declaration:
decl-specifier-seq declarator
decl-specifier-seq declarator = assignment-expression
decl-specifier-seq abstract-declarator(opt)
decl-specifier-seq abstract-declarator(opt) assignment-expression */
static inline void
pp_cxx_parameter_declaration (cxx_pretty_printer *pp, tree t)
{
  /* Specifiers first; then a declarator when T is a PARM_DECL, or an
     abstract-declarator when T is a bare type.  */
  pp->declaration_specifiers (t);
  if (!TYPE_P (t))
    pp->declarator (t);
  else
    pp->abstract_declarator (t);
}
/* parameter-declaration-clause:
parameter-declaration-list(opt) ...(opt)
parameter-declaration-list , ...
parameter-declaration-list:
parameter-declaration
parameter-declaration-list , parameter-declaration */
static void
pp_cxx_parameter_declaration_clause (cxx_pretty_printer *pp, tree t)
{
  tree args;
  tree types;
  bool abstract;

  // For a requires clause or the explicit printing of a parameter list
  // we expect T to be a chain of PARM_DECLs. Otherwise, the list of
  // args and types are taken from the function decl T.
  if (TREE_CODE (t) == PARM_DECL)
    {
      args = t;
      types = t;
      abstract = false;
    }
  else
    {
      bool type_p = TYPE_P (t);
      args = type_p ? NULL : FUNCTION_FIRST_USER_PARM (t);
      types = type_p ? TYPE_ARG_TYPES (t) : FUNCTION_FIRST_USER_PARMTYPE (t);
      /* With no PARM_DECLs (bare function type) only types can be
	 printed, i.e. abstract declarators.  */
      abstract = args == NULL || pp->flags & pp_c_flag_abstract;
    }
  bool first = true;

  /* Skip artificial parameter for non-static member functions.  */
  if (TREE_CODE (t) == METHOD_TYPE)
    types = TREE_CHAIN (types);

  /* Walk ARGS and TYPES in lockstep; ARGS terminates the loop.  */
  pp_cxx_left_paren (pp);
  for (; args; args = TREE_CHAIN (args), types = TREE_CHAIN (types))
    {
      if (!first)
	pp_cxx_separate_with (pp, ',');
      first = false;
      pp_cxx_parameter_declaration (pp, abstract ? TREE_VALUE (types) : args);
      if (!abstract && pp->flags & pp_cxx_flag_default_argument)
	{
	  /* Default argument lives in the TREE_PURPOSE of the type list.  */
	  pp_cxx_whitespace (pp);
	  pp_equal (pp);
	  pp_cxx_whitespace (pp);
	  pp->assignment_expression (TREE_PURPOSE (types));
	}
    }
  pp_cxx_right_paren (pp);
}
/* exception-specification:
throw ( type-id-list(opt) )
type-id-list
type-id
type-id-list , type-id */
static void
pp_cxx_exception_specification (cxx_pretty_printer *pp, tree t)
{
  tree ex_spec = TYPE_RAISES_EXCEPTIONS (t);
  bool need_comma = false;

  /* No exception-specification at all.  */
  if (ex_spec == NULL)
    return;
  if (TREE_PURPOSE (ex_spec))
    {
      /* A TREE_PURPOSE means a noexcept-specification; its operand is the
	 (possibly deferred) constant expression.  */
      pp_cxx_ws_string (pp, "noexcept");
      pp_cxx_whitespace (pp);
      pp_cxx_left_paren (pp);
      if (DEFERRED_NOEXCEPT_SPEC_P (ex_spec))
	pp_cxx_ws_string (pp, "<uninstantiated>");
      else
	pp->expression (TREE_PURPOSE (ex_spec));
      pp_cxx_right_paren (pp);
      return;
    }
  /* Otherwise a dynamic exception specification: throw (T1, T2, ...).  */
  pp_cxx_ws_string (pp, "throw");
  pp_cxx_left_paren (pp);
  for (; ex_spec && TREE_VALUE (ex_spec); ex_spec = TREE_CHAIN (ex_spec))
    {
      tree type = TREE_VALUE (ex_spec);
      tree argpack = NULL_TREE;
      int i, len = 1;

      /* A pack expansion in the list contributes each of its elements.  */
      if (ARGUMENT_PACK_P (type))
	{
	  argpack = ARGUMENT_PACK_ARGS (type);
	  len = TREE_VEC_LENGTH (argpack);
	}

      for (i = 0; i < len; ++i)
	{
	  if (argpack)
	    type = TREE_VEC_ELT (argpack, i);

	  if (need_comma)
	    pp_cxx_separate_with (pp, ',');
	  else
	    need_comma = true;

	  pp->type_id (type);
	}
    }
  pp_cxx_right_paren (pp);
}
/* direct-declarator:
declarator-id
direct-declarator ( parameter-declaration-clause ) cv-qualifier-seq(opt)
exception-specification(opt)
direct-declaration [ constant-expression(opt) ]
( declarator ) */
void
cxx_pretty_printer::direct_declarator (tree t)
{
  switch (TREE_CODE (t))
    {
    case VAR_DECL:
    case PARM_DECL:
    case CONST_DECL:
    case FIELD_DECL:
      if (DECL_NAME (t))
	{
	  pp_cxx_space_for_pointer_operator (this, TREE_TYPE (t));

	  if ((TREE_CODE (t) == PARM_DECL && DECL_PACK_P (t))
	      || template_parameter_pack_p (t))
	    /* A function parameter pack or non-type template
	       parameter pack.  */
	    pp_cxx_ws_string (this, "...");

	  id_expression (DECL_NAME (t));
	}
      /* Trailing array/function parts of the declarator.  */
      abstract_declarator (TREE_TYPE (t));
      break;

    case FUNCTION_DECL:
      /* Name, parameter list, cv-qualifiers (member functions only),
	 then the exception-specification.  */
      pp_cxx_space_for_pointer_operator (this, TREE_TYPE (TREE_TYPE (t)));
      expression (t);
      pp_cxx_parameter_declaration_clause (this, t);

      if (DECL_NONSTATIC_MEMBER_FUNCTION_P (t))
	{
	  padding = pp_before;
	  pp_cxx_cv_qualifier_seq (this, pp_cxx_implicit_parameter_type (t));
	}

      pp_cxx_exception_specification (this, TREE_TYPE (t));
      break;

    case TYPENAME_TYPE:
    case TEMPLATE_DECL:
    case TEMPLATE_TYPE_PARM:
    case TEMPLATE_PARM_INDEX:
    case TEMPLATE_TEMPLATE_PARM:
      /* These carry no declarator of their own.  */
      break;

    default:
      c_pretty_printer::direct_declarator (t);
      break;
    }
}
/* declarator:
direct-declarator
ptr-operator declarator */
void
cxx_pretty_printer::declarator (tree t)
{
  direct_declarator (t);

  // Print a requires clause.
  /* Only when concepts are enabled and T carries declarator
     constraints.  */
  if (flag_concepts)
    if (tree ci = get_constraints (t))
      if (tree reqs = CI_DECLARATOR_REQS (ci))
	pp_cxx_requires_clause (this, reqs);
}
/* ctor-initializer:
: mem-initializer-list
mem-initializer-list:
mem-initializer
mem-initializer , mem-initializer-list
mem-initializer:
mem-initializer-id ( expression-list(opt) )
mem-initializer-id:
::(opt) nested-name-specifier(opt) class-name
identifier */
static void
pp_cxx_ctor_initializer (cxx_pretty_printer *pp, tree t)
{
  /* T wraps the mem-initializer chain in its first operand.  */
  pp_cxx_whitespace (pp);
  pp_colon (pp);
  pp_cxx_whitespace (pp);
  for (tree mem = TREE_OPERAND (t, 0); mem; mem = TREE_CHAIN (mem))
    {
      tree id = TREE_PURPOSE (mem);
      const bool expanded = PACK_EXPANSION_P (id);
      /* For a pack expansion print the pattern; the "..." follows the
	 argument list.  */
      pp->primary_expression (expanded ? PACK_EXPANSION_PATTERN (id) : id);
      pp_cxx_call_argument_list (pp, TREE_VALUE (mem));
      if (expanded)
	pp_cxx_ws_string (pp, "...");
      if (TREE_CHAIN (mem))
	pp_cxx_separate_with (pp, ',');
    }
}
/* function-definition:
decl-specifier-seq(opt) declarator ctor-initializer(opt) function-body
decl-specifier-seq(opt) declarator function-try-block */
static void
pp_cxx_function_definition (cxx_pretty_printer *pp, tree t)
{
  /* Remember the scope so it can be restored after the body.  */
  tree prior_scope = pp->enclosing_scope;

  pp->declaration_specifiers (t);
  pp->declarator (t);
  pp_needs_newline (pp) = true;
  pp->enclosing_scope = DECL_CONTEXT (t);

  /* A definition has a saved body; a mere declaration gets ';'.  */
  tree body = DECL_SAVED_TREE (t);
  if (body)
    pp->statement (body);
  else
    pp_cxx_semicolon (pp);

  pp_newline_and_flush (pp);
  pp->enclosing_scope = prior_scope;
}
/* abstract-declarator:
ptr-operator abstract-declarator(opt)
direct-abstract-declarator */
void
cxx_pretty_printer::abstract_declarator (tree t)
{
  /* pp_cxx_ptr_operator prints '(' for a pointer-to-member function,
     or a pointer-to-data-member of array type:

	 void (X::*)()
	 int (X::*)[5]

     but not for a pointer-to-data-member of non-array type:

	 int X::*

     so be mindful of that.  */
  if (TYPE_PTRMEMFUNC_P (t)
      || (TYPE_PTRDATAMEM_P (t)
	  && TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE))
    pp_cxx_right_paren (this);
  else if (INDIRECT_TYPE_P (t))
    {
      /* Pointers/references to array or function types were opened with
	 '(' by the C printer; close it before the suffix.  */
      if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE
	  || TREE_CODE (TREE_TYPE (t)) == FUNCTION_TYPE)
	pp_cxx_right_paren (this);
      t = TREE_TYPE (t);
    }
  direct_abstract_declarator (t);
}
/* direct-abstract-declarator:
direct-abstract-declarator(opt) ( parameter-declaration-clause )
cv-qualifier-seq(opt) exception-specification(opt)
direct-abstract-declarator(opt) [ constant-expression(opt) ]
( abstract-declarator ) */
void
cxx_pretty_printer::direct_abstract_declarator (tree t)
{
  switch (TREE_CODE (t))
    {
    case REFERENCE_TYPE:
      abstract_declarator (t);
      break;

    case RECORD_TYPE:
      /* Pointer-to-member-function: recurse on the underlying
	 function type.  */
      if (TYPE_PTRMEMFUNC_P (t))
	direct_abstract_declarator (TYPE_PTRMEMFUNC_FN_TYPE (t));
      break;

    case OFFSET_TYPE:
      if (TYPE_PTRDATAMEM_P (t))
	direct_abstract_declarator (TREE_TYPE (t));
      break;

    case METHOD_TYPE:
    case FUNCTION_TYPE:
      /* Parameter clause, return-type declarator parts, cv-qualifiers
	 (methods only), then the exception-specification.  */
      pp_cxx_parameter_declaration_clause (this, t);
      direct_abstract_declarator (TREE_TYPE (t));
      if (TREE_CODE (t) == METHOD_TYPE)
	{
	  padding = pp_before;
	  pp_cxx_cv_qualifier_seq (this, class_of_this_parm (t));
	}
      pp_cxx_exception_specification (this, t);
      break;

    case TYPENAME_TYPE:
    case TEMPLATE_TYPE_PARM:
    case TEMPLATE_TEMPLATE_PARM:
    case BOUND_TEMPLATE_TEMPLATE_PARM:
    case UNBOUND_CLASS_TEMPLATE:
    case DECLTYPE_TYPE:
      /* Nothing to print beyond the type-specifier itself.  */
      break;

    default:
      c_pretty_printer::direct_abstract_declarator (t);
      break;
    }
}
/* type-id:
type-specifier-seq abstract-declarator(opt) */
void
cxx_pretty_printer::type_id (tree t)
{
  /* Force abstract-declarator printing for the duration of this call.  */
  pp_flags saved_flags = flags;
  flags |= pp_c_flag_abstract;

  switch (TREE_CODE (t))
    {
    case TYPE_DECL:
    case UNION_TYPE:
    case RECORD_TYPE:
    case ENUMERAL_TYPE:
    case TYPENAME_TYPE:
    case BOUND_TEMPLATE_TEMPLATE_PARM:
    case UNBOUND_CLASS_TEMPLATE:
    case TEMPLATE_TEMPLATE_PARM:
    case TEMPLATE_TYPE_PARM:
    case TEMPLATE_PARM_INDEX:
    case TEMPLATE_DECL:
    case TYPEOF_TYPE:
    case UNDERLYING_TYPE:
    case DECLTYPE_TYPE:
    case TEMPLATE_ID_EXPR:
    case OFFSET_TYPE:
      pp_cxx_type_specifier_seq (this, t);
      /* Pointers-to-member additionally need their declarator suffix.  */
      if (TYPE_PTRMEM_P (t))
	abstract_declarator (t);
      break;

    case TYPE_PACK_EXPANSION:
      /* "Pattern...".  */
      type_id (PACK_EXPANSION_PATTERN (t));
      pp_cxx_ws_string (this, "...");
      break;

    case TYPE_ARGUMENT_PACK:
      {
	/* An argument pack prints as a braced, comma-separated list.  */
	tree args = ARGUMENT_PACK_ARGS (t);
	int len = TREE_VEC_LENGTH (args);
	pp_cxx_left_brace (this);
	for (int i = 0; i < len; ++i)
	  {
	    if (i > 0)
	      pp_cxx_separate_with (this, ',');
	    type_id (TREE_VEC_ELT (args, i));
	  }
	pp_cxx_right_brace (this);
      }
      break;

    default:
      c_pretty_printer::type_id (t);
      break;
    }

  flags = saved_flags;
}
/* template-argument-list:
template-argument ...(opt)
template-argument-list, template-argument ...(opt)
template-argument:
assignment-expression
type-id
template-name */
static void
pp_cxx_template_argument_list (cxx_pretty_printer *pp, tree t)
{
  int i;
  bool need_comma = false;

  if (t == NULL)
    return;
  for (i = 0; i < TREE_VEC_LENGTH (t); ++i)
    {
      tree arg = TREE_VEC_ELT (t, i);
      tree argpack = NULL_TREE;
      int idx, len = 1;

      /* Flatten argument packs into their individual elements.  */
      if (ARGUMENT_PACK_P (arg))
	{
	  argpack = ARGUMENT_PACK_ARGS (arg);
	  len = TREE_VEC_LENGTH (argpack);
	}

      for (idx = 0; idx < len; idx++)
	{
	  if (argpack)
	    arg = TREE_VEC_ELT (argpack, idx);

	  if (need_comma)
	    pp_cxx_separate_with (pp, ',');
	  else
	    need_comma = true;

	  /* Type arguments (including class templates) print as type-ids;
	     template parameter objects print as their initializer; other
	     non-type arguments print as expressions.  */
	  if (TYPE_P (arg) || (TREE_CODE (arg) == TEMPLATE_DECL
			       && TYPE_P (DECL_TEMPLATE_RESULT (arg))))
	    pp->type_id (arg);
	  else if (template_parm_object_p (arg))
	    pp->expression (DECL_INITIAL (arg));
	  else
	    pp->expression (arg);
	}
    }
}
/* Print the exception-declaration of a handler: type-specifier-seq
   followed by a declarator (named) or abstract-declarator (unnamed).  */
static void
pp_cxx_exception_declaration (cxx_pretty_printer *pp, tree t)
{
  tree decl = DECL_EXPR_DECL (t);
  pp_cxx_type_specifier_seq (pp, decl);
  if (!TYPE_P (decl))
    pp->declarator (decl);
  else
    pp->abstract_declarator (decl);
}
/* Statements. */
void
cxx_pretty_printer::statement (tree t)
{
  switch (TREE_CODE (t))
    {
    case CTOR_INITIALIZER:
      pp_cxx_ctor_initializer (this, t);
      break;

    case USING_STMT:
      /* using-directive: "using namespace N;".  */
      pp_cxx_ws_string (this, "using");
      pp_cxx_ws_string (this, "namespace");
      if (DECL_CONTEXT (t))
	pp_cxx_nested_name_specifier (this, DECL_CONTEXT (t));
      pp_cxx_qualified_id (this, USING_STMT_NAMESPACE (t));
      break;

    case USING_DECL:
      /* using-declaration: "using Scope::name".  */
      pp_cxx_ws_string (this, "using");
      pp_cxx_nested_name_specifier (this, USING_DECL_SCOPE (t));
      pp_cxx_unqualified_id (this, DECL_NAME (t));
      break;

    case EH_SPEC_BLOCK:
      /* Internal EH wrapper; not printed.  */
      break;

      /* try-block:
	    try compound-statement handler-seq  */
    case TRY_BLOCK:
      pp_maybe_newline_and_indent (this, 0);
      pp_cxx_ws_string (this, "try");
      pp_newline_and_indent (this, 3);
      statement (TRY_STMTS (t));
      pp_newline_and_indent (this, -3);
      /* A cleanup try-block has compiler-generated handlers; skip them.  */
      if (CLEANUP_P (t))
	;
      else
	statement (TRY_HANDLERS (t));
      break;

      /*
	 handler-seq:
	    handler handler-seq(opt)

	 handler:
	 catch ( exception-declaration ) compound-statement

	 exception-declaration:
	    type-specifier-seq declarator
	    type-specifier-seq abstract-declarator
	    ...   */
    case HANDLER:
      pp_cxx_ws_string (this, "catch");
      pp_cxx_left_paren (this);
      pp_cxx_exception_declaration (this, HANDLER_PARMS (t));
      pp_cxx_right_paren (this);
      pp_indentation (this) += 3;
      pp_needs_newline (this) = true;
      statement (HANDLER_BODY (t));
      pp_indentation (this) -= 3;
      pp_needs_newline (this) = true;
      break;

      /* selection-statement:
	    if ( expression ) statement
	    if ( expression ) statement else statement  */
    case IF_STMT:
      pp_cxx_ws_string (this, "if");
      pp_cxx_whitespace (this);
      pp_cxx_left_paren (this);
      expression (IF_COND (t));
      pp_cxx_right_paren (this);
      pp_newline_and_indent (this, 2);
      statement (THEN_CLAUSE (t));
      pp_newline_and_indent (this, -2);
      if (ELSE_CLAUSE (t))
	{
	  tree else_clause = ELSE_CLAUSE (t);
	  pp_cxx_ws_string (this, "else");
	  /* "else if" stays on one line; a plain else body is indented.  */
	  if (TREE_CODE (else_clause) == IF_STMT)
	    pp_cxx_whitespace (this);
	  else
	    pp_newline_and_indent (this, 2);
	  statement (else_clause);
	  if (TREE_CODE (else_clause) != IF_STMT)
	    pp_newline_and_indent (this, -2);
	}
      break;

    case RANGE_FOR_STMT:
      /* Range-based for: "for (init; decl : expr) body".  */
      pp_cxx_ws_string (this, "for");
      pp_space (this);
      pp_cxx_left_paren (this);
      if (RANGE_FOR_INIT_STMT (t))
	{
	  statement (RANGE_FOR_INIT_STMT (t));
	  pp_needs_newline (this) = false;
	  pp_cxx_whitespace (this);
	}
      statement (RANGE_FOR_DECL (t));
      pp_space (this);
      pp_needs_newline (this) = false;
      pp_colon (this);
      pp_space (this);
      statement (RANGE_FOR_EXPR (t));
      pp_cxx_right_paren (this);
      pp_newline_and_indent (this, 3);
      statement (FOR_BODY (t));
      pp_indentation (this) -= 3;
      pp_needs_newline (this) = true;
      break;

      /* expression-statement:
	    expression(opt) ;  */
    case EXPR_STMT:
      expression (EXPR_STMT_EXPR (t));
      pp_cxx_semicolon (this);
      pp_needs_newline (this) = true;
      break;

    case CLEANUP_STMT:
      /* Printed as a try/catch (or try/finally) pseudo-form.  */
      pp_cxx_ws_string (this, "try");
      pp_newline_and_indent (this, 2);
      statement (CLEANUP_BODY (t));
      pp_newline_and_indent (this, -2);
      pp_cxx_ws_string (this, CLEANUP_EH_ONLY (t) ? "catch" : "finally");
      pp_newline_and_indent (this, 2);
      statement (CLEANUP_EXPR (t));
      pp_newline_and_indent (this, -2);
      break;

    case STATIC_ASSERT:
      declaration (t);
      break;

    case OMP_DEPOBJ:
      /* OpenMP depobj directive, including its update/destroy clause.  */
      pp_cxx_ws_string (this, "#pragma omp depobj");
      pp_space (this);
      pp_cxx_left_paren (this);
      expression (OMP_DEPOBJ_DEPOBJ (t));
      pp_cxx_right_paren (this);
      if (OMP_DEPOBJ_CLAUSES (t) && OMP_DEPOBJ_CLAUSES (t) != error_mark_node)
	{
	  /* Clauses are either a real clause chain or an integer encoding
	     the dependence kind.  */
	  if (TREE_CODE (OMP_DEPOBJ_CLAUSES (t)) == OMP_CLAUSE)
	    dump_omp_clauses (this, OMP_DEPOBJ_CLAUSES (t),
			      pp_indentation (this), TDF_NONE);
	  else
	    switch (tree_to_uhwi (OMP_DEPOBJ_CLAUSES (t)))
	      {
	      case OMP_CLAUSE_DEPEND_IN:
		pp_cxx_ws_string (this, " update(in)");
		break;
	      case OMP_CLAUSE_DEPEND_INOUT:
		pp_cxx_ws_string (this, " update(inout)");
		break;
	      case OMP_CLAUSE_DEPEND_OUT:
		pp_cxx_ws_string (this, " update(out)");
		break;
	      case OMP_CLAUSE_DEPEND_MUTEXINOUTSET:
		pp_cxx_ws_string (this, " update(mutexinoutset)");
		break;
	      case OMP_CLAUSE_DEPEND_LAST:
		pp_cxx_ws_string (this, " destroy");
		break;
	      default:
		break;
	      }
	}
      pp_needs_newline (this) = true;
      break;

    default:
      c_pretty_printer::statement (t);
      break;
    }
}
/* original-namespace-definition:
namespace identifier { namespace-body }
As an edge case, we also handle unnamed namespace definition here. */
static void
pp_cxx_original_namespace_definition (cxx_pretty_printer *pp, tree t)
{
  pp_cxx_ws_string (pp, "namespace");
  if (DECL_CONTEXT (t))
    pp_cxx_nested_name_specifier (pp, DECL_CONTEXT (t));
  /* An unnamed namespace has no DECL_NAME; print only "namespace {...}".  */
  if (DECL_NAME (t))
    pp_cxx_unqualified_id (pp, t);
  pp_cxx_whitespace (pp);
  pp_cxx_left_brace (pp);
  /* We do not print the namespace-body.  */
  pp_cxx_whitespace (pp);
  pp_cxx_right_brace (pp);
}
/* namespace-alias:
identifier
namespace-alias-definition:
namespace identifier = qualified-namespace-specifier ;
qualified-namespace-specifier:
::(opt) nested-name-specifier(opt) namespace-name */
static void
pp_cxx_namespace_alias_definition (cxx_pretty_printer *pp, tree t)
{
  pp_cxx_ws_string (pp, "namespace");
  if (DECL_CONTEXT (t))
    pp_cxx_nested_name_specifier (pp, DECL_CONTEXT (t));
  pp_cxx_unqualified_id (pp, t);
  pp_cxx_whitespace (pp);
  pp_equal (pp);
  pp_cxx_whitespace (pp);
  /* The aliased namespace, qualified with its own enclosing scope.  */
  if (DECL_CONTEXT (DECL_NAMESPACE_ALIAS (t)))
    pp_cxx_nested_name_specifier (pp,
				  DECL_CONTEXT (DECL_NAMESPACE_ALIAS (t)));
  pp_cxx_qualified_id (pp, DECL_NAMESPACE_ALIAS (t));
  pp_cxx_semicolon (pp);
}
/* simple-declaration:
decl-specifier-seq(opt) init-declarator-list(opt) */
static void
pp_cxx_simple_declaration (cxx_pretty_printer *pp, tree t)
{
  /* decl-specifier-seq, init-declarator, terminating ';'.  */
  pp->declaration_specifiers (t);
  pp_cxx_init_declarator (pp, t);
  pp_cxx_semicolon (pp);
  pp_needs_newline (pp) = true;
}
/*
template-parameter-list:
template-parameter
template-parameter-list , template-parameter */
static inline void
pp_cxx_template_parameter_list (cxx_pretty_printer *pp, tree t)
{
  /* T is a TREE_VEC; print each parameter, comma-separated.  */
  const int count = TREE_VEC_LENGTH (t);
  for (int idx = 0; idx < count; ++idx)
    {
      if (idx > 0)
	pp_cxx_separate_with (pp, ',');
      pp_cxx_template_parameter (pp, TREE_VEC_ELT (t, idx));
    }
}
/* template-parameter:
type-parameter
parameter-declaration
type-parameter:
class ...(opt) identifier(opt)
class identifier(opt) = type-id
typename identifier(opt)
typename ...(opt) identifier(opt) = type-id
template < template-parameter-list > class ...(opt) identifier(opt)
template < template-parameter-list > class identifier(opt) = template-name */
static void
pp_cxx_template_parameter (cxx_pretty_printer *pp, tree t)
{
  /* T is a TREE_LIST node; the parameter itself is its TREE_VALUE.  */
  tree parameter =  TREE_VALUE (t);
  switch (TREE_CODE (parameter))
    {
    case TYPE_DECL:
      /* Type parameter: "class [...] [name]".  */
      pp_cxx_ws_string (pp, "class");
      if (TEMPLATE_TYPE_PARAMETER_PACK (TREE_TYPE (t)))
	pp_cxx_ws_string (pp, "...");
      if (DECL_NAME (parameter))
	pp_cxx_tree_identifier (pp, DECL_NAME (parameter));
      /* FIXME: Check if we should print also default argument.  */
      break;

    case PARM_DECL:
      /* Non-type parameter: printed as a parameter-declaration.  */
      pp_cxx_parameter_declaration (pp, parameter);
      break;

    case TEMPLATE_DECL:
      /* Template template parameter: not handled here.  */
      break;

    default:
      pp_unsupported_tree (pp, t);
      break;
    }
}
/* Pretty-print a template parameter in the canonical form
"template-parameter-<level>-<position in parameter list>". */
void
pp_cxx_canonical_template_parameter (cxx_pretty_printer *pp, tree parm)
{
  /* Bring type template parameters to the canonical form: their
     TEMPLATE_PARM_INDEX carries the level/index information.  */
  switch (TREE_CODE (parm))
    {
    case TEMPLATE_TYPE_PARM:
    case TEMPLATE_TEMPLATE_PARM:
    case BOUND_TEMPLATE_TEMPLATE_PARM:
      parm = TEMPLATE_TYPE_PARM_INDEX (parm);
      break;
    default:
      break;
    }

  pp_cxx_begin_template_argument_list (pp);
  pp->translate_string ("template-parameter-");
  pp_wide_integer (pp, TEMPLATE_PARM_LEVEL (parm));
  pp_minus (pp);
  /* Positions are reported 1-based.  */
  pp_wide_integer (pp, TEMPLATE_PARM_IDX (parm) + 1);
  pp_cxx_end_template_argument_list (pp);
}
/* Print a constrained-type-specifier. */
/* Print a constrained-type-specifier as " [requires Concept<...>]".
   C is either the constraint info for the placeholder, or
   error_mark_node when satisfaction failed.  */
void
pp_cxx_constrained_type_spec (cxx_pretty_printer *pp, tree c)
{
  pp_cxx_whitespace (pp);
  pp_cxx_left_bracket (pp);
  pp->translate_string ("requires");
  pp_cxx_whitespace (pp);
  if (c == error_mark_node)
    {
      pp_cxx_ws_string (pp, "<unsatisfied-type-constraint>");
      /* Close the bracket opened above so the diagnostic output stays
	 balanced even on the error path.  */
      pp_cxx_right_bracket (pp);
      return;
    }
  tree t, a;
  placeholder_extract_concept_and_args (c, t, a);
  pp->id_expression (t);
  /* The first template argument is the placeholder itself; print it as
     "<placeholder>" followed by the remaining arguments.  */
  pp_cxx_begin_template_argument_list (pp);
  pp_cxx_ws_string (pp, "<placeholder>");
  pp_cxx_separate_with (pp, ',');
  tree args = make_tree_vec (TREE_VEC_LENGTH (a) - 1);
  for (int i = 0; i < TREE_VEC_LENGTH (a) - 1; ++i)
    TREE_VEC_ELT (args, i) = TREE_VEC_ELT (a, i + 1);
  pp_cxx_template_argument_list (pp, args);
  ggc_free (args);
  pp_cxx_end_template_argument_list (pp);
  pp_cxx_right_bracket (pp);
}
/*
template-declaration:
export(opt) template < template-parameter-list > declaration
Concept extensions:
template-declaration:
export(opt) template < template-parameter-list >
requires-clause(opt) declaration */
static void
pp_cxx_template_declaration (cxx_pretty_printer *pp, tree t)
{
  tree tmpl = most_general_template (t);
  tree level;

  pp_maybe_newline_and_indent (pp, 0);
  /* One "template <...>" header per nesting level, outermost first.  */
  for (level = DECL_TEMPLATE_PARMS (tmpl); level; level = TREE_CHAIN (level))
    {
      pp_cxx_ws_string (pp, "template");
      pp_cxx_begin_template_argument_list (pp);
      pp_cxx_template_parameter_list (pp, TREE_VALUE (level));
      pp_cxx_end_template_argument_list (pp);
      pp_newline_and_indent (pp, 3);
    }

  /* Template-level requires-clause, when concepts are enabled.  */
  if (flag_concepts)
    if (tree ci = get_constraints (t))
      if (tree reqs = CI_TEMPLATE_REQS (ci))
	 {
	    pp_cxx_requires_clause (pp, reqs);
	    pp_newline_and_indent (pp, 6);
	 }

  if (TREE_CODE (t) == FUNCTION_DECL && DECL_SAVED_TREE (t))
    pp_cxx_function_definition (pp, t);
  else if (TREE_CODE (t) == CONCEPT_DECL)
    pp_cxx_concept_definition (pp, t);
  else
    pp_cxx_simple_declaration (pp, t);
}
/* Explicit specializations are not supported by this printer;
   report T as unsupported.  */
static void
pp_cxx_explicit_specialization (cxx_pretty_printer *pp, tree t)
{
  pp_unsupported_tree (pp, t);
}
/* Explicit instantiations are not supported by this printer;
   report T as unsupported.  */
static void
pp_cxx_explicit_instantiation (cxx_pretty_printer *pp, tree t)
{
  pp_unsupported_tree (pp, t);
}
/* Print a concept-definition: "name = constraint-expression;".
   The enclosing "template <...>" header is printed by the caller.  */
static void
pp_cxx_concept_definition (cxx_pretty_printer *pp, tree t)
{
  pp_cxx_unqualified_id (pp, DECL_NAME (t));
  pp_cxx_whitespace (pp);
  pp_cxx_ws_string (pp, "=");
  pp_cxx_whitespace (pp);
  /* The constraint expression is the concept's initializer.  */
  pp->expression (DECL_INITIAL (t));
  pp_cxx_semicolon (pp);
}
/*
declaration:
block-declaration
function-definition
template-declaration
explicit-instantiation
explicit-specialization
linkage-specification
namespace-definition
block-declaration:
simple-declaration
asm-definition
namespace-alias-definition
using-declaration
using-directive
static_assert-declaration */
void
cxx_pretty_printer::declaration (tree t)
{
  if (TREE_CODE (t) == STATIC_ASSERT)
    {
      /* static_assert (condition, message).  */
      pp_cxx_ws_string (this, "static_assert");
      pp_cxx_left_paren (this);
      expression (STATIC_ASSERT_CONDITION (t));
      pp_cxx_separate_with (this, ',');
      expression (STATIC_ASSERT_MESSAGE (t));
      pp_cxx_right_paren (this);
    }
  else if (!DECL_LANG_SPECIFIC (t))
    /* Plain C-like declaration with no C++-specific data.  */
    pp_cxx_simple_declaration (this, t);
  else if (DECL_USE_TEMPLATE (t))
    /* DECL_USE_TEMPLATE encodes how T relates to a template.  */
    switch (DECL_USE_TEMPLATE (t))
      {
      case 1:
	pp_cxx_template_declaration (this, t);
	break;

      case 2:
	pp_cxx_explicit_specialization (this, t);
	break;

      case 3:
	pp_cxx_explicit_instantiation (this, t);
	break;

      default:
	break;
      }
  else switch (TREE_CODE (t))
    {
    case VAR_DECL:
    case TYPE_DECL:
      pp_cxx_simple_declaration (this, t);
      break;

    case FUNCTION_DECL:
      /* With a body it is a definition; otherwise a declaration.  */
      if (DECL_SAVED_TREE (t))
	pp_cxx_function_definition (this, t);
      else
	pp_cxx_simple_declaration (this, t);
      break;

    case NAMESPACE_DECL:
      if (DECL_NAMESPACE_ALIAS (t))
	pp_cxx_namespace_alias_definition (this, t);
      else
	pp_cxx_original_namespace_definition (this, t);
      break;

    default:
      pp_unsupported_tree (this, t);
      break;
    }
}
/* Print a typeid expression: "typeid(type)" or "typeid(expr)".  */
static void
pp_cxx_typeid_expression (cxx_pretty_printer *pp, tree t)
{
  tree op = TREE_OPERAND (t, 0);
  pp_cxx_ws_string (pp, "typeid");
  pp_cxx_left_paren (pp);
  if (!TYPE_P (op))
    pp->expression (op);
  else
    pp->type_id (op);
  pp_cxx_right_paren (pp);
}
/* Print a va_arg use: "va_arg (ap, type)".  */
void
pp_cxx_va_arg_expression (cxx_pretty_printer *pp, tree t)
{
  pp_cxx_ws_string (pp, "va_arg");
  pp_cxx_left_paren (pp);
  pp->assignment_expression (TREE_OPERAND (t, 0));
  pp_cxx_separate_with (pp, ',');
  /* The requested type is the type of the VA_ARG_EXPR itself.  */
  pp->type_id (TREE_TYPE (t));
  pp_cxx_right_paren (pp);
}
/* Helper for pp_cxx_offsetof_expression.  Recursively print T as the
   "type, member-designator" form of an offsetof; return true on success,
   false when T does not match the expected shape (the caller then falls
   back to printing T as a plain expression).  */
static bool
pp_cxx_offsetof_expression_1 (cxx_pretty_printer *pp, tree t)
{
  switch (TREE_CODE (t))
    {
    case ARROW_EXPR:
      /* The base of the designator: "(static_cast<T*>(0))->..." — print
	 the pointed-to type followed by the separating comma.  */
      if (TREE_CODE (TREE_OPERAND (t, 0)) == STATIC_CAST_EXPR
	  && INDIRECT_TYPE_P (TREE_TYPE (TREE_OPERAND (t, 0))))
	{
	  pp->type_id (TREE_TYPE (TREE_TYPE (TREE_OPERAND (t, 0))));
	  pp_cxx_separate_with (pp, ',');
	  return true;
	}
      return false;
    case COMPONENT_REF:
      /* Member access: base first, then ".member" (no dot right after
	 the ARROW_EXPR base, which already ended with the comma).  */
      if (!pp_cxx_offsetof_expression_1 (pp, TREE_OPERAND (t, 0)))
	return false;
      if (TREE_CODE (TREE_OPERAND (t, 0)) != ARROW_EXPR)
	pp_cxx_dot (pp);
      pp->expression (TREE_OPERAND (t, 1));
      return true;
    case ARRAY_REF:
      /* Array element: base first, then "[index]".  */
      if (!pp_cxx_offsetof_expression_1 (pp, TREE_OPERAND (t, 0)))
	return false;
      pp_left_bracket (pp);
      pp->expression (TREE_OPERAND (t, 1));
      pp_right_bracket (pp);
      return true;
    default:
      return false;
    }
}
/* Print an offsetof expression: "offsetof (type, member-designator)".  */
void
pp_cxx_offsetof_expression (cxx_pretty_printer *pp, tree t)
{
  tree arg = TREE_OPERAND (t, 0);
  pp_cxx_ws_string (pp, "offsetof");
  pp_cxx_left_paren (pp);
  /* Try the structured "type, designator" form first; fall back to
     printing the operand as a plain expression.  */
  bool handled = pp_cxx_offsetof_expression_1 (pp, arg);
  if (!handled)
    pp->expression (arg);
  pp_cxx_right_paren (pp);
}
/* Print a __builtin_addressof expression.  */
void
pp_cxx_addressof_expression (cxx_pretty_printer *pp, tree t)
{
  pp_cxx_ws_string (pp, "__builtin_addressof");
  pp_cxx_left_paren (pp);
  pp->expression (TREE_OPERAND (t, 0));
  pp_cxx_right_paren (pp);
}
/* Return the spelled name of the operator of fold-expression T.  */
static char const*
get_fold_operator (tree t)
{
  ovl_op_info_t *info
    = OVL_OP_INFO (FOLD_EXPR_MODIFY_P (t), int_cst_value (FOLD_EXPR_OP (t)));
  return info->name;
}
/* Print a unary left fold: "(... op pack)".  */
void
pp_cxx_unary_left_fold_expression (cxx_pretty_printer *pp, tree t)
{
  tree pattern = PACK_EXPANSION_PATTERN (FOLD_EXPR_PACK (t));
  char const *op_name = get_fold_operator (t);
  pp_cxx_left_paren (pp);
  pp_cxx_ws_string (pp, "...");
  pp_cxx_ws_string (pp, op_name);
  pp->expression (pattern);
  pp_cxx_right_paren (pp);
}
/* Print a unary right fold: "(pack op ...)".  */
void
pp_cxx_unary_right_fold_expression (cxx_pretty_printer *pp, tree t)
{
  tree pattern = PACK_EXPANSION_PATTERN (FOLD_EXPR_PACK (t));
  char const *op_name = get_fold_operator (t);
  pp_cxx_left_paren (pp);
  pp->expression (pattern);
  pp_space (pp);
  pp_cxx_ws_string (pp, op_name);
  pp_cxx_ws_string (pp, "...");
  pp_cxx_right_paren (pp);
}
/* Print a binary fold: "(lhs op ... op rhs)".  */
void
pp_cxx_binary_fold_expression (cxx_pretty_printer *pp, tree t)
{
  char const *op_name = get_fold_operator (t);
  tree lhs = TREE_OPERAND (t, 1);
  tree rhs = TREE_OPERAND (t, 2);
  /* Exactly one operand is the pack expansion; print its pattern.  */
  if (lhs == FOLD_EXPR_PACK (t))
    lhs = PACK_EXPANSION_PATTERN (lhs);
  else
    rhs = PACK_EXPANSION_PATTERN (rhs);
  pp_cxx_left_paren (pp);
  pp->expression (lhs);
  pp_cxx_ws_string (pp, op_name);
  pp_cxx_ws_string (pp, "...");
  pp_cxx_ws_string (pp, op_name);
  pp->expression (rhs);
  pp_cxx_right_paren (pp);
}
/* Print a compiler trait expression: the trait keyword followed by its
   parenthesized type operand(s).  */
void
pp_cxx_trait_expression (cxx_pretty_printer *pp, tree t)
{
  cp_trait_kind kind = TRAIT_EXPR_KIND (t);

  switch (kind)
    {
    case CPTK_HAS_NOTHROW_ASSIGN:
      pp_cxx_ws_string (pp, "__has_nothrow_assign");
      break;
    case CPTK_HAS_TRIVIAL_ASSIGN:
      pp_cxx_ws_string (pp, "__has_trivial_assign");
      break;
    case CPTK_HAS_NOTHROW_CONSTRUCTOR:
      pp_cxx_ws_string (pp, "__has_nothrow_constructor");
      break;
    case CPTK_HAS_TRIVIAL_CONSTRUCTOR:
      pp_cxx_ws_string (pp, "__has_trivial_constructor");
      break;
    case CPTK_HAS_NOTHROW_COPY:
      pp_cxx_ws_string (pp, "__has_nothrow_copy");
      break;
    case CPTK_HAS_TRIVIAL_COPY:
      pp_cxx_ws_string (pp, "__has_trivial_copy");
      break;
    case CPTK_HAS_TRIVIAL_DESTRUCTOR:
      pp_cxx_ws_string (pp, "__has_trivial_destructor");
      break;
    case CPTK_HAS_UNIQUE_OBJ_REPRESENTATIONS:
      pp_cxx_ws_string (pp, "__has_unique_object_representations");
      break;
    case CPTK_HAS_VIRTUAL_DESTRUCTOR:
      pp_cxx_ws_string (pp, "__has_virtual_destructor");
      break;
    case CPTK_IS_ABSTRACT:
      pp_cxx_ws_string (pp, "__is_abstract");
      break;
    case CPTK_IS_AGGREGATE:
      pp_cxx_ws_string (pp, "__is_aggregate");
      break;
    case CPTK_IS_BASE_OF:
      pp_cxx_ws_string (pp, "__is_base_of");
      break;
    case CPTK_IS_CLASS:
      pp_cxx_ws_string (pp, "__is_class");
      break;
    case CPTK_IS_EMPTY:
      pp_cxx_ws_string (pp, "__is_empty");
      break;
    case CPTK_IS_ENUM:
      pp_cxx_ws_string (pp, "__is_enum");
      break;
    case CPTK_IS_FINAL:
      pp_cxx_ws_string (pp, "__is_final");
      break;
    case CPTK_IS_POD:
      pp_cxx_ws_string (pp, "__is_pod");
      break;
    case CPTK_IS_POLYMORPHIC:
      pp_cxx_ws_string (pp, "__is_polymorphic");
      break;
    case CPTK_IS_SAME_AS:
      pp_cxx_ws_string (pp, "__is_same");
      break;
    case CPTK_IS_STD_LAYOUT:
      pp_cxx_ws_string (pp, "__is_std_layout");
      break;
    case CPTK_IS_TRIVIAL:
      pp_cxx_ws_string (pp, "__is_trivial");
      break;
    case CPTK_IS_TRIVIALLY_ASSIGNABLE:
      pp_cxx_ws_string (pp, "__is_trivially_assignable");
      break;
    case CPTK_IS_TRIVIALLY_CONSTRUCTIBLE:
      pp_cxx_ws_string (pp, "__is_trivially_constructible");
      break;
    case CPTK_IS_TRIVIALLY_COPYABLE:
      pp_cxx_ws_string (pp, "__is_trivially_copyable");
      break;
    case CPTK_IS_UNION:
      pp_cxx_ws_string (pp, "__is_union");
      break;
    case CPTK_IS_LITERAL_TYPE:
      pp_cxx_ws_string (pp, "__is_literal_type");
      break;
    case CPTK_IS_ASSIGNABLE:
      pp_cxx_ws_string (pp, "__is_assignable");
      break;
    case CPTK_IS_CONSTRUCTIBLE:
      pp_cxx_ws_string (pp, "__is_constructible");
      break;

    default:
      gcc_unreachable ();
    }

  pp_cxx_left_paren (pp);
  pp->type_id (TRAIT_EXPR_TYPE1 (t));

  /* NOTE(review): only __is_base_of and __is_same print their second
     operand here; traits such as __is_assignable/__is_constructible also
     take further operands (TRAIT_EXPR_TYPE2), which this printer omits —
     confirm whether that is intentional.  */
  if (kind == CPTK_IS_BASE_OF || kind == CPTK_IS_SAME_AS)
    {
      pp_cxx_separate_with (pp, ',');
      pp->type_id (TRAIT_EXPR_TYPE2 (t));
    }

  pp_cxx_right_paren (pp);
}
// requires-clause:
// 'requires' logical-or-expression
void
pp_cxx_requires_clause (cxx_pretty_printer *pp, tree t)
{
  /* A null constraint means there is no clause to print.  */
  if (!t)
    return;
  pp->padding = pp_before;
  pp_cxx_ws_string (pp, "requires");
  pp_space (pp);
  pp->expression (t);
}
/* requirement:
simple-requirement
compound-requirement
type-requirement
nested-requirement */
static void
pp_cxx_requirement (cxx_pretty_printer *pp, tree t)
{
  /* Dispatch on the requirement kind; anything else is malformed.  */
  switch (TREE_CODE (t))
    {
    case SIMPLE_REQ:
      pp_cxx_simple_requirement (pp, t);
      break;

    case TYPE_REQ:
      pp_cxx_type_requirement (pp, t);
      break;

    case COMPOUND_REQ:
      pp_cxx_compound_requirement (pp, t);
      break;

    case NESTED_REQ:
      pp_cxx_nested_requirement (pp, t);
      break;

    default:
      gcc_unreachable ();
    }
}
// requirement-list:
// requirement
// requirement-list ';' requirement[opt]
//
static void
pp_cxx_requirement_list (cxx_pretty_printer *pp, tree t)
{
  /* T is a TREE_LIST chain; each TREE_VALUE is one requirement.  */
  while (t)
    {
      pp_cxx_requirement (pp, TREE_VALUE (t));
      t = TREE_CHAIN (t);
    }
}
// requirement-body:
// '{' requirement-list '}'
static void
pp_cxx_requirement_body (cxx_pretty_printer *pp, tree t)
{
  /* Braced list of requirements.  */
  pp_cxx_left_brace (pp);
  pp_cxx_requirement_list (pp, t);
  pp_cxx_right_brace (pp);
}
// requires-expression:
// 'requires' requirement-parameter-list requirement-body
void
pp_cxx_requires_expr (cxx_pretty_printer *pp, tree t)
{
  /* Plain pp_string: no automatic padding is requested here, unlike
     pp_cxx_requires_clause above.  */
  pp_string (pp, "requires");
  /* Operand 0 is the optional requirement-parameter-list.  */
  if (tree parms = TREE_OPERAND (t, 0))
    {
      pp_cxx_parameter_declaration_clause (pp, parms);
      pp_cxx_whitespace (pp);
    }
  /* Operand 1 is the requirement list forming the body.  */
  pp_cxx_requirement_body (pp, TREE_OPERAND (t, 1));
}
/* simple-requirement:
expression ';' */
void
pp_cxx_simple_requirement (cxx_pretty_printer *pp, tree t)
{
  /* The unevaluated expression, then ';'.  */
  pp->expression (TREE_OPERAND (t, 0));
  pp_cxx_semicolon (pp);
}
/* type-requirement:
typename type-name ';' */
void
pp_cxx_type_requirement (cxx_pretty_printer *pp, tree t)
{
  /* NOTE(review): only the type-id is printed; the leading 'typename'
     keyword mentioned in the comment above is not emitted here —
     confirm that is intended.  */
  pp->type_id (TREE_OPERAND (t, 0));
  pp_cxx_semicolon (pp);
}
/* compound-requirement:
'{' expression '}' 'noexcept' [opt] trailing-return-type [opt] */
void
pp_cxx_compound_requirement (cxx_pretty_printer *pp, tree t)
{
  /* '{' expression '}'  */
  pp_cxx_left_brace (pp);
  pp->expression (TREE_OPERAND (t, 0));
  pp_cxx_right_brace (pp);

  /* Optional 'noexcept'.  */
  if (COMPOUND_REQ_NOEXCEPT_P (t))
    pp_cxx_ws_string (pp, "noexcept");

  /* Optional trailing-return-type constraint: '-> type'.  */
  if (tree type = TREE_OPERAND (t, 1))
    {
      pp_cxx_whitespace (pp);
      pp_cxx_ws_string (pp, "->");
      pp->type_id (type);
    }

  pp_cxx_semicolon (pp);
}
/* nested requirement:
'requires' constraint-expression */
void
pp_cxx_nested_requirement (cxx_pretty_printer *pp, tree t)
{
  /* 'requires' constraint-expression ';'  */
  pp_cxx_ws_string (pp, "requires");
  pp->expression (TREE_OPERAND (t, 0));
  pp_cxx_semicolon (pp);
}
/* Print a concept-check constraint as the concept-id it was formed
   from.  */
void
pp_cxx_check_constraint (cxx_pretty_printer *pp, tree t)
{
  tree decl = CHECK_CONSTR_CONCEPT (t);
  tree tmpl = DECL_TI_TEMPLATE (decl);
  tree args = CHECK_CONSTR_ARGS (t);
  /* Rebuild a template-id 'Concept<args...>' purely for printing.  */
  tree id = build_nt (TEMPLATE_ID_EXPR, tmpl, args);

  if (TREE_CODE (decl) == CONCEPT_DECL)
    pp->expression (id);
  else if (VAR_P (decl))
    pp->expression (id);
  else if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      /* Function concepts display as a call 'Concept<args...>()'.  The
	 CALL_EXPR is built by hand: operand 0 holds the operand count
	 (integer_two_node) and operand 1 the callee.  */
      tree call = build_vl_exp (CALL_EXPR, 2);
      TREE_OPERAND (call, 0) = integer_two_node;
      TREE_OPERAND (call, 1) = id;
      pp->expression (call);
    }
  else
    gcc_unreachable ();
}
/* Output the "[with ...]" clause for a parameter mapping of an atomic
constraint. */
void
pp_cxx_parameter_mapping (cxx_pretty_printer *pp, tree map)
{
  pp_cxx_whitespace (pp);
  pp_cxx_left_bracket (pp);
  /* Translated so diagnostics can be localized.  */
  pp->translate_string ("with");
  pp_cxx_whitespace (pp);

  /* MAP is a TREE_LIST: TREE_VALUE is the template parameter,
     TREE_PURPOSE the argument it maps to.  Entries print as
     'parm = arg', separated by ';'.  */
  for (tree p = map; p; p = TREE_CHAIN (p))
    {
      tree parm = TREE_VALUE (p);
      tree arg = TREE_PURPOSE (p);

      if (TYPE_P (parm))
	pp->type_id (parm);
      else
	/* Non-type/template parms print by the declared name.  */
	pp_cxx_tree_identifier (pp, DECL_NAME (TEMPLATE_PARM_DECL (parm)));

      pp_cxx_whitespace (pp);
      pp_equal (pp);
      pp_cxx_whitespace (pp);

      if (TYPE_P (arg) || DECL_TEMPLATE_TEMPLATE_PARM_P (arg))
	pp->type_id (arg);
      else
	pp->expression (arg);

      if (TREE_CHAIN (p) != NULL_TREE)
	pp_cxx_separate_with (pp, ';');
    }

  pp_cxx_right_bracket (pp);
}
void
pp_cxx_atomic_constraint (cxx_pretty_printer *pp, tree t)
{
  /* Emit the expression. */
  pp->expression (ATOMIC_CONSTR_EXPR (t));

  /* Emit the parameter mapping, unless absent or known-bad. */
  tree map = ATOMIC_CONSTR_MAP (t);
  if (map && map != error_mark_node)
    pp_cxx_parameter_mapping (pp, map);
}
/* Print a conjunction of constraints using the logic notation
   "lhs /\ rhs".  */
void
pp_cxx_conjunction (cxx_pretty_printer *pp, tree t)
{
  pp_cxx_constraint (pp, TREE_OPERAND (t, 0));
  pp_string (pp, " /\\ ");
  pp_cxx_constraint (pp, TREE_OPERAND (t, 1));
}
/* Print a disjunction of constraints using the logic notation
   "lhs \/ rhs".  */
void
pp_cxx_disjunction (cxx_pretty_printer *pp, tree t)
{
  pp_cxx_constraint (pp, TREE_OPERAND (t, 0));
  pp_string (pp, " \\/ ");
  pp_cxx_constraint (pp, TREE_OPERAND (t, 1));
}
/* Entry point for printing a (possibly compound) constraint tree.  */
void
pp_cxx_constraint (cxx_pretty_printer *pp, tree t)
{
  /* Let the expression printer render error_mark_node directly.  */
  if (t == error_mark_node)
    return pp->expression (t);

  switch (TREE_CODE (t))
    {
    case ATOMIC_CONSTR:
      pp_cxx_atomic_constraint (pp, t);
      break;

    case CHECK_CONSTR:
      pp_cxx_check_constraint (pp, t);
      break;

    case CONJ_CONSTR:
      pp_cxx_conjunction (pp, t);
      break;

    case DISJ_CONSTR:
      pp_cxx_disjunction (pp, t);
      break;

    case EXPR_PACK_EXPANSION:
      /* Print the pattern of the pack expansion.  */
      pp->expression (TREE_OPERAND (t, 0));
      break;

    default:
      gcc_unreachable ();
    }
}
typedef c_pretty_print_fn pp_fun;
/* Initialization of a C++ pretty-printer object. */
cxx_pretty_printer::cxx_pretty_printer ()
  : c_pretty_printer (),
    enclosing_scope (global_namespace)
{
  /* The base class stores these callbacks as raw function pointers of
     type c_pretty_print_fn; the casts only adjust the printer argument
     type to cxx_pretty_printer*.  */
  type_specifier_seq = (pp_fun) pp_cxx_type_specifier_seq;
  parameter_list = (pp_fun) pp_cxx_parameter_declaration_clause;
}
/* cxx_pretty_printer's implementation of pretty_printer::clone vfunc. */
pretty_printer *
cxx_pretty_printer::clone () const
{
  /* Polymorphic copy via the copy constructor; the caller owns the
     returned object.  */
  return new cxx_pretty_printer (*this);
}
|
A1_3.c | // gcc -std=c99 -Wall -lm -fopenmp -o go A1_3.c
#include <stdio.h>
#include <omp.h>
#include <stdlib.h>
#include <time.h>
void display(int, int *);
void swap(int *, int *);
/* Read the CPU timestamp counter (RDTSC).
 * BUG FIX: the original used the "=A" constraint, which is only correct
 * on 32-bit x86; on x86-64 "=A" maps to RAX alone, so the high 32 bits
 * delivered in EDX were silently lost.  RDTSC always returns EDX:EAX,
 * so read the two halves explicitly — correct on both i386 and x86-64.
 * Note: raw TSC reads are not serialized and are core-frequency
 * dependent; fine for the coarse timing done here. */
unsigned long long tick(void)
{
    unsigned int lo, hi;
    __asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
    return ((unsigned long long)hi << 32) | lo;
}
/* Fill an array with random values, gnome-sort it, and report the
 * elapsed TSC ticks. */
int main(int argc, char *argv[])
{
    int n = 10;
    /* BUG FIX: the original called malloc(n), allocating n BYTES for n
     * ints, so v[2..9] were written out of bounds (heap overflow).
     * Allocate n * sizeof(int) and check the result. */
    int *v = (int *)malloc(n * sizeof(int));
    if (v == NULL)
    {
        fprintf(stderr, "out of memory\n");
        return 1;
    }
    // initialize random seed based on current time
    srand((unsigned) time(NULL));
    for (int i = 0; i < n; ++i)
    {
        v[i] = rand() % 20; //RAND_MAX;
    }
    display(n, v);
    unsigned long long start = tick();
    // Gnome sort (dubbed stupid sort): i scans backward on inversions,
    // j remembers where to resume the forward scan.
    int i = 1, j = 2;
    while (i < n)
    {
        /* '<=' (was '<') also advances past EQUAL neighbours; the old
         * strict compare pointlessly swapped equal elements and walked
         * back over them.  Result order is unchanged (ascending). */
        if (v[i - 1] <= v[i])
        {
            i = j++;
        }
        else
        {
            // swap [i-1] and [i]
            swap(&v[i], &v[i - 1]);
            if (--i == 0)
                i = j++;
        }
    }
    // #pragma omp parallel for private(i, j)
    // {
    // for
    // // parallelize this for loop
    // #pragma omp for
    // }
    double time = (double)(tick() - start);
    display(n, v);
    printf("Time: %.2f\n", time);
    free(v);
    return 0;
}
/* Print the n values of v on one line, tab-separated, then a newline. */
void display(int n, int *v)
{
    int k = 0;
    while (k < n)
    {
        printf("%d\t", v[k]);
        ++k;
    }
    printf("\n");
}
/* Exchange the two ints pointed to by x and y. */
void swap(int *x, int *y)
{
    int z = *x;   /* classic three-step swap via a temporary */
    *x = *y;
    *y = z;
} |
matmul.c | #include "wtime.h"
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#define N 500
#define STEPS 10
/* IMPORTANTE
* ----------
* La memoria es unidimensional, y en C las matrices multidimensionales se
* representan de forma row-major: las filas se guardan de forma contigua.
*
* Matriz A 2x3:
* a11 a12 a13
* a21 a22 a23
*
* A en lenguajes row-major que cuentan desde 0 (C, C++):
* Memoria: [a11 a12 a13 a21 a22 a23]
* Indices: 0 1 2 3 4 5
*
* A en lenguajes column-major que cuentan desde 1 (Fortran, Julia):
* Memoria: [a11 a21 a12 a22 a13 a23]
* Indices: 1 2 3 4 5 6
*
*/
// A: every element of row y holds the value y+1 (row-major storage).
void init_a(float *m)
{
    for (int row = 0; row < N; ++row)
    {
        const float value = (float)(row + 1);
        float *dst = m + row * N;
        for (int col = 0; col < N; ++col)
            dst[col] = value;
    }
}
// B: unit upper-triangular pattern — 1 on and above the diagonal, 0 below.
void init_b(float *m)
{
    for (int row = 0; row < N; ++row)
    {
        float *dst = m + row * N;
        for (int col = 0; col < N; ++col)
            dst[col] = (col < row) ? 0.0f : 1.0f;
    }
}
//inicializa b pero de manera transpuesta bt[x,y]=b[y,x]
// Initialize b transposed in place: bt[x][y] = b[y][x], i.e. ones on and
// below the diagonal (the transpose of the upper-triangular B above).
void init_b_transposed(float *bt)
{
    for (int row = 0; row < N; ++row)
    {
        float *dst = bt + row * N;
        for (int col = 0; col < N; ++col)
            dst[col] = (col <= row) ? 1.0f : 0.0f;
    }
}
// C = 0: zero out all N*N accumulator entries.
void init_c(float *m)
{
    const int total = N * N;
    for (int k = 0; k < total; ++k)
        m[k] = 0.0f;
}
/* Verify C after STEPS accumulations of A*B: with the A and B chosen
   above, c[y][x] must equal (x+1)*(y+1)*STEPS.  Prints every mismatch
   and returns false if any element differs.  Exact float comparison is
   intentional here: all expected values are small integers, which are
   exactly representable in float. */
bool check_result(const float *m)
{
    bool pass = true;
    for (int y = 0; y < N; ++y)
    {
        for (int x = 0; x < N; ++x)
        {
            float expected = (x + 1) * (y + 1) * STEPS;
            float got = m[y * N + x];
            if (got != expected)
            {
                printf("%d,%d: got %f, expected %f\n", y, x, got, expected);
                pass = false;
            }
        }
    }
    return pass;
}
/* C = A*B + C, plain triple loop.  The inner k loop strides b by N
   (walks a column of row-major storage), which is the cache-unfriendly
   access the transposed variants below avoid. */
void matmul_naive(const float *a, const float *b, float *c)
{
    for (int y = 0; y < N; ++y)
    {
        for (int x = 0; x < N; ++x)
        {
            for (int k = 0; k < N; ++k)
            {
                c[y * N + x] += a[y * N + k] * b[k * N + x];
            }
        }
    }
}
/* C = A*B + C, same naive loop parallelized over rows of C.  Each
   thread owns distinct y values, so the updates to c never race. */
void matmul_naive_omp(const float *a, const float *b, float *c)
{
    /* dynamic schedule: rows cost roughly the same here, but dynamic
       tolerates OS noise on loaded machines */
    #pragma omp parallel for default(none) \
    schedule(dynamic) \
    shared(c, a, b)
    for (int y = 0; y < N; ++y)
    {
        for (int x = 0; x < N; ++x)
        {
            for (int k = 0; k < N; ++k)
            {
                c[y * N + x] += a[y * N + k] * b[k * N + x];
            }
        }
    }
}
// realiza el producto pero trasponiendo la matriz bt
// C = A*(B^T)^T + C
// lo que equivale a:
// C = A*B + C
/* C = A*B + C where bt already holds B transposed. */
void matmul_transposed(const float *a, const float *bt, float *c)
{
    for (int y = 0; y < N; ++y)
    {
        for (int x = 0; x < N; ++x)
        {
            for (int k = 0; k < N; ++k)
            {
                /* the point: both operands of the k loop are now read
                   with unit stride, avoiding the cache and TLB misses
                   of the naive column walk */
                c[y * N + x] += a[y * N + k] * bt[x * N + k];
            }
        }
    }
}
/* Same computation as matmul_transposed (C = A*B + C with bt = B^T),
   written with row-base pointers hoisted out of the inner loops.  The
   additions hit c[y*N+x] in the same order, so results are identical. */
void matmul_transposed_ptr(const float *a, const float *bt, float *c)
{
    for (int y = 0; y < N; ++y)
    {
        const float *arow = a + y * N;
        float *crow = c + y * N;
        for (int x = 0; x < N; ++x)
        {
            const float *btrow = bt + x * N;
            for (int k = 0; k < N; ++k)
            {
                /* unit-stride reads on both inputs: no cache/TLB jumps */
                crow[x] += arow[k] * btrow[k];
            }
        }
    }
}
/* C = A*B + C with bt = B^T, parallelized over rows of C.  Distinct
   threads write distinct y rows, so there is no race on c. */
void matmul_transposed_omp(const float *a, const float *bt, float *c)
{
    #pragma omp parallel for default(none) \
    schedule(dynamic) \
    shared(c, a, bt)
    for (int y = 0; y < N; ++y)
    {
        for (int x = 0; x < N; ++x)
        {
            for (int k = 0; k < N; ++k)
            {
                c[y * N + x] += a[y * N + k] * bt[x * N + k];
            }
        }
    }
}
/* Benchmark driver: run each matmul variant STEPS times on fresh
 * matrices, check the result, and report GFLOPS.
 * FIXES vs original: allocations are now checked, and a/b/c are freed
 * before exit (they were leaked).  matmul_transposed_ptr is defined
 * above but intentionally not benchmarked here (same as before). */
int main()
{
    size_t matsize = N * N * sizeof(float);

    /* allocate the matrices */
    float *a = malloc(matsize);
    float *b = malloc(matsize);
    float *c = malloc(matsize);
    if (a == NULL || b == NULL || c == NULL)
    {
        fprintf(stderr, "out of memory\n");
        free(a);
        free(b);
        free(c);
        return 1;
    }

    /* initialize values */
    init_a(a);
    init_b(b);
    init_c(c);
    double start = wtime();
    for (int i = 0; i < STEPS; ++i)
    {
        matmul_naive(a, b, c);
    }
    double end = wtime();
    double elapsed = end - start;
    /* 2*N^3 multiply-adds per product plus N^2 accumulate, per step */
    double operations = STEPS * (2.0 * N * N * N + N * N);
    double gflops = operations / (1000.0 * 1000.0 * 1000.0 * elapsed);
    if (check_result(c))
    {
        printf("Naive run: %f GFLOPS\n", gflops);
    }
    else
    {
        printf("Resultado incorrecto!\n");
    }

    /* initialize values */
    init_a(a);
    init_b(b);
    init_c(c);
    start = wtime();
    for (int i = 0; i < STEPS; ++i)
    {
        matmul_naive_omp(a, b, c);
    }
    end = wtime();
    elapsed = end - start;
    gflops = operations / (1000.0 * 1000.0 * 1000.0 * elapsed);
    if (check_result(c))
    {
        printf("Naive omp run: %f GFLOPS\n", gflops);
    }
    else
    {
        printf("Resultado incorrecto!\n");
    }

    /* initialize values; b now holds B transposed */
    init_a(a);
    init_b_transposed(b);
    init_c(c);
    start = wtime();
    for (int i = 0; i < STEPS; ++i)
    {
        matmul_transposed(a, b, c);
    }
    end = wtime();
    elapsed = end - start;
    gflops = operations / (1000.0 * 1000.0 * 1000.0 * elapsed);
    if (check_result(c))
    {
        printf("Transposed run: %f GFLOPS\n", gflops);
    }
    else
    {
        printf("Resultado incorrecto!\n");
    }

    init_a(a);
    init_b_transposed(b);
    init_c(c);
    start = wtime();
    for (int i = 0; i < STEPS; ++i)
    {
        matmul_transposed_omp(a, b, c);
    }
    end = wtime();
    elapsed = end - start;
    gflops = operations / (1000.0 * 1000.0 * 1000.0 * elapsed);
    if (check_result(c))
    {
        printf("Transposed OMP run: %f GFLOPS\n", gflops);
    }
    else
    {
        printf("Resultado incorrecto!\n");
    }

    /* return a data-dependent value so the compiler cannot discard the
       whole computation; capture it before freeing */
    int result = (int)c[0];
    free(a);
    free(b);
    free(c);
    return result;
}
|
TiledFrameBuffer.h | #pragma once
#include <cassert>
#include <cstdint>
#include <cstring>

#include <vector>

#include "alloc16.h"
#include "LiteMath.h"
/**
\brief Frame Buffer
\param PackedColor -- uint32_t, uint16_t, uint8_t
\param FB_BIN_SIZE -- bin size; if 0, bins are not used and the framebuffer become 1-lvl!
\param FB_TILE_SIZE_X -- small tile size at x axis
\param FB_TILE_SIZE_Y -- small tile size at y axis
*/
template<typename PackedColor, int FB_BIN_SIZE, int FB_TILE_SIZE_X, int FB_TILE_SIZE_Y>
struct FrameBufferTwoLvl
{
  using ColorType = PackedColor;

  void Resize(int a_x, int a_y);
  void CopyToPitchLinear(uint32_t* a_data, int a_pitch, bool invertY = true);

  void ClearColor         (uint32_t a_color);
  void ClearColorAndDepth(uint32_t a_color, float a_depth);

  // Pointers to the first element of the tile containing pixel (x,y),
  // and to the individual pixel itself, in tiled storage order.
  inline PackedColor* TileColor(int x, int y)  { return m_color.data() + TileOffset(x,y);  }
  inline float*       TileDepth(int x, int y)  { return m_depth.data() + TileOffset(x,y);  }
  inline PackedColor* PixelColor(int x, int y) { return m_color.data() + PixelOffset(x,y); }
  inline float*       PixelDepth(int x, int y) { return m_depth.data() + PixelOffset(x,y); }

private:

  constexpr static int TILES_IN_BIN_X = FB_BIN_SIZE/FB_TILE_SIZE_X;
  constexpr static int TILES_IN_BIN_Y = FB_BIN_SIZE/FB_TILE_SIZE_Y;
  constexpr static int PIXS_IN_TILE   = FB_TILE_SIZE_X*FB_TILE_SIZE_Y;
  constexpr static int TILES_IN_BIN   = TILES_IN_BIN_X*TILES_IN_BIN_Y;
  constexpr static int ALIGN_OF_TILE  = sizeof(PackedColor)*(FB_TILE_SIZE_X*FB_TILE_SIZE_Y);

  std::vector<float, aligned<float, 64> > m_depth;
  // BUG FIX: m_color was declared std::vector<uint32_t, aligned<PackedColor,...>>.
  // The allocator's value_type must match the vector's element type, and
  // TileColor/PixelColor return PackedColor* from this buffer — for any
  // PackedColor other than uint32_t (uint16_t, uint8_t are documented
  // above) that declaration is ill-formed.  Store PackedColor elements.
  std::vector<PackedColor, aligned<PackedColor, ALIGN_OF_TILE> > m_color;

  int m_width;        // framebuffer size in pixels
  int m_height;
  int m_binsX;        // bin grid size (0 when FB_BIN_SIZE == 0, 1-level mode)
  int m_binsY;
  int m_tilesTotalX;  // tile grid size
  int m_tilesTotalY;

  inline int TileOffset(int x, int y);
  inline int PixelOffset(int x, int y);
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/* Linear offset (in elements) of the tile that contains pixel (x,y).
   Two-level layout: bins stored row-major, tiles row-major inside each
   bin, pixels row-major inside each tile.  x,y must be tile-aligned. */
template<typename PackedColor, int FB_BIN_SIZE, int FB_TILE_SIZE_X, int FB_TILE_SIZE_Y>
inline int FrameBufferTwoLvl<PackedColor, FB_BIN_SIZE, FB_TILE_SIZE_X, FB_TILE_SIZE_Y>::TileOffset(int x, int y)
{
  if(FB_BIN_SIZE != 0) // #static_if: assume compiler will opt this
  {
    assert(x % FB_TILE_SIZE_X == 0);
    assert(y % FB_TILE_SIZE_Y == 0);

    const int by = y/FB_BIN_SIZE;          // bin coordinates
    const int bx = x/FB_BIN_SIZE;
    const int y0 = y%FB_BIN_SIZE;          // position inside the bin
    const int x0 = x%FB_BIN_SIZE;
    const int tx = x0/FB_TILE_SIZE_X;      // tile coordinates inside the bin
    const int ty = y0/FB_TILE_SIZE_Y;

    const int offToBin  = (by*m_binsX + bx)*(FB_BIN_SIZE*FB_BIN_SIZE); // each bin holds BIN^2 pixels
    const int offToTile = (ty*TILES_IN_BIN_X + tx)*PIXS_IN_TILE;

    assert( (offToBin + offToTile) % 16 == 0);
    return offToBin + offToTile;
  }
  else
  {
    // 1-level mode: tiles laid out directly over the whole framebuffer
    const int tx = x/FB_TILE_SIZE_X;
    const int ty = y/FB_TILE_SIZE_Y;
    return (ty*m_tilesTotalX + tx)*PIXS_IN_TILE;
  }
}
/* Linear offset of pixel (x,y): its tile's offset plus the row-major
   position inside the tile. */
template<typename PackedColor, int FB_BIN_SIZE, int FB_TILE_SIZE_X, int FB_TILE_SIZE_Y>
inline int FrameBufferTwoLvl<PackedColor, FB_BIN_SIZE, FB_TILE_SIZE_X, FB_TILE_SIZE_Y>::PixelOffset(int x, int y)
{
  const int lx = x%FB_TILE_SIZE_X;  // local coords inside the tile
  const int ly = y%FB_TILE_SIZE_Y;
  return TileOffset(x,y) + ly*FB_TILE_SIZE_X + lx;
}
/* Allocate color and depth storage for an a_x by a_y framebuffer.
   In 2-level mode the dimensions must be multiples of FB_BIN_SIZE
   (asserted); contents are left uninitialized — call the Clear*
   functions afterwards. */
template<typename PackedColor, int FB_BIN_SIZE, int FB_TILE_SIZE_X, int FB_TILE_SIZE_Y>
void FrameBufferTwoLvl<PackedColor,FB_BIN_SIZE, FB_TILE_SIZE_X, FB_TILE_SIZE_Y>::Resize(int a_x, int a_y)
{
  m_width  = a_x;
  m_height = a_y;

  if(FB_BIN_SIZE != 0)
  {
    assert(a_x % FB_BIN_SIZE == 0);
    assert(a_y % FB_BIN_SIZE == 0);
    m_binsX = a_x/FB_BIN_SIZE;
    m_binsY = a_y/FB_BIN_SIZE;
  }
  else
  {
    // 1-level mode: no bin grid
    m_binsX = 0;
    m_binsY = 0;
  }

  m_tilesTotalX = a_x/FB_TILE_SIZE_X;
  m_tilesTotalY = a_y/FB_TILE_SIZE_Y;

  m_depth.resize(a_x*a_y);
  m_color.resize(a_x*a_y);
}
/* True when ptr is aligned to 'alignment' bytes (alignment must be non-zero). */
inline static bool IsAligned(const void * ptr, std::uintptr_t alignment) noexcept
{
  const auto address = reinterpret_cast<std::uintptr_t>(ptr);
  return (address % alignment) == 0;
}
/* Untile the color buffer into a pitch-linear uint32_t image, optionally
   flipping Y.
   BUG FIX: the out-of-class definition repeated the default argument
   "invertY = true" already given in the class declaration; repeating a
   default argument in a later declaration is ill-formed in C++, so it
   is removed here.
   NOTE(review): the body copies exactly 4 rows of 4 pixels per tile, so
   it assumes FB_TILE_SIZE_X == FB_TILE_SIZE_Y == 4 and a 32-bit
   PackedColor — confirm instantiations match. */
template<typename PackedColor, int FB_BIN_SIZE, int FB_TILE_SIZE_X, int FB_TILE_SIZE_Y>
void FrameBufferTwoLvl<PackedColor,FB_BIN_SIZE, FB_TILE_SIZE_X, FB_TILE_SIZE_Y>::CopyToPitchLinear(uint32_t* a_data, int a_pitch, bool invertY)
{
  const int binsTotal = m_binsY*m_binsX;

  if(invertY)
  {
    for(int y=0; y<m_height; y+= FB_TILE_SIZE_Y)
    {
      for(int x=0; x<m_width; x += FB_TILE_SIZE_X)
      {
        const PackedColor* tilecolor = TileColor(x,y);
        // one vector load per tile row
        const cvex::vuint4 tileRow0 = cvex::load(tilecolor + 0);
        const cvex::vuint4 tileRow1 = cvex::load(tilecolor + 4);
        const cvex::vuint4 tileRow2 = cvex::load(tilecolor + 8);
        const cvex::vuint4 tileRow3 = cvex::load(tilecolor + 12);
        // scatter rows bottom-up into the destination
        cvex::store(a_data + (m_height - (y + 0) - 1)*a_pitch + x, tileRow0);
        cvex::store(a_data + (m_height - (y + 1) - 1)*a_pitch + x, tileRow1);
        cvex::store(a_data + (m_height - (y + 2) - 1)*a_pitch + x, tileRow2);
        cvex::store(a_data + (m_height - (y + 3) - 1)*a_pitch + x, tileRow3);
      }
    }
  }
  else
  {
    for(int y=0; y<m_height; y+= FB_TILE_SIZE_Y)
    {
      for(int x=0; x<m_width; x += FB_TILE_SIZE_X)
      {
        const PackedColor* tilecolor = TileColor(x,y);
        const cvex::vuint4 tileRow0 = cvex::load(tilecolor + 0);
        const cvex::vuint4 tileRow1 = cvex::load(tilecolor + 4);
        const cvex::vuint4 tileRow2 = cvex::load(tilecolor + 8);
        const cvex::vuint4 tileRow3 = cvex::load(tilecolor + 12);
        cvex::store(a_data + (y + 0)*a_pitch + x, tileRow0);
        cvex::store(a_data + (y + 1)*a_pitch + x, tileRow1);
        cvex::store(a_data + (y + 2)*a_pitch + x, tileRow2);
        cvex::store(a_data + (y + 3)*a_pitch + x, tileRow3);
      }
    }
  }
}
/* Fill the color buffer with a_color.  Tiled and linear layouts clear
   identically since every element gets the same value.
   FIX: memset now spelled std::memset — the header relied on <cstring>
   arriving transitively; it is now included explicitly at the top.
   NOTE(review): the vector path writes 8 uint32 lanes per iteration, so
   it assumes a 32-bit PackedColor and size % 8 == 0 — confirm for
   non-uint32 instantiations. */
template<typename PackedColor, int FB_BIN_SIZE, int FB_TILE_SIZE_X, int FB_TILE_SIZE_Y>
void FrameBufferTwoLvl<PackedColor,FB_BIN_SIZE, FB_TILE_SIZE_X, FB_TILE_SIZE_Y>::ClearColor(uint32_t a_color)
{
  if(a_color == 0)
  {
    // all-zero bit pattern: plain memset is the fastest path
    std::memset(m_color.data(), 0, m_color.size()*sizeof(PackedColor));
    return;
  }

  const cvex::vuint4 vcolor = cvex::splat(a_color);
  const int size = (m_width*m_height);
  for(int i=0; i<size; i+=8)
  {
    cvex::store(m_color.data() + i + 0, vcolor);
    cvex::store(m_color.data() + i + 4, vcolor);
  }
}
/* Fill color with a_color and depth with a_depth in one pass over both
   buffers.  No-op on an unallocated framebuffer.
   NOTE(review): writes 8 lanes per iteration — assumes size % 8 == 0
   and a 32-bit PackedColor; confirm for other instantiations. */
template<typename PackedColor, int FB_BIN_SIZE, int FB_TILE_SIZE_X, int FB_TILE_SIZE_Y>
void FrameBufferTwoLvl<PackedColor,FB_BIN_SIZE, FB_TILE_SIZE_X, FB_TILE_SIZE_Y>::ClearColorAndDepth(uint32_t a_color, float a_depth)
{
  if(m_color.size() == 0)
    return;

  const cvex::vuint4  vcolor = cvex::splat(a_color);
  const cvex::vfloat4 vdepth = cvex::splat(a_depth);
  const int size = (m_width*m_height);
  //#pragma omp parallel for
  for(int i=0; i<size; i+=8)
  {
    cvex::store(m_color.data() + i + 0, vcolor);
    cvex::store(m_color.data() + i + 4, vcolor);
    cvex::store(m_depth.data() + i + 0, vdepth);
    cvex::store(m_depth.data() + i + 4, vdepth);
  }
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
namespace FB
{
  // Color pack/unpack helpers.  The unspecialized primaries below are
  // intentionally inert fallbacks (they return DstType(0) / do nothing);
  // only the explicit specializations that follow are meant to be used.
  // NOTE(review): the packers do no clamping or rounding — channel
  // values outside [0,1] overflow the 8-bit fields; assumed inputs are
  // already in [0,1], confirm at call sites.

  template<typename SrcType, typename DstType>
  static inline DstType ColorPack(const SrcType r, const SrcType g, const SrcType b, const SrcType a)
  {
    return DstType(0);
  }

  template<typename SrcType, typename DstType>
  static inline DstType ColorPack(const SrcType r, const SrcType g, const SrcType b)
  {
    return DstType(0);
  }

  template<typename SrcType, typename DstType>
  static inline void ColorUNPack(const DstType colorOld, SrcType& r, SrcType& g, SrcType& b, SrcType& a)
  {
  }

  //////////////////////////////////////////////////////////////////////////////////////// scalar
  //////////////////////////////////////////////////////////////////////////////////////// float to uint32_t

  // Pack [0,1] floats into a 32-bit BGRA pixel (B in the low byte).
  template<>
  inline uint32_t ColorPack<float,uint32_t>(const float r, const float g, const float b, const float a)
  {
    constexpr float c_255 = 255.0f;
    return (uint32_t(r * c_255) << 16) | // BGRA
           (uint32_t(g * c_255) << 8)  |
           (uint32_t(b * c_255) << 0)  |
           (uint32_t(a * c_255) << 24);
  }

  // Same, alpha left at zero.
  template<>
  inline uint32_t ColorPack<float,uint32_t>(const float r, const float g, const float b)
  {
    constexpr float c_255 = 255.0f;
    return (uint32_t(r * c_255) << 16) | // BGRA
           (uint32_t(g * c_255) << 8)  |
           (uint32_t(b * c_255) << 0);
  }

  // Unpack a 32-bit BGRA pixel back to [0,1] floats.
  template<>
  inline void ColorUNPack<float,uint32_t>(const uint32_t colorOld,
                                          float& r, float& g, float& b, float& a)
  {
    constexpr float c_255Inv = 1.0f/255.0f;
    r = float( (colorOld & 0x00FF0000) >> 16)*c_255Inv;
    g = float( (colorOld & 0x0000FF00) >> 8 )*c_255Inv;
    b = float( (colorOld & 0x000000FF) >> 0 )*c_255Inv;
    a = float( (colorOld & 0xFF000000) >> 24)*c_255Inv;
  }

  //////////////////////////////////////////////////////////////////////////////////////// vector4
  //////////////////////////////////////////////////////////////////////////////////////// float to uint32_t

  // 4-wide SIMD variants of the scalar pack/unpack above.
  template<>
  inline cvex::vuint4 ColorPack<cvex::vfloat4,cvex::vuint4>(const cvex::vfloat4 r, const cvex::vfloat4 g, const cvex::vfloat4 b, const cvex::vfloat4 a)
  {
    const cvex::vfloat4 c_255 = cvex::splat(255.0f);
    return (cvex::to_uint32(r * c_255) << 16) | // BGRA
           (cvex::to_uint32(g * c_255) << 8)  |
           (cvex::to_uint32(b * c_255) << 0)  |
           (cvex::to_uint32(a * c_255) << 24);
  }

  template<>
  inline cvex::vuint4 ColorPack<cvex::vfloat4,cvex::vuint4>(const cvex::vfloat4 r, const cvex::vfloat4 g, const cvex::vfloat4 b)
  {
    const cvex::vfloat4 c_255 = cvex::splat(255.0f);
    return (cvex::to_uint32(r * c_255) << 16) | // BGRA
           (cvex::to_uint32(g * c_255) << 8)  |
           (cvex::to_uint32(b * c_255) << 0);
  }

  template<>
  inline void ColorUNPack<cvex::vfloat4,cvex::vuint4>(const cvex::vuint4 colorOld,
                                                      cvex::vfloat4& r, cvex::vfloat4& g, cvex::vfloat4& b, cvex::vfloat4& a)
  {
    const cvex::vfloat4 c_255Inv = cvex::splat(1.0f/255.0f);
    r = cvex::to_float32( (colorOld & 0x00FF0000) >> 16)*c_255Inv;
    g = cvex::to_float32( (colorOld & 0x0000FF00) >> 8 )*c_255Inv;
    b = cvex::to_float32( (colorOld & 0x000000FF) >> 0 )*c_255Inv;
    a = cvex::to_float32( (colorOld & 0xFF000000) >> 24)*c_255Inv;
  }

  //////////////////////////////////////////////////////////////////////////////////////// vector8
  //////////////////////////////////////////////////////////////////////////////////////// float to uint32_t

  // 8-wide SIMD variants.
  template<>
  inline cvex8::vuint8 ColorPack<cvex8::vfloat8,cvex8::vuint8>(const cvex8::vfloat8 r, const cvex8::vfloat8 g, const cvex8::vfloat8 b, const cvex8::vfloat8 a)
  {
    const cvex8::vfloat8 c_255 = cvex8::splat(255.0f);
    return (cvex8::to_uint32(r * c_255) << 16) | // BGRA
           (cvex8::to_uint32(g * c_255) << 8)  |
           (cvex8::to_uint32(b * c_255) << 0)  |
           (cvex8::to_uint32(a * c_255) << 24);
  }

  template<>
  inline cvex8::vuint8 ColorPack<cvex8::vfloat8,cvex8::vuint8>(const cvex8::vfloat8 r, const cvex8::vfloat8 g, const cvex8::vfloat8 b)
  {
    const cvex8::vfloat8 c_255 = cvex8::splat(255.0f);
    return (cvex8::to_uint32(r * c_255) << 16) | // BGRA
           (cvex8::to_uint32(g * c_255) << 8)  |
           (cvex8::to_uint32(b * c_255) << 0);
  }

  template<>
  inline void ColorUNPack<cvex8::vfloat8,cvex8::vuint8>(const cvex8::vuint8 colorOld,
                                                        cvex8::vfloat8& r, cvex8::vfloat8& g, cvex8::vfloat8& b, cvex8::vfloat8& a)
  {
    const cvex8::vfloat8 c_255Inv = cvex8::splat(1.0f/255.0f);
    r = cvex8::to_float32( (colorOld & 0x00FF0000) >> 16)*c_255Inv;
    g = cvex8::to_float32( (colorOld & 0x0000FF00) >> 8 )*c_255Inv;
    b = cvex8::to_float32( (colorOld & 0x000000FF) >> 0 )*c_255Inv;
    a = cvex8::to_float32( (colorOld & 0xFF000000) >> 24)*c_255Inv;
  }

  //////////////////////////////////////////////////////////////////////////////////////// vector16
  //////////////////////////////////////////////////////////////////////////////////////// float to uint32_t

  // 16-wide SIMD variants; not compiled on WIN32 (guard below).
  #ifndef WIN32
  template<>
  inline cvex16::vuint16 ColorPack<cvex16::vfloat16,cvex16::vuint16>(const cvex16::vfloat16 r, const cvex16::vfloat16 g, const cvex16::vfloat16 b, const cvex16::vfloat16 a)
  {
    const cvex16::vfloat16 c_255 = cvex16::splat(255.0f);
    return (cvex16::to_uint32(r * c_255) << 16) | // BGRA
           (cvex16::to_uint32(g * c_255) << 8)  |
           (cvex16::to_uint32(b * c_255) << 0)  |
           (cvex16::to_uint32(a * c_255) << 24);
  }

  template<>
  inline cvex16::vuint16 ColorPack<cvex16::vfloat16,cvex16::vuint16>(const cvex16::vfloat16 r, const cvex16::vfloat16 g, const cvex16::vfloat16 b)
  {
    const cvex16::vfloat16 c_255 = cvex16::splat(255.0f);
    return (cvex16::to_uint32(r * c_255) << 16) | // BGRA
           (cvex16::to_uint32(g * c_255) << 8)  |
           (cvex16::to_uint32(b * c_255) << 0);
  }

  template<>
  inline void ColorUNPack<cvex16::vfloat16,cvex16::vuint16>(const cvex16::vuint16 colorOld,
                                                            cvex16::vfloat16& r, cvex16::vfloat16& g, cvex16::vfloat16& b, cvex16::vfloat16& a)
  {
    const cvex16::vfloat16 c_255Inv = cvex16::splat(1.0f/255.0f);
    r = cvex16::to_float32( (colorOld & 0x00FF0000) >> 16)*c_255Inv;
    g = cvex16::to_float32( (colorOld & 0x0000FF00) >> 8 )*c_255Inv;
    b = cvex16::to_float32( (colorOld & 0x000000FF) >> 0 )*c_255Inv;
    a = cvex16::to_float32( (colorOld & 0xFF000000) >> 24)*c_255Inv;
  }
  #endif
}; |
GB_binop__bxor_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bxor_uint16)
// A.*B function (eWiseMult):         GB (_AemultB_01__bxor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__bxor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_03__bxor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bxor_uint16)
// A*D function (colscale): GB (_AxD__bxor_uint16)
// D*A function (rowscale): GB (_DxB__bxor_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__bxor_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__bxor_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxor_uint16)
// C=scalar+B GB (_bind1st__bxor_uint16)
// C=scalar+B' GB (_bind1st_tran__bxor_uint16)
// C=A+scalar GB (_bind2nd__bxor_uint16)
// C=A'+scalar GB (_bind2nd_tran__bxor_uint16)
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij) ^ (bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x) ^ (y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BXOR || GxB_NO_UINT16 || GxB_NO_BXOR_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0

// This kernel (C += A+B, all three matrices dense) is only generated
// for the operators listed below; BXOR is not among them, so the body
// is compiled out and the name is left as "(none)".
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B, all three matrices dense.  The loop body comes from the
// included template, specialized by the GB_* macros defined above.
GrB_Info GB (_Cdense_ewise3_noaccum__bxor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix, sliced into
// B_ntasks tasks over B_nthreads threads.
GrB_Info GB (_Cdense_accumB__bxor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar (passed type-erased via p_bwork) into a
// dense matrix.
GrB_Info GB (_Cdense_accumb__bxor_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above always returns.  Left
    // in place because this file is auto-generated (see file header).
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale by a diagonal matrix D; the XOR arithmetic is
// supplied by the GB_BINOP macro, the loops by the included template.
GrB_Info GB (_AxD__bxor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale by a diagonal matrix D.
GrB_Info GB (_DxB__bxor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B.  The workspace declared here is
// consumed (and freed via GB_FREE_WORK) by the included template.
GrB_Info GB (_AaddB__bxor_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B, general case.
GrB_Info GB (_AemultB_01__bxor_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP is 0 for XOR (commutative), so only the
// unflipped template branch below is compiled in.
GrB_Info GB (_AemultB_02__bxor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif

    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult kernel C<M> = A.*B where M is sparse/hypersparse and both A and
// B are bitmap/full, for BXOR/uint16 (generated code).
GrB_Info GB (_AemultB_03__bxor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult kernel C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is held in
// bitmap form, for BXOR/uint16 (generated code).
GrB_Info GB (_AemultB_bitmap__bxor_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x XOR Bx [p]: apply the BXOR/uint16 operator with the scalar
// bound to the first argument.  Entries absent from the bitmap Bb are
// skipped (GBB tests the bitmap; it is all-true when Bb is NULL).
GrB_Info GB (_bind1st__bxor_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t   x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = Bx [p] ;
        Cx [p] = (x) ^ (bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] XOR y: apply the BXOR/uint16 operator with the scalar
// bound to the second argument; bitmap-absent entries are skipped.
GrB_Info GB (_bind2nd__bxor_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t   y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = Ax [p] ;
        Cx [p] = (aij) ^ (y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c: it writes one transposed
// entry Cx [pC] = x XOR Ax [pA] (no typecasting despite the macro name).
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint16_t aij = Ax [pA] ;        \
    Cx [pC] = (x) ^ (aij) ;         \
}

// C = op (x, A'): transpose A and apply BXOR with the scalar bound first.
GrB_Info GB (_bind1st_tran__bxor_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for subsequent generated kernels
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP for the transposed bind-2nd case: writes one transposed entry
// Cx [pC] = Ax [pA] XOR y (no typecasting despite the macro name).
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint16_t aij = Ax [pA] ;        \
    Cx [pC] = (aij) ^ (y) ;         \
}

// C = op (A', y): transpose A and apply BXOR with the scalar bound second.
GrB_Info GB (_bind2nd_tran__bxor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
smod.c |
#ifndef _SMOD_H_
#include "smod.h"
#endif
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* brinv -
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * brinv - in-place inversion of the n-by-n row-major matrix a[0..n*n-1]
 * by Gauss-Jordan elimination with full (row+column) pivoting.
 * The pivot row/column chosen at each step k is recorded in is[k]/js[k]
 * so the permutations can be undone at the end.  A pivot is considered
 * zero when adding 1.0 to it does not change 1.0; in that case the
 * routine reports the matrix rank (via brank) and aborts the program.
 * Returns 1 on success.  NOTE(review): malloc results are not checked.
 */
int brinv (double *a,int n)
{
    int *is, *js, i, j, k, l, u, v, rk;
    double d, p;
    is = (int *)malloc (n * sizeof(int));
    js = (int *)malloc (n * sizeof(int));
    for (k = 0; k <= n - 1; k++)
    {
        /* full pivot search over the trailing submatrix */
        d=0.0;
        for (i = k; i <= n - 1; i++)
        {
            for (j = k; j <= n - 1; j++)
            {
                l = i * n + j;
                p = fabs (a[l]);
                if (p > d)
                {
                    d = p;
                    is[k] = i;
                    js[k] = j;
                }
            }
        }
        /* pivot numerically zero => matrix is singular: report and abort */
        if (d + 1.0 == 1.0)
        {
            free (is);
            free (js);
            rk = brank(a, n, n);
            printf ("error: Matrix is ill-conditioned:\n");
            printf ("       not inv\n");
            printf ("       rank = %d\n", rk);
            exit(0);
        }
        /* move the pivot to position (k,k): swap rows ... */
        if (is[k] != k)
        {
            for (j = 0; j <= n - 1; j++)
            {
                u = k * n + j;
                v = is[k] * n + j;
                p = a[u];
                a[u] = a[v];
                a[v] = p;
            }
        }
        /* ... then swap columns */
        if (js[k] != k)
        {
            for (i = 0; i <= n - 1; i++)
            {
                u = i * n + k;
                v = i * n + js[k];
                p = a[u];
                a[u] = a[v];
                a[v] = p;
            }
        }
        /* Gauss-Jordan step: scale pivot row, eliminate other rows */
        l = k * n + k;
        a[l] = 1.0 / a[l];
        for (j = 0; j <= n - 1; j++)
        {
            if (j != k)
            {
                u = k * n + j;
                a[u] = a[u] * a[l];
            }
        }
        for (i = 0; i <= n - 1; i++)
        {
            if (i != k)
                for (j = 0; j <= n - 1; j++)
                    if (j != k)
                    {
                        u = i * n + j;
                        a[u] = a[u] - a[i * n + k] * a[k * n + j];
                    }
        }
        for (i = 0; i <= n - 1; i++)
        {
            if (i != k)
            {
                u = i * n + k;
                a[u] = - a[u] * a[l];
            }
        }
    }
    /* undo the pivoting permutations in reverse order */
    for (k = n - 1; k >= 0; k--)
    {
        if (js[k] != k)
        {
            for (j = 0; j <= n - 1; j++)
            {
                u = k * n + j;
                v = js[k] * n + j;
                p = a[u];
                a[u] = a[v];
                a[v] = p;
            }
        }
        if (is[k] != k)
        {
            for (i = 0; i <= n - 1; i++)
            {
                u = i * n + k;
                v = i * n + is[k];
                p = a[u];
                a[u] = a[v];
                a[v] = p;
            }
        }
    }
    free(is);
    free(js);
    return(1);
}
/*
 * brank - rank of the m-by-n row-major matrix a, computed by Gaussian
 * elimination with full pivoting.  The matrix is destroyed in the
 * process.  A pivot counts as zero when adding 1.0 to its magnitude
 * does not change 1.0 (same tolerance used by brinv).
 */
int brank(double *a, int m, int n)
{
    int rank = 0;
    int limit = (m >= n) ? n : m;
    int step, prow, pcol, r, c;
    double pivot, mag, tmp, factor;
    for (step = 0; step < limit; step++)
    {
        /* full pivot search over the trailing submatrix */
        pivot = 0.0;
        prow = step;
        pcol = step;
        for (r = step; r < m; r++)
        {
            for (c = step; c < n; c++)
            {
                mag = fabs(a[r * n + c]);
                if (mag > pivot)
                {
                    pivot = mag;
                    prow = r;
                    pcol = c;
                }
            }
        }
        if (pivot + 1.0 == 1.0)
            return rank;            /* no usable pivot left */
        rank = rank + 1;
        /* bring the pivot row into place */
        if (prow != step)
        {
            for (c = step; c < n; c++)
            {
                tmp = a[step * n + c];
                a[step * n + c] = a[prow * n + c];
                a[prow * n + c] = tmp;
            }
        }
        /* bring the pivot column into place */
        if (pcol != step)
        {
            for (r = step; r < m; r++)
            {
                tmp = a[r * n + pcol];
                a[r * n + pcol] = a[r * n + step];
                a[r * n + step] = tmp;
            }
        }
        /* eliminate entries below the pivot (row range kept as in the
           original routine) */
        for (r = step + 1; r < n; r++)
        {
            factor = a[r * n + step] / a[step * n + step];
            for (c = step + 1; c < n; c++)
                a[r * n + c] = a[r * n + c] - factor * a[step * n + c];
        }
    }
    return rank;
}
/*
 * choldc - Cholesky decomposition A = L * L^T of a positive-definite
 * symmetric matrix stored row-major in a[0..n*n-1].  Only the upper
 * triangle of a is read and it is not modified; the Cholesky factor L
 * is written into the strict lower triangle of a, while the diagonal
 * elements of L are returned separately in p[0..n-1].  If the matrix
 * is not positive definite the routine prints the rank (via brank)
 * and aborts the program, matching the original error policy.
 */
void choldc(double *a, int n, double p[])
{
    int row, col, k, rk;
    double acc;
    for (row = 0; row < n; row++)
    {
        for (col = row; col < n; col++)
        {
            acc = a[row * n + col];
            for (k = row - 1; k >= 0; k--)
                acc -= a[row * n + k] * a[col * n + k];
            if (row == col)
            {
                /* diagonal entry: must be strictly positive */
                if (acc <= 0.0)
                {
                    rk = brank(a, n, n);
                    printf("error: Matrix is not positive definite:\n");
                    printf("       i = %d\tj = %d\tsum = %e\n", row, col, acc);
                    printf("       rank = %d\n", rk);
                    exit(0);
                }
                p[row] = sqrt(acc);
            }
            else
            {
                a[col * n + row] = acc / p[row];
            }
        }
    }
}
/*
 * cholsl - solve the linear system A x = b given the Cholesky
 * factorization produced by choldc: L in the strict lower triangle of
 * a[0..n*n-1] and diag(L) in p[0..n-1].  Performs forward substitution
 * (L y = b) followed by back substitution (L^T x = y).  a, n and p are
 * not modified, so repeated calls with different right-hand sides are
 * fine; b is untouched unless the caller passes the same array for b
 * and x, which is allowed.
 */
void cholsl(double *a, int n, double p[], double b[], double x[])
{
    int row, k;
    double acc;
    /* forward substitution: L y = b, with y stored in x */
    for (row = 0; row < n; row++)
    {
        acc = b[row];
        for (k = row - 1; k >= 0; k--)
            acc -= a[row * n + k] * x[k];
        x[row] = acc / p[row];
    }
    /* back substitution: L^T x = y */
    for (row = n - 1; row >= 0; row--)
    {
        acc = x[row];
        for (k = row + 1; k < n; k++)
            acc -= a[k * n + row] * x[k];
        x[row] = acc / p[row];
    }
}
///////////********************////////////////
/*
 * solvels_chol - solve the symmetric positive-definite system A x = y
 * via Cholesky decomposition.  If nocov != 1, a is additionally
 * overwritten with inv(A): first inv(L) is built in the lower triangle,
 * then inv(A) = inv(L)^T * inv(L) is stored in the upper triangle and
 * the strict lower triangle is zeroed.
 *   a: n-by-n row-major matrix (destroyed; upper triangle receives
 *      inv(A) when nocov != 1)
 *   y: right-hand side, x: solution (both length n)
 */
void solvels_chol(double *a, int n, double *y, double *x, int nocov)
{
    double *p, sum;
    int i, k, j;
    p = (double *)calloc (n, sizeof(double));
    choldc(a, n, p);
    cholsl(a, n, p, y, x);
    if (nocov == 1)
    {
        /* caller only wants the solution, not the covariance/inverse */
        free (p);
        return;
    }
    /* invert L in place (lower triangle), diagonal into a[i][i] */
    for (i=0;i<n;i++)
    {
        a[i * n + i]=1.0/p[i];
        for (j=i+1;j<n;j++)
        {
            sum=0.0;
            for (k=i;k<j;k++)
                sum -= a[j * n + k]*a[k * n + i];
            a[j * n + i]=sum/p[j];
        }
    }
    /* inv(A) = inv(L)^T * inv(L), stored in the upper triangle */
    for (i = 0; i <= n - 1; i++)
    {
        for (j = i; j <= n - 1; j++)
        {
            sum = 0.0;
            for (k = j; k <= n - 1; k++)
                sum = sum + a[k * n + i] * a[k * n + j];
            a[i * n + j] = sum;
        }
    }
    /* zero the strict lower triangle (inv(A) is symmetric anyway) */
    for (i = 0; i <= n - 1; i++)
    {
        for (j = 0; j <= i - 1; j++)
        {
            a[i * n + j] = 0;
        }
    }
    free(p);
    return;
}
/*
 * solvegaus - solve A x = y by explicit inversion: a is overwritten with
 * inv(A) by brinv, then x = inv(A) * y via brmul (n-by-n times n-by-1).
 */
void solvegaus(double *a, int n, double *y, double *x)
{
    brinv(a, n);
    brmul(a, y, n, n, 1, x);
}
/*
 * pt_orb - print the interpolated orbit between ts_orb and te_orb (seconds
 * past JD0) at step_orb intervals, in four formats:
 *   forb.xyz  Cartesian position/velocity plus distance and speed
 *   forb.aei  Keplerian elements plus orbital period (from xyz2aei)
 *   forb.rtn  position/velocity rotated to the RTN frame
 *   forb.llh  geocentric lat/lon and height above RCT (from xyz2llh)
 * The ephemeris is Lagrange-interpolated from the global OR_EPH table;
 * dim_eph is the per-epoch record size.  Aborts if any file cannot be
 * opened.
 */
void pt_orb (double ts_orb, double te_orb, double step_orb, int dim_eph)
{
    FILE *fp_fxyz, *fp_faei, *fp_frtn, *fp_fllh;
    int i, n;
    double tt, lps, utc, xtm[6], *eph, dist, velt, tp, rtn_p[3], rtn_v[3],
           ele[6], llh[3];
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*--print orbit--*/
    if((fp_fxyz=fopen("forb.xyz","w"))==NULL)
    {
        printf("Cannot write fort.xyz!\n");
        getch();
        exit(0);
    }
    if((fp_faei=fopen("forb.aei","w"))==NULL)
    {
        printf("Cannot write fort.aei!\n");
        getch();
        exit(0);
    }
    if((fp_frtn=fopen("forb.rtn","w"))==NULL)
    {
        printf("Cannot write fort.rtn!\n");
        getch();
        exit(0);
    }
    if((fp_fllh=fopen("forb.llh","w"))==NULL)
    {
        printf("Cannot write fort.llh!\n");
        getch();
        exit(0);
    }
    /* NOTE(review): only eph[0..5] is read below, yet the buffer is sized
       dim_eph - 1 -- assumes dim_eph >= 7; confirm against callers */
    eph = (double *) calloc (dim_eph - 1, sizeof(double));
    i = 0;
    for (utc = ts_orb; utc <= te_orb; utc = ts_orb + step_orb * i)
    {
        /* NOTE(review): leap seconds are evaluated at ts_orb, not at the
           current utc -- looks intentional only if the arc never spans a
           leap second; verify */
        lps = getlps (JD0 + ts_orb/86400.0);
        tt = utc + (lps + 32.184);
        lagrange (OR_EPH, DIM_OR, dim_eph, tt, eph);
        for (n = 0; n < 6; n++)
            xtm[n] = eph[n];
        dist = sqrt (xtm[0] * xtm[0] + xtm[1] * xtm[1] + xtm[2] * xtm[2]);
        velt = sqrt (xtm[3] * xtm[3] + xtm[4] * xtm[4] + xtm[5] * xtm[5]);
        tp = xyz2aei(ele, &xtm[0], &xtm[3]);
        xyz2rtn(&xtm[0], &xtm[3], &xtm[0], rtn_p);
        xyz2rtn(&xtm[0], &xtm[3], &xtm[3], rtn_v);
        xyz2llh(xtm, llh);
        fprintf (fp_fxyz, "%14.4f %14.6f %26.14f %26.14f %26.14f ",
            JD0, utc, xtm[0], xtm[1], xtm[2]);
        fprintf (fp_fxyz, "%24.16f %24.16f %24.16f %16.4f %14.6f \n",
            xtm[3], xtm[4], xtm[5], dist, velt);
        fprintf (fp_faei, "%14.4f %14.6f %26.14f %10.6f %12.6f ",
            JD0, utc, ele[0], ele[1], ele[2]);
        fprintf (fp_faei, "%12.6f %12.6f %12.4f %12.4f \n",
            ele[3], ele[4], ele[5], tp);
        fprintf (fp_frtn, "%14.4f %14.6f %16.4f %16.4f %16.4f ",
            JD0, utc, rtn_p[0], rtn_p[1], rtn_p[2]);
        fprintf (fp_frtn, "%14.6f %14.6f %14.6f %16.4f %14.6f \n",
            rtn_v[0], rtn_v[1], rtn_v[2], dist, velt);
        fprintf (fp_fllh, "%14.4f %14.6f %26.14f %26.14f %26.14f\n",
            JD0, utc, llh[0], llh[1], llh[2] - RCT);
        i++;
    }
    fclose(fp_fxyz);
    fclose(fp_faei);
    fclose(fp_frtn);
    fclose(fp_fllh);
    free (eph);
    return;
}
/*
 * mgrn1 - one Gaussian pseudo-random deviate with mean u and standard
 * deviation g.  Sums 12 uniform [0,1) variates drawn from a linear
 * congruential generator (modulus 65536, multiplier 2053, increment
 * 13849) and subtracts 6, relying on the central limit theorem.  The
 * seed *r is advanced in place, so successive calls continue the same
 * pseudo-random stream.
 */
double mgrn1(double u, double g, double *r)
{
    const double modulus = 65536.0;
    const double mult = 2053.0;
    const double incr = 13849.0;
    double sum12 = 0.0;
    int i, quot;
    for (i = 0; i < 12; i++)
    {
        /* advance the seed and reduce it modulo 65536 */
        *r = (*r) * mult + incr;
        quot = (int)(*r / modulus);
        *r = *r - quot * modulus;
        /* accumulate a uniform sample in [0,1) */
        sum12 = sum12 + (*r) / modulus;
    }
    return u + g * (sum12 - 6.0);
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* geteop - interpolation of eop
* mjd: double, input MJD
* xp, yp, ut1_utc, dx, dy: output EOP
* http://hpiers.obspm.fr/iers/eop/eopc04_05/eopc04_IAU2000.62-now
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * geteop - linearly interpolate Earth-orientation parameters at epoch mjd
 * from the global table EOPMT (NEOP rows of {MJD, xp, yp, UT1-UTC, dX, dY},
 * one row per day, e.g. from the IERS C04 series).
 *   mjd                      : input epoch (Modified Julian Date, UTC)
 *   xp, yp, ut1_utc, dx, dy  : outputs, interpolated between the row whose
 *                              integer MJD equals (int)mjd and the next row
 * Fix: the original read uninitialized locals when mjd was not present in
 * the table, and read one row past the end when it matched the last row.
 * Both cases now abort with a diagnostic instead of invoking undefined
 * behavior.
 */
void geteop (double mjd, double *xp, double *yp,
        double *ut1_utc, double *dx, double *dy)
{
    int i, mjdi = 0, found = 0;
    double x1 = 0.0, y1 = 0.0, dt1 = 0.0, dx1 = 0.0, dy1 = 0.0,
           x2 = 0.0, y2 = 0.0, dt2 = 0.0, dx2 = 0.0, dy2 = 0.0;
    /* stop at NEOP-1 so row i+1 always exists for the second node */
    for (i = 0; i + 1 < NEOP; i ++)
    {
        mjdi = (int)EOPMT[i * 6 + 0];
        if (mjdi == (int)mjd)
        {
            x1  = EOPMT[i * 6 + 1];
            y1  = EOPMT[i * 6 + 2];
            dt1 = EOPMT[i * 6 + 3];
            dx1 = EOPMT[i * 6 + 4];
            dy1 = EOPMT[i * 6 + 5];
            i++;                    /* second interpolation node: next day */
            x2  = EOPMT[i * 6 + 1];
            y2  = EOPMT[i * 6 + 2];
            dt2 = EOPMT[i * 6 + 3];
            dx2 = EOPMT[i * 6 + 4];
            dy2 = EOPMT[i * 6 + 5];
            found = 1;
            break;
        }
    }
    if (!found)
    {
        printf ("error: geteop: mjd %f not covered by EOP table\n", mjd);
        exit (0);
    }
    /* linear interpolation; (mjd - mjdi) is the fractional day */
    *xp      = x1  + (x2-x1)   * (mjd - mjdi);
    *yp      = y1  + (y2-y1)   * (mjd - mjdi);
    *ut1_utc = dt1 + (dt2-dt1) * (mjd - mjdi);
    *dx      = dx1 + (dx2-dx1) * (mjd - mjdi);
    *dy      = dy1 + (dy2-dy1) * (mjd - mjdi);
}
/*
 * getinfo - fill the time-and-rotation structure for one epoch.
 *   tjd[0] : integral Julian Day, tjd[1] : fraction of day (TT)
 * Populates info with TT/UTC/UT1 seconds, leap seconds, EOP values, GMST,
 * and the celestial<->terrestrial rotation matrices c_ei / c_ie.
 * When the central body CT != 2 only a precession/nutation rotation
 * (iau_pns) is used; returns 1 in that case, 0 otherwise.
 */
double getinfo(double *tjd, InfStruct *info)
{
    int n;
    double gmsth, ux[3] = {1,0,0}, uy[3] = {0,1,0}, uz[3] = {0,0,1},
           tx[3], ty[3], tz[3], xp, yp, ut1_utc, dx, dy;
    info->jd0 = tjd[0];
    info->tt = tjd[1] * 86400.0;
    info->jdt = info->jd0 + info->tt / 86400.0;
    info->leaps = getlps (info->jdt);
    /* UTC = TT - leap seconds - 32.184 (TAI-TT offset), in seconds */
    info->utc = info->tt - info->leaps - 32.184;
    if (CT != 2)
    {
        /* non-Earth central body: rotation from precession/nutation only */
        iau_pns(tjd, info->c_ei, CT);
        mt(info->c_ei, 3, 3, info->c_ie);
        return 1;
    }
    info->mjd = info->jd0 - 2400000.5 + info->utc/86400.0;
    geteop (info->mjd, &xp, &yp, &ut1_utc, &dx, &dy);
    info->xp = xp;
    info->yp = yp;
    info->ut1_utc = ut1_utc;
    info->dx = dx;
    info->dy = dy;
    info->deltat = 32.184 + info->leaps - info->ut1_utc;
    info->ut1 = info->utc + info->ut1_utc;
    sidereal_time (info->jd0, info->ut1/86400.0, info->deltat,0,1,1, &gmsth);
    /* gmsth is in hours; convert to radians */
    info->gmst = gmsth / 24 * 360.0 * DEG2RAD;
    cel_pole (info->jdt, 2, info->dx * 1e3, info->dy * 1e3);
    /* build c_ie column-by-column by rotating the three unit axes */
    cel2ter (info->jd0, info->ut1 / 86400.0, info->deltat, 1, 1, 0,
            info->xp, info->yp, ux, tx);
    cel2ter (info->jd0, info->ut1 / 86400.0, info->deltat, 1, 1, 0,
            info->xp, info->yp, uy, ty);
    cel2ter (info->jd0, info->ut1 / 86400.0, info->deltat, 1, 1, 0,
            info->xp, info->yp, uz, tz);
    for (n = 0; n < 3; n++)
    {
        info->c_ie[n*3] = tx[n];
        info->c_ie[n*3+1] = ty[n];
        info->c_ie[n*3+2] = tz[n];
    }
    mt(info->c_ie, 3, 3, info->c_ei);
//    printf ("%d\tjd0 = %.10f\t ut1 = %.10f\t utc = %.10f\n", i,info[i].jd0, info[i].ut1, info[i].utc );
//    for (n = 0; n < 9; n++)
//        printf ("%e\n", info[i].c_ie[n]);
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* itrf2gcrf(icrf) - from earth fixed to earth inertial
* jd: double, integral part of JD day, unit: day
* utc: double, fractional part of JD day, unit: seconds
* @param2: description of param2
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * itrf2gcrf - rotate a vector from the Earth-fixed frame (ITRF) to the
 * Earth inertial frame (GCRF).
 *   jd  : integral part of the Julian Day
 *   utc : fractional part of the day, in seconds (UTC)
 *   vt  : input terrestrial vector, vc : output celestial vector (m)
 * Looks up EOP and leap seconds for the epoch, sets the celestial pole
 * offsets, then delegates the rotation to ter2cel.
 */
void itrf2gcrf(double jd, double utc, double *vt, double *vc)
{
    double lps, mjd, xp, yp, ut1_utc, dx, dy, delta_t, ut1, tt;
    mjd = jd - 2400000.5;
    geteop (mjd + utc/86400.0, &xp, &yp, &ut1_utc, &dx, &dy);
    lps = getlps (jd + utc/86400.0);
    delta_t = 32.184 + lps - ut1_utc;   /* TT - UT1 in seconds */
    ut1 = utc + ut1_utc;
    tt = utc + (lps + 32.184);
    cel_pole (jd + tt / 86400.0, 2, dx * 1e3, dy * 1e3);
    ter2cel (jd, ut1 / 86400.0, delta_t, 1, 1, 0,
            xp, yp, vt, vc); /*--vc unit: m--*/
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* * gcrf(icrf)2itrf - from earth fixed to earth inertial
* * jd: double, integral part of JD day, unit: day
* * utc: double, fractional part of JD day, unit: seconds
* * @param2: description of param2
*
* * version: 20 Aug 2010
* */
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * gcrf2itrf - rotate a vector from the Earth inertial frame (GCRF) to the
 * Earth-fixed frame (ITRF); inverse of itrf2gcrf.
 *   jd  : integral part of the Julian Day
 *   utc : fractional part of the day, in seconds (UTC)
 *   vc  : input celestial vector, vt : output terrestrial vector (m)
 */
void gcrf2itrf(double jd, double utc, double *vc, double *vt)
{
    double lps, mjd, xp, yp, ut1_utc, dx, dy, delta_t, ut1, tt;
    mjd = jd - 2400000.5;
    geteop (mjd + utc/86400.0, &xp, &yp, &ut1_utc, &dx, &dy);
    lps = getlps (jd + utc/86400.0);
    delta_t = 32.184 + lps - ut1_utc;   /* TT - UT1 in seconds */
    ut1 = utc + ut1_utc;
    tt = utc + (lps + 32.184);
    cel_pole (jd + tt / 86400.0, 2, dx * 1e3, dy * 1e3);
    cel2ter (jd, ut1 / 86400.0, delta_t, 1, 1, 0,
            xp, yp, vc, vt); /*--vc unit: m--*/
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
*
* getlps - get the leap seconds value for input JD
*
* jdutc: double, Julian Day of UTC
* return: short int, leap seconds
*
* version: Mar 2013
*
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * getlps - leap seconds (TAI - UTC) in effect at a given UTC Julian Day.
 * Covers the steps from 1981 JUL 1 (JD 2444786.5, 20 s) through
 * 2012 JUL 1 (JD 2456109.5, 35 s), per the IERS leap-second history;
 * the newest entry is also returned for any later epoch.  Aborts the
 * program for epochs before 1981 JUL 1.
 */
int getlps (double jdutc)
{
    /* leap-second steps, newest first: {JD at which the step begins,
       TAI-UTC in seconds from that instant onward} */
    static const double steps[][2] = {
        {2456109.5, 35}, {2454832.5, 34}, {2453736.5, 33}, {2451179.5, 32},
        {2450630.5, 31}, {2450083.5, 30}, {2449534.5, 29}, {2449169.5, 28},
        {2448804.5, 27}, {2448257.5, 26}, {2447892.5, 25}, {2447161.5, 24},
        {2446247.5, 23}, {2445516.5, 22}, {2445151.5, 21}, {2444786.5, 20},
    };
    int i;
    int nsteps = (int)(sizeof(steps) / sizeof(steps[0]));
    for (i = 0; i < nsteps; i++)
    {
        if (jdutc > steps[i][0])
            return (int)steps[i][1];
    }
    printf ("No leapsecond configured before 1981 JUL 1 =JD 2444786.5\n");
    exit (0);
    return 0;   /* unreachable */
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * openeop - load num daily EOP rows starting at MJD mjds from file_eop
 * into eopmat (6 doubles per row: MJD, xp, yp, UT1-UTC, dX, dY).
 * The file is scanned until a line whose 4th integer field equals
 * mjds - 1 is found; the num lines after it are parsed as data.
 * Aborts if the file cannot be opened.
 * Fix: the original looped on feof() and ignored the return value of
 * fgets(), so at end-of-file the last line was parsed a second time,
 * and mjdi was used uninitialized when the first line had no 4th
 * integer field.  Reads are now guarded.
 */
void openeop (char file_eop[200], int mjds, int num, double *eopmat)
{
    FILE *fp_eop;
    int i, mjdi;
    char string[160];
    if ((fp_eop = fopen (file_eop,"r")) == NULL)
    {
        printf ("Cannot open eop file?\n");
        exit (0);
    }
    /* scan for the marker row whose MJD (4th field) is mjds - 1 */
    while (fgets (string, 160, fp_eop) != NULL)
    {
        if (sscanf (string, "%*d%*d%*d%d", &mjdi) != 1)
            continue;               /* line has no 4th integer field */
        if (mjdi == mjds - 1)
        {
            for (i = 0; i < num; i ++)
            {
                if (fgets (string, 160, fp_eop) == NULL)
                    break;          /* table ended early: keep what we have */
                sscanf (string, "%*d%*d%*d%lf%lf%lf%lf%*lf%lf%lf",
                    &eopmat[i * 6 + 0], &eopmat[i * 6 + 1], &eopmat[i * 6 + 2],
                    &eopmat[i * 6 + 3], &eopmat[i * 6 + 4], &eopmat[i * 6 + 5]);
//                printf("mjd = %f\n", eopmat[i * 6 + 0]);
            }
            break;
        }
    }
    fclose (fp_eop);
}
/*
 * aei2xyz - convert Keplerian elements to Cartesian position/velocity
 * about the central body (GM = GMCT, reference radius RCT).
 *   ele : {a, e, i, Omega, omega, M} with angles in degrees
 *   pos, vel : outputs in the same length/time units as GMCT
 * Solves Kepler's equation for the eccentric anomaly, builds the
 * perifocal unit vectors P and Q, then evaluates the standard
 * two-body expressions.  Warns (but continues) if |pos| <= RCT.
 */
void aei2xyz (double ele[6], double pos[3], double vel[3])
{
    double a, e, i, omega, w, M, E,r, P[3], Q[3], n, GM, radius;
    int x;
    GM = GMCT;
    radius = RCT;
    a = ele[0];
    e = ele[1];
    i = ele[2] * DEG2RAD;
    omega = ele[3] * DEG2RAD;
    w = ele[4] * DEG2RAD;
    M = ele[5] * DEG2RAD;
    n=sqrt(GM/(a*a*a));               /* mean motion */
    E=kepler(M,e);                    /* eccentric anomaly */
    /* perifocal basis vectors expressed in the inertial frame */
    P[0]=cos(omega)*cos(w)-sin(omega)*sin(w)*cos(i);
    P[1]=sin(omega)*cos(w)+cos(omega)*sin(w)*cos(i);
    P[2]=sin(w)*sin(i);
    Q[0]=-cos(omega)*sin(w)-sin(omega)*cos(w)*cos(i);
    Q[1]=-sin(omega)*sin(w)+cos(omega)*cos(w)*cos(i);
    Q[2]=cos(w)*sin(i);
    for(x=0;x<3;x++)
    {
        pos[x]=a*(cos(E)-e)*P[x]+a*sqrt(1-e*e)*sin(E)*Q[x];
    }
    r = modvect (pos);
    if (r <= radius)
    {
        printf("error: r <= radius ! in aei2xyz \n");
    }
    for(x=0;x<3;x++)
    {
        vel[x]=-a*a*n/r*sin(E)*P[x]+a*a*n/r*sqrt(1-e*e)*cos(E)*Q[x];
    }
}
double kepler(double M,double e)
{
double E0,E1=M;
do
{
E0=E1;
E1=M+e*sin(E0);
}
while(fabs(E0-E1)>=1e-10);
return(E1);
}
/*
 * xyz2aei - convert Cartesian position/velocity to Keplerian elements
 * about the central body (GM = GMCT, reference radius RCT).
 *   ele : output {a, e, i, Omega, omega, M}, angles in degrees
 * Returns the orbital period 2*pi/n, or 0 for non-elliptic orbits
 * (a <= 0), in which case only a, e, i, Omega are filled.
 * NOTE(review): Omega and omega divide by sin(i) -- equatorial orbits
 * (i near 0) are singular here; confirm callers avoid them.
 */
double xyz2aei(double ele[6], double pos[3], double vel[3])
{
    double a, e, omega, i, w, E, M, r, v, h, HV[3], n,
           GM, radius, Pz, Qz;
    GM = GMCT;
    radius = RCT;
    r = modvect (pos);
    v = modvect (vel);
    if (r <= radius)
    {
        printf("error: r <= radius ! in xyz2aei \n");
    }
    /* vis-viva: semi-major axis from energy */
    a = 1.0 / (2.0 / r - v * v / GM);
    crsvect (pos, vel, HV);           /* angular momentum vector */
    h = modvect(HV);
    e = sqrt (1.0 - h * h / GM / a);
    i = acos (HV[2] / h); //unit: rad
    omega = chosephase (HV[0] / h / sin(i), - HV[1] / h / sin(i)); //unit: rad
    if(a <= 0)
    {
        /* hyperbolic/parabolic: remaining elements undefined here */
        ele[0]=a,ele[1]=e,ele[2]=i * RAD2DEG;
        ele[3]=omega * RAD2DEG,ele[4]=0,ele[5]=0;
//        printf("error: a <= 0 !\n");
        return 0;
    }
    if(a <= radius)
    {
        printf("warning: a <= radius !\n");
    }
    n = sqrt ( GM / (a*a*a) );        /* mean motion */
    E = chosephase ( dotvect(pos, vel) / (a * a * n * e), (1.0 - r / a) / e); //unit: rad
    M = E - e * sin(E); //unit: rad
    /* z-components of the perifocal unit vectors give the argument of
       perigee via w = atan2-like phase selection */
    Pz = (cos(E) / r * pos[2] - sin(E) / n / a * vel[2]);
    Qz = (sin(E) / r / sqrt(1.0-e*e) * pos[2] + (cos(E) - e) / n / a / sqrt(1.0-e*e) * vel[2]);
    w = chosephase ( Pz / sin(i), Qz /sin(i)); //unit: rad
    ele[0] = a;
    ele[1] = e;
    ele[2] = i * RAD2DEG;
    ele[3] = omega * RAD2DEG;
    ele[4] = w * RAD2DEG;
    ele[5] = M * RAD2DEG;
    return TWOPI / n;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* opengravfile ¨C
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * opengrv - read normalized spherical-harmonic gravity coefficients from
 * file_grv[0] into coef, up to degree nmax and order mmax.
 *   file_grv[1] : optional record tag; when non-empty only lines whose
 *                 first token equals the tag are parsed ("tag n m C S"),
 *                 otherwise every line is parsed as "n m C S".
 *   coef layout : zonal Cn0 in coef[2..nmax]; for m >= 1 the C and S
 *                 blocks start at ind = nmax+1 + (2*nmax-m+2)*(m-1).
 * Degrees 0 and 1 are excluded; if PERMT == 1 a permanent-tide offset is
 * removed from C20 (coef[2]).  Returns 0; aborts if the file is missing.
 * Fix: the original reused n, m, c, s from the previous line when the
 * tag did not match (and read them uninitialized on the first line);
 * unparsed lines are now skipped explicitly.
 */
double opengrv (char file_grv[2][200], double *coef, int nmax, int mmax)
{
    FILE *fp_grv;
    double c,s;
    int n,m, l, ind, parsed;
    char string[200], name[20];
    if ((fp_grv = fopen (file_grv[0],"r")) == NULL)
    {
        printf ("Cannot open gravity file?\n");
        exit (0);
    }
//    coef[0] = 1; // include zero degree term
    coef[0] = 0; // exclude zero degree term
    while (1)
    {
        if (fgets (string, 200, fp_grv) == NULL) break;
        parsed = 0;
        if (strlen(file_grv[1])==0)
        {
            /* untagged file: every line is "n m C S" */
            if (sscanf (string, "%d%d%lf%lf", &n, &m, &c, &s) == 4)
                parsed = 1;
        }
        else
        {
            /* tagged file: parse only lines starting with the tag */
            if (sscanf (string, "%s", name) == 1 &&
                strcmp (name,file_grv[1]) == 0 &&
                sscanf (string, "%*s%d%d%lf%lf", &n, &m, &c, &s) == 4)
            {
                parsed = 1;
//                printf ("n = %d m = %d c = %e s = %e\n", n, m, c, s);
            }
        }
        if (!parsed)
            continue;   /* skip lines that did not yield a coefficient */
        if (n > nmax || n < 2 || m > mmax) // permanently exclude degree 1 @7/24/2012
            continue;
        else if (m == 0)
        {
            coef[n] = c;
        }
        else
        {
            l = nmax - m + 1;
            ind = nmax + 1 + (2 * nmax - m + 2) * (m - 1);
            coef[ind + n - m] = c;
            coef[ind + n - m + l] = s;
        }
    }
    printf ("coef[2] = %e\n", coef[2]);
    if (PERMT == 1)
    {
        /* remove the permanent tide from C20 (zero-tide -> tide-free) */
        coef[2] = coef[2] - 4.201e-9; //tn32
//        coef[2] = coef[2] - 4.1736e-9; //tn36
    }
    printf ("coef[2] = %e\n", coef[2]);
    fclose(fp_grv);
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* xyz2llh - xyz to latitude, longitude, height
* @param1: description of param1
* @param2: description of param2
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * xyz2llh - Cartesian body-fixed vector to geocentric spherical
 * coordinates.
 *   vt  : input x/y/z
 *   llh : output {latitude (deg), longitude (deg), radius} -- note the
 *         third element is the geocentric radius, not height; callers
 *         subtract the reference radius (see pt_orb: llh[2] - RCT).
 * NOTE(review): divides by sin(phi) -- singular when vt points exactly
 * along the z axis (the poles); confirm inputs avoid that case.
 */
void xyz2llh (double *vt, double *llh)
{
    double r, cosphi, phi, costhe, sinthe;
    r = sqrt (vt[0] * vt[0] + vt[1] * vt[1] + vt[2] * vt[2]);
    cosphi = vt[2] / r;
    phi = acos(cosphi) ;            /* colatitude in [0, pi] */
    costhe = vt[0] / r / sin(phi);
    sinthe = vt[1] / r / sin(phi);
    llh[2] = r;
    llh[1] = chosephase(sinthe, costhe) * RAD2DEG;
    llh[0] = 90.0 - phi * RAD2DEG;  /* latitude from colatitude */
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* fun_pointmass - abandoned
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * earth_pointmass - point-mass two-body right-hand side (marked
 * "abandoned" in the header above): f = d/dt {pos, vel} for state x,
 * with GM converted to AU^3/day^2.
 * NOTE: the relativistic correction fgr is computed but never added to
 * f -- only the Newtonian term fnt reaches the output, consistent with
 * this routine being abandoned.
 */
double earth_pointmass (double jd, double tdbs, double *x, double *f)
{
    double GM, radius, gmde, fnt[3], fgr[3], r, s2, rrd, a, b;
    int n, gamma;
    GM = GMCT;
    radius = RCT;
    /* GM in AU^3/day^2 */
    gmde = GM * 86400.0 * 86400.0 / AU / AU / AU;
    gamma = 1;
    f[0] = x[3];
    f[1] = x[4];
    f[2] = x[5];
    r = sqrt (x[0]*x[0]+x[1]*x[1]+x[2]*x[2]);
    s2 = x[3] * x[3] + x[4] * x[4] + x[5] * x[5];
    rrd = x[0] * x[3] + x[1] * x[4] + x[2] * x[5];
    a = 2 * (1 + gamma) * gmde / r - gamma * s2;
    b = 2 * (1 + gamma) * rrd;
    /* Schwarzschild-type relativistic term (computed, then discarded) */
    for (n = 0; n < 3; n++)
        fgr[n] = gmde / C_AUDAY / C_AUDAY / r / r / r
               * ( a * x[n] + b * x[n+3] );
    fnt[0] = - gmde / (r*r*r) * x[0];
    fnt[1] = - gmde / (r*r*r) * x[1];
    fnt[2] = - gmde / (r*r*r) * x[2];
    for (n = 0; n < 3; n++)
    {
        f[3 + n] = fnt[n];
    }
    return 0;
}
/*
 * accel_point - point-mass acceleration of the central body in
 * AU/day^2 units.
 *   fnt : Newtonian -GM/r^3 * r term
 *   fgr : Schwarzschild-type relativistic correction (gamma = 1)
 * Both outputs are returned separately; the caller decides whether to
 * combine them.
 */
double accel_point (double *tjd, double *x, double *fnt, double *fgr)
{
    double GM, radius, gmde, r, s2, rrd, a, b;
    int n, gamma;
    GM = GMCT;
    radius = RCT;
    /* GM in AU^3/day^2 */
    gmde = GM * 86400.0 * 86400.0 / AU / AU / AU;
    gamma = 1;
    r = sqrt (x[0]*x[0]+x[1]*x[1]+x[2]*x[2]);
    s2 = x[3] * x[3] + x[4] * x[4] + x[5] * x[5];
    rrd = x[0] * x[3] + x[1] * x[4] + x[2] * x[5];
    a = 2 * (1 + gamma) * gmde / r - gamma * s2;
    b = 2 * (1 + gamma) * rrd;
    for (n = 0; n < 3; n++)
        fgr[n] = gmde / C_AUDAY / C_AUDAY / r / r / r
               * ( a * x[n] + b * x[n+3] );
    fnt[0] = - gmde / (r*r*r) * x[0];
    fnt[1] = - gmde / (r*r*r) * x[1];
    fnt[2] = - gmde / (r*r*r) * x[2];
    return 0;
}
/*
 * accel_pmiers - point-mass acceleration with the IERS-style relativistic
 * correction, in AU/day^2 units.
 *   fnt : Newtonian central-body term
 *   fgr : sum of the Schwarzschild term (term1), the Lense-Thirring
 *         frame-dragging term from the body's angular momentum J (term2),
 *         and the geodesic (de Sitter) precession term driven by the
 *         Sun's motion (term3)
 * PPN parameters beta = gamma = 1.  All constants are rescaled from SI
 * to AU/day units before use.
 */
double accel_pmiers (double *tjd, double *x, double *fnt, double *fgr)
{
    double GME, GMS, J, Jv[3], beta, gamma, r, v2, pv, pJ, a, b, p[3], v[3],
           pxv[3], vxJ[3], ps[3], vs[3], rs, vsxps[3], vsxpsxv[3],
           term1[3], term2[3], term3[3];
    int n;
    short int sun = 10;
    GME = GMCT;  //m^3/s^2
    J = 9.8e8;   //m^2/s
    gamma = 1;
    beta = 1;
    GMS = 1.32712442076e20;  //m^3/s^2
    /* convert to AU/day units */
    GME = GME * 86400.0 * 86400.0 / AU / AU / AU;
    GMS = GMS * 86400.0 * 86400.0 / AU / AU / AU;
    J = J * 86400.0 / AU / AU;
    Jv[0] = 0; Jv[1] = 0; Jv[2] = J;
    p[0] = x[0]; p[1] = x[1]; p[2] = x[2];
    v[0] = x[3]; v[1] = x[4]; v[2] = x[5];
    r = modvect(p);
    v2 = dotvect(v, v);
    pv = dotvect(p, v);
    pJ = dotvect(p, Jv);
    crsvect(p, v, pxv);
    crsvect(v, Jv, vxJ);
    planet_ephemeris (tjd, CT, sun, ps, vs);
    rs = modvect(ps);
    crsvect(vs, ps, vsxps);
    crsvect(vsxps, v, vsxpsxv);
    a = 2 * (beta + gamma) * GME / r - gamma * v2;
    b = 2 * (1 + gamma) * pv;
    for (n = 0; n < 3; n++)
    {
        term1[n] = GME / C_AUDAY / C_AUDAY / r / r / r *
            ( a * p[n] + b * v[n] );
        term2[n] = GME / C_AUDAY / C_AUDAY / r / r / r * (1 + gamma) *
            ( 3/r/r * pxv[n] * pJ + vxJ[n] );
        term3[n] = - GMS / C_AUDAY / C_AUDAY / rs / rs / rs * (1 + 2 * gamma) *
            vsxpsxv[n];
        fgr[n] = term1[n]
               + term2[n] + term3[n];
//        printf ("%15.12f\t%15.12f\t%15.12f\n", term1[n],term2[n],term2[n]);
    }
    fnt[0] = - GME / (r*r*r) * p[0];
    fnt[1] = - GME / (r*r*r) * p[1];
    fnt[2] = - GME / (r*r*r) * p[2];
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* fun_fullstate -transition matrix(36), orbit(6), sensitivity matrix(6*DYNPAR)
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * fun_accel - right-hand side for the orbit + variational equations.
 *   dim    : state dimension; 6 = orbit only, > 6 additionally propagates
 *            the 6x6 state transition matrix and (if MDYN > 0) the 6xMDYN
 *            sensitivity matrix w.r.t. the dynamical parameters
 *   jd, tt : epoch as integral JD and seconds of day
 *   state  : [xyz,vel | dxdx0 (36) | dxdp (6*MDYN)]
 *   fstate : output time derivative of state, same layout
 * Acceleration model: point mass (accel_pm_part), n-body (accel_nb_part),
 * solar radiation pressure (accel_sr_part), gravity field / tides
 * (accel_gt_part); each also returns its 3x3 position partial when
 * part == 1.
 */
void fun_accel (int dim, double jd, double tt, double *state, double *fstate)
{
    int n, i,k, part;
    double tjd[2], xic[6], dfdx[36], dxdx0[36],
           acc1[3], dadr1[9],
           acc2[3], dadr2[9],
           acc3[3], dadr3[9], dadsrpb[3], dadsrpt[3],
           acc4[4], dadr4[9], dadk2[3],
           acc[3], dadr[9],
           fxic[6], fdxdx0[36];
    double ap[3], an[3], ar[3], ag[3], apgr[3], angr[3], at[3], ao[3];
    double *dadp, *dxdp, *dfdpp, *dfdp, *fdxdp;
    tjd[0] = jd; tjd[1] = tt / 86400.0;
//    tjd[0] = jd; tjd[1] = tt;
    if (dim < 6)
    {
        printf ("error: fun_accel input dim < 6!\n");
        exit (0);
    }
    else if (dim == 6)
        part = 0;           /* orbit only: no partials needed */
    else if (dim > 6)
    {
        part = 1;           /* also evaluate partial derivatives */
    }
    for (n = 0; n < 6; n++)
    {
        xic[n] = state[n];
    }
    /* acc, partial to xyz: dadr, partial to parameters dadp*/
//    accel_ntrel (tjd, xic, part, acc1, dadr1, dadp1);
//    accel_nonsp (tjd, xic, part, acc2, dadr2, dadp2);
//    accel_radpr (tjd, xic, part, acc3, dadr3, dadp3);
    /*todo: air drag acc & partial to vxvyvz dadv*/
    accel_pm_part (tjd, xic, ap, part, dadr1);
    accel_nb_part (tjd, xic, an, part, dadr2);
    accel_sr_part (tjd, xic, ar, part, dadr3, dadsrpb, dadsrpt);
    accel_gt_part (tjd, xic, ag, part, dadr4, dadk2);
    /* total acceleration */
    for (n = 0; n <= 2; n++)
    {
        acc[n] = ap[n] + an[n] + ar[n] + ag[n];
    }
    /* d/dt {pos, vel} = {vel, acc} */
    fxic[0] = xic[3];
    fxic[1] = xic[4];
    fxic[2] = xic[5];
    fxic[3] = acc[0];
    fxic[4] = acc[1];
    fxic[5] = acc[2];
    for (n = 0; n < 6; n++)
    {
        fstate[n] = fxic[n];
    }
    if (part == 0)
    {
        return;
    }
    /* --- variational equations: d/dt dxdx0 = dfdx * dxdx0 --- */
    for (n = 0; n < 36; n++)
    {
        dxdx0[n] = state[n + 6];
    }
    /* total acceleration partial w.r.t. position */
    for (n = 0; n <= 8; n++)
    {
        dadr[n] = dadr1[n] + dadr2[n] + dadr3[n] + dadr4[n];
//        dadr[n] = dadr1[n];
    }
    /* dfdx = [[0, I], [dadr, 0]] in row-major 6x6 layout */
    for (n = 0; n < 36; n++)
    {
        dfdx[n] = 0;
    }
    dfdx[3] = 1;
    dfdx[10] = 1;
    dfdx[17] = 1;
    for (n = 0; n < 3; n++)
    {
        dfdx[n + 18] = dadr[n];
        dfdx[n + 24] = dadr[n + 3];
        dfdx[n + 30] = dadr[n + 6];
    }
    brmul(dfdx, dxdx0, 6, 6, 6, fdxdx0);
    for (n = 0; n < 36; n++)
    {
        fstate[n + 6] = fdxdx0[n];
    }
    if (MDYN == 0)
        return;
    /* --- sensitivity equations: d/dt dxdp = dfdx * dxdp + dfdp --- */
    dadp = (double *) calloc ( 3 * MDYN, sizeof(double));
    dxdp = (double *) calloc ( 6 * MDYN, sizeof(double));
    dfdpp = (double *) calloc ( 6 * MDYN, sizeof(double));
    dfdp = (double *) calloc ( 6 * MDYN, sizeof(double));
    fdxdp = (double *) calloc ( 6 * MDYN, sizeof(double));
    for (n = 0; n < 6 * MDYN; n++)
    {
        dxdp[n] = state[n + 42];
    }
    /* assemble dadp column by column: SRP bias/scale, tidal k2,
       then MGCS gravity-coefficient columns */
    i = 0;
    if (MSRP > 0)
    {
        for (n = 0; n < 3; n++)
        {
            dadp[n * MDYN + i] = dadsrpb[n];
        }
        i++;
    }
    if (MSRP > 1)
    {
        for (n = 0; n < 3; n++)
        {
            dadp[n * MDYN + i] = dadsrpt[n];
        }
        i++;
    }
    if (MTK2 > 0)
    {
        for (n = 0; n < 3; n++)
        {
            dadp[n * MDYN + i] = dadk2[n];
        }
        i++;
    }
    if (MGCS > 0)
    {
        for (k = 0; k < MGCS; k ++)
        {
            for (n = 0; n < 3; n++)
            {
                dadp[n * MDYN + i] = CSinfo[k].dadcs[n];
            }
            i++;
        }
    }
    brmul(dfdx, dxdp, 6, 6, MDYN, dfdpp);
    /* dfdp has zeros in the top (velocity) rows, dadp in the bottom */
    for (n = 0; n < 3 * MDYN; n++)
    {
        dfdp[n] = 0;
        dfdp[n + 3 * MDYN] = dadp[n];
    }
    for (n = 0; n < 6 * MDYN; n++)
    {
        fdxdp[n] = dfdpp[n] + dfdp[n];
    }
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
    for (n = 0; n < 6 * MDYN; n++)
    {
        fstate[n + 42]= fdxdp[n];
    }
    free (dadp);
    free (dxdp);
    free (dfdpp);
    free (dfdp);
    free (fdxdp);
    return;
}
/*
 * accel_pm_part - central-body point-mass acceleration (SI units, using
 * GMCT directly) plus, when part == 1, its 3x3 partial w.r.t. position:
 *   dadr = 3*GM*(p p^T)/r^5 - GM*I/r^3.
 * When RELTIV != 0 the relativistic corrections (Schwarzschild,
 * Lense-Thirring with angular momentum J, and geodesic term from the
 * Sun's motion) are added to acc; gamma = beta = 1.  The relativistic
 * part is NOT reflected in dadr.
 */
double accel_pm_part (double *tjd, double *x, double *acc, int part, double *dadr)
{
    double GME, GMS, J, Jv[3], beta, gamma, r, v2, pv, pJ, a, b, p[3], v[3],
           pxv[3], vxJ[3], ps[3], vs[3], xsc[6], rs, vsxps[3], vsxpsxv[3],
           unit[9], ppt[9], r5, r3, fgr[3], fnt[3], term1[3], term2[3], term3[3];
    int n;
    short int sun = 10;
    GME = GMCT;  //m^3/s^2
//    GME = GME * 86400.0 * 86400.0 / AU / AU / AU;
    p[0] = x[0]; p[1] = x[1]; p[2] = x[2];
    v[0] = x[3]; v[1] = x[4]; v[2] = x[5];
    r = modvect(p);
    acc[0] = - GME / (r*r*r) * p[0];
    acc[1] = - GME / (r*r*r) * p[1];
    acc[2] = - GME / (r*r*r) * p[2];
    if (part == 1)
    {
        /* Jacobian of the Newtonian term w.r.t. position */
        unit[0] = 1; unit[1] = 0; unit[2] = 0;
        unit[3] = 0; unit[4] = 1; unit[5] = 0;
        unit[6] = 0; unit[7] = 0; unit[8] = 1;
        r5 = pow (r, 5);
        r3 = pow (r, 3);
        brmul (p, p, 3,1,3, ppt);   /* outer product p p^T */
        for (n = 0; n <= 8; n++)
        {
            dadr[n] = 3 * GME * ppt[n] / r5
                - GME * unit[n] / r3;
        }
    }
    if (RELTIV == 0)
        return 0;
    /* relativistic corrections, SI units */
    J = 9.8e8;  //m^2/s
    gamma = 1;
    beta = 1;
    GMS = 1.32712442076e20;  //m^3/s^2
//    GMS = GMS * 86400.0 * 86400.0 / AU / AU / AU;
//    J = J * 86400.0 / AU / AU;
    Jv[0] = 0; Jv[1] = 0; Jv[2] = J;
    v2 = dotvect(v, v);
    pv = dotvect(p, v);
    pJ = dotvect(p, Jv);
    crsvect(p, v, pxv);
    crsvect(v, Jv, vxJ);
//    planet_ephemeris (tjd, CT, sun, ps, vs);
    get_ephemeris (tjd, CT, sun, xsc);
    for (n = 0; n < 3; n++)
    {
        ps[n] = xsc[n];
        vs[n] = xsc[n + 3];
    }
    rs = modvect(ps);
    crsvect(vs, ps, vsxps);
    crsvect(vsxps, v, vsxpsxv);
    a = 2 * (beta + gamma) * GME / r - gamma * v2;
    b = 2 * (1 + gamma) * pv;
    for (n = 0; n < 3; n++)
    {
        term1[n] = GME / C / C / r / r / r *
            ( a * p[n] + b * v[n] );
        term2[n] = GME / C / C / r / r / r * (1 + gamma) *
            ( 3/r/r * pxv[n] * pJ + vxJ[n] );
        term3[n] = - GMS / C / C / rs / rs / rs * (1 + 2 * gamma) *
            vsxpsxv[n];
        fgr[n] = term1[n]
            + term2[n] + term3[n];
//        printf ("%15.12f\t%15.12f\t%15.12f\n", term1[n],term2[n],term2[n]);
    }
    acc[0] = acc[0] + fgr[0];
    acc[1] = acc[1] + fgr[1];
    acc[2] = acc[2] + fgr[2];
    return 0;
}
/*
 * get_ephemeris - state of body `from` relative to body `to` at epoch tjd
 * (two-part Julian date), returned in x[6] in metres and metres/second.
 *
 * Bodies with index <= 12 are served by planet_ephemeris (JPL ephemeris,
 * au and au/day, converted to SI here). Titan (I_TITAN) is served through
 * the SPICE kernel: spkezr_c gives Saturn relative to Titan (km, km/s),
 * which is chained with the JPL state of the other body relative to
 * Saturn (planet index 5).
 *
 * Returns 0. On an unsupported body pair it prints a diagnostic and
 * leaves x untouched.
 */
int get_ephemeris (double tjd[2], int to, int from, double *x)
{
    double jd0 = 2451545.00000000, lt, tdbj2000, fromTtoS[6], pos[3], vel[3];
    int n;
    short int center, target;
    if (from <= 12 && to <= 12)
    {
        center = (short int)from;
        target = (short int)to;
        planet_ephemeris (tjd, target, center, pos, vel);
        /* au, au/day -> m, m/s */
        x[0] = pos[0] * AU;
        x[1] = pos[1] * AU;
        x[2] = pos[2] * AU;
        x[3] = vel[0] * AU / 86400.0;
        x[4] = vel[1] * AU / 86400.0;
        x[5] = vel[2] * AU / 86400.0;
    }
    else if (from == I_TITAN || to == I_TITAN) //titan
    {
        /* seconds past J2000, as required by SPICE */
        tdbj2000 = ((tjd[0] - jd0) + tjd[1]) * 86400.0;
        /* FIX: the light-time output argument was garbled ("<") and did not
         * compile; pass the address of the local `lt` as spkezr_c expects. */
        spkezr_c ("SATURN", tdbj2000, "J2000", "NONE", "TITAN", fromTtoS, &lt);
        /*
        Procedure
        void spkezr_c ( ConstSpiceChar *targ,
        SpiceDouble et,
        ConstSpiceChar *ref,
        ConstSpiceChar *abcorr,
        ConstSpiceChar *obs,
        SpiceDouble starg[6],
        SpiceDouble *lt )
        Return the state (position and velocity) of a target body
        relative to an observing body, optionally corrected for light
        time (planetary aberration) and stellar aberration.
        */
        if (from == I_TITAN) center = (short int) to;
        else center = (short int) from;
        /* state of the non-Titan body relative to Saturn (index 5) */
        planet_ephemeris (tjd, center, 5, pos, vel);
        for (n = 0; n < 3; n++)
        {
            /* chain (body - Saturn) + (Saturn - Titan); SPICE is km, km/s */
            x[n] = pos[n] * AU + fromTtoS[n] * 1000.0;
            x[n + 3] = vel[n] * AU / 86400.0 + fromTtoS[n + 3] * 1000.0;
        }
        if (to == I_TITAN)
        {
            /* requested the opposite direction: negate the chained state */
            for (n = 0; n < 6; n++)
            {
                x[n] = - x[n];
            }
        }
    }
    else
    {
        printf ("error in get_ephemeris: from = %d\t to = %d\n", from, to);
    }
    return 0;
}
/*
 * accel_nb_part - third-body (n-body) perturbation on the satellite,
 * formed as the difference of the barycentric accelerations of the
 * satellite and of the central body.
 *
 * tjd  : two-part epoch.
 * xic  : satellite state [pos(3), vel(3)] relative to the central body CT.
 * acc  : output perturbing acceleration (3).
 * part : when 1, dadr receives the 3x3 partials d(acc)/d(pos) (the
 *        satellite-point partials from f_bcrs).
 * Returns 0. When the global NBODY switch is off, all outputs are zeroed.
 */
double accel_nb_part (double *tjd, double *xic, double *acc, int part, double *dadr)
{
    int i;
    short int ssbary = 11;
    double xcb[6], xib[6], acb[3], aib[3], dadrc[9], dadri[9];

    if (NBODY == 0)
    {
        if (part == 1)
        {
            for (i = 0; i < 9; i++)
                dadr[i] = 0;
        }
        acc[0] = 0;
        acc[1] = 0;
        acc[2] = 0;
        return 0;
    }
    /* barycentric state and acceleration of the central body */
    get_ephemeris (tjd, CT, ssbary, xcb);
    f_bcrs (tjd, xcb, CT, acb, part, dadrc);
    /* barycentric state and acceleration of the satellite */
    for (i = 0; i <= 5; i++)
        xib[i] = xic[i] + xcb[i];
    f_bcrs (tjd, xib, CT, aib, part, dadri);
    /* differential acceleration felt in the central-body frame */
    for (i = 0; i <= 2; i++)
        acc[i] = aib[i] - acb[i];
    if (part == 1)
    {
        for (i = 0; i <= 8; i++)
            dadr[i] = dadri[i];
    }
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * f_bcrs - barycentric (BCRS) acceleration of a point at state xi due to the
 * eleven solar-system bodies, excluding body `exclude` (the central body).
 * Newtonian point-mass terms are always summed; when any enabled perturber
 * has PERB[j] == 2, relativistic EIH-type terms (PPN gamma = beta = 1) are
 * added as well.
 *
 * tjd     : two-part epoch for the ephemeris.
 * xi      : state [pos(3), vel(3)] relative to the solar-system barycenter.
 * exclude : body index omitted from the force sums.
 * acc     : output acceleration (3).
 * part    : when 1, dadr receives the Newtonian 3x3 partials d(acc)/d(pos).
 * dadr    : output partials (9, row-major); Newtonian contribution only.
 * Returns 1 when relativistic terms were added, 0 otherwise.
 */
double f_bcrs (double *tjd, double *xi, int exclude,
    double *acc, int part, double *dadr)
{
    double fnt[3], fgr[3], xj[11][6], xij[11][6], rij[11], xjk[6], rjk,
        xddj[3], sumil, sumjk, sdi2, sdj2, rdirdj, rrrdr2, rjirdd,
        rij5, rij3, xijt[9], gra, grb, beta, gamma, unit[9];
    short int ssbary, l, k, j, n, flag_gr;
    ssbary = 11;
    gamma = 1.0;
    beta = 1.0;
    unit[0] = 1; unit[1] = 0; unit[2] = 0;
    unit[3] = 0; unit[4] = 1; unit[5] = 0;
    unit[6] = 0; unit[7] = 0; unit[8] = 1;
    /* states of all bodies w.r.t. the barycenter, and separations from xi */
    for (j = 0; j <= 10; j++)
    {
        // planet_ephemeris (jd, j, ssbary, &xj[j][0], &xj[j][3]);
        get_ephemeris (tjd, j, ssbary, xj[j]);
        for (n = 0; n < 6; n++)
        {
            xij[j][n] = xi[n] - xj[j][n];
        }
        rij[j] = sqrt (xij[j][0] * xij[j][0]
            + xij[j][1] * xij[j][1] + xij[j][2] * xij[j][2]);
    }
    /* Newtonian sum; PERB[j]: 0 = off, 1 = Newtonian, 2 = Newtonian + GR */
    flag_gr = 0;
    for (n = 0; n < 3; n ++)
        fnt[n] = 0;
    for (j = 0; j <= 10; j++)
    {
        if (PERB[j] == 2)
            flag_gr = 1;
        if (PERB[j] == 0)
            continue;
        if (j == exclude)
            continue;
        for (n = 0; n < 3; n++)
            fnt[n] = fnt[n]
                - GMDE[j] / (rij[j] * rij[j] * rij[j]) * xij[j][n];
    }
    if (part == 1)
    {
        /* Newtonian partials: sum over j of 3 GM (x x^T)/r^5 - GM I/r^3.
         * NOTE(review): this loop skips only `exclude`; unlike the force
         * loop above it does NOT skip bodies with PERB[j] == 0 -- confirm
         * whether disabled perturbers should contribute partials. */
        for (n = 0; n <= 8; n++)
        {
            dadr[n] = 0;
        }
        for (j = 0; j <= 10; j++)
        {
            if (j == exclude)
                continue;
            rij5 = pow (rij[j], 5);
            rij3 = pow (rij[j], 3);
            brmul (xij[j], xij[j], 3,1,3, xijt);
            for (n = 0; n <= 8; n++)
            {
                dadr[n] = dadr[n] + 3 * GMDE[j] * xijt[n] / rij5
                    - GMDE[j] * unit[n] / rij3;
            }
        }
    }
    if (flag_gr == 0)
    {
        for (n = 0; n < 3; n++)
            acc[n] = fnt[n];
        return 0;
    }
    /*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
    /* Relativistic (EIH) terms over bodies with PERB == 2. */
    sdi2 = xi[3] * xi[3] + xi[4] * xi[4] + xi[5] * xi[5];
    /* sum over l of GM_l / r_il (potential at the field point) */
    sumil = 0;
    for (l = 0; l < 11; l ++)
    {
        if ( l == exclude)
            continue;
        if (PERB[l] != 2)
            continue;
        sumil = sumil + GMDE[l] / rij[l];
    }
    for (n = 0; n < 3; n ++)
        fgr[n] = 0;
    for (j = 0; j < 11; j ++)
    {
        if (PERB[j] != 2)
            continue;
        if (j == exclude)
            continue;
        /* potential at body j (sumjk) and its Newtonian acceleration xddj */
        sumjk = 0;
        for (n = 0; n < 3; n ++)
            xddj[n] = 0;
        for (k = 0; k < 11; k ++)
        {
            if (k == j)
                continue; //k!=j
            if (PERB[k] != 2)
                continue;
            for (n = 0; n < 3; n++)
                xjk[n] = xj[j][n] - xj[k][n];
            rjk = sqrt (xjk[0] * xjk[0] + xjk[1] * xjk[1] + xjk[2] * xjk[2]);
            sumjk = sumjk + GMDE[k] / rjk;
            for (n = 0; n < 3; n ++)
                xddj[n] = xddj[n] - GMDE[k] / (rjk * rjk * rjk) * xjk[n];
        }
        /*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
        /* scalar ingredients of the EIH bracket */
        sdj2 = xj[j][3] * xj[j][3] + xj[j][4] * xj[j][4]
            + xj[j][5] * xj[j][5];
        rdirdj = xi[3] * xj[j][3] + xi[4] * xj[j][4] + xi[5] * xj[j][5];
        rrrdr2 = pow( ( xij[j][0] * xj[j][3] + xij[j][1] * xj[j][4]
            + xij[j][2] * xj[j][5]) / rij[j], 2);
        rjirdd = - ( xij[j][0] * xddj[0] + xij[j][1] * xddj[1]
            + xij[j][2] * xddj[2]);
        gra = - 2 * (beta + gamma) * sumil - (2 * beta -1) * sumjk
            + gamma * sdi2 + (1 + gamma) * sdj2
            - 2 * (1 + gamma) * rdirdj - 1.5 * rrrdr2 + 0.5 * rjirdd;
        grb = xij[j][0] * ((2+2*gamma) * xi[3] - (1+2*gamma) * xj[j][3])
            + xij[j][1] * ((2+2*gamma) * xi[4] - (1+2*gamma) * xj[j][4])
            + xij[j][2] * ((2+2*gamma) * xi[5] - (1+2*gamma) * xj[j][5]);
        for (n = 0; n < 3; n ++)
        {
            /* position term * gra, relative-velocity term * grb, and the
             * perturber-acceleration term, each divided by c^2 */
            fgr[n] = fgr[n]
                + GMDE[j] / (rij[j] * rij[j] * rij[j])
                * ( - xij[j][n]) * gra / C / C
                + GMDE[j] / (rij[j] * rij[j] * rij[j])
                * xij[j][n + 3] * grb / C / C
                + GMDE[j] / rij[j] * (3 + 4 * gamma) * 0.5
                * xddj[n] / C / C;
        }
    }
    for (n = 0; n < 3; n++)
        acc[n] = fgr[n] + fnt[n];
    return 1;
}
/*
 * accel_sr_part - solar radiation pressure acceleration and its partials.
 *
 * tjd     : two-part epoch; tjd[1] also scales the time-linear SRP term.
 * xic     : satellite state relative to the central body CT.
 * acc     : output acceleration (3), along the Sun->satellite unit vector.
 * part    : when 1, also return partials.
 * dadr    : 3x3 partials d(acc)/d(pos), row-major.
 * dadsrpb : partials w.r.t. the SRP bias parameter SRPB (3).
 * dadsrpt : partials w.r.t. the SRP trend parameter SRPT (3).
 * AMR is the area-to-mass ratio switch/value; everything is zeroed when 0.
 * Returns 1 when part == 0 (after filling acc), otherwise 0.
 */
double accel_sr_part (double *tjd, double *xic, double *acc, int part,
    double *dadr, double *dadsrpb, double *dadsrpt)
{
    double j, c1, ap, m, rsp, usp[3], xis[6], xsc[6], f,
        xist[9], unit[9], rsp3;
    short int n, sun;
    short int ssbary = 11;
    if (AMR == 0)
    {
        if (part == 1)
        {
            for (n = 0; n <= 8; n++)
            {
                dadr[n] = 0;
            }
            /* NOTE(review): this zero branch fills indices 0..3 (four
             * entries) of dadsrpb/dadsrpt while the normal path below fills
             * only 0..2 -- confirm the intended array length. */
            for (n = 0; n <= 3; n++)
            {
                dadsrpb[n] = 0;
                dadsrpt[n] = 0;
            }
        }
        for (n = 0; n <= 2; n++)
        {
            acc[n] = 0;
        }
        return 0;
    }
    sun = 10;
    unit[0] = 1; unit[1] = 0; unit[2] = 0;
    unit[3] = 0; unit[4] = 1; unit[5] = 0;
    unit[6] = 0; unit[7] = 0; unit[8] = 1;
    // planet_ephemeris (tjd, sun, CT, &xsc[0], &xsc[3]);
    /* Sun state relative to the central body; xis = satellite - Sun */
    get_ephemeris (tjd, sun, CT, xsc);
    for (n = 0; n <= 5; n++)
    {
        xis[n] = xic[n] - xsc[n];
    }
    rsp = sqrt (xis[0] * xis[0] + xis[1] * xis[1] + xis[2] * xis[2]);
    usp[0] = xis[0] / rsp;
    usp[1] = xis[1] / rsp;
    usp[2] = xis[2] / rsp;
    /* solar flux constant at 1 au */
    j = 1352.5; //kg/s3
    // j = 1359.4; //kg/s3
    // m = SATMASS; //kg
    // ap = SATAREA; //m2
    c1 = j / C * AU * AU; //kg/s2/m*au*au
    /* f = (flux/c) * (A/m) / rsp^2 -- magnitude of the SRP acceleration */
    f = c1 * AMR / rsp / rsp;
    // f = c1 * ap / m / rsp / rsp;
    //kg/s2/m*au*au * m2 / kg / au / au = m/s2
    // f = f / AU * 86400.0 * 86400.0;
    // printf ("SRPB = %f\t SRPT = %f\n", SRPB, SRPT);
    /* scale factor (1 + SRPB + SRPT*t) holds the estimated bias and trend */
    for (n = 0; n < 3; n++)
    {
        acc[n] = f * usp[n] * (1 + SRPB + SRPT * tjd[1]);
    }
    if (part == 0)
        return 1;
    /* d(acc)/d(pos) = -f * (3 u u^T / r^3 ... ) * scale (see formula) */
    rsp3 = rsp * rsp * rsp;
    brmul (xis, xis, 3,1,3, xist);
    for (n = 0; n <= 8; n++)
    {
        dadr[n] = - f * (3 * xist[n] / rsp3 - unit[n] / rsp) *
            (1 + SRPB + SRPT * tjd[1]);
    }
    /* partials w.r.t. the estimated SRP parameters */
    for (n = 0; n <= 2; n++)
    {
        dadsrpb[n] = f * usp[n];
        dadsrpt[n] = f * usp[n] * tjd[1];
    }
    return 0;
}
/*
 * accel_gt_part - acceleration from the central body's gravity field plus
 * solid/ocean tides, with partials for the orbit-determination filter.
 *
 * tjd   : two-part epoch.
 * xic   : satellite state; position used in the inertial frame.
 * acc   : output acceleration (3).
 * part  : when 1, compute dadr (3x3 position partials) and, if enabled by
 *         the global switches, dadk2 (partial w.r.t. the Love number K2)
 *         and the per-coefficient partials in CSinfo[].
 * Globals: NMAX/NSMAX/NOMAX degree limits, COEFG/COEFS/COEFO coefficient
 * arrays, STIDE/OTIDE tide model switches, CT central-body index.
 * Returns 0 (or 1 on the part == 0 early exit).
 */
double accel_gt_part (double *tjd, double *xic, double *acc, int part,
    double *dadr, double *dadk2)
{
    /* NOTE(review): lps, ntide, tmp, jd0, tt, utc, te, tx/ty/tz, ai, c_ie,
     * c_ei, vx/vy/vz are declared but unused in this function. */
    int n,k, lps, ntide, blst[12], nb;
    double GM, radius, *tmp, pi[3], pe[3], llr[3], c_ie[9], c_ei[9], ae[3], ai[3];
    double jd0, tt, utc, te[9], tx[3], ty[3], tz[3], ao[3], as[3], ag[3], dadk2e[3],
        vx[3] = {1,0,0}, vy[3] = {0,1,0}, vz[3] = {0,0,1}, dadre[9], dadres[9], dadrei[9];
    InfStruct info;
    GM = GMCT;
    radius = RCT;
    for (n = 0; n <= 2; n++)
    {
        acc[n] = 0;
    }
    if (NMAX < 2)
    {
        /* gravity-field model disabled: zero all outputs */
        if (part == 1)
        {
            for (n = 0; n <= 8; n++)
            {
                dadr[n] = 0;
            }
            for (n = 0; n <= 3; n++)
            {
                dadk2[n] = 0;
            }
        }
        return 0;
    }
    for (n = 0; n < 3; n++)
    {
        pi[n] = xic[n];
    }
    /* rotation matrices and time arguments for this epoch */
    getinfo(tjd, &info);
    /* inertial -> body-fixed, then to lat/lon/radius for the expansion */
    brmul(info.c_ie, pi, 3, 3, 1, pe);
    xyz2llh(pe, llr);
    // cs2acc (llr, COEFG, GM, radius, NMAX, ae);
    /* static field acceleration (and partials dadre when part == 1) */
    cs2ada (llr, COEFG, NMAX, ae, part, dadre, 1);
    brmul(info.c_ei, ae, 3, 3, 1, ag);
    for (n = 0; n < 3; n++)
    {
        acc[n] = ag[n];
    }
    if (STIDE != 0)
    {
        /* choose the solid-tide model; the k2-only variants pick the
         * perturbing-body list blst by central body CT (body indices:
         * 10 = Sun, 9 = Moon, 2 = Earth, 5 = Saturn -- TODO confirm) */
        if (STIDE == 3 && CT == 2)
            stidecs_Anelastic(&info, 1, COEFS);
        else if (STIDE == 2 && CT == 2)
            stidecs(tjd, info.c_ie, 1, COEFS);
        else if (CT == 2)
        {
            blst[0] = 10; blst[1] = 9; nb = 2;
            stidecs_k2 (&info, K2, COEFS, blst, nb);
        }
        else if (CT == 9)
        {
            blst[0] = 10; blst[1] = 2; nb = 2;
            stidecs_k2 (&info, K2, COEFS, blst, nb);
        }
        else if (CT == 20)
        {
            blst[0] = 10; blst[1] = 5; nb = 2;
            stidecs_k2 (&info, K2, COEFS, blst, nb);
        }
        else
        {
            blst[0] = 10; nb = 1;
            stidecs_k2 (&info, K2, COEFS, blst, nb);
        }
        // cs2acc (llr, COEFS, GM, radius, NSMAX, ae);
        /* tide acceleration added on top of the static field */
        cs2ada (llr, COEFS, NSMAX, ae, part, dadres, 0);
        brmul(info.c_ei, ae, 3, 3, 1, as);
        for (n = 0; n < 3; n++)
        {
            acc[n] = acc[n] + as[n];
        }
        for (n = 0; n <= 8; n++)
        {
            dadre[n] = dadre[n] + dadres[n];
        }
    }
    if (OTIDE != 0) // N.A.
    {
        otidecs(info.jdt, info.gmst, NOMAX, COEFO);
        cs2acc (llr, COEFO, GM, radius, NOMAX, ae);
        brmul(info.c_ei, ae, 3, 3, 1, ao);
        for (n = 0; n < 3; n++)
        {
            acc[n] = acc[n] + ao[n];
        }
    }
    if (part == 0)
        return 1;
    /* rotate the body-fixed partials into the inertial frame:
     * dadr = C_ei * dadre * C_ie */
    brmul(dadre, info.c_ie, 3, 3, 3, dadrei);
    brmul(info.c_ei, dadrei, 3, 3, 3, dadr);
    if (MTK2 == 1)
    {
        /* partial w.r.t. K2: re-evaluate the tide with unit Love number.
         * NOTE(review): blst/nb are set only inside the STIDE != 0 branch
         * above -- if STIDE == 0 and MTK2 == 1 they are used uninitialized
         * here. Confirm the configuration never enables MTK2 without STIDE. */
        stidecs_k2 (&info, 1, COEFS, blst, nb);
        cs2ada (llr, COEFS, NSMAX, dadk2e, 0, dadres, 0);
        brmul(info.c_ei, dadk2e, 3, 3, 1, dadk2);
    }
    if (MGCS > 0)
    {
        /* rotate the per-coefficient partials (filled by cs2ada) */
        for (k = 0; k < MGCS; k ++)
        {
            brmul(info.c_ei, CSinfo[k].dadcse, 3, 3, 1, CSinfo[k].dadcs);
        }
    }
    return 0;
}
// nmax = 4;
// stcs = (double *) calloc ( (nmax + 1) * (nmax + 1), sizeof(double));
/*
 * stidecs_k2 - degree-2 solid-tide corrections to the central body's
 * spherical-harmonic coefficients using a single Love number k2.
 *
 * info  : epoch data (jd0, tt, and the inertial->body-fixed matrix c_ie).
 * k2    : degree-2 Love number.
 * stcs  : output coefficient array; only the nine degree<=2 slots are
 *         written (C20, C21/S21, C22/S22; degree 0/1 forced to zero).
 *         NOTE(review): the index layout (c21 at [4], s21 at [6], c22 at
 *         [7], s22 at [8]) matches the nmax = 2 packing used by cs2ada --
 *         confirm NSMAX == 2 whenever this routine fills COEFS.
 * body  : list of perturbing-body indices; entries > 12 are skipped.
 * nbody : number of entries in body[].
 * Returns 0.
 */
double stidecs_k2(InfStruct *info, double k2, double *stcs, int *body, int nbody)
{
    int sun, i;
    double xs[6], gms2e, tjd[2],
        pse[3], llrs[3], pbar[4], t,
        p20s, p30s, p21s, p31s, p22s, p32s, p33s,
        rers, c20, c21, s21, c22, s22;
    tjd[0] = info->jd0;
    tjd[1] = info->tt/86400.0;
    c20 = 0; c21 = 0; s21 = 0; c22 = 0; s22 = 0;
    for (i = 0; i < nbody; i++)
    {
        sun = body[i];
        if (sun > 12)
            continue;
        /* perturbing body relative to the central body CT */
        get_ephemeris (tjd, sun, CT, xs);
        // planet_ephemeris (tjd, sun, earth, ps, vs);
        /* rotate to the body-fixed frame and get lat/lon/distance */
        brmul (info->c_ie, xs, 3, 3, 1, pse);
        xyz2llh(pse, llrs);
        /* normalized Legendre functions at the body's latitude */
        t = sin(llrs[0] * DEG2RAD);
        lgdr(t, 3, 0, pbar); p20s = pbar[2]; p30s = pbar[3];
        lgdr(t, 3, 1, pbar); p21s = pbar[1]; p31s = pbar[2];
        lgdr(t, 3, 2, pbar); p22s = pbar[0]; p32s = pbar[1];
        lgdr(t, 3, 3, pbar); p33s = pbar[0];
        gms2e = GMDE[sun]/GMCT;
        rers = RCT / llrs[2];
        /* standard k2 tide formula: k2/5 * (GM_b/GM) * (R/r)^3 * P2m */
        c20 += k2/5.0 * ( gms2e * pow(rers, 3) * p20s );
        c21 += k2/5.0 * ( gms2e * pow(rers, 3) * p21s * cos(llrs[1] * DEG2RAD) );
        s21 += k2/5.0 * ( gms2e * pow(rers, 3) * p21s * sin(llrs[1] * DEG2RAD) );
        c22 += k2/5.0 * ( gms2e * pow(rers, 3) * p22s * cos(llrs[1] * DEG2RAD * 2.0) );
        s22 += k2/5.0 * ( gms2e * pow(rers, 3) * p22s * sin(llrs[1] * DEG2RAD * 2.0) );
    }
    stcs[0] = 0; //c00;
    stcs[1] = 0; //c10;
    stcs[2] = c20;
    stcs[3] = 0;
    stcs[4] = c21;
    stcs[5] = 0; //s11;
    stcs[6] = s21;
    stcs[7] = c22;
    stcs[8] = s22;
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * earth_fullaccel - assemble the full acceleration model for an Earth
 * satellite (point mass, third bodies, radiation pressure, gravity field
 * and tides, relativistic corrections) and return the state derivative.
 *
 * version: 20 Aug 2010
 */
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * earth_fullaccel - total acceleration model for an Earth satellite.
 * Sums the point-mass term, n-body perturbations, solar radiation
 * pressure, gravity field + tides, and relativistic corrections, gated by
 * the global switches NBODY, AMR, NMAX and RELTIV, and returns the state
 * derivative d(x)/dt in fxic: [vel(3), acc(3)].
 * Returns 0.
 */
double earth_fullaccel (double jd, double tt, double *xic, double *fxic)
{
    int i;
    double tjd[2], ap[3], an[3], ar[3], ag[3], apgr[3], angr[3], at[3], ao[3];

    tjd[0] = jd;
    tjd[1] = tt;
    /* central-body point mass + its relativistic correction */
    accel_pmiers (tjd, xic, ap, apgr);
    /* third-body perturbations (with their relativistic part) */
    if (NBODY == 1)
    {
        accel_nbody (tjd, xic, an, angr);
    }
    else
    {
        for (i = 0; i < 3; i++)
        {
            an[i] = 0;
            angr[i] = 0;
        }
    }
    /* solar radiation pressure, only when an area-to-mass ratio is set */
    if (AMR != 0)
    {
        accel_slrad (tjd, xic, ar);
    }
    else
    {
        ar[0] = 0;
        ar[1] = 0;
        ar[2] = 0;
    }
    /* gravity field, solid tide and ocean tide */
    if (NMAX >= 2)
    {
        accel_gravtide (tjd, xic, ag, at, ao);
    }
    else
    {
        for (i = 0; i < 3; i++)
        {
            ag[i] = 0;
            at[i] = 0;
            ao[i] = 0;
        }
    }
    /* RELTIV: 0 = no relativity, 1 = point-mass relativity only */
    if (RELTIV == 0)
    {
        for (i = 0; i < 3; i++)
        {
            apgr[i] = 0;
            angr[i] = 0;
        }
    }
    else if (RELTIV == 1)
    {
        angr[0] = 0;
        angr[1] = 0;
        angr[2] = 0;
    }
    /* state derivative: velocity, then the summed acceleration */
    fxic[0] = xic[3];
    fxic[1] = xic[4];
    fxic[2] = xic[5];
    for (i = 0; i < 3; i++)
    {
        fxic[i + 3] = ap[i] + an[i] + ar[i] + ag[i] + at[i] + ao[i]
            + apgr[i] + angr[i];
    }
    return 0;
}
/*
 * accel_gravtide - gravity-field, solid-tide and ocean-tide accelerations
 * for an Earth satellite (acceleration only, no partials).
 *
 * tjd : two-part epoch; tjd[1] is in days.
 * xic : satellite state in au (positions are converted to metres here).
 * ag  : output static-field acceleration, au/day^2.
 * as  : output solid-tide acceleration, au/day^2 (zeroed if STIDE == 0).
 * ao  : output ocean-tide acceleration, au/day^2 (zeroed if OTIDE == 0).
 * Returns 0.
 */
double accel_gravtide (double *tjd, double *xic, double *ag, double *as, double *ao)
{
    /* NOTE(review): stcs, jd0, tt, utc, lps, te, tx/ty/tz, vx/vy/vz and
     * c_ie/c_ei are unused here (they belong to the commented-out manual
     * rotation below, superseded by getinfo()). */
    int n, lps, ntide;
    double GM, radius, *stcs, pi[3], pe[3], llr[3], c_ie[9], c_ei[9], ae[3], ai[3];
    double jd0, tt, utc, te[9], tx[3], ty[3], tz[3],
        vx[3] = {1,0,0}, vy[3] = {0,1,0}, vz[3] = {0,0,1};
    InfStruct info;
    GM = GMCT;
    radius = RCT;
    /* position au -> m */
    for (n = 0; n < 3; n++)
    {
        pi[n] = xic[n] * AU;
    }
    getinfo(tjd, &info);
    /*
    printf ("jd0 = %.10f\t lps = %d\t gmt = %.10f\n", info.jd0, info.leaps, info.gmst );
    for (n = 0; n < 9; n++)
    printf ("%e\n", info.c_ie[n]);
    jd0 = tjd[0];
    tt = tjd[1] * 86400.0;
    lps = getlps (jd0 + tt/86400.0);
    utc = tt - (lps + 32.184);
    // printf("%f\t%f\n", jd0, utc);
    itrf2gcrf(jd0, utc, vx, tx);
    itrf2gcrf(jd0, utc, vy, ty);
    itrf2gcrf(jd0, utc, vz, tz);
    for (n = 0; n < 3; n++)
    {
    c_ei[n*3] = tx[n];
    c_ei[n*3+1] = ty[n];
    c_ei[n*3+2] = tz[n]; //c_ei mean ITRF2ICRF
    }
    mt(c_ei, 3, 3, c_ie);
    for (n = 0; n < 9; n++)
    printf ("%e\n", c_ie[n]);
    exit(0);
    */
    /* rotate to the body-fixed frame and evaluate the static field */
    brmul(info.c_ie, pi, 3, 3, 1, pe);
    xyz2llh(pe, llr);
    // xyz2llh(pi, llr);
    cs2acc (llr, COEFG, GM, radius, NMAX, ae);
    // cs2acc (llr, COEFG, GM, radius, NMAX, ai);
    // printf("ae = %f\t%f\t%f\n", ae[0], ae[1], ae[2]);
    // exit(0);
    brmul(info.c_ei, ae, 3, 3, 1, ai);
    /* m/s^2 -> au/day^2 */
    for (n = 0; n < 3; n++)
    {
        ag[n] = ai[n] / AU * 86400 * 86400;
    }
    if (STIDE == 0 && OTIDE == 0)
    {
        for (n = 0; n < 3; n++)
        {
            as[n] = 0;
            ao[n] = 0;
        }
        return 0;
    }
    // ntide = 4;
    // COEFS = (double *) calloc ( (ntide + 1) * (ntide + 1), sizeof(double));
    // id_perm = PERM;
    // stidecs(tjd, c_ie, 1, stcs);
    /* solid tide: pick the Love-number model by the STIDE switch */
    if (STIDE == 1)
        stidecs_Anelastic(&info, 1, COEFS);
    if (STIDE == 2)
        stidecs(tjd, info.c_ie, 1, COEFS);
    cs2acc (llr, COEFS, GM, radius, NSMAX, ae);
    brmul(info.c_ei, ae, 3, 3, 1, ai);
    for (n = 0; n < 3; n++)
    {
        as[n] = ai[n] / AU * 86400 * 86400;
    }
    if (OTIDE == 0)
    {
        for (n = 0; n < 3; n++)
        {
            ao[n] = 0;
        }
        return 0;
    }
    // NOMAX = 4;
    // COEFO = (double *) calloc ( (NOMAX + 1) * (NOMAX + 1), sizeof(double));
    /* ocean tide from the FES constituent table */
    otidecs(info.jdt, info.gmst, NOMAX, COEFO);
    // printf ("jdt = %e\t gmst = %f\t NOMAX = %d\t COEFO = %e\n", info.jdt, info.gmst, NOMAX, COEFO[5]);
    // for (n=0;n<25;n++)
    // printf ("COEFO = %e\t COEFS = %e\n", COEFO[n], COEFS[n]);
    cs2acc (llr, COEFO, GM, radius, NOMAX, ae);
    brmul(info.c_ei, ae, 3, 3, 1, ai);
    for (n = 0; n < 3; n++)
    {
        ao[n] = ai[n] / AU * 86400 * 86400;
    }
    /* NOTE(review): this trailing branch is unreachable in the STIDE == 0
     * sense only when OTIDE != 0; it re-zeroes as[] if STIDE == 0. */
    if (STIDE == 0)
    {
        for (n = 0; n < 3; n++)
        {
            as[n] = 0;
        }
        return 0;
    }
    return 0;
}
/*
 * openotcs - load the ocean-tide constituent file into the global otfes[]
 * table. Each record carries a Doodson number (ds), a constituent name,
 * degree n / order m, and the prograde/retrograde C/S amplitudes.
 * The six Doodson argument multipliers are unpacked from the decimal
 * digits of ds (digits 2..6 are stored with an offset of +5 in the file).
 * Exits the process if the file cannot be opened. Returns 0.
 */
int openotcs (char *infile)
{
    FILE *fp_ot;
    int i;
    char string[100];
    if ((fp_ot = fopen (infile,"r")) == NULL)
    {
        printf ("Cannot open otide file?\n");
        exit (0);
    }
    i = 0;
    while (1)
    {
        if (fgets (string, 100, fp_ot) == NULL) break;
        /* FIX: pass the char array itself for %s, not its address --
         * &otfes[i].name has type pointer-to-array, which is the wrong
         * type for %s (undefined behaviour). Also skip any line that does
         * not yield all eight fields instead of using half-parsed values. */
        if (sscanf (string, "%lf%s%d%d%lf%lf%lf%lf",
            &otfes[i].ds, otfes[i].name, &otfes[i].n, &otfes[i].m,
            &otfes[i].cp, &otfes[i].sp, &otfes[i].cm, &otfes[i].sm) != 8)
            continue;
        /* Doodson digits -> argument multipliers */
        otfes[i].argn[0] = (int)(otfes[i].ds/100)%10;
        otfes[i].argn[1] = (int)(otfes[i].ds/10)%10 - 5;
        otfes[i].argn[2] = (int)(otfes[i].ds/1)%10 - 5;
        otfes[i].argn[3] = (int)(otfes[i].ds*10)%10 - 5;
        otfes[i].argn[4] = (int)(otfes[i].ds*100)%10 - 5;
        otfes[i].argn[5] = (int)(otfes[i].ds*1000)%10 - 5;
        /* NOTE(review): no bound check on i against the capacity of
         * otfes[] -- confirm the table is sized for the input file. */
        i++;
    }
    // (*n) = i;
    fclose(fp_ot);
    return 0;
}
/*
 * otidecs - accumulate ocean-tide corrections into the spherical-harmonic
 * coefficient array coef[] from the constituent table otfes[] (loaded by
 * openotcs). Constituents of degree above nmax are skipped; m > 0 entries
 * use the same packed (C,S) layout as cs2ada.
 *
 * NOTE(review): coef is only accumulated into, never cleared here -- the
 * caller must zero it between epochs or the corrections pile up across
 * calls; confirm against how COEFO is managed.
 * Returns 0.
 */
double otidecs(double jdt, double gmst, int nmax, double *coef)
{
    double doodarg[6], ang, cp, sp, cm, sm;
    int i, ncon = 1, n,m, l, ind;
    /* FIX: `ang` was read uninitialized (undefined behaviour) because the
     * DOODSN call below is commented out. Initialize it to zero -- the
     * commented-out "ang=0" debug line shows that intent -- until the
     * Doodson-argument computation is restored. */
    ang = 0;
    for (i = 0; i < NFES; i++)
    {
        if (otfes[i].n > nmax)
        {
            continue;
        }
        n = otfes[i].n;
        m = otfes[i].m;
        cp = otfes[i].cp;
        sp = otfes[i].sp;
        cm = otfes[i].cm;
        sm = otfes[i].sm;
        // DOODSN(&jdt, &gmst, otfes[i].argn, &ncon, doodarg, &ang);
        // printf ("cp = %e\t sp = %e\t ang = %e\t doodarg = %e\n", cp, sp, ang, doodarg[3]);
        if (m == 0)
        {
            coef[n] = coef[n] + 1e-11 * ((cp+cm) * cos(ang) + (sp+sm)*sin(ang));
        }
        else
        {
            /* packed index of (n,m): C part at [ind+n-m], S part l slots on */
            l = nmax - m + 1;
            ind = nmax + 1 + (2 * nmax - m + 2) * (m - 1);
            coef[ind + n - m] = coef[ind + n - m] + 1e-11 * ((cp+cm) * cos(ang) + (sp+sm)*sin(ang));
            coef[ind + n - m + l] = coef[ind + n - m + l] + 1e-11 * ((sp-sm) * cos(ang) - (cp-cm)*sin(ang));
        }
    }
    return 0;
}
// nmax = 4;
// stcs = (double *) calloc ( (nmax + 1) * (nmax + 1), sizeof(double));
/*
 * stidecs_Anelastic - solid-Earth tide corrections to the geopotential
 * coefficients using anelastic-Earth (complex) Love numbers, degrees 2-4,
 * driven by the Sun and Moon. Layout of stcs matches the nmax = 4 packing
 * used by cs2ada/cs2acc (25 slots).
 *
 * info    : epoch data (jd0, tt, inertial->body-fixed matrix c_ie).
 * id_perm : when 1, remove the permanent tide c20pt from C20
 *           (zero-tide -> tide-free convention).
 * stcs    : output coefficient corrections (25 doubles).
 * Returns 0.
 * NOTE(review): the Love-number values follow the IERS Conventions style
 * (real/imaginary degree-2, k+ for degree 4) -- confirm against the
 * adopted conventions edition. The frequency-dependent step 2 corrections
 * are currently disabled (stfrqdep call commented out).
 */
double stidecs_Anelastic(InfStruct *info, int id_perm, double *stcs)
{
    // double gms2e = 332946.048166;
    // double gmm2e = 1/81.3005690699;
    // double gms2e = 332946.0487185;
    // double gmm2e = 1/81.3005538970823;
    double GMsun = 1.32712442076e20;
    double gms2e, gmm2e = 0.0123000383;
    // double c20pt = -4.1736e-9;
    double c20pt = -4.201e-9;
    /* NOTE(review): k20/k21/k22 and k20p/k21p/k22p are unused here; this
     * routine uses the RE/IM pairs and the k2?pa values instead. */
    double k20 = 0.29525;
    double k21 = 0.29470;
    double k22 = 0.29801;
    double REk20 = 0.30190;
    double REk21 = 0.29830;
    double REk22 = 0.30102;
    double IMk21 = -0.00144;
    double IMk22 = -0.00130;
    double k20pa = -0.00089;
    double k21pa = -0.00080;
    double k22pa = -0.00057;
    double k20p = -0.00087;
    double k21p = -0.00079;
    double k22p = -0.00057;
    double k30 = 0.093;
    double k31 = 0.093;
    double k32 = 0.093;
    double k33 = 0.094;
    short int moon = 9, earth = 2, sun = 10, n;
    double ps[3], vs[3], pm[3], vm[3], tjd[2],
        pse[3], pme[3], llrs[3], llrm[3], pbar[4], t,
        p20m, p30m, p21m, p31m, p22m, p32m, p33m,
        p20s, p30s, p21s, p31s, p22s, p32s, p33s,
        rerm, rers, c20, c30, c40, c21, s21, c22, s22, c31, s31,
        c32, s32, c33, s33, c41, s41, c42, s42,
        c20f, c21f, s21f, c22f, s22f;
    double GM, radius;
    GM = 398600.44180E+09;
    radius = 6378136.6;
    gms2e = GMsun/GM;
    tjd[0] = info->jd0;
    tjd[1] = info->tt/86400.0;
    // Luni-solar ephemeris
    planet_ephemeris (tjd, sun, earth, ps, vs);
    planet_ephemeris (tjd, moon, earth, pm, vm);
    /* au -> m */
    for (n = 0; n < 3; n++)
    {
        ps[n] = ps[n] * AU;
        pm[n] = pm[n] * AU;
    }
    // icrf2itrf(num, ps, pse);
    // icrf2itrf(num, pm, pme);
    brmul (info->c_ie, ps, 3, 3, 1, pse); //inertial to fixed matrix gmat = rmat*tbt
    brmul (info->c_ie, pm, 3, 3, 1, pme); //inertial to fixed matrix gmat = rmat*tbt
    xyz2llh(pse, llrs);
    xyz2llh(pme, llrm);
    /* normalized Legendre functions at the Moon's latitude */
    t = sin(llrm[0] * DEG2RAD);
    lgdr(t, 3, 0, pbar); p20m = pbar[2]; p30m = pbar[3];
    lgdr(t, 3, 1, pbar); p21m = pbar[1]; p31m = pbar[2];
    lgdr(t, 3, 2, pbar); p22m = pbar[0]; p32m = pbar[1];
    lgdr(t, 3, 3, pbar); p33m = pbar[0];
    /* ... and at the Sun's latitude */
    t = sin(llrs[0] * DEG2RAD);
    lgdr(t, 3, 0, pbar); p20s = pbar[2]; p30s = pbar[3];
    lgdr(t, 3, 1, pbar); p21s = pbar[1]; p31s = pbar[2];
    lgdr(t, 3, 2, pbar); p22s = pbar[0]; p32s = pbar[1];
    lgdr(t, 3, 3, pbar); p33s = pbar[0];
    rerm = radius / llrm[2];
    rers = radius / llrs[2];
    // Frequency Independent Terms
    // C20
    c20 = REk20/5.0 * ( gmm2e * pow(rerm, 3) * p20m
        + gms2e * pow(rers, 3) * p20s );
    // C21/S21 (complex k21: real part on the in-phase, imag on quadrature)
    c21 = + REk21/5.0 * ( gmm2e * pow(rerm, 3) * p21m * cos(llrm[1] * DEG2RAD)
        + gms2e * pow(rers, 3) * p21s * cos(llrs[1] * DEG2RAD) )
        + IMk21/5.0 * ( gmm2e * pow(rerm, 3) * p21m * sin(llrm[1] * DEG2RAD)
        + gms2e * pow(rers, 3) * p21s * sin(llrs[1] * DEG2RAD) );
    s21 = - IMk21/5.0 * ( gmm2e * pow(rerm, 3) * p21m * cos(llrm[1] * DEG2RAD)
        + gms2e * pow(rers, 3) * p21s * cos(llrs[1] * DEG2RAD) )
        + REk21/5.0 * ( gmm2e * pow(rerm, 3) * p21m * sin(llrm[1] * DEG2RAD)
        + gms2e * pow(rers, 3) * p21s * sin(llrs[1] * DEG2RAD) );
    // C22/S22
    c22 = + REk22/5.0 * ( gmm2e * pow(rerm, 3) * p22m * cos(llrm[1] * DEG2RAD * 2.0)
        + gms2e * pow(rers, 3) * p22s * cos(llrs[1] * DEG2RAD * 2.0) )
        + IMk22/5.0 * ( gmm2e * pow(rerm, 3) * p22m * sin(llrm[1] * DEG2RAD * 2.0)
        + gms2e * pow(rers, 3) * p22s * sin(llrs[1] * DEG2RAD * 2.0) );
    s22 = - IMk22/5.0 * ( gmm2e * pow(rerm, 3) * p22m * cos(llrm[1] * DEG2RAD * 2.0)
        + gms2e * pow(rers, 3) * p22s * cos(llrs[1] * DEG2RAD * 2.0) )
        + REk22/5.0 * ( gmm2e * pow(rerm, 3) * p22m * sin(llrm[1] * DEG2RAD * 2.0)
        + gms2e * pow(rers, 3) * p22s * sin(llrs[1] * DEG2RAD * 2.0) );
    // C30
    c30 = k30/7.0 * ( gmm2e * pow(rerm, 4) * p30m
        + gms2e * pow(rers, 4) * p30s );
    // C31/S31
    c31 = k31/7.0 * ( gmm2e * pow(rerm, 4) * p31m * cos(llrm[1] * DEG2RAD)
        + gms2e * pow(rers, 4) * p31s * cos(llrs[1] * DEG2RAD) );
    s31 = k31/7.0 * ( gmm2e * pow(rerm, 4) * p31m * sin(llrm[1] * DEG2RAD)
        + gms2e * pow(rers, 4) * p31s * sin(llrs[1] * DEG2RAD) );
    // C32/S32
    c32 = k32/7.0 * ( gmm2e * pow(rerm, 4) * p32m * cos(llrm[1] * DEG2RAD * 2.0)
        + gms2e * pow(rers, 4) * p32s * cos(llrs[1] * DEG2RAD * 2.0) );
    s32 = k32/7.0 * ( gmm2e * pow(rerm, 4) * p32m * sin(llrm[1] * DEG2RAD * 2.0)
        + gms2e * pow(rers, 4) * p32s * sin(llrs[1] * DEG2RAD * 2.0) );
    // C33/S33
    c33 = k33/7.0 * ( gmm2e * pow(rerm, 4) * p33m * cos(llrm[1] * DEG2RAD * 3.0)
        + gms2e * pow(rers, 4) * p33s * cos(llrs[1] * DEG2RAD * 3.0) );
    s33 = k33/7.0 * ( gmm2e * pow(rerm, 4) * p33m * sin(llrm[1] * DEG2RAD * 3.0)
        + gms2e * pow(rers, 4) * p33s * sin(llrs[1] * DEG2RAD * 3.0) );
    // C40 (degree-4 terms driven by the degree-2 tide via k+)
    c40 = k20pa/5.0* ( gmm2e * pow(rerm, 3) * p20m
        + gms2e * pow(rers, 3) * p20s );
    // C41/S41
    c41 = k21pa/5.0* ( gmm2e * pow(rerm, 3) * p21m * cos(llrm[1] * DEG2RAD)
        + gms2e * pow(rers, 3) * p21s * cos(llrs[1] * DEG2RAD) );
    s41 = k21pa/5.0* ( gmm2e * pow(rerm, 3) * p21m * sin(llrm[1] * DEG2RAD)
        + gms2e * pow(rers, 3) * p21s * sin(llrs[1] * DEG2RAD) );
    // C42/S42
    c42 = k22pa/5.0* ( gmm2e * pow(rerm, 3) * p22m * cos(llrm[1] * DEG2RAD * 2.0)
        + gms2e * pow(rers, 3) * p22s * cos(llrs[1] * DEG2RAD * 2.0) );
    s42 = k22pa/5.0* ( gmm2e * pow(rerm, 3) * p22m * sin(llrm[1] * DEG2RAD * 2.0)
        + gms2e * pow(rers, 3) * p22s * sin(llrs[1] * DEG2RAD * 2.0) );
    /* pack into the nmax = 4 coefficient layout */
    stcs[0] = 0; //c00;
    stcs[1] = 0; //c10;
    stcs[2] = c20;
    stcs[3] = c30;
    stcs[4] = c40;
    stcs[5] = 0; //c11;
    stcs[6] = c21;
    stcs[7] = c31;
    stcs[8] = c41;
    stcs[9] = 0; //s11;
    stcs[10] = s21;
    stcs[11] = s31;
    stcs[12] = s41;
    stcs[13] = c22;
    stcs[14] = c32;
    stcs[15] = c42;
    stcs[16] = s22;
    stcs[17] = s32;
    stcs[18] = s42;
    stcs[19] = c33;
    stcs[20] = 0; //c43;
    stcs[21] = s33;
    stcs[22] = 0; //s43;
    stcs[23] = 0; //c44;
    stcs[24] = 0; //s44;
    // Frequency Dependent Terms (currently disabled -> all zero)
    c20f = 0; c21f = 0; s21f = 0; c22f = 0; s22f = 0;
    // stfrqdep(info->jdt, info->gmst, &c20f, &c21f, &s21f, &c22f, &s22f);
    stcs[2] = c20 + c20f;
    stcs[6] = c21 + c21f;
    stcs[10] = s21 + s21f;
    stcs[13] = c22 + c22f;
    stcs[16] = s22 + s22f;
    if(id_perm==1)
    {
        /* remove the permanent (zero-frequency) tide from C20 */
        stcs[2] = c20 + c20f - c20pt;
    }
    return 0;
}
// nmax = 4;
// stcs = (double *) calloc ( (nmax + 1) * (nmax + 1), sizeof(double));
/*
 * stidecs - solid-Earth tide corrections to the geopotential coefficients
 * using real (elastic-Earth) Love numbers, degrees 2-4, driven by the Sun
 * and Moon. Same output layout as stidecs_Anelastic (nmax = 4 packing).
 *
 * tjd     : two-part epoch for the luni-solar ephemeris.
 * c_ie    : inertial -> body-fixed rotation matrix (3x3 row-major).
 * id_perm : when 1, remove the permanent tide c20pt from C20.
 * stcs    : output coefficient corrections (25 doubles).
 * Returns 0.
 * NOTE(review): the frequency-dependent step 2 corrections are disabled
 * (stfrqdep call commented out), so c2?f terms below stay zero.
 */
double stidecs(double *tjd, double *c_ie, int id_perm, double *stcs)
{
    // double gms2e = 332946.048166;
    // double gmm2e = 1/81.3005690699;
    // double gms2e = 332946.0487185;
    // double gmm2e = 1/81.3005538970823;
    double GMsun = 1.32712442076e20;
    double gms2e, gmm2e = 0.0123000383;
    // double c20pt = -4.1736e-9;
    double c20pt = -4.201e-9;
    double k20 = 0.29525;
    double k21 = 0.29470;
    double k22 = 0.29801;
    /* anelastic values kept for reference (used by stidecs_Anelastic):
    double REk20 = 0.30190;
    double REk21 = 0.29830;
    double REk22 = 0.30102;
    double IMk21 = -0.00144;
    double IMk22 = -0.00130;
    double k20pa = -0.00089;
    double k21pa = -0.00080;
    double k22pa = -0.00057;
    */
    double k20p = -0.00087;
    double k21p = -0.00079;
    double k22p = -0.00057;
    double k30 = 0.093;
    double k31 = 0.093;
    double k32 = 0.093;
    double k33 = 0.094;
    short int moon = 9, earth = 2, sun = 10, n;
    double ps[3], vs[3], pm[3], vm[3],
        pse[3], pme[3], llrs[3], llrm[3], pbar[4], t,
        p20m, p30m, p21m, p31m, p22m, p32m, p33m,
        p20s, p30s, p21s, p31s, p22s, p32s, p33s,
        rerm, rers, c20, c30, c40, c21, s21, c22, s22, c31, s31,
        c32, s32, c33, s33, c41, s41, c42, s42,
        c20f, c21f, s21f, c22f, s22f;
    double GM, radius;
    GM = 398600.44180E+09;
    radius = 6378136.6;
    gms2e = GMsun/GM;
    // tjd[0] = info[num].jd0;
    // tjd[1] = info[num].tt/86400.0;
    // Luni-solar ephemeris
    planet_ephemeris (tjd, sun, earth, ps, vs);
    planet_ephemeris (tjd, moon, earth, pm, vm);
    /* au -> m */
    for (n = 0; n < 3; n++)
    {
        ps[n] = ps[n] * AU;
        pm[n] = pm[n] * AU;
    }
    // icrf2itrf(num, ps, pse);
    // icrf2itrf(num, pm, pme);
    brmul (c_ie, ps, 3, 3, 1, pse); //inertial to fixed matrix gmat = rmat*tbt
    brmul (c_ie, pm, 3, 3, 1, pme); //inertial to fixed matrix gmat = rmat*tbt
    xyz2llh(pse, llrs);
    xyz2llh(pme, llrm);
    /* normalized Legendre functions at the Moon's latitude */
    t = sin(llrm[0] * DEG2RAD);
    lgdr(t, 3, 0, pbar); p20m = pbar[2]; p30m = pbar[3];
    lgdr(t, 3, 1, pbar); p21m = pbar[1]; p31m = pbar[2];
    lgdr(t, 3, 2, pbar); p22m = pbar[0]; p32m = pbar[1];
    lgdr(t, 3, 3, pbar); p33m = pbar[0];
    /* ... and at the Sun's latitude */
    t = sin(llrs[0] * DEG2RAD);
    lgdr(t, 3, 0, pbar); p20s = pbar[2]; p30s = pbar[3];
    lgdr(t, 3, 1, pbar); p21s = pbar[1]; p31s = pbar[2];
    lgdr(t, 3, 2, pbar); p22s = pbar[0]; p32s = pbar[1];
    lgdr(t, 3, 3, pbar); p33s = pbar[0];
    rerm = radius / llrm[2];
    rers = radius / llrs[2];
    // Frequency Independent Terms
    // C20
    c20 = k20/5.0 * ( gmm2e * pow(rerm, 3) * p20m
        + gms2e * pow(rers, 3) * p20s );
    // C21/S21
    c21 = k21/5.0 * ( gmm2e * pow(rerm, 3) * p21m * cos(llrm[1] * DEG2RAD)
        + gms2e * pow(rers, 3) * p21s * cos(llrs[1] * DEG2RAD) );
    s21 = k21/5.0 * ( gmm2e * pow(rerm, 3) * p21m * sin(llrm[1] * DEG2RAD)
        + gms2e * pow(rers, 3) * p21s * sin(llrs[1] * DEG2RAD) );
    // C22/S22
    c22 = k22/5.0 * ( gmm2e * pow(rerm, 3) * p22m * cos(llrm[1] * DEG2RAD * 2.0)
        + gms2e * pow(rers, 3) * p22s * cos(llrs[1] * DEG2RAD * 2.0) );
    s22 = k22/5.0 * ( gmm2e * pow(rerm, 3) * p22m * sin(llrm[1] * DEG2RAD * 2.0)
        + gms2e * pow(rers, 3) * p22s * sin(llrs[1] * DEG2RAD * 2.0) );
    // C30
    c30 = k30/7.0 * ( gmm2e * pow(rerm, 4) * p30m
        + gms2e * pow(rers, 4) * p30s );
    // C31/S31
    c31 = k31/7.0 * ( gmm2e * pow(rerm, 4) * p31m * cos(llrm[1] * DEG2RAD)
        + gms2e * pow(rers, 4) * p31s * cos(llrs[1] * DEG2RAD) );
    s31 = k31/7.0 * ( gmm2e * pow(rerm, 4) * p31m * sin(llrm[1] * DEG2RAD)
        + gms2e * pow(rers, 4) * p31s * sin(llrs[1] * DEG2RAD) );
    // C32/S32
    c32 = k32/7.0 * ( gmm2e * pow(rerm, 4) * p32m * cos(llrm[1] * DEG2RAD * 2.0)
        + gms2e * pow(rers, 4) * p32s * cos(llrs[1] * DEG2RAD * 2.0) );
    s32 = k32/7.0 * ( gmm2e * pow(rerm, 4) * p32m * sin(llrm[1] * DEG2RAD * 2.0)
        + gms2e * pow(rers, 4) * p32s * sin(llrs[1] * DEG2RAD * 2.0) );
    // C33/S33
    c33 = k33/7.0 * ( gmm2e * pow(rerm, 4) * p33m * cos(llrm[1] * DEG2RAD * 3.0)
        + gms2e * pow(rers, 4) * p33s * cos(llrs[1] * DEG2RAD * 3.0) );
    s33 = k33/7.0 * ( gmm2e * pow(rerm, 4) * p33m * sin(llrm[1] * DEG2RAD * 3.0)
        + gms2e * pow(rers, 4) * p33s * sin(llrs[1] * DEG2RAD * 3.0) );
    // C40 (degree-4 from the degree-2 tide via k')
    c40 = k20p/5.0* ( gmm2e * pow(rerm, 3) * p20m
        + gms2e * pow(rers, 3) * p20s );
    // C41/S41
    c41 = k21p/5.0* ( gmm2e * pow(rerm, 3) * p21m * cos(llrm[1] * DEG2RAD)
        + gms2e * pow(rers, 3) * p21s * cos(llrs[1] * DEG2RAD) );
    s41 = k21p/5.0* ( gmm2e * pow(rerm, 3) * p21m * sin(llrm[1] * DEG2RAD)
        + gms2e * pow(rers, 3) * p21s * sin(llrs[1] * DEG2RAD) );
    // C42/S42
    c42 = k22p/5.0* ( gmm2e * pow(rerm, 3) * p22m * cos(llrm[1] * DEG2RAD * 2.0)
        + gms2e * pow(rers, 3) * p22s * cos(llrs[1] * DEG2RAD * 2.0) );
    s42 = k22p/5.0* ( gmm2e * pow(rerm, 3) * p22m * sin(llrm[1] * DEG2RAD * 2.0)
        + gms2e * pow(rers, 3) * p22s * sin(llrs[1] * DEG2RAD * 2.0) );
    /* pack into the nmax = 4 coefficient layout */
    stcs[0] = 0; //c00;
    stcs[1] = 0; //c10;
    stcs[2] = c20;
    stcs[3] = c30;
    stcs[4] = c40;
    stcs[5] = 0; //c11;
    stcs[6] = c21;
    stcs[7] = c31;
    stcs[8] = c41;
    stcs[9] = 0; //s11;
    stcs[10] = s21;
    stcs[11] = s31;
    stcs[12] = s41;
    stcs[13] = c22;
    stcs[14] = c32;
    stcs[15] = c42;
    stcs[16] = s22;
    stcs[17] = s32;
    stcs[18] = s42;
    stcs[19] = c33;
    stcs[20] = 0; //c43;
    stcs[21] = s33;
    stcs[22] = 0; //s43;
    stcs[23] = 0; //c44;
    stcs[24] = 0; //s44;
    // Frequency Dependent Terms (currently disabled -> all zero)
    c20f = 0; c21f = 0; s21f = 0; c22f = 0; s22f = 0;
    // stfrqdep(info.jdt, info.gmst, &c20f, &c21f, &s21f, &c22f, &s22f);
    stcs[2] = c20 + c20f;
    stcs[6] = c21 + c21f;
    stcs[10] = s21 + s21f;
    stcs[13] = c22 + c22f;
    stcs[16] = s22 + s22f;
    if(id_perm==1)
    {
        /* remove the permanent (zero-frequency) tide from C20 */
        stcs[2] = c20 + c20f - c20pt;
    }
    return 0;
}
double stfrqdep(double jdt, double gmst, double *c20f, double *c21f, double *s21f, double *c22f, double *s22f)
{
double sets[71][8] = {
0,5,5,5,6,5, 16.6e-12, -6.7e-12,
0,5,5,5,7,5, -0.1e-12, 0.1e-12,
0,5,6,5,5,4, -1.2e-12, 0.8e-12,
0,5,7,5,5,5, -5.5e-12, 4.3e-12,
0,5,7,5,6,5, 0.1e-12, -0.1e-12,
0,5,8,5,5,4, -0.3e-12, 0.2e-12,
0,6,3,6,5,5, -0.3e-12, 0.7e-12,
0,6,5,4,4,5, 0.1e-12, -0.2e-12,
0,6,5,4,5,5, -1.2e-12, 3.7e-12,
0,6,5,4,6,5, 0.1e-12, -0.2e-12,
0,6,5,6,5,5, 0.1e-12, -0.2e-12,
0,7,3,5,5,5, 0.0e-12, 0.6e-12,
0,7,5,3,5,5, 0.0e-12, 0.3e-12,
0,7,5,5,5,5, 0.6e-12, 6.3e-12,
0,7,5,5,6,5, 0.2e-12, 2.6e-12,
0,7,5,5,7,5, 0.0e-12, 0.2e-12,
0,8,3,6,5,5, 0.1e-12, 0.2e-12,
0,8,5,4,5,5, 0.4e-12, 1.1e-12,
0,8,5,4,6,5, 0.2e-12, 0.5e-12,
0,9,3,5,5,5, 0.1e-12, 0.2e-12,
0,9,5,3,5,5, 0.1e-12, 0.1e-12,
1,2,5,7,5,5, -0.1e-12, 0.0e-12,
1,2,7,5,5,5, -0.1e-12, 0.0e-12,
1,3,5,6,4,5, -0.1e-12, 0.0e-12,
1,3,5,6,5,5, -0.7e-12, 0.1e-12,
1,3,7,4,5,5, -0.1e-12, 0.0e-12,
1,4,5,5,4,5, -1.3e-12, 0.1e-12,
1,4,5,5,5,5, -6.8e-12, 0.6e-12,
1,4,7,5,5,5, 0.1e-12, 0.0e-12,
1,5,3,6,5,5, 0.1e-12, 0.0e-12,
1,5,5,4,4,5, 0.1e-12, 0.0e-12,
1,5,5,4,5,5, 0.4e-12, 0.0e-12,
1,5,5,6,5,5, 1.3e-12, -0.1e-12,
1,5,5,6,6,5, 0.3e-12, 0.0e-12,
1,5,7,4,5,5, 0.3e-12, 0.0e-12,
1,5,7,4,6,5, 0.1e-12, 0.0e-12,
1,6,2,5,5,6, -1.9e-12, 0.1e-12,
1,6,3,5,4,5, 0.5e-12, 0.0e-12,
1,6,3,5,5,5, -43.4e-12, 2.9e-12,
1,6,4,5,5,4, 0.6e-12, 0.0e-12,
1,6,4,5,5,6, 1.6e-12, -0.1e-12,
1,6,5,3,4,5, 0.1e-12, 0.0e-12,
1,6,5,5,3,5, 0.1e-12, 0.0e-12,
1,6,5,5,4,5, -8.8e-12, 0.5e-12,
1,6,5,5,5,5, 470.9e-12, -30.2e-12,
1,6,5,5,6,5, 68.1e-12, -4.6e-12,
1,6,5,5,7,5, -1.6e-12, 0.1e-12,
1,6,6,4,5,5, 0.1e-12, 0.0e-12,
1,6,6,5,4,4, -0.1e-12, 0.0e-12,
1,6,6,5,5,4, -20.6e-12, -0.3e-12,
1,6,6,5,5,6, 0.3e-12, 0.0e-12,
1,6,6,5,6,4, -0.3e-12, 0.0e-12,
1,6,7,3,5,5, -0.2e-12, 0.0e-12,
1,6,7,3,6,5, -0.1e-12, 0.0e-12,
1,6,7,5,5,5, -5.0e-12, 0.3e-12,
1,6,7,5,6,5, 0.2e-12, 0.0e-12,
1,6,8,5,5,4, -0.2e-12, 0.0e-12,
1,7,3,6,5,5, -0.5e-12, 0.0e-12,
1,7,3,6,6,5, -0.1e-12, 0.0e-12,
1,7,5,4,4,5, 0.1e-12, 0.0e-12,
1,7,5,4,5,5, -2.1e-12, 0.1e-12,
1,7,5,4,6,5, -0.4e-12, 0.0e-12,
1,8,3,5,5,5, -0.2e-12, 0.0e-12,
1,8,5,3,5,5, -0.1e-12, 0.0e-12,
1,8,5,5,5,5, -0.6e-12, 0.0e-12,
1,8,5,5,6,5, -0.4e-12, 0.0e-12,
1,8,5,5,7,5, -0.1e-12, 0.0e-12,
1,9,5,4,5,5, -0.1e-12, 0.0e-12,
1,9,5,4,6,5, -0.1e-12, 0.0e-12,
2,4,5,6,5,5, -0.3e-12, 0.0e-12,
2,5,5,5,5,5, -1.2e-12, 0.0e-12
};
double doodarg[6], ang, c20 = 0, c21 = 0, s21 = 0, c22 = 0, s22 = 0;
int i, nsets = 71, argn[6], ncon = 1;
for (i=0;i<nsets;i++)
{
argn[0] = (int)sets[i][0];
argn[1] = (int)sets[i][1] - 5;
argn[2] = (int)sets[i][2] - 5;
argn[3] = (int)sets[i][3] - 5;
argn[4] = (int)sets[i][4] - 5;
argn[5] = (int)sets[i][5] - 5;
// DOODSN(&info[num].jdt, &info[num].gmst, argn, &ncon, doodarg, &ang);
// DOODSN(&jdt, &gmst, otfes[i].argn, &ncon, doodarg, &ang);
// C20 correction: Long period tidal constituent
if(argn[0]==0)
{
c20 = c20 + sets[i][6]*cos(ang) - sets[i][7]*sin(ang);
}
// C21/S21 correction: Diurnal period tidal constituent
if(argn[0]==1)
{
c21 = c21 + sets[i][6]*sin(ang) + sets[i][7]*cos(ang);
s21 = s21 + sets[i][6]*cos(ang) - sets[i][7]*sin(ang);
}
// C22/S22 correction: Semi-diurnal period tidal constituent
if(argn[0]==2)
{
c22 = c22 + sets[i][6]*cos(ang);
s22 = s22 - sets[i][6]*sin(ang);
}
}
*c20f = c20;
*c21f = c21;
*s21f = s21;
*c22f = c22;
*s22f = s22;
return 0;
}
/*
 * accel_gravt - acceleration from the Earth's static gravity field only.
 * The ITRF<->GCRF rotation is built by transforming the three unit basis
 * vectors with itrf2gcrf at the epoch's UTC.
 *
 * tjd : two-part epoch; tjd[1] in days.
 * xic : satellite state in au.
 * a4  : output acceleration, au/day^2.
 * Returns 0.
 */
double accel_gravt (double *tjd, double *xic, double *a4)
{
    int i, leaps;
    double jd0, sec, utc;
    double GM = 398600.44150E+09;   /* m^3/s^2 */
    double radius = 6378136.3;      /* m */
    double pi[3], pe[3], llr[3], c_ie[9], c_ei[9], ae[3], ai[3];
    double tx[3], ty[3], tz[3];
    double vx[3] = {1,0,0}, vy[3] = {0,1,0}, vz[3] = {0,0,1};

    /* position au -> m */
    for (i = 0; i < 3; i++)
        pi[i] = xic[i] * AU;

    /* epoch in UTC seconds of day (TT minus leap seconds and 32.184 s) */
    jd0 = tjd[0];
    sec = tjd[1] * 86400.0;
    leaps = getlps (jd0 + sec / 86400.0);
    utc = sec - (leaps + 32.184);
    // printf("%f\t%f\n", jd0, utc);

    /* columns of the rotation: the ITRF basis expressed in GCRF */
    itrf2gcrf (jd0, utc, vx, tx);
    itrf2gcrf (jd0, utc, vy, ty);
    itrf2gcrf (jd0, utc, vz, tz);
    for (i = 0; i < 3; i++)
    {
        c_ie[i * 3]     = tx[i];
        c_ie[i * 3 + 1] = ty[i];
        c_ie[i * 3 + 2] = tz[i];
    }
    mt (c_ie, 3, 3, c_ei);

    /* body-fixed position -> lat/lon/radius -> field acceleration */
    brmul (c_ei, pi, 3, 3, 1, pe);
    xyz2llh (pe, llr);
    cs2acc (llr, COEFG, GM, radius, NMAX, ae);
    brmul (c_ie, ae, 3, 3, 1, ai);

    /* m/s^2 -> au/day^2 */
    for (i = 0; i < 3; i++)
        a4[i] = ai[i] / AU * 86400 * 86400;
    return 0;
}
double cs2ada (double *llr, double *cs, int nmax, double *ae,
    int part, double *dadre, int flagdadcs)
/* Spherical-harmonic gravity: potential, acceleration and (optionally)
   position gradient and coefficient partials at a body-fixed point.
   llr      : {latitude (deg), longitude (deg), radius r} — r in the same
              units as the global reference radius RCT.
   cs       : packed C/S coefficient array (order-major packing; same
              layout as used for the ic/is indices below).
   nmax     : maximum degree/order of the expansion.
   ae       : out, acceleration in the body-fixed (e) frame.
   part     : 0 -> potential + acceleration only (dadre zeroed);
              else also fill dadre.
   dadre    : out, 3x3 gradient d(ae)/d(re) in the e-frame.
   flagdadcs: non-zero -> additionally fill CSinfo[k].dadcse, the partials
              of the acceleration w.r.t. each estimated C/S coefficient
              (globals CSinfo, MGCS).
   Returns the potential vi.  Uses globals GMCT, RCT, DEG2RAD.
   NOTE(review): derivative terms divide by sclt = cos(lat); the poles
   (lat = +/-90 deg) are singular here. */
{
    int n, m, k, l, ind, ic, is, label;
    double slat, clat, slon, clon, sclt, cclt, *cosml, *sinml,
        *aprn, *pbar, *pbar1, *pbar2, *pt, *pt1, *pt2, lat, lon, r, vi, t,
        gm, a, an[3], c_en[9], c_ne[9], dadrn[9], dadrne[9], dadre1[9],
        dadre2[9], dcdrn[27], dcdre[27], dcdxe[9], dcdye[9], dcdze[9],
        dadrecx[3], dadrecy[3], dadrecz[3], dadrec[9], dadrea[9];
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
    gm = GMCT;
    a = RCT;
    lat = llr[0];
    lon = llr[1];
    r = llr[2];
    slat = sin(lat * DEG2RAD);
    clat = cos(lat * DEG2RAD);
    slon = sin(lon * DEG2RAD);
    clon = cos(lon * DEG2RAD);
    /* colatitude: sin(colat) = cos(lat), cos(colat) = sin(lat) */
    sclt = clat;
    cclt = slat;
    cosml = (double *) calloc ( nmax + 1, sizeof(double)); //cos(m*lamta)
    sinml = (double *) calloc ( nmax + 1, sizeof(double)); //sin(m*lamta)
    aprn = (double *) calloc ( nmax + 1, sizeof(double));  //(a/r)^n * gm/r
    pbar = (double *) calloc ( nmax + 1, sizeof(double));
    pbar1 = (double *) calloc ( nmax + 1, sizeof(double));
    pbar2 = (double *) calloc ( nmax + 1, sizeof(double));
    pt = (double *) calloc ( (nmax + 1) * (nmax + 1), sizeof(double));
    pt1 = (double *) calloc ( (nmax + 1) * (nmax + 1), sizeof(double));
    pt2 = (double *) calloc ( (nmax + 1) * (nmax + 1), sizeof(double));
    cosml[0] = 1; sinml[0] = 0;
    for (m = 1; m <= nmax; m++)
    {
        cosml[m] = cos(m * lon * DEG2RAD);
        sinml[m] = sin(m * lon * DEG2RAD);
    }
    for (n = 0; n <= nmax; n++)
    {
        aprn[n] = pow (a / r, n) * gm / r;
    }
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
    /* Accumulate potential vi and the gradient an[] in spherical
       components: an[0] ~ d/d(colat), an[1] ~ d/d(lon), an[2] ~ d/dr
       (scaled by 1/r factors after the loop). */
    an[0] = 0; an[1] = 0; an[2] = 0;
    t = cclt; vi = 0;
    for (m = 0; m <= nmax; m ++)
    {
        l = nmax - m + 1;
        lgdr2(t, nmax, m, pbar, pbar1, pbar2);
        for (k = 0; k < l; k++)
        {
            /* ic/is: packed indices of C(n,m) and S(n,m) in cs[] */
            if (m==0)
            {
                n = k;
                ic = n;
                is = 0;
            }
            else
            {
                ind = nmax + 1 + (2 * nmax - m + 2) * (m - 1);
                n = k + m;
                ic = ind + n - m;
                is = ind + n - m + l;
            }
            /* cache per-term basis values for reuse in the partials */
            pt[ic] = aprn[n] * pbar[k] * cosml[m];
            pt[is] = aprn[n] * pbar[k] * sinml[m];
            pt1[ic] = aprn[n] * pbar1[k] * cosml[m];
            pt1[is] = aprn[n] * pbar1[k] * sinml[m];
            pt2[ic] = aprn[n] * pbar2[k] * cosml[m];
            pt2[is] = aprn[n] * pbar2[k] * sinml[m];
            vi = vi + pt[ic] * cs[ic] + pt[is] * cs[is];
            an[0] = an[0] + pt1[ic] * cs[ic] + pt1[is] * cs[is];
            an[1] = an[1] - m * pt[is] * cs[ic] + m * pt[ic] * cs[is];
            an[2] = an[2] + (n+1) * pt[ic] * cs[ic] + (n+1) * pt[is] * cs[is];
//          an[2] = an[2] + (n+1) * ( pt[ic] * cs[ic] + pt[is] * cs[is]);
        }
    }
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
    free (pbar);
    free (pbar1);
    free (pbar2);
    free (cosml);
    free (sinml);
    free (aprn);
    an[0] = - an[0] / r;
    an[1] = + an[1] / r / sclt;
    an[2] = + an[2] / r;
    c_ne[0] = - slat * clon; //nsys: North-East-Down
    c_ne[1] = - slon;
    c_ne[2] = - clat * clon;
    c_ne[3] = - slat * slon;
    c_ne[4] = clon;
    c_ne[5] = - clat * slon;
    c_ne[6] = clat;
    c_ne[7] = 0;
    c_ne[8] = - slat;
    brmul(c_ne, an, 3, 3, 1, ae); //from n-sys to e-sys
    for (n = 0; n < 9; n++)
        dadre[n] = 0;
    if (part == 0)
    {
        free (pt);
        free (pt1);
        free (pt2);
        return vi;
    }
    /* Second pass: 3x3 gradient of the acceleration in the n-frame,
       reusing the cached pt/pt1/pt2 tables. */
    for (n = 0; n < 9; n++)
        dadrn[n] = 0;
    for (m = 0; m <= nmax; m ++)
    {
        l = nmax - m + 1;
        for (k = 0; k < l; k++)
        {
            if (m==0)
            {
                n = k;
                ic = n;
                is = 0;
            }
            else
            {
                ind = nmax + 1 + (2 * nmax - m + 2) * (m - 1);
                n = k + m;
                ic = ind + n - m;
                is = ind + n - m + l;
            }
            dadrn[0] += pt2[ic] * cs[ic] + pt2[is] * cs[is];
            dadrn[3] += m * ( - pt[is] * cs[ic] + pt[ic] * cs[is]) * cclt / sclt / sclt
                - m * ( - pt1[is] * cs[ic] + pt1[ic] * cs[is]) / sclt;
            dadrn[6] -= (n+1) * (pt1[ic] * cs[ic] + pt1[is] * cs[is]);
            dadrn[1] -= m * ( - pt1[is] * cs[ic] + pt1[ic] * cs[is]) / sclt;
            dadrn[4] -= m * m * (pt[ic] * cs[ic] + pt[is] * cs[is]) / sclt / sclt;
            dadrn[7] += m * (n+1) * ( - pt[is] * cs[ic] + pt[ic] * cs[is]) / sclt;
            dadrn[2] -= (n+2) * (pt1[ic] * cs[ic] + pt1[is] * cs[is]);
            dadrn[5] += m * (n+2) * ( - pt[is] * cs[ic] + pt[ic] * cs[is]) / sclt;
            dadrn[8] += (n+2) * (n+1)* (pt[ic] * cs[ic] + pt[is] * cs[is]);
        }
    }
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
    for (n = 0; n < 9; n++)
        dadrn[n] = dadrn[n] / r / r;
    /* rotate the gradient tensor: dadrea = C_ne * dadrn * C_en */
    mt(c_ne, 3, 3, c_en);
    brmul(dadrn, c_en, 3, 3, 3, dadrne);
    brmul(c_ne, dadrne, 3, 3, 3, dadrea);
    /* curvature part: derivative of the rotation matrix itself w.r.t.
       position (dcdrn holds d(C)/d(colat,lon,r), 9x3 stacked). */
    for (n = 0; n < 27; n++)
        dcdrn[n] = 0;
    dcdrn[0] = - sclt * clon;  dcdrn[1] = cclt * slon / sclt;
    dcdrn[3] = 0;              dcdrn[4] = - clon / sclt;
    dcdrn[6] = cclt * clon;/**/dcdrn[7] = slon;
    dcdrn[9] = - sclt * slon;  dcdrn[10] = - cclt * clon / sclt;
    dcdrn[12] = 0;             dcdrn[13] = - slon / sclt;
    dcdrn[15] = cclt * slon;   dcdrn[16] = - clon;
    dcdrn[18] = - cclt;        dcdrn[19] = 0;
    dcdrn[21] = 0;             dcdrn[22] = 0;
    dcdrn[24] = - sclt;        dcdrn[25] = 0;
    for (n = 0; n < 27; n++)
        dcdrn[n] = dcdrn[n] / r;
    brmul(dcdrn, c_en, 9, 3, 3, dcdre);
    for (n = 0; n < 9; n++)
    {
        dcdxe[n] = dcdre[n*3];
        dcdye[n] = dcdre[n*3+1];
        dcdze[n] = dcdre[n*3+2];
//      printf ("%e\n", dcdze[n]);
    }
    brmul(dcdxe, an, 3, 3, 1, dadrecx);
    brmul(dcdye, an, 3, 3, 1, dadrecy);
    brmul(dcdze, an, 3, 3, 1, dadrecz);
    for (n = 0; n < 3; n++)
    {
        dadrec[n*3] = dadrecx[n];
        dadrec[n*3+1] = dadrecy[n];
        dadrec[n*3+2] = dadrecz[n];
    }
    /* total gradient = rotation-curvature part + rotated tensor part */
    for (n = 0; n <= 8; n++)
    {
        dadre[n] = dadrec[n] + dadrea[n];
    }
    if (flagdadcs == 0)
    {
        free (pt);
        free (pt1);
        free (pt2);
        return vi;
    }
// for dadcs_nm
//
    /* Partials of the acceleration w.r.t. each estimated coefficient:
       label 1 -> C(n,m), label -1 -> S(n,m). */
    for (k = 0; k < MGCS; k ++)
    {
        n = CSinfo[k].n; m = CSinfo[k].m; label = CSinfo[k].cs;
        if (m == 0)
        {
            ic = n;
            CSinfo[k].dadcsn[0] = - pt1[ic] / r;
            CSinfo[k].dadcsn[1] = 0;
            CSinfo[k].dadcsn[2] = (n+1) * pt[ic] / r;
        }
        else
        {
            l = nmax - m + 1;
            ind = nmax + 1 + (2 * nmax - m + 2) * (m - 1);
            ic = ind + n - m;
            is = ind + n - m + l;
            if (label == 1)
            {
                CSinfo[k].dadcsn[0] = - pt1[ic] / r;
                CSinfo[k].dadcsn[1] = - m * pt[is] / r / sclt;
                CSinfo[k].dadcsn[2] = (n+1) * pt[ic] / r;
            }
            if (label == -1)
            {
                CSinfo[k].dadcsn[0] = - pt1[is] / r;
                CSinfo[k].dadcsn[1] = m * pt[ic] / r / sclt;
                CSinfo[k].dadcsn[2] = (n+1) * pt[is] / r;
            }
        }
        brmul(c_ne, CSinfo[k].dadcsn, 3, 3, 1, CSinfo[k].dadcse);
    }
    free (pt);
    free (pt1);
    free (pt2);
    return vi;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double cs2acc (double *llr, double *cs, double gm, double a, int nmax,
    double *acc)
/* Acceleration from a spherical-harmonic gravity model at a body-fixed
   point (no partials — lighter-weight sibling of cs2ada).
   llr : {latitude (deg), longitude (deg), radius r} (r in units of a).
   cs  : packed C/S coefficients (same packing as cs2ada).
   gm  : gravitational parameter; a: reference radius of the model.
   acc : out, acceleration in the body-fixed frame.
   Returns 1 always.
   NOTE(review): c_ei and c_in are only used by the commented-out
   inertial-frame variant below; sin(lat)=0 (equator is fine, poles
   divide by sincolat and are singular). */
{
    int n, m, k, l, ind;
    double sinf, cosf, sinlon, coslon, sincolat, coscolat, *cosml, *sinml,
        *aprn, *pbar, *pbar1, *pbar2, accn[3], c_ei[9], c_en[9], c_in[9],
        *pt, *ptt, lat, lon, r, vi, dvdr, dvdcolat, dvdlon, t;
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
    lat = llr[0];
    lon = llr[1];
    r = llr[2];
    sinf = sin(lat * DEG2RAD);
    cosf = cos(lat * DEG2RAD);
    sinlon = sin(lon * DEG2RAD);
    coslon = cos(lon * DEG2RAD);
    sincolat = cosf;
    coscolat = sinf;
// #pragma omp parallel private(cosml, sinml, aprn, pbar, pbar1, pbar2, n, m, k, l, ind, sinf, cosf)
    cosml = (double *) calloc ( nmax + 1, sizeof(double)); //cos(m*lamta)
    sinml = (double *) calloc ( nmax + 1, sizeof(double)); //sin(m*lamta)
    aprn = (double *) calloc ( nmax + 1, sizeof(double));  //(a/r)^n * gm/r
    pbar = (double *) calloc ( nmax + 1, sizeof(double));
    pbar1 = (double *) calloc ( nmax + 1, sizeof(double));
    pbar2 = (double *) calloc ( nmax + 1, sizeof(double));
    pt = (double *) calloc ( (nmax + 1) * (nmax + 1), sizeof(double));
    ptt = (double *) calloc ( (nmax + 1) * (nmax + 1), sizeof(double));
    for (m = 0; m <= nmax; m++)
    {
        cosml[m] = cos(m * lon * DEG2RAD);
        sinml[m] = sin(m * lon * DEG2RAD);
    }
    for (n = 0; n <= nmax; n++)
    {
        aprn[n] = pow (a / r, n) * gm / r;
    }
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
    /* Accumulate the potential vi and its three partial derivatives
       (w.r.t. colatitude, longitude and radius) order by order. */
    t = coscolat; vi = 0; dvdlon = 0; dvdcolat = 0; dvdr = 0;
    for (m = 0; m <= nmax; m ++)
    {
        l = nmax - m + 1;
        lgdr2(t, nmax, m, pbar, pbar1, pbar2);
//      lgdr(t, nmax, m, pbar);
        for (k = 0; k < l; k++)
        {
            if (m==0)
            {
//              ind = 0;
                n = k + m;
                pt[k] = aprn[n] * pbar[k];
                ptt[k] = aprn[n] * pbar1[k];
                vi = vi + pt[k] * cs[k];
//              if (n>=2)
                {
                    dvdr = dvdr + (n+1) * pt[k] * cs[k];
                    dvdcolat = dvdcolat + ptt[k] * cs[k];
                }
            }
            else
            {
                /* packed offsets of C(n,m) / S(n,m) within cs[] */
                ind = nmax + 1 + (2 * nmax - m + 2) * (m - 1);
                n = k + m;
                pt[ind + n - m] = aprn[n] * pbar[k] * cosml[m];
                pt[ind + n - m + l] = aprn[n] * pbar[k] * sinml[m];
                ptt[ind + n - m] = aprn[n] * pbar1[k] * cosml[m];
                ptt[ind + n - m + l] = aprn[n] * pbar1[k] * sinml[m];
                vi = vi + pt[ind + n - m] * cs[ind + n - m];
                vi = vi + pt[ind + n - m + l] * cs[ind + n - m + l];
                dvdcolat = dvdcolat + ptt[ind + n - m] * cs[ind + n - m];
                dvdcolat = dvdcolat + ptt[ind + n - m + l] * cs[ind + n - m + l];
                dvdlon = dvdlon - m * pt[ind + n - m + l] * cs[ind + n - m];
                dvdlon = dvdlon + m * pt[ind + n - m] * cs[ind + n - m + l];
                dvdr = dvdr + (n+1) * pt[ind + n - m] * cs[ind + n - m];
                dvdr = dvdr + (n+1) * pt[ind + n - m + l] * cs[ind + n - m + l];
            }
        }
    }
    /* sign/scale conventions: the colatitude factor is already carried
       by lgdr2's derivative (see the commented-out line). */
//  dvdcolat = - dvdcolat * sincolat; //tmd!!
    dvdcolat = dvdcolat;
    dvdlon = + dvdlon;
    dvdr = - dvdr / r;
    accn[0] = - dvdcolat / r;
    accn[1] = + dvdlon / r / sincolat;
    accn[2] = - dvdr;
    c_en[0] = - sinf * coslon; //from fixed to up-east-north system: rmat
    c_en[1] = - sinlon;
    c_en[2] = - cosf * coslon;
    c_en[3] = - sinf * sinlon;
    c_en[4] = coslon;
    c_en[5] = - cosf * sinlon;
    c_en[6] = cosf;
    c_en[7] = 0;
    c_en[8] = - sinf;
//  mt(info[num].c_ie, 3, 3, info[num].c_ei);
//  brmul (info[num].c_ei, c_en, 3, 3, 3, c_in); //inertial to fixed matrix gmat = rmat*tbt
//  brmul(c_in, accn, 3, 3, 1, acc); //from fixed acc to inertial acc
    brmul(c_en, accn, 3, 3, 1, acc); //from fixed acc to inertial acc
//  *v = vi;
//  *dvdt = - ANGVEL * dvdlon;
    free (pbar);
    free (pbar1);
    free (pbar2);
    free (pt);
    free (ptt);
    free (cosml);
    free (sinml);
    free (aprn);
    return 1;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
! Fully-normalized associated Legendre functions Pbar_{n,m}(t) for a fixed
! order m and all degrees n = m..nmax, at t = cos(colatitude).
! pbar[k] receives Pbar_{m+k,m}; the caller supplies at least nmax-m+1 slots.
! Recursion from Jekeli (1996): the sectoral seed Pbar_{m,m} (A.3c/A.3d),
! the sub-diagonal Pbar_{m+1,m} (A.3b) and the general term (A.3a).
!
! S.C. Han, 1/24/01 (MODIFIED FOR CRAY T94 2/13/01)
*/
double lgdr(double t, int nmax, int m, double *pbar)
{
    double sin2, rc, rd;
    int k;
    sin2 = 1.0 - t * t;            /* sin^2(colatitude) */
    /* sectoral seed Pbar_{m,m} */
    if (m < 1)
    {
        pbar[0] = 1.0;             /* Pbar_{0,0} */
    }
    else
    {
        pbar[0] = sqrt (3.0 * sin2);   /* Pbar_{1,1} */
        for (k = 2; k <= m; k++)
            pbar[0] = sqrt((2.0*k+1.0)/(2.0*k)*sin2) * pbar[0];
    }
    /* sub-diagonal term Pbar_{m+1,m} */
    if (nmax - m + 1 >= 2)
        pbar[1] = sqrt(2.0*m +3.0) * t * pbar[0];
    /* three-term recursion for the remaining degrees */
    for (k = 3; k <= nmax - m + 1; k++)
    {
        rc = ((2.0*m+2.0*k-3.0) * (2.0*m + 2.0*k-1.0)) / ((k-1.0)*(2.0*m+k-1.0));
        rd = ((2.0*m+2.0*k-1.0)*(2.0*m+k-2.0)*(k-2.0))
            / ((2.0*m+2.0*k-5.0)*(k-1.0)*(2.0*m+k-1.0));
        pbar[k-1] = sqrt(rc)*t*pbar[k-2] - sqrt(rd) * pbar[k-3];
    }
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
! Fully-normalized associated Legendre functions with derivatives, for a
! fixed order m and degrees n = m..nmax at t = cos(colatitude):
!   pbar[k]  = Pbar_{m+k,m}(t)
!   pbar1[k] = first derivative (Tscherning et al. 1983 recursion)
!   pbar2[k] = second derivative (from the associated Legendre ODE)
! Function values follow Jekeli (1996): sectoral seed (A.3c/A.3d),
! sub-diagonal (A.3b), general three-term recursion (A.3a).
! NOTE: EQUATIONS GIVEN IN TSCHERNING, ET AL(1983) HAVE ERRATA.
! The derivative formulas divide by sqrt(1-t*t): t = +/-1 is singular.
!
! S.C. Han, 1/24/01 (MODIFIED FOR CRAY T94 2/13/01)
*/
double lgdr2(double t, int nmax, int m,
    double *pbar, double *pbar1, double *pbar2)
{
    double ca, cb;
    int k;
    /* Sectoral seed Pbar_{m,m} and its derivative (Tscherning eq. 7). */
    if (m < 1)
    {
        pbar[0] = 1.0;
        pbar1[0] = 0.0;
    }
    else
    {
        pbar[0] = sqrt (3.0*(1.0-t*t));
        pbar1[0] = sqrt(3.0) * t;
        for (k = 2; k <= m; k++)
        {
            /* update the derivative BEFORE pbar[0] is overwritten */
            pbar1[0] = sqrt((2.0*k+1.0)/(2.0*k))*(sqrt(1.0-t*t)*pbar1[0]+t*pbar[0]);
//          pbar1[0] = sqrt((2.0*k+1.0)/(2.0*k))*(sqrt(1.0-t*t)*pbar1[0]+t*pbar[0]/(-sqrt(1.0-t*t)));
            pbar[0] = sqrt((2.0*k+1.0)/(2.0*k)*(1.0-t*t))*pbar[0];
        }
    }
// ! Pm+1,m : JEKEIL (A.3b)
    if (nmax - m + 1 >= 2)
        pbar[1] = sqrt(2.0*m +3.0) * t * pbar[0];
// ! Pn,m (n>=m+2) : JEKEIL (A.3a)
    for (k = 3; k <= nmax-m+1; k++)
    {
        ca = ((2.0*m+2.0*k-3.0) * (2.0*m + 2.0*k-1.0)) / ((k-1.0)*(2.0*m+k-1.0));
        cb = ((2.0*m+2.0*k-1.0)*(2.0*m+k-2.0)*(k-2.0))/((2.0*m+2.0*k-5.0)*(k-1.0)*(2.0*m+k-1.0));
        pbar[k-1] = sqrt(ca)*t*pbar[k-2] - sqrt(cb) * pbar[k-3];
    }
// ! First derivative, n >= m+1 : TSCHERNING (8)
    for (k = 2; k <= nmax-m+1; k++)
    {
        ca = 1.0/sqrt(1.0-t*t)*t*(m+k-1);
        cb = 1.0/sqrt(1.0-t*t)*sqrt((((m+k-1)*(m+k-1)-m*m)*(2.0*(m+k-1)+1.0))/(2.0*(m+k-1)-1.0));
//!! found it different from TSCHERNING (8),dcl-2010-2-14
//!! Jianbin confirms code is correct, dcl-2010-2-15
//!! D=1D0/SQRT(1D0-T**2)/SQRT((((M+I-1)**2-M**2)*(2D0*(M+I-1)+1D0))/(2D0*(M+I-1)-1D0))
        pbar1[k-1] = ca * pbar[k-1] - cb * pbar[k-2];
    }
// ! Second derivative from the associated Legendre differential equation
    for (k = 1; k <= nmax-m+1; k++)
    {
        pbar2[k-1] = (-t/sqrt(1.0-t*t)) * pbar1[k-1]
            - ((m+k-1)*(m+k)-m*m/(1.0-t*t)) * pbar[k-1];
    }
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/* Solar-radiation-pressure acceleration on the spacecraft.
   tjd : two-part Julian date passed to planet_ephemeris.
   xic : spacecraft state relative to the central body CT (position AU,
         velocity AU/day — all six components are differenced).
   acc : out, SRP acceleration (AU/day^2), directed Sun -> spacecraft.
   Uses globals C (speed of light), AMR (area-to-mass ratio, m^2/kg),
   AU and CT.  Always returns 0.
   Fix: removed dead locals (ap, m, xist, unit, rsp3, ssbary) that were
   declared/assigned but never used. */
double accel_slrad (double *tjd, double *xic, double *acc)
{
    double j, c1, rsp, usp[3], xis[6], xsc[6], f;
    short int n, sun;
    sun = 10;
    /* Sun state relative to the central body. */
    planet_ephemeris (tjd, sun, CT, &xsc[0], &xsc[3]);
    for (n = 0; n <= 5; n++)
    {
        xis[n] = xic[n] - xsc[n];   /* spacecraft relative to Sun */
    }
    rsp = sqrt (xis[0] * xis[0] + xis[1] * xis[1] + xis[2] * xis[2]);
    usp[0] = xis[0] / rsp;          /* unit vector Sun -> spacecraft */
    usp[1] = xis[1] / rsp;
    usp[2] = xis[2] / rsp;
    j = 1352.5; //kg/s3 : solar "constant" at 1 AU
//  j = 1359.4; //kg/s3
    c1 = j / C * 1 * 1; //kg/s2/m*au*au : pressure at 1 AU, distances in AU
    f = c1 * AMR / rsp / rsp;       /* scale by area/mass and 1/r^2 */
    //kg/s2/m*au*au * m2 / kg / au / au = m/s2
    f = f / AU * 86400.0 * 86400.0; /* m/s^2 -> AU/day^2 */
    acc[0] = f * usp[0];
    acc[1] = f * usp[1];
    acc[2] = f * usp[2];
    return 0;
}
double accel_nbody (double *tjd, double *xic, double *fnt, double *fgr)
/* Third-body (n-body) perturbation on the spacecraft, Newtonian and
   relativistic parts, expressed relative to the central body CT.
   tjd : two-part Julian date; xic: spacecraft state relative to CT.
   fnt : out, Newtonian perturbation = acceleration of the spacecraft
         minus acceleration of the central body (tidal difference).
   fgr : out, same difference for the relativistic term.
   Always returns 0. */
{
    int n;
    short int ssbary = 11;          /* solar-system barycenter index */
    double xcb[6], xib[6], fnti[3], fntb[3], fgri[3], fgrb[3];
    /* central body w.r.t. the barycenter, and its acceleration */
    planet_ephemeris (tjd, CT, ssbary, &xcb[0], &xcb[3]);
    force_bcrs (tjd, xcb, CT, fntb, fgrb);
    for (n = 0; n <= 5; n++)
    {
        xib[n] = xic[n] + xcb[n];   /* spacecraft w.r.t. the barycenter */
    }
    /* spacecraft acceleration, excluding the central body's own pull */
    force_bcrs (tjd, xib, CT, fnti, fgri);
//  force_bcrs (tjd, xib, 99, fnti, fgri);
    for (n = 0; n <= 2; n++)
    {
        fnt[n] = fnti[n] - fntb[n];
        fgr[n] = fgri[n] - fgrb[n];
    }
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double force_bcrs (double *jd, double *xi, short int exclude,
    double *fnt, double *fgr)
/* Barycentric acceleration of a test particle from the 11 solar-system
   bodies (Mercury..Pluto, Moon, Sun): Newtonian part fnt and a
   relativistic point-mass correction fgr (appears to follow the
   EIH/PPN formulation with beta = gamma = 1 — TODO confirm against the
   reference used by the project).
   jd      : two-part Julian date; xi: barycentric state (AU, AU/day).
   exclude : body index whose direct term is skipped (e.g. the central
             body, so only third-body terms remain).
   Returns 1 always.
   NOTE(review): xjk/rij5/rij3/xijt/unit/flag_gr include several locals
   that are declared (and unit even initialized) but never used. */
{
    double xj[11][6], xij[11][6], rij[11], xjk[6], rjk,
        xddj[3], sumil, sumjk, sdi2, sdj2, rdirdj, rrrdr2, rjirdd, gm[11], GMDE[11],
        rij5, rij3, xijt[9], gra, grb, beta, gamma, unit[9],gm2de;
    short int ssbary, l, k, j, n, flag_gr;
    /* GM of the 11 bodies, m^3/s^2 (indices 0..8 planets, 9 Moon, 10 Sun) */
    gm[0] = 2.203208082807623e+13;
    gm[1] = 3.248586038641429e+14;
    gm[2] = 398600.44150E+09;
    gm[3] = 4.28283719012840e+13;
    gm[4] = 1.267127698227696e+17;
    gm[5] = 3.794062664949063e+16;
    gm[6] = 5.794549096929744e+15;
    gm[7] = 6.836534169987595e+15;
    gm[8] = 9.816009029289940e+11;
    gm[9] = 4.902801056E+12;
    gm[10] = 1.32712442076e20;
    gm2de = 86400.0 * 86400.0 / AU / AU / AU;   /* m^3/s^2 -> AU^3/day^2 */
    for (n = 0; n <= 10; n++)
        GMDE[n] = gm[n] * gm2de;
    ssbary = 11;
//  ssbary = 10;
    gamma = 1.0;    /* PPN parameters (general relativity) */
    beta = 1.0;
    unit[0] = 1; unit[1] = 0; unit[2] = 0;
    unit[3] = 0; unit[4] = 1; unit[5] = 0;
    unit[6] = 0; unit[7] = 0; unit[8] = 1;
    /* states of all bodies and the particle-to-body vectors/distances */
    for (j = 0; j <= 10; j++)
    {
        planet_ephemeris (jd, j, ssbary, &xj[j][0], &xj[j][3]);
        for (n = 0; n < 6; n++)
        {
            xij[j][n] = xi[n] - xj[j][n];
        }
        rij[j] = sqrt (xij[j][0] * xij[j][0]
            + xij[j][1] * xij[j][1] + xij[j][2] * xij[j][2]);
    }
    /* Newtonian sum, skipping the excluded body */
    for (n = 0; n < 3; n ++)
        fnt[n] = 0;
    for (j = 0; j <= 10; j++)
    {
        if (j == exclude)
            continue;
        for (n = 0; n < 3; n++)
            fnt[n] = fnt[n]
                - GMDE[j] / (rij[j] * rij[j] * rij[j]) * xij[j][n];
    }
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
    /* Relativistic correction: potential and velocity-dependent terms. */
    sdi2 = xi[3] * xi[3] + xi[4] * xi[4] + xi[5] * xi[5];   /* |v_i|^2 */
    sumil = 0;                      /* potential at the particle */
    for (l = 0; l < 11; l ++)
    {
        if ( l == exclude)
            continue;
        sumil = sumil + GMDE[l] / rij[l];
    }
    for (n = 0; n < 3; n ++)
        fgr[n] = 0;
    for (j = 0; j < 11; j ++)
    {
        if (j == exclude)
            continue;
        sumjk = 0;                  /* potential at body j from the others */
        for (n = 0; n < 3; n ++)
            xddj[n] = 0;            /* Newtonian acceleration of body j */
        for (k = 0; k < 11; k ++)
        {
            if (k == j)
                continue; //k!=j
            for (n = 0; n < 3; n++)
                xjk[n] = xj[j][n] - xj[k][n];
            rjk = sqrt (xjk[0] * xjk[0] + xjk[1] * xjk[1] + xjk[2] * xjk[2]);
            sumjk = sumjk + GMDE[k] / rjk;
            for (n = 0; n < 3; n ++)
                xddj[n] = xddj[n] - GMDE[k] / (rjk * rjk * rjk) * xjk[n];
        }
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
        sdj2 = xj[j][3] * xj[j][3] + xj[j][4] * xj[j][4]
            + xj[j][5] * xj[j][5];                          /* |v_j|^2 */
        rdirdj = xi[3] * xj[j][3] + xi[4] * xj[j][4] + xi[5] * xj[j][5];
        rrrdr2 = pow( ( xij[j][0] * xj[j][3] + xij[j][1] * xj[j][4]
            + xij[j][2] * xj[j][5]) / rij[j], 2);
        rjirdd = - ( xij[j][0] * xddj[0] + xij[j][1] * xddj[1]
            + xij[j][2] * xddj[2]);
        gra = - 2 * (beta + gamma) * sumil - (2 * beta -1) * sumjk
            + gamma * sdi2 + (1 + gamma) * sdj2
            - 2 * (1 + gamma) * rdirdj - 1.5 * rrrdr2 + 0.5 * rjirdd;
        grb = xij[j][0] * ((2+2*gamma) * xi[3] - (1+2*gamma) * xj[j][3])
            + xij[j][1] * ((2+2*gamma) * xi[4] - (1+2*gamma) * xj[j][4])
            + xij[j][2] * ((2+2*gamma) * xi[5] - (1+2*gamma) * xj[j][5]);
        /* all three correction terms carry the 1/c^2 factor */
        for (n = 0; n < 3; n ++)
        {
            fgr[n] = fgr[n]
                + GMDE[j] / (rij[j] * rij[j] * rij[j])
                * ( - xij[j][n]) * gra / C_AUDAY / C_AUDAY
                + GMDE[j] / (rij[j] * rij[j] * rij[j])
                * xij[j][n + 3] * grb / C_AUDAY / C_AUDAY
                + GMDE[j] / rij[j] * (3 + 4 * gamma) * 0.5
                * xddj[n] / C_AUDAY / C_AUDAY;
        }
    }
    return 1;
}
/* Euclidean norm |v| of a 3-vector. */
double modvect (double *v)
{
    double s2;
    s2 = v[0] * v[0];
    s2 = s2 + v[1] * v[1];
    s2 = s2 + v[2] * v[2];
    return sqrt (s2);
}
/* Dot product of two 3-vectors. */
double dotvect (double *v1, double *v2)
{
    int i;
    double acc = 0.0;
    for (i = 0; i < 3; i++)
        acc = acc + v1[i] * v2[i];
    return acc;
}
/* Cross product v = v1 x v2 of two 3-vectors; v must not alias inputs. */
void crsvect (double *v1, double *v2, double *v)
{
    double a0 = v1[0], a1 = v1[1], a2 = v1[2];
    double b0 = v2[0], b1 = v2[1], b2 = v2[2];
    v[0] = a1 * b2 - a2 * b1;
    v[1] = a2 * b0 - a0 * b2;
    v[2] = a0 * b1 - a1 * b0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* chosephase
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/* Recover the angle in [0, 2*pi) from its sine and cosine by quadrant.
   Fix: the second-quadrant test was `sv > 0`, so the boundary input
   (sin, cos) = (0, -1) fell through to the fourth-quadrant branch and
   returned 2*pi instead of pi; the test is now `sv >= 0`. */
double chosephase (double sinvalue, double cosvalue)
{
    double sv = sinvalue, cv = cosvalue;
    if (sv >= 0 && cv >= 0)                    /* 1st quadrant */
        return (asin (sv));
    if (sv >= 0 && cv < 0)                     /* 2nd quadrant (incl. sv==0 -> pi) */
        return (acos (cv));
    if (sv < 0 && cv < 0)                      /* 3rd quadrant */
        return ( - asin (sv) + TWOPI / 2.0);
    else                                       /* 4th quadrant */
        return (asin (sv) + TWOPI);
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/****************************************************************************/
/* */
/* Functions for Runge-Kutta integrator */
/* */
/* Version: 2009-9-8 */
/* */
/* Copyright (c) 2009 shangkun@shao.ac.cn All Right Reserved */
/* */
/****************************************************************************/
/*
Version: 2009-9-8
Version: 2009-9-13 integrate forwards & backwards
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double rkf78 (double jd, double t, double h, double *x, int dim,
    void (*fun)(int, double, double,double *,double *))
//  double (*fun)(double, double,double *,double *))
/*
purpose: single step of the Runge-Kutta-Fehlberg 7(8) integrator
         (13 stages; the 8th-order solution is used for the update)
input:   double jd   epoch passed through to the force function
         double t    integrate from t to t+h
         double h    integration step
         double *x   x(t)
         int dim     dim(x)
         void (*fun)(dim, jd, t, x, xdot)  right(force) function
output:  double *x   x(t+h)
return:  h (unchanged — step-size control is not implemented)
note:    the original carried a dead `do {...} while (flag == 1)` loop
         with `flag` always 0 and an unused embedded error estimate;
         both removed, behavior is identical.
*/
{
    int i, j, n;
    double *y, *k, *f, tn;
    /* Fehlberg 7(8) nodes a[], 8th-order weights c[], coupling b[][] */
    double a[13] = { 0, 2.0/27, 1.0/9, 1.0/6, 5.0/12, 1.0/2, 5.0/6, 1.0/6,
        2.0/3, 1.0/3, 1.0, 0, 1.0 };
    double c[13] = { 0, 0, 0, 0, 0, 34.0/105, 9.0/35, 9.0/35, 9.0/280,
        9.0/280, 0, 41.0/840, 41.0/840 };
    double b[13][12] =
    {
        {0},
        {2.0/27},
        {1.0/36,1.0/12},
        {1.0/24,0,1.0/8},
        {5.0/12,0,-25.0/16,25.0/16},
        {1.0/20,0,0,1.0/4,1.0/5},
        {-25.0/108,0,0,125.0/108,-65.0/27,125.0/54},
        {31.0/300,0,0,0,61.0/225,-2.0/9,13.0/900},
        {2.0,0,0,-53.0/6,704.0/45,-107.0/9,67.0/90,3.0},
        {-91.0/108,0,0,23.0/108,-976.0/135,311.0/54,-19.0/60,17.0/6,-1.0/12},
        {2383.0/4100,0,0,-341.0/164,4496.0/1025,-301.0/82,2133.0/4100,
            45.0/82,45.0/164,18.0/41},
        {3.0/205,0,0,0,0,-6.0/41,-3.0/205,-3.0/41,3.0/41,6.0/41},
        {-1777.0/4100,0,0,-341.0/164,4496.0/1025,-289.0/82,2193.0/4100,
            51.0/82,33.0/164,12.0/41,0,1.0}
    };
    y = (double *) calloc (dim, sizeof(double));
    k = (double *) calloc (dim*13, sizeof(double));
    f = (double *) calloc (dim, sizeof(double));
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
    /* evaluate the 13 stage derivatives k[n][i] */
    for (i = 0; i <= 12; i++)
    {
        tn = t + a[i] * h;
        for (n = 0; n <= dim - 1; n++)
        {
            y[n] = x[n];
            for (j = 0; j <= i-1; j++)
                y[n] = y[n] + h * b[i][j] * k[n*13+j];
        }
        fun (dim, jd, tn, y, f);
//      fun (jd, tn, y, f);
        for (n = 0; n <= dim - 1; n++)
        {
            k[n*13+i] = f[n];
        }
    }
    /* 8th-order update */
    for (n = 0; n <= dim - 1; n++)
    {
        for (i = 0; i <= 12; i++)
            x[n] = x[n] + h * c[i] * k[n*13+i];
    }
    free (y);
    free (f);
    free (k);
    return h;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* mt -
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/* Matrix transpose: b (n x m) = a^T, with a stored row-major as m x n. */
void mt (double *a, int m, int n, double *b)
{
    int row, col;
    for (col = 0; col < n; col++)
    {
        for (row = 0; row < m; row++)
            b[col * m + row] = a[row * n + col];
    }
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* brmul -
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/* Matrix product c (m x k) = a (m x n) * b (n x k), all row-major.
   c must not alias a or b. */
void brmul (double *a, double *b, int m,int n, int k,double *c)
{
    int row, col, t;
    double acc;
    for (row = 0; row < m; row++)
    {
        for (col = 0; col < k; col++)
        {
            acc = 0.0;
            for (t = 0; t < n; t++)
                acc = acc + a[row * n + t] * b[t * k + col];
            c[row * k + col] = acc;
        }
    }
}
/* Project the vector xyz onto the radial/transverse/normal (RTN) axes
   defined by the position x and velocity v.
   R: unit radial; N: (R x v)/|v|; T: N x R.  Note N (and hence T) is a
   unit vector only when v is perpendicular to x — same convention as
   the original implementation. */
void xyz2rtn(double *x, double *v, double *xyz, double *rtn)
{
    double rmag, vmag, er[3], en[3], et[3];
    int i;
    rmag = sqrt(x[0]*x[0] + x[1]*x[1] + x[2]*x[2]);
    vmag = sqrt(v[0]*v[0] + v[1]*v[1] + v[2]*v[2]);
    /* unit vector in the R direction */
    for (i = 0; i < 3; i++)
        er[i] = x[i] / rmag;
    /* N direction: (R x v), scaled by 1/|v| */
    en[0] = (er[1]*v[2] - er[2]*v[1]) / vmag;
    en[1] = (er[2]*v[0] - er[0]*v[2]) / vmag;
    en[2] = (er[0]*v[1] - er[1]*v[0]) / vmag;
    /* T direction: N x R */
    et[0] = en[1]*er[2] - en[2]*er[1];
    et[1] = en[2]*er[0] - en[0]*er[2];
    et[2] = en[0]*er[1] - en[1]*er[0];
    /* components of xyz along R, T, N */
    rtn[0] = xyz[0]*er[0] + xyz[1]*er[1] + xyz[2]*er[2];
    rtn[1] = xyz[0]*et[0] + xyz[1]*et[1] + xyz[2]*et[2];
    rtn[2] = xyz[0]*en[0] + xyz[1]*en[1] + xyz[2]*en[2];
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* lagrange interpolation order = 6, 2*order points
* @param1: description of param1
* @param2: description of param2
* todo
order = input parameter
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/* Lagrange interpolation on a tabulated multi-column ephemeris.
   y     : table of dim_y rows, dim_x columns; column 0 is the abscissa
           (ascending), columns 1..dim_x-1 are the interpolated values.
   dim_y : number of rows; dim_x: number of columns.
   t     : evaluation abscissa; z: out, dim_x-1 interpolated values.
   Uses up to 2*order points centered on t, clamped to the table ends
   (order is hard-coded to 8 — TODO make it an input parameter).
   Returns 0 always.
   Fix: the bracketing loop tested `y[i*dim_x] < t` BEFORE `i < dim_y`,
   reading one row past the end of the table whenever t exceeds the last
   abscissa; the bounds check now comes first. */
double lagrange (double *y, int dim_y, int dim_x, double t, double *z)
{
    int i, j, k, m, dim, order = 8;
    double s;
    /* find the first row whose abscissa is >= t (bounds-checked) */
    i = 0;
    while ((i < dim_y) && (y[i * dim_x] < t))
        i = i + 1;
    /* window of at most 2*order rows, clamped to the table */
    k = i - order;
    if (k < 0)
        k = 0;
    m = i + order - 1;
    if (m > dim_y - 1)
        m = dim_y - 1;
    for (dim = 0; dim < dim_x - 1; dim++)
    {
        z[dim] = 0;
    }
    /* classical Lagrange basis accumulation over rows k..m */
    for (i = k; i <= m; i++)
    {
        s = 1.0;
        for (j = k; j <= m; j++)
        {
            if (j != i)
            {
                s = s * (t - y[j * dim_x]) / (y[i * dim_x] - y[j * dim_x]);
            }
        }
        for (dim = 0; dim < dim_x - 1; dim++)
        {
            z[dim] = z[dim] + s * y[i * dim_x + dim + 1];
        }
    }
    return 0;
}
double obs_alt (double jd, double utc, double *obs, int part, double *bmat)
/* Altimetry-type observable: geocentric range minus the reference radius,
   plus the observation bias model BASB + BAST*utc.
   jd/utc : epoch (utc in seconds of day past JD0 — presumably; TODO
            confirm against callers).
   obs    : out, computed observable.
   part   : 0 -> observable only; 1 -> also fill the partials row bmat
            (layout: [0..5] d(obs)/d(state0) via the state transition
            matrix, then MOBS observation-model columns, then MDYN
            dynamical-parameter columns).
   Returns the spacecraft height h = |xc2| - RCT.
   Interpolates the stored orbit OR_EPH with lagrange(); when part==1 the
   table rows also carry the 6x6 STM and the 6xMDYN sensitivity matrix.
   NOTE(review): dodpp is declared but never used; the early return at
   MEST==0 cannot leak because MEST==0 implies MOBS==MDYN==0 and then
   nothing was allocated. */
{
    int n, lps, i;
    double r, h, ref, *eph, *dxdp, *dodpo, *dodpd, *dodpp, xc2[6], dxdx0[36], dodx[6],
        dodx0[6], tt, tjd[2], xsc[6], dx[3];
    ref = RCT; //should be topography height
    lps = getlps (JD0 + utc/86400.0);
    tt = utc + (lps + 32.184);   /* UTC -> TT seconds (leap + TT-TAI) */
//  tjd[0] = JD0; tjd[1] = tt / 86400.0;
//  get_ephemeris (tjd, 2, CT, xsc);
    if (part == 0)
    {
        /* state-only rows: 1 abscissa + 6 state columns */
        lagrange (OR_EPH, DIM_OR, 7, tt, xc2);
    }
    if (part == 1)
    {
        /* rows additionally carry the 36 STM and 6*MDYN sensitivity cols */
        eph = (double *) calloc (42 + 6 * MDYN, sizeof(double));
        lagrange (OR_EPH, DIM_OR, 42 + 6 * MDYN + 1, tt, eph);
        for (n = 0; n < 6; n++)
            xc2[n] = eph[n];
        for (n = 0; n < 36; n++)
            dxdx0[n] = eph[n + 6];
        if (MDYN > 0)
        {
            dxdp = (double *) calloc (6 * MDYN, sizeof(double));
            dodpd = (double *) calloc (MDYN, sizeof(double));
            for (n = 0; n < 6 * MDYN; n++)
                dxdp[n] = eph[n + 42];
        }
        if (MOBS > 0)
            dodpo = (double *) calloc (MOBS, sizeof(double));
        free (eph);
    }
    h = modvect(xc2) - RCT;
    for (n = 0; n < 3; n++)
        dx[n] = xc2[n];
    r = modvect(dx);
    *obs = r - ref + BASB + BAST * utc;   /* range - reference + bias model */
    if (part == 0)
        return h;
    /* d(obs)/d(state): unit position vector; no velocity dependence */
    for (n = 0; n < 3; n++)
    {
        dodx[n] = (xc2[n])/r;
        dodx[n + 3] = 0;
    }
    brmul (dodx, dxdx0, 1, 6, 6, dodx0);   /* chain through the STM */
    for (n = 0; n < 6; n++)
        bmat[n] = dodx0[n];
    if (MEST == 0)
        return h;
    /* observation-model partials: bias (1) and drift (utc) */
    i = 0;
    if (MOBS > 0)
    {
        dodpo[i] = 1;
        i++;
    }
    if (MOBS > 1)
    {
        dodpo[i] = utc;
        i++;
    }
    if (MDYN > 0)
    {
        brmul (dodx, dxdp, 1, 6, MDYN, dodpd);   /* chain through sensitivities */
    }
    if (MOBS > 0)
        for (n = 0; n < MOBS; n++)
            bmat[6 + n] = dodpo[n];
    if (MDYN > 0)
        for (n = 0; n < MDYN; n++)
            bmat[6 + MOBS + n] = dodpd[n];
    if (MDYN > 0)
    {
        free (dxdp);
        free (dodpd);
    }
    if (MOBS > 0)
        free (dodpo);
    return h;
}
double obs_vel (double jd, double utc, double *obs, int part, double *bmat)
/* Velocity-magnitude observable: |v_sc - v_ref| where v_ref comes from
   get_ephemeris (body CT), plus the bias model BASB + BAST*utc.
   Same structure, partial layout and table conventions as obs_alt; only
   the observable and the d(obs)/d(state) row (velocity components,
   position zero) differ.  Returns the height h = |xc2| - RCT.
   NOTE(review): dodpp is declared but never used. */
{
    int n, lps, i;
    double v, h, *eph, *dxdp, *dodpo, *dodpd, *dodpp, xc2[6], dxdx0[36], dodx[6],
        dodx0[6], tt, tjd[2], xsc[6], dv[3];
    lps = getlps (JD0 + utc/86400.0);
    tt = utc + (lps + 32.184);   /* UTC -> TT seconds */
    tjd[0] = JD0; tjd[1] = tt / 86400.0;
    get_ephemeris (tjd, 2, CT, xsc);   /* reference state to difference against */
    if (part == 0)
    {
        lagrange (OR_EPH, DIM_OR, 7, tt, xc2);
    }
    if (part == 1)
    {
        /* rows carry state + 6x6 STM + 6*MDYN sensitivity columns */
        eph = (double *) calloc (42 + 6 * MDYN, sizeof(double));
        lagrange (OR_EPH, DIM_OR, 42 + 6 * MDYN + 1, tt, eph);
        for (n = 0; n < 6; n++)
            xc2[n] = eph[n];
        for (n = 0; n < 36; n++)
            dxdx0[n] = eph[n + 6];
        if (MDYN > 0)
        {
            dxdp = (double *) calloc (6 * MDYN, sizeof(double));
            dodpd = (double *) calloc (MDYN, sizeof(double));
            for (n = 0; n < 6 * MDYN; n++)
                dxdp[n] = eph[n + 42];
        }
        if (MOBS > 0)
            dodpo = (double *) calloc (MOBS, sizeof(double));
        free (eph);
    }
    h = modvect(xc2) - RCT;
    for (n = 0; n < 3; n++)
        dv[n] = xc2[n + 3] - xsc[n + 3];   /* relative velocity */
    v = modvect(dv);
    *obs = v + BASB + BAST * utc;
    if (part == 0)
        return h;
    /* d(obs)/d(state): unit relative-velocity vector on the velocity part */
    for (n = 0; n < 3; n++)
    {
        dodx[n] = 0;
        dodx[n + 3] = (xc2[n + 3] - xsc[n + 3])/v;
    }
    brmul (dodx, dxdx0, 1, 6, 6, dodx0);
    for (n = 0; n < 6; n++)
        bmat[n] = dodx0[n];
    if (MEST == 0)
        return h;
    i = 0;
    if (MOBS > 0)
    {
        dodpo[i] = 1;      /* bias partial */
        i++;
    }
    if (MOBS > 1)
    {
        dodpo[i] = utc;    /* drift partial */
        i++;
    }
    if (MDYN > 0)
    {
        brmul (dodx, dxdp, 1, 6, MDYN, dodpd);
    }
    if (MOBS > 0)
        for (n = 0; n < MOBS; n++)
            bmat[6 + n] = dodpo[n];
    if (MDYN > 0)
        for (n = 0; n < MDYN; n++)
            bmat[6 + MOBS + n] = dodpd[n];
    if (MDYN > 0)
    {
        free (dxdp);
        free (dodpd);
    }
    if (MOBS > 0)
        free (dodpo);
    return h;
}
double obs_dsn (double jd, double utc, double *obs, int part, double *bmat)
/* Range observable to a reference point: |r_sc - r_ref| where r_ref
   comes from get_ephemeris (body CT), plus the bias model
   BASB + BAST*utc.  Same structure, partial layout and ephemeris-table
   conventions as obs_alt/obs_vel; the d(obs)/d(state) row is the unit
   line-of-sight vector on the position part, zero on velocity.
   Returns the height h = |xc2| - RCT.
   NOTE(review): dodpp is declared but never used. */
{
    int n, lps, i;
    double r, h, *eph, *dxdp, *dodpo, *dodpd, *dodpp, xc2[6], dxdx0[36], dodx[6],
        dodx0[6], tt, tjd[2], xsc[6], dx[3];
    lps = getlps (JD0 + utc/86400.0);
    tt = utc + (lps + 32.184);   /* UTC -> TT seconds */
    tjd[0] = JD0; tjd[1] = tt / 86400.0;
    get_ephemeris (tjd, 2, CT, xsc);   /* reference position */
    if (part == 0)
    {
        lagrange (OR_EPH, DIM_OR, 7, tt, xc2);
    }
    if (part == 1)
    {
        /* rows carry state + 6x6 STM + 6*MDYN sensitivity columns */
        eph = (double *) calloc (42 + 6 * MDYN, sizeof(double));
        lagrange (OR_EPH, DIM_OR, 42 + 6 * MDYN + 1, tt, eph);
        for (n = 0; n < 6; n++)
            xc2[n] = eph[n];
        for (n = 0; n < 36; n++)
            dxdx0[n] = eph[n + 6];
        if (MDYN > 0)
        {
            dxdp = (double *) calloc (6 * MDYN, sizeof(double));
            dodpd = (double *) calloc (MDYN, sizeof(double));
            for (n = 0; n < 6 * MDYN; n++)
                dxdp[n] = eph[n + 42];
        }
        if (MOBS > 0)
            dodpo = (double *) calloc (MOBS, sizeof(double));
        free (eph);
    }
    h = modvect(xc2) - RCT;
    for (n = 0; n < 3; n++)
        dx[n] = xc2[n] - xsc[n];   /* line-of-sight vector */
    r = modvect(dx);
    *obs = r + BASB + BAST * utc;
    if (part == 0)
        return h;
    /* d(obs)/d(state): unit line-of-sight on the position part */
    for (n = 0; n < 3; n++)
    {
        dodx[n] = (xc2[n] - xsc[n])/r;
        dodx[n + 3] = 0;
    }
    brmul (dodx, dxdx0, 1, 6, 6, dodx0);
    for (n = 0; n < 6; n++)
        bmat[n] = dodx0[n];
    if (MEST == 0)
        return h;
    i = 0;
    if (MOBS > 0)
    {
        dodpo[i] = 1;      /* bias partial */
        i++;
    }
    if (MOBS > 1)
    {
        dodpo[i] = utc;    /* drift partial */
        i++;
    }
    if (MDYN > 0)
    {
        brmul (dodx, dxdp, 1, 6, MDYN, dodpd);
    }
    if (MOBS > 0)
        for (n = 0; n < MOBS; n++)
            bmat[6 + n] = dodpo[n];
    if (MDYN > 0)
        for (n = 0; n < MDYN; n++)
            bmat[6 + MOBS + n] = dodpd[n];
    if (MDYN > 0)
    {
        free (dxdp);
        free (dodpd);
    }
    if (MOBS > 0)
        free (dodpo);
    return h;
}
/* Derive the solve-for dimensioning globals from the per-class parameter
   counts (MOBS observation-model, MSRP solar-pressure, MTK2 tidal k2,
   MGCS gravity-coefficient parameters). */
void getsolvefor ()
{
//  MOBS = 0; //2;
//  MSRP = 0; //2;
//  MTK2 = 0; //1;
//  MGCS = 1; //6;
    MDYN = MSRP + MTK2 + MGCS;   /* dynamical params (sensitivity-matrix cols) */
    MSOL = 6 + MOBS + MDYN;      /* full solve-for vector incl. 6 state elems */
    MEST = MOBS + MDYN;          /* estimated params beyond the state */
    MSTA = 42 + 6 * MDYN;        /* variational-state row length (6+36+6*MDYN) */
/*
    if (MGCS > 0)
    {
        CSinfo = (CSStruct *) calloc ( MGCS, sizeof(CSStruct));
        CSinfo[0].n = 2; CSinfo[0].m = 0; CSinfo[0].cs = 0;
//      CSinfo[1].n = 3; CSinfo[1].m = 0; CSinfo[1].cs = 0;
    }
*/
    return;
}
void initsolvefor (double *xsm, double *x)
/* Assemble the initial solve-for vector x from the initial state xsm and
   the current global parameter values, in the fixed order:
   [state(6)] [BASB BAST] [SRPB SRPT] [K2] [MGCS gravity coefficients].
   For gravity coefficients, also APPLIES the a-priori perturbation
   CSinfo[k].initv to the global coefficient array COEFG (side effect).
   ic/is index the packed C(n,m)/S(n,m) slots of COEFG (same packing as
   cs2ada); CSinfo[k].cs = 1 selects C, -1 selects S. */
{
    int i, k, n, m, ind, l, ic, is, label;
    for (k = 0; k < 6; k ++)
    {
        x[k] = xsm[k];
    }
    i = 6;   /* next free slot of x */
    if (MOBS > 0)
    {
        x[i] = BASB;   /* observation bias */
        i++;
    }
    if (MOBS > 1)
    {
        x[i] = BAST;   /* observation bias drift */
        i++;
    }
    if (MSRP > 0)
    {
        x[i] = SRPB;   /* solar-pressure scale */
        i++;
    }
    if (MSRP > 1)
    {
        x[i] = SRPT ;  /* solar-pressure drift */
        i++;
    }
    if (MTK2 > 0)
    {
        x[i] = K2;     /* tidal Love number */
        i++;
    }
    if (MGCS > 0)
        for (k = 0; k < MGCS; k ++)
        {
            n = CSinfo[k].n; m = CSinfo[k].m; label = CSinfo[k].cs;
            if (m == 0)
            {
                /* zonal coefficients are packed at the front of COEFG */
                x[i] = COEFG[n] + CSinfo[k].initv;
                COEFG[n] = x[i];
            }
            else
            {
                l = NMAX - m + 1;
                ind = NMAX + 1 + (2 * NMAX - m + 2) * (m - 1);
                ic = ind + n - m;
                is = ind + n - m + l;
                if (label == 1)
                {
                    x[i] = COEFG[ic] + CSinfo[k].initv;
                    COEFG[ic] = x[i];
                }
                if (label == -1)
                {
                    x[i] = COEFG[is] + CSinfo[k].initv;
                    COEFG[is] = x[i];
                }
            }
            i++;
        }
    return;
}
void updsolvefor (double *x)
/* Write the estimated solve-for vector x back into the global parameter
   set — inverse of initsolvefor's packing (state elements x[0..5] are
   NOT consumed here).  Order: [BASB BAST] [SRPB SRPT] [K2] then the
   MGCS gravity coefficients into COEFG (cs = 1 -> C(n,m), -1 -> S(n,m),
   same packed indexing as cs2ada/initsolvefor). */
{
    int i, k, n, m, ind, l, ic, is, label;
    i = 6;   /* skip the 6 state elements */
    if (MOBS > 0)
    {
        BASB = x[i];
        i++;
    }
    if (MOBS > 1)
    {
        BAST = x[i];
        i++;
    }
    if (MSRP > 0)
    {
        SRPB = x[i];
        i++;
    }
    if (MSRP > 1)
    {
        SRPT = x[i];
        i++;
    }
    if (MTK2 > 0)
    {
        K2 = x[i];
        i++;
    }
    if (MGCS > 0)
        for (k = 0; k < MGCS; k ++)
        {
            n = CSinfo[k].n; m = CSinfo[k].m; label = CSinfo[k].cs;
            if (m == 0)
            {
                COEFG[n] = x[i];   /* zonal */
            }
            else
            {
                l = NMAX - m + 1;
                ind = NMAX + 1 + (2 * NMAX - m + 2) * (m - 1);
                ic = ind + n - m;
                is = ind + n - m + l;
                if (label == 1)
                {
                    COEFG[ic] = x[i];
                }
                if (label == -1)
                {
                    COEFG[is] = x[i];
                }
            }
            i++;
        }
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* simula_phase - simulate total phase count observable
* @param1: description of param1
* @param2: description of param2
* todo:
1 one-way doppler deltat accumlated error
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double simula_phase (double utc3, double utc0, double *station3,
    short int uplink, double *station1, short int genrel,
    double *calculable, double *azimuth, double *elevation,
    short int part, double *bmat)
/* Simulate a total-phase-count (integrated range) observable between
   utc0 and utc3 by differencing two light-time solutions, with the bias
   model BIAS + DBIA*utc3 added.
   uplink==0 -> one-way doppler: apply the proper-time (TDB) correction
   from delta_tdb.
   part==1 -> bmat receives the differenced partials from the two
   ltsolution calls; bmat[8]/bmat[9] are then overwritten with the
   bias/drift partials (1, utc3) — presumably MOBS==2 with the bias
   block at offset 8; TODO confirm the hard-coded indices.
   Always returns 0. */
{
    double txice[7], txics[7], *bmats, *bmate, deltat;
    int n;
    real128 lts2[3], lte2[3];   /* extended precision for the light times */
    bmats = (double *) calloc ( SLOVEFOR, sizeof(double));
    bmate = (double *) calloc ( SLOVEFOR, sizeof(double));
    /* light-time solutions at the start and end epochs */
    ltsolution (utc0, station3, uplink, station1, genrel, lts2,
        azimuth, elevation, part, bmats, txics);
    ltsolution (utc3, station3, uplink, station1, genrel, lte2,
        azimuth, elevation, part, bmate, txice);
    *calculable = (lte2[2] - lts2[2]) * C;   /* delta light-time -> range units */
    if (uplink == 0) //one-way doppler deltat, time correction
    {
        delta_tdb (txice, txics, &deltat);
        *calculable = *calculable + deltat * (txice[0] - txics[0]) * C;
    }
    if (part == 1)
    {
        for (n = 0; n < 6 + DYNPAR; n++)
        {
            bmat[n] = bmate[n] - bmats[n];
        }
        bmat[8] = 1;
        bmat[9] = utc3;
    }
    *calculable = *calculable + BIAS + DBIA * utc3;
    free (bmats);
    free (bmate);
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* simula_dople - simulate doppler observable
* @param1: description of param1
* @param2: description of param2
* todo:
1 one-way doppler deltat accumlated error
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double simula_dople (double utc3, double tc, double *station3,
                     short int uplink, double *station1, short int genrel,
                     double *calculable, double *azimuth, double *elevation,
                     short int part, double *bmat)
{
    /* Simulate an averaged doppler observable over the count interval
     * [utc3 - tc/2, utc3 + tc/2] centred on the tag time utc3:
     * differenced light time divided by the count time, scaled by C.
     * (The former local dop_old - the lt[1]-based rate - was computed
     * but never used; it has been removed as dead code.) */
    double txice[7], txics[7], *bmats, *bmate, deltat;
    double dop_new;
    int n;
    real128 lts2[3], lte2[3], dlt;
    /* scratch partial-derivative rows at the two interval ends */
    bmats = (double *) calloc ( SLOVEFOR, sizeof(double));
    bmate = (double *) calloc ( SLOVEFOR, sizeof(double));
    ltsolution (utc3 + tc / 2, station3, uplink, station1, genrel, lte2,
                azimuth, elevation, part, bmate, txice);
    ltsolution (utc3 - tc / 2, station3, uplink, station1, genrel, lts2,
                azimuth, elevation, part, bmats, txics);
    /* averaged range rate: d(light time)/dt * C, in extended precision */
    dlt = lte2[2] - lts2[2];
    dop_new = (double) (dlt / (real128)tc * (real128)C);
    *calculable = dop_new;
    if (uplink == 0) //one-way doppler deltat, proper time correction
    {
        delta_tdb (txice, txics, &deltat);
        *calculable = *calculable + deltat * C;
    }
    if (part == 1)
    {
        /* partials: differenced rows divided by the count time */
        for (n = 0; n < 6 + DYNPAR; n++)
        {
            bmat[n] = (bmate[n] - bmats[n]) / tc;
        }
        bmat[8] = 1;     /* bias partial */
        bmat[9] = utc3;  /* bias-drift partial */
    }
    *calculable = *calculable + BIAS + DBIA * utc3;
    free (bmats);
    free (bmate);
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* delta_tdb - one-way doppler deltat, proper time correction
* txice: [0]: satellite TDB time(s), [1]~[7]satellite coordinates(AU, day)
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double delta_tdb (double *txice, double *txics, double *deltat)
{
    /* One-way doppler proper-time correction: integrate the rate
     * difference returned by delta_iid between the two transmit
     * epochs with a trapezoid + first-derivative quadrature.
     * txice/txics: [0] = TDB epoch (s), [1..6] = CENTER-relative
     * satellite state (AU, AU/day) at the end / start epoch. */
    double ie, is, t, ied, isd, tjde[2], tjds[2], xcbe[6], xcbs[6],
           xibe[6], xibs[6];
    short int ssbary, n;
    ssbary = 11;                 /* solar-system barycentre index */
    t = txice[0] - txics[0];     /* elapsed TDB seconds */
    tjde[0] = JD0;
    tjde[1] = txice[0] / 86400.0;
    tjds[0] = JD0;
    tjds[1] = txics[0] / 86400.0;
    /* barycentric satellite state at the end epoch */
    planet_ephemeris (tjde, CENTER, ssbary, &xcbe[0], &xcbe[3]);
    for (n = 0; n < 6; n++)
        xibe[n] = txice[n + 1] + xcbe[n];
    /* barycentric satellite state at the start epoch.
     * BUG FIX: this previously added the END-epoch offset xcbe[n]
     * instead of the start-epoch offset xcbs[n] just computed. */
    planet_ephemeris (tjds, CENTER, ssbary, &xcbs[0], &xcbs[3]);
    for (n = 0; n < 6; n++)
        xibs[n] = txics[n + 1] + xcbs[n];
    delta_iid (tjde, xibe, &ie, &ied);
    delta_iid (tjds, xibs, &is, &isd);
    /* trapezoidal mean rate with derivative correction term */
    *deltat = 1.0 / 2.0 * (ie + is) * t - 1.0 / 12.0 * (ied - isd) * t * t;
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * delta_iid - one-way light-time proper time correction rate
* txice: [0]: satellite TDB time(s), [1]~[7]satellite coordinates(AU, day)
* @param2: description of param2
* todo:
1 Uobl, time correction due to non-spherical potential
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double delta_iid (double *jd, double *xi, double *ii, double *id)
{
    /* Rate of (proper time vs. TDB) for a body at barycentric state xi,
     * and its time derivative:
     *   ii = (U + v^2/2)/c^2 - L,   id = d(ii)/dt
     * where U sums the Newtonian potentials of the enabled bodies.
     * jd: two-part Julian date; xi: barycentric state (AU, AU/day). */
    double uu, vv, L, ud, vd,
           rdd[3], xj[11][6], xij[11][6], rij[11], rijd[11];
    short int ssbary, j, n;
    ssbary = 11;            /* solar-system barycentre index */
    L = 1.550520e-8;        /* scaling constant (value matches IAU L_B) */
    uu = 0;                 /* potential accumulator */
    ud = 0;                 /* potential-rate accumulator */
    for (n = 0; n < 3; n ++)
        rdd[n] = 0;         /* Newtonian acceleration accumulator */
    for (j = 0; j <= 10; j++)
    {
        if (PERB[j] == 0)   /* body j disabled as perturber */
            continue;
        planet_ephemeris (jd, j, ssbary, &xj[j][0], &xj[j][3]);
        for (n = 0; n < 6; n++)
        {
            xij[j][n] = xi[n] - xj[j][n];
        }
        rij[j] = sqrt (xij[j][0] * xij[j][0]
                + xij[j][1] * xij[j][1] + xij[j][2] * xij[j][2]);
        /* range rate to body j: (r . v) / |r| */
        rijd[j] = (xij[j][0] * xij[j][3] + xij[j][1] * xij[j][4]
                + xij[j][2] * xij[j][5]) / rij[j];
        uu = uu + GMDE[j] / rij[j];
        ud = ud - GMDE[j] / rij[j] / rij[j] * rijd[j];
        for (n = 0; n < 3; n++)
            rdd[n] = rdd[n]
                - GMDE[j] / (rij[j] * rij[j] * rij[j]) * xij[j][n];
    }
    vv = xi[3] * xi[3] + xi[4] * xi[4] + xi[5] * xi[5];   /* v^2 */
    vd = 2.0 * (xi[3] * rdd[0] + xi[4] * rdd[1] + xi[5] * rdd[2]);
    *ii = (uu + vv / 2.0) / C_AUDAY / C_AUDAY - L;
    /* derivative converted from per-day to per-second */
    *id = (ud + vd / 2.0) / C_AUDAY / C_AUDAY / 86400.0;
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * simula_range - simulate range observable
* @param1: description of param1
* @param2: description of param2
* todo:
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double simula_range (double utc3, double *station3, short int uplink,
                     double *station1, short int genrel,
                     double *calculable, double *azimuth, double *elevation,
                     short int part, double *bmat)
{
    /* Simulate a range observable at receive time utc3: a single
     * light-time solution converted to a distance-type measurement. */
    double sat_state[7];
    real128 lt_sol[3];
    ltsolution (utc3, station3, uplink, station1, genrel, lt_sol,
                azimuth, elevation, part, bmat, sat_state);
    if (uplink == 0)
    {
        /* one-way range: geometric distance (near-earth use only) */
        *calculable = (double) lt_sol[0];
    }
    else if (uplink == 1)
    {
        /* two/three-way range: round-trip light time scaled by C */
        *calculable = (double) (lt_sol[2] * (real128)C);
    }
    if (part == 1)
    {
        bmat[8] = 1;       /* bias partial (DBIAS) */
        bmat[9] = utc3;    /* bias-drift partial */
    }
    *calculable = *calculable + BIAS + DBIA * utc3;
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* ltsolution - light time solution
* @param:
utc3 : unit: day;
station3 : receive station;
uplink : no uplink == 0; yes uplink == 1
station1 : transmit station;
genrel : no general relativity correction == 0; yes == 1
calculable : ;
azimuth : ;
elevation : ;
partial : no partial == 0 ; yes partial == 1
bmat : (partial == 0: satellite coordinates(6), partial == 1: partial)
txic : satellite coordinates(t2)
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double ltsolution (double utc_3, double *station3, short int uplink,
                   double *station1, short int genrel, real128 *lt,
                   double *azimuth, double *elevation, short int part,
                   double *bmat, double *txic)
{
    /* Light-time solution.  Iterates the down-leg (satellite t2 ->
     * station t3) and, for uplink == 1, the up-leg (station t1 ->
     * satellite t2) until the light time converges to taoerr.
     * Outputs: lt[0] distance, lt[1] clock-difference term, lt[2]
     * light time (s); bmat = state/partials at t2; txic = satellite
     * epoch+state at t2; azimuth/elevation of the satellite. */
    double re3fi[3], re3[3], re1fi[3], re1[3], vec[3], xe3[6], re3n[3],
           xe1[6], re1n[3], xc2[6],
           secdiff,
           ra, dec, zd, az, secdiff3, secdiff1,
           utc_1, ut1_3, ut1_1, tt_3, tt_1, tdb_3, tdb_2, tdb_1,
           ut1_utc, xp, yp, xp3, yp3, dx, dy, delta_t3, t, elong, u, v,
           dxdx0[36], dxdp[6 * DYNPAR], dodx[6], dodp[DYNPAR],
           dodx0[6], dodpp[DYNPAR], eph[42 + 6 * DYNPAR],
           te[9], llh3[3], llh1[3];
    real128 tao231, tao232, tao121, tao122, taoerr, r23, r12, xb3[6], xb2[6], xb1[6];
    int n, flag, dim_par;
    taoerr = 1.0e-12L; //1nanosec;
    // taoerr = 1.0e-8L; //1nanosec;
    /* width of one row of the augmented ephemeris (transition matrix
     * 36 + state 6 + sensitivity 6*DYNPAR + time column) */
    dim_par = 42 + 6 * DYNPAR + 1;
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*--light time iteration 2 -> 3 --*/
    re3fi[0] = station3[0];
    re3fi[1] = station3[1];
    re3fi[2] = station3[2];
    xyz2llh(re3fi, llh3);
    elong = llh3[1] * DEG2RAD;      /* east longitude (rad) for iauDtdb */
    u = sqrt (re3fi[0] * re3fi[0] + re3fi[1] * re3fi[1]) / 1000.0;
    v = re3fi[2] / 1000.0;          /* iauDtdb wants km */
/*--time scales transformation --*/
    geteop (utc_3, &xp3, &yp3, &ut1_utc, &dx, &dy);
    delta_t3 = 32.184 + LEAPSECS - ut1_utc;   /* TT - UT1 at t3 */
    ut1_3 = utc_3 + ut1_utc;
    tt_3 = utc_3 + (LEAPSECS + 32.184);
    secdiff3 = iauDtdb (JD0, tt_3 / 86400.0, ut1_3 / 86400.0, elong, u, v);
    tdb_3 = tt_3 + secdiff3;
/*--station coordinate interpolation--*/
    /* rotate the earth-fixed station vector to inertial at utc_3 and
     * utc_3 + 1 s; the difference approximates the station velocity */
    lagrange (TE_EPH, DIM_TE, 10, utc_3, te);
    brmul (te, re3fi, 3, 3, 1, re3);
    lagrange (TE_EPH, DIM_TE, 10, utc_3 + 1.0, te);
    brmul (te, re3fi, 3, 3, 1, re3n);
    for (n = 0; n < 3; n++)
    {
        xe3[n] = re3[n] / AU;                      /* AU */
        xe3[n + 3] = (re3n[n] - re3[n]) / AU * 86400;   /* AU/day */
    }
/*--satellite coordinate interpolation--*/
    if (part == 0)
    {
        lagrange (OR_EPH, DIM_OR, 7, tdb_3, xc2);
    }
    if (part == 1)
    {
        /* augmented ephemeris row: state lives at offset 36 */
        lagrange (OR_EPH, DIM_OR, dim_par, tdb_3, eph);
        for (n = 0; n < 6; n++)
            xc2[n] = eph[n + 36];
    }
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*--iteration--*/
    /* first guess: light time from satellite state taken at t3 */
    r23 = lt_form (tdb_3, tdb_3, xe3, xc2, genrel, xb3, xb2);
    tao231 = (real128)r23 * (real128)AU_SEC;
    tdb_2 = tdb_3 - (double) tao231;
    flag = -1;
    do
    {
        flag++;
        tao232 = tao231;
        /* re-interpolate the satellite at the updated bounce time t2 */
        if (part == 0)
        {
            lagrange (OR_EPH, DIM_OR, 7, tdb_2, xc2);
        }
        if (part == 1)
        {
            lagrange (OR_EPH, DIM_OR, dim_par, tdb_2, eph);
            for (n = 0; n < 6; n++)
                xc2[n] = eph[n + 36];
        }
        r23 = lt_form (tdb_3, tdb_2, xe3, xc2, genrel, xb3, xb2);
        tao231 = (real128)r23 * (real128)AU_SEC;
        tdb_2 = tdb_3 - (double) tao231;
    }while (fabsl (tao232-tao231) > taoerr);
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*--light time iteration 1 -> 2--*/
    if (uplink == 1)
    {
        re1fi[0] = station1[0];
        re1fi[1] = station1[1];
        re1fi[2] = station1[2];
        xyz2llh(re1fi, llh1);
        elong = llh1[1] * DEG2RAD;
        u = sqrt (re1fi[0] * re1fi[0] + re1fi[1] * re1fi[1]) / 1000.0;
        v = re1fi[2] / 1000.0;
/*--time scales transformation --*/
        /* initial guess for transmit time: t1 = t2; then refine UTC via
         * the approximate tdb2tt followed by the full iauDtdb */
        tdb_1 = tdb_2; //unit: s
        tdb2tt (JD0 + tdb_1 / 86400.0, &t, &secdiff);
        tt_1 = tdb_1 - secdiff;
        utc_1 = tt_1 - (LEAPSECS + 32.184);
        geteop (utc_1, &xp, &yp, &ut1_utc, &dx, &dy);
        ut1_1 = utc_1 + ut1_utc;
        secdiff1 = iauDtdb (JD0, tdb_1 / 86400.0, ut1_1 / 86400.0,
                elong, u, v);
        tt_1 = tdb_1 - secdiff1;
        utc_1 = tt_1 - (LEAPSECS + 32.184);
/*--station coordinate interpolation--*/
        lagrange (TE_EPH, DIM_TE, 10, utc_1, te);
        brmul (te, re1fi, 3, 3, 1, re1);
        lagrange (TE_EPH, DIM_TE, 10, utc_1 + 1.0, te);
        brmul (te, re1fi, 3, 3, 1, re1n);
        for (n = 0; n < 3; n++)
        {
            xe1[n] = re1[n] / AU;
            xe1[n + 3] = (re1n[n] - re1[n]) / AU * 86400;
        }
        r12 = lt_form (tdb_1, tdb_2, xe1, xc2, genrel, xb1, xb2);
        tao121 = (real128)r12 * (real128)AU_SEC;
        tdb_1 = tdb_2 - (double) tao121;
/*--iteration--*/
        flag = -1;
        do
        {
            flag++;
            tao122 = tao121;
/*--time scales transformation --*/
            tdb2tt (JD0 + tdb_1 / 86400.0, &t,&secdiff);
            tt_1 = tdb_1 - secdiff;
            utc_1 = tt_1 - (LEAPSECS + 32.184);
            geteop (utc_1, &xp, &yp, &ut1_utc, &dx, &dy);
            ut1_1 = utc_1 + ut1_utc;
            secdiff1 = iauDtdb (JD0, tdb_1 / 86400.0, ut1_1 / 86400.0,
                    elong, u, v);
            tt_1 = tdb_1 - secdiff1;
            utc_1 = tt_1 - (LEAPSECS + 32.184);
/*--station coordinate interpolation--*/
            /* transmit-station position moves with each new t1 guess */
            lagrange (TE_EPH, DIM_TE, 10, utc_1, te);
            brmul (te, re1fi, 3, 3, 1, re1);
            lagrange (TE_EPH, DIM_TE, 10, utc_1 + 1.0, te);
            brmul (te, re1fi, 3, 3, 1, re1n);
            for (n = 0; n < 3; n++)
            {
                xe1[n] = re1[n] / AU;
                xe1[n + 3] = (re1n[n] - re1[n]) / AU * 86400;
            }
            r12 = lt_form (tdb_1, tdb_2, xe1, xc2, genrel, xb1, xb2);
            tao121 = (real128)r12 * (real128)AU_SEC;
            tdb_1 = tdb_2 - (double) tao121;
        }while (fabsl (tao122-tao121) > taoerr);
    }
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*-- partial time: tdb2 --*/
    if (part == 0)
    {
        /* no partials requested: bmat receives the satellite state */
        lagrange (OR_EPH, DIM_OR, 7, tdb_2, bmat);
    }
    if (part == 1)
    {
        /* chain rule: d(obs)/dx0 = d(obs)/dx * dx/dx0, and likewise
         * for the dynamic parameters via the sensitivity matrix */
        lagrange (OR_EPH, DIM_OR, dim_par, tdb_2, eph);
        for (n = 0; n < 36; n++)
            dxdx0[n] = eph[n];
        for (n = 0; n < 6 * DYNPAR; n++)
            dxdp[n] = eph[n + 42];
        lt_part (xb3, xb2, xb1, uplink, dodx, dodp);
        brmul (dodx, dxdx0, 1, 6, 6, dodx0);
        brmul (dodx, dxdp, 1, 6, DYNPAR, dodpp);
        for (n = 0; n < DYNPAR; n++)
            dodp[n] = dodpp[n] + dodp[n];
        for (n = 0; n < 3; n++)
        {
            bmat[n] = dodx0[n];
            bmat[n + 3] = dodx0[n + 3] * 86400.0;   /* per-day -> per-s scale */
        }
        bmat[6] = dodp[0] * AU; // l/c: au,
        bmat[7] = dodp[1] * AU * 86400.0; // l/(c*d-1): au*d,
    }
    /* export bounce epoch and satellite state at t2 */
    txic[0] = tdb_2;
    for (n = 1; n < 7; n++)
    {
        txic[n] = xc2[n - 1];
    }
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*--calculate light time observable --*/
    if (uplink == 0)
    {
        lt[0] = (real128)r23 * (real128)AU;
        lt[1] = ((real128)utc_3 - (real128)tdb_2) * (real128)C;
        lt[2] = (real128)tao231 - (real128)secdiff3 - (real128)(LEAPSECS + 32.184);
    }
    if (uplink == 1)
    {
        lt[0] = ((real128)r12 + (real128)r23) * (real128)AU;
        lt[1] = ((real128)utc_3 - (real128)utc_1) * (real128)C;
        lt[2] = (real128)tao231 + (real128)tao121 + (real128)secdiff1 - (real128)secdiff3;
        // lt[2] = tao231 + tao121 + secdiff1 - secdiff3;
    }
    /* pointing: topocentric direction to the satellite at t2 */
    for (n = 0; n < 3; n++)
        vec[n] = xb2[n] - xb3[n];
    vector2radec (vec, &ra,&dec);
    azelev (ut1_3 / 86400.0 + JD0, delta_t3, ACCURACY,
            xp3, yp3, llh3, ra, dec, &zd, &az);
    *azimuth = az;
    *elevation = 90.0 - zd;
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* lt_part - partial of light time
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double lt_part (real128 *xb3, real128 *xb2, real128 *xb1, int uplink,
                double *dodx, double *dodp)
{
    /* Partials of the light-time observable with respect to the
     * satellite state at the bounce epoch (dodx[0..5]) and the bias
     * parameters (dodp[0..1], filled in by the caller, zeroed here). */
    double dist23, dist12, dot23, dot12, fac2, fac1, rr12, rfac1,
           dt2dx[3], dt1dx[3];
    int k;
    /* down-leg geometry: satellite (xb2) -> receive station (xb3) */
    dist23 = sqrt ((xb2[0] - xb3[0]) * (xb2[0] - xb3[0])
            + (xb2[1] - xb3[1]) * (xb2[1] - xb3[1])
            + (xb2[2] - xb3[2]) * (xb2[2] - xb3[2]));
    dot23 = ((xb3[0] - xb2[0]) * xb2[3] + (xb3[1] - xb2[1]) * xb2[4]
            + (xb3[2] - xb2[2]) * xb2[5]) / dist23;
    fac2 = (1 - dot23 / C_AUDAY);   /* down-leg aberration factor */
    for (k = 0; k < 3; k++)
        dt2dx[k] = (xb3[k] - xb2[k]) / dist23 / fac2;
    if (uplink == 0)
    {
        /* one-way: observable partial is minus the down-leg partial */
        for (k = 0; k < 3; k++)
            dodx[k] = - dt2dx[k];
    }
    if (uplink == 1)
    {
        /* up-leg geometry: transmit station (xb1) -> satellite (xb2) */
        dist12 = sqrt ((xb2[0] - xb1[0]) * (xb2[0] - xb1[0])
                + (xb2[1] - xb1[1]) * (xb2[1] - xb1[1])
                + (xb2[2] - xb1[2]) * (xb2[2] - xb1[2]));
        dot12 = ((xb2[0] - xb1[0]) * xb1[3] + (xb2[1] - xb1[1]) * xb1[4]
                + (xb2[2] - xb1[2]) * xb1[5]) / dist12;
        fac1 = (1 - dot12 / C_AUDAY);
        rr12 = ((xb2[0] - xb1[0]) * (xb2[3] - xb1[3])
                + (xb2[1] - xb1[1]) * (xb2[4] - xb1[4])
                + (xb2[2] - xb1[2]) * (xb2[5] - xb1[5])) / dist12;
        rfac1 = (1 - (rr12 + dot12) / C_AUDAY);
        /* chain the down-leg partial through the up-leg epoch shift */
        for (k = 0; k < 3; k++)
            dt1dx[k] = (dt2dx[k] * rfac1 - (xb2[k] - xb1[k]) / dist12) / fac1;
        for (k = 0; k < 3; k++)
            dodx[k] = - dt1dx[k];
    }
    /* no direct velocity or bias dependence here */
    dodx[3] = 0;
    dodx[4] = 0;
    dodx[5] = 0;
    dodp[0] = 0; //
    dodp[1] = 0; //
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* lt_form - calculate the light time equation
* @param:
tdb3, re3[3] : station time, coordinates (unit:AU)
tdb2, rp2[3] : satellite time, coordinates (AU)
genrel :
*rs3 : output: station coordinates to SSB (AU)
*rs2 : output: satellite coordinates to SSB (AU)
return : light time solution (AU)
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
real128 lt_form (double tdb3, double tdb2, double *re3, double *rp2,
                 int genrel, real128 *rs3, real128 *rs2)
{
    /* Evaluate one leg of the light-time equation between a station at
     * earth-relative state re3 (AU, epoch tdb3) and the spacecraft at
     * CENTER-relative state rp2 (AU, epoch tdb2).  Outputs the
     * sun-relative states rs3/rs2 and returns the distance in AU,
     * adding the Shapiro relativistic delay when genrel == 1.
     * (A duplicate formation of the distance via differenced ephemeris
     * vectors - r23/rlt - was dead code whose result was immediately
     * overwritten; it has been removed.) */
    double gamma, tjd2[2], tjd3[2], re[3], ve[3],
           rp[3], vp[3], xe[6], xp[6];
    real128 rlight, rgen, ri, rj, r12, rse[3], rsp[3];
    short int earth = 2, sun = 10, j;
    int n;
    gamma = 1;               /* PPN gamma (general-relativity value) */
    tjd2[0] = JD0;
    tjd3[0] = JD0;
    tjd2[1] = tdb2 / 86400.0;
    tjd3[1] = tdb3 / 86400.0;
    /* sun-relative station state at tdb3 */
    planet_ephemeris (tjd3, earth, sun, &xe[0], &xe[3]);
    for (n = 0; n < 6; n++)
        rs3[n] = (real128)re3[n] + (real128)xe[n];
    ri = sqrtl (rs3[0] * rs3[0] + rs3[1] * rs3[1] + rs3[2] * rs3[2]);
    /* sun-relative spacecraft state at tdb2 */
    planet_ephemeris (tjd2, CENTER, sun, &xp[0], &xp[3]);
    for (n = 0; n < 6; n++)
        rs2[n] = (real128)rp2[n] + (real128)xp[n];
    rj = sqrtl (rs2[0] * rs2[0] + rs2[1] * rs2[1] + rs2[2] * rs2[2]);
    /* geometric distance station(t3) -> spacecraft(t2), in AU */
    rlight = sqrtl ((rs3[0] - rs2[0]) * (rs3[0] - rs2[0])
            + (rs3[1] - rs2[1]) * (rs3[1] - rs2[1])
            + (rs3[2] - rs2[2]) * (rs3[2] - rs2[2]));
    if (genrel == 1)
    {
        /* Shapiro delay of the Sun (GMDE[10]), with the small
         * (1+gamma)GM/c^2 regularisation term in the log argument */
        rgen = (1L + (real128)gamma) * (real128)GMDE[10] / (real128)C_AUDAY / (real128)C_AUDAY
            * logl ((ri + rj + rlight
                        + (1L + (real128)gamma) * (real128)GMDE[10] / (real128)C_AUDAY / (real128)C_AUDAY)
                    / (ri + rj - rlight
                        + (1L + (real128)gamma) * (real128)GMDE[10] / (real128)C_AUDAY / (real128)C_AUDAY));
        rlight = rlight + rgen;
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
        /* Shapiro delay of bodies 0..9, standard logarithmic form */
        for (j = 0; j <= 9; j++)
        {
            planet_ephemeris (tjd3, earth, j, re, ve);
            for (n = 0; n < 3; n++)
                rse[n] = (real128)re3[n] + (real128)re[n];
            ri = sqrtl (rse[0] * rse[0] + rse[1] * rse[1] + rse[2] * rse[2]);
            planet_ephemeris (tjd2, CENTER, j, rp, vp);
            for (n = 0; n < 3; n++)
                rsp[n] = (real128)rp2[n] + (real128)rp[n];
            rj = sqrtl (rsp[0] * rsp[0] + rsp[1] * rsp[1] + rsp[2] * rsp[2]);
            r12 = sqrtl((rse[0] - rsp[0]) * (rse[0] - rsp[0])
                    + (rse[1] - rsp[1]) * (rse[1] - rsp[1])
                    + (rse[2] - rsp[2]) * (rse[2] - rsp[2]));
            rgen = (1L + (real128)gamma) * (real128)GMDE[j] / (real128)C_AUDAY / (real128)C_AUDAY
                * logl ((ri + rj + r12 ) / (ri + rj - r12));
            rlight = rlight + rgen;
        }
    }
    return rlight;
}
void azelev (double jd_ut1, double delta_t, short int accuracy,
             double x, double y, double *llh, double ra,
             double dec, double *zd, double *az)
{
    /* Convert a topocentric (ra, dec) direction to zenith distance and
     * azimuth for a site at geodetic llh (deg), rotating local basis
     * vectors to the celestial frame with ter2cel.
     * ra in hours, dec in degrees; outputs zd/az in degrees.
     * BUG FIX: *az was only written when proj > 0, yet read by the
     * wrap-around checks below - an uninitialized/stale output when
     * the object is exactly at zenith or nadir.  It now defaults to 0
     * (the NOVAS equ2hor convention). */
    double sinlat, coslat, sinlon, coslon, sindc, cosdc, sinra, cosra,
           uze[3], une[3], uwe[3], uz[3], un[3], uw[3], p[3], pz, pn, pw,
           proj;
/*
   Preliminaries.
*/
    sinlat = sin (llh[0] * DEG2RAD);
    coslat = cos (llh[0] * DEG2RAD);
    sinlon = sin (llh[1] * DEG2RAD);
    coslon = cos (llh[1] * DEG2RAD);
    sindc = sin (dec * DEG2RAD);
    cosdc = cos (dec * DEG2RAD);
    sinra = sin (ra * 15.0 * DEG2RAD);   /* ra given in hours */
    cosra = cos (ra * 15.0 * DEG2RAD);
/*
   Set up orthonormal basis vectors in local Earth-fixed system.
   Define vector toward local zenith in Earth-fixed system (z axis).
*/
    uze[0] = coslat * coslon;
    uze[1] = coslat * sinlon;
    uze[2] = sinlat;
/*
   Define vector toward local north in Earth-fixed system (x axis).
*/
    une[0] = -sinlat * coslon;
    une[1] = -sinlat * sinlon;
    une[2] = coslat;
/*
   Define vector toward local west in Earth-fixed system (y axis).
*/
    uwe[0] = sinlon;
    uwe[1] = -coslon;
    uwe[2] = 0.0;
/*
   Obtain vectors in celestial system.
   Rotate Earth-fixed orthonormal basis vectors to celestial system
   (wrt equator and equinox of date).
*/
    ter2cel (jd_ut1,0.0,delta_t,1,accuracy,1,x,y,uze, uz);
    ter2cel (jd_ut1,0.0,delta_t,1,accuracy,1,x,y,une, un);
    ter2cel (jd_ut1,0.0,delta_t,1,accuracy,1,x,y,uwe, uw);
/*
   Define unit vector 'p' toward object in celestial system
   (wrt equator and equinox of date).
*/
    p[0] = cosdc * cosra;
    p[1] = cosdc * sinra;
    p[2] = sindc;
/*
   Compute coordinates of object wrt orthonormal basis.
   Compute components of 'p' - projections of 'p' onto rotated
   Earth-fixed basis vectors.
*/
    pz = p[0] * uz[0] + p[1] * uz[1] + p[2] * uz[2];
    pn = p[0] * un[0] + p[1] * un[1] + p[2] * un[2];
    pw = p[0] * uw[0] + p[1] * uw[1] + p[2] * uw[2];
/*
   Compute azimuth and zenith distance.
*/
    proj = sqrt (pn * pn + pw * pw);
    *az = 0.0;    /* default azimuth when the object is at zenith/nadir */
    if (proj > 0.0)
        *az = -atan2 (pw, pn) * RAD2DEG;
    if (*az < 0.0)
        *az += 360.0;
    if (*az >= 360.0)
        *az -= 360.0;
    *zd = atan2 (proj, pz) * RAD2DEG;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* fun_pointmass - abandoned
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double fun_pointmass (double tdbs, double *x, double *f)
{
    /* (abandoned) Point-mass two-body right-hand side about CENTER:
     * Newtonian acceleration plus the Schwarzschild (one-body PPN)
     * relativistic correction.  x = state (pos, vel); f = derivative. */
    double rnorm, v2, rdotv, coef_r, coef_v;
    int k, gam;
    gam = 1;                      /* PPN gamma */
    /* position rates are the velocities */
    f[0] = x[3];
    f[1] = x[4];
    f[2] = x[5];
    rnorm = sqrt (x[0]*x[0]+x[1]*x[1]+x[2]*x[2]);
    v2 = x[3] * x[3] + x[4] * x[4] + x[5] * x[5];     /* speed^2 */
    rdotv = x[0] * x[3] + x[1] * x[4] + x[2] * x[5];  /* r . v */
    coef_r = 2 * (1 + gam) * GMDE[CENTER] / rnorm - gam * v2;
    coef_v = 2 * (1 + gam) * rdotv;
    for (k = 0; k < 3; k++)
    {
        /* Newtonian term plus PPN correction, accumulated per axis */
        f[3 + k] = - GMDE[CENTER] / (rnorm*rnorm*rnorm) * x[k]
            + GMDE[CENTER] / C_AUDAY / C_AUDAY / rnorm / rnorm / rnorm
            * ( coef_r * x[k] + coef_v * x[k+3] );
    }
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* fun_fullaccel -
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double fun_fullaccel (double tdbs, double *xic, double *fxic)
{
    /* Right-hand side of the orbit equations: total acceleration from
     * the point-mass/relativistic, non-spherical and radiation-pressure
     * models.  xic = state (pos, vel); fxic = its time derivative.
     * (Removed the unused local ssbary.) */
    int n;
    short int part = 0;     /* accelerations only, no partials */
    double tjd[2], acc1[3], acc2[3], acc3[3], acc[3], dum1[1], dum2[1];
    tjd[0] = JD0;
    tjd[1] = tdbs;
    accel_ntrel (tjd, xic, part, acc1, dum1, dum2);
    accel_nonsp (tjd, xic, part, acc2, dum1, dum2);
    accel_radpr (tjd, xic, part, acc3, dum1, dum2);
    for (n = 0; n <= 2; n++)
    {
        acc[n] = acc1[n] + acc2[n] + acc3[n];
    }
    fxic[0] = xic[3];
    fxic[1] = xic[4];
    fxic[2] = xic[5];
    fxic[3] = acc[0];
    fxic[4] = acc[1];
    fxic[5] = acc[2];
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* fun_fullstate -transition matrix(36), orbit(6), sensitivity matrix(6*DYNPAR)
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double fun_fullstate (double tdbs, double *state, double *fstate)
{
    /* Right-hand side of the augmented variational equations.
     * state layout: [0..35] transition matrix dx/dx0 (6x6 row-major),
     * [36..41] orbit state, [42..] sensitivity matrix dx/dp (6xDYNPAR).
     * fstate receives the derivative of each part.
     * (Removed the unused local ssbary.) */
    int n;
    short int part = 1;     /* request partials from the force models */
    double tjd[2], xic[6], dfdx[36], dxdx0[36], dfdp[6 * DYNPAR],
           dfdpp[6 * DYNPAR], dxdp[6 * DYNPAR],
           acc1[3], dadr1[9], dadp1[3 * DYNPAR],
           acc2[3], dadr2[9], dadp2[3 * DYNPAR],
           acc3[3], dadr3[9], dadp3[3 * DYNPAR],
           acc[3], dadr[9], dadp[3 * DYNPAR],
           fxic[6], fdxdx0[36], fdxdp[6 * DYNPAR];
    tjd[0] = JD0;
    tjd[1] = tdbs;
    /* unpack the augmented state */
    for (n = 0; n < 36; n++)
    {
        dxdx0[n] = state[n];
    }
    for (n = 0; n < 6; n++)
    {
        xic[n] = state[n + 36];
    }
    for (n = 0; n < 6 * DYNPAR; n++)
    {
        dxdp[n] = state[n + 42];
    }
/* acc, partial to xyz: dadr, partial to parameters dadp*/
    accel_ntrel (tjd, xic, part, acc1, dadr1, dadp1);
    accel_nonsp (tjd, xic, part, acc2, dadr2, dadp2);
    accel_radpr (tjd, xic, part, acc3, dadr3, dadp3);
/*todo: air drag acc & partial to vxvyvz dadv*/
    /* sum accelerations and their partials over the force models */
    for (n = 0; n <= 2; n++)
    {
        acc[n] = acc1[n] + acc2[n] + acc3[n];
    }
    for (n = 0; n <= 8; n++)
    {
        dadr[n] = dadr1[n] + dadr2[n] + dadr3[n];
    }
    for (n = 0; n < 3 * DYNPAR; n++)
    {
        dadp[n] = dadp1[n] + dadp2[n] + dadp3[n];
    }
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
    /* build A = df/dx: identity blocks for velocity, da/dr below */
    for (n = 0; n < 36; n++)
    {
        dfdx[n] = 0;
    }
    dfdx[3] = 1;
    dfdx[10] = 1;
    dfdx[17] = 1;
    for (n = 0; n < 3; n++)
    {
        dfdx[n + 18] = dadr[n];
        dfdx[n + 24] = dadr[n + 3];
        dfdx[n + 30] = dadr[n + 6];
    }
    /* d(dx/dx0)/dt = A * dx/dx0 */
    brmul(dfdx, dxdx0, 6, 6, 6, fdxdx0);
    fxic[0] = xic[3];
    fxic[1] = xic[4];
    fxic[2] = xic[5];
    fxic[3] = acc[0];
    fxic[4] = acc[1];
    fxic[5] = acc[2];
    /* d(dx/dp)/dt = A * dx/dp + df/dp */
    brmul(dfdx, dxdp, 6, 6, DYNPAR, dfdpp);
    for (n = 0; n < 3 * DYNPAR; n++)
    {
        dfdp[n] = 0;
        dfdp[n + 3 * DYNPAR] = dadp[n];
    }
    for (n = 0; n < 6 * DYNPAR; n++)
    {
        fdxdp[n] = dfdpp[n] + dfdp[n];
    }
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
    /* repack into the output vector */
    for (n = 0; n < 36; n++)
    {
        fstate[n] = fdxdx0[n];
    }
    for (n = 0; n < 6; n++)
    {
        fstate[n + 36] = fxic[n];
    }
    for (n = 0; n < 6 * DYNPAR; n++)
    {
        fstate[n + 42]= fdxdp[n];
    }
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* accel_ntrel - Newtonian + Relativistic acceleration
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double accel_ntrel (double *tjd, double *xic, short int part,
                    double *acc, double *dadr, double *dadp)
{
    /* Newtonian + relativistic acceleration of the satellite relative
     * to CENTER, formed by differencing the barycentric accelerations
     * of the satellite and of the central body. */
    int i;
    short int barycenter = 11;
    double center_bc[6], center_acc[3], sat_bc[6], sat_acc[3],
           grad_sat[9], scratch[9], dp_center[3*DYNPAR], dp_sat[3*DYNPAR];
    /* barycentric state and acceleration of the central body */
    planet_ephemeris (tjd, CENTER, barycenter, &center_bc[0], &center_bc[3]);
    accel_bcrs (tjd, center_bc, part, CENTER, center_acc, scratch, dp_center);
    /* barycentric satellite state = center-relative + center */
    for (i = 0; i < 6; i++)
        sat_bc[i] = xic[i] + center_bc[i];
    /* exclude = 99 keeps every body in the satellite's force sum */
    accel_bcrs (tjd, sat_bc, part, 99, sat_acc, grad_sat, dp_sat);
    for (i = 0; i < 3; i++)
        acc[i] = sat_acc[i] - center_acc[i];
    if (part == 1)
    {
        for (i = 0; i < 9; i++)
            dadr[i] = grad_sat[i];
        for (i = 0; i < 3 * DYNPAR; i++)
            dadp[i] = dp_sat[i] - dp_center[i];
    }
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* accel_bcrs - Newtonian + Relativistic acceleration
* @param1: description of param1
* @param2: description of param2
* todo:
1 partial to parameters
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double accel_bcrs (double *jd, double *xi, short int part, short int exclude,
                   double *acc, double *dadr, double *dadp)
{
    /* Barycentric acceleration of a body at state xi from the n-body
     * Newtonian sum plus (when any PERB[j] == 2) the EIH-type
     * relativistic point-mass terms.  'exclude' removes one body from
     * the force sum (pass 99 to keep all).  part == 1 also fills the
     * Newtonian gradient dadr (3x3) and zero-initialised dadp. */
    double fnt[3], fgr[3], xj[11][6], xij[11][6], rij[11], xjk[6], rjk,
           xddj[3], sumil, sumjk, sdi2, sdj2, rdirdj, rrrdr2, rjirdd,
           rij5, rij3, xijt[9], gra, grb, beta, gamma, unit[9];
    short int ssbary, l, k, j, n, flag_gr;
    ssbary = 11;                /* solar-system barycentre index */
    gamma = 1.0;                /* PPN parameters at GR values */
    beta = 1.0;
    unit[0] = 1; unit[1] = 0; unit[2] = 0;
    unit[3] = 0; unit[4] = 1; unit[5] = 0;
    unit[6] = 0; unit[7] = 0; unit[8] = 1;
    /* barycentric states and separations to all 11 bodies */
    for (j = 0; j <= 10; j++)
    {
        planet_ephemeris (jd, j, ssbary, &xj[j][0], &xj[j][3]);
        for (n = 0; n < 6; n++)
        {
            xij[j][n] = xi[n] - xj[j][n];
        }
        rij[j] = sqrt (xij[j][0] * xij[j][0]
                + xij[j][1] * xij[j][1] + xij[j][2] * xij[j][2]);
    }
    /* Newtonian sum; PERB[j] == 2 additionally requests relativity */
    flag_gr = 0;
    for (n = 0; n < 3; n ++)
        fnt[n] = 0;
    for (j = 0; j <= 10; j++)
    {
        if (PERB[j] == 2)
            flag_gr = 1;
        if (PERB[j] == 0)
            continue;
        if (j == exclude)
            continue;
        for (n = 0; n < 3; n++)
            fnt[n] = fnt[n]
                - GMDE[j] / (rij[j] * rij[j] * rij[j]) * xij[j][n];
    }
    if (part == 1)
    {
        /* Newtonian gradient: sum_j GM_j (3 r r^T / r^5 - I / r^3) */
        for (n = 0; n <= 3 * DYNPAR - 1; n++)
        {
            dadp[n] = 0;
        }
        for (n = 0; n <= 8; n++)
        {
            dadr[n] = 0;
        }
        for (j = 0; j <= 10; j++)
        {
            if (j == exclude)
                continue;
            rij5 = pow (rij[j], 5);
            rij3 = pow (rij[j], 3);
            brmul (xij[j], xij[j], 3,1,3, xijt);   /* outer product r r^T */
            for (n = 0; n <= 8; n++)
            {
                dadr[n] = dadr[n] + 3 * GMDE[j] * xijt[n] / rij5
                    - GMDE[j] * unit[n] / rij3;
            }
        }
    }
    if (flag_gr == 0)
    {
        /* no relativistic bodies requested: Newtonian only */
        for (n = 0; n < 3; n++)
            acc[n] = fnt[n];
        return 0;
    }
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
    /* EIH-type relativistic terms over bodies with PERB == 2 */
    sdi2 = xi[3] * xi[3] + xi[4] * xi[4] + xi[5] * xi[5];   /* v_i^2 */
    sumil = 0;       /* sum_l GM_l / r_il (potential at body i) */
    for (l = 0; l < 11; l ++)
    {
        if ( l == exclude)
            continue;
        if (PERB[l] != 2)
            continue;
        sumil = sumil + GMDE[l] / rij[l];
    }
    for (n = 0; n < 3; n ++)
        fgr[n] = 0;
    for (j = 0; j < 11; j ++)
    {
        if (PERB[j] != 2)
            continue;
        if (j == exclude)
            continue;
        /* potential at body j (sumjk) and its Newtonian acceleration
         * (xddj) from the other relativistic bodies k != j */
        sumjk = 0;
        for (n = 0; n < 3; n ++)
            xddj[n] = 0;
        for (k = 0; k < 11; k ++)
        {
            if (k == j)
                continue; //k!=j
            if (PERB[k] != 2)
                continue;
            for (n = 0; n < 3; n++)
                xjk[n] = xj[j][n] - xj[k][n];
            rjk = sqrt (xjk[0] * xjk[0] + xjk[1] * xjk[1] + xjk[2] * xjk[2]);
            sumjk = sumjk + GMDE[k] / rjk;
            for (n = 0; n < 3; n ++)
                xddj[n] = xddj[n] - GMDE[k] / (rjk * rjk * rjk) * xjk[n];
        }
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
        /* velocity-dependent scalar factors of the EIH bracket */
        sdj2 = xj[j][3] * xj[j][3] + xj[j][4] * xj[j][4]
            + xj[j][5] * xj[j][5];
        rdirdj = xi[3] * xj[j][3] + xi[4] * xj[j][4] + xi[5] * xj[j][5];
        rrrdr2 = pow( ( xij[j][0] * xj[j][3] + xij[j][1] * xj[j][4]
                    + xij[j][2] * xj[j][5]) / rij[j], 2);
        rjirdd = - ( xij[j][0] * xddj[0] + xij[j][1] * xddj[1]
                + xij[j][2] * xddj[2]);
        gra = - 2 * (beta + gamma) * sumil - (2 * beta -1) * sumjk
            + gamma * sdi2 + (1 + gamma) * sdj2
            - 2 * (1 + gamma) * rdirdj - 1.5 * rrrdr2 + 0.5 * rjirdd;
        grb = xij[j][0] * ((2+2*gamma) * xi[3] - (1+2*gamma) * xj[j][3])
            + xij[j][1] * ((2+2*gamma) * xi[4] - (1+2*gamma) * xj[j][4])
            + xij[j][2] * ((2+2*gamma) * xi[5] - (1+2*gamma) * xj[j][5]);
        for (n = 0; n < 3; n ++)
        {
            fgr[n] = fgr[n]
                + GMDE[j] / (rij[j] * rij[j] * rij[j])
                * ( - xij[j][n]) * gra / C_AUDAY / C_AUDAY
                + GMDE[j] / (rij[j] * rij[j] * rij[j])
                * xij[j][n + 3] * grb / C_AUDAY / C_AUDAY
                + GMDE[j] / rij[j] * (3 + 4 * gamma) * 0.5
                * xddj[n] / C_AUDAY / C_AUDAY;
        }
    }
    for (n = 0; n < 3; n++)
        acc[n] = fgr[n] + fnt[n];
    return 1;   /* NOTE: returns 1 on the relativistic path, 0 otherwise */
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* accel_radpr - solar radiation press & partial to srp coefficients
* @param1: description of param1
* @param2: description of param2
* todo:
1 earth shadow
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double accel_radpr (double *tjd, double *xic, short int part,
                    double *acc, double *dadr, double *dadp)
{
    /* Solar radiation-pressure acceleration on a flat-plate satellite
     * of area SATAREA and mass SATMASS, directed along the sun ->
     * satellite unit vector.  part == 1 also fills the position
     * gradient dadr; the parameter partials dadp are zeroed (the
     * CONS/DCON scale-factor partials are commented out below).
     * NOTE(review): no earth-shadow model (see header todo). */
    double j, c1, ap, m, rsp, usp[3], xis[6], xsc[6], f,
           xist[9], unit[9], rsp3;
    short int n, sun;
    sun = 10;
    unit[0] = 1; unit[1] = 0; unit[2] = 0;
    unit[3] = 0; unit[4] = 1; unit[5] = 0;
    unit[6] = 0; unit[7] = 0; unit[8] = 1;
    /* sun state relative to CENTER, then satellite relative to sun */
    planet_ephemeris (tjd, sun, CENTER, &xsc[0], &xsc[3]);
    for (n = 0; n <= 5; n++)
    {
        xis[n] = xic[n] - xsc[n];
    }
    rsp = sqrt (xis[0] * xis[0] + xis[1] * xis[1] + xis[2] * xis[2]);
    /* unit vector sun -> satellite */
    usp[0] = xis[0] / rsp;
    usp[1] = xis[1] / rsp;
    usp[2] = xis[2] / rsp;
    j = 1352.5; //kg/s3
    m = SATMASS; //kg
    ap = SATAREA; //m2
    c1 = j / C * 1 * 1; //kg/s2/m*au*au
    f = c1 * ap / m / rsp / rsp;
    //kg/s2/m*au*au * m2 / kg / au / au = m/s2
    /* convert m/s^2 -> AU/day^2 */
    f = f / AU * 86400.0 * 86400.0;
// acc[0] = f * usp[0] * (1 + CONS + DCON * tjd[1]);
// acc[1] = f * usp[1] * (1 + CONS + DCON * tjd[1]);
// acc[2] = f * usp[2] * (1 + CONS + DCON * tjd[1]);
    acc[0] = f * usp[0];
    acc[1] = f * usp[1];
    acc[2] = f * usp[2];
    if (part == 0)
        return 1;
    /* gradient: -f (3 r r^T / r^3 - I / r) in sun-relative coords */
    rsp3 = rsp * rsp * rsp;
    brmul (xis, xis, 3,1,3, xist);
    for (n = 0; n <= 8; n++)
        dadr[n] = - f * (3 * xist[n] / rsp3 - unit[n] / rsp) ;
//            * (1 + CONS + DCON * tjd[1]);
//    for (n = 0; n <= 2; n++)
//    {
//        dadp[n * DYNPAR] = f * usp[n];
//        dadp[n * DYNPAR + 1] = dadp[n * DYNPAR] * tjd[1];
//        dadp[n * DYNPAR] = 0;
//        dadp[n * DYNPAR + 1] = 0;
//    }
    for (n = 0; n <= 3 * DYNPAR - 1; n++)
    {
        dadp[n] = 0;
    }
    return 0;
}
// nmax = 4;
// stcs = (double *) calloc ( (nmax + 1) * (nmax + 1), sizeof(double));
double stidecs_old(double *tjd, double gma1, double k2,
        double *c20, double *c21, double *s21, double *c22, double *s22)
{
    /* Frequency-independent solid-tide corrections to the degree-2
     * gravity coefficients of the central body (Love number k2,
     * reference radius gma1), raised by the sun and the "moon" body.
     * NOTE(review): moon = 2 / earth = 9 swap the indices used in the
     * commented-out line below - presumably remapping the roles for a
     * non-Earth CENTER; confirm against the ephemeris body numbering.
     * NOTE(review): gmm2e is forced to 0, so the moon-raised terms are
     * deliberately disabled and only the solar tide contributes. */
    double gms2e;
    double gmm2e;
// short int moon = 9, earth = 2, sun = 10;
    short int moon = 2, earth = 9, sun = 10;
    double ps[3], vs[3], pm[3], vm[3],
           pse[3], pme[3], llrs[3], llrm[3], pbar[4], t,
           p20m, p30m, p21m, p31m, p22m, p32m, p33m,
           p20s, p30s, p21s, p31s, p22s, p32s, p33s,
           rerm, rers, tb[9], tbt[9];
// Luni-solar ephemeris
    planet_ephemeris (tjd, sun, CENTER, ps, vs);
    planet_ephemeris (tjd, moon, CENTER, pm, vm);
    /* rotate the raising bodies into the body-fixed frame and convert
     * to latitude / longitude / radius */
    iau_pns (tjd, tb, CENTER);
    mt (tb, 3, 3, tbt);
    brmul (tbt,ps,3,3,1,pse);
    brmul (tbt,pm,3,3,1,pme);
    xyz2llh(pse, llrs);
    xyz2llh(pme, llrm);
    /* associated Legendre values at the moon's latitude */
    t = sin(llrm[0] * DEG2RAD);
    lgdr(t, 3, 0, pbar); p20m = pbar[2]; p30m = pbar[3];
    lgdr(t, 3, 1, pbar); p21m = pbar[1]; p31m = pbar[2];
    lgdr(t, 3, 2, pbar); p22m = pbar[0]; p32m = pbar[1];
    lgdr(t, 3, 3, pbar); p33m = pbar[0];
    /* associated Legendre values at the sun's latitude */
    t = sin(llrs[0] * DEG2RAD);
    lgdr(t, 3, 0, pbar); p20s = pbar[2]; p30s = pbar[3];
    lgdr(t, 3, 1, pbar); p21s = pbar[1]; p31s = pbar[2];
    lgdr(t, 3, 2, pbar); p22s = pbar[0]; p32s = pbar[1];
    lgdr(t, 3, 3, pbar); p33s = pbar[0];
    /* mass ratios of the raising bodies to the central body */
    gms2e = GMDE[sun]/GMDE[CENTER];
// gmm2e = GMDE[moon]/GMDE[CENTER];
    gmm2e = 0;
    rerm = gma1 / llrm[2];   /* reference radius / body distance */
    rers = gma1 / llrs[2];
// Frequency Independent Terms
// C20
    *c20 = k2/5.0 * ( gmm2e * pow(rerm, 3) * p20m
            + gms2e * pow(rers, 3) * p20s );
// C21/S21
    *c21 = k2/5.0 * ( gmm2e * pow(rerm, 3) * p21m * cos(llrm[1] * DEG2RAD)
            + gms2e * pow(rers, 3) * p21s * cos(llrs[1] * DEG2RAD) );
    *s21 = k2/5.0 * ( gmm2e * pow(rerm, 3) * p21m * sin(llrm[1] * DEG2RAD)
            + gms2e * pow(rers, 3) * p21s * sin(llrs[1] * DEG2RAD) );
// C22/S22
    *c22 = k2/5.0 * ( gmm2e * pow(rerm, 3) * p22m * cos(llrm[1] * DEG2RAD * 2.0)
            + gms2e * pow(rers, 3) * p22s * cos(llrs[1] * DEG2RAD * 2.0) );
    *s22 = k2/5.0 * ( gmm2e * pow(rerm, 3) * p22m * sin(llrm[1] * DEG2RAD * 2.0)
            + gms2e * pow(rers, 3) * p22s * sin(llrs[1] * DEG2RAD * 2.0) );
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* accel_nonsp - non-spherical force
* @param1: description of param1
* @param2: description of param2
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double accel_nonsp (double *tjd, double *xic, short int part,
	double *acc, double *dadr, double *dadp)
{
    /* Non-spherical (harmonic) gravitational acceleration of the central
       body, plus (when part == 1) its partials with respect to position
       and to a small set of dynamical parameters.
         tjd  : two-part Julian date
         xic  : spacecraft position in the inertial frame
         part : 0 = acceleration only; 1 = also fill dadr and dadp
         acc  : output acceleration, inertial frame (3)
         dadr : output da/dr, row-major 3x3 (part == 1 only)
         dadp : output da/dp w.r.t. estimated parameters, 3 x DYNPAR
                layout (part == 1 only)
       Returns 1 on early exits (degree < 2 or part == 0 path), 0 otherwise.
       NOTE(review): spherical-harmonic arrays cn0/cnm/snm/gma are file
       globals loaded lazily on the first call; the numbered comments
       (e.g. //571) presumably refer to equations in an external
       derivation document - TODO confirm which one. */
    int n, m, i;
    static int flag = 0;  /* one-shot guard: gravity file loaded when == 9 */
//	static double *cn0, *cnm, *snm, gma[2];
    double xfc[3] = {0}, tb[9] = {0}, tbt[9] ={0}, rmat[9] = {0},
        gmat[9] = {0}, gmatt[9] = {0}, r, rxy, sinf, cosf, sinl, cosl,
        lamta, *pn, *pnm, *pnp, *pnmp, *pnpp, *pnmpp,
        frj[3] = {0}, frcs[3] = {0}, fr[3] = {0}, *cosml, *sinml, *aprn,
        peprp[27], pepr[27], prtpx[9], prtpy[9], prtpz[9], pgtpx[9],
        pgtpy[9], pgtpz[9], pgx[3], pgy[3], pgz[3], part1[9], prjpx[3],
        prjpy[3], prjpz[3], prcspx[3], prcspy[3], prcspz[3], prpr[9],
        gtpr[9], part2[9], prtpxx[9], prtpyy[9], prtpzz[9];
    double cunit, dfd2r[3], dfd2[3], dfdkr[3], dfdk[3], k2, c20, c21, s21, c22, s22,
        unit, nup, ndown;
    /* Zero the partial outputs before accumulating. */
    if (part == 1)
    {
        for (n = 0; n <= 3 * DYNPAR - 1; n++)
        {
            dadp[n] = 0;
        }
        for (n = 0; n <= 8; n++)
        {
            dadr[n] = 0;
        }
    }
    /* Nothing to do below degree 2. */
    if (GRAVDEGREE < 2)
    {
        for (n = 0; n <= 2; n++)
        {
            acc[n] = 0;
        }
        return 1;
    }
    if (flag != 9) //
    {
        /* First call: allocate and load the gravity-field coefficients.
           Intentionally never freed - they live for the whole run. */
        cn0 = (double *) calloc (GRAVDEGREE, sizeof(double));
        cnm = (double *) calloc (GRAVDEGREE * GRAVDEGREE, sizeof(double));
        snm = (double *) calloc (GRAVDEGREE * GRAVDEGREE, sizeof(double));
        opengravfile (cn0, cnm, snm, gma);
        flag = 9;
    }
    /* Solid-tide corrections (degree 2) with Love number k2 = CONS,
       then fold them (and the estimated biases DCON/BIAS/DBIA/jc/js)
       into the normalized low-degree coefficients. */
    k2 = CONS;
    stidecs_old(tjd, gma[1], k2, &c20, &c21, &s21, &c22, &s22);
//	cn0[1] = (-8.745054708184200e-04 + c20 + DCON * 1.0e-16 * tjd[1]) * sqrt(5);
    n = 2;
    cn0[n-1] = (j2 + c20 + DCON * 1.0e-8) * sqrt(2*n+1);
    n = 3;
    cn0[n-1] = (j3 + BIAS * 1.0e-8) * sqrt(2*n+1);
    n = 4;
    cn0[n-1] = (j4 + DBIA * 1.0e-8) * sqrt(2*n+1);
    n = 2; m = 1;
    {
        /* Full normalization factor sqrt(2*(2n+1)*(n-m)!/(n+m)!). */
        nup = 1.0;
        ndown = 1.0;
        for (i = 1; i <= n - m; i++)
            nup = nup * i;
        for (i = 1; i <= n + m; i++)
            ndown = ndown * i;
        unit = sqrt (2 * (2*n+1.0)*nup/ndown);
        cnm[(n-1)*GRAVDEGREE + (m-1)] = (jc21 + c21)* unit;
        snm[(n-1)*GRAVDEGREE + (m-1)] = (js21 + s21)* unit;
    }
    n = 2; m = 2;
    {
        nup = 1.0;
        ndown = 1.0;
        for (i = 1; i <= n - m; i++)
            nup = nup * i;
        for (i = 1; i <= n + m; i++)
            ndown = ndown * i;
        unit = sqrt (2 * (2*n+1.0)*nup/ndown);
        cnm[(n-1)*GRAVDEGREE + (m-1)] = (jc22 + c22)* unit;
        snm[(n-1)*GRAVDEGREE + (m-1)] = (js22 + s22)* unit;
    }
//	cn0[2] = -1.188691064601560e-05 * sqrt(7) * (1 + DCON);
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*****************************rotation matrix ********************************/
    iau_pns (tjd, tb, CENTER);  //tb
    mt (tb, 3, 3, tbt);			//tbt
    brmul (tbt,xic,3,3,1,xfc);	//rb
    r = sqrt (xfc[0] * xfc[0] + xfc[1] * xfc[1] + xfc[2] * xfc[2]);
    //define up-east-north system
    rxy  = sqrt (xfc[0] * xfc[0] + xfc[1] * xfc[1]);
    sinf = xfc[2] / r;       /* sin/cos of geocentric latitude f */
    cosf = rxy / r;
    sinl = xfc[1] / rxy;     /* sin/cos of longitude l */
    cosl = xfc[0] / rxy;
    rmat[0] = cosf * cosl;	//from fixed to up-east-north system: rmat
    rmat[1] = cosf * sinl;
    rmat[2] = sinf;
    rmat[3] = -sinl;
    rmat[4] = cosl;
    rmat[5] = 0;
    rmat[6] = -sinf * cosl;
    rmat[7] = -sinf * sinl;
    rmat[8] = cosf;
    brmul (rmat,tbt,3,3,3,gmat);	//inertial to fixed matrix gmat = rmat*tbt
    mt (gmat, 3, 3, gmatt);			//fixed to inertial matrix gmatt
    lamta = chosephase (sinl, cosl);	//rad
    cosml = (double *) calloc ( GRAVDEGREE, sizeof(double));	//cos(m*lamta)
    sinml = (double *) calloc ( GRAVDEGREE, sizeof(double));	//sin(m*lamta)
    aprn  = (double *) calloc ( GRAVDEGREE, sizeof(double));	//(a/r)^n
    for (m = 1; m <= GRAVDEGREE; m++)
    {
        cosml[m-1] = cos(m*lamta);
        sinml[m-1] = sin(m*lamta);
    }
    for (n = 1; n <= GRAVDEGREE; n++)
    {
        aprn[n-1] = pow (gma[1] / r, n);
    }
/******************************************************************/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/****************Legendre Polynomial**********************************/
    /* Legendre recursions.  All arrays are 1-offset: degree n lives in
       row index n-1, so "n-2 terms" appear at row index n-3 below. */
    pn    = (double *) calloc ( GRAVDEGREE, sizeof(double));
	//Pn
    pnp   = (double *) calloc ( GRAVDEGREE, sizeof(double));
	//Pn'
    pnm   = (double *) calloc ( GRAVDEGREE * GRAVDEGREE, sizeof(double));
	//secf*Pmn
    pnmp  = (double *) calloc ( GRAVDEGREE * GRAVDEGREE, sizeof(double));
	//cosf*Pmn'
    pnpp  = (double *) calloc ( GRAVDEGREE, sizeof(double));
	//Pn''
    pnmpp = (double *) calloc ( GRAVDEGREE * GRAVDEGREE, sizeof(double));
	//cos2fPmn''
    pn[0]   = sinf;
    pnp[0]  = 1;
    pnpp[0] = 0;
    pn[1]   = 3.0/2.0*sinf*sinf - 1.0/2.0;
    pnp[1]  = sinf + 2 * sinf;   /* = 3*sinf, derivative of P2 */
    pnpp[1] = 3;
    for (n = 3; n <= GRAVDEGREE; n++)
    {
        pn[n-1] = (2 * n - 1.0) / n * sinf * pn[n-2]
            - (n - 1.0) / n * pn[n-3]; //tmd!!!
        pnp[n-1]  = sinf * pnp[n-2] + n * pn[n-2];
        pnpp[n-1] = sinf * pnpp[n-2] + (n+1) * pnp[n-2];
    }
    pnm[0] = 1;		//secfP11 = 1
    /* Sectoral terms (m == n) by the standard diagonal recursion. */
    for (n = 2; n <= GRAVDEGREE; n++)
    {
        pnm[(n-1) * GRAVDEGREE + n - 1]
            = (2 * n - 1.0) * cosf * pnm[(n - 2) * GRAVDEGREE + (n - 2)];
    }
    pnm[GRAVDEGREE] = (2 * 2.0 - 1.0) / (2 - 1.0) * sinf * pnm[0];
	//secfP21 = pnm[GRAVDEGREE]
    for (n = 3; n <= GRAVDEGREE; n++)
    {
        for (m = 1; m < n; m++)
        {
            pnm[(n-1) * GRAVDEGREE + (m-1)] = (2 * n - 1.0) / (n-m)
                * sinf * pnm[(n-2) * GRAVDEGREE + (m-1)]
                - (n + m - 1.0) / (n - m)
                * pnm[(n - 3) * GRAVDEGREE + (m - 1)];
//			printf ("%d\t%d\t%f\n", n, m, pnm[(n-1)*n2 + (m-1)]);
        }
    }
    pnmp[0] = -sinf * pnm[0];		//cosfP11'
    for (n = 2; n <= GRAVDEGREE; n++)
    {
        for (m = 1; m <= n; m++)
        {
            pnmp[(n - 1) * GRAVDEGREE + (m - 1)] =
                - n * sinf * pnm[(n - 1) * GRAVDEGREE + (m - 1)]
                + (n + m) * pnm[(n - 2) * GRAVDEGREE + (m - 1)];
        }
    }
    /* Second derivatives (needed only for the dadr partials). */
    pnmpp[0] = sinf * pnmp[0] / cosf - pnm[0] * cosf;
	//cos2fP11''
    pnmpp[GRAVDEGREE] = sinf * pnmp[GRAVDEGREE] / cosf
        - pnm[GRAVDEGREE] * cosf - 3 * sinf * pnm[GRAVDEGREE + 1];
	//cos2fP21'' = pnmpp[GRAVDEGREE]
    pnmpp[GRAVDEGREE + 1] = - 2 * pnm[GRAVDEGREE + 1] * cosf;
	//cos2fP22'' = pnmpp[GRAVDEGREE+1]
    for (n = 3; n <= GRAVDEGREE; n++)
    {
        for (m = 1; m <= n; m++)
        {
            if (m == 1)
            {
                pnmpp[(n - 1) * GRAVDEGREE + (m - 1)] =
                    + sinf * pnmp[(n - 1) * GRAVDEGREE + (m - 1)] / cosf
                    - pnm[(n - 1) * GRAVDEGREE + (m - 1)] * cosf
                    - 3 * sinf * pnm[(n - 1) * GRAVDEGREE + (m - 1) + 1]
                    + pnm[(n - 1) * GRAVDEGREE + (m - 1) + 2] * cosf;
            }
            else
            {
                pnmpp[(n-1)*GRAVDEGREE + (m-1)] = - (n - 2) * sinf
                    * pnmp[(n - 1) * GRAVDEGREE + (m-1)] / cosf
                    + (n + m) * pnmp[(n - 2) * GRAVDEGREE + (m-1)] / cosf
                    - n * pnm[(n - 1) * GRAVDEGREE + (m - 1)] * cosf;
            }
        }
    }
/*****************Legendre Polynomial**********************************/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/**********************************************************/
    /* Acceleration in the up-east-north frame: zonal part frj ... */
    for (n = 1; n <= GRAVDEGREE; n++)
    {
        frj[0] = frj[0] + (-cn0[n - 1]) * aprn[n - 1] * (n + 1) * pn[n - 1];
        frj[1] = frj[1] + 0;
        frj[2] = frj[2] + (-cn0[n - 1]) * aprn[n - 1] * (-cosf) * pnp[n - 1];
    }
    /* ... plus the tesseral/sectoral part frcs (truncated at GRAVORDER
       in the last degree). */
    for (n = 1; n <= GRAVDEGREE; n++)
    {
        for (m = 1; m <= n; m++)
        {
            if ( n == GRAVDEGREE && m > GRAVORDER)
            {
//				printf ("%d\t%d\n",n,m);
                break;
            }
            frcs[0] = frcs[0] + aprn[n - 1]
                * ( - (n + 1)) * pnm[(n - 1) * GRAVDEGREE + (m - 1)] * cosf
                * (cnm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1]
                + snm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1]);
            frcs[1] = frcs[1] + aprn[n - 1]
                * m * pnm[(n - 1) * GRAVDEGREE + (m - 1)]
                * (-cnm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1]
                + snm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1]);
            frcs[2] = frcs[2] + aprn[n-1]
                * pnmp[(n - 1) * GRAVDEGREE + (m - 1)]
                * (cnm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1]
                + snm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1]);
        }
    }
    /* Scale by GM/r^2 and rotate back to the inertial frame. */
    for (n = 0; n < 3; n++)
    {
        fr[n] = (frj[n] + frcs[n]) * gma[0] / r / r;
    }
    brmul(gmatt,fr,3,3,1,acc);		//from fixed acc to inertial acc
    if (part == 0)
    {
        free (pn);
        free (pnp);
        free (pnm);
        free (pnmp);
        free (pnpp);
        free (pnmpp);
        free (cosml);
        free (sinml);
        free (aprn);
        return 1;
    }
/*************************************************************/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/************************partial part1***********************************/
    /* part1: contribution of the frame rotation's position dependence,
       built from the partials of (r, l, f) w.r.t. fixed coordinates.
       The //(5xx) numbers refer to equations in the derivation notes. */
    peprp[0] = 0;
    peprp[1] = - sinl / r;
    peprp[2] = - sinf * cosl / r;		//(573)
    peprp[3] = 0;
    peprp[4] = cosl / r;
    peprp[5] = - sinf * sinl / r;		//(574)	//11.10change
    peprp[6] = 0;
    peprp[7] = 0;
    peprp[8] = cosf / r;				//(575)
    peprp[9] = 0;
    peprp[10] = - cosl / r / cosf;
    peprp[11] = 0;						//(576)
    peprp[12] = 0;
    peprp[13] = - sinl / r / cosf;
    peprp[14] = 0;						//(577)
    peprp[15] = 0;
    peprp[16] = 0;
    peprp[17] = 0;						//(578)
    peprp[18] = 0;
    peprp[19] = sinf * sinl / r / cosf;
    peprp[20] = - cosf * cosl / r;		//(579)
    peprp[21] = 0;
    peprp[22] = - sinf * cosl / r / cosf;
    peprp[23] = - cosf * sinl / r;		//(580)
    peprp[24] = 0;
    peprp[25] = 0;
    peprp[26] = - sinf / r;				//(581)
    brmul(peprp, gmat, 9, 3, 3, pepr);	//571
    for (n = 0; n < 9; n++)		//570
    {
        prtpx[n] = pepr[n*3];
        prtpy[n] = pepr[n*3+1];
        prtpz[n] = pepr[n*3+2];
    }
    mt (prtpx, 3, 3, prtpxx);	//OH MY GOD!!!11.11
    mt (prtpy, 3, 3, prtpyy);	//OH MY GOD!!!11.11
    mt (prtpz, 3, 3, prtpzz);	//OH MY GOD!!!11.11
    brmul(tb, prtpxx, 3, 3, 3, pgtpx);	//568
    brmul(tb, prtpyy, 3, 3, 3, pgtpy);
    brmul(tb, prtpzz, 3, 3, 3, pgtpz);
    brmul(pgtpx, fr, 3, 3, 1, pgx);	//558 first term //11.10 change
    brmul(pgtpy, fr, 3, 3, 1, pgy);
    brmul(pgtpz, fr, 3, 3, 1, pgz);
    for (n = 0; n < 3; n++)
    {
        part1[n*3]   = pgx[n];
        part1[n*3+1] = pgy[n];
        part1[n*3+2] = pgz[n];
    }
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/************************partial part2*************************************/
    /* part2: gradient of the body-fixed acceleration itself
       (d fr / d(r,l,f)), zonal (prj*) and tesseral (prcs*) pieces. */
    for (n = 0; n < 3; n++)
    {
        prjpx[n] = 0;
        prjpy[n] = 0;
        prjpz[n] = 0;
        prcspx[n] = 0;
        prcspy[n] = 0;
        prcspz[n] = 0;
    }
    for (n = 1; n <= GRAVDEGREE; n++)
    {
        prjpx[0] = prjpx[0] - (-cn0[n - 1]) * aprn[n - 1]
            * (n + 1) * pn[n - 1] * (n + 2);	//561
        prjpx[2] = prjpx[2] - (-cn0[n - 1]) * aprn[n - 1]
            * (-cosf) * pnp[n - 1] * (n + 2);	//561
        prjpz[0] = prjpz[0] + (-cn0[n - 1]) * aprn[n - 1]
            * (n + 1) * cosf * pnp[n - 1];		//563
        prjpz[2] = prjpz[2] + (-cn0[n - 1]) * aprn[n - 1]
            * ( sinf * pnp[n - 1] - cosf * cosf * pnpp[n - 1] );//563
    }
    for (n = 1; n <= GRAVDEGREE; n++)
    {
        for (m = 1; m <= n; m++)
        {
            if ( n == GRAVDEGREE && m > GRAVORDER)
            {
//				printf ("%d\t%d\n",n,m);
                break;
            }
            //from 564 to 566
            prcspx[0] = prcspx[0] - aprn[n - 1]
                * ( - (n + 1)) * pnm[(n - 1) * GRAVDEGREE + (m - 1)] * cosf
                * (cnm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1]
                + snm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1])
                * (n + 2);
            prcspx[1] = prcspx[1] - aprn[n - 1]
                * m * pnm[(n - 1) * GRAVDEGREE + (m - 1)]
                * ( - cnm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1]
                + snm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1])
                * (n + 2);
            prcspx[2] = prcspx[2] - aprn[n - 1]
                * pnmp[(n - 1) * GRAVDEGREE + (m - 1)]
                * (cnm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1]
                + snm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1])
                * (n + 2);
            prcspy[0] = prcspy[0] + m * aprn[n - 1]
                * (n + 1) * pnm[(n - 1) * GRAVDEGREE + (m - 1)]
                * (cnm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1]
                - snm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1]);
            prcspy[1] = prcspy[1] + m * aprn[n - 1]
                * m * pnm[(n - 1) * GRAVDEGREE + (m - 1)] / cosf
                * ( - cnm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1]
                - snm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1]);
            prcspy[2] = prcspy[2] + m * aprn[n - 1]
                * pnmp[(n - 1) * GRAVDEGREE + (m - 1)] / cosf
                * ( - cnm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1]
                + snm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1]);
            prcspz[0] = prcspz[0] + aprn[n - 1]
                * ( - (n + 1)) * pnmp[(n - 1) * GRAVDEGREE + (m - 1)]
                * (cnm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1]
                + snm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1]);
            prcspz[1] = prcspz[1] + aprn[n - 1]
                * m * (sinf * pnm[(n - 1) * GRAVDEGREE + (m - 1)]
                + pnmp[(n - 1) * GRAVDEGREE + (m - 1)]) / cosf
                * ( - cnm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1]
                + snm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1]);
            prcspz[2] = prcspz[2] + aprn[n - 1]
                * ( pnmpp[(n - 1) * GRAVDEGREE + (m - 1)]
                - pnmp[(n - 1) * GRAVDEGREE + (m - 1)] * sinf / cosf)
                * ( cnm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1]
                + snm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1]);
        }
    }
    for (n = 0; n < 3; n++)
    {
        prpr[n*3]   = (prjpx[n] + prcspx[n]) * gma[0] / r / r / r;
        prpr[n*3+1] = (prjpy[n] + prcspy[n]) * gma[0] / r / r / r;
        prpr[n*3+2] = (prjpz[n] + prcspz[n]) * gma[0] / r / r / r;
    }
    /* Rotate the gradient back to inertial: dadr = gmatt * prpr * gmat. */
    brmul(prpr, gmat, 3, 3, 3, gtpr);
    brmul(gmatt, gtpr, 3, 3, 3, part2);
    for (n = 0; n <= 8; n++)
    {
        dadr[n] = part1[n] + part2[n];
    }
/*****************************************************************************/
    /* da/dp columns 1..3: sensitivity to the J2/J3/J4 bias parameters
       (each scaled by the 1.0e-8 used when the bias entered cn0 above). */
    n = 2;
    dfd2r[0] = (- 1.0e-8 * sqrt(2*n+1)) * aprn[n - 1] * (n + 1) * pn[n - 1];
    dfd2r[1] = 0;
    dfd2r[2] = (- 1.0e-8 * sqrt(2*n+1)) * aprn[n - 1] * (-cosf) * pnp[n - 1];
    for (n = 0; n < 3; n++)
    {
        dfd2r[n] = dfd2r[n] * gma[0] / r / r;
    }
    brmul(gmatt,dfd2r,3,3,1,dfd2);
    for (n = 0; n <= 2; n++)
    {
//		dadp[n * DYNPAR] = dfd2[n];
//		dadp[n * DYNPAR + 1] = dfd3[n];
        dadp[n * DYNPAR + 1] = dfd2[n];
    }
    n = 3;
    dfd2r[0] = (- 1.0e-8 * sqrt(2*n+1)) * aprn[n - 1] * (n + 1) * pn[n - 1];
    dfd2r[1] = 0;
    dfd2r[2] = (- 1.0e-8 * sqrt(2*n+1)) * aprn[n - 1] * (-cosf) * pnp[n - 1];
    for (n = 0; n < 3; n++)
    {
        dfd2r[n] = dfd2r[n] * gma[0] / r / r;
    }
    brmul(gmatt,dfd2r,3,3,1,dfd2);
    for (n = 0; n <= 2; n++)
    {
//		dadp[n * DYNPAR] = dfd2[n];
//		dadp[n * DYNPAR + 1] = dfd3[n];
        dadp[n * DYNPAR + 2] = dfd2[n];
    }
    n = 4;
    dfd2r[0] = (- 1.0e-8 * sqrt(2*n+1)) * aprn[n - 1] * (n + 1) * pn[n - 1];
    dfd2r[1] = 0;
    dfd2r[2] = (- 1.0e-8 * sqrt(2*n+1)) * aprn[n - 1] * (-cosf) * pnp[n - 1];
    for (n = 0; n < 3; n++)
    {
        dfd2r[n] = dfd2r[n] * gma[0] / r / r;
    }
    brmul(gmatt,dfd2r,3,3,1,dfd2);
    for (n = 0; n <= 2; n++)
    {
//		dadp[n * DYNPAR] = dfd2[n];
//		dadp[n * DYNPAR + 1] = dfd3[n];
        dadp[n * DYNPAR + 3] = dfd2[n];
    }
    /* da/dp column 0: sensitivity to the Love number k2, obtained by
       re-evaluating the tide with unit k2 (tides are linear in k2). */
    k2 = 1;
    stidecs_old(tjd, gma[1], k2, &c20, &c21, &s21, &c22, &s22);
    n = 2;
    cn0[n-1] = (c20) * sqrt(2*n+1);
    dfdkr[0] = (-cn0[n - 1]) * aprn[n - 1] * (n + 1) * pn[n - 1];
    dfdkr[1] = 0;
    dfdkr[2] = (-cn0[n - 1]) * aprn[n - 1] * (-cosf) * pnp[n - 1];
    n = 2; m = 1;
    {
        nup = 1.0;
        ndown = 1.0;
        for (i = 1; i <= n - m; i++)
            nup = nup * i;
        for (i = 1; i <= n + m; i++)
            ndown = ndown * i;
        unit = sqrt (2 * (2*n+1.0)*nup/ndown);
        cnm[(n-1)*GRAVDEGREE + (m-1)] = ( c21)* unit;
        snm[(n-1)*GRAVDEGREE + (m-1)] = ( s21)* unit;
        dfdkr[0] = dfdkr[0] + aprn[n - 1]
            * ( - (n + 1)) * pnm[(n - 1) * GRAVDEGREE + (m - 1)] * cosf
            * (cnm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1]
            + snm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1]);
        dfdkr[1] = dfdkr[1] + aprn[n - 1]
            * m * pnm[(n - 1) * GRAVDEGREE + (m - 1)]
            * (-cnm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1]
            + snm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1]);
        dfdkr[2] = dfdkr[2] + aprn[n-1]
            * pnmp[(n - 1) * GRAVDEGREE + (m - 1)]
            * (cnm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1]
            + snm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1]);
    }
    n = 2; m = 2;
    {
        nup = 1.0;
        ndown = 1.0;
        for (i = 1; i <= n - m; i++)
            nup = nup * i;
        for (i = 1; i <= n + m; i++)
            ndown = ndown * i;
        unit = sqrt (2 * (2*n+1.0)*nup/ndown);
        cnm[(n-1)*GRAVDEGREE + (m-1)] = ( c22)* unit;
        snm[(n-1)*GRAVDEGREE + (m-1)] = ( s22)* unit;
        dfdkr[0] = dfdkr[0] + aprn[n - 1]
            * ( - (n + 1)) * pnm[(n - 1) * GRAVDEGREE + (m - 1)] * cosf
            * (cnm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1]
            + snm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1]);
        dfdkr[1] = dfdkr[1] + aprn[n - 1]
            * m * pnm[(n - 1) * GRAVDEGREE + (m - 1)]
            * (-cnm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1]
            + snm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1]);
        dfdkr[2] = dfdkr[2] + aprn[n-1]
            * pnmp[(n - 1) * GRAVDEGREE + (m - 1)]
            * (cnm[(n - 1) * GRAVDEGREE + (m - 1)] * cosml[m - 1]
            + snm[(n - 1) * GRAVDEGREE + (m - 1)] * sinml[m - 1]);
    }
    for (n = 0; n < 3; n++)
    {
        dfdkr[n] = dfdkr[n] * gma[0] / r / r;
    }
    brmul(gmatt,dfdkr,3,3,1,dfdk);
    for (n = 0; n <= 2; n++)
    {
        dadp[n * DYNPAR] = dfdk[n];
//		dadp[n * DYNPAR + 1] = dfd3[n];
//		dadp[n * DYNPAR + 1] = dfd2[n];
    }
/*****************************************************************************/
    /* NOTE(review): cn0[1]/cnm/snm now hold the unit-k2 tide values; they
       are rewritten at the top of every call, so this is harmless. */
    free (pn);
    free (pnp);
    free (pnm);
    free (pnmp);
    free (pnpp);
    free (pnmpp);
    free (cosml);
    free (sinml);
    free (aprn);
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* opengravfile - open gravity field
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double opengravfile (double *cn0, double *cnm, double *snm, double *gma)
{
    /* Load the gravity-field file FILE_GRV.
       Header section: "Gm <value>" and "RefDistance <value>" lines up to
       a "BEGIN" marker; coefficient section: "n m Cnm Snm" per line.
         cn0 : normalized zonal coefficients, cn0[n-1] for degree n
         cnm : normalized Cnm, row-major (n-1)*GRAVDEGREE + (m-1)
         snm : normalized Snm, same layout
         gma : gma[0] = GM (AU^3/day^2), gma[1] = reference radius (AU)
       Side effects: fills the globals j2/j3/j4 and jc21/js21/jc22/js22
       with the raw (unnormalized) low-degree values.
       Exits the program if the file cannot be opened.  Returns 0. */
    FILE *fp_gra;
    double value, c, s, nup, ndown, unit;
    int n, m, i;
    char string[100], name[20];

    if ((fp_gra = fopen (FILE_GRV, "r")) == NULL)
    {
        printf ("Cannot open gravity file?\n");
        getch();
        exit (0);
    }
    /* Bug fix: the original looped on !feof(), which re-parses the stale
       line buffer once after the final read fails; loop on fgets() itself
       so every parsed line is a line actually read. */
    while (fgets (string, 100, fp_gra) != NULL)
    {
        if (sscanf (string, "%s%lf", name, &value) < 1)
            continue;                       /* blank/unparsable line */
        if (strcmp (name, "Gm") == 0)
        {
            /* convert from m^3/s^2-style units to AU^3/day^2 */
            gma[0] = value / AU / AU / AU * 86400.0 * 86400.0;
        }
        if (strcmp (name, "RefDistance") == 0)
        {
            gma[1] = value / AU;
        }
        if (strcmp (name, "BEGIN") == 0)
            break;                          /* coefficients follow */
    }
    while (fgets (string, 100, fp_gra) != NULL)
    {
        /* Sentinel: if the line does not parse, n stays 999 and the
           degree check below skips it. */
        n = 999;
        m = 999;
        sscanf (string, "%d%d%lf%lf", &n, &m, &c, &s);
        if (n > GRAVDEGREE)
            continue;
        else if (m == 0)
        {
            /* zonal term: normalization sqrt(2n+1) */
            unit = sqrt (2 * n + 1.0);
            cn0[n-1] = c * unit;
            if (n == 2) j2 = c;
            if (n == 3) j3 = c;
            if (n == 4) j4 = c;
        }
        else
        {
            if (n == 2 && m == 1) {jc21 = c; js21 = s; }
            if (n == 2 && m == 2) {jc22 = c; js22 = s; }
            /* full normalization sqrt(2*(2n+1)*(n-m)!/(n+m)!) */
            nup = 1.0;
            ndown = 1.0;
            for (i = 1; i <= n - m; i++)
                nup = nup * i;
            for (i = 1; i <= n + m; i++)
                ndown = ndown * i;
            unit = sqrt (2 * (2*n+1.0)*nup/ndown);
            cnm[(n-1)*GRAVDEGREE + (m-1)] = c * unit;
            snm[(n-1)*GRAVDEGREE + (m-1)] = s * unit;
        }
    }
    fclose (fp_gra);
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* itrf2icrf - from earth fixed to earth inertial
* @param1: description of param1
* @param2: description of param2
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double itrf2icrf(double jd, double utc, double *vt, double *vc)
{
    /* Rotate an Earth-fixed (ITRF) vector vt into the celestial (ICRF)
       frame vc, using the Earth-orientation parameters valid at the
       given epoch (jd = Julian day number, utc = seconds of day). */
    double pole_x = 0, pole_y = 0, dut1 = 0, ddx = 0, ddy = 0;
    double tt_sec, ut1_sec, dt;

    geteop (utc, &pole_x, &pole_y, &dut1, &ddx, &ddy);

    tt_sec  = utc + (LEAPSECS + 32.184);   /* TT  = UTC + leaps + 32.184 s */
    ut1_sec = utc + dut1;                  /* UT1 = UTC + (UT1-UTC)        */
    dt      = 32.184 + LEAPSECS - dut1;    /* delta-T = TT - UT1           */

    /* apply the celestial-pole offsets (mas), then transform */
    cel_pole (jd + tt_sec / 86400.0, 2, ddx * 1e3, ddy * 1e3);
    ter2cel (jd, ut1_sec / 86400.0, dt, 1, ACCURACY, 0,
        pole_x, pole_y, vt, vc);           /*--vc unit: m--*/
    return 0;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* iau_pns - planet fixed to J2000 inertial (for gravity field)
Report of the IAU/IAGWorking Group on cartographic
coordinates and rotational elements: 2006
* @param1: description of param1
* @param2: description of param2
* todo:
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
void iau_pns (double *jd, double *te, int cent)
{
    /* Build te[9], the rotation from the body-fixed frame of body `cent`
       to the J2000 inertial frame (used for gravity-field evaluation).
         cent == 2 : Earth - interpolate the precomputed TE_EPH series
         cent == 9 : Moon  - mean-earth body-fixed model via mbf2cel()
         otherwise : IAU pole / prime-meridian model; note cent is shifted
                     by +1 to match the sun0..pluto9 indexing of
                     iau_s()/iau_pn().
       (Cleanup: removed locals tb/vx/vy/vz that were referenced only by
       long-dead commented-out code.) */
    double tes[9] = {0}, tepn[9] = {0}, te2[9], utc;
    int i;
    if (cent == 2)
    {
        /* jd[1] carries TT seconds-of-day fraction; convert to the UTC
           argument expected by the interpolation table */
        utc = jd[1] * 86400 - (LEAPSECS + 32.184);
        lagrange (TE_EPH, DIM_TE, 10, utc, te2);
        for (i = 0; i < 9; i++)
            te[i] = te2[i];
    }
    else if (cent == 9)
    {
        mbf2cel (jd, te);
    }
    else
    {
        cent = cent + 1;
        iau_s (jd, tes, cent);      /* IAU fixed -> IAU inertial   */
        iau_pn (jd, tepn, cent);    /* IAU inertial -> J2000       */
        brmul (tepn, tes, 3, 3, 3, te);
    }
    return;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* iau_s - from IAU fixed to IAU inertial, true-of-date equator and equinox
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
void iau_s (double *jd, double *tes, int cent)
{
    /* Rotation from the IAU body-fixed frame to the IAU inertial frame
       (true-of-date equator and equinox): a z-rotation by the prime
       meridian angle W of the IAU/IAG WG 2006 report.
         jd   : two-part Julian date (jd[0] + jd[1])
         tes  : output 3x3 row-major rotation matrix
         cent : sun 0, mercury 1, ..., pluto 9, plus I_TITAN */
    double d, str, cosst, sinst;
    d = jd[0] - 2451545.0;          /* days since J2000.0 */
    d = d + jd[1];
    switch (cent)                   //sun0, mercury1, ..., pluto9
    {
        case 0 : str = 84.176   + 14.1844000   * d; break;
        case 1 : str = 329.548  + 6.1385025    * d; break;
        case 2 : str = 160.20   - 1.4813688    * d; break;
        case 3 : str = 190.147  + 360.9856235  * d; break;
        case 4 : str = 176.63   + 350.89198226 * d; break;
        case 5 : str = 284.95   + 870.5366420  * d; break;
        case 6 : str = 38.90    + 810.7939024  * d; break;
        case 7 : str = 203.81   - 501.1600928  * d; break;
        case 8 : str = 253.18   + 536.3128492  * d -
                 0.48 * sin ((357.85 + 52.316 * d / 36525.0 ) * DEG2RAD);
                 break;
        case 9 : str = 237.305  - 56.3625225   * d; break;
        case I_TITAN : str = 186.5855 + 22.5769768 * d; break;
        default :
            /* Bug fix: an unknown body previously left str uninitialized
               (undefined behavior); fall back to a zero rotation angle. */
            str = 0.0;
            break;
    }
    cosst = cos (str * DEG2RAD);
    sinst = sin (str * DEG2RAD);
    tes[0] = cosst;
    tes[1] = -sinst;
    tes[2] = 0;
    tes[3] = sinst;
    tes[4] = cosst;
    tes[5] = 0;
    tes[6] = 0;
    tes[7] = 0;
    tes[8] = 1;
    return;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* iau_pn - from IAU inertial (for all planets) to J2000 inertial
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
void iau_pn (double *jd, double *tes, int cent)
{
    /* Rotation from the IAU inertial frame of body `cent` to the J2000
       inertial frame, built from the body's north-pole right ascension
       ra0 and declination dec0 (degrees, IAU/IAG WG 2006 report).
         jd   : two-part Julian date
         tes  : output 3x3 row-major rotation matrix
         cent : sun 0, mercury 1, ..., pluto 9, plus I_TITAN */
    double ra0, dec0, jcent, cr, sr, cd, sd;
    jcent = jd[0] - 2451545.0;
    jcent = (jcent + jd[1]) / 36525.0;   /* Julian centuries since J2000 */
    switch (cent)                        //sun0, mercury1, ..., pluto9
    {
        case 0 :
            ra0  = 286.13;
            dec0 = 63.87;
            break;
        case 1 :
            ra0  = 281.01 - 0.033 * jcent;
            dec0 = 61.45  - 0.005 * jcent;
            break;
        case 2 :
            ra0  = 272.76;
            dec0 = 67.16;
            break;
        case 3 :
            ra0  = 0.00 - 0.641 * jcent;
            dec0 = 90.0 - 0.557 * jcent;
            break;
        case 4 :
            ra0  = 317.68143 - 0.1061 * jcent;
            dec0 = 52.88650  - 0.0609 * jcent;
            break;
        case 5 :
            ra0  = 268.05 - 0.009 * jcent;
            dec0 = 64.49  + 0.003 * jcent;
            break;
        case 6 :
            ra0  = 40.589 - 0.036 * jcent;
            dec0 = 83.537 - 0.004 * jcent;
            break;
        case 7 :
            ra0  = 257.311;
            dec0 = -15.175;
            break;
        case 8 :
            ra0  = 299.36 + 0.70 * sin ((357.85 + 52.316 * jcent) * DEG2RAD);
            dec0 = 43.46  - 0.51 * cos ((357.85 + 52.316 * jcent) * DEG2RAD);
            break;
        case 9 :
            ra0  = 313.02;
            dec0 = 9.09;
            break;
        case I_TITAN :
            ra0  = 39.4827;
            dec0 = 83.4279;
            break;
        default :
            /* Bug fix: an unknown body previously left ra0/dec0
               uninitialized (undefined behavior); fall back to the
               J2000 pole (+z). */
            ra0  = 0.0;
            dec0 = 90.0;
            break;
    }
    cr = cos (ra0 * DEG2RAD);
    sr = sin (ra0 * DEG2RAD);
    cd = cos (dec0 * DEG2RAD);
    sd = sin (dec0 * DEG2RAD);
    tes[0] = -sr;
    tes[1] = -cr * sd;
    tes[2] = cr * cd;
    tes[3] = cr;
    tes[4] = -sr * sd;
    tes[5] = sr * cd;
    tes[6] = 0;
    tes[7] = cd;
    tes[8] = sd;
    return;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* in2pa - from inertial to moon fixed (PA)
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
void in2pa(double *jd, double *te)
{
    /* Inertial -> lunar Principal-Axis (PA) frame via the ephemeris
       libration angles: te = Rz(lib[2]) * Rx(lib[1]) * Rz(lib[0]).
       NOTE(review): the DPLEPH call that should fill lib[] is commented
       out (and its text was mojibake-garbled), so lib stays {0} and te
       degenerates to the identity matrix - this routine is effectively
       a stub until the ephemeris call is restored. */
    double lib[6] = {0}, tb1[9], tb2[9], tb3[9], tb32[9];
    int target, center;
    target = 15;      /* ephemeris item 15: lunar libration angles */
    center = 0;
    // DPLEPH(jd, &target, &center, lib);  /* fills lib[0..2] (rad) */
    rotmatz (lib[0], tb1, 0);
    rotmatx (lib[1], tb2, 0);
    rotmatz (lib[2], tb3, 0);
    brmul(tb3, tb2, 3, 3, 3, tb32);
    brmul(tb32, tb1, 3, 3, 3, te);
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* mbf2cel - simulate doppler observable
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
short int mbf2cel (double *jd_tdb, double *te)
/*
------------------------------------------------------------------------
   PURPOSE:
      Build the 3x3 rotation matrix te[9] from the moon body-fixed
      system to the celestial (ICRF) system at the given epoch:
      te = (in2me)^T * (me2pa)^T.

      NOTE(review): the inline comment below labels the result ME2ICRF,
      but with tbt1 = transpose(me2pa) applied first the input frame
      depends on me2pa's direction convention (ME->PA vs PA->ME) -
      TODO confirm against me2pa() before relying on which lunar
      body-fixed frame (ME or PA) this maps from.

   REFERENCES:
      P. Kenneth Seidelmann et al. (2007). Report of the IAU/IAG Working
      Group on cartographic coordinates and rotational elements: 2006

   INPUT
   ARGUMENTS:
      jd_tdb[2] (double)
         TDB Julian date.
         High-order part (jd_tdb[0]) & low-order part (jd_tdb[1]).

   OUTPUT
   ARGUMENTS:
      te[9] (double)
         Row-major rotation matrix, moon body-fixed axes -> ICRF axes.

   RETURNED
   VALUE:
      (short int)
         = 0 ... everything is ok (no failure path at present).

   VER./DATE/
   PROGRAMMER:
      V1.0/03-10/ (SHAO).

   NOTES:
      Uses the IAU report formulae only (in2me); the earlier interface
      that selected a method / reference system / derivation and rotated
      an explicit vector has been removed.
------------------------------------------------------------------------
*/
{
   short int error = 0;
   double tb1[9], tb2[9], tbt1[9], tbt2[9];
/*
   IAU report formulae
*/
   /* tb1 = fixed ME/PA offset rotation; tb2 = inertial->ME at jd_tdb;
      compose the transposes to go body-fixed -> inertial. */
   me2pa(tb1);
   mt(tb1, 3, 3, tbt1);
   in2me(jd_tdb, tb2, 0);
   mt(tb2, 3, 3, tbt2);
   brmul(tbt2,tbt1,3,3,3,te);	//ME2ICRF
   return error;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* in2me - from inertial to moon fixed (ME)
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
void in2me (double *jd, double *te, short int derivation)
{
    /* Rotation from the celestial frame to the Moon's Mean-Earth (ME)
       body-fixed frame, using the IAU/IAG WG 2006 report series for the
       lunar pole (ra, dec) and prime meridian (w):
       te = Rz(w) * Rx(90-dec) * Rz(90+ra), all angles in degrees here
       and converted to radians for the rotations.
         jd         : two-part Julian date (TDB assumed - TODO confirm)
         te         : output 3x3 row-major matrix
         derivation : 0 = plain rotation; 1/2/3 = replace the first/
                      second/third elementary rotation by its derivative
                      with respect to its angle (used for partials).
       E1..E13 are the report's lunar/solar trigonometric arguments
       in degrees. */
    double ra, dec, w, lib[3], d, T, tb1[9], tb2[9], tb3[9], tb32[9],
        E1, E2, E3, E4, E5, E6, E7, E8, E9, E10, E11, E12, E13;
    d = jd[0] - 2451545.0 + jd[1];   /* days since J2000.0 */
    T = d / 36525.0;                 /* Julian centuries since J2000.0 */
    E1 = 125.045 - 0.0529921 * d;
    E2 = 250.089 - 0.1059842 * d;
    E3 = 260.008 + 13.0120009 * d;
    E4 = 176.625 + 13.3407154 * d;
    E5 = 357.529 + 0.9856003 * d;
    E6 = 311.589 + 26.4057084 * d;
    E7 = 134.963 + 13.0649930 * d;
    E8 = 276.617 + 0.3287146 * d;
    E9 = 34.226 + 1.7484877 * d;
    E10 = 15.134 - 0.1589763 * d;
    E11 = 119.743 + 0.0036096 * d;
    E12 = 239.961 + 0.1643573 * d;
    E13 = 25.053 + 12.9590088 * d;
    /* north-pole right ascension (deg) */
    ra = 269.9949 + 0.0031 * T - 3.8787 * sin (E1 * DEG2RAD)
        - 0.1204 * sin (E2 * DEG2RAD) + 0.0700 * sin (E3 * DEG2RAD)
        - 0.0172 * sin (E4 * DEG2RAD) + 0.0072 * sin (E6 * DEG2RAD)
        - 0.0052 * sin (E10 * DEG2RAD) + 0.0043 * sin (E13 * DEG2RAD);
    /* north-pole declination (deg) */
    dec = 66.5392 + 0.0130 * T + 1.5419 * cos (E1 * DEG2RAD)
        + 0.0239 * cos (E2 * DEG2RAD) - 0.0278 * cos (E3 * DEG2RAD)
        + 0.0068 * cos (E4 * DEG2RAD) - 0.0029 * cos (E6 * DEG2RAD)
        + 0.0009 * cos (E7 * DEG2RAD) + 0.0008 * cos (E10 * DEG2RAD)
        - 0.0009 * cos (E13 * DEG2RAD);
    /* prime-meridian angle (deg) */
    w = 38.3213 + 13.17635815 * d - 1.4e-12 * d * d
        + 3.5610 * sin (E1 * DEG2RAD) + 0.1208 * sin (E2 * DEG2RAD)
        - 0.0642 * sin (E3 * DEG2RAD) + 0.0158 * sin (E4 * DEG2RAD)
        + 0.0252 * sin (E5 * DEG2RAD) - 0.0066 * sin (E6 * DEG2RAD)
        - 0.0047 * sin (E7 * DEG2RAD) - 0.0046 * sin (E8 * DEG2RAD)
        + 0.0028 * sin (E9 * DEG2RAD) + 0.0052 * sin (E10 * DEG2RAD)
        + 0.0040 * sin (E11 * DEG2RAD) + 0.0019 * sin (E12 * DEG2RAD)
        - 0.0044 * sin (E13 * DEG2RAD);
    /* 3-1-3 Euler angles of the celestial -> ME rotation */
    lib[0] = (90.0 + ra) * DEG2RAD;
    lib[1] = (90.0 - dec) * DEG2RAD;
    lib[2] = w * DEG2RAD;
    rotmatz (lib[0], tb1, 0);
    rotmatx (lib[1], tb2, 0);
    rotmatz (lib[2], tb3, 0);
    /* optionally substitute one rotation by its angle-derivative */
    if (derivation == 1)
        rotmatz (lib[0], tb1, 1);
    if (derivation == 2)
        rotmatx (lib[1], tb2, 1);
    if (derivation == 3)
        rotmatz (lib[2], tb3, 1);
    brmul(tb3, tb2, 3, 3, 3, tb32);
    brmul(tb32, tb1, 3, 3, 3, te);
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* me2pa - simulate doppler observable
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
void me2pa (double *te)
{
    /* Fixed offset rotation between the lunar Mean-Earth (ME) and
       Principal-Axis (PA) frames:
       te = Rz(63.8986") * Ry(79.0768") * Rx(0.1462"). */
    double rx[9], ry[9], rz[9], rzy[9];

    rotmatx ( 0.1462 * ASEC2RAD, rx, 0);
    rotmaty (79.0768 * ASEC2RAD, ry, 0);
    rotmatz (63.8986 * ASEC2RAD, rz, 0);

    brmul(rz, ry, 3, 3, 3, rzy);
    brmul(rzy, rx, 3, 3, 3, te);
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* rotmatx -
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
void rotmatx (double rad, double *matx, short int deri)
{
    /* Fill matx (row-major 3x3) with the rotation about the x-axis by
       `rad` radians; with deri == 1, fill it instead with the matrix's
       derivative with respect to the angle. */
    double c = cos (rad);
    double s = sin (rad);
    if (deri == 1)
    {
        matx[0] = 0;    matx[1] = 0;    matx[2] = 0;
        matx[3] = 0;    matx[4] = -s;   matx[5] = c;
        matx[6] = 0;    matx[7] = -c;   matx[8] = -s;
    }
    else
    {
        matx[0] = 1;    matx[1] = 0;    matx[2] = 0;
        matx[3] = 0;    matx[4] = c;    matx[5] = s;
        matx[6] = 0;    matx[7] = -s;   matx[8] = c;
    }
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* rotmaty -
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
void rotmaty (double rad, double *maty, short int deri)
{
    /* Fill maty (row-major 3x3) with the rotation about the y-axis by
       `rad` radians; with deri == 1, fill it instead with the matrix's
       derivative with respect to the angle. */
    double c = cos (rad);
    double s = sin (rad);
    if (deri == 1)
    {
        maty[0] = -s;   maty[1] = 0;    maty[2] = -c;
        maty[3] = 0;    maty[4] = 0;    maty[5] = 0;
        maty[6] = c;    maty[7] = 0;    maty[8] = -s;
    }
    else
    {
        maty[0] = c;    maty[1] = 0;    maty[2] = -s;
        maty[3] = 0;    maty[4] = 1;    maty[5] = 0;
        maty[6] = s;    maty[7] = 0;    maty[8] = c;
    }
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* rotmatz -
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
void rotmatz (double rad, double *matz, short int deri)
{
    /* Fill matz (row-major 3x3) with the rotation about the z-axis by
       `rad` radians; with deri == 1, fill it instead with the matrix's
       derivative with respect to the angle. */
    double c = cos (rad);
    double s = sin (rad);
    if (deri == 1)
    {
        matz[0] = -s;   matz[1] = c;    matz[2] = 0;
        matz[3] = -c;   matz[4] = -s;   matz[5] = 0;
        matz[6] = 0;    matz[7] = 0;    matz[8] = 0;
    }
    else
    {
        matz[0] = c;    matz[1] = s;    matz[2] = 0;
        matz[3] = -s;   matz[4] = c;    matz[5] = 0;
        matz[6] = 0;    matz[7] = 0;    matz[8] = 1;
    }
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/****************************************************************************/
/* */
/* Functions for Runge-Kutta integrator */
/* */
/* Version: 2009-9-8 */
/* */
/* Copyright (c) 2009 shangkun@shao.ac.cn All Right Reserved */
/* */
/****************************************************************************/
/*
Version: 2009-9-8
Version: 2009-9-13 integrate forwards & backwards
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double rkf78_auto (double h, double t, double *x, int dim, double err,
    double (*fun)(double,double *,double *), int autoadjust)
/*
 purpose:	one step of the Runge-Kutta-Fehlberg 7(8) integrator with
            optional automatic step-size control
 input:		double h		integration step
			double t		integrate from t to t+h
			double *x		x(t)
			int dim			dim(x)
			double err		tolerance of step control
			double (*fun)()	right(force) function: fun(t, x, dx) fills dx
			int autoadjust	1 = halve/double h to keep the local error
							estimate near err; 0 = fixed step
 output:	double *x		x(t+h_used)
 return:	h				step to use for the NEXT call

 Bug fix: the original doubled h *before* applying the state update when
 the error was very small, so the stage slopes (computed with the old h)
 were scaled by a step they were not evaluated with, corrupting that
 step.  The update below always uses the step that was actually
 evaluated; enlargement affects only the returned step.
*/
{
    int i, j, n, reject;
    double *y, *k, *f, d = 0, tn;
    /* Fehlberg 7(8) nodes a[], 8th-order weights c[], coupling b[][]. */
    double a[13] = { 0, 2.0/27, 1.0/9, 1.0/6, 5.0/12, 1.0/2, 5.0/6, 1.0/6,
        2.0/3, 1.0/3, 1.0, 0, 1.0 };
    double c[13] = { 0, 0, 0, 0, 0, 34.0/105, 9.0/35, 9.0/35, 9.0/280,
        9.0/280, 0, 41.0/840, 41.0/840 };
    double b[13][12] =
    {
        {0},
        {2.0/27},
        {1.0/36,1.0/12},
        {1.0/24,0,1.0/8},
        {5.0/12,0,-25.0/16,25.0/16},
        {1.0/20,0,0,1.0/4,1.0/5},
        {-25.0/108,0,0,125.0/108,-65.0/27,125.0/54},
        {31.0/300,0,0,0,61.0/225,-2.0/9,13.0/900},
        {2.0,0,0,-53.0/6,704.0/45,-107.0/9,67.0/90,3.0},
        {-91.0/108,0,0,23.0/108,-976.0/135,311.0/54,-19.0/60,17.0/6,-1.0/12},
        {2383.0/4100,0,0,-341.0/164,4496.0/1025,-301.0/82,2133.0/4100,
        45.0/82,45.0/164,18.0/41},
        {3.0/205,0,0,0,0,-6.0/41,-3.0/205,-3.0/41,3.0/41,6.0/41},
        {-1777.0/4100,0,0,-341.0/164,4496.0/1025,-289.0/82,2193.0/4100,
        51.0/82,33.0/164,12.0/41,0,1.0}
    };
    y = (double *) calloc (dim, sizeof(double));
    k = (double *) calloc (dim*13, sizeof(double));
    f = (double *) calloc (dim, sizeof(double));
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
    do
    {
        /* evaluate the 13 stages; k[n*13+i] = slope of component n at
           stage i */
        for (i = 0; i <= 12; i++)
        {
            tn = t + a[i] * h;
            for (n = 0; n <= dim - 1; n++)
            {
                y[n] = x[n];
                for (j = 0; j <= i-1; j++)
                    y[n] = y[n] + h * b[i][j] * k[n*13+j];
            }
            fun (tn,y,f);
            for (n = 0; n <= dim - 1; n++)
            {
                k[n*13+i] = f[n];
            }
        }
        /* embedded 7th/8th-order local error estimate */
        d = 0;
        for (n = 0; n <= dim - 1; n++)
        {
            d = d + fabs (41.0 / 840 * (k[n*13+0] + k[n*13+10]
                - k[n*13+11] - k[n*13+12]) * h);
        }
        reject = 0;
        if (autoadjust == 1 && d > err)
        {
            h = h/2.0;      /* too inaccurate: halve and redo the step */
            reject = 1;
        }
    } while (reject == 1);
    /* advance the state with the step that was actually evaluated */
    for (n = 0; n <= dim - 1; n++)
    {
        for (i = 0; i <= 12; i++)
            x[n] = x[n] + h * c[i] * k[n*13+i];
    }
    /* very accurate and step still small: enlarge for the NEXT call */
    if (autoadjust == 1 && (d < err * 1e-4) && (h < 5e-3))
        h = h*2.0;
    free (y);
    free (f);
    free (k);
    return h;
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* enlgr -
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double enlgr (double *x, double *y, int n, double t)
{
    /* Lagrange interpolation of the tabulated function y(x) at abscissa t.
         x : n ascending abscissae
         y : n ordinates
         n : number of points (returns 0 if n < 1, y[0] if n == 1,
             linear interpolation if n == 2)
       For n >= 3 the interpolating polynomial is built on a window of up
       to 8 points centred on t.
       Bug fix: the search loop tested x[i] < t before i < n, reading one
       element past the end of x whenever t exceeds every abscissa; the
       bounds check now comes first. */
    int i, j, k, m;
    double z, s;
    z = 0.0;
    if (n < 1)
        return (z);
    if (n == 1)
    {
        z = y[0];
        return (z);
    }
    if (n == 2)
    {
        z = (y[0] * (t - x[1]) - y[1] * (t - x[0])) / (x[0] - x[1]);
        return(z);
    }
    /* locate the first abscissa >= t (bounds check first!) */
    i = 0;
    while ((i < n) && (x[i] < t))
        i = i + 1;
    /* clamp the 8-point window [k, m] to the table */
    k = i - 4;
    if (k < 0)
        k = 0;
    m = i + 3;
    if (m > n - 1)
        m = n - 1;
    /* classic Lagrange basis-polynomial sum over the window */
    for (i = k; i <= m; i++)
    {
        s = 1.0;
        for (j = k; j <= m; j++)
        {
            if (j != i)
                s = s * (t - x[j]) / (x[i] - x[j]);
        }
        z = z + s * y[i];
    }
    return (z);
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* bssgj -
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
int bssgj (double *a,int n)
{
    /* In-place inversion of the n x n matrix a (row-major) by a compact
       Gauss-Jordan scheme that always pivots on a[0][0] and cyclically
       shifts the matrix, so after n sweeps a holds its inverse.
       Returns 2 on success, -2 if a (near-)zero pivot is encountered.
       NOTE(review): the final loop copies the lower triangle into the
       upper one, so the routine presumably targets symmetric (positive
       definite) matrices - the classic "bssgj" routine from the Chinese
       numerical-methods C library; confirm symmetry of the input. */
    int i, j, k, m;
    double w, g, *b;
    b = (double *)malloc (n * sizeof(double));
    for (k = 0; k <= n - 1; k++)
    {
        /* pivot: current top-left element */
        w = a[0];
        /* singularity guard: |w| + 1 == 1 means w is below machine eps */
        if (fabs (w) + 1.0 == 1.0)
        {
            free (b);
            printf ("fail\n");
            return (-2);
        }
        m = n - k - 1;
        for (i = 1; i <= n - 1; i++)
        {
            /* scaled first column; sign flips for not-yet-processed rows */
            g = a[i * n];
            b[i] = g / w;
            if (i <= m)
                b[i] = - b[i];
            /* rank-1 update, simultaneously shifting rows/cols up-left */
            for (j = 1; j <= i; j++)
                a[(i - 1) * n + j - 1] = a[i * n + j] + g * b[j];
        }
        a[n * n - 1] = 1.0 / w;
        for (i = 1; i <= n - 1; i++)
            a[(n - 1) * n + i - 1] = b[i];
    }
    /* restore symmetry: mirror the lower triangle into the upper */
    for (i=0; i<=n-2; i++)
    {
        for (j=i+1; j<=n-1; j++)
            a[i*n+j]=a[j*n+i];
    }
    free(b);
    return(2);
}
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
* iauDtdb - precise tdb-tt correction: better than +/- 3 nanoseconds
* @param1: description of param1
* @param2: description of param2
*
* version: 20 Aug 2010
*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double iauDtdb(double date1, double date2,
double ut, double elong, double u, double v)
/*
** - - - - - - - -
** i a u D t d b
** - - - - - - - -
**
** An approximation to TDB-TT, the difference between barycentric
** dynamical time and terrestrial time, for an observer on the Earth.
**
** The different time scales - proper, coordinate and realized - are
** related to each other:
**
** TAI <- physically realized
** :
** offset <- observed (nominally +32.184s)
** :
** TT <- terrestrial time
** :
** rate adjustment (L_G) <- definition of TT
** :
** TCG <- time scale for GCRS
** :
** "periodic" terms <- iauDtdb is an implementation
** :
** rate adjustment (L_C) <- function of solar-system ephemeris
** :
** TCB <- time scale for BCRS
** :
** rate adjustment (-L_B) <- definition of TDB
** :
** TDB <- TCB scaled to track TT
** :
** "periodic" terms <- -iau_DTDB is an approximation
** :
** TT <- terrestrial time
**
** Adopted values for the various constants can be found in the IERS
** Conventions (McCarthy & Petit 2003).
**
** This function is part of the International Astronomical Union's
** SOFA (Standards Of Fundamental Astronomy) software collection.
**
** Status: canonical model.
**
** Given:
** date1,date2 double date, TDB (Notes 1-3)
** ut double universal time (UT1, fraction of one day)
** elong double longitude (east positive, radians)
** u double distance from Earth spin axis (km)
** v double distance north of equatorial plane (km)
**
** Returned (function value):
** double TDB-TT (seconds)
**
** Notes:
**
** 1) The TT date date1+date2 is a Julian Date, apportioned in any
** convenient way between the two arguments. For example,
** JD(TT)=2450123.7 could be expressed in any of these ways,
** among others:
**
** date1 date2
**
** 2450123.7 0.0 (JD method)
** 2451545.0 -1421.3 (J2000 method)
** 2400000.5 50123.2 (MJD method)
** 2450123.5 0.2 (date & time method)
**
** The JD method is the most natural and convenient to use in
** cases where the loss of several decimal digits of resolution
** is acceptable. The J2000 method is best matched to the way
** the argument is handled internally and will deliver the
** optimum resolution. The MJD method and the date & time methods
** are both good compromises between resolution and convenience.
**
** Although the date is, formally, barycentric dynamical time (TDB),
** the terrestrial dynamical time (TT) can be used with no practical
** effect on the accuracy of the prediction.
**
** 2) TT can be regarded as a coordinate time that is realized as an
** offset of 32.184s from International Atomic Time, TAI. TT is a
** specific linear transformation of geocentric coordinate time TCG,
** which is the time scale for the Geocentric Celestial Reference
** System, GCRS.
**
** 3) TDB is a coordinate time, and is a specific linear transformation
** of barycentric coordinate time TCB, which is the time scale for
** the Barycentric Celestial Reference System, BCRS.
**
** 4) The difference TCG-TCB depends on the masses and positions of the
** bodies of the solar system and the velocity of the Earth. It is
** dominated by a rate difference, the residual being of a periodic
** character. The latter, which is modeled by the present function,
** comprises a main (annual) sinusoidal term of amplitude
** approximately 0.00166 seconds, plus planetary terms up to about
** 20 microseconds, and lunar and diurnal terms up to 2 microseconds.
** These effects come from the changing transverse Doppler effect
** and gravitational red-shift as the observer (on the Earth's
** surface) experiences variations in speed (with respect to the
** BCRS) and gravitational potential.
**
** 5) TDB can be regarded as the same as TCB but with a rate adjustment
** to keep it close to TT, which is convenient for many applications.
** The history of successive attempts to define TDB is set out in
** Resolution 3 adopted by the IAU General Assembly in 2006, which
** defines a fixed TDB(TCB) transformation that is consistent with
** contemporary solar-system ephemerides. Future ephemerides will
** imply slightly changed transformations between TCG and TCB, which
** could introduce a linear drift between TDB and TT; however, any
** such drift is unlikely to exceed 1 nanosecond per century.
**
** 6) The geocentric TDB-TT model used in the present function is that of
** Fairhead & Bretagnon (1990), in its full form. It was originally
** supplied by Fairhead (private communications with P.T.Wallace,
** 1990) as a Fortran subroutine. The present C function contains an
** adaptation of the Fairhead code. The numerical results are
** essentially unaffected by the changes, the differences with
** respect to the Fairhead & Bretagnon original being at the 1e-20 s
** level.
**
** The topocentric part of the model is from Moyer (1981) and
** Murray (1983), with fundamental arguments adapted from
** Simon et al. 1994. It is an approximation to the expression
** ( v / c ) . ( r / c ), where v is the barycentric velocity of
** the Earth, r is the geocentric position of the observer and
** c is the speed of light.
**
** By supplying zeroes for u and v, the topocentric part of the
** model can be nullified, and the function will return the Fairhead
** & Bretagnon result alone.
**
** 7) During the interval 1950-2050, the absolute accuracy is better
** than +/- 3 nanoseconds relative to time ephemerides obtained by
** direct numerical integrations based on the JPL DE405 solar system
** ephemeris.
**
** 8) It must be stressed that the present function is merely a model,
** and that numerical integration of solar-system ephemerides is the
** definitive method for predicting the relationship between TCG and
** TCB and hence between TT and TDB.
**
** References:
**
** Fairhead, L., & Bretagnon, P., Astron.Astrophys., 229, 240-247
** (1990).
**
** IAU 2006 Resolution 3.
**
** McCarthy, D. D., Petit, G. (eds.), IERS Conventions (2003),
** IERS Technical Note No. 32, BKG (2004)
**
** Moyer, T.D., Cel.Mech., 23, 33 (1981).
**
** Murray, C.A., Vectorial Astrometry, Adam Hilger (1983).
**
** Seidelmann, P.K. et al., Explanatory Supplement to the
** Astronomical Almanac, Chapter 2, University Science Books (1992).
**
** Simon, J.L., Bretagnon, P., Chapront, J., Chapront-Touze, M.,
** Francou, G. & Laskar, J., Astron.Astrophys., 282, 663-683 (1994).
**
** This revision: 2008 May 24
**
** Copyright (C) 2008 IAU SOFA Review Board. See notes at end.
*/
{
double t, tsol, w, elsun, emsun, d, elj, els, wt, w0, w1, w2, w3, w4,
wf, wj;
int j;
/*
** =====================
** Fairhead et al. model
** =====================
**
** 787 sets of three coefficients.
**
** Each set is
** amplitude (microseconds)
** frequency (radians per Julian millennium since J2000)
** phase (radians)
**
** Sets 1-474 are the T**0 terms
** " 475-679 " " T**1
** " 680-764 " " T**2
** " 765-784 " " T**3
** " 785-787 " " T**4
*/
static const double fairhd[787][3] = {
/* 1, 10 */
{ 1656.674564e-6, 6283.075849991, 6.240054195 },
{ 22.417471e-6, 5753.384884897, 4.296977442 },
{ 13.839792e-6, 12566.151699983, 6.196904410 },
{ 4.770086e-6, 529.690965095, 0.444401603 },
{ 4.676740e-6, 6069.776754553, 4.021195093 },
{ 2.256707e-6, 213.299095438, 5.543113262 },
{ 1.694205e-6, -3.523118349, 5.025132748 },
{ 1.554905e-6, 77713.771467920, 5.198467090 },
{ 1.276839e-6, 7860.419392439, 5.988822341 },
{ 1.193379e-6, 5223.693919802, 3.649823730 },
/* 11, 20 */
{ 1.115322e-6, 3930.209696220, 1.422745069 },
{ 0.794185e-6, 11506.769769794, 2.322313077 },
{ 0.447061e-6, 26.298319800, 3.615796498 },
{ 0.435206e-6, -398.149003408, 4.349338347 },
{ 0.600309e-6, 1577.343542448, 2.678271909 },
{ 0.496817e-6, 6208.294251424, 5.696701824 },
{ 0.486306e-6, 5884.926846583, 0.520007179 },
{ 0.432392e-6, 74.781598567, 2.435898309 },
{ 0.468597e-6, 6244.942814354, 5.866398759 },
{ 0.375510e-6, 5507.553238667, 4.103476804 },
/* 21, 30 */
{ 0.243085e-6, -775.522611324, 3.651837925 },
{ 0.173435e-6, 18849.227549974, 6.153743485 },
{ 0.230685e-6, 5856.477659115, 4.773852582 },
{ 0.203747e-6, 12036.460734888, 4.333987818 },
{ 0.143935e-6, -796.298006816, 5.957517795 },
{ 0.159080e-6, 10977.078804699, 1.890075226 },
{ 0.119979e-6, 38.133035638, 4.551585768 },
{ 0.118971e-6, 5486.777843175, 1.914547226 },
{ 0.116120e-6, 1059.381930189, 0.873504123 },
{ 0.137927e-6, 11790.629088659, 1.135934669 },
/* 31, 40 */
{ 0.098358e-6, 2544.314419883, 0.092793886 },
{ 0.101868e-6, -5573.142801634, 5.984503847 },
{ 0.080164e-6, 206.185548437, 2.095377709 },
{ 0.079645e-6, 4694.002954708, 2.949233637 },
{ 0.062617e-6, 20.775395492, 2.654394814 },
{ 0.075019e-6, 2942.463423292, 4.980931759 },
{ 0.064397e-6, 5746.271337896, 1.280308748 },
{ 0.063814e-6, 5760.498431898, 4.167901731 },
{ 0.048042e-6, 2146.165416475, 1.495846011 },
{ 0.048373e-6, 155.420399434, 2.251573730 },
/* 41, 50 */
{ 0.058844e-6, 426.598190876, 4.839650148 },
{ 0.046551e-6, -0.980321068, 0.921573539 },
{ 0.054139e-6, 17260.154654690, 3.411091093 },
{ 0.042411e-6, 6275.962302991, 2.869567043 },
{ 0.040184e-6, -7.113547001, 3.565975565 },
{ 0.036564e-6, 5088.628839767, 3.324679049 },
{ 0.040759e-6, 12352.852604545, 3.981496998 },
{ 0.036507e-6, 801.820931124, 6.248866009 },
{ 0.036955e-6, 3154.687084896, 5.071801441 },
{ 0.042732e-6, 632.783739313, 5.720622217 },
/* 51, 60 */
{ 0.042560e-6, 161000.685737473, 1.270837679 },
{ 0.040480e-6, 15720.838784878, 2.546610123 },
{ 0.028244e-6, -6286.598968340, 5.069663519 },
{ 0.033477e-6, 6062.663207553, 4.144987272 },
{ 0.034867e-6, 522.577418094, 5.210064075 },
{ 0.032438e-6, 6076.890301554, 0.749317412 },
{ 0.030215e-6, 7084.896781115, 3.389610345 },
{ 0.029247e-6, -71430.695617928, 4.183178762 },
{ 0.033529e-6, 9437.762934887, 2.404714239 },
{ 0.032423e-6, 8827.390269875, 5.541473556 },
/* 61, 70 */
{ 0.027567e-6, 6279.552731642, 5.040846034 },
{ 0.029862e-6, 12139.553509107, 1.770181024 },
{ 0.022509e-6, 10447.387839604, 1.460726241 },
{ 0.020937e-6, 8429.241266467, 0.652303414 },
{ 0.020322e-6, 419.484643875, 3.735430632 },
{ 0.024816e-6, -1194.447010225, 1.087136918 },
{ 0.025196e-6, 1748.016413067, 2.901883301 },
{ 0.021691e-6, 14143.495242431, 5.952658009 },
{ 0.017673e-6, 6812.766815086, 3.186129845 },
{ 0.022567e-6, 6133.512652857, 3.307984806 },
/* 71, 80 */
{ 0.016155e-6, 10213.285546211, 1.331103168 },
{ 0.014751e-6, 1349.867409659, 4.308933301 },
{ 0.015949e-6, -220.412642439, 4.005298270 },
{ 0.015974e-6, -2352.866153772, 6.145309371 },
{ 0.014223e-6, 17789.845619785, 2.104551349 },
{ 0.017806e-6, 73.297125859, 3.475975097 },
{ 0.013671e-6, -536.804512095, 5.971672571 },
{ 0.011942e-6, 8031.092263058, 2.053414715 },
{ 0.014318e-6, 16730.463689596, 3.016058075 },
{ 0.012462e-6, 103.092774219, 1.737438797 },
/* 81, 90 */
{ 0.010962e-6, 3.590428652, 2.196567739 },
{ 0.015078e-6, 19651.048481098, 3.969480770 },
{ 0.010396e-6, 951.718406251, 5.717799605 },
{ 0.011707e-6, -4705.732307544, 2.654125618 },
{ 0.010453e-6, 5863.591206116, 1.913704550 },
{ 0.012420e-6, 4690.479836359, 4.734090399 },
{ 0.011847e-6, 5643.178563677, 5.489005403 },
{ 0.008610e-6, 3340.612426700, 3.661698944 },
{ 0.011622e-6, 5120.601145584, 4.863931876 },
{ 0.010825e-6, 553.569402842, 0.842715011 },
/* 91, 100 */
{ 0.008666e-6, -135.065080035, 3.293406547 },
{ 0.009963e-6, 149.563197135, 4.870690598 },
{ 0.009858e-6, 6309.374169791, 1.061816410 },
{ 0.007959e-6, 316.391869657, 2.465042647 },
{ 0.010099e-6, 283.859318865, 1.942176992 },
{ 0.007147e-6, -242.728603974, 3.661486981 },
{ 0.007505e-6, 5230.807466803, 4.920937029 },
{ 0.008323e-6, 11769.853693166, 1.229392026 },
{ 0.007490e-6, -6256.777530192, 3.658444681 },
{ 0.009370e-6, 149854.400134205, 0.673880395 },
/* 101, 110 */
{ 0.007117e-6, 38.027672636, 5.294249518 },
{ 0.007857e-6, 12168.002696575, 0.525733528 },
{ 0.007019e-6, 6206.809778716, 0.837688810 },
{ 0.006056e-6, 955.599741609, 4.194535082 },
{ 0.008107e-6, 13367.972631107, 3.793235253 },
{ 0.006731e-6, 5650.292110678, 5.639906583 },
{ 0.007332e-6, 36.648562930, 0.114858677 },
{ 0.006366e-6, 4164.311989613, 2.262081818 },
{ 0.006858e-6, 5216.580372801, 0.642063318 },
{ 0.006919e-6, 6681.224853400, 6.018501522 },
/* 111, 120 */
{ 0.006826e-6, 7632.943259650, 3.458654112 },
{ 0.005308e-6, -1592.596013633, 2.500382359 },
{ 0.005096e-6, 11371.704689758, 2.547107806 },
{ 0.004841e-6, 5333.900241022, 0.437078094 },
{ 0.005582e-6, 5966.683980335, 2.246174308 },
{ 0.006304e-6, 11926.254413669, 2.512929171 },
{ 0.006603e-6, 23581.258177318, 5.393136889 },
{ 0.005123e-6, -1.484472708, 2.999641028 },
{ 0.004648e-6, 1589.072895284, 1.275847090 },
{ 0.005119e-6, 6438.496249426, 1.486539246 },
/* 121, 130 */
{ 0.004521e-6, 4292.330832950, 6.140635794 },
{ 0.005680e-6, 23013.539539587, 4.557814849 },
{ 0.005488e-6, -3.455808046, 0.090675389 },
{ 0.004193e-6, 7234.794256242, 4.869091389 },
{ 0.003742e-6, 7238.675591600, 4.691976180 },
{ 0.004148e-6, -110.206321219, 3.016173439 },
{ 0.004553e-6, 11499.656222793, 5.554998314 },
{ 0.004892e-6, 5436.993015240, 1.475415597 },
{ 0.004044e-6, 4732.030627343, 1.398784824 },
{ 0.004164e-6, 12491.370101415, 5.650931916 },
/* 131, 140 */
{ 0.004349e-6, 11513.883316794, 2.181745369 },
{ 0.003919e-6, 12528.018664345, 5.823319737 },
{ 0.003129e-6, 6836.645252834, 0.003844094 },
{ 0.004080e-6, -7058.598461315, 3.690360123 },
{ 0.003270e-6, 76.266071276, 1.517189902 },
{ 0.002954e-6, 6283.143160294, 4.447203799 },
{ 0.002872e-6, 28.449187468, 1.158692983 },
{ 0.002881e-6, 735.876513532, 0.349250250 },
{ 0.003279e-6, 5849.364112115, 4.893384368 },
{ 0.003625e-6, 6209.778724132, 1.473760578 },
/* 141, 150 */
{ 0.003074e-6, 949.175608970, 5.185878737 },
{ 0.002775e-6, 9917.696874510, 1.030026325 },
{ 0.002646e-6, 10973.555686350, 3.918259169 },
{ 0.002575e-6, 25132.303399966, 6.109659023 },
{ 0.003500e-6, 263.083923373, 1.892100742 },
{ 0.002740e-6, 18319.536584880, 4.320519510 },
{ 0.002464e-6, 202.253395174, 4.698203059 },
{ 0.002409e-6, 2.542797281, 5.325009315 },
{ 0.003354e-6, -90955.551694697, 1.942656623 },
{ 0.002296e-6, 6496.374945429, 5.061810696 },
/* 151, 160 */
{ 0.003002e-6, 6172.869528772, 2.797822767 },
{ 0.003202e-6, 27511.467873537, 0.531673101 },
{ 0.002954e-6, -6283.008539689, 4.533471191 },
{ 0.002353e-6, 639.897286314, 3.734548088 },
{ 0.002401e-6, 16200.772724501, 2.605547070 },
{ 0.003053e-6, 233141.314403759, 3.029030662 },
{ 0.003024e-6, 83286.914269554, 2.355556099 },
{ 0.002863e-6, 17298.182327326, 5.240963796 },
{ 0.002103e-6, -7079.373856808, 5.756641637 },
{ 0.002303e-6, 83996.847317911, 2.013686814 },
/* 161, 170 */
{ 0.002303e-6, 18073.704938650, 1.089100410 },
{ 0.002381e-6, 63.735898303, 0.759188178 },
{ 0.002493e-6, 6386.168624210, 0.645026535 },
{ 0.002366e-6, 3.932153263, 6.215885448 },
{ 0.002169e-6, 11015.106477335, 4.845297676 },
{ 0.002397e-6, 6243.458341645, 3.809290043 },
{ 0.002183e-6, 1162.474704408, 6.179611691 },
{ 0.002353e-6, 6246.427287062, 4.781719760 },
{ 0.002199e-6, -245.831646229, 5.956152284 },
{ 0.001729e-6, 3894.181829542, 1.264976635 },
/* 171, 180 */
{ 0.001896e-6, -3128.388765096, 4.914231596 },
{ 0.002085e-6, 35.164090221, 1.405158503 },
{ 0.002024e-6, 14712.317116458, 2.752035928 },
{ 0.001737e-6, 6290.189396992, 5.280820144 },
{ 0.002229e-6, 491.557929457, 1.571007057 },
{ 0.001602e-6, 14314.168113050, 4.203664806 },
{ 0.002186e-6, 454.909366527, 1.402101526 },
{ 0.001897e-6, 22483.848574493, 4.167932508 },
{ 0.001825e-6, -3738.761430108, 0.545828785 },
{ 0.001894e-6, 1052.268383188, 5.817167450 },
/* 181, 190 */
{ 0.001421e-6, 20.355319399, 2.419886601 },
{ 0.001408e-6, 10984.192351700, 2.732084787 },
{ 0.001847e-6, 10873.986030480, 2.903477885 },
{ 0.001391e-6, -8635.942003763, 0.593891500 },
{ 0.001388e-6, -7.046236698, 1.166145902 },
{ 0.001810e-6, -88860.057071188, 0.487355242 },
{ 0.001288e-6, -1990.745017041, 3.913022880 },
{ 0.001297e-6, 23543.230504682, 3.063805171 },
{ 0.001335e-6, -266.607041722, 3.995764039 },
{ 0.001376e-6, 10969.965257698, 5.152914309 },
/* 191, 200 */
{ 0.001745e-6, 244287.600007027, 3.626395673 },
{ 0.001649e-6, 31441.677569757, 1.952049260 },
{ 0.001416e-6, 9225.539273283, 4.996408389 },
{ 0.001238e-6, 4804.209275927, 5.503379738 },
{ 0.001472e-6, 4590.910180489, 4.164913291 },
{ 0.001169e-6, 6040.347246017, 5.841719038 },
{ 0.001039e-6, 5540.085789459, 2.769753519 },
{ 0.001004e-6, -170.672870619, 0.755008103 },
{ 0.001284e-6, 10575.406682942, 5.306538209 },
{ 0.001278e-6, 71.812653151, 4.713486491 },
/* 201, 210 */
{ 0.001321e-6, 18209.330263660, 2.624866359 },
{ 0.001297e-6, 21228.392023546, 0.382603541 },
{ 0.000954e-6, 6282.095528923, 0.882213514 },
{ 0.001145e-6, 6058.731054289, 1.169483931 },
{ 0.000979e-6, 5547.199336460, 5.448375984 },
{ 0.000987e-6, -6262.300454499, 2.656486959 },
{ 0.001070e-6, -154717.609887482, 1.827624012 },
{ 0.000991e-6, 4701.116501708, 4.387001801 },
{ 0.001155e-6, -14.227094002, 3.042700750 },
{ 0.001176e-6, 277.034993741, 3.335519004 },
/* 211, 220 */
{ 0.000890e-6, 13916.019109642, 5.601498297 },
{ 0.000884e-6, -1551.045222648, 1.088831705 },
{ 0.000876e-6, 5017.508371365, 3.969902609 },
{ 0.000806e-6, 15110.466119866, 5.142876744 },
{ 0.000773e-6, -4136.910433516, 0.022067765 },
{ 0.001077e-6, 175.166059800, 1.844913056 },
{ 0.000954e-6, -6284.056171060, 0.968480906 },
{ 0.000737e-6, 5326.786694021, 4.923831588 },
{ 0.000845e-6, -433.711737877, 4.749245231 },
{ 0.000819e-6, 8662.240323563, 5.991247817 },
/* 221, 230 */
{ 0.000852e-6, 199.072001436, 2.189604979 },
{ 0.000723e-6, 17256.631536341, 6.068719637 },
{ 0.000940e-6, 6037.244203762, 6.197428148 },
{ 0.000885e-6, 11712.955318231, 3.280414875 },
{ 0.000706e-6, 12559.038152982, 2.824848947 },
{ 0.000732e-6, 2379.164473572, 2.501813417 },
{ 0.000764e-6, -6127.655450557, 2.236346329 },
{ 0.000908e-6, 131.541961686, 2.521257490 },
{ 0.000907e-6, 35371.887265976, 3.370195967 },
{ 0.000673e-6, 1066.495477190, 3.876512374 },
/* 231, 240 */
{ 0.000814e-6, 17654.780539750, 4.627122566 },
{ 0.000630e-6, 36.027866677, 0.156368499 },
{ 0.000798e-6, 515.463871093, 5.151962502 },
{ 0.000798e-6, 148.078724426, 5.909225055 },
{ 0.000806e-6, 309.278322656, 6.054064447 },
{ 0.000607e-6, -39.617508346, 2.839021623 },
{ 0.000601e-6, 412.371096874, 3.984225404 },
{ 0.000646e-6, 11403.676995575, 3.852959484 },
{ 0.000704e-6, 13521.751441591, 2.300991267 },
{ 0.000603e-6, -65147.619767937, 4.140083146 },
/* 241, 250 */
{ 0.000609e-6, 10177.257679534, 0.437122327 },
{ 0.000631e-6, 5767.611978898, 4.026532329 },
{ 0.000576e-6, 11087.285125918, 4.760293101 },
{ 0.000674e-6, 14945.316173554, 6.270510511 },
{ 0.000726e-6, 5429.879468239, 6.039606892 },
{ 0.000710e-6, 28766.924424484, 5.672617711 },
{ 0.000647e-6, 11856.218651625, 3.397132627 },
{ 0.000678e-6, -5481.254918868, 6.249666675 },
{ 0.000618e-6, 22003.914634870, 2.466427018 },
{ 0.000738e-6, 6134.997125565, 2.242668890 },
/* 251, 260 */
{ 0.000660e-6, 625.670192312, 5.864091907 },
{ 0.000694e-6, 3496.032826134, 2.668309141 },
{ 0.000531e-6, 6489.261398429, 1.681888780 },
{ 0.000611e-6, -143571.324284214, 2.424978312 },
{ 0.000575e-6, 12043.574281889, 4.216492400 },
{ 0.000553e-6, 12416.588502848, 4.772158039 },
{ 0.000689e-6, 4686.889407707, 6.224271088 },
{ 0.000495e-6, 7342.457780181, 3.817285811 },
{ 0.000567e-6, 3634.621024518, 1.649264690 },
{ 0.000515e-6, 18635.928454536, 3.945345892 },
/* 261, 270 */
{ 0.000486e-6, -323.505416657, 4.061673868 },
{ 0.000662e-6, 25158.601719765, 1.794058369 },
{ 0.000509e-6, 846.082834751, 3.053874588 },
{ 0.000472e-6, -12569.674818332, 5.112133338 },
{ 0.000461e-6, 6179.983075773, 0.513669325 },
{ 0.000641e-6, 83467.156352816, 3.210727723 },
{ 0.000520e-6, 10344.295065386, 2.445597761 },
{ 0.000493e-6, 18422.629359098, 1.676939306 },
{ 0.000478e-6, 1265.567478626, 5.487314569 },
{ 0.000472e-6, -18.159247265, 1.999707589 },
/* 271, 280 */
{ 0.000559e-6, 11190.377900137, 5.783236356 },
{ 0.000494e-6, 9623.688276691, 3.022645053 },
{ 0.000463e-6, 5739.157790895, 1.411223013 },
{ 0.000432e-6, 16858.482532933, 1.179256434 },
{ 0.000574e-6, 72140.628666286, 1.758191830 },
{ 0.000484e-6, 17267.268201691, 3.290589143 },
{ 0.000550e-6, 4907.302050146, 0.864024298 },
{ 0.000399e-6, 14.977853527, 2.094441910 },
{ 0.000491e-6, 224.344795702, 0.878372791 },
{ 0.000432e-6, 20426.571092422, 6.003829241 },
/* 281, 290 */
{ 0.000481e-6, 5749.452731634, 4.309591964 },
{ 0.000480e-6, 5757.317038160, 1.142348571 },
{ 0.000485e-6, 6702.560493867, 0.210580917 },
{ 0.000426e-6, 6055.549660552, 4.274476529 },
{ 0.000480e-6, 5959.570433334, 5.031351030 },
{ 0.000466e-6, 12562.628581634, 4.959581597 },
{ 0.000520e-6, 39302.096962196, 4.788002889 },
{ 0.000458e-6, 12132.439962106, 1.880103788 },
{ 0.000470e-6, 12029.347187887, 1.405611197 },
{ 0.000416e-6, -7477.522860216, 1.082356330 },
/* 291, 300 */
{ 0.000449e-6, 11609.862544012, 4.179989585 },
{ 0.000465e-6, 17253.041107690, 0.353496295 },
{ 0.000362e-6, -4535.059436924, 1.583849576 },
{ 0.000383e-6, 21954.157609398, 3.747376371 },
{ 0.000389e-6, 17.252277143, 1.395753179 },
{ 0.000331e-6, 18052.929543158, 0.566790582 },
{ 0.000430e-6, 13517.870106233, 0.685827538 },
{ 0.000368e-6, -5756.908003246, 0.731374317 },
{ 0.000330e-6, 10557.594160824, 3.710043680 },
{ 0.000332e-6, 20199.094959633, 1.652901407 },
/* 301, 310 */
{ 0.000384e-6, 11933.367960670, 5.827781531 },
{ 0.000387e-6, 10454.501386605, 2.541182564 },
{ 0.000325e-6, 15671.081759407, 2.178850542 },
{ 0.000318e-6, 138.517496871, 2.253253037 },
{ 0.000305e-6, 9388.005909415, 0.578340206 },
{ 0.000352e-6, 5749.861766548, 3.000297967 },
{ 0.000311e-6, 6915.859589305, 1.693574249 },
{ 0.000297e-6, 24072.921469776, 1.997249392 },
{ 0.000363e-6, -640.877607382, 5.071820966 },
{ 0.000323e-6, 12592.450019783, 1.072262823 },
/* 311, 320 */
{ 0.000341e-6, 12146.667056108, 4.700657997 },
{ 0.000290e-6, 9779.108676125, 1.812320441 },
{ 0.000342e-6, 6132.028180148, 4.322238614 },
{ 0.000329e-6, 6268.848755990, 3.033827743 },
{ 0.000374e-6, 17996.031168222, 3.388716544 },
{ 0.000285e-6, -533.214083444, 4.687313233 },
{ 0.000338e-6, 6065.844601290, 0.877776108 },
{ 0.000276e-6, 24.298513841, 0.770299429 },
{ 0.000336e-6, -2388.894020449, 5.353796034 },
{ 0.000290e-6, 3097.883822726, 4.075291557 },
/* 321, 330 */
{ 0.000318e-6, 709.933048357, 5.941207518 },
{ 0.000271e-6, 13095.842665077, 3.208912203 },
{ 0.000331e-6, 6073.708907816, 4.007881169 },
{ 0.000292e-6, 742.990060533, 2.714333592 },
{ 0.000362e-6, 29088.811415985, 3.215977013 },
{ 0.000280e-6, 12359.966151546, 0.710872502 },
{ 0.000267e-6, 10440.274292604, 4.730108488 },
{ 0.000262e-6, 838.969287750, 1.327720272 },
{ 0.000250e-6, 16496.361396202, 0.898769761 },
{ 0.000325e-6, 20597.243963041, 0.180044365 },
/* 331, 340 */
{ 0.000268e-6, 6148.010769956, 5.152666276 },
{ 0.000284e-6, 5636.065016677, 5.655385808 },
{ 0.000301e-6, 6080.822454817, 2.135396205 },
{ 0.000294e-6, -377.373607916, 3.708784168 },
{ 0.000236e-6, 2118.763860378, 1.733578756 },
{ 0.000234e-6, 5867.523359379, 5.575209112 },
{ 0.000268e-6, -226858.238553767, 0.069432392 },
{ 0.000265e-6, 167283.761587465, 4.369302826 },
{ 0.000280e-6, 28237.233459389, 5.304829118 },
{ 0.000292e-6, 12345.739057544, 4.096094132 },
/* 341, 350 */
{ 0.000223e-6, 19800.945956225, 3.069327406 },
{ 0.000301e-6, 43232.306658416, 6.205311188 },
{ 0.000264e-6, 18875.525869774, 1.417263408 },
{ 0.000304e-6, -1823.175188677, 3.409035232 },
{ 0.000301e-6, 109.945688789, 0.510922054 },
{ 0.000260e-6, 813.550283960, 2.389438934 },
{ 0.000299e-6, 316428.228673312, 5.384595078 },
{ 0.000211e-6, 5756.566278634, 3.789392838 },
{ 0.000209e-6, 5750.203491159, 1.661943545 },
{ 0.000240e-6, 12489.885628707, 5.684549045 },
/* 351, 360 */
{ 0.000216e-6, 6303.851245484, 3.862942261 },
{ 0.000203e-6, 1581.959348283, 5.549853589 },
{ 0.000200e-6, 5642.198242609, 1.016115785 },
{ 0.000197e-6, -70.849445304, 4.690702525 },
{ 0.000227e-6, 6287.008003254, 2.911891613 },
{ 0.000197e-6, 533.623118358, 1.048982898 },
{ 0.000205e-6, -6279.485421340, 1.829362730 },
{ 0.000209e-6, -10988.808157535, 2.636140084 },
{ 0.000208e-6, -227.526189440, 4.127883842 },
{ 0.000191e-6, 415.552490612, 4.401165650 },
/* 361, 370 */
{ 0.000190e-6, 29296.615389579, 4.175658539 },
{ 0.000264e-6, 66567.485864652, 4.601102551 },
{ 0.000256e-6, -3646.350377354, 0.506364778 },
{ 0.000188e-6, 13119.721102825, 2.032195842 },
{ 0.000185e-6, -209.366942175, 4.694756586 },
{ 0.000198e-6, 25934.124331089, 3.832703118 },
{ 0.000195e-6, 4061.219215394, 3.308463427 },
{ 0.000234e-6, 5113.487598583, 1.716090661 },
{ 0.000188e-6, 1478.866574064, 5.686865780 },
{ 0.000222e-6, 11823.161639450, 1.942386641 },
/* 371, 380 */
{ 0.000181e-6, 10770.893256262, 1.999482059 },
{ 0.000171e-6, 6546.159773364, 1.182807992 },
{ 0.000206e-6, 70.328180442, 5.934076062 },
{ 0.000169e-6, 20995.392966449, 2.169080622 },
{ 0.000191e-6, 10660.686935042, 5.405515999 },
{ 0.000228e-6, 33019.021112205, 4.656985514 },
{ 0.000184e-6, -4933.208440333, 3.327476868 },
{ 0.000220e-6, -135.625325010, 1.765430262 },
{ 0.000166e-6, 23141.558382925, 3.454132746 },
{ 0.000191e-6, 6144.558353121, 5.020393445 },
/* 381, 390 */
{ 0.000180e-6, 6084.003848555, 0.602182191 },
{ 0.000163e-6, 17782.732072784, 4.960593133 },
{ 0.000225e-6, 16460.333529525, 2.596451817 },
{ 0.000222e-6, 5905.702242076, 3.731990323 },
{ 0.000204e-6, 227.476132789, 5.636192701 },
{ 0.000159e-6, 16737.577236597, 3.600691544 },
{ 0.000200e-6, 6805.653268085, 0.868220961 },
{ 0.000187e-6, 11919.140866668, 2.629456641 },
{ 0.000161e-6, 127.471796607, 2.862574720 },
{ 0.000205e-6, 6286.666278643, 1.742882331 },
/* 391, 400 */
{ 0.000189e-6, 153.778810485, 4.812372643 },
{ 0.000168e-6, 16723.350142595, 0.027860588 },
{ 0.000149e-6, 11720.068865232, 0.659721876 },
{ 0.000189e-6, 5237.921013804, 5.245313000 },
{ 0.000143e-6, 6709.674040867, 4.317625647 },
{ 0.000146e-6, 4487.817406270, 4.815297007 },
{ 0.000144e-6, -664.756045130, 5.381366880 },
{ 0.000175e-6, 5127.714692584, 4.728443327 },
{ 0.000162e-6, 6254.626662524, 1.435132069 },
{ 0.000187e-6, 47162.516354635, 1.354371923 },
/* 401, 410 */
{ 0.000146e-6, 11080.171578918, 3.369695406 },
{ 0.000180e-6, -348.924420448, 2.490902145 },
{ 0.000148e-6, 151.047669843, 3.799109588 },
{ 0.000157e-6, 6197.248551160, 1.284375887 },
{ 0.000167e-6, 146.594251718, 0.759969109 },
{ 0.000133e-6, -5331.357443741, 5.409701889 },
{ 0.000154e-6, 95.979227218, 3.366890614 },
{ 0.000148e-6, -6418.140930027, 3.384104996 },
{ 0.000128e-6, -6525.804453965, 3.803419985 },
{ 0.000130e-6, 11293.470674356, 0.939039445 },
/* 411, 420 */
{ 0.000152e-6, -5729.506447149, 0.734117523 },
{ 0.000138e-6, 210.117701700, 2.564216078 },
{ 0.000123e-6, 6066.595360816, 4.517099537 },
{ 0.000140e-6, 18451.078546566, 0.642049130 },
{ 0.000126e-6, 11300.584221356, 3.485280663 },
{ 0.000119e-6, 10027.903195729, 3.217431161 },
{ 0.000151e-6, 4274.518310832, 4.404359108 },
{ 0.000117e-6, 6072.958148291, 0.366324650 },
{ 0.000165e-6, -7668.637425143, 4.298212528 },
{ 0.000117e-6, -6245.048177356, 5.379518958 },
/* 421, 430 */
{ 0.000130e-6, -5888.449964932, 4.527681115 },
{ 0.000121e-6, -543.918059096, 6.109429504 },
{ 0.000162e-6, 9683.594581116, 5.720092446 },
{ 0.000141e-6, 6219.339951688, 0.679068671 },
{ 0.000118e-6, 22743.409379516, 4.881123092 },
{ 0.000129e-6, 1692.165669502, 0.351407289 },
{ 0.000126e-6, 5657.405657679, 5.146592349 },
{ 0.000114e-6, 728.762966531, 0.520791814 },
{ 0.000120e-6, 52.596639600, 0.948516300 },
{ 0.000115e-6, 65.220371012, 3.504914846 },
/* 431, 440 */
{ 0.000126e-6, 5881.403728234, 5.577502482 },
{ 0.000158e-6, 163096.180360983, 2.957128968 },
{ 0.000134e-6, 12341.806904281, 2.598576764 },
{ 0.000151e-6, 16627.370915377, 3.985702050 },
{ 0.000109e-6, 1368.660252845, 0.014730471 },
{ 0.000131e-6, 6211.263196841, 0.085077024 },
{ 0.000146e-6, 5792.741760812, 0.708426604 },
{ 0.000146e-6, -77.750543984, 3.121576600 },
{ 0.000107e-6, 5341.013788022, 0.288231904 },
{ 0.000138e-6, 6281.591377283, 2.797450317 },
/* 441, 450 */
{ 0.000113e-6, -6277.552925684, 2.788904128 },
{ 0.000115e-6, -525.758811831, 5.895222200 },
{ 0.000138e-6, 6016.468808270, 6.096188999 },
{ 0.000139e-6, 23539.707386333, 2.028195445 },
{ 0.000146e-6, -4176.041342449, 4.660008502 },
{ 0.000107e-6, 16062.184526117, 4.066520001 },
{ 0.000142e-6, 83783.548222473, 2.936315115 },
{ 0.000128e-6, 9380.959672717, 3.223844306 },
{ 0.000135e-6, 6205.325306007, 1.638054048 },
{ 0.000101e-6, 2699.734819318, 5.481603249 },
/* 451, 460 */
{ 0.000104e-6, -568.821874027, 2.205734493 },
{ 0.000103e-6, 6321.103522627, 2.440421099 },
{ 0.000119e-6, 6321.208885629, 2.547496264 },
{ 0.000138e-6, 1975.492545856, 2.314608466 },
{ 0.000121e-6, 137.033024162, 4.539108237 },
{ 0.000123e-6, 19402.796952817, 4.538074405 },
{ 0.000119e-6, 22805.735565994, 2.869040566 },
{ 0.000133e-6, 64471.991241142, 6.056405489 },
{ 0.000129e-6, -85.827298831, 2.540635083 },
{ 0.000131e-6, 13613.804277336, 4.005732868 },
/* 461, 470 */
{ 0.000104e-6, 9814.604100291, 1.959967212 },
{ 0.000112e-6, 16097.679950283, 3.589026260 },
{ 0.000123e-6, 2107.034507542, 1.728627253 },
{ 0.000121e-6, 36949.230808424, 6.072332087 },
{ 0.000108e-6, -12539.853380183, 3.716133846 },
{ 0.000113e-6, -7875.671863624, 2.725771122 },
{ 0.000109e-6, 4171.425536614, 4.033338079 },
{ 0.000101e-6, 6247.911759770, 3.441347021 },
{ 0.000113e-6, 7330.728427345, 0.656372122 },
{ 0.000113e-6, 51092.726050855, 2.791483066 },
/* 471, 480 */
{ 0.000106e-6, 5621.842923210, 1.815323326 },
{ 0.000101e-6, 111.430161497, 5.711033677 },
{ 0.000103e-6, 909.818733055, 2.812745443 },
{ 0.000101e-6, 1790.642637886, 1.965746028 },
/* T */
{ 102.156724e-6, 6283.075849991, 4.249032005 },
{ 1.706807e-6, 12566.151699983, 4.205904248 },
{ 0.269668e-6, 213.299095438, 3.400290479 },
{ 0.265919e-6, 529.690965095, 5.836047367 },
{ 0.210568e-6, -3.523118349, 6.262738348 },
{ 0.077996e-6, 5223.693919802, 4.670344204 },
/* 481, 490 */
{ 0.054764e-6, 1577.343542448, 4.534800170 },
{ 0.059146e-6, 26.298319800, 1.083044735 },
{ 0.034420e-6, -398.149003408, 5.980077351 },
{ 0.032088e-6, 18849.227549974, 4.162913471 },
{ 0.033595e-6, 5507.553238667, 5.980162321 },
{ 0.029198e-6, 5856.477659115, 0.623811863 },
{ 0.027764e-6, 155.420399434, 3.745318113 },
{ 0.025190e-6, 5746.271337896, 2.980330535 },
{ 0.022997e-6, -796.298006816, 1.174411803 },
{ 0.024976e-6, 5760.498431898, 2.467913690 },
/* 491, 500 */
{ 0.021774e-6, 206.185548437, 3.854787540 },
{ 0.017925e-6, -775.522611324, 1.092065955 },
{ 0.013794e-6, 426.598190876, 2.699831988 },
{ 0.013276e-6, 6062.663207553, 5.845801920 },
{ 0.011774e-6, 12036.460734888, 2.292832062 },
{ 0.012869e-6, 6076.890301554, 5.333425680 },
{ 0.012152e-6, 1059.381930189, 6.222874454 },
{ 0.011081e-6, -7.113547001, 5.154724984 },
{ 0.010143e-6, 4694.002954708, 4.044013795 },
{ 0.009357e-6, 5486.777843175, 3.416081409 },
/* 501, 510 */
{ 0.010084e-6, 522.577418094, 0.749320262 },
{ 0.008587e-6, 10977.078804699, 2.777152598 },
{ 0.008628e-6, 6275.962302991, 4.562060226 },
{ 0.008158e-6, -220.412642439, 5.806891533 },
{ 0.007746e-6, 2544.314419883, 1.603197066 },
{ 0.007670e-6, 2146.165416475, 3.000200440 },
{ 0.007098e-6, 74.781598567, 0.443725817 },
{ 0.006180e-6, -536.804512095, 1.302642751 },
{ 0.005818e-6, 5088.628839767, 4.827723531 },
{ 0.004945e-6, -6286.598968340, 0.268305170 },
/* 511, 520 */
{ 0.004774e-6, 1349.867409659, 5.808636673 },
{ 0.004687e-6, -242.728603974, 5.154890570 },
{ 0.006089e-6, 1748.016413067, 4.403765209 },
{ 0.005975e-6, -1194.447010225, 2.583472591 },
{ 0.004229e-6, 951.718406251, 0.931172179 },
{ 0.005264e-6, 553.569402842, 2.336107252 },
{ 0.003049e-6, 5643.178563677, 1.362634430 },
{ 0.002974e-6, 6812.766815086, 1.583012668 },
{ 0.003403e-6, -2352.866153772, 2.552189886 },
{ 0.003030e-6, 419.484643875, 5.286473844 },
/* 521, 530 */
{ 0.003210e-6, -7.046236698, 1.863796539 },
{ 0.003058e-6, 9437.762934887, 4.226420633 },
{ 0.002589e-6, 12352.852604545, 1.991935820 },
{ 0.002927e-6, 5216.580372801, 2.319951253 },
{ 0.002425e-6, 5230.807466803, 3.084752833 },
{ 0.002656e-6, 3154.687084896, 2.487447866 },
{ 0.002445e-6, 10447.387839604, 2.347139160 },
{ 0.002990e-6, 4690.479836359, 6.235872050 },
{ 0.002890e-6, 5863.591206116, 0.095197563 },
{ 0.002498e-6, 6438.496249426, 2.994779800 },
/* 531, 540 */
{ 0.001889e-6, 8031.092263058, 3.569003717 },
{ 0.002567e-6, 801.820931124, 3.425611498 },
{ 0.001803e-6, -71430.695617928, 2.192295512 },
{ 0.001782e-6, 3.932153263, 5.180433689 },
{ 0.001694e-6, -4705.732307544, 4.641779174 },
{ 0.001704e-6, -1592.596013633, 3.997097652 },
{ 0.001735e-6, 5849.364112115, 0.417558428 },
{ 0.001643e-6, 8429.241266467, 2.180619584 },
{ 0.001680e-6, 38.133035638, 4.164529426 },
{ 0.002045e-6, 7084.896781115, 0.526323854 },
/* 541, 550 */
{ 0.001458e-6, 4292.330832950, 1.356098141 },
{ 0.001437e-6, 20.355319399, 3.895439360 },
{ 0.001738e-6, 6279.552731642, 0.087484036 },
{ 0.001367e-6, 14143.495242431, 3.987576591 },
{ 0.001344e-6, 7234.794256242, 0.090454338 },
{ 0.001438e-6, 11499.656222793, 0.974387904 },
{ 0.001257e-6, 6836.645252834, 1.509069366 },
{ 0.001358e-6, 11513.883316794, 0.495572260 },
{ 0.001628e-6, 7632.943259650, 4.968445721 },
{ 0.001169e-6, 103.092774219, 2.838496795 },
/* 551, 560 */
{ 0.001162e-6, 4164.311989613, 3.408387778 },
{ 0.001092e-6, 6069.776754553, 3.617942651 },
{ 0.001008e-6, 17789.845619785, 0.286350174 },
{ 0.001008e-6, 639.897286314, 1.610762073 },
{ 0.000918e-6, 10213.285546211, 5.532798067 },
{ 0.001011e-6, -6256.777530192, 0.661826484 },
{ 0.000753e-6, 16730.463689596, 3.905030235 },
{ 0.000737e-6, 11926.254413669, 4.641956361 },
{ 0.000694e-6, 3340.612426700, 2.111120332 },
{ 0.000701e-6, 3894.181829542, 2.760823491 },
/* 561, 570 */
{ 0.000689e-6, -135.065080035, 4.768800780 },
{ 0.000700e-6, 13367.972631107, 5.760439898 },
{ 0.000664e-6, 6040.347246017, 1.051215840 },
{ 0.000654e-6, 5650.292110678, 4.911332503 },
{ 0.000788e-6, 6681.224853400, 4.699648011 },
{ 0.000628e-6, 5333.900241022, 5.024608847 },
{ 0.000755e-6, -110.206321219, 4.370971253 },
{ 0.000628e-6, 6290.189396992, 3.660478857 },
{ 0.000635e-6, 25132.303399966, 4.121051532 },
{ 0.000534e-6, 5966.683980335, 1.173284524 },
/* 571, 580 */
{ 0.000543e-6, -433.711737877, 0.345585464 },
{ 0.000517e-6, -1990.745017041, 5.414571768 },
{ 0.000504e-6, 5767.611978898, 2.328281115 },
{ 0.000485e-6, 5753.384884897, 1.685874771 },
{ 0.000463e-6, 7860.419392439, 5.297703006 },
{ 0.000604e-6, 515.463871093, 0.591998446 },
{ 0.000443e-6, 12168.002696575, 4.830881244 },
{ 0.000570e-6, 199.072001436, 3.899190272 },
{ 0.000465e-6, 10969.965257698, 0.476681802 },
{ 0.000424e-6, -7079.373856808, 1.112242763 },
/* 581, 590 */
{ 0.000427e-6, 735.876513532, 1.994214480 },
{ 0.000478e-6, -6127.655450557, 3.778025483 },
{ 0.000414e-6, 10973.555686350, 5.441088327 },
{ 0.000512e-6, 1589.072895284, 0.107123853 },
{ 0.000378e-6, 10984.192351700, 0.915087231 },
{ 0.000402e-6, 11371.704689758, 4.107281715 },
{ 0.000453e-6, 9917.696874510, 1.917490952 },
{ 0.000395e-6, 149.563197135, 2.763124165 },
{ 0.000371e-6, 5739.157790895, 3.112111866 },
{ 0.000350e-6, 11790.629088659, 0.440639857 },
/* 591, 600 */
{ 0.000356e-6, 6133.512652857, 5.444568842 },
{ 0.000344e-6, 412.371096874, 5.676832684 },
{ 0.000383e-6, 955.599741609, 5.559734846 },
{ 0.000333e-6, 6496.374945429, 0.261537984 },
{ 0.000340e-6, 6055.549660552, 5.975534987 },
{ 0.000334e-6, 1066.495477190, 2.335063907 },
{ 0.000399e-6, 11506.769769794, 5.321230910 },
{ 0.000314e-6, 18319.536584880, 2.313312404 },
{ 0.000424e-6, 1052.268383188, 1.211961766 },
{ 0.000307e-6, 63.735898303, 3.169551388 },
/* 601, 610 */
{ 0.000329e-6, 29.821438149, 6.106912080 },
{ 0.000357e-6, 6309.374169791, 4.223760346 },
{ 0.000312e-6, -3738.761430108, 2.180556645 },
{ 0.000301e-6, 309.278322656, 1.499984572 },
{ 0.000268e-6, 12043.574281889, 2.447520648 },
{ 0.000257e-6, 12491.370101415, 3.662331761 },
{ 0.000290e-6, 625.670192312, 1.272834584 },
{ 0.000256e-6, 5429.879468239, 1.913426912 },
{ 0.000339e-6, 3496.032826134, 4.165930011 },
{ 0.000283e-6, 3930.209696220, 4.325565754 },
/* 611, 620 */
{ 0.000241e-6, 12528.018664345, 3.832324536 },
{ 0.000304e-6, 4686.889407707, 1.612348468 },
{ 0.000259e-6, 16200.772724501, 3.470173146 },
{ 0.000238e-6, 12139.553509107, 1.147977842 },
{ 0.000236e-6, 6172.869528772, 3.776271728 },
{ 0.000296e-6, -7058.598461315, 0.460368852 },
{ 0.000306e-6, 10575.406682942, 0.554749016 },
{ 0.000251e-6, 17298.182327326, 0.834332510 },
{ 0.000290e-6, 4732.030627343, 4.759564091 },
{ 0.000261e-6, 5884.926846583, 0.298259862 },
/* 621, 630 */
{ 0.000249e-6, 5547.199336460, 3.749366406 },
{ 0.000213e-6, 11712.955318231, 5.415666119 },
{ 0.000223e-6, 4701.116501708, 2.703203558 },
{ 0.000268e-6, -640.877607382, 0.283670793 },
{ 0.000209e-6, 5636.065016677, 1.238477199 },
{ 0.000193e-6, 10177.257679534, 1.943251340 },
{ 0.000182e-6, 6283.143160294, 2.456157599 },
{ 0.000184e-6, -227.526189440, 5.888038582 },
{ 0.000182e-6, -6283.008539689, 0.241332086 },
{ 0.000228e-6, -6284.056171060, 2.657323816 },
/* 631, 640 */
{ 0.000166e-6, 7238.675591600, 5.930629110 },
{ 0.000167e-6, 3097.883822726, 5.570955333 },
{ 0.000159e-6, -323.505416657, 5.786670700 },
{ 0.000154e-6, -4136.910433516, 1.517805532 },
{ 0.000176e-6, 12029.347187887, 3.139266834 },
{ 0.000167e-6, 12132.439962106, 3.556352289 },
{ 0.000153e-6, 202.253395174, 1.463313961 },
{ 0.000157e-6, 17267.268201691, 1.586837396 },
{ 0.000142e-6, 83996.847317911, 0.022670115 },
{ 0.000152e-6, 17260.154654690, 0.708528947 },
/* 641, 650 */
{ 0.000144e-6, 6084.003848555, 5.187075177 },
{ 0.000135e-6, 5756.566278634, 1.993229262 },
{ 0.000134e-6, 5750.203491159, 3.457197134 },
{ 0.000144e-6, 5326.786694021, 6.066193291 },
{ 0.000160e-6, 11015.106477335, 1.710431974 },
{ 0.000133e-6, 3634.621024518, 2.836451652 },
{ 0.000134e-6, 18073.704938650, 5.453106665 },
{ 0.000134e-6, 1162.474704408, 5.326898811 },
{ 0.000128e-6, 5642.198242609, 2.511652591 },
{ 0.000160e-6, 632.783739313, 5.628785365 },
/* 651, 660 */
{ 0.000132e-6, 13916.019109642, 0.819294053 },
{ 0.000122e-6, 14314.168113050, 5.677408071 },
{ 0.000125e-6, 12359.966151546, 5.251984735 },
{ 0.000121e-6, 5749.452731634, 2.210924603 },
{ 0.000136e-6, -245.831646229, 1.646502367 },
{ 0.000120e-6, 5757.317038160, 3.240883049 },
{ 0.000134e-6, 12146.667056108, 3.059480037 },
{ 0.000137e-6, 6206.809778716, 1.867105418 },
{ 0.000141e-6, 17253.041107690, 2.069217456 },
{ 0.000129e-6, -7477.522860216, 2.781469314 },
/* 661, 670 */
{ 0.000116e-6, 5540.085789459, 4.281176991 },
{ 0.000116e-6, 9779.108676125, 3.320925381 },
{ 0.000129e-6, 5237.921013804, 3.497704076 },
{ 0.000113e-6, 5959.570433334, 0.983210840 },
{ 0.000122e-6, 6282.095528923, 2.674938860 },
{ 0.000140e-6, -11.045700264, 4.957936982 },
{ 0.000108e-6, 23543.230504682, 1.390113589 },
{ 0.000106e-6, -12569.674818332, 0.429631317 },
{ 0.000110e-6, -266.607041722, 5.501340197 },
{ 0.000115e-6, 12559.038152982, 4.691456618 },
/* 671, 680 */
{ 0.000134e-6, -2388.894020449, 0.577313584 },
{ 0.000109e-6, 10440.274292604, 6.218148717 },
{ 0.000102e-6, -543.918059096, 1.477842615 },
{ 0.000108e-6, 21228.392023546, 2.237753948 },
{ 0.000101e-6, -4535.059436924, 3.100492232 },
{ 0.000103e-6, 76.266071276, 5.594294322 },
{ 0.000104e-6, 949.175608970, 5.674287810 },
{ 0.000101e-6, 13517.870106233, 2.196632348 },
{ 0.000100e-6, 11933.367960670, 4.056084160 },
/* T^2 */
{ 4.322990e-6, 6283.075849991, 2.642893748 },
/* 681, 690 */
{ 0.406495e-6, 0.000000000, 4.712388980 },
{ 0.122605e-6, 12566.151699983, 2.438140634 },
{ 0.019476e-6, 213.299095438, 1.642186981 },
{ 0.016916e-6, 529.690965095, 4.510959344 },
{ 0.013374e-6, -3.523118349, 1.502210314 },
{ 0.008042e-6, 26.298319800, 0.478549024 },
{ 0.007824e-6, 155.420399434, 5.254710405 },
{ 0.004894e-6, 5746.271337896, 4.683210850 },
{ 0.004875e-6, 5760.498431898, 0.759507698 },
{ 0.004416e-6, 5223.693919802, 6.028853166 },
/* 691, 700 */
{ 0.004088e-6, -7.113547001, 0.060926389 },
{ 0.004433e-6, 77713.771467920, 3.627734103 },
{ 0.003277e-6, 18849.227549974, 2.327912542 },
{ 0.002703e-6, 6062.663207553, 1.271941729 },
{ 0.003435e-6, -775.522611324, 0.747446224 },
{ 0.002618e-6, 6076.890301554, 3.633715689 },
{ 0.003146e-6, 206.185548437, 5.647874613 },
{ 0.002544e-6, 1577.343542448, 6.232904270 },
{ 0.002218e-6, -220.412642439, 1.309509946 },
{ 0.002197e-6, 5856.477659115, 2.407212349 },
/* 701, 710 */
{ 0.002897e-6, 5753.384884897, 5.863842246 },
{ 0.001766e-6, 426.598190876, 0.754113147 },
{ 0.001738e-6, -796.298006816, 2.714942671 },
{ 0.001695e-6, 522.577418094, 2.629369842 },
{ 0.001584e-6, 5507.553238667, 1.341138229 },
{ 0.001503e-6, -242.728603974, 0.377699736 },
{ 0.001552e-6, -536.804512095, 2.904684667 },
{ 0.001370e-6, -398.149003408, 1.265599125 },
{ 0.001889e-6, -5573.142801634, 4.413514859 },
{ 0.001722e-6, 6069.776754553, 2.445966339 },
/* 711, 720 */
{ 0.001124e-6, 1059.381930189, 5.041799657 },
{ 0.001258e-6, 553.569402842, 3.849557278 },
{ 0.000831e-6, 951.718406251, 2.471094709 },
{ 0.000767e-6, 4694.002954708, 5.363125422 },
{ 0.000756e-6, 1349.867409659, 1.046195744 },
{ 0.000775e-6, -11.045700264, 0.245548001 },
{ 0.000597e-6, 2146.165416475, 4.543268798 },
{ 0.000568e-6, 5216.580372801, 4.178853144 },
{ 0.000711e-6, 1748.016413067, 5.934271972 },
{ 0.000499e-6, 12036.460734888, 0.624434410 },
/* 721, 730 */
{ 0.000671e-6, -1194.447010225, 4.136047594 },
{ 0.000488e-6, 5849.364112115, 2.209679987 },
{ 0.000621e-6, 6438.496249426, 4.518860804 },
{ 0.000495e-6, -6286.598968340, 1.868201275 },
{ 0.000456e-6, 5230.807466803, 1.271231591 },
{ 0.000451e-6, 5088.628839767, 0.084060889 },
{ 0.000435e-6, 5643.178563677, 3.324456609 },
{ 0.000387e-6, 10977.078804699, 4.052488477 },
{ 0.000547e-6, 161000.685737473, 2.841633844 },
{ 0.000522e-6, 3154.687084896, 2.171979966 },
/* 731, 740 */
{ 0.000375e-6, 5486.777843175, 4.983027306 },
{ 0.000421e-6, 5863.591206116, 4.546432249 },
{ 0.000439e-6, 7084.896781115, 0.522967921 },
{ 0.000309e-6, 2544.314419883, 3.172606705 },
{ 0.000347e-6, 4690.479836359, 1.479586566 },
{ 0.000317e-6, 801.820931124, 3.553088096 },
{ 0.000262e-6, 419.484643875, 0.606635550 },
{ 0.000248e-6, 6836.645252834, 3.014082064 },
{ 0.000245e-6, -1592.596013633, 5.519526220 },
{ 0.000225e-6, 4292.330832950, 2.877956536 },
/* 741, 750 */
{ 0.000214e-6, 7234.794256242, 1.605227587 },
{ 0.000205e-6, 5767.611978898, 0.625804796 },
{ 0.000180e-6, 10447.387839604, 3.499954526 },
{ 0.000229e-6, 199.072001436, 5.632304604 },
{ 0.000214e-6, 639.897286314, 5.960227667 },
{ 0.000175e-6, -433.711737877, 2.162417992 },
{ 0.000209e-6, 515.463871093, 2.322150893 },
{ 0.000173e-6, 6040.347246017, 2.556183691 },
{ 0.000184e-6, 6309.374169791, 4.732296790 },
{ 0.000227e-6, 149854.400134205, 5.385812217 },
/* 751, 760 */
{ 0.000154e-6, 8031.092263058, 5.120720920 },
{ 0.000151e-6, 5739.157790895, 4.815000443 },
{ 0.000197e-6, 7632.943259650, 0.222827271 },
{ 0.000197e-6, 74.781598567, 3.910456770 },
{ 0.000138e-6, 6055.549660552, 1.397484253 },
{ 0.000149e-6, -6127.655450557, 5.333727496 },
{ 0.000137e-6, 3894.181829542, 4.281749907 },
{ 0.000135e-6, 9437.762934887, 5.979971885 },
{ 0.000139e-6, -2352.866153772, 4.715630782 },
{ 0.000142e-6, 6812.766815086, 0.513330157 },
/* 761, 770 */
{ 0.000120e-6, -4705.732307544, 0.194160689 },
{ 0.000131e-6, -71430.695617928, 0.000379226 },
{ 0.000124e-6, 6279.552731642, 2.122264908 },
{ 0.000108e-6, -6256.777530192, 0.883445696 },
/* T^3 */
{ 0.143388e-6, 6283.075849991, 1.131453581 },
{ 0.006671e-6, 12566.151699983, 0.775148887 },
{ 0.001480e-6, 155.420399434, 0.480016880 },
{ 0.000934e-6, 213.299095438, 6.144453084 },
{ 0.000795e-6, 529.690965095, 2.941595619 },
{ 0.000673e-6, 5746.271337896, 0.120415406 },
/* 771, 780 */
{ 0.000672e-6, 5760.498431898, 5.317009738 },
{ 0.000389e-6, -220.412642439, 3.090323467 },
{ 0.000373e-6, 6062.663207553, 3.003551964 },
{ 0.000360e-6, 6076.890301554, 1.918913041 },
{ 0.000316e-6, -21.340641002, 5.545798121 },
{ 0.000315e-6, -242.728603974, 1.884932563 },
{ 0.000278e-6, 206.185548437, 1.266254859 },
{ 0.000238e-6, -536.804512095, 4.532664830 },
{ 0.000185e-6, 522.577418094, 4.578313856 },
{ 0.000245e-6, 18849.227549974, 0.587467082 },
/* 781, 787 */
{ 0.000180e-6, 426.598190876, 5.151178553 },
{ 0.000200e-6, 553.569402842, 5.355983739 },
{ 0.000141e-6, 5223.693919802, 1.336556009 },
{ 0.000104e-6, 5856.477659115, 4.239842759 },
/* T^4 */
{ 0.003826e-6, 6283.075849991, 5.705257275 },
{ 0.000303e-6, 12566.151699983, 5.407132842 },
{ 0.000209e-6, 155.420399434, 1.989815753 }
};
/* Time since J2000.0 in Julian millennia. */
t = ((date1 - DJ00) + date2) / DJM;
/* ================= */
/* Topocentric terms */
/* ================= */
/* Convert UT to local solar time in radians. */
tsol = fmod(ut, 1.0) * D2PI + elong;
/* FUNDAMENTAL ARGUMENTS: Simon et al. 1994. */
/* Combine time argument (millennia) with deg/arcsec factor. */
w = t / 3600.0;
/* Sun Mean Longitude. */
elsun = fmod(280.46645683 + 1296027711.03429 * w, 360.0) * DD2R;
/* Sun Mean Anomaly. */
emsun = fmod(357.52910918 + 1295965810.481 * w, 360.0) * DD2R;
/* Mean Elongation of Moon from Sun. */
d = fmod(297.85019547 + 16029616012.090 * w, 360.0) * DD2R;
/* Mean Longitude of Jupiter. */
elj = fmod(34.35151874 + 109306899.89453 * w, 360.0) * DD2R;
/* Mean Longitude of Saturn. */
els = fmod(50.07744430 + 44046398.47038 * w, 360.0) * DD2R;
/* TOPOCENTRIC TERMS: Moyer 1981 and Murray 1983. */
wt = + 0.00029e-10 * u * sin(tsol + elsun - els)
+ 0.00100e-10 * u * sin(tsol - 2.0 * emsun)
+ 0.00133e-10 * u * sin(tsol - d)
+ 0.00133e-10 * u * sin(tsol + elsun - elj)
- 0.00229e-10 * u * sin(tsol + 2.0 * elsun + emsun)
- 0.02200e-10 * v * cos(elsun + emsun)
+ 0.05312e-10 * u * sin(tsol - emsun)
- 0.13677e-10 * u * sin(tsol + 2.0 * elsun)
- 1.31840e-10 * v * cos(elsun)
+ 3.17679e-10 * u * sin(tsol);
/* ===================== */
/* Fairhead et al. model */
/* ===================== */
/* T**0 */
w0 = 0;
for (j = 473; j >= 0; j--) {
w0 += fairhd[j][0] * sin(fairhd[j][1] * t + fairhd[j][2]);
}
/* T**1 */
w1 = 0;
for (j = 678; j >= 474; j--) {
w1 += fairhd[j][0] * sin(fairhd[j][1] * t + fairhd[j][2]);
}
/* T**2 */
w2 = 0;
for (j = 763; j >= 679; j--) {
w2 += fairhd[j][0] * sin(fairhd[j][1] * t + fairhd[j][2]);
}
/* T**3 */
w3 = 0;
for (j = 783; j >= 764; j--) {
w3 += fairhd[j][0] * sin(fairhd[j][1] * t + fairhd[j][2]);
}
/* T**4 */
w4 = 0;
for (j = 786; j >= 784; j--) {
w4 += fairhd[j][0] * sin(fairhd[j][1] * t + fairhd[j][2]);
}
/* Multiply by powers of T and combine. */
wf = t * (t * (t * (t * w4 + w3) + w2) + w1) + w0;
/* Adjustments to use JPL planetary masses instead of IAU. */
wj = 0.00065e-6 * sin(6069.776754 * t + 4.021194) +
0.00033e-6 * sin( 213.299095 * t + 5.543132) +
(-0.00196e-6 * sin(6208.294251 * t + 5.696701)) +
(-0.00173e-6 * sin( 74.781599 * t + 2.435900)) +
0.03638e-6 * t * t;
/* ============ */
/* Final result */
/* ============ */
/* TDB-TT in seconds. */
w = wt + wf + wj;
return w;
/*-----------------------------------------------------------------------
**
** Copyright (C) 2008
** Standards Of Fundamental Astronomy Review Board
** of the International Astronomical Union.
**
** =====================
** SOFA Software License
** =====================
**
** NOTICE TO USER:
**
** BY USING THIS SOFTWARE YOU ACCEPT THE FOLLOWING TERMS AND CONDITIONS
** WHICH APPLY TO ITS USE.
**
** 1. The Software is owned by the IAU SOFA Review Board ("the Board").
**
** 2. Permission is granted to anyone to use the SOFA software for any
** purpose, including commercial applications, free of charge and
** without payment of royalties, subject to the conditions and
** restrictions listed below.
**
** 3. You (the user) may copy and adapt the SOFA software and its
** algorithms for your own purposes and you may copy and distribute
** a resulting "derived work" to others on a world-wide, royalty-free
** basis, provided that the derived work complies with the following
** requirements:
**
** a) Your work shall be marked or carry a statement that it (i) uses
** routines and computations derived by you from software provided
** by SOFA under license to you; and (ii) does not contain
** software provided by SOFA or software that has been distributed
** by or endorsed by SOFA.
**
** b) The source code of your derived work must contain descriptions
** of how the derived work is based upon and/or differs from the
** original SOFA software.
**
** c) The name(s) of all routine(s) that you distribute shall differ
** from the SOFA names, even when the SOFA content has not been
** otherwise changed.
**
** d) The routine-naming prefix "iau" shall not be used.
**
** e) The origin of the SOFA components of your derived work must not
** be misrepresented; you must not claim that you wrote the
** original software, nor file a patent application for SOFA
** software or algorithms embedded in the SOFA software.
**
** f) These requirements must be reproduced intact in any source
** distribution and shall apply to anyone to whom you have granted
** a further right to modify the source code of your derived work.
**
** 4. In any published work or commercial products which includes
** results achieved by using the SOFA software, you shall acknowledge
** that the SOFA software was used in obtaining those results.
**
** 5. You shall not cause the SOFA software to be brought into
** disrepute, either by misuse, or use for inappropriate tasks, or by
** inappropriate modification.
**
** 6. The SOFA software is provided "as is" and the Board makes no
** warranty as to its use or performance. The Board does not and
** cannot warrant the performance or results which the user may obtain
** by using the SOFA software. The Board makes no warranties, express
** or implied, as to non-infringement of third party rights,
** merchantability, or fitness for any particular purpose. In no
** event will the Board be liable to the user for any consequential,
** incidental, or special damages, including any lost profits or lost
** savings, even if a Board representative has been advised of such
** damages, or for any claim by any third party.
**
** 7. The provision of any version of the SOFA software under the terms
** and conditions specified herein does not imply that future
** versions will also be made available under the same terms and
** conditions.
**
** Correspondence concerning SOFA software should be addressed as
** follows:
**
** Internet email: sofa@rl.ac.uk
** Postal address: IAU SOFA Center
** Rutherford Appleton Laboratory
** Chilton, Didcot, Oxon OX11 0QX
** United Kingdom
**
**---------------------------------------------------------------------*/
}
/* ==== file: 1cfea_so4_gcc_advfsg.c ==== */
#define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include "omp.h"
#include <stdio.h>
/* Standard min/max helper macros.
   NOTE(review): each argument is evaluated twice — do not pass
   expressions with side effects (e.g. min(i++, n)). */
#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))
/* Generic array descriptor handed to the generated kernels: a raw data
   pointer plus per-dimension metadata.  Only `data` and `size` are
   dereferenced in this file (for the VLA casts in ForwardTTI/bf0/bf1);
   the remaining fields are presumably padding/halo bookkeeping from the
   code generator — TODO confirm against the generator's conventions. */
struct dataobj
{
void *restrict data; /* raw array storage */
int *size; /* per-dimension extents (used for the VLA casts) */
int *npsize; /* NOTE(review): not referenced in this file */
int *dsize; /* NOTE(review): not referenced in this file */
int *hsize; /* NOTE(review): not referenced in this file */
int *hofs; /* NOTE(review): not referenced in this file */
int *oofs; /* NOTE(review): not referenced in this file */
};
/* Wall-clock accumulators, in seconds, for the timed code sections. */
struct profiler
{
double section0; /* trig/anisotropy precompute loop in ForwardTTI */
double section1; /* bf0 + bf1 stencil passes, accumulated per tile */
double section2; /* NOTE(review): never updated in this file */
};
/* bf0: first stencil pass of a tile — fills scratch volumes r34/r35 from
   u/v and the precomputed trig tables r18..r21 (defined below). */
void bf0(float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, float *restrict r34_vec, float *restrict r35_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, const int x_size, const int y_size, const int z_size, const int time, const int t0, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int tw);
/* bf1: second stencil pass — consumes r17..r21, r34, r35 and updates the
   wavefields, including source injection (defined later in this file). */
void bf1(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r17_vec, float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, float *restrict r34_vec, float *restrict r35_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int x_size, const int y_size, const int z_size, const int time, const int t0, const int t1, const int t2, const int x1_blk0_size, const int x_M, const int x_m, const int y1_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int tw);
/* Allocate BYTES of 64-byte-aligned scratch memory; returns NULL on failure.
   Wraps posix_memalign so the pointer is well-defined even on error. */
static void *alloc_scratch64(size_t bytes)
{
  void *p = NULL;
  if (posix_memalign(&p, 64, bytes) != 0)
    return NULL;
  return p;
}

/*
 * ForwardTTI: driver for a time-tiled (wavefront-skewed) TTI forward
 * propagation (Devito-style generated code).
 *
 * Section 0 precomputes per-point trig/anisotropy tables r17..r21 from
 * phi, theta and delta.  The main loop then walks time blocks and
 * space tiles, calling bf0 (gradient precompute into r34/r35) and bf1
 * (wavefield update + source injection) per skewed time step.
 *
 * Returns 0 on success, -1 if scratch allocation fails.  Elapsed times
 * are accumulated into timers->section0 / timers->section1.
 *
 * Fix vs. original: posix_memalign return values were ignored, so an
 * allocation failure led to use of an indeterminate pointer; unused
 * local VLA casts (u, v, source masks, ...) were removed — those fields
 * are forwarded to bf0/bf1 as dataobj pointers untouched.
 */
int ForwardTTI(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, struct dataobj *restrict delta_vec, const float dt, struct dataobj *restrict epsilon_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict phi_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict theta_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, const int x_size, const int y_size, const int z_size, const int sp_zi_m, const int time_M, const int time_m, struct profiler *timers, const int x1_blk0_size, const int x_M, const int x_m, const int y1_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int nthreads_nonaffine)
{
  /* Tile and inner-block sizes arrive packed in block_sizes_vec->data. */
  int(*restrict block_sizes) __attribute__((aligned(64))) = (int(*))block_sizes_vec->data;
  /* Model-parameter grids read by the section0 precompute loop. */
  float(*restrict delta)[delta_vec->size[1]][delta_vec->size[2]] __attribute__((aligned(64))) = (float(*)[delta_vec->size[1]][delta_vec->size[2]])delta_vec->data;
  float(*restrict phi)[phi_vec->size[1]][phi_vec->size[2]] __attribute__((aligned(64))) = (float(*)[phi_vec->size[1]][phi_vec->size[2]])phi_vec->data;
  float(*restrict theta)[theta_vec->size[1]][theta_vec->size[2]] __attribute__((aligned(64))) = (float(*)[theta_vec->size[1]][theta_vec->size[2]])theta_vec->data;
  /* Seven (x+1, y+1, z+1) scratch volumes shared with bf0/bf1. */
  const size_t scratch_bytes = sizeof(float[x_size + 1][y_size + 1][z_size + 1]);
  float(*r17)[y_size + 1][z_size + 1] = alloc_scratch64(scratch_bytes);
  float(*r18)[y_size + 1][z_size + 1] = alloc_scratch64(scratch_bytes);
  float(*r19)[y_size + 1][z_size + 1] = alloc_scratch64(scratch_bytes);
  float(*r20)[y_size + 1][z_size + 1] = alloc_scratch64(scratch_bytes);
  float(*r21)[y_size + 1][z_size + 1] = alloc_scratch64(scratch_bytes);
  float(*r34)[y_size + 1][z_size + 1] = alloc_scratch64(scratch_bytes);
  float(*r35)[y_size + 1][z_size + 1] = alloc_scratch64(scratch_bytes);
  if (!r17 || !r18 || !r19 || !r20 || !r21 || !r34 || !r35)
  {
    fprintf(stderr, "ForwardTTI: failed to allocate scratch arrays\n");
    free(r17);
    free(r18);
    free(r19);
    free(r20);
    free(r21);
    free(r34);
    free(r35);
    return -1;
  }
  /* Flush denormal numbers to zero in hardware */
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
  _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
  struct timeval start_section0, end_section0;
  gettimeofday(&start_section0, NULL);
  /* Begin section0: precompute trig/anisotropy tables (r17..r21).
     Grid indices are shifted: table point (x+1,y+1,z+1) corresponds to
     field point (x+4,y+4,z+4) — presumably a 3/4-point halo offset. */
#pragma omp parallel num_threads(nthreads)
  {
#pragma omp for collapse(1) schedule(static, 1)
    for (int x = x_m - 1; x <= x_M; x += 1)
    {
      for (int y = y_m - 1; y <= y_M; y += 1)
      {
#pragma omp simd aligned(delta, phi, theta : 32)
        for (int z = z_m - 1; z <= z_M; z += 1)
        {
          r21[x + 1][y + 1][z + 1] = cos(phi[x + 4][y + 4][z + 4]);
          r20[x + 1][y + 1][z + 1] = sin(theta[x + 4][y + 4][z + 4]);
          r19[x + 1][y + 1][z + 1] = sin(phi[x + 4][y + 4][z + 4]);
          r18[x + 1][y + 1][z + 1] = cos(theta[x + 4][y + 4][z + 4]);
          r17[x + 1][y + 1][z + 1] = sqrt(2 * delta[x + 4][y + 4][z + 4] + 1);
        }
      }
    }
  }
  /* End section0 */
  gettimeofday(&end_section0, NULL);
  timers->section0 += (double)(end_section0.tv_sec - start_section0.tv_sec) + (double)(end_section0.tv_usec - start_section0.tv_usec) / 1000000;
  int y0_blk0_size = block_sizes[3];
  int x0_blk0_size = block_sizes[2];
  int yb_size = block_sizes[1];
  int xb_size = block_sizes[0];
  int sf = 2; /* space/time skewing factor */
  int t_blk_size = 2 * sf * (time_M - time_m);
  printf(" Tiles: %d, %d ::: Blocks %d, %d \n", xb_size, yb_size, x0_blk0_size, y0_blk0_size);
  /* Time-tiled wavefront traversal: x/y tile origins are extended by
     sf*(time_M - time_m) because the per-timestep iteration spaces are
     skewed by `time` inside bf0/bf1. */
  for (int t_blk = time_m; t_blk <= 1 + sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block
  {
    for (int xb = x_m - 1; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size)
    {
      for (int yb = y_m - 1; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size)
      {
        /* t0/t1/t2 rotate over the 3 time slots of the wavefield buffers. */
        for (int time = t_blk, t0 = (time) % (3), t1 = (time + 1) % (3), t2 = (time + 2) % (3); time <= 2 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf) % (time_M - time_m + 1))) % (3), t1 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 2) % (3))
        {
          int tw = ((time / sf) % (time_M - time_m + 1)); /* wrapped timestep index */
          struct timeval start_section1, end_section1;
          gettimeofday(&start_section1, NULL);
          /* Begin section1: the two stencil passes for this tile. */
          bf0((float *)r18, (float *)r19, (float *)r20, (float *)r21, (float *)r34, (float *)r35, u_vec, v_vec, x_size, y_size, z_size, time, t0, x0_blk0_size, x_M, x_m - 1, y0_blk0_size, y_M, y_m - 1, z_M, z_m, nthreads, xb, yb, xb_size, yb_size, tw);
          bf1(damp_vec, dt, epsilon_vec, (float *)r17, (float *)r18, (float *)r19, (float *)r20, (float *)r21, (float *)r34, (float *)r35, u_vec, v_vec, vp_vec, nnz_sp_source_mask_vec, sp_source_mask_vec, save_src_u_vec, save_src_v_vec, source_id_vec, source_mask_vec, x_size, y_size, z_size, time, t0, t1, t2, x0_blk0_size, x_M, x_m, y0_blk0_size, y_M, y_m, z_M, z_m, sp_zi_m, nthreads, xb, yb, xb_size, yb_size, tw);
          /* End section1 */
          gettimeofday(&end_section1, NULL);
          timers->section1 += (double)(end_section1.tv_sec - start_section1.tv_sec) + (double)(end_section1.tv_usec - start_section1.tv_usec) / 1000000;
        }
      }
    }
  }
  free(r21);
  free(r20);
  free(r19);
  free(r18);
  free(r17);
  free(r34);
  free(r35);
  return 0;
}
/*
 * bf0: first stencil pass of one time-skewed tile.
 *
 * For every point of the tile it forms one-sided first differences of the
 * wavefields u and v (time slot t0) along z, y and x, weights them with
 * the precomputed trig tables r18..r21, scales by 1.0e-1F and stores the
 * results in the scratch volumes r34 (from u) and r35 (from v).
 * NOTE(review): this looks like a rotated-gradient precompute for the TTI
 * operator — confirm against the generating symbolic expressions.
 *
 * Index convention: loop coordinates x, y are skewed by `time`, so the
 * physical grid point is (x - time, y - time); field accesses carry an
 * additional +4 halo offset, scratch accesses a +1 offset.  `tw` is only
 * used by the commented-out tracing printfs.
 */
void bf0(float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, float *restrict r34_vec, float *restrict r35_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, const int x_size, const int y_size, const int z_size, const int time, const int t0, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int tw)
{
/* Reinterpret the flat scratch pointers as (y_size+1, z_size+1)-rowed VLAs. */
float(*restrict r18)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r18_vec;
float(*restrict r19)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r19_vec;
float(*restrict r20)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r20_vec;
float(*restrict r21)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r21_vec;
float(*restrict r34)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r34_vec;
float(*restrict r35)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r35_vec;
float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
float(*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data;
#pragma omp parallel num_threads(nthreads)
{
#pragma omp for collapse(1) schedule(dynamic, 1)
/* Block loops: clip the skewed iteration space against both the tile
   (xb/yb) and the global extents shifted by `time`. */
for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size)
{
for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size)
{
//printf(" Change of inner x0_blk0 %d \n", x0_blk0);
for (int x = x0_blk0; x <= min(min((x_M + time), (xb + xb_size - 1)), (x0_blk0 + x0_blk0_size - 1)); x++)
{
//printf(" bf0 Timestep tw: %d, Updating x: %d \n", tw, x - time + 1);
for (int y = y0_blk0; y <= min(min((y_M + time), (yb + yb_size - 1)), (y0_blk0 + y0_blk0_size - 1)); y++)
{
// printf(" bf0 Timestep tw: %d, Updating x: %d y: %d \n", tw, x - time + 1, y - time + 1);
#pragma omp simd aligned(u, v : 32)
for (int z = z_m - 1; z <= z_M; z += 1)
{
//printf(" bf0 Updating x: %d y: %d z: %d \n", x - time + 1, y - time + 1, z + 1);
/* r39/r40: negated center values of v/u, reused across the three
   forward differences (z, y, x) below. */
float r39 = -v[t0][x - time + 4][y - time + 4][z + 4];
r35[x - time + 1][y - time + 1][z + 1] = 1.0e-1F * (-(r39 + v[t0][x - time + 4][y - time + 4][z + 5]) * r18[x - time + 1][y - time + 1][z + 1] - (r39 + v[t0][x - time + 4][y - time + 5][z + 4]) * r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] - (r39 + v[t0][x - time + 5][y - time + 4][z + 4]) * r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1]);
float r40 = -u[t0][x - time + 4][y - time + 4][z + 4];
r34[x - time + 1][y - time + 1][z + 1] = 1.0e-1F * (-(r40 + u[t0][x - time + 4][y - time + 4][z + 5]) * r18[x - time + 1][y - time + 1][z + 1] - (r40 + u[t0][x - time + 4][y - time + 5][z + 4]) * r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] - (r40 + u[t0][x - time + 5][y - time + 4][z + 4]) * r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1]);
}
}
}
}
}
}
/* bf1: generated (Devito-style) blocked stencil kernel.
 *
 * Second compute pass over one cache block (xb, yb): updates the wavefields
 * u and v at time level t1 from levels t0/t2 using the auxiliary arrays
 * r17..r21 / r34 / r35 produced by the preceding pass, then injects point
 * sources into u[t1] / v[t1] through a sparse source mask.
 *
 * Parameters follow the generated-code convention: *_vec are dataobj wrappers
 * around padded arrays; x/y/z_{m,M} are loop bounds; t0/t1/t2 are the rotating
 * time-buffer indices; x1_blk0_size/y1_blk0_size are the inner tile sizes;
 * xb/yb/xb_size/yb_size describe the outer block; tw is the source time index.
 * NOTE(review): index offsets (+4 on field arrays vs +1 on r* arrays) encode
 * the halo/padding widths chosen by the generator — do not "simplify" them.
 */
void bf1(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r17_vec, float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, float *restrict r34_vec, float *restrict r35_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int x_size, const int y_size, const int z_size, const int time, const int t0, const int t1, const int t2, const int x1_blk0_size, const int x_M, const int x_m, const int y1_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int tw)
{
/* Re-view the flat dataobj buffers as variably-modified multi-dimensional
   arrays so the loops below can use natural subscripting. */
float(*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data;
float(*restrict epsilon)[epsilon_vec->size[1]][epsilon_vec->size[2]] __attribute__((aligned(64))) = (float(*)[epsilon_vec->size[1]][epsilon_vec->size[2]])epsilon_vec->data;
float(*restrict r17)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r17_vec;
float(*restrict r18)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r18_vec;
float(*restrict r19)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r19_vec;
float(*restrict r20)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r20_vec;
float(*restrict r21)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r21_vec;
float(*restrict r34)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r34_vec;
float(*restrict r35)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r35_vec;
float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
float(*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data;
float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data;
int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
float(*restrict save_src_v)[save_src_v_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_v_vec->size[1]])save_src_v_vec->data;
int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
//printf("In bf1 \n");
/* Outer tiles of the (x, y) block are distributed dynamically over threads;
   each (x, y) column is updated by exactly one thread, so the writes to
   u[t1]/v[t1] below are race-free. */
#pragma omp parallel num_threads(nthreads)
{
#pragma omp for collapse(1) schedule(dynamic, 1)
for (int x1_blk0 = max((x_m + time), xb - 0); x1_blk0 <= +min((x_M + time), (xb - 0 + xb_size)); x1_blk0 += x1_blk0_size)
{
//printf(" Change of inner x1_blk0 %d \n", x1_blk0);
for (int y1_blk0 = max((y_m + time), yb - 0); y1_blk0 <= +min((y_M + time), (yb - 0 + yb_size)); y1_blk0 += y1_blk0_size)
{
for (int x = x1_blk0; x <= min(min((x_M + time), (xb - 0 + xb_size - 1)), (x1_blk0 + x1_blk0_size - 1)); x++)
{
//printf(" bf1 Timestep tw: %d, Updating x: %d \n", tw, x - time + 4);
for (int y = y1_blk0; y <= min(min((y_M + time), (yb - 0 + yb_size - 1)), (y1_blk0 + y1_blk0_size - 1)); y++)
{
//printf(" bf1 Timestep tw: %d, Updating x: %d y: %d \n", tw, x - time + 4, y - time + 4);
/* Innermost z loop is vectorized; all field buffers are 32-byte aligned. */
#pragma omp simd aligned(damp, epsilon, u, v, vp : 32)
for (int z = z_m; z <= z_M; z += 1)
{
//printf(" bf1 Updating x: %d y: %d z: %d \n", x - time + 4, y - time + 4, z + 4);
//printf(" bf1 Updating x: %d y: %d z: %d \n", x - time + 4, y - time + 4, z + 4);
/* r46 = 1/dt, r45 = 1/dt^2: time-derivative prefactors. */
float r46 = 1.0 / dt;
float r45 = 1.0 / (dt * dt);
/* r44/r42: divergence-like combinations of the staggered fluxes r34/r35
   weighted by the rotation coefficients r18..r21 (generated expressions —
   preserve term order exactly). */
float r44 = r18[x - time + 1][y - time + 1][z] * r35[x - time + 1][y - time + 1][z] - r18[x - time + 1][y - time + 1][z + 1] * r35[x - time + 1][y - time + 1][z + 1] + r19[x - time + 1][y - time][z + 1] * r20[x - time + 1][y - time][z + 1] * r35[x - time + 1][y - time][z + 1] - r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] * r35[x - time + 1][y - time + 1][z + 1] + r20[x - time][y - time + 1][z + 1] * r21[x - time][y - time + 1][z + 1] * r35[x - time][y - time + 1][z + 1] - r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1] * r35[x - time + 1][y - time + 1][z + 1];
/* r43 = 1/vp^2 (slowness squared) at this grid point. */
float r43 = pow(vp[x - time + 4][y - time + 4][z + 4], -2);
float r42 = 1.0e-1F * (-r18[x - time + 1][y - time + 1][z] * r34[x - time + 1][y - time + 1][z] + r18[x - time + 1][y - time + 1][z + 1] * r34[x - time + 1][y - time + 1][z + 1] - r19[x - time + 1][y - time][z + 1] * r20[x - time + 1][y - time][z + 1] * r34[x - time + 1][y - time][z + 1] + r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] * r34[x - time + 1][y - time + 1][z + 1] - r20[x - time][y - time + 1][z + 1] * r21[x - time][y - time + 1][z + 1] * r34[x - time][y - time + 1][z + 1] + r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1] * r34[x - time + 1][y - time + 1][z + 1]) - 8.33333315e-4F * (u[t0][x - time + 2][y - time + 4][z + 4] + u[t0][x - time + 4][y - time + 2][z + 4] + u[t0][x - time + 4][y - time + 4][z + 2] + u[t0][x - time + 4][y - time + 4][z + 6] + u[t0][x - time + 4][y - time + 6][z + 4] + u[t0][x - time + 6][y - time + 4][z + 4]) + 1.3333333e-2F * (u[t0][x - time + 3][y - time + 4][z + 4] + u[t0][x - time + 4][y - time + 3][z + 4] + u[t0][x - time + 4][y - time + 4][z + 3] + u[t0][x - time + 4][y - time + 4][z + 5] + u[t0][x - time + 4][y - time + 5][z + 4] + u[t0][x - time + 5][y - time + 4][z + 4]) - 7.49999983e-2F * u[t0][x - time + 4][y - time + 4][z + 4];
/* r41: inverse of the implicit update denominator (mass + damping terms). */
float r41 = 1.0 / (r43 * r45 + r46 * damp[x - time + 1][y - time + 1][z + 1]);
/* r32/r33: second time derivatives of u and v (t2 is the previous level). */
float r32 = r45 * (-2.0F * u[t0][x - time + 4][y - time + 4][z + 4] + u[t2][x - time + 4][y - time + 4][z + 4]);
float r33 = r45 * (-2.0F * v[t0][x - time + 4][y - time + 4][z + 4] + v[t2][x - time + 4][y - time + 4][z + 4]);
u[t1][x - time + 4][y - time + 4][z + 4] = r41 * ((-r32) * r43 + r42 * (2 * epsilon[x - time + 4][y - time + 4][z + 4] + 1) + 1.0e-1F * r44 * r17[x - time + 1][y - time + 1][z + 1] + r46 * (damp[x - time + 1][y - time + 1][z + 1] * u[t0][x - time + 4][y - time + 4][z + 4]));
v[t1][x - time + 4][y - time + 4][z + 4] = r41 * ((-r33) * r43 + r42 * r17[x - time + 1][y - time + 1][z + 1] + 1.0e-1F * r44 + r46 * (damp[x - time + 1][y - time + 1][z + 1] * v[t0][x - time + 4][y - time + 4][z + 4]));
}
//int sp_zi_M = nnz_sp_source_mask[x - time][y - time] - 1;
/* Source injection: for each nonzero entry of the sparse source mask in
   this (x, y) column, add the saved source amplitude for time index tw
   into the freshly computed u[t1]/v[t1] at depth zind. */
for (int sp_zi = sp_zi_m; sp_zi <= nnz_sp_source_mask[x - time][y - time] - 1; sp_zi += 1)
{
int zind = sp_source_mask[x - time][y - time][sp_zi];
float r22 = save_src_u[tw][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
//#pragma omp atomic update
u[t1][x - time + 4][y - time + 4][zind + 4] += r22;
float r23 = save_src_v[tw][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
//#pragma omp atomic update
v[t1][x - time + 4][y - time + 4][zind + 4] += r23;
//printf("Source injection at time %d , at : x: %d, y: %d, %d, %f, %f \n", tw, x - time + 4, y - time + 4, zind + 4, r22, r23);
}
}
}
}
}
}
}
|
mxEvaluateStrongFormEdgeAlterRHS.c | #ifdef _OPENMP
#include <omp.h>
#endif
#include "mex.h"
#include "blas.h"
// #if !defined(_WIN32)
// #define dgemm dgemm_
// #endif
#define DEBUG 0
#define NRHS 9
#define NLHS 1
/*
 * MEX gateway: assemble the edge (face) integral contribution to the DG
 * right-hand side and apply the inverse mass matrix, per element.
 *
 * Inputs (prhs) — sizes inferred from the mxGet* calls below; semantic
 * names are per the variable names and should be confirmed against callers:
 *   0: invM   [Np x Np]             (inverse mass matrix, used in dgemm)
 *   1: Mb     [Nfp x Nfp]           (face mass matrix, column-accessed)
 *   2: FToE   [2 x Ne]              face -> element pair, 1-based
 *   3: FToN1  [Nfp x Ne]            face node -> local node on element 1, 1-based
 *   4: FToN2  [Nfp x Ne]            face node -> local node on element 2, 1-based
 *   5: Js     [Nfp x Ne]            face Jacobian
 *   6: J      [Np x K]              element Jacobian
 *   7: fluxM  [Nfp x Ne x Nfield]   local-side flux
 *   8: fluxP  [Nfp x Ne x Nfield]   adjacent-side flux
 * Output (plhs):
 *   0: frhs   [Np x K x Nfield]
 */
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
  /* Check argument counts.
     NOTE(review): these only print a message and fall through — execution
     continues with possibly-invalid prhs/plhs; mexErrMsgIdAndTxt would abort
     instead. Confirm the warn-and-continue behavior is intended. */
  if (nrhs != NRHS) {
    mexPrintf("Matlab:%s:InvalidNumberInput,\n", __FILE__);
    mexPrintf("%d inputs required.\n", NRHS);
  }
  if (nlhs != NLHS) {
    mexPrintf("Matlab:%s:InvalidNumberOutput,\n", __FILE__);
    /* BUG FIX: this message previously said "inputs" although it reports the
       output-count (nlhs) check. */
    mexPrintf("%d outputs required.\n", NLHS);
  }
  double *invM = mxGetPr(prhs[0]);
  double *Mb = mxGetPr(prhs[1]);
  double *FToE = mxGetPr(prhs[2]);
  double *FToN1 = mxGetPr(prhs[3]);
  double *FToN2 = mxGetPr(prhs[4]);
  double *Js = mxGetPr(prhs[5]);
  double *J = mxGetPr(prhs[6]);
  double *fluxM = mxGetPr(prhs[7]);
  double *fluxP = mxGetPr(prhs[8]);
  // dims = mxGetDimensions(prhs[6]);
  const int Np = mxGetM(prhs[6]); // num of interp nodes
  const int K = mxGetN(prhs[6]); // num of elements
  const mwSize *dims = mxGetDimensions(prhs[7]);
  const int Nfp = dims[0];
  const int Ne = dims[1]; // num of edges
  int Nfield;
  if (mxGetNumberOfDimensions(prhs[7]) > 2) {
    Nfield = dims[2];
  } else {
    Nfield = 1; // fluxM is a 2D matrix
  }
  /* Allocate the zero-initialized output array frhs[Np][K][Nfield]. */
  const size_t ndimOut = 3;
  const mwSize dimOut[3] = {Np, K, Nfield};
  plhs[0] = mxCreateNumericArray(ndimOut, dimOut, mxDOUBLE_CLASS, mxREAL);
  double *frhs = mxGetPr(plhs[0]);
  /* BLAS dgemm arguments: no transpose, single column (oneI) per solve. */
  char *chn = "N";
  double one = 1.0, zero = 0.0;
  ptrdiff_t oneI = 1;
  ptrdiff_t np = Np;
/* Fields are independent, so parallelize over them. DG_THREADS is assumed to
   be supplied by the build (compiler define) — TODO confirm. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(DG_THREADS)
#endif
  for (int fld = 0; fld < Nfield; fld++) {
    double *rhs = frhs + Np * K * fld;
    double *fluxM_ = fluxM + Nfp * Ne * fld;
    double *fluxP_ = fluxP + Nfp * Ne * fld;
    // double *fluxS_ = fluxS + Nfp * Ne * fld;
    for (int k = 0; k < Ne; k++) { // evaluate rhs on each edge
      /* FToE/FToN* are 1-based MATLAB indices; the -1 shifts convert them to
         0-based C offsets (ind1/ind2 fold the shift into the element base). */
      const int e1 = (int)FToE[2 * k] - 1;
      const int e2 = (int)FToE[2 * k + 1] - 1;
      const int ind1 = e1 * Np - 1;
      const int ind2 = e2 * Np - 1;
      const int ind = k * Nfp;
      double rhsM[Nfp], rhsP[Nfp]; /* C99 VLAs: per-face accumulators */
      for (int n = 0; n < Nfp; n++) {
        rhsM[n] = 0;
        rhsP[n] = 0;
      }
      for (int n = 0; n < Nfp; n++) {
        const int sk = n + ind;
        /* NOTE(review): dfP == -dfM, so rhsP accumulates the same quantity as
           rhsM (the -= cancels the sign) — presumably the intended two-sided
           jump contribution; verify against the scheme. */
        double dfM = ( fluxM_[sk] - fluxP_[sk] );
        double dfP = ( fluxP_[sk] - fluxM_[sk] );
        double j = Js[sk];
        double *mb = Mb + n * Nfp; /* column n of the face mass matrix */
        for (int m = 0; m < Nfp; m++) {
          rhsM[m] += mb[m] * j * dfM;
          rhsP[m] -= mb[m] * j * dfP;
        }
      }
      /* Scatter the face accumulators into the two adjacent elements. */
      for (int n = 0; n < Nfp; n++) {
        const int sk = n + ind;
        const int m1 = (int)FToN1[sk] + ind1;
        const int m2 = (int)FToN2[sk] + ind2;
        rhs[m1] += rhsM[n];
        rhs[m2] += rhsP[n];
      }
    }
    /* Per element: temp = invM * rhs_k (dgemm with a single column), then
       divide by the pointwise Jacobian and write back. */
    double temp[Np];
    for (int k = 0; k < K; k++) {
      double *rhs_ = rhs + k * Np;
      double *j = J + k * Np;
      dgemm(chn, chn, &np, &oneI, &np, &one, invM, &np, rhs_, &np, &zero, temp,
            &np);
      // copy rhs
      for (int n = 0; n < Np; n++) {
        rhs_[n] = temp[n] / j[n];
      }
    }
  }
  return;
}
add_omp.c | /*
This file is part of ParTI!.
ParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <ParTI.h>
#include "sptensor.h"
/* TODO: bug — see NOTE(review) inside the parallel region below. */
/* Element-wise addition Y += X of two sparse tensors with identical shapes,
 * parallelized with OpenMP. Matching nonzeros are summed in place in Y;
 * nonzeros present only in X are appended to Y via per-thread staging
 * buffers, then merged, compacted and re-sorted. Returns 0 on success. */
int sptSparseTensorAddOMP(sptSparseTensor *Y, sptSparseTensor *X, int const nthreads) {
/* Ensure X and Y are in same shape */
if(Y->nmodes != X->nmodes) {
spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP SpTns Add", "shape mismatch");
}
for(sptIndex i = 0; i < X->nmodes; ++i) {
if(Y->ndims[i] != X->ndims[i]) {
spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP SpTns Add", "shape mismatch");
}
}
/* Determine partitioning strategy: distribute the nonzeros of Y (and, with
   the same row split, of X) over nthreads. */
sptNnzIndex * dist_nnzs_X = (sptNnzIndex*)malloc(nthreads*sizeof(sptNnzIndex));
sptNnzIndex * dist_nnzs_Y = (sptNnzIndex*)malloc(nthreads*sizeof(sptNnzIndex));
sptIndex * dist_nrows_Y = (sptIndex*)malloc(nthreads*sizeof(sptIndex));
spt_DistSparseTensor(Y, nthreads, dist_nnzs_Y, dist_nrows_Y);
spt_DistSparseTensorFixed(X, nthreads, dist_nnzs_X);
free(dist_nrows_Y);
/* Debug dump of the per-thread nonzero distribution. */
printf("dist_nnzs_Y:\n");
for(int i=0; i<nthreads; ++i) {
printf("%zu ", dist_nnzs_Y[i]);
}
printf("\n");
printf("dist_nnzs_X:\n");
for(int i=0; i<nthreads; ++i) {
printf("%zu ", dist_nnzs_X[i]);
}
printf("\n");
fflush(stdout);
/* Build a private arrays to append values. Each thread stages X-only
   nonzeros here to avoid contended appends to Y inside the parallel region. */
sptNnzIndex nnz_gap = llabs((long long) Y->nnz - (long long) X->nnz);
sptNnzIndex increase_size = 0;
if(nnz_gap == 0) increase_size = 10;
else increase_size = nnz_gap;
sptIndexVector **local_inds = (sptIndexVector**)malloc(nthreads* sizeof *local_inds);
for(int k=0; k<nthreads; ++k) {
local_inds[k] = (sptIndexVector*)malloc(Y->nmodes* sizeof *(local_inds[k]));
for(sptIndex m=0; m<Y->nmodes; ++m) {
sptNewIndexVector(&(local_inds[k][m]), 0, increase_size);
}
}
sptValueVector *local_vals = (sptValueVector*)malloc(nthreads* sizeof *local_vals);
for(int k=0; k<nthreads; ++k) {
sptNewValueVector(&(local_vals[k]), 0, increase_size);
}
/* Add elements one by one, assume indices are ordered */
sptNnzIndex Ynnz = 0;
omp_set_dynamic(0);
omp_set_num_threads(nthreads);
/* Ynnz is a sum-reduction: each thread contributes its partition's Y count
   plus the number of X-only elements it staged. */
#pragma omp parallel reduction(+:Ynnz)
{
int tid = omp_get_thread_num();
/* NOTE(review): i and j both start at 0 in every thread and run up to
   dist_nnzs_X/Y[tid]. Unless spt_DistSparseTensor* return something other
   than per-thread counts/offsets, all threads walk overlapping prefixes of
   X and Y (racing on Y->values.data[j]) instead of disjoint ranges — this
   looks like the bug referenced in the TODO above; verify the semantics of
   dist_nnzs_* against spt_DistSparseTensor. */
sptNnzIndex i=0, j=0;
Ynnz = dist_nnzs_Y[tid];
/* Sorted two-pointer merge of this thread's X and Y ranges. */
while(i < dist_nnzs_X[tid] && j < dist_nnzs_Y[tid]) {
int compare = spt_SparseTensorCompareIndices(X, i, Y, j);
if(compare > 0) { // X(i) > Y(j)
++j;
} else if(compare < 0) { // X(i) < Y(j)
/* X-only nonzero: stage its indices and value in the thread buffers. */
sptIndex mode;
int result;
for(mode = 0; mode < X->nmodes; ++mode) {
result = sptAppendIndexVector(&(local_inds[tid][mode]), X->inds[mode].data[i]);
spt_CheckOmpError(result, "OMP SpTns Add", NULL);
}
result = sptAppendValueVector(&(local_vals[tid]), X->values.data[i]);
spt_CheckOmpError(result, "OMP SpTns Add", NULL);
++Ynnz;
++i;
} else { // X(i) = Y(j)
Y->values.data[j] += X->values.data[i];
++i;
++j;
}
}
/* Append remaining elements of X to Y */
while(i < dist_nnzs_X[tid]) {
sptIndex mode;
int result;
for(mode = 0; mode < X->nmodes; ++mode) {
result = sptAppendIndexVector(&(local_inds[tid][mode]), X->inds[mode].data[i]);
spt_CheckOmpError(result, "OMP SpTns Add", NULL);
}
result = sptAppendValueVector(&(local_vals[tid]), X->values.data[i]);
spt_CheckOmpError(result, "OMP SpTns Add", NULL);
++Ynnz;
++i;
}
}
Y->nnz = Ynnz;
/* Append all the local arrays to Y. */
for(int k=0; k<nthreads; ++k) {
for(sptIndex m=0; m<Y->nmodes; ++m) {
sptAppendIndexVectorWithVector(&(Y->inds[m]), &(local_inds[k][m]));
}
sptAppendValueVectorWithVector(&(Y->values), &(local_vals[k]));
}
/* Release the per-thread staging buffers and distribution arrays. */
for(int k=0; k<nthreads; ++k) {
for(sptIndex m=0; m<Y->nmodes; ++m) {
sptFreeIndexVector(&(local_inds[k][m]));
}
free(local_inds[k]);
sptFreeValueVector(&(local_vals[k]));
}
free(local_inds);
free(local_vals);
free(dist_nnzs_X);
free(dist_nnzs_Y);
/* Check whether elements become zero after adding.
If so, fill the gap with the [nnz-1]'th element.
*/
spt_SparseTensorCollectZeros(Y);
/* Sort the indices */
sptSparseTensorSortIndex(Y, 1, nthreads);
return 0;
}
|
fisher.c | /** @file fisher.c
** @brief Fisher - Declaration
** @author David Novotny
**/
/*
Copyright (C) 2013 David Novotny and Andrea Vedaldi.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
/**
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@page fisher Fisher Vector encoding (FV)
@author David Novotny
@author Andrea Vedaldi
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@ref fisher.h implements the Fisher Vectors (FV) image representation
@cite{perronnin06fisher} @cite{perronnin10improving}. A FV is a
statistics capturing the distribution of a set of vectors, usually a
set of local image descriptors.
@ref fisher-starting demonstrates how to use the C API to compute the
FV representation of an image. For further details refer to:
- @subpage fisher-fundamentals - Fisher Vector definition.
- @subpage fisher-derivation - Deriving the Fisher Vectors as a Fisher Kernel.
- @subpage fisher-kernel - The Fisher Kernel in general.
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section fisher-starting Getting started
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
The Fisher Vector encoding of a set of features is obtained by using
the function ::vl_fisher_encode. Note that the function requires a
@ref gmm "Gaussian Mixture Model" (GMM) of the encoded feature
distribution. In the following code, the result of the coding process
is stored in the @c enc array and the improved fisher vector
normalization is used.
@code
float * means ;
float * covariances ;
float * priors ;
float * posteriors ;
float * enc;
// create a GMM object and cluster input data to get means, covariances
// and priors of the estimated mixture
gmm = vl_gmm_new (VL_TYPE_FLOAT) ;
vl_gmm_cluster (gmm, data, dimension, numData, numClusters);
// allocate space for the encoding
enc = vl_malloc(sizeof(float) * 2 * dimension * numClusters);
// run fisher encoding
vl_fisher_encode
(enc, VL_F_TYPE,
vl_gmm_get_means(gmm), dimension, numClusters,
vl_gmm_get_covariances(gmm),
vl_gmm_get_priors(gmm),
dataToEncode, numDataToEncode,
VL_FISHER_FLAG_IMPROVED
) ;
@endcode
The performance of the standard Fisher Vector can be significantly
improved @cite{perronnin10improving} by using appropriate @ref
fisher-normalization normalizations. These are controlled by the @c
flag parameter of ::vl_fisher_encode.
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@page fisher-fundamentals Fisher vector fundamentals
@tableofcontents
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
This page describes the *Fisher Vector* (FV) of
@cite{perronnin06fisher} @cite{perronnin10improving}. See @ref fisher
for an overview of the C API and @ref fisher-kernel for its relation
to the more general notion of Fisher kernel.
The FV is an image representation obtained by pooling local image
features. It is frequently used as a global image descriptor in visual
classification.
While the FV can be @ref fisher-kernel "derived" as a special,
approximate, and improved case of the general Fisher Kernel framework,
it is easy to describe directly. Let $I = (\bx_1,\dots,\bx_N)$ be a
set of $D$ dimensional feature vectors (e.g. SIFT descriptors)
extracted from an image. Let
$\Theta=(\mu_k,\Sigma_k,\pi_k:k=1,\dots,K)$ be the parameters of a
@ref gmm "Gaussian Mixture Model" fitting the distribution of
descriptors. The GMM associates each vector $\bx_i$ to a mode $k$ in
the mixture with a strength given by the posterior probability:
\[
q_{ik} =
\frac
{\exp\left[-\frac{1}{2}(\bx_i - \mu_k)^T \Sigma_k^{-1} (\bx_i - \mu_k)\right]}
{\sum_{t=1}^K \exp\left[-\frac{1}{2}(\bx_i - \mu_t)^T \Sigma_k^{-1} (\bx_i - \mu_t)\right]}.
\]
For each mode $k$, consider the mean and covariance deviation vectors
@f{align*}
u_{jk} &=
{1 \over {N \sqrt{\pi_k}}}
\sum_{i=1}^{N}
q_{ik} \frac{x_{ji} - \mu_{jk}}{\sigma_{jk}},
\\
v_{jk} &=
{1 \over {N \sqrt{2 \pi_k}}}
\sum_{i=1}^{N}
q_{ik} \left[ \left(\frac{x_{ji} - \mu_{jk}}{\sigma_{jk}}\right)^2 - 1 \right].
@f}
where $j=1,2,\dots,D$ spans the vector dimensions. The FV of image $I$
is the stacking of the vectors $\bu_k$ and then of the vectors
$\bv_k$ for each of the $K$ modes in the Gaussian mixtures:
\[
\Phi(I) = \begin{bmatrix} \vdots \\ \bu_k \\ \vdots \\ \bv_k \\ \vdots \end{bmatrix}.
\]
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section fisher-normalization Normalization and improved Fisher vectors
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
The *improved* Fisher Vector @cite{perronnin10improving} (IFV) improves the
classification performance of the representation by using two ideas:
1. *Non-linear additive kernel.* The Hellinger's kernel (or
Bhattacharya coefficient) can be used instead of the linear one at
no cost by signed squared rooting. This is obtained by applying the
function $|z| \sign z$ to each dimension of the vector $\Phi(I)$.
Other @ref homkermap "additive kernels" can also be used at an
increased space or time cost.
2. *Normalization.* Before using the representation in a linear model
(e.g. a @ref svm "support vector machine"), the vector $\Phi(I)$ is
further normalized by the $l^2$ norm (note that the standard Fisher
vector is normalized by the number of encoded feature vectors).
After square-rooting and normalization, the IFV is often used in a
linear classifier such as an @ref svm "SVM".
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@section fisher-fast Faster computations
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
In practice, several data to cluster assignments $q_{ik}$ are likely
to be very small or even negligible. The *fast* version of the FV sets
to zero all but the largest assignment for each input feature $\bx_i$.
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@page fisher-derivation Fisher vector derivation
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
The FV of @cite{perronnin06fisher} is a special case of the @ref
fisher-kernel "Fisher kernel" construction. It is designed to encode
local image features in a format that is suitable for learning and
comparison with simple metrics such as the Euclidean. In this
construction, an image is modeled as a collection of $D$-dimensional
feature vectors $I=(\bx_1,\dots,\bx_n)$ generated by a GMM with $K$
components $\Theta=(\mu_k,\Sigma_k,\pi_k:k=1,\dots,K)$. The covariance
matrices are assumed to be diagonal, i.e. $\Sigma_k = \diag
\bsigma_k^2$, $\bsigma_k \in \real^D_+$.
The generative model of *one* feature vector $\bx$ is given by the GMM
density function:
\[
p(\bx|\Theta) =
\sum_{k=1}^K \pi_k p(\bx|\Theta_k),
\quad
p(\bx|\Theta_k)
=
\frac{1}{(2\pi)^\frac{D}{2} (\det \Sigma_k)^{\frac{1}{2}}}
\exp
\left[
-\frac{1}{2}
(\bx - \mu_k)^\top \Sigma_k^{-1} (\bx - \mu_k)
\right]
\]
where $\Theta_k = (\mu_k,\Sigma_k)$. The Fisher Vector requires
computing the derivative of the log-likelihood function with respect
to the various model parameters. Consider in particular the parameters
$\Theta_k$ of a mode. Due to the exponent in the Gaussian density
function, the derivative can be written as
\[
\nabla_{\Theta_k} p(\bx|\Theta_k) =
p(\bx|\Theta_k)
g(\bx|\Theta_k)
\]
for a simple vector function $g$. The derivative of the log-likelihood
function is then
\[
\nabla_{\Theta_k} \log p(\bx|\Theta)
=
\frac{\pi_k p(\bx|\Theta_k)}{\sum_{t=1}^K \pi_k p(\bx|\Theta_k)}
g(\bx|\Theta_k)
=
q_k(\bx) g(\bx|\Theta_k)
\]
where $q_k(\bx)$ is the soft-assignment of the point $\bx$ to the mode
$k$. We make the approximation that $q_k(\bx)\approx 1$ if $\bx$ is
sampled from mode $k$ and $\approx 0$ otherwise
@cite{perronnin06fisher}. Hence one gets:
\[
E_{\bx \sim p(\bx|\Theta)}
[
\nabla_{\Theta_k} \log p(\bx|\Theta)
\nabla_{\Theta_t} \log p(\bx|\Theta)^\top
]
\approx
\begin{cases}
\pi_k E_{\bx \sim p(\bx|\Theta_k)} [ g(\bx|\Theta_k) g(\bx|\Theta_k)^\top], & t = k, \\
0, & t\not=k.
\end{cases}
\]
Thus under this approximation there is no correlation between the
parameters of the various Gaussian modes.
The function $g$ can be further broken down as the stacking of the
derivative w.r.t. the mean and the diagonal covariance.
\[
g(\bx|\Theta_k)
=
\begin{bmatrix}
g(\bx|\mu_k) \\
g(\bx|\bsigma_k)
\end{bmatrix},
\quad
[g(\bx|\mu_k)]_j
=
\frac{x_j - \mu_{jk}}{\sigma_{jk}^2},
\quad
[g(\bx|\bsigma_k^2)]_j
=
\frac{1}{2\sigma_{jk}^2}
\left(
\left(\frac{x_j - \mu_{jk}}{\sigma_{jk}}\right)^2
-
1
\right)
\]
Thus the covariance of the model (Fisher information) is diagonal and
the diagonal entries are given by
\[
H_{\mu_{jk}} = \pi_k E[g(\bx|\mu_{jk})g(\bx|\mu_{jk})]
= \frac{\pi_k}{\sigma_{jk}^2},
\quad
H_{\sigma_{jk}^2} = \frac{\pi_k}{2 \sigma_{jk}^4}.
\]
where in the calculation it was used the fact that the fourth moment
of the standard Gaussian distribution is 3. Multiplying the inverse
square root of the matrix $H$ by the derivative of the log-likelihood
function results in the Fisher vector encoding of one image feature
$\bx$:
\[
\Phi_{\mu_{jk}}(\bx) = H_{\mu_{jk}}^{-\frac{1}{2}} q_k(\bx) g(\bx|\mu_{jk})
= q_k(\bx) \frac{x_j - \mu_{jk}}{\sqrt{\pi_k}\sigma_{jk}},
\qquad
\Phi_{\sigma^2_{jk}}(\bx) =
\frac{q_k(\bx)}{\sqrt{2 \pi_k}}
\left(
\left(\frac{x_j - \mu_{jk}}{\sigma_{jk}}\right)^2
-
1
\right)
\]
Assuming that features are sampled i.i.d. from the GMM results in the
formulas given in @ref fisher-fundamentals (note the normalization
factor). Note that:
* The Fisher components relative to the prior probabilities $\pi_k$
have been ignored. This is because they have little effect on the
representation @cite{perronnin10improving}.
* Technically, the derivation of the Fisher Vector for multiple image
features requires the number of features to be the same in both
images. Ultimately, however, the representation can be computed by
using any number of features.
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
@page fisher-kernel Fisher kernel
<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
This page discusses the Fisher Kernels (FK) of
@cite{jaakkola98exploiting} and shows how the FV of
@cite{perronnin06fisher} can be derived from it as a special case. The
FK induces a similarity measures between data points $\bx$ and $\bx'$
from a parametric generative model $p(\bx|\Theta)$ of the data. The
parameter $\Theta$ of the model is selected to fit the a-priori
distribution of the data, and is usually the Maximum Likelihood (MLE)
estimate obtained from a set of training examples. Once the generative
model is learned, each particular datum $\bx$ is represented by
looking at how it affects the MLE parameter estimate. This effect is
measured by computing the gradient of the log-likelihood term
corresponding to $\bx$:
\[
\hat\Phi(\bx) = \nabla_\Theta \log p(\bx|\Theta)
\]
The vectors $\hat\Phi(\bx)$ should be appropriately scaled before they
can be meaningfully compared. This is obtained by *whitening* the data
by multiplying the vectors by the inverse of the square root of their
*covariance matrix*. The covariance matrix can be obtained from the
generative model $p(\bx|\Theta)$ itself. Since $\Theta$ is the ML
parameter and $\hat\Phi(\bx)$ is the gradient of the log-likelihood
function, its expected value $E[\hat\Phi(\bx)]$ is zero. Thus, since
the vectors are already centered, their covariance matrix is simply:
\[
H = E_{\bx \sim p(\bx|\Theta)} [\hat\Phi(\bx) \hat\Phi(\bx)^\top]
\]
Note that $H$ is also the *Fisher information matrix* of the
model. The final FV encoding $\Phi(\bx)$ is given by the whitened
gradient of the log-likelihood function, i.e.:
\[
\Phi(\bx) = H^{-\frac{1}{2}} \nabla_\Theta \log p(\bx|\Theta).
\]
Taking the inner product of two such vectors yields the *Fisher
kernel*:
\[
K(\bx,\bx')
= \langle \Phi(\bx),\Phi(\bx') \rangle
= \nabla_\Theta \log p(\bx|\Theta)^\top H^{-1} \nabla_\Theta \log p(\bx'|\Theta).
\]
**/
#include "fisher.h"
#include "gmm.h"
#include "mathop.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef VL_FISHER_INSTANTIATING
/* Type-generic Fisher Vector encoder, instantiated via the TYPE/SFX macros
 * for float (_vl_fisher_encode_f) and double (_vl_fisher_encode_d).
 *
 * enc receives the stacked [u_1..u_K, v_1..v_K] deviations and must hold
 * 2 * dimension * numClusters elements; means/covariances are column-major
 * [dimension x numClusters]; priors has numClusters entries; data is
 * column-major [dimension x numData]. Returns the number of (point, cluster)
 * accumulation terms actually performed (useful for diagnostics). */
static vl_size
VL_XCAT(_vl_fisher_encode_, SFX)
(TYPE * enc,
TYPE const * means, vl_size dimension, vl_size numClusters,
TYPE const * covariances,
TYPE const * priors,
TYPE const * data, vl_size numData,
int flags)
{
vl_size dim;
vl_index i_cl, i_d;
vl_size numTerms = 0 ;
TYPE * posteriors ;
TYPE * sqrtInvSigma;
assert(numClusters >= 1) ;
assert(dimension >= 1) ;
posteriors = vl_malloc(sizeof(TYPE) * numClusters * numData);
sqrtInvSigma = vl_malloc(sizeof(TYPE) * dimension * numClusters);
/* enc is accumulated into below, so it must start zeroed. */
memset(enc, 0, sizeof(TYPE) * 2 * dimension * numClusters) ;
/* Precompute 1/sigma per (dimension, cluster) from the diagonal covariances. */
for (i_cl = 0 ; i_cl < (signed)numClusters ; ++i_cl) {
for(dim = 0; dim < dimension; dim++) {
sqrtInvSigma[i_cl*dimension + dim] = sqrt(1.0 / covariances[i_cl*dimension + dim]);
}
}
/* Soft-assignment posteriors q_{ik}, stored [numClusters x numData]. */
VL_XCAT(vl_get_gmm_data_posteriors_, SFX)(posteriors, numClusters, numData,
priors,
means, dimension,
covariances,
data) ;
/* sparsify posterior assignments with the FAST option */
if (flags & VL_FISHER_FLAG_FAST) {
for(i_d = 0; i_d < (signed)numData; i_d++) {
/* find largest posterior assignment for datum i_d */
vl_index best = 0 ;
TYPE bestValue = posteriors[i_d * numClusters] ;
for (i_cl = 1 ; i_cl < (signed)numClusters; ++ i_cl) {
TYPE p = posteriors[i_cl + i_d * numClusters] ;
if (p > bestValue) {
bestValue = p ;
best = i_cl ;
}
}
/* make all posterior assignments zero but the best one */
for (i_cl = 0 ; i_cl < (signed)numClusters; ++ i_cl) {
posteriors[i_cl + i_d * numClusters] =
(TYPE)(i_cl == best) ;
}
}
}
/* Accumulate the mean (uk) and covariance (vk) deviation vectors. The
   parallel loop is over clusters: each i_cl writes a disjoint slice of enc,
   so no synchronization is needed; numTerms is a sum-reduction. */
#if defined(_OPENMP)
#pragma omp parallel for default(shared) private(i_cl, i_d, dim) num_threads(vl_get_max_threads()) reduction(+:numTerms)
#endif
for(i_cl = 0; i_cl < (signed)numClusters; ++ i_cl) {
TYPE uprefix;
TYPE vprefix;
TYPE * uk = enc + i_cl*dimension ;
TYPE * vk = enc + i_cl*dimension + numClusters * dimension ;
/*
If the GMM component is degenerate and has a null prior, then it
must have null posterior as well. Hence it is safe to skip it. In
practice, we skip over it even if the prior is very small; if by
any chance a feature is assigned to such a mode, then its weight
would be very high due to the division by priors[i_cl] below.
*/
if (priors[i_cl] < 1e-6) { continue ; }
for(i_d = 0; i_d < (signed)numData; i_d++) {
TYPE p = posteriors[i_cl + i_d * numClusters] ;
/* Negligible assignments contribute nothing measurable; skip them. */
if (p < 1e-6) continue ;
numTerms += 1;
for(dim = 0; dim < dimension; dim++) {
TYPE diff = data[i_d*dimension + dim] - means[i_cl*dimension + dim] ;
diff *= sqrtInvSigma[i_cl*dimension + dim] ;
*(uk + dim) += p * diff ;
*(vk + dim) += p * (diff * diff - 1);
}
}
/* Apply the 1/(N sqrt(pi_k)) and 1/(N sqrt(2 pi_k)) normalizations. */
if (numData > 0) {
uprefix = 1/(numData*sqrt(priors[i_cl]));
vprefix = 1/(numData*sqrt(2*priors[i_cl]));
for(dim = 0; dim < dimension; dim++) {
*(uk + dim) = *(uk + dim) * uprefix;
*(vk + dim) = *(vk + dim) * vprefix;
}
}
}
vl_free(posteriors);
vl_free(sqrtInvSigma) ;
/* Signed square-rooting (Hellinger-kernel approximation). */
if (flags & VL_FISHER_FLAG_SQUARE_ROOT) {
for(dim = 0; dim < 2 * dimension * numClusters ; dim++) {
TYPE z = enc [dim] ;
if (z >= 0) {
enc[dim] = VL_XCAT(vl_sqrt_, SFX)(z) ;
} else {
enc[dim] = - VL_XCAT(vl_sqrt_, SFX)(- z) ;
}
}
}
/* Global l2 normalization, guarded against a zero-norm vector. */
if (flags & VL_FISHER_FLAG_NORMALIZED) {
TYPE n = 0 ;
for(dim = 0 ; dim < 2 * dimension * numClusters ; dim++) {
TYPE z = enc [dim] ;
n += z * z ;
}
n = VL_XCAT(vl_sqrt_, SFX)(n) ;
n = VL_MAX(n, 1e-12) ;
for(dim = 0 ; dim < 2 * dimension * numClusters ; dim++) {
enc[dim] /= n ;
}
}
return numTerms ;
}
#else
/* not VL_FISHER_INSTANTIATING */
#ifndef __DOXYGEN__
#define FLT VL_TYPE_FLOAT
#define TYPE float
#define SFX f
#define VL_FISHER_INSTANTIATING
#include "fisher.c"
#define FLT VL_TYPE_DOUBLE
#define TYPE double
#define SFX d
#define VL_FISHER_INSTANTIATING
#include "fisher.c"
#endif
/* not VL_FISHER_INSTANTIATING */
#endif
/* ================================================================ */
#ifndef VL_FISHER_INSTANTIATING
/** @brief Fisher vector encoding of a set of vectors.
** @param dataType the type of the input data (::VL_TYPE_DOUBLE or ::VL_TYPE_FLOAT).
** @param enc Fisher vector (output).
** @param means Gaussian mixture means.
** @param dimension dimension of the data.
** @param numClusters number of Gaussians mixture components.
** @param covariances Gaussian mixture diagonal covariances.
** @param priors Gaussian mixture prior probabilities.
** @param data vectors to encode.
** @param numData number of vectors to encode.
** @param flags options.
** @return number of averaging operations.
**
** @a means and @a covariances have @a dimension rows and @a
** numCluster columns. @a priors is a vector of size @a
** numCluster. @a data has @a dimension rows and @a numData
columns. @a enc is a vector of size equal to twice the product of
** @a dimension and @a numClusters. All these vectors and matrices
** have the same class, as specified by @a dataType, and must be
** stored in column-major format.
**
** @a flag can be used to control several options:
** ::VL_FISHER_FLAG_SQUARE_ROOT, ::VL_FISHER_FLAG_NORMALIZED,
** ::VL_FISHER_FLAG_IMPROVED, and ::VL_FISHER_FLAG_FAST.
**
** The function returns the number of averaging operations actually
** performed. The upper bound is the number of input features by the
number of GMM modes; however, assignments are usually fairly
** sparse, so this number is often much smaller. In particular, with
** the ::VL_FISHER_FLAG_FAST option, it is equal to the number of input
** features. This information can be used for diagnostic purposes.
**
** @sa @ref fisher
**/
vl_size
vl_fisher_encode
(void * enc, vl_type dataType,
 void const * means, vl_size dimension, vl_size numClusters,
 void const * covariances,
 void const * priors,
 void const * data, vl_size numData,
 int flags
)
{
  /* Dispatch to the type-specific implementation instantiated above via
     the VL_FISHER_INSTANTIATING self-inclusion of this file. */
  switch(dataType) {
    case VL_TYPE_FLOAT:
      return _vl_fisher_encode_f
      ((float *) enc,
       (float const *) means, dimension, numClusters,
       (float const *) covariances,
       (float const *) priors,
       (float const *) data, numData,
       flags);
    case VL_TYPE_DOUBLE:
      return _vl_fisher_encode_d
      ((double *) enc,
       (double const *) means, dimension, numClusters,
       (double const *) covariances,
       (double const *) priors,
       (double const *) data, numData,
       flags);
    default:
      /* unsupported dataType: programming error */
      abort();
  }
}
/* not VL_FISHER_INSTANTIATING */
#endif
#undef SFX
#undef TYPE
#undef FLT
#undef VL_FISHER_INSTANTIATING
|
generator_spgemm_csc_bsparse.c | /******************************************************************************
** Copyright (c) 2015-2017, Intel Corporation **
** All rights reserved. **
** **
** Redistribution and use in source and binary forms, with or without **
** modification, are permitted provided that the following conditions **
** are met: **
** 1. Redistributions of source code must retain the above copyright **
** notice, this list of conditions and the following disclaimer. **
** 2. Redistributions in binary form must reproduce the above copyright **
** notice, this list of conditions and the following disclaimer in the **
** documentation and/or other materials provided with the distribution. **
** 3. Neither the name of the copyright holder nor the names of its **
** contributors may be used to endorse or promote products derived **
** from this software without specific prior written permission. **
** **
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. **
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
/**
* @file
* This file is part of GemmCodeGenerator.
*
* @author Alexander Heinecke (alexander.heinecke AT mytum.de, http://www5.in.tum.de/wiki/index.php/Alexander_Heinecke,_M.Sc.,_M.Sc._with_honors)
*
* @section LICENSE
* Copyright (c) 2012-2014, Technische Universitaet Muenchen
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* @section DESCRIPTION
* <DESCRIPTION>
*/
#include "generator_spgemm_csc_bsparse.h"
#include "generator_common.h"
#include <libxsmm_macros.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
LIBXSMM_INTERNAL_API_DEFINITION
/*
  Appends, as C source text, a small GEMM kernel in which the B operand is
  sparse and stored in CSC form (i_column_idx/i_row_idx).  The multiply is
  fully unrolled over the non-zero entries of B; the generated code indexes
  the runtime B array directly, so i_values (the numeric values of the
  sparse entries) is unused here.  Also emits arch-specific SIMD pragmas
  and a debug flop counter.
*/
void libxsmm_generator_spgemm_csc_bsparse( libxsmm_generated_code*        io_generated_code,
                                           const libxsmm_gemm_descriptor* i_xgemm_desc,
                                           const char*                    i_arch,
                                           const unsigned int*            i_row_idx,
                                           const unsigned int*            i_column_idx,
                                           const double*                  i_values ) {
  unsigned int l_n;
  unsigned int l_z;
  unsigned int l_column_elements;
  unsigned int l_flop_count = 0;

  /* NOTE(review): LIBXSMM_SNPRINTF returns the untruncated length; code
     longer than l_max_code_length would be silently cut while appending
     the full length -- assumed never to occur for these short snippets. */
  char l_new_code[512];
  int l_max_code_length = 511;
  int l_code_length = 0;

  LIBXSMM_UNUSED(i_values);

  l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " unsigned int l_m = 0;\n");
  libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );

  /* reset C if beta is zero */
  if ( i_xgemm_desc->beta == 0 ) {
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " unsigned int l_n = 0;\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_n = 0; l_n < %u; l_n++) {\n", (unsigned int)i_xgemm_desc->n);
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    if ( i_xgemm_desc->m > 1 ) {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd\n");
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma vector aligned\n");
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    }
    /* pick the literal suffix matching the C element type (double vs. float) */
    if ( (LIBXSMM_GEMM_FLAG_F32PREC & i_xgemm_desc->flags) == 0 ) {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_m = 0; l_m < %u; l_m++) { C[(l_n*%u)+l_m] = 0.0; }\n", (unsigned int)i_xgemm_desc->m, (unsigned int)i_xgemm_desc->ldc);
    } else {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_m = 0; l_m < %u; l_m++) { C[(l_n*%u)+l_m] = 0.0f; }\n", (unsigned int)i_xgemm_desc->m, (unsigned int)i_xgemm_desc->ldc);
    }
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " }\n");
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  }

  l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "\n");
  libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );

  /* determine the correct simd pragma for each architecture */
  if ( ( strcmp( i_arch, "noarch" ) == 0 ) ||
       ( strcmp( i_arch, "wsm" ) == 0 ) ||
       ( strcmp( i_arch, "snb" ) == 0 ) ||
       ( strcmp( i_arch, "hsw" ) == 0 ) ) {
    /* vector length is chosen from the row count m */
    if ( i_xgemm_desc->m > 7 ) {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd vectorlength(8)\n");
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    } else if ( i_xgemm_desc->m > 3 ) {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd vectorlength(4)\n");
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    } else if ( i_xgemm_desc->m > 1 ) {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd vectorlength(2)\n");
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    } else {}
    /* aligned loads/stores are only legal when both A and C are aligned */
    if ( (i_xgemm_desc->m > 1)                                       &&
         ((LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0)    &&
         ((LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0)       ) {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma vector aligned\n");
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    }
  } else if ( ( strcmp( i_arch, "knc" ) == 0 ) ||
              ( strcmp( i_arch, "knl" ) == 0 ) ||
              ( strcmp( i_arch, "skx" ) == 0 ) ) {
    if ( (i_xgemm_desc->m > 1)                                       &&
         ((LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0)    &&
         ((LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0)       ) {
      l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd vectorlength(32)\n #pragma vector aligned\n");
      libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    }
  } else {
    libxsmm_handle_error( io_generated_code, LIBXSMM_ERR_ARCH );
    return;
  }

  /* generate the actual kernel: one C update per stored non-zero of B
     whose row index lies inside the k-range of this GEMM */
  l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_m = 0; l_m < %u; l_m++) {\n", (unsigned int)i_xgemm_desc->m);
  libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  for ( l_n = 0; l_n < (unsigned int)i_xgemm_desc->n; l_n++ ) {
    l_column_elements = i_column_idx[l_n+1] - i_column_idx[l_n];
    for ( l_z = 0; l_z < l_column_elements; l_z++ ) {
      /* check k such that we just use rows which actually need to be multiplied */
      if ( i_row_idx[i_column_idx[l_n] + l_z] < (unsigned int)i_xgemm_desc->k ) {
        l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " C[%u+l_m] += A[%u+l_m] * B[%u];\n", l_n * i_xgemm_desc->ldc, i_row_idx[i_column_idx[l_n] + l_z]*i_xgemm_desc->lda, i_column_idx[l_n] + l_z);
        libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
        l_flop_count += 2;
      }
    }
  }
  l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " }\n");
  libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );

  /* add flop counter (multiply-add = 2 flops per unrolled term, times m) */
  l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "\n#ifndef NDEBUG\n#ifdef _OPENMP\n#pragma omp atomic\n#endif\nlibxsmm_num_total_flops += %u;\n#endif\n", l_flop_count * (unsigned int)i_xgemm_desc->m);
  libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
|
SuperRayGenerator.h | /*
* Copyright(c) 2016, Youngsun Kwon, Donghyuk Kim, and Sung-eui Yoon, KAIST
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met :
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and / or other materials provided with the distribution.
* * Neither the name of SuperRay nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRIDMAP3D_SUPERRAY_SUPERRAY_GENERATOR_H
#define GRIDMAP3D_SUPERRAY_SUPERRAY_GENERATOR_H
#include <gridmap3D/gridmap3D_types.h>
#include <gridmap3D/Grid3DKey.h>
#include <gridmap3D/Pointcloud.h>
#include <gridmap3D_superray/SuperRayCloud.h>
#ifdef _OPENMP
#include <omp.h>
#pragma omp declare reduction (merge : std::vector<gridmap3D::SuperRay> : omp_out.insert(omp_out.end(), omp_in.begin(), omp_in.end()))
#endif
namespace gridmap3D{
// Builds a SuperRayCloud from an input point cloud and a sensor origin.
// A "super ray" groups input points that share a voxel traversal pattern
// (see SuperRayCloud.h) -- presumably to reduce redundant ray casting
// during occupancy-map updates; confirm against the .cpp implementation.
class SuperRayGenerator{
public:
    // _resolution: voxel edge length; _grid_max_val: key-space offset used
    // by coordToKey(); _threshold: per-voxel point-count limit for super-ray
    // generation (0 = default -- NOTE(review): exact semantics are defined
    // in the implementation file, confirm there).
    SuperRayGenerator(const double _resolution, const unsigned int _grid_max_val, const int _threshold = 0);
    ~SuperRayGenerator() {};

    // Generates super rays for all points of _pc cast from _origin and
    // stores the result in _srcloud.
    void GenerateSuperRay(const Pointcloud& _pc, const point3d& _origin, SuperRayCloud& _srcloud);

protected:
    struct VoxelInfo;
    struct Axis3D;

    point3d originW;        // origin point in World Space
    Grid3DKey originKey;    // origin key

    // constants for generating super rays
    double RESOLUTION;           // resolution
    double RESOLUTION_FACTOR;    // 1.0 / resolution
    unsigned int GRID_MAX_VAL;   // offset
    unsigned int THRESHOLD;      // threshold for limiting to generate super rays for each voxel

    // Functions for generating super rays (per-voxel point list overloads)
    void GenerateSuperRay(const point3d_collection& _pointlist, std::vector<SuperRay>& _srcloud);
    void GenerateSuperRay2D(const point3d_collection& _pointlist, Axis3D& _axis, VoxelInfo& _voxelinfo, std::vector<SuperRay>& _srcloud);
    void GenerateSuperRay3D(const point3d_collection& _pointlist, Axis3D& _axis, VoxelInfo& _voxelinfo, std::vector<SuperRay>& _srcloud);

    // Function for generating mapping line in 2-D
    double GenerateMappingLine(VoxelInfo& _voxelinfo, const unsigned int& _axisX, const unsigned int& _axisY, std::vector<double>& _mappingPlane);

    // Utility functions
    typedef unordered_ns::unordered_map<Grid3DKey, std::vector<point3d>, Grid3DKey::KeyHash> Voxelized_Pointclouds;
    void ComputeAxis(const point3d& _min, const point3d& _max, Axis3D& _axis);

    // Re-implementation of the key / coordinate conversion functions
    inline Grid3DKey coordToKey(const point3d& coord) const {
        return Grid3DKey(coordToKey(coord(0)), coordToKey(coord(1)), coordToKey(coord(2)));
    }
    // Discretizes a world coordinate to a grid key, shifted by GRID_MAX_VAL
    // so the key is non-negative.
    inline key_type coordToKey(double coordinate) const {
        return ((int)floor(RESOLUTION_FACTOR * coordinate)) + GRID_MAX_VAL;
    }

    // Structures that represent the traversal information
    struct VoxelInfo{
        VoxelInfo(void) {};

        // Voxel Info.
        point3d minW;          // min position of voxel
        point3d maxW;          // max position of voxel
        Grid3DKey voxelKey;    // key of voxel
    };
    struct Axis3D{
        Axis3D(void) : axisU(0), axisV(1), axisK(2) {};

        unsigned int axisU;    // Nearest Axis
        unsigned int axisV;    //
        unsigned int axisK;    // Farthest Axis
    };
};
}
#endif
|
matrix.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M AAA TTTTT RRRR IIIII X X %
% MM MM A A T R R I X X %
% M M M AAAAA T RRRR I X %
% M M A A T R R I X X %
% M M A A T R R IIIII X X %
% %
% %
% MagickCore Matrix Methods %
% %
% Software Design %
% Cristy %
% August 2007 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image-private.h"
#include "magick/matrix.h"
#include "magick/memory_.h"
#include "magick/pixel-private.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/thread-private.h"
#include "magick/utility.h"
/*
Typedef declaration.
*/
struct _MatrixInfo
{
  CacheType
    type;                  /* MemoryCache, MapCache, or DiskCache */

  size_t
    columns,               /* matrix width in elements */
    rows,                  /* matrix height in elements */
    stride;                /* size of one element in bytes */

  MagickSizeType
    length;                /* total cache size: columns*rows*stride bytes */

  MagickBooleanType
    mapped,                /* MemoryCache: elements from MapBlob(), not heap */
    synchronize;           /* MAGICK_SYNCHRONIZE env: preallocate disk extent */

  char
    path[MaxTextExtent];   /* backing file path for disk/map caches */

  int
    file;                  /* backing file descriptor, -1 if none */

  void
    *elements;             /* element storage; NULL for a pure disk cache */

  SemaphoreInfo
    *semaphore;            /* serializes seek+read/write w/o pread/pwrite */

  size_t
    signature;             /* MagickCoreSignature validity marker */
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e M a t r i x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireMatrixInfo() allocates the ImageInfo structure.
%
% The format of the AcquireMatrixInfo method is:
%
% MatrixInfo *AcquireMatrixInfo(const size_t columns,const size_t rows,
% const size_t stride,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o columns: the matrix columns.
%
% o rows: the matrix rows.
%
% o stride: the matrix stride.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(SIGBUS)
/*
  SIGBUS handler installed by SetMatrixExtent(): a bus error while accessing
  the matrix cache means its backing file could not be extended (e.g. the
  disk filled up), which is treated as fatal.
*/
static void MatrixSignalHandler(int status)
{
  /* the signal number is intentionally ignored */
  ThrowFatalException(CacheFatalError,"UnableToExtendMatrixCache");
}
#endif
/*
  WriteMatrixElements() writes `length' bytes from `buffer' to the matrix
  backing file at `offset', retrying on short writes and EINTR.  Returns
  the number of bytes written (a short count on hard error) or -1 if the
  initial seek fails.
*/
static inline MagickOffsetType WriteMatrixElements(
  const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PWRITE)
  /*
    Without pwrite() the seek+write pair must be serialized because the
    file offset is shared state.
  */
  LockSemaphoreInfo(matrix_info->semaphore);
  if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
    {
      UnlockSemaphoreInfo(matrix_info->semaphore);
      return((MagickOffsetType) -1);
    }
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX));
#else
    count=pwrite(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)
          break;  /* hard error: stop; caller sees a short byte count */
      }
  }
#if !defined(MAGICKCORE_HAVE_PWRITE)
  UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
  return(i);
}
/*
  SetMatrixExtent() grows the matrix backing file so it covers `length'
  bytes (e.g. before memory-mapping it).  Returns MagickTrue if the file
  already has the required extent or was successfully extended.
*/
static MagickBooleanType SetMatrixExtent(
  MatrixInfo *magick_restrict matrix_info,MagickSizeType length)
{
  MagickOffsetType
    count,
    extent,
    offset;

  /* reject lengths that do not fit in a signed file offset */
  if (length != (MagickSizeType) ((MagickOffsetType) length))
    return(MagickFalse);
  offset=(MagickOffsetType) lseek(matrix_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    return(MagickTrue);
  /* extend the file by writing a single byte at the last required position */
  extent=(MagickOffsetType) length-1;
  count=WriteMatrixElements(matrix_info,extent,1,(const unsigned char *) "");
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
  if (matrix_info->synchronize != MagickFalse)
    (void) posix_fallocate(matrix_info->file,offset+1,extent-offset);
#endif
#if defined(SIGBUS)
  /* failures to materialize the extended region surface as SIGBUS */
  (void) signal(SIGBUS,MatrixSignalHandler);
#endif
  return(count != (MagickOffsetType) 1 ? MagickFalse : MagickTrue);
}
MagickExport MatrixInfo *AcquireMatrixInfo(const size_t columns,
  const size_t rows,const size_t stride,ExceptionInfo *exception)
{
  char
    *synchronize;

  MagickBooleanType
    status;

  MatrixInfo
    *matrix_info;

  /*
    Allocate and initialize the matrix descriptor.
  */
  matrix_info=(MatrixInfo *) AcquireMagickMemory(sizeof(*matrix_info));
  if (matrix_info == (MatrixInfo *) NULL)
    return((MatrixInfo *) NULL);
  (void) memset(matrix_info,0,sizeof(*matrix_info));
  matrix_info->signature=MagickCoreSignature;
  matrix_info->columns=columns;
  matrix_info->rows=rows;
  matrix_info->stride=stride;
  matrix_info->semaphore=AllocateSemaphoreInfo();
  synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (synchronize != (const char *) NULL)
    {
      matrix_info->synchronize=IsStringTrue(synchronize);
      synchronize=DestroyString(synchronize);
    }
  /*
    Reject degenerate geometry: a zero dimension yields an empty matrix
    and would divide by zero in the overflow check below.
  */
  if ((columns == 0) || (rows == 0) || (stride == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'","matrix cache");
      return(DestroyMatrixInfo(matrix_info));
    }
  matrix_info->length=(MagickSizeType) columns*rows*stride;
  if (matrix_info->columns != (size_t) (matrix_info->length/rows/stride))
    {
      /* columns*rows*stride overflowed MagickSizeType */
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'","matrix cache");
      return(DestroyMatrixInfo(matrix_info));
    }
  /*
    Prefer an in-memory cache (heap, falling back to an anonymous map).
  */
  matrix_info->type=MemoryCache;
  status=AcquireMagickResource(AreaResource,matrix_info->length);
  if ((status != MagickFalse) &&
      (matrix_info->length == (MagickSizeType) ((size_t) matrix_info->length)))
    {
      status=AcquireMagickResource(MemoryResource,matrix_info->length);
      if (status != MagickFalse)
        {
          matrix_info->mapped=MagickFalse;
          matrix_info->elements=AcquireMagickMemory((size_t)
            matrix_info->length);
          if (matrix_info->elements == NULL)
            {
              matrix_info->mapped=MagickTrue;
              matrix_info->elements=MapBlob(-1,IOMode,0,(size_t)
                matrix_info->length);
            }
          if (matrix_info->elements == (unsigned short *) NULL)
            RelinquishMagickResource(MemoryResource,matrix_info->length);
        }
    }
  /*
    Memory was not available: fall back to a disk cache, memory-mapping it
    when the map resource limit permits.
  */
  matrix_info->file=(-1);
  if (matrix_info->elements == (unsigned short *) NULL)
    {
      status=AcquireMagickResource(DiskResource,matrix_info->length);
      if (status == MagickFalse)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
            "CacheResourcesExhausted","`%s'","matrix cache");
          return(DestroyMatrixInfo(matrix_info));
        }
      matrix_info->type=DiskCache;
      matrix_info->file=AcquireUniqueFileResource(matrix_info->path);
      if (matrix_info->file == -1)
        return(DestroyMatrixInfo(matrix_info));
      status=AcquireMagickResource(MapResource,matrix_info->length);
      if (status != MagickFalse)
        {
          status=SetMatrixExtent(matrix_info,matrix_info->length);
          if (status != MagickFalse)
            matrix_info->elements=(void *) MapBlob(matrix_info->file,IOMode,0,
              (size_t) matrix_info->length);
          if (matrix_info->elements != NULL)
            matrix_info->type=MapCache;
          else
            RelinquishMagickResource(MapResource,matrix_info->length);
        }
    }
  return(matrix_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e M a g i c k M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireMagickMatrix() allocates and returns a matrix in the form of an
% array of pointers to an array of doubles, with all values pre-set to zero.
%
% This is used to generate the two dimensional matrix, and the vectors
% required for the GaussJordanElimination() method below, for solving a
% system of simultaneous equations.
%
% The format of the AcquireMagickMatrix method is:
%
% double **AcquireMagickMatrix(const size_t number_rows,
% const size_t size)
%
% A description of each parameter follows:
%
% o number_rows: the number pointers for the array of pointers
% (first dimension).
%
% o size: the size of the array of doubles each pointer points to
% (second dimension).
%
*/
MagickExport double **AcquireMagickMatrix(const size_t number_rows,
  const size_t size)
{
  /*
    Allocate an array of row pointers, then one zero-filled row of doubles
    per pointer.  On any allocation failure every row allocated so far is
    released and NULL is returned.
  */
  double
    **matrix;

  register ssize_t
    r,
    c;

  matrix=(double **) AcquireQuantumMemory(number_rows,sizeof(*matrix));
  if (matrix == (double **) NULL)
    return((double **) NULL);
  for (r=0; r < (ssize_t) number_rows; r++)
  {
    matrix[r]=(double *) AcquireQuantumMemory(size,sizeof(*matrix[r]));
    if (matrix[r] != (double *) NULL)
      {
        for (c=0; c < (ssize_t) size; c++)
          matrix[r][c]=0.0;
        continue;
      }
    /* allocation failure: unwind the rows allocated so far */
    while (--r >= 0)
      matrix[r]=(double *) RelinquishMagickMemory(matrix[r]);
    matrix=(double **) RelinquishMagickMemory(matrix);
    return((double **) NULL);
  }
  return(matrix);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y M a t r i x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyMatrixInfo() dereferences a matrix, deallocating memory associated
% with the matrix.
%
% The format of the DestroyImage method is:
%
% MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
MagickExport MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
{
  assert(matrix_info != (MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  LockSemaphoreInfo(matrix_info->semaphore);
  switch (matrix_info->type)
  {
    case MemoryCache:
    {
      /* heap storage or an anonymous map, depending on `mapped' */
      if (matrix_info->mapped == MagickFalse)
        matrix_info->elements=RelinquishMagickMemory(matrix_info->elements);
      else
        {
          (void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
          matrix_info->elements=(unsigned short *) NULL;
        }
      RelinquishMagickResource(MemoryResource,matrix_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
      matrix_info->elements=NULL;
      RelinquishMagickResource(MapResource,matrix_info->length);
    }
    /* intentional fall-through: a map cache is backed by a disk file (see
       AcquireMatrixInfo), so the DiskCache cleanup below must run too */
    case DiskCache:
    {
      if (matrix_info->file != -1)
        (void) close(matrix_info->file);
      (void) RelinquishUniqueFileResource(matrix_info->path);
      RelinquishMagickResource(DiskResource,matrix_info->length);
      break;
    }
    default:
      break;
  }
  UnlockSemaphoreInfo(matrix_info->semaphore);
  DestroySemaphoreInfo(&matrix_info->semaphore);
  return((MatrixInfo *) RelinquishMagickMemory(matrix_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a u s s J o r d a n E l i m i n a t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GaussJordanElimination() returns a matrix in reduced row echelon form,
% while simultaneously reducing and thus solving the augmented results
% matrix.
%
% See also http://en.wikipedia.org/wiki/Gauss-Jordan_elimination
%
% The format of the GaussJordanElimination method is:
%
% MagickBooleanType GaussJordanElimination(double **matrix,
% double **vectors,const size_t rank,const size_t number_vectors)
%
% A description of each parameter follows:
%
% o matrix: the matrix to be reduced, as an 'array of row pointers'.
%
% o vectors: the additional matrix augmenting the matrix for row reduction.
% Producing an 'array of column vectors'.
%
% o rank: The size of the matrix (both rows and columns). Also represents
% the number terms that need to be solved.
%
% o number_vectors: Number of vector columns, augmenting the above matrix.
% Usually 1, but can be more for more complex equation solving.
%
% Note that the 'matrix' is given as a 'array of row pointers' of rank size.
% That is values can be assigned as matrix[row][column] where 'row' is
% typically the equation, and 'column' is the term of the equation.
% That is the matrix is in the form of a 'row first array'.
%
% However 'vectors' is a 'array of column pointers' which can have any number
% of columns, with each column array the same 'rank' size as 'matrix'.
%
% This allows for simpler handling of the results, especially when only one
% column 'vector' is all that is required to produce the desired solution.
%
% For example, the 'vectors' can consist of a pointer to a simple array of
% doubles, when only one set of simultaneous equations is to be solved from
% the given set of coefficient weighted terms.
%
% double **matrix = AcquireMagickMatrix(8UL,8UL);
% double coefficents[8];
% ...
% GaussJordanElimination(matrix, &coefficents, 8UL, 1UL);
%
% However, by specifying more 'columns' (as an 'array of vector columns'), you
% can use this function to solve a set of 'separable' equations.
%
% For example a distortion function where u = U(x,y) v = V(x,y)
% And the functions U() and V() have separate coefficents, but are being
% generated from a common x,y->u,v data set.
%
% Another example is generation of a color gradient from a set of colors at
% specific coordinates, such as a list x,y -> r,g,b,a.
%
% You can also use the 'vectors' to generate an inverse of the given 'matrix'
% though as a 'column first array' rather than a 'row first array'. For
% details see http://en.wikipedia.org/wiki/Gauss-Jordan_elimination
%
*/
MagickExport MagickBooleanType GaussJordanElimination(double **matrix,
  double **vectors,const size_t rank,const size_t number_vectors)
{
/* swap two doubles without a temporary (no-op when the values are equal) */
#define GaussJordanSwap(x,y) \
{ \
  if ((x) != (y)) \
    { \
      (x)+=(y); \
      (y)=(x)-(y); \
      (x)=(x)-(y); \
    } \
}

  double
    max,
    scale;

  register ssize_t
    i,
    j,
    k;

  ssize_t
    column,
    *columns,
    *pivots,
    row,
    *rows;

  /*
    Allocate the pivot bookkeeping arrays.
  */
  columns=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*columns));
  rows=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*rows));
  pivots=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*pivots));
  if ((rows == (ssize_t *) NULL) || (columns == (ssize_t *) NULL) ||
      (pivots == (ssize_t *) NULL))
    {
      if (pivots != (ssize_t *) NULL)
        pivots=(ssize_t *) RelinquishMagickMemory(pivots);
      if (columns != (ssize_t *) NULL)
        columns=(ssize_t *) RelinquishMagickMemory(columns);
      if (rows != (ssize_t *) NULL)
        rows=(ssize_t *) RelinquishMagickMemory(rows);
      return(MagickFalse);
    }
  (void) memset(columns,0,rank*sizeof(*columns));
  (void) memset(rows,0,rank*sizeof(*rows));
  (void) memset(pivots,0,rank*sizeof(*pivots));
  column=0;
  row=0;
  for (i=0; i < (ssize_t) rank; i++)
  {
    /*
      Full pivoting: locate the largest-magnitude unused element.
    */
    max=0.0;
    for (j=0; j < (ssize_t) rank; j++)
      if (pivots[j] != 1)
        {
          for (k=0; k < (ssize_t) rank; k++)
            if (pivots[k] != 0)
              {
                if (pivots[k] > 1)
                  {
                    /*
                      Bug fix: release the work arrays before bailing out;
                      the previous version leaked them on this path.
                    */
                    pivots=(ssize_t *) RelinquishMagickMemory(pivots);
                    rows=(ssize_t *) RelinquishMagickMemory(rows);
                    columns=(ssize_t *) RelinquishMagickMemory(columns);
                    return(MagickFalse);
                  }
              }
            else
              if (fabs(matrix[j][k]) >= max)
                {
                  max=fabs(matrix[j][k]);
                  row=j;
                  column=k;
                }
        }
    pivots[column]++;
    if (row != column)
      {
        /* move the pivot onto the diagonal */
        for (k=0; k < (ssize_t) rank; k++)
          GaussJordanSwap(matrix[row][k],matrix[column][k]);
        for (k=0; k < (ssize_t) number_vectors; k++)
          GaussJordanSwap(vectors[k][row],vectors[k][column]);
      }
    rows[i]=row;
    columns[i]=column;
    if (matrix[column][column] == 0.0)
      {
        /*
          Singularity.  Bug fix: release the work arrays before bailing
          out; the previous version leaked them on this path.
        */
        pivots=(ssize_t *) RelinquishMagickMemory(pivots);
        rows=(ssize_t *) RelinquishMagickMemory(rows);
        columns=(ssize_t *) RelinquishMagickMemory(columns);
        return(MagickFalse);
      }
    /*
      Normalize the pivot row, then eliminate the pivot column from all
      other rows.
    */
    scale=PerceptibleReciprocal(matrix[column][column]);
    matrix[column][column]=1.0;
    for (j=0; j < (ssize_t) rank; j++)
      matrix[column][j]*=scale;
    for (j=0; j < (ssize_t) number_vectors; j++)
      vectors[j][column]*=scale;
    for (j=0; j < (ssize_t) rank; j++)
      if (j != column)
        {
          scale=matrix[j][column];
          matrix[j][column]=0.0;
          for (k=0; k < (ssize_t) rank; k++)
            matrix[j][k]-=scale*matrix[column][k];
          for (k=0; k < (ssize_t) number_vectors; k++)
            vectors[k][j]-=scale*vectors[k][column];
        }
  }
  /*
    Undo the column permutation introduced by the pivoting.
  */
  for (j=(ssize_t) rank-1; j >= 0; j--)
    if (columns[j] != rows[j])
      for (i=0; i < (ssize_t) rank; i++)
        GaussJordanSwap(matrix[i][rows[j]],matrix[i][columns[j]]);
  pivots=(ssize_t *) RelinquishMagickMemory(pivots);
  rows=(ssize_t *) RelinquishMagickMemory(rows);
  columns=(ssize_t *) RelinquishMagickMemory(columns);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x C o l u m n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixColumns() returns the number of columns in the matrix.
%
% The format of the GetMatrixColumns method is:
%
% size_t GetMatrixColumns(const MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
MagickExport size_t GetMatrixColumns(const MatrixInfo *matrix_info)
{
  /* simple accessor; the asserts validate the descriptor */
  assert(matrix_info != (MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  return(matrix_info->columns);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x E l e m e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixElement() returns the specified element in the matrix.
%
% The format of the GetMatrixElement method is:
%
% MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
% const ssize_t x,const ssize_t y,void *value)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix columns.
%
% o x: the matrix x-offset.
%
% o y: the matrix y-offset.
%
% o value: return the matrix element in this buffer.
%
*/
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  /*
    Clamp an x-offset to the valid column range [0, columns-1] (replicate
    the edge pixel for out-of-bounds lookups).
  */
  if (x >= (ssize_t) columns)
    return((ssize_t) (columns-1));
  return(x < 0L ? 0L : x);
}
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  /*
    Clamp a y-offset to the valid row range [0, rows-1] (replicate the
    edge pixel for out-of-bounds lookups).
  */
  if (y >= (ssize_t) rows)
    return((ssize_t) (rows-1));
  return(y < 0L ? 0L : y);
}
/*
  Read `length` bytes of a disk-backed matrix, starting at byte `offset`,
  into `buffer`.  Returns the number of bytes actually read (possibly short
  on an unrecoverable I/O error) or -1 if the initial seek fails.  Reads are
  retried on EINTR.
*/
static inline MagickOffsetType ReadMatrixElements(
const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
const MagickSizeType length,unsigned char *magick_restrict buffer)
{
register MagickOffsetType
i;
ssize_t
count;
#if !defined(MAGICKCORE_HAVE_PREAD)
/*
  No pread(): serialize the seek+read pair so concurrent readers cannot
  interleave their file offsets.
*/
LockSemaphoreInfo(matrix_info->semaphore);
if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
{
UnlockSemaphoreInfo(matrix_info->semaphore);
return((MagickOffsetType) -1);
}
#endif
count=0;
/* Loop until `length` bytes are consumed; `count` advances past each
   partial read. */
for (i=0; i < (MagickOffsetType) length; i+=count)
{
/* Each request is capped at SSIZE_MAX, the largest portable read size. */
#if !defined(MAGICKCORE_HAVE_PREAD)
count=read(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
(MagickSizeType) SSIZE_MAX));
#else
count=pread(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
(MagickSizeType) SSIZE_MAX),(off_t) (offset+i));
#endif
if (count <= 0)
{
/* Zero the increment; retry only if the read was interrupted. */
count=0;
if (errno != EINTR)
break;
}
}
#if !defined(MAGICKCORE_HAVE_PREAD)
UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
return(i);
}
MagickExport MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
  const ssize_t x,const ssize_t y,void *value)
{
  /*
    Copy one matrix element into `value`.  Out-of-range coordinates are
    clamped to the nearest edge (replicate policy), so memory-backed
    lookups always succeed; disk-backed lookups fail only on I/O error.
  */
  MagickOffsetType
    count,
    i;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  i=(MagickOffsetType) EdgeY(y,matrix_info->rows)*matrix_info->columns+
    EdgeX(x,matrix_info->columns);
  if (matrix_info->type == DiskCache)
    {
      count=ReadMatrixElements(matrix_info,i*matrix_info->stride,
        matrix_info->stride,(unsigned char *) value);
      return(count == (MagickOffsetType) matrix_info->stride ? MagickTrue :
        MagickFalse);
    }
  (void) memcpy(value,(unsigned char *) matrix_info->elements+i*
    matrix_info->stride,matrix_info->stride);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x R o w s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixRows() returns the number of rows in the matrix.
%
% The format of the GetMatrixRows method is:
%
% size_t GetMatrixRows(const MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
MagickExport size_t GetMatrixRows(const MatrixInfo *matrix_info)
{
/* Simple accessor: the row count is fixed when the matrix is acquired. */
assert(matrix_info != (const MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
return(matrix_info->rows);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e a s t S q u a r e s A d d T e r m s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LeastSquaresAddTerms() adds one set of terms and associate results to the
% given matrix and vectors for solving using least-squares function fitting.
%
% The format of the AcquireMagickMatrix method is:
%
% void LeastSquaresAddTerms(double **matrix,double **vectors,
% const double *terms,const double *results,const size_t rank,
% const size_t number_vectors);
%
% A description of each parameter follows:
%
% o matrix: the square matrix to add given terms/results to.
%
% o vectors: the result vectors to add terms/results to.
%
% o terms: the pre-calculated terms (without the unknown coefficient
% weights) that forms the equation being added.
%
% o results: the result(s) that should be generated from the given terms
% weighted by the yet-to-be-solved coefficients.
%
% o rank: the rank or size of the dimensions of the square matrix.
% Also the length of vectors, and number of terms being added.
%
% o number_vectors: Number of result vectors, and number or results being
% added. Also represents the number of separable systems of equations
% that is being solved.
%
% Example of use...
%
% 2 dimensional Affine Equations (which are separable)
% c0*x + c2*y + c4*1 => u
% c1*x + c3*y + c5*1 => v
%
% double **matrix = AcquireMagickMatrix(3UL,3UL);
% double **vectors = AcquireMagickMatrix(2UL,3UL);
% double terms[3], results[2];
% ...
% for each given x,y -> u,v
% terms[0] = x;
% terms[1] = y;
% terms[2] = 1;
% results[0] = u;
% results[1] = v;
% LeastSquaresAddTerms(matrix,vectors,terms,results,3UL,2UL);
% ...
% if ( GaussJordanElimination(matrix,vectors,3UL,2UL) ) {
% c0 = vectors[0][0];
% c2 = vectors[0][1];
% c4 = vectors[0][2];
% c1 = vectors[1][0];
% c3 = vectors[1][1];
% c5 = vectors[1][2];
% }
% else
% printf("Matrix unsolvable\n");
% RelinquishMagickMatrix(matrix,3UL);
% RelinquishMagickMatrix(vectors,2UL);
%
*/
MagickExport void LeastSquaresAddTerms(double **matrix,double **vectors,
  const double *terms,const double *results,const size_t rank,
  const size_t number_vectors)
{
  /*
    Accumulate one observation into the normal equations for least-squares
    fitting: matrix += terms * terms' and vectors += results * terms'.
  */
  register ssize_t
    column,
    row;

  for (column=0; column < (ssize_t) rank; column++)
  {
    const double
      weight = terms[column];

    for (row=0; row < (ssize_t) rank; row++)
      matrix[row][column]+=terms[row]*weight;
    for (row=0; row < (ssize_t) number_vectors; row++)
      vectors[row][column]+=results[row]*weight;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a t r i x T o I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MatrixToImage() returns a matrix as an image. The matrix elements must be
% of type double otherwise nonsense is returned.
%
% The format of the MatrixToImage method is:
%
% Image *MatrixToImage(const MatrixInfo *matrix_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MatrixToImage(const MatrixInfo *matrix_info,
ExceptionInfo *exception)
{
CacheView
*image_view;
double
max_value,
min_value,
scale_factor,
value;
Image
*image;
MagickBooleanType
status;
ssize_t
y;
assert(matrix_info != (const MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/* Elements must be at least double-sized; smaller strides produce
   nonsense, so refuse them outright. */
if (matrix_info->stride < sizeof(double))
return((Image *) NULL);
/*
Determine range of matrix.
*/
(void) GetMatrixElement(matrix_info,0,0,&value);
min_value=value;
max_value=value;
for (y=0; y < (ssize_t) matrix_info->rows; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) matrix_info->columns; x++)
{
if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
continue;
if (value < min_value)
min_value=value;
else
if (value > max_value)
max_value=value;
}
}
/* Map [min,max] onto the quantum range; an all-zero matrix maps to black,
   and a constant nonzero matrix maps to full scale. */
if ((min_value == 0.0) && (max_value == 0.0))
scale_factor=0;
else
if (min_value == max_value)
{
scale_factor=(double) QuantumRange/min_value;
min_value=0;
}
else
scale_factor=(double) QuantumRange/(max_value-min_value);
/*
Convert matrix to image.
*/
/* NOTE(review): the AcquireImage() result is dereferenced without a NULL
   check — confirm it cannot fail here. */
image=AcquireImage((ImageInfo *) NULL);
image->columns=matrix_info->columns;
image->rows=matrix_info->rows;
image->colorspace=GRAYColorspace;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
value;  /* shadows the outer `value`; each thread needs its own copy */
register PixelPacket
*q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
/* Unreadable elements leave the queued pixel untouched. */
if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
continue;
value=scale_factor*(value-min_value);
q->red=ClampToQuantum(value);
q->green=q->red;
q->blue=q->red;
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N u l l M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NullMatrix() sets all elements of the matrix to zero.
%
% The format of the memset method is:
%
% MagickBooleanType *NullMatrix(MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
MagickExport MagickBooleanType NullMatrix(MatrixInfo *matrix_info)
{
register ssize_t
x;
ssize_t
count,
y;
unsigned char
value;
assert(matrix_info != (const MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
/* Memory-backed matrices are zeroed in a single memset. */
if (matrix_info->type != DiskCache)
{
(void) memset(matrix_info->elements,0,(size_t)
matrix_info->length);
return(MagickTrue);
}
/* Disk-backed matrices are zeroed one byte per write(2) call. */
value=0;
(void) lseek(matrix_info->file,0,SEEK_SET);
for (y=0; y < (ssize_t) matrix_info->rows; y++)
{
/* NOTE(review): the inner bound is the total byte length, yet it sits
   inside a per-row loop — this appears to write rows*length bytes in
   total rather than length; confirm against upstream intent. */
for (x=0; x < (ssize_t) matrix_info->length; x++)
{
count=write(matrix_info->file,&value,sizeof(value));
if (count != (ssize_t) sizeof(value))
break;
}
/* A short write above aborts both loops. */
if (x < (ssize_t) matrix_info->length)
break;
}
return(y < (ssize_t) matrix_info->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e l i n q u i s h M a g i c k M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RelinquishMagickMatrix() frees the previously acquired matrix (array of
% pointers to arrays of doubles).
%
% The format of the RelinquishMagickMatrix method is:
%
% double **RelinquishMagickMatrix(double **matrix,
% const size_t number_rows)
%
% A description of each parameter follows:
%
% o matrix: the matrix to relinquish
%
% o number_rows: the first dimension of the acquired matrix (number of
% pointers)
%
*/
MagickExport double **RelinquishMagickMatrix(double **matrix,
  const size_t number_rows)
{
  /*
    Free a matrix allocated as an array of row pointers (the counterpart
    of AcquireMagickMatrix()).  A NULL matrix is tolerated and returned
    unchanged; always returns NULL otherwise.
  */
  register ssize_t
    r;

  if (matrix != (double **) NULL)
    {
      for (r=0; r < (ssize_t) number_rows; r++)
        matrix[r]=(double *) RelinquishMagickMemory(matrix[r]);
      matrix=(double **) RelinquishMagickMemory(matrix);
    }
  return(matrix);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t M a t r i x E l e m e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetMatrixElement() sets the specified element in the matrix.
%
% The format of the SetMatrixElement method is:
%
% MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
% const ssize_t x,const ssize_t y,void *value)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix columns.
%
% o x: the matrix x-offset.
%
% o y: the matrix y-offset.
%
% o value: set the matrix element to this value.
%
*/
MagickExport MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
const ssize_t x,const ssize_t y,const void *value)
{
MagickOffsetType
count,
i;
assert(matrix_info != (const MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
/* Unlike GetMatrixElement(), coordinates are NOT edge-clamped here:
   out-of-range offsets are rejected with MagickFalse. */
i=(MagickOffsetType) y*matrix_info->columns+x;
if ((i < 0) ||
((MagickSizeType) (i*matrix_info->stride) >= matrix_info->length))
return(MagickFalse);
if (matrix_info->type != DiskCache)
{
(void) memcpy((unsigned char *) matrix_info->elements+i*
matrix_info->stride,value,matrix_info->stride);
return(MagickTrue);
}
/* Disk-backed matrices persist the element through the file descriptor. */
count=WriteMatrixElements(matrix_info,i*matrix_info->stride,
matrix_info->stride,(unsigned char *) value);
if (count != (MagickOffsetType) matrix_info->stride)
return(MagickFalse);
return(MagickTrue);
}
|
serialLocationTest.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <ipps.h>
#include "xcloc_finter.h"
#include "test_suite.h"
#include "acousticGreens2D.h"
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
/*
static
int computeTravelTimeTable(const int nx, const int ny, const int nz,
const double vel,
const double x0, const double y0, const double z0,
const double dx, const double dy, const double dz,
double xr, double yr, double zr,
double ttable[]);
*/
/*
static
int computeRandomReceiverLocations(const int nrec,
const double x0, const double y0, const double z0,
const double x1, const double y1, const double z1,
double *xr);
static
int compute2DGreensFunctions(const int nsrc, const int nrec, const int nptsSig,
const double fcent, const double dt,
const bool lnorm, const bool lshift,
const double vel, const double rho,
const double Q,
const double pct,
const double srcScale[],
const double xs[],
const double xr[],
double **obsOut);
*/
#ifndef CHKERR
/* Test helper: on a nonzero status code, print the failing call description,
   the enclosing function, and the line number, then abort the test by
   returning EXIT_FAILURE from the caller. */
#define CHKERR(ierr, msg) \
{ \
if (ierr != EXIT_SUCCESS) \
{ \
fprintf(stderr, "ERROR calling %s: %s line %d\n", msg, __func__, __LINE__); \
return EXIT_FAILURE; \
} \
};
#endif
/*
  End-to-end serial test of the diffraction-stack-migration (DSM) location
  workflow: synthesize 2-D acoustic Green's functions for two sources and a
  random receiver array, compute phase correlograms, envelope-filter them,
  migrate onto a 512x512 grid, and verify the image maximum coincides with
  one of the true source grid points.  Returns EXIT_SUCCESS/EXIT_FAILURE.
*/
int test_serial_dsmLocation(void)
{
const int nrec = 20;
double dt = 1.0/6000.0; // Sampling rate is 6000 Hz
double fcent = 400.0; // Dominant resolution is vmin/fcent ~ 5.0m (for plotting)
bool lnorm = false; // Don't normalize ricker wavelet (max will be 1)
bool lshift = true; // Make wavelet start at time 0
//const double pct = 8.0; // 8 pct taper
double x0 = 0.0; // Model origin in x is 0 km
double x1 = 1000.0; // Model extent in x is 1 km
double y0 = 0.0; // Model origin in y is 0 km
double y1 = 1000.0; // Model extent in y is 1 km
double z0 = 0.0; // Problem is 2d - make z equal to 0.0
double z1 = 0.0; // Problem is 2d - make z equal to 0.0
double vel = 3100.0; // constant velocity (m/s)
double rho = 2700.0; // constant density (kg/m**3)
double Q = 9.e2; // add some damping
double tmodel = 0.5; // max modeling time is the traveltime from the
// furthest point in the medium to the receiver
// plus some
int nptsSig = (int) (round(tmodel/dt)) + 1;
// rest of geometry
int nx = 512;
int ny = 512;
int nz = 1;
int ngrd = nx*ny*nz;
double dx = (x1 - x0)/(double) (nx - 1); // should be less than 2m
double dy = (y1 - y0)/(double) (ny - 1); // should be less than 2m
double dz = 0.0;
int i, ierr, is, it;
// Set the receiver location to the center of the model
const int nsrc = 2;
const double srcScale[2] = {1, 1.1};
// Two sources at distinct interior grid points; (x,y,z) triplets.
double xs[6] = {x0 + (double) (2*nx/7)*dx,
y0 + (double) (6*ny/7)*dy,
z0,// Source has to be at zero for the 2d example
x0 + (double) (5*nx/8)*dx,
y0 + (double) (3*ny/8)*dy,
z0};
double *xr = NULL;
double *obs = NULL;
// Scatter the receivers
// NOTE(review): calloc results in this test are not NULL-checked.
xr = (double *) calloc((size_t) (3*nrec), sizeof(double));
ierr = acousticGreens2D_computeRandomReceiverLocations(nrec,
x0, y0, z0,
x1, y1, z1,
xr);
CHKERR(ierr, "failed making receiver locations");
// Compute the greens functions
ierr = acousticGreens2D_computeGreensFunctions(nsrc, nrec, nptsSig,
fcent, dt,
lnorm, lshift,
vel, rho,
Q,
srcScale, xs, xr,
&obs);
CHKERR(ierr, "failed computing acoustic greens fns");
//------------------------Compute the correlograms -----------------------//
fprintf(stdout, "%s: Computing correlograms...\n", __func__);
int nsignals = nrec;
int verbose = 0;
int prec = XCLOC_SINGLE_PRECISION; //0;
int accuracy = XCLOC_HIGH_ACCURACY; //0;
bool ldoAutoCorrs = false;
int nwork =-1;
int nxcs;
int *xcPairs = NULL;
// First call is a workspace query (nwork = -1) that returns nxcs only.
xcloc_utils_computeDefaultXCTable(ldoAutoCorrs, nsignals, nwork,
XCLOC_FORTRAN_NUMBERING,
&nxcs, xcPairs, &ierr);
CHKERR(ierr, "computeDefaultXCTable workspace query");
nwork = 2*nxcs;
xcPairs = calloc((size_t) nwork, sizeof(int));
// Second call fills the (2 x nxcs) cross-correlation pair table.
xcloc_utils_computeDefaultXCTable(ldoAutoCorrs, nsignals, nwork,
XCLOC_FORTRAN_NUMBERING,
&nxcs, xcPairs, &ierr);
CHKERR(ierr, "computeDefaultXCTable");
// Initialize the cross-correlation table
xcloc_fdxc_initialize(nptsSig, nptsSig,
nxcs, xcPairs,
verbose, prec, accuracy, &ierr);
CHKERR(ierr, "failed initializing fdxc");
xcloc_fdxc_setSignals64f(nptsSig, nptsSig, nsignals, obs, &ierr);
CHKERR(ierr, "failed to set signals");
xcloc_fdxc_computePhaseCorrelograms(&ierr);
CHKERR(ierr, "failed to compute phase correlograms");
int nptsInXCs;
xcloc_fdxc_getCorrelogramLength(&nptsInXCs, &ierr);
CHKERR(ierr, "failed to get number of points in xcs");
// Pull the correlograms back in both precisions for comparison below.
double *xcs = (double *) calloc((size_t) (nxcs*nptsInXCs), sizeof(double));
float *xcs32 = (float *) calloc((size_t) (nxcs*nptsInXCs), sizeof(float));
xcloc_fdxc_getCorrelograms64f(nptsInXCs, nxcs, xcs, &ierr);
xcloc_fdxc_getCorrelograms32f(nptsInXCs, nxcs, xcs32, &ierr);
//------------------------Filter the Correlograms-------------------------//
int nTaps = 301;
int ftype = XCLOC_SPXC_ENVELOPE_FILTER;
xcloc_spxc_initialize(nTaps, ftype, &ierr);
double *xcsFilt = (double *) calloc((size_t) (nxcs*nptsInXCs), sizeof(double));
xcloc_spxc_filterXCsOutOfPlace64f(nptsInXCs, nptsInXCs, nxcs,
xcs, xcsFilt, &ierr);
float *xcsFilt32 = (float *) calloc((size_t) (nxcs*nptsInXCs), sizeof(float));
xcloc_spxc_filterXCsOutOfPlace32f(nptsInXCs, nptsInXCs, nxcs,
xcs32, xcsFilt32, &ierr);
// Dump the first correlogram and its envelopes for manual inspection.
FILE *fl = fopen("envelope.txt", "w");
for (int i=0; i<nptsInXCs; i++)
{
int j = i;
fprintf(fl, "%f %e %e %e\n", (i-nptsInXCs/2)*dt, xcs32[j], xcsFilt32[j], xcsFilt[j]);
}
fclose(fl);
xcloc_spxc_finalize();
// return 0;
//---------------------------Compute the DSM------------------------------//
fprintf(stdout, "%s: Initializing DSM...\n", __func__);
int nxcPairs = nxcs;
xcloc_dsmxc_initialize(ngrd, nxcPairs, nptsInXCs,
dt, xcPairs, verbose, &ierr);
CHKERR(ierr, "failed to initialize dsm");
fprintf(stdout, "%s: Setting travel time tables...\n", __func__);
// One straight-ray travel-time table per receiver; the table buffer is
// reused across iterations.
double *ttable = (double *) calloc((size_t) ngrd, sizeof(double));
for (i=0; i<nrec; i++)
{
acousticGreens2D_computeTravelTimeTable(nx, ny, nz,
vel, x0, y0, z0, dx, dy, dz,
xr[3*i], xr[3*i+1], xr[3*i+2],
ttable);
// i+1: the xcloc API uses Fortran (1-based) signal numbering.
xcloc_dsmxc_signalToTableIndex(i+1, &it, &ierr);
xcloc_dsmxc_setTable64f(it, ngrd, ttable, &ierr);
}
free(ttable);
fprintf(stdout, "%s: Setting observations...\n", __func__);
xcloc_dsmxc_setCorrelograms64f(nptsInXCs, nptsInXCs, nxcs, xcsFilt, &ierr);
fprintf(stdout, "%s: Computing dsm...\n", __func__);
xcloc_dsmxc_compute(&ierr);
CHKERR(ierr, "failed to compute dsm");
float *image = (float *) calloc((size_t) ngrd, sizeof(float));
xcloc_dsmxc_getImage32f(ngrd, image, &ierr);
CHKERR(ierr, "failed to get image");
float maxValue;
int maxIndex;
xcloc_dsmxc_getImageMax(&maxIndex, &maxValue, &ierr);
CHKERR(ierr, "failed to get image max");
maxIndex = maxIndex - 1; // Fortran to C
// Pass if the image maximum lands on either true source grid point.
bool lfound = false;
for (is=0; is<nsrc; is++)
{
int ixs = (int) ((xs[3*is+0] - x0)/dx + 0.5);
int iys = (int) ((xs[3*is+1] - y0)/dy + 0.5);
if (iys*nx + ixs == maxIndex){lfound = true;}
fprintf(stdout, "%s: (maxIndex,trueIndex)=(%d,%d) has value %f\n",
__func__, maxIndex, iys*nx + ixs, image[iys*nx+ixs]);
}
if (!lfound)
{
// NOTE(review): this early return leaks image/xcPairs/xcs/etc. and skips
// the finalize calls; acceptable for a test, but worth confirming.
fprintf(stderr, "%s: Failed to find a source in image\n", __func__);
return EXIT_FAILURE;
}
/*
printf("src1: %f %f\n", xs[0], xs[1]);
printf("src2: %f %f\n", xs[3], xs[4]);
FILE *ftemp = fopen("dsm2d.txt", "w");
for (int iy=0; iy<ny; iy++)
{
for (int ix=0; ix<nx; ix++)
{
fprintf(ftemp, "%e %e %e\n", x0+ix*dx, y0+iy*dy, image[iy*nx+ix]);
}
fprintf(ftemp, "\n");
}
fclose(ftemp);
*/
free(image);
// Free memory
xcloc_fdxc_finalize();
xcloc_dsmxc_finalize();
free(xcPairs);
free(xcs);
if (xcs32 != NULL){free(xcs32);}
free(obs);
free(xr);
free(xcsFilt);
free(xcsFilt32);
return EXIT_SUCCESS;
}
/*
int computeTravelTimeTable(const int nx, const int ny, const int nz,
const double vel,
const double x0, const double y0, const double z0,
const double dx, const double dy, const double dz,
double xr, double yr, double zr,
double ttable[])
{
double diff_x, diff_y, diff_z, dist2, slow, x, y, z;
int igrd, ix, iy, iz;
slow = 1.0/vel;
for (iz=0; iz<nz; iz++)
{
for (iy=0; iy<ny; iy++)
{
#pragma omp simd
for (ix=0; ix<nx; ix++)
{
x = x0 + (double) ix*dx;
y = y0 + (double) iy*dy;
z = z0 + (double) iz*dz;
diff_x = xr - x;
diff_y = yr - y;
diff_z = zr - z;
dist2 = diff_x*diff_x + diff_y*diff_y + diff_z*diff_z;
igrd = iz*nx*ny + iy*nx + ix;
ttable[igrd] = sqrt(dist2)*slow;
}
}
}
return 0;
}
*/
double computeTravelTimesToReceivers(const int nrec,
const double vel,
const double xsrc[],
const double xrec[],
double ttimes[])
{
double dx, dy, dz, slow;
int i;
if (vel <= 0.0)
{
fprintf(stderr, "%s: velocity must be positive\n", __func__);
return -1;
}
slow = 1.0/vel;
#pragma omp simd
for (i=0; i<nrec; i++)
{
dx = xrec[3*i+0] - xsrc[0];
dy = xrec[3*i+1] - xsrc[1];
dz = xrec[3*i+2] - xsrc[2];
ttimes[i] = sqrt(dx*dx + dy*dy + dz*dz)*slow;
}
return 0;
}
double gaussianCorrelogram(const int nptsInXC,
const double samplingPeriod,
const double deltaT,
const double t0,
const double sigma1,
const double sigma2,
double xc[])
{
int i;
double arg, res, res2, t, two_sigma12_p_sigma22, xden, xfact;
two_sigma12_p_sigma22 = 2.0*(sigma1*sigma1 + sigma2*sigma2);
xfact = 1.0/sqrt(M_PI*two_sigma12_p_sigma22);
xden = 1.0/two_sigma12_p_sigma22;
#pragma omp simd
for (i=0; i<nptsInXC; i++)
{
t = t0 + (double) i*samplingPeriod;
res = t - deltaT;
res2 = res*res;
arg =-res2*xden;
xc[i] = xfact*exp(arg);
}
return 0;
}
/*
int computeRandomReceiverLocations(const int nrec,
const double x0, const double y0, const double z0,
const double x1, const double y1, const double z1,
double *xr)
{
double dx, dy, dz;
int irec;
dx = 0.0;
dy = 0.0;
dz = 0.0;
if (fabs(x1 - x0) > 1.e-8){dx = fabs(x1 - x0);}
if (fabs(y1 - y0) > 1.e-8){dy = fabs(y1 - y0);}
if (fabs(z1 - z0) > 1.e-8){dz = fabs(z1 - z0);}
for (irec=0; irec<nrec; irec++)
{
xr[3*irec+0] = x0;
xr[3*irec+1] = y0;
xr[3*irec+2] = z0;
xr[3*irec+0] = ((double) rand()/RAND_MAX)*dx;
xr[3*irec+1] = ((double) rand()/RAND_MAX)*dy;
xr[3*irec+2] = ((double) rand()/RAND_MAX)*dz;
}
return 0;
}
int compute2DGreensFunctions(const int nsrc, const int nrec, const int nptsSig,
const double fcent, const double dt,
const bool lnorm, const bool lshift,
const double vel, const double rho,
const double Q,
const double pct,
const double srcScale[],
const double xs[],
const double xr[],
double **obsOut)
{
double *obs, *obsTemp, *stf;
int ierr, isrc, k;
// Compute the Green's functions using a Ricker wavelet
fprintf(stdout, "%s: Computing Ricker wavelet...\n", __func__);
stf = (double *) calloc((size_t) nptsSig, sizeof(double));
ierr = acousticGreens2D_computeRickerWavelet(nptsSig, dt, fcent,
lnorm, lshift, stf);
if (ierr != 0)
{
fprintf(stderr, "%s: Failed to compute ricker wavelet\n", __func__);
return EXIT_FAILURE;
}
fprintf(stdout, "%s: Computing synthetics...\n", __func__);
obsTemp = (double *) calloc((size_t) (nptsSig*nrec), sizeof(double));
obs = (double *) calloc((size_t) (nptsSig*nrec), sizeof(double));
for (isrc=0; isrc<nsrc; isrc++)
{
ierr = acousticGreens2D_computeGreensLineSource(nrec, vel, rho, Q,
nptsSig, dt,
&xs[3*isrc], xr,
stf, obsTemp);
if (ierr != 0)
{
fprintf(stderr, "%s: Error computing line source greens fns\n",
__func__);
return -1;
}
for (k=0; k<nptsSig*nrec; k++)
{
obs[k] = obs[k] + srcScale[isrc]*obsTemp[k];
}
//ippsAddProductC_64f(obsTemp, srcScale[isrc], obs, nptsSig*nrec);
}
free(stf);
free(obsTemp);
*obsOut = obs;
return EXIT_SUCCESS;
}
*/
|
GB_binop__plus_uint64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__plus_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__plus_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__plus_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__plus_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_uint64)
// A*D function (colscale): GB (_AxD__plus_uint64)
// D*A function (rowscale): GB (_DxB__plus_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__plus_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__plus_uint64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_uint64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_uint64)
// C=scalar+B GB (_bind1st__plus_uint64)
// C=scalar+B' GB (_bind1st_tran__plus_uint64)
// C=A+scalar GB (_bind2nd__plus_uint64)
// C=A'+scalar GB (_bind2nd_tran__plus_uint64)
// C type: uint64_t
// A type: uint64_t
// A pattern? 0
// B type: uint64_t
// B pattern? 0
// BinaryOp: cij = (aij + bij)
// Auto-generated kernel configuration macros for the PLUS/uint64 operator:
// the templates included below expand in terms of these type, accessor, and
// operator definitions.  (Beware: some one-token bodies end with a trailing
// backslash, so the following comment line is spliced into the macro and
// later stripped; do not insert lines inside those continuations.)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x + y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PLUS || GxB_NO_UINT64 || GxB_NO_PLUS_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense; the loop body comes from the
// included template, specialized by the GB_* macros defined above.
void GB (_Cdense_ewise3_accum__plus_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense (no accumulation); the loop
// body comes from the included template, specialized by the GB_* macros.
void GB (_Cdense_ewise3_noaccum__plus_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.  Returns
// GrB_NO_VALUE when this operator is compiled out (GB_DISABLE), which tells
// the caller to fall back to the generic kernel.
GrB_Info GB (_Cdense_accumB__plus_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar (passed type-erased via p_bwork) into a dense
// matrix.  Returns GrB_NO_VALUE when compiled out (generic fallback).
GrB_Info GB (_Cdense_accumb__plus_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// generator scaffolding: unreachable, the block above always returns
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D; the template
// writes results through the local Cx alias of C->x.
GrB_Info GB (_AxD__plus_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D; the template
// writes results through the local Cx alias of C->x.
GrB_Info GB (_DxB__plus_uint64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B.  For eWiseUnion, alpha/beta
// scalars substitute for entries missing from A or B respectively.
GrB_Info GB (_AaddB__plus_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace declared here is released by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint64_t alpha_scalar ;
uint64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B (optionally masked) where C is sparse or hypersparse.
GrB_Info GB (_AemultB_08__plus_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
GrB_Info GB (_AemultB_02__plus_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
// (PLUS is commutative, so GB_BINOP_FLIP is 0 and this branch is used.)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Generated factory kernel: C<M> = A.*B (PLUS_UINT64) where the mask M is
// sparse/hyper and both A and B are bitmap/full (emult method 04).  The work
// is sliced by the M_ek_slicing task decomposition.
GrB_Info GB (_AemultB_04__plus_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Generated factory kernel: C=A.*B, C<M>=A.*B, or C<!M>=A.*B (PLUS_UINT64)
// where the result C is held in bitmap form.
GrB_Info GB (_AemultB_bitmap__plus_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the PLUS_UINT64 operator with the scalar x bound to
// its first argument.  Cx and Bx may be aliased, so no restrict is applied.
// Entries absent from the bitmap Bb (if present) are left untouched.
GrB_Info GB (_bind1st__plus_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    const uint64_t x = (*((uint64_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // skip entries not present in the bitmap
        if (GBB (Bb, k))
        {
            uint64_t bval = GBX (Bx, k, false) ;
            Cx [k] = (x + bval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the PLUS_UINT64 operator with the scalar y bound to
// its second argument.  Cx and Ax may be aliased, so no restrict is applied.
// Entries absent from the bitmap Ab (if present) are left untouched.
GrB_Info GB (_bind2nd__plus_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    const uint64_t y = (*((uint64_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // skip entries not present in the bitmap
        if (GBB (Ab, k))
        {
            uint64_t aval = GBX (Ax, k, false) ;
            Cx [k] = (aval + y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c: cij = x + aij for each
// transposed entry (no typecasting, in spite of the macro name).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x + aij) ; \
}
// C = op (x, A'): transpose A and apply the bound-first-argument operator.
GrB_Info GB (_bind1st_tran__plus_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows in this generated file
    // (preprocessing happens regardless of the return statement above)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c: cij = aij + y for each
// transposed entry (no typecasting, in spite of the macro name).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij + y) ; \
}
// C = op (A', y): transpose A and apply the bound-second-argument operator.
GrB_Info GB (_bind2nd_tran__plus_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_AxB_saxpy3_slice_balanced.c | //------------------------------------------------------------------------------
// GB_AxB_saxpy3_slice_balanced: construct balanced tasks for GB_AxB_saxpy3
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If the mask is present but must be discarded, this function returns
// GrB_NO_VALUE, to indicate that the analysis was terminated early.
#include "GB_AxB_saxpy3.h"
// control parameters for generating parallel tasks
#define GB_NTASKS_PER_THREAD 2
#define GB_COSTLY 1.2
#define GB_FINE_WORK 2
#define GB_MWORK_ALPHA 0.01
#define GB_MWORK_BETA 0.10
#define GB_FREE_WORKSPACE \
{ \
GB_WERK_POP (Fine_fl, int64_t) ; \
GB_WERK_POP (Fine_slice, int64_t) ; \
GB_WERK_POP (Coarse_Work, int64_t) ; \
GB_WERK_POP (Coarse_initial, int64_t) ; \
}
#define GB_FREE_ALL \
{ \
GB_FREE_WORKSPACE ; \
GB_FREE_WORK (&SaxpyTasks, SaxpyTasks_size) ; \
}
//------------------------------------------------------------------------------
// GB_hash_table_size
//------------------------------------------------------------------------------
// flmax is the max flop count for computing A*B(:,j), for any vector j that
// this task computes. If the mask M is present, flmax also includes the
// number of entries in M(:,j). GB_hash_table_size determines the hash table
// size for this task, which is twice the smallest power of 2 larger than
// flmax. If flmax is large enough, the hash_size is returned as cvlen, so
// that Gustavson's method will be used instead of the Hash method.
// By default, Gustavson vs Hash is selected automatically. AxB_method can be
// selected via the descriptor or a global setting, as the non-default
// GxB_AxB_GUSTAVSON or GxB_AxB_HASH settings, to enforce the selection of
// either of those methods. However, if Hash is selected but the hash table
// equals or exceeds cvlen, then Gustavson's method is used instead.
static inline int64_t GB_hash_table_size
(
    int64_t flmax,      // max flop count for any vector computed by this task
    int64_t cvlen,      // vector length of C
    const GrB_Desc_Value AxB_method // Default, Gustavson, or Hash
)
{
    // Gustavson is chosen outright when it was requested explicitly, or when
    // the flop bound is already a large fraction of the vector length.
    if (AxB_method == GxB_AxB_GUSTAVSON || flmax >= cvlen/2)
    {
        return (cvlen) ;
    }

    // flmax is small: candidate hash table size is twice the smallest power
    // of 2 that is >= flmax.
    int64_t hash_size = ((uint64_t) 2) << (GB_FLOOR_LOG2 (flmax) + 1) ;

    // Decide whether Gustavson is still the better choice.  With an explicit
    // Hash request, only fall back to Gustavson once the hash table would be
    // as large as the whole vector; under automatic selection, switch over
    // much sooner (at cvlen/12).
    bool use_Gustavson = (AxB_method == GxB_AxB_HASH)
        ? (hash_size >= cvlen)
        : (hash_size >= cvlen/12) ;

    // hash_size == cvlen signals Gustavson's method to the caller
    return (use_Gustavson ? cvlen : hash_size) ;
}
//------------------------------------------------------------------------------
// GB_create_coarse_task: create a single coarse task
//------------------------------------------------------------------------------
// Compute the max flop count for any vector in a coarse task, determine the
// hash table size, and construct the coarse task.
// Compute the max flop count for any vector in a coarse task, determine the
// hash table size, and fill in one GB_saxpy3task_struct entry.  The max is
// computed with a two-phase parallel reduction: each thread scans a disjoint
// slice of the vectors, then the per-thread maxima are combined serially.
static inline void GB_create_coarse_task
(
    int64_t kfirst,     // coarse task consists of vectors kfirst:klast
    int64_t klast,
    GB_saxpy3task_struct *SaxpyTasks,
    int taskid,         // taskid for this coarse task
    int64_t *Bflops,    // size bnvec; cum sum of flop counts for vectors of B
    int64_t cvlen,      // vector length of B and C
    double chunk,
    int nthreads_max,
    int64_t *Coarse_Work,   // workspace for parallel reduction for flop count
    const GrB_Desc_Value AxB_method // Default, Gustavson, or Hash
)
{
    //--------------------------------------------------------------------------
    // find the max # of flops for any vector in this task
    //--------------------------------------------------------------------------
    int64_t nk = klast - kfirst + 1 ;
    int nth = GB_nthreads (nk, chunk, nthreads_max) ;
    // each thread finds the max flop count for a subset of the vectors
    int tid ;
    #pragma omp parallel for num_threads(nth) schedule(static)
    for (tid = 0 ; tid < nth ; tid++)
    {
        // flop counts are differences of the cumulative sum in Bflops
        int64_t my_flmax = 1, istart, iend ;
        GB_PARTITION (istart, iend, nk, tid, nth) ;
        for (int64_t i = istart ; i < iend ; i++)
        {
            int64_t kk = kfirst + i ;
            int64_t fl = Bflops [kk+1] - Bflops [kk] ;
            my_flmax = GB_IMAX (my_flmax, fl) ;
        }
        Coarse_Work [tid] = my_flmax ;
    }
    // combine results from each thread
    int64_t flmax = 1 ;
    for (tid = 0 ; tid < nth ; tid++)
    {
        flmax = GB_IMAX (flmax, Coarse_Work [tid]) ;
    }
    // check the parallel computation against a serial recomputation
    #ifdef GB_DEBUG
    int64_t flmax2 = 1 ;
    for (int64_t kk = kfirst ; kk <= klast ; kk++)
    {
        int64_t fl = Bflops [kk+1] - Bflops [kk] ;
        flmax2 = GB_IMAX (flmax2, fl) ;
    }
    ASSERT (flmax == flmax2) ;
    #endif
    //--------------------------------------------------------------------------
    // define the coarse task
    //--------------------------------------------------------------------------
    SaxpyTasks [taskid].start  = kfirst ;
    SaxpyTasks [taskid].end    = klast ;
    SaxpyTasks [taskid].vector = -1 ;       // -1 marks a coarse task
    SaxpyTasks [taskid].hsize  = GB_hash_table_size (flmax, cvlen, AxB_method) ;
    SaxpyTasks [taskid].Hi = NULL ;      // assigned later
    SaxpyTasks [taskid].Hf = NULL ;      // assigned later
    SaxpyTasks [taskid].Hx = NULL ;      // assigned later
    SaxpyTasks [taskid].my_cjnz = 0 ;    // for fine tasks only
    SaxpyTasks [taskid].leader = taskid ;   // a coarse task is its own team
    SaxpyTasks [taskid].team_size = 1 ;
}
//------------------------------------------------------------------------------
// GB_AxB_saxpy3_slice_balanced: create balanced tasks for saxpy3
//------------------------------------------------------------------------------
GrB_Info GB_AxB_saxpy3_slice_balanced
(
    // inputs
    GrB_Matrix C,                   // output matrix
    const GrB_Matrix M,             // optional mask matrix
    const bool Mask_comp,           // if true, use !M
    const GrB_Matrix A,             // input matrix A
    const GrB_Matrix B,             // input matrix B
    GrB_Desc_Value AxB_method,      // Default, Gustavson, or Hash
    // outputs
    GB_saxpy3task_struct **SaxpyTasks_handle,
    size_t *SaxpyTasks_size_handle,
    bool *apply_mask,               // if true, apply M during saxpy3
    bool *M_in_place,               // if true, use M in-place
    int *ntasks,                    // # of tasks created (coarse and fine)
    int *nfine,                     // # of fine tasks created
    int *nthreads,                  // # of threads to use
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;

    (*apply_mask) = false ;
    (*M_in_place) = false ;
    (*ntasks) = 0 ;
    (*nfine) = 0 ;
    (*nthreads) = 0 ;

    ASSERT_MATRIX_OK_OR_NULL (M, "M for saxpy3_slice_balanced A*B", GB0) ;
    ASSERT (!GB_PENDING (M)) ;
    ASSERT (GB_JUMBLED_OK (M)) ;
    ASSERT (!GB_ZOMBIES (M)) ;

    ASSERT_MATRIX_OK (A, "A for saxpy3_slice_balanced A*B", GB0) ;
    ASSERT (!GB_PENDING (A)) ;
    ASSERT (GB_JUMBLED_OK (A)) ;
    ASSERT (!GB_ZOMBIES (A)) ;

    ASSERT_MATRIX_OK (B, "B for saxpy3_slice_balanced A*B", GB0) ;
    ASSERT (!GB_PENDING (B)) ;
    ASSERT (GB_JUMBLED_OK (B)) ;
    ASSERT (!GB_ZOMBIES (B)) ;

    //--------------------------------------------------------------------------
    // determine the # of threads to use
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;

    //--------------------------------------------------------------------------
    // define result and workspace
    //--------------------------------------------------------------------------

    GB_saxpy3task_struct *restrict SaxpyTasks = NULL ;
    size_t SaxpyTasks_size = 0 ;

    GB_WERK_DECLARE (Coarse_initial, int64_t) ; // initial coarse tasks
    GB_WERK_DECLARE (Coarse_Work, int64_t) ;    // workspace for flop counts
    GB_WERK_DECLARE (Fine_slice, int64_t) ;
    GB_WERK_DECLARE (Fine_fl, int64_t) ;        // size max(nnz(B(:,j)))

    //--------------------------------------------------------------------------
    // get A, and B
    //--------------------------------------------------------------------------

    const int64_t *restrict Ap = A->p ;
    const int64_t *restrict Ah = A->h ;
    const int64_t avlen = A->vlen ;
    const int64_t anvec = A->nvec ;
    const bool A_is_hyper = GB_IS_HYPERSPARSE (A) ;

    const int64_t *restrict Bp = B->p ;
    const int64_t *restrict Bh = B->h ;
    const int8_t *restrict Bb = B->b ;
    const int64_t *restrict Bi = B->i ;
    const int64_t bvdim = B->vdim ;
    const int64_t bnz = GB_nnz_held (B) ;
    const int64_t bnvec = B->nvec ;
    const int64_t bvlen = B->vlen ;
    const bool B_is_hyper = GB_IS_HYPERSPARSE (B) ;

    // C has the vector length of A and the vector dimension of B
    int64_t cvlen = avlen ;
    int64_t cvdim = bvdim ;

    //--------------------------------------------------------------------------
    // compute flop counts for each vector of B and C
    //--------------------------------------------------------------------------

    int64_t Mwork = 0 ;
    // C->p is not yet finalized, so it can serve as scratch space here
    int64_t *restrict Bflops = C->p ;    // use C->p as workspace for Bflops
    GB_OK (GB_AxB_saxpy3_flopcount (&Mwork, Bflops, M, Mask_comp, A, B,
        Context)) ;
    int64_t total_flops = Bflops [bnvec] ;
    double axbflops = total_flops - Mwork ;
    GBURBLE ("axbwork %g ", axbflops) ;
    if (Mwork > 0) GBURBLE ("mwork %g ", (double) Mwork) ;

    //--------------------------------------------------------------------------
    // determine if the mask M should be applied, or done later
    //--------------------------------------------------------------------------

    if (M == NULL)
    {

        //----------------------------------------------------------------------
        // M is not present
        //----------------------------------------------------------------------

        (*apply_mask) = false ;

    }
    else if (GB_IS_BITMAP (M) || GB_as_if_full (M))
    {

        //----------------------------------------------------------------------
        // M is present and full, bitmap, or sparse/hyper with all entries
        //----------------------------------------------------------------------

        // Choose all-hash or all-Gustavson tasks, and apply M during saxpy3.

        (*apply_mask) = true ;

        // The work for M has not yet been added to Bflops.
        // Each vector M(:,j) has cvlen entries.
        Mwork = cvlen * cvdim ;

        if (!(AxB_method == GxB_AxB_HASH || AxB_method == GxB_AxB_GUSTAVSON))
        {
            if (axbflops < (double) Mwork * GB_MWORK_BETA)
            {
                // The mask is too costly to scatter into the Hf workspace.
                // Leave it in place and use all-hash tasks.
                AxB_method = GxB_AxB_HASH ;
            }
            else
            {
                // Scatter M into Hf and use all-Gustavson tasks.
                AxB_method = GxB_AxB_GUSTAVSON ;
            }
        }

        if (AxB_method == GxB_AxB_HASH)
        {
            // Use the hash method for all tasks (except for those tasks which
            // require a hash table size >= cvlen; those tasks use Gustavson).
            // Do not scatter the mask into the Hf hash workspace.  The work
            // for the mask is not accounted for in Bflops, so the hash tables
            // can be small.
            (*M_in_place) = true ;
            GBURBLE ("(use mask in-place) ") ;
        }
        else
        {
            // Use the Gustavson method for all tasks, and scatter M into the
            // fine Gustavson workspace.  The work for M is not yet in the
            // Bflops cumulative sum.  Add it now.
            // NOTE(review): no ';' after this ASSERT — OK only if ASSERT
            // expands to a complete statement (or to nothing); confirm.
            ASSERT (AxB_method == GxB_AxB_GUSTAVSON)
            int nth = GB_nthreads (bnvec, chunk, nthreads_max) ;
            int64_t kk ;
            #pragma omp parallel for num_threads(nth) schedule(static)
            for (kk = 0 ; kk <= bnvec ; kk++)
            {
                // cumulative sum: entry kk grows by cvlen per vector so far
                Bflops [kk] += cvlen * (kk+1) ;
            }
            total_flops = Bflops [bnvec] ;
            GBURBLE ("(use mask) ") ;
        }

    }
    else if (axbflops < ((double) Mwork * GB_MWORK_ALPHA))
    {

        //----------------------------------------------------------------------
        // M is costly to use; apply it after C=A*B
        //----------------------------------------------------------------------

        // Do not use M during the computation of A*B.  Instead, compute C=A*B
        // and then apply the mask later.  Tell the caller that the mask should
        // not be applied, so that it will be applied later in GB_mxm.

        (*apply_mask) = false ;
        GBURBLE ("(discard mask) ") ;
        // early exit: GrB_NO_VALUE tells the caller the analysis stopped here
        GB_FREE_ALL ;
        return (GrB_NO_VALUE) ;

    }
    else
    {

        //----------------------------------------------------------------------
        // use M during saxpy3
        //----------------------------------------------------------------------

        (*apply_mask) = true ;
        GBURBLE ("(use mask) ") ;
    }

    //--------------------------------------------------------------------------
    // determine # of threads and # of initial coarse tasks
    //--------------------------------------------------------------------------

    (*nthreads) = GB_nthreads ((double) total_flops, chunk, nthreads_max) ;
    int ntasks_initial = ((*nthreads) == 1) ? 1 :
        (GB_NTASKS_PER_THREAD * (*nthreads)) ;

    //--------------------------------------------------------------------------
    // give preference to Gustavson when using few threads
    //--------------------------------------------------------------------------

    if ((*nthreads) <= 8 &&
        (!(AxB_method == GxB_AxB_HASH || AxB_method == GxB_AxB_GUSTAVSON)))
    {
        // Unless a specific method has been explicitly requested, see if
        // Gustavson should be used with a small number of threads.
        // Matrix-vector has a maximum intensity of 1, so this heuristic only
        // applies to GrB_mxm.
        double abnz = GB_nnz (A) + GB_nnz (B) + 1 ;
        double workspace = (double) ntasks_initial * (double) cvlen ;
        double intensity = total_flops / abnz ;
        GBURBLE ("(intensity: %0.3g workspace/(nnz(A)+nnz(B)): %0.3g",
            intensity, workspace / abnz) ;
        if (intensity >= 8 && workspace < abnz)
        {
            // work intensity is large, and Gustavson workspace is modest;
            // use Gustavson for all tasks
            AxB_method = GxB_AxB_GUSTAVSON ;
            GBURBLE (": select Gustvason) ") ;
        }
        else
        {
            // use default task creation: mix of Hash and Gustavson
            GBURBLE (") ") ;
        }
    }

    //--------------------------------------------------------------------------
    // determine target task size
    //--------------------------------------------------------------------------

    double target_task_size = ((double) total_flops) / ntasks_initial ;
    target_task_size = GB_IMAX (target_task_size, chunk) ;
    double target_fine_size = target_task_size / GB_FINE_WORK ;
    target_fine_size = GB_IMAX (target_fine_size, chunk) ;

    //--------------------------------------------------------------------------
    // determine # of parallel tasks
    //--------------------------------------------------------------------------

    int ncoarse = 0 ;       // # of coarse tasks
    int max_bjnz = 0 ;      // max (nnz (B (:,j))) of fine tasks

    // FUTURE: also use ultra-fine tasks that compute A(i1:i2,k)*B(k,j)

    if (ntasks_initial > 1)
    {

        //----------------------------------------------------------------------
        // construct initial coarse tasks
        //----------------------------------------------------------------------

        GB_WERK_PUSH (Coarse_initial, ntasks_initial + 1, int64_t) ;
        if (Coarse_initial == NULL)
        {
            // out of memory
            GB_FREE_ALL ;
            return (GrB_OUT_OF_MEMORY) ;
        }
        // slice the vectors of B so each initial task gets equal flop counts
        GB_pslice (Coarse_initial, Bflops, bnvec, ntasks_initial, true) ;

        //----------------------------------------------------------------------
        // split the work into coarse and fine tasks
        //----------------------------------------------------------------------

        // This pass only COUNTS the tasks (ncoarse, *nfine, max_bjnz); the
        // second pass below constructs them, after workspace is allocated.
        for (int taskid = 0 ; taskid < ntasks_initial ; taskid++)
        {
            // get the initial coarse task
            int64_t kfirst = Coarse_initial [taskid] ;
            int64_t klast  = Coarse_initial [taskid+1] ;
            int64_t task_ncols = klast - kfirst ;
            int64_t task_flops = Bflops [klast] - Bflops [kfirst] ;

            if (task_ncols == 0)
            {
                // This coarse task is empty, having been squeezed out by
                // costly vectors in adjacent coarse tasks.
            }
            else if (task_flops > 2 * GB_COSTLY * target_task_size)
            {
                // This coarse task is too costly, because it contains one or
                // more costly vectors.  Split its vectors into a mixture of
                // coarse and fine tasks.

                int64_t kcoarse_start = kfirst ;

                for (int64_t kk = kfirst ; kk < klast ; kk++)
                {
                    // jflops = # of flops to compute a single vector A*B(:,j)
                    // where j == GBH (Bh, kk)
                    double jflops = Bflops [kk+1] - Bflops [kk] ;
                    // bjnz = nnz (B (:,j))
                    int64_t bjnz = (Bp == NULL) ? bvlen : (Bp [kk+1] - Bp [kk]);

                    if (jflops > GB_COSTLY * target_task_size && bjnz > 1)
                    {
                        // A*B(:,j) is costly; split it into 2 or more fine
                        // tasks.  First flush the prior coarse task, if any.
                        if (kcoarse_start < kk)
                        {
                            // vectors kcoarse_start to kk-1 form a single
                            // coarse task
                            ncoarse++ ;
                        }

                        // next coarse task (if any) starts at kk+1
                        kcoarse_start = kk+1 ;

                        // vectors kk will be split into multiple fine tasks
                        max_bjnz = GB_IMAX (max_bjnz, bjnz) ;
                        int team_size = ceil (jflops / target_fine_size) ;
                        (*nfine) += team_size ;
                    }
                }

                // flush the last coarse task, if any
                if (kcoarse_start < klast)
                {
                    // vectors kcoarse_start to klast-1 form a single
                    // coarse task
                    ncoarse++ ;
                }
            }
            else
            {
                // This coarse task is OK as-is.
                ncoarse++ ;
            }
        }
    }
    else
    {

        //----------------------------------------------------------------------
        // entire computation in a single fine or coarse task
        //----------------------------------------------------------------------

        if (bnvec == 1)
        {
            // If B is a single vector, and is computed by a single thread,
            // then a single fine task is used.
            (*nfine) = 1 ;
            ncoarse = 0 ;
        }
        else
        {
            // One thread uses a single coarse task if B is not a vector.
            (*nfine) = 0 ;
            ncoarse = 1 ;
        }
    }

    (*ntasks) = ncoarse + (*nfine) ;

    //--------------------------------------------------------------------------
    // allocate the tasks, and workspace to construct fine tasks
    //--------------------------------------------------------------------------

    SaxpyTasks = GB_MALLOC_WORK ((*ntasks), GB_saxpy3task_struct,
        &SaxpyTasks_size) ;

    GB_WERK_PUSH (Coarse_Work, nthreads_max, int64_t) ;
    if (max_bjnz > 0)
    {
        // also allocate workspace to construct fine tasks
        GB_WERK_PUSH (Fine_slice, (*ntasks)+1, int64_t) ;
        // Fine_fl will only fit on the Werk stack if max_bjnz is small,
        // but try anyway, in case it fits.  It is placed at the top of the
        // Werk stack.
        GB_WERK_PUSH (Fine_fl, max_bjnz+1, int64_t) ;
    }

    if (SaxpyTasks == NULL || Coarse_Work == NULL ||
        (max_bjnz > 0 && (Fine_slice == NULL || Fine_fl == NULL)))
    {
        // out of memory
        GB_FREE_ALL ;
        return (GrB_OUT_OF_MEMORY) ;
    }

    // clear SaxpyTasks
    memset (SaxpyTasks, 0, SaxpyTasks_size) ;

    //--------------------------------------------------------------------------
    // create the tasks
    //--------------------------------------------------------------------------

    if (ntasks_initial > 1)
    {

        //----------------------------------------------------------------------
        // create the coarse and fine tasks
        //----------------------------------------------------------------------

        // This second pass repeats the splitting logic of the counting pass
        // above, and must stay in lock-step with it.
        int nf = 0 ;            // fine tasks have task id 0:nfine-1
        int nc = (*nfine) ;     // coarse task ids are nfine:ntasks-1

        for (int taskid = 0 ; taskid < ntasks_initial ; taskid++)
        {
            // get the initial coarse task
            int64_t kfirst = Coarse_initial [taskid] ;
            int64_t klast  = Coarse_initial [taskid+1] ;
            int64_t task_ncols = klast - kfirst ;
            int64_t task_flops = Bflops [klast] - Bflops [kfirst] ;

            if (task_ncols == 0)
            {
                // This coarse task is empty, having been squeezed out by
                // costly vectors in adjacent coarse tasks.
            }
            else if (task_flops > 2 * GB_COSTLY * target_task_size)
            {
                // This coarse task is too costly, because it contains one or
                // more costly vectors.  Split its vectors into a mixture of
                // coarse and fine tasks.

                int64_t kcoarse_start = kfirst ;

                for (int64_t kk = kfirst ; kk < klast ; kk++)
                {
                    // jflops = # of flops to compute a single vector A*B(:,j)
                    double jflops = Bflops [kk+1] - Bflops [kk] ;
                    // bjnz = nnz (B (:,j))
                    int64_t bjnz = (Bp == NULL) ? bvlen : (Bp [kk+1] - Bp [kk]);

                    if (jflops > GB_COSTLY * target_task_size && bjnz > 1)
                    {
                        // A*B(:,j) is costly; split it into 2 or more fine
                        // tasks.  First flush the prior coarse task, if any.
                        if (kcoarse_start < kk)
                        {
                            // kcoarse_start:kk-1 form a single coarse task
                            GB_create_coarse_task (kcoarse_start, kk-1,
                                SaxpyTasks, nc++, Bflops, cvlen, chunk,
                                nthreads_max, Coarse_Work, AxB_method) ;
                        }

                        // next coarse task (if any) starts at kk+1
                        kcoarse_start = kk+1 ;

                        // count the work for each entry B(k,j).  Do not
                        // include the work to scan M(:,j), since that will
                        // be evenly divided between all tasks in this team.
                        int64_t pB_start = GBP (Bp, kk, bvlen) ;
                        int nth = GB_nthreads (bjnz, chunk, nthreads_max) ;
                        int64_t s ;
                        #pragma omp parallel for num_threads(nth) \
                            schedule(static)
                        for (s = 0 ; s < bjnz ; s++)
                        {
                            // get B(k,j)
                            Fine_fl [s] = 1 ;
                            int64_t pB = pB_start + s ;
                            if (!GBB (Bb, pB)) continue ;
                            int64_t k = GBI (Bi, pB, bvlen) ;
                            // fl = flop count for just A(:,k)*B(k,j)
                            int64_t pA, pA_end ;
                            int64_t pleft = 0 ;
                            GB_lookup (A_is_hyper, Ah, Ap, avlen, &pleft,
                                anvec-1, k, &pA, &pA_end) ;
                            int64_t fl = pA_end - pA ;
                            Fine_fl [s] = fl ;
                            ASSERT (fl >= 0) ;
                        }

                        // cumulative sum of flops to compute A*B(:,j)
                        GB_cumsum (Fine_fl, bjnz, NULL, nth, Context) ;

                        // slice B(:,j) into fine tasks
                        int team_size = ceil (jflops / target_fine_size) ;
                        ASSERT (Fine_slice != NULL) ;
                        GB_pslice (Fine_slice, Fine_fl, bjnz, team_size, false);

                        // shared hash table for all fine tasks for A*B(:,j)
                        int64_t hsize =
                            GB_hash_table_size (jflops, cvlen, AxB_method) ;

                        // construct the fine tasks for C(:,j)=A*B(:,j)
                        int leader = nf ;
                        for (int fid = 0 ; fid < team_size ; fid++)
                        {
                            int64_t pstart = Fine_slice [fid] ;
                            int64_t pend   = Fine_slice [fid+1] ;
                            int64_t fl = Fine_fl [pend] - Fine_fl [pstart] ;
                            SaxpyTasks [nf].start  = pB_start + pstart ;
                            SaxpyTasks [nf].end    = pB_start + pend - 1 ;
                            SaxpyTasks [nf].vector = kk ;
                            SaxpyTasks [nf].hsize  = hsize ;
                            SaxpyTasks [nf].Hi = NULL ;   // assigned later
                            SaxpyTasks [nf].Hf = NULL ;   // assigned later
                            SaxpyTasks [nf].Hx = NULL ;   // assigned later
                            SaxpyTasks [nf].my_cjnz = 0 ;
                            SaxpyTasks [nf].leader = leader ;
                            SaxpyTasks [nf].team_size = team_size ;
                            nf++ ;
                        }
                    }
                }

                // flush the last coarse task, if any
                if (kcoarse_start < klast)
                {
                    // kcoarse_start:klast-1 form a single coarse task
                    GB_create_coarse_task (kcoarse_start, klast-1, SaxpyTasks,
                        nc++, Bflops, cvlen, chunk, nthreads_max,
                        Coarse_Work, AxB_method) ;
                }

            }
            else
            {
                // This coarse task is OK as-is.
                GB_create_coarse_task (kfirst, klast-1, SaxpyTasks,
                    nc++, Bflops, cvlen, chunk, nthreads_max,
                    Coarse_Work, AxB_method) ;
            }
        }

    }
    else
    {

        //----------------------------------------------------------------------
        // entire computation in a single fine or coarse task
        //----------------------------------------------------------------------

        // create a single coarse task: hash or Gustavson
        GB_create_coarse_task (0, bnvec-1, SaxpyTasks, 0, Bflops, cvlen, 1, 1,
            Coarse_Work, AxB_method) ;

        if (bnvec == 1)
        {
            // convert the single coarse task into a single fine task
            SaxpyTasks [0].start  = 0 ;           // first entry in B(:,0)
            SaxpyTasks [0].end    = bnz - 1 ;     // last entry in B(:,0)
            SaxpyTasks [0].vector = 0 ;
        }
    }

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_WORKSPACE ;
    (*SaxpyTasks_handle) = SaxpyTasks ;
    (*SaxpyTasks_size_handle) = SaxpyTasks_size ;
    return (GrB_SUCCESS) ;
}
|
nr_numint.c | /*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "cint.h"
#include "gto/grid_ao_drv.h"
#include "np_helper/np_helper.h"
#include "vhf/fblas.h"
#include <assert.h>
#define BOXSIZE         56

/* Scan the shell screening table and mark which BOXSIZE-wide blocks of AOs
 * contain only screened-out shells.  empty[ibox] is set to 1 iff every shell
 * whose AOs fall in box ibox has a zero non0table entry.  Returns nonzero if
 * at least one completed box is empty (so callers can take the blocked BLAS
 * path), and 0 when any input pointer is NULL. */
int VXCao_empty_blocks(char *empty, unsigned char *non0table, int *shls_slice,
                       int *ao_loc)
{
        if (non0table == NULL || shls_slice == NULL || ao_loc == NULL) {
                return 0;
        }

        const int sh0 = shls_slice[0];
        const int sh1 = shls_slice[1];
        int ish;
        int ibox = 0;
        int next_bound = BOXSIZE;
        int found_empty = 0;

        empty[ibox] = 1;
        for (ish = sh0; ish < sh1; ish++) {
                /* box stays empty only while every shell in it is screened */
                empty[ibox] &= !non0table[ish];
                if (ao_loc[ish] == next_bound) {
                        /* shell starts exactly on a box boundary: close the
                         * current box and open a fresh one */
                        found_empty |= empty[ibox];
                        ibox++;
                        next_bound += BOXSIZE;
                        empty[ibox] = 1;
                } else if (ao_loc[ish] > next_bound) {
                        /* shell straddles the boundary: the new box already
                         * contains part of this shell */
                        found_empty |= empty[ibox];
                        ibox++;
                        next_bound += BOXSIZE;
                        empty[ibox] = !non0table[ish];
                }
        }
        return found_empty;
}
/* Compute vm[nocc,bgrids] += ao[nao,bgrids]^T . dm[nao,nocc] for one slab of
 * bgrids grid points, skipping BOXSIZE-wide AO blocks whose shells are all
 * screened out (per VXCao_empty_blocks).  ao and vm are stored with leading
 * dimension ngrids.  beta starts at 0 so the first contributing dgemm
 * overwrites vm, and later blocks accumulate with beta = 1. */
static void dot_ao_dm(double *vm, double *ao, double *dm,
                      int nao, int nocc, int ngrids, int bgrids,
                      unsigned char *non0table, int *shls_slice, int *ao_loc)
{
        int nbox = (nao+BOXSIZE-1) / BOXSIZE;
        char empty[nbox];
        int has0 = VXCao_empty_blocks(empty, non0table, shls_slice, ao_loc);

        const char TRANS_T = 'T';
        const char TRANS_N = 'N';
        const double D1 = 1;
        double beta = 0;
        if (has0) {
                /* blocked path: one dgemm per non-empty AO box */
                int box_id, bas_id, blen, i, j;
                size_t b0;
                for (box_id = 0; box_id < nbox; box_id++) {
                        if (!empty[box_id]) {
                                b0 = box_id * BOXSIZE;
                                blen = MIN(nao-b0, BOXSIZE);
                                dgemm_(&TRANS_N, &TRANS_T, &bgrids, &nocc, &blen,
                                       &D1, ao+b0*ngrids, &ngrids, dm+b0*nocc, &nocc,
                                       &beta, vm, &ngrids);
                                /* after the first dgemm, accumulate into vm */
                                beta = 1.0;
                        }
                }
                if (beta == 0) { // all empty: no dgemm ran, so clear vm by hand
                        for (i = 0; i < nocc; i++) {
                        for (j = 0; j < bgrids; j++) {
                                vm[i*ngrids+j] = 0;
                        } }
                }
        } else {
                /* no empty boxes: a single full dgemm */
                dgemm_(&TRANS_N, &TRANS_T, &bgrids, &nocc, &nao,
                       &D1, ao, &ngrids, dm, &nocc, &beta, vm, &ngrids);
        }
}
/* vm[nocc,ngrids] = ao[i,ngrids] * dm[i,nocc]
 * The grid axis is chopped into BLKSIZE-wide slabs; each slab is handled
 * independently by dot_ao_dm, so the slabs are distributed over threads.
 * non0table holds nbas screening entries per slab. */
void VXCdot_ao_dm(double *vm, double *ao, double *dm,
                  int nao, int nocc, int ngrids, int nbas,
                  unsigned char *non0table, int *shls_slice, int *ao_loc)
{
        const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE;
#pragma omp parallel default(none) \
        shared(vm, ao, dm, nao, nocc, ngrids, nbas, \
               non0table, shls_slice, ao_loc)
{
        int iblk;
#pragma omp for nowait schedule(static)
        for (iblk = 0; iblk < nblk; iblk++) {
                const int grid0 = iblk * BLKSIZE;
                const int nslab = MIN(ngrids-grid0, BLKSIZE);
                dot_ao_dm(vm+grid0, ao+grid0, dm,
                          nao, nocc, ngrids, nslab,
                          non0table+iblk*nbas, shls_slice, ao_loc);
        }
}
}
/* vv[n,m] = ao1[n,ngrids] * ao2[m,ngrids]
 * Accumulate vv += ao1 . ao2^T for one slab of bgrids grid points, skipping
 * BOXSIZE x BOXSIZE tiles whose row or column AO block is fully screened out.
 * When hermi is set, only the lower triangle of boxes (jb <= ib) is computed;
 * the caller symmetrizes afterwards.  Both dgemm calls use beta = 1, so vv
 * must be zeroed by the caller before the first slab. */
static void dot_ao_ao(double *vv, double *ao1, double *ao2,
                      int nao, int ngrids, int bgrids, int hermi,
                      unsigned char *non0table, int *shls_slice, int *ao_loc)
{
        int nbox = (nao+BOXSIZE-1) / BOXSIZE;
        char empty[nbox];
        int has0 = VXCao_empty_blocks(empty, non0table, shls_slice, ao_loc);

        const char TRANS_T = 'T';
        const char TRANS_N = 'N';
        const double D1 = 1;
        if (has0) {
                /* blocked path: one dgemm per non-empty tile (ib, jb) */
                int ib, jb, leni, lenj;
                int j1 = nbox;
                size_t b0i, b0j;
                for (ib = 0; ib < nbox; ib++) {
                if (!empty[ib]) {
                        b0i = ib * BOXSIZE;
                        leni = MIN(nao-b0i, BOXSIZE);
                        if (hermi) {
                                j1 = ib + 1;   // lower triangle of boxes only
                        }
                        for (jb = 0; jb < j1; jb++) {
                        if (!empty[jb]) {
                                b0j = jb * BOXSIZE;
                                lenj = MIN(nao-b0j, BOXSIZE);
                                dgemm_(&TRANS_T, &TRANS_N, &lenj, &leni, &bgrids, &D1,
                                       ao2+b0j*ngrids, &ngrids, ao1+b0i*ngrids, &ngrids,
                                       &D1, vv+b0i*nao+b0j, &nao);
                        } }
                } }
        } else {
                /* no empty boxes: a single full dgemm */
                dgemm_(&TRANS_T, &TRANS_N, &nao, &nao, &bgrids,
                       &D1, ao2, &ngrids, ao1, &ngrids, &D1, vv, &nao);
        }
}
/* vv[nao,nao] = ao1[i,nao] * ao2[i,nao]
 * Each thread accumulates its grid slabs into a private nao*nao buffer via
 * dot_ao_ao, then the buffers are summed into vv inside a critical section.
 * With hermi set, only the lower triangle is computed and NPdsymm_triu
 * fills in the rest at the end. */
void VXCdot_ao_ao(double *vv, double *ao1, double *ao2,
                  int nao, int ngrids, int nbas, int hermi,
                  unsigned char *non0table, int *shls_slice, int *ao_loc)
{
        const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE;
        memset(vv, 0, sizeof(double) * nao * nao);
#pragma omp parallel default(none) \
        shared(vv, ao1, ao2, nao, ngrids, nbas, hermi, \
               non0table, shls_slice, ao_loc)
{
        int iblk, i;
        double *vbuf = (double *)calloc(nao*nao, sizeof(double));
#pragma omp for nowait schedule(static)
        for (iblk = 0; iblk < nblk; iblk++) {
                const int grid0 = iblk * BLKSIZE;
                dot_ao_ao(vbuf, ao1+grid0, ao2+grid0,
                          nao, ngrids, MIN(ngrids-grid0, BLKSIZE), hermi,
                          non0table+iblk*nbas, shls_slice, ao_loc);
        }
        /* merge the thread-private partial sums */
#pragma omp critical
        for (i = 0; i < nao*nao; i++) {
                vv[i] += vbuf[i];
        }
        free(vbuf);
}
        if (hermi != 0) {
                NPdsymm_triu(nao, vv, hermi);
        }
}
|
rose_dep_distance.c | /*
*Test dependence distance
* */
#include "omp.h"
/* Constant-offset dependence test: a[i+3] depends on a[i-5] with a fixed
 * dependence distance of 8.  The original bounds (i = 0..98) read a[-5..93]
 * and wrote a[3..101], both outside a[100] (undefined behavior / stack
 * smash).  The range below keeps the same constant dependence distance
 * while staying in bounds: reads a[0..91], writes a[8..99]. */
void foo()
{
  int i;
  int a[100];
/* Constant offset*/
  for (i = 5; i <= 96; i += 1) {
    a[i + 3] = a[i - 5] + 1;
  }
}
/* Variable-offset dependence test: the distance between a[i+j] and a[i+k]
 * is j-k, which is unknown at compile time.  The caller must supply j, k
 * that keep i+j and i+k inside a[100]. */
void foo2(int j,int k)
{
  int i;
  int a[100];
/*variable offset*/
  for (i = 0; i < 99; i += 1) {
    a[i + j] = a[i + k] + 1;
  }
}
/* Shared 2-D array used by foo3; indices 1..99 stay in bounds. */
int b[100][100];
/* Two-level nest with a constant offset: b[i][j] depends on
 * b[i][j-1], so the dependence is carried by the inner j loop
 * (distance 1 in j, 0 in i).  Rows are independent of each other,
 * which is why parallelizing the outer i loop, as the pragma does,
 * is legal. */
void foo3()
{
int i;
int j;
/*two level with constant offset*/
#pragma omp parallel for private (i,j)
for (i = 1; i <= 99; i += 1) {
for (j = 1; j <= 99; j += 1) {
b[i][j] = b[i][j - 1] + 1;
}
}
}
|
atomic_read_codegen.c | // RUN: %clang_cc1 -no-opaque-pointers -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp -x c -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -no-opaque-pointers -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp-simd -x c -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics
// REQUIRES: x86-registered-target
#ifndef HEADER
#define HEADER
// Destination/source pairs for '#pragma omp atomic read' in main():
// each read loads atomically from the 'x' variable and stores the
// result into the matching 'v' variable.
_Bool bv, bx;
char cv, cx;
unsigned char ucv, ucx;
short sv, sx;
unsigned short usv, usx;
int iv, ix;
unsigned int uiv, uix;
long lv, lx;
unsigned long ulv, ulx;
long long llv, llx;
unsigned long long ullv, ullx;
float fv, fx;
double dv, dx;
long double ldv, ldx;
_Complex int civ, cix;
_Complex float cfv, cfx;
_Complex double cdv, cdx;
// 16-byte vector source (read through i128 per the CHECK lines below).
typedef int int4 __attribute__((__vector_size__(16)));
int4 int4x;
// Bit-field structs, in regular and __packed__ flavors, exercise atomic
// reads of storage units with different widths and alignments.
struct BitFields {
int : 32;
int a : 31;
} bfx;
struct BitFields_packed {
int : 32;
int a : 31;
} __attribute__ ((__packed__)) bfx_packed;
struct BitFields2 {
int : 31;
int a : 1;
} bfx2;
struct BitFields2_packed {
int : 31;
int a : 1;
} __attribute__ ((__packed__)) bfx2_packed;
struct BitFields3 {
int : 11;
int a : 14;
} bfx3;
struct BitFields3_packed {
int : 11;
int a : 14;
} __attribute__ ((__packed__)) bfx3_packed;
struct BitFields4 {
short : 16;
int a: 1;
long b : 7;
} bfx4;
struct BitFields4_packed {
short : 16;
int a: 1;
long b : 7;
} __attribute__ ((__packed__)) bfx4_packed;
// 2-float ext vector source for the float2x.x read in main().
typedef float float2 __attribute__((ext_vector_type(2)));
float2 float2x;
// Register "0" is currently an invalid register for global register variables.
// Use "esp" instead of "0".
// register int rix __asm__("0");
register int rix __asm__("esp");
// Body of the test: every '#pragma omp atomic read' must lower to an
// atomic load of a lock-free integer width (or an __atomic_load libcall
// when no width fits), followed by a non-atomic store into the
// destination; the seq_cst/acquire forms additionally emit
// __kmpc_flush, as the CHECK lines assert.
// CHECK-LABEL: @main(
int main(void) {
// CHECK: load atomic i8, i8* {{.*}} monotonic, align 1
// CHECK: store i8
#pragma omp atomic read
bv = bx;
// CHECK: load atomic i8, i8* {{.*}} monotonic, align 1
// CHECK: store i8
#pragma omp atomic read
cv = cx;
// CHECK: load atomic i8, i8* {{.*}} monotonic, align 1
// CHECK: store i8
#pragma omp atomic read
ucv = ucx;
// CHECK: load atomic i16, i16* {{.*}} monotonic, align 2
// CHECK: store i16
#pragma omp atomic read
sv = sx;
// CHECK: load atomic i16, i16* {{.*}} monotonic, align 2
// CHECK: store i16
#pragma omp atomic read
usv = usx;
// CHECK: load atomic i32, i32* {{.*}} monotonic, align 4
// CHECK: store i32
#pragma omp atomic read
iv = ix;
// CHECK: load atomic i32, i32* {{.*}} monotonic, align 4
// CHECK: store i32
#pragma omp atomic read
uiv = uix;
// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
// CHECK: store i64
#pragma omp atomic read
lv = lx;
// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
// CHECK: store i64
#pragma omp atomic read
ulv = ulx;
// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
// CHECK: store i64
#pragma omp atomic read
llv = llx;
// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
// CHECK: store i64
#pragma omp atomic read
ullv = ullx;
// CHECK: load atomic i32, i32* bitcast (float* {{.*}} monotonic, align 4
// CHECK: bitcast i32 {{.*}} to float
// CHECK: store float
#pragma omp atomic read
fv = fx;
// CHECK: load atomic i64, i64* bitcast (double* {{.*}} monotonic, align 8
// CHECK: bitcast i64 {{.*}} to double
// CHECK: store double
#pragma omp atomic read
dv = dx;
// CHECK: [[LD:%.+]] = load atomic i128, i128* bitcast (x86_fp80* {{.*}} monotonic, align 16
// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[LDTEMP:%.*]] to i128*
// CHECK: store i128 [[LD]], i128* [[BITCAST]]
// CHECK: [[LD:%.+]] = load x86_fp80, x86_fp80* [[LDTEMP]]
// CHECK: store x86_fp80 [[LD]]
#pragma omp atomic read
ldv = ldx;
// CHECK: call{{.*}} void @__atomic_load(i64 noundef 8,
// CHECK: store i32
// CHECK: store i32
#pragma omp atomic read
civ = cix;
// CHECK: call{{.*}} void @__atomic_load(i64 noundef 8,
// CHECK: store float
// CHECK: store float
#pragma omp atomic read
cfv = cfx;
// CHECK: call{{.*}} void @__atomic_load(i64 noundef 16,
// CHECK: call{{.*}} @__kmpc_flush(
// CHECK: store double
// CHECK: store double
#pragma omp atomic seq_cst read
cdv = cdx;
// Mixed-type reads: the atomic load width follows the source variable;
// the converted value is stored with the destination's type.
// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
// CHECK: store i8
#pragma omp atomic read
bv = ulx;
// CHECK: load atomic i8, i8* {{.*}} monotonic, align 1
// CHECK: store i8
#pragma omp atomic read
cv = bx;
// CHECK: load atomic i8, i8* {{.*}} seq_cst, align 1
// CHECK: call{{.*}} @__kmpc_flush(
// CHECK: store i8
#pragma omp atomic read seq_cst
ucv = cx;
// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
// CHECK: store i16
#pragma omp atomic read
sv = ulx;
// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
// CHECK: store i16
#pragma omp atomic read
usv = lx;
// CHECK: load atomic i32, i32* {{.*}} seq_cst, align 4
// CHECK: call{{.*}} @__kmpc_flush(
// CHECK: store i32
#pragma omp atomic seq_cst, read
iv = uix;
// CHECK: load atomic i32, i32* {{.*}} monotonic, align 4
// CHECK: store i32
#pragma omp atomic read
uiv = ix;
// CHECK: call{{.*}} void @__atomic_load(i64 noundef 8,
// CHECK: store i64
#pragma omp atomic read
lv = cix;
// CHECK: load atomic i32, i32* {{.*}} monotonic, align 4
// CHECK: store i64
#pragma omp atomic read
ulv = fx;
// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
// CHECK: store i64
#pragma omp atomic read
llv = dx;
// CHECK: load atomic i128, i128* {{.*}} monotonic, align 16
// CHECK: store i64
#pragma omp atomic read
ullv = ldx;
// CHECK: call{{.*}} void @__atomic_load(i64 noundef 8,
// CHECK: store float
#pragma omp atomic read
fv = cix;
// CHECK: load atomic i16, i16* {{.*}} monotonic, align 2
// CHECK: store double
#pragma omp atomic read
dv = sx;
// CHECK: load atomic i8, i8* {{.*}} monotonic, align 1
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bx;
// CHECK: load atomic i8, i8* {{.*}} monotonic, align 1
// CHECK: store i32
// CHECK: store i32
#pragma omp atomic read
civ = bx;
// CHECK: load atomic i16, i16* {{.*}} monotonic, align 2
// CHECK: store float
// CHECK: store float
#pragma omp atomic read
cfv = usx;
// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
// CHECK: store double
// CHECK: store double
#pragma omp atomic read
cdv = llx;
// Vector, bit-field, and register sources.
// CHECK: [[I128VAL:%.+]] = load atomic i128, i128* bitcast (<4 x i32>* @{{.+}} to i128*) monotonic, align 16
// CHECK: [[I128PTR:%.+]] = bitcast <4 x i32>* [[LDTEMP:%.+]] to i128*
// CHECK: store i128 [[I128VAL]], i128* [[I128PTR]]
// CHECK: [[LD:%.+]] = load <4 x i32>, <4 x i32>* [[LDTEMP]]
// CHECK: extractelement <4 x i32> [[LD]]
// CHECK: store i8
#pragma omp atomic read
bv = int4x[0];
// CHECK: [[LD:%.+]] = load atomic i32, i32* bitcast (i8* getelementptr (i8, i8* bitcast (%{{.+}}* @{{.+}} to i8*), i64 4) to i32*) monotonic, align 4
// CHECK: store i32 [[LD]], i32* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i32 [[LD]], 1
// CHECK: ashr i32 [[SHL]], 1
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bfx.a;
// CHECK: [[LDTEMP_VOID_PTR:%.+]] = bitcast i32* [[LDTEMP:%.+]] to i8*
// CHECK: call void @__atomic_load(i64 noundef 4, i8* noundef getelementptr (i8, i8* bitcast (%struct.BitFields_packed* @bfx_packed to i8*), i64 4), i8* noundef [[LDTEMP_VOID_PTR]], i32 noundef 0)
// CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i32 [[LD]], 1
// CHECK: ashr i32 [[SHL]], 1
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bfx_packed.a;
// CHECK: [[LD:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields2, %struct.BitFields2* @bfx2, i32 0, i32 0) monotonic, align 4
// CHECK: store i32 [[LD]], i32* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: ashr i32 [[LD]], 31
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bfx2.a;
// CHECK: [[LD:%.+]] = load atomic i8, i8* getelementptr (i8, i8* bitcast (%struct.BitFields2_packed* @bfx2_packed to i8*), i64 3) monotonic, align 1
// CHECK: store i8 [[LD]], i8* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i8, i8* [[LDTEMP]]
// CHECK: ashr i8 [[LD]], 7
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bfx2_packed.a;
// CHECK: [[LD:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields3, %struct.BitFields3* @bfx3, i32 0, i32 0) monotonic, align 4
// CHECK: store i32 [[LD]], i32* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i32 [[LD]], 7
// CHECK: ashr i32 [[SHL]], 18
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bfx3.a;
// CHECK: [[LDTEMP_VOID_PTR:%.+]] = bitcast i24* [[LDTEMP:%.+]] to i8*
// CHECK: call void @__atomic_load(i64 noundef 3, i8* noundef getelementptr (i8, i8* bitcast (%struct.BitFields3_packed* @bfx3_packed to i8*), i64 1), i8* noundef [[LDTEMP_VOID_PTR]], i32 noundef 0)
// CHECK: [[LD:%.+]] = load i24, i24* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i24 [[LD]], 7
// CHECK: [[ASHR:%.+]] = ashr i24 [[SHL]], 10
// CHECK: sext i24 [[ASHR]] to i32
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bfx3_packed.a;
// CHECK: [[LD:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @bfx4 to i64*) monotonic, align 8
// CHECK: store i64 [[LD]], i64* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i64, i64* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i64 [[LD]], 47
// CHECK: [[ASHR:%.+]] = ashr i64 [[SHL]], 63
// CHECK: trunc i64 [[ASHR]] to i32
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bfx4.a;
// CHECK: [[LD:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @bfx4_packed, i32 0, i32 0, i64 2) monotonic, align 1
// CHECK: store i8 [[LD]], i8* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i8, i8* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i8 [[LD]], 7
// CHECK: [[ASHR:%.+]] = ashr i8 [[SHL]], 7
// CHECK: sext i8 [[ASHR]] to i32
// CHECK: store x86_fp80
#pragma omp atomic relaxed read
ldv = bfx4_packed.a;
// CHECK: [[LD:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @bfx4 to i64*) monotonic, align 8
// CHECK: store i64 [[LD]], i64* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i64, i64* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i64 [[LD]], 40
// CHECK: [[ASHR:%.+]] = ashr i64 [[SHL]], 57
// CHECK: store x86_fp80
#pragma omp atomic read relaxed
ldv = bfx4.b;
// CHECK: [[LD:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @bfx4_packed, i32 0, i32 0, i64 2) acquire, align 1
// CHECK: store i8 [[LD]], i8* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i8, i8* [[LDTEMP]]
// CHECK: [[ASHR:%.+]] = ashr i8 [[LD]], 1
// CHECK: sext i8 [[ASHR]] to i64
// CHECK: call{{.*}} @__kmpc_flush(
// CHECK: store x86_fp80
#pragma omp atomic read acquire
ldv = bfx4_packed.b;
// CHECK: [[LD:%.+]] = load atomic i64, i64* bitcast (<2 x float>* @{{.+}} to i64*) monotonic, align 8
// CHECK: [[BITCAST:%.+]] = bitcast <2 x float>* [[LDTEMP:%.+]] to i64*
// CHECK: store i64 [[LD]], i64* [[BITCAST]]
// CHECK: [[LD:%.+]] = load <2 x float>, <2 x float>* [[LDTEMP]]
// CHECK: extractelement <2 x float> [[LD]]
// CHECK: store i64
#pragma omp atomic read
ulv = float2x.x;
// Global register variable: read via the read_register intrinsic.
// CHECK: call{{.*}} i{{[0-9]+}} @llvm.read_register
// CHECK: call{{.*}} @__kmpc_flush(
// CHECK: store double
#pragma omp atomic read seq_cst
dv = rix;
return 0;
}
#endif
|
master.c | // RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
// GCC generates code that does not call the runtime for the master construct
// XFAIL: gcc
#include "callback.h"
#include <omp.h>
// Exercises the OMPT master-begin/master-end callbacks: of the two
// threads in the parallel region, only one executes the master
// construct.  The CHECK lines below compare the callbacks'
// codeptr_ra values against the addresses printed here.
int main() {
int x = 0;
#pragma omp parallel num_threads(2)
{
#pragma omp master
{
// master-begin codeptr_ra must fuzzily match this call site.
print_fuzzy_address(1);
x++;
}
// master-end codeptr_ra is compared with this address.
print_current_address(2);
}
// x was incremented exactly once (master region runs on one thread);
// read after the parallel region's implicit barrier.
printf("%" PRIu64 ": x=%d\n", ompt_get_thread_data()->value, x);
return 0;
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_master'
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_master_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]], task_id=[[TASK_ID:[0-9]+]],
// CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
// CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_master_end:
// CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID]],
// CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS_END:0x[0-f]+]]
// CHECK: {{^}}[[MASTER_ID]]: current_address={{.*}}[[RETURN_ADDRESS_END]]
|
sageInterface.h | #ifndef ROSE_SAGE_INTERFACE
#define ROSE_SAGE_INTERFACE
#include "sage3basic.hhh"
#include <stdint.h>
#include <utility>
#include "rosePublicConfig.h" // for ROSE_BUILD_JAVA_LANGUAGE_SUPPORT
#include "OmpAttribute.h"
#if 0 // FMZ(07/07/2010): the argument "nextErrorCode" should be call-by-reference
SgFile* determineFileType ( std::vector<std::string> argv, int nextErrorCode, SgProject* project );
#else
SgFile* determineFileType ( std::vector<std::string> argv, int& nextErrorCode, SgProject* project );
#endif
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
#include "rewrite.h"
#endif
// DQ (7/20/2008): Added support for unparsing arbitrary strings in the unparser.
#include "astUnparseAttribute.h"
#include <set>
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
#include "LivenessAnalysis.h"
#include "abstract_handle.h"
#include "ClassHierarchyGraph.h"
#endif
// DQ (8/19/2004): Moved from ROSE/src/midend/astRewriteMechanism/rewrite.h
//! A global function for getting the string associated with an enum (which is defined in global scope)
ROSE_DLL_API std::string getVariantName (VariantT v);
// DQ (12/9/2004): Qing, Rich and Dan have decided to start this namespace within ROSE
// This namespace is specific to interface functions that operate on the Sage III AST.
// The name was chosen so as not to conflict with other classes within ROSE.
// This will become the future home of many interface functions which operate on
// the AST and which are generally useful to users. As a namespace multiple files can be used
// to represent the complete interface and different developers may contribute interface
// functions easily.
// Constructor handling: (We have sageBuilder.h now for this purpose, Liao 2/1/2008)
// We could add simpler layers of support for construction of IR nodes by
// hiding many details in "makeSg***()" functions. Such functions would
// return pointers to the associated Sg*** objects and would be able to hide
// many IR specific details, including:
// memory handling
// optional parameter settings not often required
// use of Sg_File_Info objects (and setting them as transformations)
//
// namespace AST_Interface (this name is taken already by some of Qing's work :-)
//! An alias for Sg_File_Info::generateDefaultFileInfoForTransformationNode()
#define TRANS_FILE Sg_File_Info::generateDefaultFileInfoForTransformationNode()
/** Functions that are useful when operating on the AST.
*
* The Sage III IR design attempts to be minimalist. Thus additional functionality is intended to be presented using separate
* higher level interfaces which work with the IR. This namespace collects functions that operate on the IR and support
* numerous types of operations that are common to general analysis and transformation of the AST. */
namespace SageInterface
{
// Liao 6/22/2016: keep records of loop init-stmt normalization, later help undo it to support autoPar.
//! Bookkeeping for for-loop init-statement normalization so the
//! transformation can later be undone.
struct Transformation_Record
{
// a lookup table to check if a for loop has been normalized for its c99-style init-stmt
std::map <SgForStatement* , bool > forLoopInitNormalizationTable;
// Detailed record about the original declaration (1st in the pair) and the normalization generated new declaration (2nd in the pair)
std::map <SgForStatement* , std::pair<SgVariableDeclaration*, SgVariableDeclaration*> > forLoopInitNormalizationRecord;
} ;
// Single global record instance shared by the normalization pass and its undo.
ROSE_DLL_API extern Transformation_Record trans_records;
// DQ (4/3/2014): Added general AST support separate from the AST.
// Container and API for analysis information that is outside of the AST and as a result
// prevents frequent modification of the IR.
//! Groups every declaration statement with its associated declarations
//! (defining and nondefining), keyed by the first nondefining declaration.
class DeclarationSets
{
// DQ (4/3/2014): This stores all associated declarations as a map of sets.
// the key to the map is the first nondefining declaration and the elements of the set are
// all of the associated declarations (including the defining declaration).
private:
//! Map of first-nondefining declaration to all other associated declarations.
std::map<SgDeclarationStatement*,std::set<SgDeclarationStatement*>* > declarationMap;
public:
//! Add decl to the set keyed by its first nondefining declaration.
void addDeclaration(SgDeclarationStatement* decl);
//! Return the set of declarations associated with decl.
const std::set<SgDeclarationStatement*>* getDeclarations(SgDeclarationStatement* decl);
//! Direct access to the underlying key-to-set map.
std::map<SgDeclarationStatement*,std::set<SgDeclarationStatement*>* > & getDeclarationMap();
//! NOTE(review): presumably tests whether decl lives in a defining scope --
//! semantics inferred from the name; confirm against the implementation.
bool isLocatedInDefiningScope(SgDeclarationStatement* decl);
};
// DQ (4/3/2014): This constructs a data structure that holds analysis information about
// the AST that is separate from the AST. This is intended to be a general mechanism
// to support analysis information without constantly modifying the IR.
DeclarationSets* buildDeclarationSets(SgNode*);
//! An internal counter for generating unique SgName
ROSE_DLL_API extern int gensym_counter;
// tps : 28 Oct 2008 - support for finding the main interpretation
SgAsmInterpretation* getMainInterpretation(SgAsmGenericFile* file);
//! Get the unsigned value of a disassembled constant.
uint64_t getAsmConstant(SgAsmValueExpression* e);
//! Get the signed value of a disassembled constant.
int64_t getAsmSignedConstant(SgAsmValueExpression *e);
//! Function to add "C" style comment to statement.
void addMessageStatement( SgStatement* stmt, std::string message );
//! A persistent attribute to represent a unique name for an expression
//! Stores a single string; attach to an AST node via the AstAttribute
//! mechanism.
class UniqueNameAttribute : public AstAttribute
{
private:
std::string name;
public:
//! Construct with an optional initial name (empty by default).
//! Pass-by-value + move avoids an extra copy for rvalue arguments.
UniqueNameAttribute(std::string n="") : name(std::move(n)) {}
//! Replace the stored name.
void set_name (std::string n) {name = std::move(n);}
//! Retrieve the stored name (const-correct accessor).
std::string get_name () const {return name;}
};
// DQ (3/2/2009): Added support for collectiong an merging the referenced symbols in the outlined
// function into the list used to edit the outlined code subtree to fixup references (from symbols
// in the original file to the symbols in the newer separate file).
// typedef rose_hash::unordered_map<SgNode*, SgNode*, hash_nodeptr> ReplacementMapType;
// void supplementReplacementSymbolMap ( const ReplacementMapTraversal::ReplacementMapType & inputReplacementMap );
// CH (4/9/2010): Use boost::hash instead
//#ifdef _MSC_VER
#if 0
inline size_t hash_value(SgNode* t) {return (size_t)t;}
#endif
#if 0
// DQ (8/3/2015): We expect that this is not used and is generating a warnings so we
// can best fix it by removing it.
struct hash_nodeptr
{
// CH (4/9/2010): Use boost::hash instead
//#ifndef _MSC_VER
#if 0
//rose_hash::hash<char*> hasher;
#endif
public:
size_t operator()(SgNode* node) const
{
// CH (4/9/2010): Use boost::hash instead
//#ifdef _MSC_VER
#if 0
return (size_t) hash_value(node);
#else
return (size_t) node;
#endif
}
};
#ifndef SWIG
// DQ (3/10/2013): This appears to be a problem for the SWIG interface (undefined reference at link-time).
void supplementReplacementSymbolMap ( rose_hash::unordered_map<SgNode*, SgNode*, hash_nodeptr> & inputReplacementMap );
#endif
#endif
//------------------------------------------------------------------------
//@{
/*! @name Symbol tables
\brief utility functions for symbol tables
*/
// Liao 1/22/2008, used for get symbols for generating variable reference nodes
// ! Find a variable symbol in current and ancestor scopes for a given name
ROSE_DLL_API SgVariableSymbol *lookupVariableSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope=NULL);
// DQ (8/21/2013): Modified to make newest function parameters be default arguments.
// DQ (8/16/2013): For now we want to remove the use of default parameters and add the support for template parameters and template arguments.
//! Find a symbol in current and ancestor scopes for a given variable name, starting from top of ScopeStack if currentscope is not given or NULL.
// SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope=NULL);
// SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope, SgTemplateParameterPtrList* templateParameterList, SgTemplateArgumentPtrList* templateArgumentList);
ROSE_DLL_API SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);
// DQ (11/24/2007): Functions moved from the Fortran support so that they could be called from within astPostProcessing.
//!look up the first matched function symbol in parent scopes given only a function name, starting from top of ScopeStack if currentscope is not given or NULL
ROSE_DLL_API SgFunctionSymbol *lookupFunctionSymbolInParentScopes (const SgName & functionName, SgScopeStatement *currentScope=NULL);
// Liao, 1/24/2008, find exact match for a function
//!look up function symbol in parent scopes given both name and function type, starting from top of ScopeStack if currentscope is not given or NULL
ROSE_DLL_API SgFunctionSymbol *lookupFunctionSymbolInParentScopes (const SgName & functionName,
const SgType* t,
SgScopeStatement *currentScope=NULL);
// DQ (8/21/2013): Modified to make newest function parameters be default arguments.
// DQ (8/16/2013): For now we want to remove the use of default parameters and add the support for template parameters and template arguments.
// DQ (5/7/2011): Added support for SgClassSymbol (used in name qualification support).
// SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
ROSE_DLL_API SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);
ROSE_DLL_API SgTypedefSymbol* lookupTypedefSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
#if 0
// DQ (8/13/2013): This function does not make since any more, now that we have made the symbol
// table handling more precise and we have to provide template parameters for any template lookup.
// We also have to know if we want to lookup template classes, template functions, or template
// member functions (since each have specific requirements).
SgTemplateSymbol* lookupTemplateSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
#endif
#if 0
// DQ (8/13/2013): I am not sure if we want this functions in place of lookupTemplateSymbolInParentScopes.
// Where these are called we might not know enough information about the template parameters or function
// types, for example.
SgTemplateClassSymbol* lookupTemplateClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);
SgTemplateFunctionSymbol* lookupTemplateFunctionSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL);
SgTemplateMemberFunctionSymbol* lookupTemplateMemberFunctionSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL);
#endif
// DQ (8/21/2013): Modified to make some of the newest function parameters be default arguments.
// DQ (8/13/2013): I am not sure if we want this functions in place of lookupTemplateSymbolInParentScopes.
ROSE_DLL_API SgTemplateClassSymbol* lookupTemplateClassSymbolInParentScopes (const SgName & name, SgTemplateParameterPtrList* templateParameterList, SgTemplateArgumentPtrList* templateArgumentList, SgScopeStatement *cscope = NULL);
ROSE_DLL_API SgEnumSymbol* lookupEnumSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
ROSE_DLL_API SgNamespaceSymbol* lookupNamespaceSymbolInParentScopes(const SgName & name, SgScopeStatement *currentScope = NULL);
// DQ (7/17/2011): Added function from cxx branch that I need here for the Java support.
// SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *cscope);
/*! \brief set_name of symbol in symbol table.
    This function extracts the symbol from the relevant symbol table,
changes the name (at the declaration) and reinserts it into the
symbol table.
\internal I think this is what this function does, I need to double check.
*/
// DQ (12/9/2004): Moved this function (by Alin Jula) from being a member of SgInitializedName
// to this location where it can be a part of the interface for the Sage III AST.
ROSE_DLL_API int set_name (SgInitializedName * initializedNameNode, SgName new_name);
/*! \brief Output function type symbols in global function type symbol table.
*/
void outputGlobalFunctionTypeSymbolTable ();
// DQ (6/27/2005):
/*! \brief Output the local symbol tables.
\implementation Each symbol table is output with the file infor where it is located in the source code.
*/
ROSE_DLL_API void outputLocalSymbolTables (SgNode * node);
// Traversal support for outputLocalSymbolTables() above: as an
// AstSimpleProcessing subclass, visit() is invoked on every AST node
// (its body is defined elsewhere).
class OutputLocalSymbolTables:public AstSimpleProcessing
{
public:
void visit (SgNode * node);
};
/*! \brief Regenerate the symbol table.
\implementation current symbol table must be NULL pointer before calling this
function (for safety, but is this a good idea?)
*/
// DQ (9/28/2005):
void rebuildSymbolTable (SgScopeStatement * scope);
/*! \brief Clear those variable symbols with unknown type (together with initialized names) which are also not referenced by any variable references or declarations under root. If root is NULL, all symbols with unknown type will be deleted.
*/
void clearUnusedVariableSymbols (SgNode* root = NULL);
// DQ (3/1/2009):
//! All the symbol table references in the copied AST need to be reset after rebuilding the copied scope's symbol table.
void fixupReferencesToSymbols( const SgScopeStatement* this_scope, SgScopeStatement* copy_scope, SgCopyHelp & help );
//@}
//------------------------------------------------------------------------
//@{
/*! @name Stringify
\brief Generate a useful string (name) to describe a SgNode
*/
/*! \brief Generate a useful name to describe the SgNode
\internal default names are used for SgNode objects that can not be associated with a name.
*/
// DQ (9/21/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgNode * node);
/*! \brief Generate a useful name to describe the declaration
\internal default names are used for declarations that can not be associated with a name.
*/
// DQ (6/13/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgStatement * stmt);
/*! \brief Generate a useful name to describe the expression
\internal default names are used for expressions that can not be associated with a name.
*/
std::string get_name (const SgExpression * expr);
/*! \brief Generate a useful name to describe the declaration
\internal default names are used for declarations that can not be associated with a name.
*/
// DQ (6/13/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgDeclarationStatement * declaration);
/*! \brief Generate a useful name to describe the scope
\internal default names are used for scope that cannot be associated with a name.
*/
// DQ (6/13/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgScopeStatement * scope);
/*! \brief Generate a useful name to describe the SgSymbol
\internal default names are used for SgSymbol objects that cannot be associated with a name.
*/
// DQ (2/11/2007): Added this function to make debugging support more complete (useful for symbol table debugging support).
std::string get_name (const SgSymbol * symbol);
/*! \brief Generate a useful name to describe the SgType
\internal default names are used for SgType objects that cannot be associated with a name.
*/
std::string get_name (const SgType * type);
/*! \brief Generate a useful name to describe the SgSupport IR node
*/
std::string get_name (const SgSupport * node);
/*! \brief Generate a useful name to describe the SgLocatedNodeSupport IR node
*/
std::string get_name (const SgLocatedNodeSupport * node);
/*! \brief Generate a useful name to describe the SgC_PreprocessorDirectiveStatement IR node
*/
std::string get_name ( const SgC_PreprocessorDirectiveStatement* directive );
/*! \brief Generate a useful name to describe the SgToken IR node
*/
std::string get_name ( const SgToken* token );
// DQ (3/20/2016): Added to refactor some of the DSL infrastructure support.
/*! \brief Generate a useful name to support construction of identifiers from declarations.
This function permits names to be generated that will be unique across translation units
(a specific requirement different from the context of the get_name() functions above).
\internal This supports only a restricted set of declarations presently.
*/
std::string generateUniqueNameForUseAsIdentifier ( SgDeclarationStatement* declaration );
std::string generateUniqueNameForUseAsIdentifier_support ( SgDeclarationStatement* declaration );
/*! \brief Global map of name collisions to support generateUniqueNameForUseAsIdentifier() function.
*/
extern std::map<std::string,int> local_name_collision_map;
extern std::map<std::string,SgNode*> local_name_to_node_map;
extern std::map<SgNode*,std::string> local_node_to_name_map;
/*! \brief Traversal to set the global map of names to node and node to names.collisions to support generateUniqueNameForUseAsIdentifier() function.
*/
void computeUniqueNameForUseAsIdentifier( SgNode* astNode );
/*! \brief Reset map variables used to support generateUniqueNameForUseAsIdentifier() function.
*/
void reset_name_collision_map();
//@}
//------------------------------------------------------------------------
//@{
/*! @name Class utilities
\brief
*/
/*! \brief Get the default destructor from the class declaration
*/
// DQ (6/21/2005): Get the default destructor from the class declaration
SgMemberFunctionDeclaration *getDefaultDestructor (SgClassDeclaration *
classDeclaration);
/*! \brief Get the default constructor from the class declaration
*/
// DQ (6/22/2005): Get the default constructor from the class declaration
ROSE_DLL_API SgMemberFunctionDeclaration *getDefaultConstructor (SgClassDeclaration *
classDeclaration);
/*! \brief Return true if template definition is in the class, false if outside of class.
*/
// DQ (8/27/2005):
bool templateDefinitionIsInClass (SgTemplateInstantiationMemberFunctionDecl
* memberFunctionDeclaration);
/*! \brief Generate a non-defining (forward) declaration from a defining function declaration.
\internal should put into sageBuilder ?
*/
// DQ (9/17/2005):
SgTemplateInstantiationMemberFunctionDecl*
buildForwardFunctionDeclaration
(SgTemplateInstantiationMemberFunctionDecl * memberFunctionInstantiation);
//! Check if a SgNode is a declaration for a structure
bool isStructDeclaration(SgNode * node);
//! Check if a SgNode is a declaration for a union
bool isUnionDeclaration(SgNode * node);
#if 0
// DQ (8/28/2005): This is already a member function of the SgFunctionDeclaration
// (so that it can handle template functions and member functions)
/*! \brief Return true if member function of a template member function,
or false if a non-template member function in a templated class.
*/
// DQ (8/27/2005):
bool isTemplateMemberFunction (SgTemplateInstantiationMemberFunctionDecl *
memberFunctionDeclaration);
#endif
//@}
//------------------------------------------------------------------------
//@{
/*! @name Misc.
\brief Not sure the classifications right now
*/
//! Save AST into a pdf file. Start from a node to find its enclosing file node. The entire file's AST will be saved into a pdf.
void saveToPDF(SgNode* node, std::string filename);
void saveToPDF(SgNode* node); // enable calling from gdb
// DQ (2/12/2012): Added some diagnostic support.
//! Diagnostic function for tracing back through the parent list to understand at runtime where in the AST a failure happened.
void whereAmI(SgNode* node);
//! Extract a SgPragmaDeclaration's leading keyword. For example "#pragma omp parallel" has a keyword of "omp".
std::string extractPragmaKeyword(const SgPragmaDeclaration *);
//! Check if a node is SgOmp*Statement
ROSE_DLL_API bool isOmpStatement(SgNode* );
/*! \brief Return true if function is overloaded.
*/
// DQ (8/27/2005):
bool isOverloaded (SgFunctionDeclaration * functionDeclaration);
// DQ (2/14/2012): Added support function used for variable declarations in conditionals.
//! Support function used for variable declarations in conditionals
void initializeIfStmt(SgIfStmt *ifstmt, SgStatement* conditional, SgStatement * true_body, SgStatement * false_body);
//! Support function used for variable declarations in conditionals
void initializeSwitchStatement(SgSwitchStatement* switchStatement,SgStatement *item_selector,SgStatement *body);
//! Support function used for variable declarations in conditionals
void initializeWhileStatement(SgWhileStmt* whileStatement, SgStatement * condition, SgStatement *body, SgStatement *else_body);
//! Generate unique names for expressions and attach the names as persistent attributes ("UniqueNameAttribute")
void annotateExpressionsWithUniqueNames (SgProject* project);
//! Check if a SgNode is a main() function declaration
ROSE_DLL_API bool isMain (const SgNode* node);
// DQ (6/22/2005):
/*! \brief Generate unique name from C and C++ constructs. The name may contain space.
This is support for the AST merge, but is generally useful as a more general mechanism than
name mangling which is more closely tied to the generation of names to support link-time function name
resolution. This is more general than common name mangling in that it resolves more relevant differences
between C and C++ declarations. (e.g. the type within the declaration: "struct { int:8; } foo;").
\implementation current work does not support expressions.
*/
std::string generateUniqueName ( const SgNode * node, bool ignoreDifferenceBetweenDefiningAndNondefiningDeclarations);
/** Generate a name like __temp#__ that is unique in the current scope and any parent and children scopes. # is a unique integer counter.
* @param baseName the word to be included in the variable names. */
std::string generateUniqueVariableName(SgScopeStatement* scope, std::string baseName = "temp");
// DQ (8/10/2010): Added const to first parameter.
// DQ (3/10/2007):
//! Generate a unique string from the source file position information
std::string declarationPositionString (const SgDeclarationStatement * declaration);
// DQ (1/20/2007):
//! Added mechanism to generate project name from list of file names
ROSE_DLL_API std::string generateProjectName (const SgProject * project, bool supressSuffix = false );
//! Given a SgExpression that represents a named function (or bound member
//! function), return the mentioned function
SgFunctionDeclaration* getDeclarationOfNamedFunction(SgExpression* func);
//! Get the mask expression from the header of a SgForAllStatement
SgExpression* forallMaskExpression(SgForAllStatement* stmt);
//! Find all SgPntrArrRefExp under astNode, then add SgVarRefExp (if any) of SgPntrArrRefExp's dim_info into NodeList_t
void addVarRefExpFromArrayDimInfo(SgNode * astNode, Rose_STL_Container<SgNode *>& NodeList_t);
// DQ (10/6/2006): Added support for faster mangled name generation (caching avoids recomputation).
/*! \brief Support for faster mangled name generation (caching avoids recomputation).
*/
#ifndef SWIG
// DQ (3/10/2013): This appears to be a problem for the SWIG interface (undefined reference at link-time).
void clearMangledNameCache (SgGlobal * globalScope);
void resetMangledNameCache (SgGlobal * globalScope);
#endif
std::string getMangledNameFromCache (SgNode * astNode);
std::string addMangledNameToCache (SgNode * astNode, const std::string & mangledName);
SgDeclarationStatement * getNonInstantiatonDeclarationForClass (SgTemplateInstantiationMemberFunctionDecl * memberFunctionInstantiation);
//! a better version for SgVariableDeclaration::set_baseTypeDefininingDeclaration(), handling all side effects automatically
//! Used to have a struct declaration embedded into a variable declaration
void setBaseTypeDefiningDeclaration(SgVariableDeclaration* var_decl, SgDeclarationStatement *base_decl);
// DQ (10/14/2006): This function tests the AST to see if for a non-defining declaration, the
// bool declarationPreceedsDefinition ( SgClassDeclaration* classNonDefiningDeclaration, SgClassDeclaration* classDefiningDeclaration );
//! Check if a defining declaration comes before or after the non-defining declaration.
bool declarationPreceedsDefinition (SgDeclarationStatement *nonDefiningDeclaration, SgDeclarationStatement *definingDeclaration);
// DQ (10/19/2006): Function calls have interesting context dependent rules to determine if
// they are output with a global qualifier or not. Where this is true we have to avoid global
// qualifiers, since the function's scope has not been defined. This is an example of where
// qualification of function names in function calls are context dependent; an interesting
// example of where the C++ language is not friendly to source-to-source processing :-).
bool functionCallExpressionPreceedsDeclarationWhichAssociatesScope (SgFunctionCallExp * functionCall);
/*! \brief Compute the intersection set for two ASTs.
This is part of a test done by the copy function to compute those IR nodes in the copy that still reference the original AST.
*/
ROSE_DLL_API std::vector < SgNode * >astIntersection (SgNode * original, SgNode * copy, SgCopyHelp * help = NULL);
//! Deep copy an arbitrary subtree
ROSE_DLL_API SgNode* deepCopyNode (const SgNode* subtree);
//! A template function for deep copying a subtree. It is also used to create deepcopy functions with specialized parameter and return types. e.g SgExpression* copyExpression(SgExpression* e);
template <typename NodeType>
NodeType* deepCopy (const NodeType* subtree) {
return dynamic_cast<NodeType*>(deepCopyNode(subtree));
}
//! Deep copy an expression
ROSE_DLL_API SgExpression* copyExpression(SgExpression* e);
//!Deep copy a statement
ROSE_DLL_API SgStatement* copyStatement(SgStatement* s);
// from VarSym.cc in src/midend/astOutlining/src/ASTtools
//! Get the variable symbol for the first initialized name of a declaration stmt.
ROSE_DLL_API SgVariableSymbol* getFirstVarSym (SgVariableDeclaration* decl);
//! Get the first initialized name of a declaration statement
ROSE_DLL_API SgInitializedName* getFirstInitializedName (SgVariableDeclaration* decl);
//! A special purpose statement removal function, originally from inlinerSupport.h, Need Jeremiah's attention to refine it. Please don't use it for now.
ROSE_DLL_API void myRemoveStatement(SgStatement* stmt);
ROSE_DLL_API bool isConstantTrue(SgExpression* e);
ROSE_DLL_API bool isConstantFalse(SgExpression* e);
ROSE_DLL_API bool isCallToParticularFunction(SgFunctionDeclaration* decl, SgExpression* e);
ROSE_DLL_API bool isCallToParticularFunction(const std::string& qualifiedName, size_t arity, SgExpression* e);
//! Check if a declaration has a "static' modifier
bool ROSE_DLL_API isStatic(SgDeclarationStatement* stmt);
//! Set a declaration as static
ROSE_DLL_API void setStatic(SgDeclarationStatement* stmt);
//! Check if a declaration has an "extern" modifier
ROSE_DLL_API bool isExtern(SgDeclarationStatement* stmt);
//! Set a declaration as extern
ROSE_DLL_API void setExtern(SgDeclarationStatement* stmt);
//! Interface for creating a statement whose computation writes its answer into
//! a given variable.
class StatementGenerator {
public:
// Virtual destructor so concrete generators can be deleted through a base-class pointer.
virtual ~StatementGenerator() {};
//! Generate a statement whose computation writes its answer into the
//! expression given by where_to_write_answer (e.g. a variable reference).
virtual SgStatement* generate(SgExpression* where_to_write_answer) = 0;
};
//! Check if a SgNode _s is an assignment statement (any of =,+=,-=,&=,/=, ^=, etc)
//!
//! Return the left hand, right hand expressions and if the left hand variable is also being read
bool isAssignmentStatement(SgNode* _s, SgExpression** lhs=NULL, SgExpression** rhs=NULL, bool* readlhs=NULL);
//! Variable references can be introduced by SgVarRef, SgPntrArrRefExp, SgInitializedName, SgMemberFunctionRef etc. For Dot and Arrow Expressions, their lhs is used to obtain SgInitializedName (coarse grain) by default. Otherwise, fine-grain rhs is used.
ROSE_DLL_API SgInitializedName* convertRefToInitializedName(SgNode* current, bool coarseGrain=true);
//! Build an abstract handle from an AST node, reuse previously built handle when possible
ROSE_DLL_API AbstractHandle::abstract_handle* buildAbstractHandle(SgNode*);
//! Obtain a matching SgNode from an abstract handle string
ROSE_DLL_API SgNode* getSgNodeFromAbstractHandleString(const std::string& input_string);
//! Dump information about a SgNode for debugging
ROSE_DLL_API void dumpInfo(SgNode* node, std::string desc="");
//! Reorder a list of declaration statements based on their appearance order in source files
ROSE_DLL_API std::vector<SgDeclarationStatement*>
sortSgNodeListBasedOnAppearanceOrderInSource(const std::vector<SgDeclarationStatement*>& nodevec);
// DQ (4/13/2013): We need these to support the unparing of operators defined by operator syntax or member function names.
//! Is an overloaded operator a prefix operator (e.g. address operator X * operator&(), dereference operator X & operator*(), unary plus operator X & operator+(), etc.
// bool isPrefixOperator( const SgMemberFunctionRefExp* memberFunctionRefExp );
bool isPrefixOperator( SgExpression* exp );
//! Check for proper names of possible prefix operators (used in isPrefixOperator()).
bool isPrefixOperatorName( const SgName & functionName );
//! Is an overloaded operator a postfix operator. (e.g. ).
bool isPostfixOperator( SgExpression* exp );
//! Is an overloaded operator an index operator (also referred to as call or subscript operators). (e.g. X & operator()() or X & operator[]()).
bool isIndexOperator( SgExpression* exp );
// DQ (1/10/2014): Adding more general support for token based unparsing.
//! Used to support token unparsing (when the output the trailing token sequence).
SgStatement* lastStatementOfScopeWithTokenInfo (SgScopeStatement* scope, std::map<SgNode*,TokenStreamSequenceToNodeMapping*> & tokenStreamSequenceMap);
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST properties
\brief version, language properties of current AST.
*/
// std::string version(); // utility_functions.h, version number
/*! \brief These traverse the memory pool of SgFile IR nodes and determine which languages are in use.
*/
ROSE_DLL_API bool is_C_language ();
ROSE_DLL_API bool is_OpenMP_language ();
ROSE_DLL_API bool is_UPC_language ();
//! Check if dynamic threads compilation is used for UPC programs
ROSE_DLL_API bool is_UPC_dynamic_threads();
ROSE_DLL_API bool is_C99_language ();
ROSE_DLL_API bool is_Cxx_language ();
ROSE_DLL_API bool is_Java_language ();
ROSE_DLL_API bool is_Fortran_language ();
ROSE_DLL_API bool is_CAF_language ();
ROSE_DLL_API bool is_PHP_language();
ROSE_DLL_API bool is_Python_language();
ROSE_DLL_API bool is_Cuda_language();
ROSE_DLL_API bool is_OpenCL_language();
ROSE_DLL_API bool is_X10_language();
ROSE_DLL_API bool is_binary_executable();
ROSE_DLL_API bool is_mixed_C_and_Cxx_language ();
ROSE_DLL_API bool is_mixed_Fortran_and_C_language ();
ROSE_DLL_API bool is_mixed_Fortran_and_Cxx_language ();
ROSE_DLL_API bool is_mixed_Fortran_and_C_and_Cxx_language ();
//@}
//------------------------------------------------------------------------
//@{
/*! @name Scope
\brief
*/
// DQ (10/5/2006): Added support for faster (non-quadratic) computation of unique
// labels for scopes in a function (as required for name mangling).
/*! \brief Assigns unique numbers to each SgScopeStatement of a function.
This is used to provide unique names for variables and types defined in
different nested scopes of a function (used in mangled name generation).
*/
void resetScopeNumbers (SgFunctionDefinition * functionDeclaration);
// DQ (10/5/2006): Added support for faster (non-quadratic) computation of unique
// labels for scopes in a function (as required for name mangling).
/*! \brief Clears the cache of scope,integer pairs for the input function.
This is used to clear the cache of computed unique labels for scopes in a function.
This function should be called after any transformation on a function that might affect
the allocation of scopes and cause the existing unique numbers to be incorrect.
This is part of support to provide unique names for variables and types defined in
different nested scopes of a function (used in mangled name generation).
*/
void clearScopeNumbers (SgFunctionDefinition * functionDefinition);
//!Find the enclosing namespace of a declaration
SgNamespaceDefinitionStatement * enclosingNamespaceScope (SgDeclarationStatement * declaration);
// SgNamespaceDefinitionStatement * getEnclosingNamespaceScope (SgNode * node);
bool isPrototypeInScope (SgScopeStatement * scope,
SgFunctionDeclaration * functionDeclaration,
SgDeclarationStatement * startingAtDeclaration);
//!check if node1 is a strict ancestor of node 2. (a node is not considered its own ancestor)
bool ROSE_DLL_API isAncestor(SgNode* node1, SgNode* node2);
//@}
//------------------------------------------------------------------------
//@{
/*! @name Preprocessing Information
\brief #if-#else-#end, comments, #include, etc
*/
//! Dumps a located node's preprocessing information.
void dumpPreprocInfo (SgLocatedNode* locatedNode);
//! Insert #include "filename" or #include <filename> (system header) onto the global scope of a source file, added as the last #include among existing headers by default, or as the first header. Recommended for use.
PreprocessingInfo * insertHeader(SgSourceFile * source_file, const std::string & header_file_name, bool isSystemHeader, bool asLastHeader);
//! Insert a new header right before stmt, if there are existing headers attached to stmt, insert it as the last or first header as specified by asLastHeader
void insertHeader (SgStatement* stmt, PreprocessingInfo* newheader, bool asLastHeader);
//! Insert #include "filename" or #include <filename> (system header) onto the global scope of a source file
PreprocessingInfo * insertHeader(SgSourceFile * source_file, const std::string & header_file_name, bool isSystemHeader = false, PreprocessingInfo::RelativePositionType position = PreprocessingInfo::before);
//! Insert #include "filename" or #include <filename> (system header) into the global scope containing the current scope, right after other #include XXX.
ROSE_DLL_API PreprocessingInfo* insertHeader(const std::string& filename, PreprocessingInfo::RelativePositionType position=PreprocessingInfo::after, bool isSystemHeader=false, SgScopeStatement* scope=NULL);
//! Identical to movePreprocessingInfo(), except for the stale name and confusing order of parameters. It will be deprecated soon.
ROSE_DLL_API void moveUpPreprocessingInfo (SgStatement* stmt_dst, SgStatement* stmt_src, PreprocessingInfo::RelativePositionType src_position=PreprocessingInfo::undef, PreprocessingInfo::RelativePositionType dst_position=PreprocessingInfo::undef, bool usePrepend= false);
//! Move preprocessing information of stmt_src to stmt_dst, Only move preprocessing information from the specified source-relative position to a specified target position, otherwise move all preprocessing information with position information intact. The preprocessing information is appended to the existing preprocessing information list of the target node by default. Prepending is used if usePreprend is set to true. Optionally, the relative position can be adjust after the moving using dst_position.
ROSE_DLL_API void movePreprocessingInfo (SgStatement* stmt_src, SgStatement* stmt_dst, PreprocessingInfo::RelativePositionType src_position=PreprocessingInfo::undef,
PreprocessingInfo::RelativePositionType dst_position=PreprocessingInfo::undef, bool usePrepend= false);
//!Cut preprocessing information from a source node and save it into a buffer. Used in combination of pastePreprocessingInfo(). The cut-paste operation is similar to moveUpPreprocessingInfo() but it is more flexible in that the destination node can be unknown during the cut operation.
ROSE_DLL_API void cutPreprocessingInfo (SgLocatedNode* src_node, PreprocessingInfo::RelativePositionType pos, AttachedPreprocessingInfoType& save_buf);
//!Paste preprocessing information from a buffer to a destination node. Used in combination of cutPreprocessingInfo()
ROSE_DLL_API void pastePreprocessingInfo (SgLocatedNode* dst_node, PreprocessingInfo::RelativePositionType pos, AttachedPreprocessingInfoType& saved_buf);
//! Attach an arbitrary string to a located node. A workaround to insert irregular statements or vendor-specific attributes.
ROSE_DLL_API PreprocessingInfo* attachArbitraryText(SgLocatedNode* target,
const std::string & text,
PreprocessingInfo::RelativePositionType position=PreprocessingInfo::before);
//!Check if a pragma declaration node has macro calls attached, if yes, replace macro calls within the pragma string with expanded strings. This only works if -rose:wave is turned on.
ROSE_DLL_API void replaceMacroCallsWithExpandedStrings(SgPragmaDeclaration* target);
//@}
//! Build and attach comment onto the global scope of a source file
PreprocessingInfo* attachComment(
SgSourceFile * source_file,
const std::string & content,
PreprocessingInfo::DirectiveType directive_type = PreprocessingInfo::C_StyleComment,
PreprocessingInfo::RelativePositionType position = PreprocessingInfo::before
);
//! Build and attach comment, comment style is inferred from the language type of the target node if not provided
ROSE_DLL_API PreprocessingInfo* attachComment(SgLocatedNode* target, const std::string & content,
PreprocessingInfo::RelativePositionType position=PreprocessingInfo::before,
PreprocessingInfo::DirectiveType dtype= PreprocessingInfo::CpreprocessorUnknownDeclaration);
// DQ (11/25/2009): Added matching support for adding comments to SgAsm nodes.
// Build and attach comment
// void attachComment(SgAsmStatement* target, const std::string & content );
// DQ (7/20/2008): I am not clear were I should put this function, candidates include: SgLocatedNode or SgInterface
//! Add a string to be unparsed to support code generation for back-end specific tools or compilers.
ROSE_DLL_API void addTextForUnparser ( SgNode* astNode, std::string s, AstUnparseAttribute::RelativePositionType inputlocation );
/**
* Add preproccessor guard around a given node.
* It surrounds the node with "#if guard" and "#endif"
*/
void guardNode(SgLocatedNode * target, std::string guard);
//@}
//------------------------------------------------------------------------
//@{
/*! @name Source File Position
\brief set Sg_File_Info for a SgNode
*/
// ************************************************************************
// Newer versions of now deprecated functions
// ************************************************************************
// DQ (5/1/2012): This function queries the SageBuilder::SourcePositionClassification mode (stored in the SageBuilder
// interface) and used the specified mode to initialize the source position data (Sg_File_Info objects). This
// function is the only function that should be called directly (though in a namespace we can't define permissions).
//! Set the source code positon for the current (input) node.
ROSE_DLL_API void setSourcePosition(SgNode* node);
// A better name might be "setSourcePositionForSubTree"
//! Set the source code positon for the subtree (including the root).
ROSE_DLL_API void setSourcePositionAtRootAndAllChildren(SgNode *root);
//! DQ (5/1/2012): New function with improved name.
void setSourcePositionAsTransformation(SgNode *node);
// DQ (5/1/2012): Newly renamed function (previous name preserved for backward compatibility).
void setSourcePositionPointersToNull(SgNode *node);
// ************************************************************************
// ************************************************************************
// Older deprecated functions
// ************************************************************************
// Liao, 1/8/2007, set file info. for a whole subtree as transformation generated
//! Set current node's source position as transformation generated
ROSE_DLL_API void setOneSourcePositionForTransformation(SgNode *node);
//! Set current node's source position as NULL
ROSE_DLL_API void setOneSourcePositionNull(SgNode *node);
//! Recursively set source position info(Sg_File_Info) as transformation generated
ROSE_DLL_API void setSourcePositionForTransformation (SgNode * root);
//! Set source position info(Sg_File_Info) as transformation generated for all SgNodes in memory pool
// ROSE_DLL_API void setSourcePositionForTransformation_memoryPool();
//! Check if a node is from a system header file
ROSE_DLL_API bool insideSystemHeader (SgLocatedNode* node);
//! Set the source position of SgLocatedNode to Sg_File_Info::generateDefaultFileInfo(). These nodes WILL be unparsed. Not for transformation usage.
// ROSE_DLL_API void setSourcePosition (SgLocatedNode * locatedNode);
// ************************************************************************
//@}
//------------------------------------------------------------------------
//@{
/*! @name Data types
\brief
*/
// from src/midend/astInlining/typeTraits.h
// src/midend/astUtil/astInterface/AstInterface.h
//! Get the right bool type according to C or C++ language input
SgType* getBoolType(SgNode* n);
//! Check if a type is an integral type, only allowing signed/unsigned short, int, long, long long.
//!
//! There is another similar function named SgType::isIntegerType(), which allows additional types char, wchar, and bool to be treated as integer types
ROSE_DLL_API bool isStrictIntegerType(SgType* t);
//!Get the data type of the first initialized name of a declaration statement
ROSE_DLL_API SgType* getFirstVarType(SgVariableDeclaration* decl);
//! Is a type default constructible? This may not quite work properly.
ROSE_DLL_API bool isDefaultConstructible(SgType* type);
//! Is a type copy constructible? This may not quite work properly.
ROSE_DLL_API bool isCopyConstructible(SgType* type);
//! Is a type assignable? This may not quite work properly.
ROSE_DLL_API bool isAssignable(SgType* type);
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
//! Check if a class type is a pure virtual class. True means that there is at least
//! one pure virtual function that has not been overridden.
//! In the case of an incomplete class type (forward declaration), this function returns false.
ROSE_DLL_API bool isPureVirtualClass(SgType* type, const ClassHierarchyWrapper& classHierarchy);
#endif
//! Does a type have a trivial (built-in) destructor?
ROSE_DLL_API bool hasTrivialDestructor(SgType* t);
//! Is this type a non-constant reference type? (Handles typedefs correctly)
ROSE_DLL_API bool isNonconstReference(SgType* t);
//! Is this type a const or non-const reference type? (Handles typedefs correctly)
ROSE_DLL_API bool isReferenceType(SgType* t);
//! Is this type a pointer type? (Handles typedefs correctly)
ROSE_DLL_API bool isPointerType(SgType* t);
//! Is this a pointer to a non-const type? Note that this function will return true for const pointers pointing to
//! non-const types. For example, (int* const y) points to a modifiable int, so this function returns true. Meanwhile,
//! it returns false for (int const * x) and (int const * const x) because these types point to a const int.
//! Also, only the outer layer of nested pointers is unwrapped. So the function returns true for (const int ** y), but returns
//! false for const (int * const * x)
ROSE_DLL_API bool isPointerToNonConstType(SgType* type);
//! Is this a const type?
/* const char* p = "aa"; is not treated as having a const type. It is a pointer to const char.
* Similarly, neither for const int b[10]; or const int & c =10;
* The standard says, "A compound type is not cv-qualified by the cv-qualifiers (if any) of
the types from which it is compounded. Any cv-qualifiers applied to an array type affect the array element type, not the array type".
*/
ROSE_DLL_API bool isConstType(SgType* t);
//! Remove const (if present) from a type. stripType() cannot do this because it removes all modifiers.
SgType* removeConst(SgType* t);
//! Is this a volatile type?
ROSE_DLL_API bool isVolatileType(SgType* t);
//! Is this a restrict type?
ROSE_DLL_API bool isRestrictType(SgType* t);
//! Is this a scalar type?
/*! We define the following SgType as scalar types: char, short, int, long , void, Wchar, Float, double, long long, string, bool, complex, imaginary
*/
ROSE_DLL_API bool isScalarType(SgType* t);
//! Check if a type is an integral type, only allowing signed/unsigned short, int, long, long long.
//!
//! There is another similar function named SgType::isIntegerType(), which allows additional types char, wchar, and bool.
ROSE_DLL_API bool isStrictIntegerType(SgType* t);
//! Check if a type is a struct type (a special SgClassType in ROSE)
ROSE_DLL_API bool isStructType(SgType* t);
//! Generate a mangled string for a given type based on Itanium C++ ABI
ROSE_DLL_API std::string mangleType(SgType* type);
//! Generate mangled scalar type names according to Itanium C++ ABI, the input type should pass isScalarType() in ROSE
ROSE_DLL_API std::string mangleScalarType(SgType* type);
//! Generated mangled modifier types, include const, volatile,according to Itanium C++ ABI, with extension to handle UPC shared types.
ROSE_DLL_API std::string mangleModifierType(SgModifierType* type);
//! Calculate the number of elements of an array type: dim1* dim2*... , assume element count is 1 for int a[]; Strip off THREADS if it is a UPC array.
ROSE_DLL_API size_t getArrayElementCount(SgArrayType* t);
//! Get the number of dimensions of an array type
ROSE_DLL_API int getDimensionCount(SgType* t);
//! Get the element type of an array. It recursively find the base type for multi-dimension array types
ROSE_DLL_API SgType* getArrayElementType(SgType* t);
//! Get the element type of an array, pointer or string, or NULL if not applicable. This function only check one level base type. No recursion.
ROSE_DLL_API SgType* getElementType(SgType* t);
/// \brief returns the array dimensions in an array as defined for arrtype
/// \param arrtype the type of a C/C++ array
/// \return an array that contains an expression indicating each dimension's size.
/// OWNERSHIP of the expressions is TRANSFERED TO the CALLER (which
/// becomes responsible for freeing the expressions).
/// Note, the first entry of the array is a SgNullExpression, iff the
/// first array dimension was not specified.
/// \code
/// int x[] = { 1, 2, 3 };
/// \endcode
/// note, the expression does not have to be a constant
/// \code
/// int x[i*5];
/// \endcode
/// \post return-value.empty() == false
/// \post return-value[*] != NULL (no nullptr in the returned vector)
std::vector<SgExpression*>
get_C_array_dimensions(const SgArrayType& arrtype);
/// \brief returns the array dimensions in an array as defined for arrtype
/// \param arrtype the type of a C/C++ array
/// \param varref a reference to an array variable (the variable of type arrtype)
/// \return an array that contains an expression indicating each dimension's size.
/// OWNERSHIP of the expressions is TRANSFERED TO the CALLER (which
/// becomes responsible for freeing the expressions).
/// If the first array dimension was not specified an expression
/// that indicates that size is generated.
/// \code
/// int x[][3] = { 1, 2, 3, 4, 5, 6 };
/// \endcode
/// the entry for the first dimension will be:
/// \code
/// // 3 ... size of 2nd dimension
/// sizeof(x) / (sizeof(int) * 3)
/// \endcode
/// \pre arrtype is the array-type of varref
/// \post return-value.empty() == false
/// \post return-value[*] != NULL (no nullptr in the returned vector)
/// \post !isSgNullExpression(return-value[*])
std::vector<SgExpression*>
get_C_array_dimensions(const SgArrayType& arrtype, const SgVarRefExp& varref);
/// \overload
/// \note see get_C_array_dimensions for SgVarRefExp for details.
/// \todo make initname const
std::vector<SgExpression*>
get_C_array_dimensions(const SgArrayType& arrtype, SgInitializedName& initname);
//! Check if an expression is an array access (SgPntrArrRefExp). If so, return its name expression and subscripts if requested. Users can use convertRefToInitializedName() to get the possible name. It does not check if the expression is a top level SgPntrArrRefExp.
ROSE_DLL_API bool isArrayReference(SgExpression* ref, SgExpression** arrayNameExp=NULL, std::vector<SgExpression*>** subscripts=NULL);
//! Collect variable references in array types. The default NodeQuery::querySubTree() will miss variables referenced in array type's index list. e.g. double *buffer = new double[numItems] ;
ROSE_DLL_API int collectVariableReferencesInArrayTypes (SgLocatedNode* root, Rose_STL_Container<SgNode*> & currentVarRefList);
//! Has a UPC shared type of any kinds (shared-to-shared, private-to-shared, shared-to-private, shared scalar/array)? An optional parameter, mod_type_out, stores the first SgModifierType with UPC access information.
/*!
* Note: we classify private-to-shared as 'has shared' type for convenience here. It is indeed a private type in strict sense.
AST graph for some examples:
- shared scalar: SgModifierType -->base type
- shared array: SgArrayType --> SgModiferType --> base type
- shared to shared: SgModifierType --> SgPointerType --> SgModifierType ->SgTypeInt
- shared to private: SgModifierType --> SgPointerType --> base type
- private to shared: SgPointerType --> SgModifierType --> base type
*/
ROSE_DLL_API bool hasUpcSharedType(SgType* t, SgModifierType ** mod_type_out = NULL );
//! Check if a type is a UPC shared type, including shared array, shared pointers etc. Exclude private pointers to shared types. Optionally return the modifier type with the UPC shared property.
/*!
* ROSE uses SgArrayType of SgModifierType to represent shared arrays, not SgModifierType points to SgArrayType. Also typedef may cause a chain of nodes before reach the actual SgModifierType with UPC shared property.
*/
ROSE_DLL_API bool isUpcSharedType(SgType* t, SgModifierType ** mod_type_out = NULL);
//! Check if a modifier type is a UPC shared type.
ROSE_DLL_API bool isUpcSharedModifierType (SgModifierType* mod_type);
//! Check if an array type is a UPC shared type. ROSE AST represents a UPC shared array as regular array of elements of UPC shared Modifier Type. Not directly a UPC shared Modifier Type of an array.
ROSE_DLL_API bool isUpcSharedArrayType (SgArrayType* array_type);
//! Check if a shared UPC type is strict memory consistency or not. Return false if it is relaxed. (So isUpcRelaxedSharedModifierType() is not necessary.)
ROSE_DLL_API bool isUpcStrictSharedModifierType(SgModifierType* mode_type);
//! Get the block size of a UPC shared modifier type
ROSE_DLL_API size_t getUpcSharedBlockSize(SgModifierType* mod_type);
//! Get the block size of a UPC shared type, including Modifier types and array of modifier types (shared arrays)
ROSE_DLL_API size_t getUpcSharedBlockSize(SgType* t);
//! Is UPC phase-less shared type? Phase-less means block size of the first SgModifierType with UPC information is 1 or 0/unspecified. Also return false if the type is not a UPC shared type.
ROSE_DLL_API bool isUpcPhaseLessSharedType (SgType* t);
//! Is a UPC private-to-shared pointer? SgPointerType comes first compared to SgModifierType with UPC information. Input type must be any of UPC shared types first.
ROSE_DLL_API bool isUpcPrivateToSharedType(SgType* t);
//! Is a UPC array with dimension of X*THREADS
ROSE_DLL_API bool isUpcArrayWithThreads(SgArrayType* t);
//! Lookup a named type based on its name, bottom-up searching from a specified scope. Note name collision might be allowed for C (not C++) between typedef and enum/struct. Only the first matched named type will be returned in this case. typedef is returned as it is, not the base type it actually refers to.
ROSE_DLL_API SgType* lookupNamedTypeInParentScopes(const std::string& type_name, SgScopeStatement* scope=NULL);
// DQ (7/22/2014): Added support for comparing expression types in actual arguments with those expected from the formal function parameter types.
//! Get the type of the associated argument expression from the function type.
ROSE_DLL_API SgType* getAssociatedTypeFromFunctionTypeList(SgExpression* actual_argument_expression);
//! Verify that 2 SgTemplateArgument are equivalent (same type, same expression, or same template declaration)
ROSE_DLL_API bool templateArgumentEquivalence(SgTemplateArgument * arg1, SgTemplateArgument * arg2);
//! Verify that 2 SgTemplateArgumentPtrList are equivalent.
ROSE_DLL_API bool templateArgumentListEquivalence(const SgTemplateArgumentPtrList & list1, const SgTemplateArgumentPtrList & list2);
//! Test for equivalence of types independent of access permissions (private or protected modes for members of classes).
ROSE_DLL_API bool isEquivalentType (const SgType* lhs, const SgType* rhs);
//! Test if two types are equivalent SgFunctionType nodes. This is necessary for template function types
//! They may differ in one SgTemplateType pointer but identical otherwise.
ROSE_DLL_API bool isEquivalentFunctionType (const SgFunctionType* lhs, const SgFunctionType* rhs);
//@}
//------------------------------------------------------------------------
//@{
/*! @name Loop handling
\brief
*/
// by Jeremiah
//! Add a step statement to the end of a loop body
//! Add a new label to the end of the loop, with the step statement after
//! it; then change all continue statements in the old loop body into
//! jumps to the label
//!
//! For example:
//! while (a < 5) {if (a < -3) continue;} (adding "a++" to end) becomes
//! while (a < 5) {if (a < -3) goto label; label: a++;}
ROSE_DLL_API void addStepToLoopBody(SgScopeStatement* loopStmt, SgStatement* step);
ROSE_DLL_API void moveForStatementIncrementIntoBody(SgForStatement* f);
ROSE_DLL_API void convertForToWhile(SgForStatement* f);
ROSE_DLL_API void convertAllForsToWhiles(SgNode* top);
//! Change continue statements in a given block of code to gotos to a label
ROSE_DLL_API void changeContinuesToGotos(SgStatement* stmt, SgLabelStatement* label);
//!Return the loop index variable for a for loop
ROSE_DLL_API SgInitializedName* getLoopIndexVariable(SgNode* loop);
//!Check if a SgInitializedName is used as a loop index within a AST subtree
//! This function will use a bottom-up traverse starting from the subtree_root to find all enclosing loops and check if ivar is used as an index for either of them.
ROSE_DLL_API bool isLoopIndexVariable(SgInitializedName* ivar, SgNode* subtree_root);
//! Check if a for loop uses C99 style initialization statement with multiple expressions like for (int i=0, j=0; ..) or for (i=0,j=0;...)
/*!
for (int i=0, j=0; ..) is stored as two variable declarations under SgForInitStatement's init_stmt member
for (i=0,j=0;...) is stored as a single expression statement, with comma expression (i=0,j=0).
*/
ROSE_DLL_API bool hasMultipleInitStatmentsOrExpressions (SgForStatement* for_loop);
//! Routines to get and set the body of a loop
ROSE_DLL_API SgStatement* getLoopBody(SgScopeStatement* loop);
ROSE_DLL_API void setLoopBody(SgScopeStatement* loop, SgStatement* body);
//! Routines to get the condition of a loop. It recognize While-loop, For-loop, and Do-While-loop
ROSE_DLL_API SgStatement* getLoopCondition(SgScopeStatement* loop);
//! Set the condition statement of a loop, including While-loop, For-loop, and Do-While-loop.
ROSE_DLL_API void setLoopCondition(SgScopeStatement* loop, SgStatement* cond);
//! Check if a for-loop has a canonical form, return loop index, bounds, step, and body if requested
//!
//! A canonical form is defined as : one initialization statement, a test expression, and an increment expression , loop index variable should be of an integer type. IsInclusiveUpperBound is true when <= or >= is used for loop condition
ROSE_DLL_API bool isCanonicalForLoop(SgNode* loop, SgInitializedName** ivar=NULL, SgExpression** lb=NULL, SgExpression** ub=NULL, SgExpression** step=NULL, SgStatement** body=NULL, bool *hasIncrementalIterationSpace = NULL, bool* isInclusiveUpperBound = NULL);
//! Check if a Fortran Do loop has a complete canonical form: Do I=1, 10, 1
ROSE_DLL_API bool isCanonicalDoLoop(SgFortranDo* loop,SgInitializedName** ivar/*=NULL*/, SgExpression** lb/*=NULL*/, SgExpression** ub/*=NULL*/, SgExpression** step/*=NULL*/, SgStatement** body/*=NULL*/, bool *hasIncrementalIterationSpace/*= NULL*/, bool* isInclusiveUpperBound/*=NULL*/);
//! Set the lower bound of a loop header for (i=lb; ...)
ROSE_DLL_API void setLoopLowerBound(SgNode* loop, SgExpression* lb);
//! Set the upper bound of a loop header,regardless the condition expression type. for (i=lb; i op up, ...)
ROSE_DLL_API void setLoopUpperBound(SgNode* loop, SgExpression* ub);
//! Set the stride(step) of a loop 's incremental expression, regardless the expression types (i+=s; i= i+s, etc)
ROSE_DLL_API void setLoopStride(SgNode* loop, SgExpression* stride);
//! Normalize loop init stmt by promoting the single variable declaration statement outside of the for loop header's init statement, e.g. for (int i=0;) becomes int i_x; for (i_x=0;..) and rewrite the loop with the new index variable, if necessary
ROSE_DLL_API bool normalizeForLoopInitDeclaration(SgForStatement* loop);
//! Undo the normalization of for loop's C99 init declaration. Previous record of normalization is used to ease the reverse transformation.
ROSE_DLL_API bool unnormalizeForLoopInitDeclaration(SgForStatement* loop);
//! Normalize a for loop, return true if successful. Generated constants will be fold by default.
//!
//! Translations are :
//! For the init statement: for (int i=0;... ) becomes int i; for (i=0;..)
//! For test expression:
//! i<x is normalized to i<= (x-1) and
//! i>x is normalized to i>= (x+1)
//! For increment expression:
//! i++ is normalized to i+=1 and
//! i-- is normalized to i+=-1
//! i-=s is normalized to i+= -s
ROSE_DLL_API bool forLoopNormalization(SgForStatement* loop, bool foldConstant = true);
//! Normalize a for loop's test expression
//! i<x is normalized to i<= (x-1) and
//! i>x is normalized to i>= (x+1)
ROSE_DLL_API bool normalizeForLoopTest(SgForStatement* loop);
ROSE_DLL_API bool normalizeForLoopIncrement(SgForStatement* loop);
//!Normalize a Fortran Do loop. Make the default increment expression (1) explicit
ROSE_DLL_API bool doLoopNormalization(SgFortranDo* loop);
//! Unroll a target loop with a specified unrolling factor. It handles steps larger than 1 and adds a fringe loop if the iteration count is not evenly divisible by the unrolling factor.
ROSE_DLL_API bool loopUnrolling(SgForStatement* loop, size_t unrolling_factor);
//! Interchange/permutate a n-level perfectly-nested loop rooted at 'loop' using a lexicographical order number within (0,depth!).
ROSE_DLL_API bool loopInterchange(SgForStatement* loop, size_t depth, size_t lexicoOrder);
//! Tile the n-level (starting from 1) loop of a perfectly nested loop nest using tiling size s
ROSE_DLL_API bool loopTiling(SgForStatement* loopNest, size_t targetLevel, size_t tileSize);
//Winnie Loop Collapsing
SgExprListExp * loopCollapsing(SgForStatement* target_loop, size_t collapsing_factor);
bool getForLoopInformations(
SgForStatement * for_loop,
SgVariableSymbol * & iterator,
SgExpression * & lower_bound,
SgExpression * & upper_bound,
SgExpression * & stride
);
//@}
//------------------------------------------------------------------------
//@{
/*! @name Topdown search
\brief Top-down traversal from current node to find a node of a specified type
*/
//! Query a subtree to get all nodes of a given type, with an appropriate downcast.
template <typename NodeType>
std::vector<NodeType*> querySubTree(SgNode* top, VariantT variant = (VariantT)NodeType::static_variant)
{
  // Run the variant-restricted query, then downcast each hit to the requested type.
  Rose_STL_Container<SgNode*> matches = NodeQuery::querySubTree(top, variant);
  std::vector<NodeType*> typedNodes;
  typedNodes.reserve(matches.size());
  for (Rose_STL_Container<SgNode*>::const_iterator it = matches.begin(); it != matches.end(); ++it)
  {
    NodeType* casted = dynamic_cast<NodeType*>(*it);
    ROSE_ASSERT(casted); // the query guarantees nodes of the requested variant
    typedNodes.push_back(casted);
  }
  return typedNodes;
}
/*! \brief Returns STL vector of SgFile IR node pointers.
Demonstrates use of restricted traversal over just SgFile IR nodes.
*/
std::vector < SgFile * >generateFileList ();
/** Get the current SgProject IR Node.
*
* The library should never have more than one project and it asserts such. If no project has been created yet then this
* function returns the null pointer. */
ROSE_DLL_API SgProject * getProject();
//! \return the project associated with a node
SgProject * getProject(const SgNode * node);
//! Query memory pools to grab SgNode of a specified type
template <typename NodeType>
static std::vector<NodeType*> getSgNodeListFromMemoryPool()
{
  // Local traversal class: visits the memory pool of NodeType and records every node seen.
  class PoolCollector : public ROSE_VisitTraversal
  {
  public:
    std::vector<NodeType*> collected;
    void visit(SgNode* node)
    {
      NodeType* typedNode = dynamic_cast<NodeType*>(node);
      ROSE_ASSERT(typedNode != NULL); // the pool traversal only yields nodes of this type
      if (typedNode != NULL)
      {
        collected.push_back(typedNode);
      }
    }
    virtual ~PoolCollector() {}
  };
  PoolCollector collector;
  NodeType::traverseMemoryPoolNodes(collector);
  return collector.collected;
}
/*! \brief top-down traversal from current node to find the main() function declaration
*/
ROSE_DLL_API SgFunctionDeclaration* findMain(SgNode* currentNode);
//! Find the last declaration statement within a scope (if any). This is often useful to decide where to insert another variable declaration statement. Pragma declarations are not treated as a declaration by default in this context.
SgStatement* findLastDeclarationStatement(SgScopeStatement * scope, bool includePragma = false);
//midend/programTransformation/partialRedundancyElimination/pre.h
//! Find referenced symbols within an expression
std::vector<SgVariableSymbol*> getSymbolsUsedInExpression(SgExpression* expr);
//! Find break statements inside a particular statement, stopping at nested loops or switches
/*! loops or switch statements defines their own contexts for break
statements. The function will stop immediately if run on a loop or switch
statement. If fortranLabel is non-empty, breaks (EXITs) to that label within
nested loops are included in the returned list.
*/
std::vector<SgBreakStmt*> findBreakStmts(SgStatement* code, const std::string& fortranLabel = "");
//! Find all continue statements inside a particular statement, stopping at nested loops
/*! Nested loops define their own contexts for continue statements. The
function will stop immediately if run on a loop
statement. If fortranLabel is non-empty, continues (CYCLEs) to that label
within nested loops are included in the returned list.
*/
std::vector<SgContinueStmt*> findContinueStmts(SgStatement* code, const std::string& fortranLabel = "");
std::vector<SgGotoStatement*> findGotoStmts(SgStatement* scope, SgLabelStatement* l);
std::vector<SgStatement*> getSwitchCases(SgSwitchStatement* sw);
//! Collect all variable references in a subtree
void collectVarRefs(SgLocatedNode* root, std::vector<SgVarRefExp* >& result);
//! Topdown traverse a subtree from root to find the first declaration given its name, scope (optional, can be NULL), and defining or nondefining flag.
template <typename T>
T* findDeclarationStatement(SgNode* root, std::string name, SgScopeStatement* scope, bool isDefining)
{
// Preorder (top-down) recursive search: test the root node itself first, then
// recurse into its traversal successors. Returns the first match, or NULL.
bool found = false;
#if 0
printf ("In findDeclarationStatement(): root = %p \n",root);
printf ("In findDeclarationStatement(): name = %s \n",name.c_str());
printf ("In findDeclarationStatement(): scope = %p \n",scope);
printf ("In findDeclarationStatement(): isDefining = %s \n",isDefining ? "true" : "false");
#endif
// Do we really want a NULL pointer to be acceptable input to this function?
// Maybe we should have an assertion that it is non-null?
if (!root) return NULL;
// Is the current node itself a declaration of the requested type T?
T* decl = dynamic_cast<T*>(root);
#if 0
printf ("In findDeclarationStatement(): decl = %p \n",decl);
#endif
if (decl != NULL)
{
if (scope)
{
// A scope was supplied: require both the scope and the symbol name to match.
if ((decl->get_scope() == scope) && (decl->search_for_symbol_from_symbol_table()->get_name() == name))
{
found = true;
}
}
else // Liao 2/9/2010. We should allow NULL scope
{
#if 0
// DQ (12/6/2016): Include this into the debugging code to avoid compiler warning about unused variable.
SgSymbol* symbol = decl->search_for_symbol_from_symbol_table();
printf ("In findDeclarationStatement(): decl->search_for_symbol_from_symbol_table() = %p \n",symbol);
printf ("In findDeclarationStatement(): decl->search_for_symbol_from_symbol_table()->get_name() = %s \n",symbol->get_name().str());
#endif
// No scope restriction: match on the symbol name alone.
if (decl->search_for_symbol_from_symbol_table()->get_name() == name)
{
found = true;
}
}
}
if (found)
{
if (isDefining)
{
#if 0
printf ("In findDeclarationStatement(): decl->get_firstNondefiningDeclaration() = %p \n",decl->get_firstNondefiningDeclaration());
printf ("In findDeclarationStatement(): decl->get_definingDeclaration() = %p \n",decl->get_definingDeclaration());
#endif
// Caller asked for the defining declaration: redirect to it (it must exist).
ROSE_ASSERT (decl->get_definingDeclaration() != NULL);
#if 0
printf ("In findDeclarationStatement(): returing decl->get_definingDeclaration() = %p \n",decl->get_definingDeclaration());
#endif
return dynamic_cast<T*> (decl->get_definingDeclaration());
}
else
{
#if 0
printf ("In findDeclarationStatement(): returing decl = %p \n",decl);
#endif
// Nondefining request: return the declaration exactly as found.
return decl;
}
}
// No match at this node: recurse into the AST children in traversal order.
std::vector<SgNode*> children = root->get_traversalSuccessorContainer();
#if 0
printf ("In findDeclarationStatement(): children.size() = %zu \n",children.size());
#endif
// DQ (4/10/2016): Note that if we are searching for a function member that has its defining
// declaration defined outside of the class then it will not be found in the child list.
for (std::vector<SgNode*>::const_iterator i = children.begin(); i != children.end(); ++i)
{
T* target = findDeclarationStatement<T> (*i,name,scope,isDefining);
if (target)
{
return target;
}
}
// No matching declaration anywhere in this subtree.
return NULL;
}
//! Topdown traverse a subtree from root to find the first function declaration matching the given name, scope (optional, can be NULL), and defining or nondefining flag. This is an instantiation of findDeclarationStatement<T>.
SgFunctionDeclaration* findFunctionDeclaration(SgNode* root, std::string name, SgScopeStatement* scope, bool isDefining);
#if 0 //TODO
// 1. preorder traversal from current SgNode till find next SgNode of type V_SgXXX
// until reach the end node
SgNode* getNextSgNode( const SgNode* astSourceNode, VariantT=V_SgNode, SgNode* astEndNode=NULL);
// 2. return all nodes of type VariantT following the source node
std::vector<SgNode*> getAllNextSgNode( const SgNode* astSourceNode, VariantT=V_SgNode, SgNode* astEndNode=NULL);
#endif
//@}
//------------------------------------------------------------------------
//@{
/*! @name Bottom up search
\brief Backwards traverse through the AST to find a node, findEnclosingXXX()
*/
// remember to put const to all arguments.
/** Find a node by type using upward traversal.
*
* Traverse backward through a specified node's ancestors, starting with the node's parent and progressing to more distant
* ancestors, to find the first node matching the specified or derived type. If @p includingSelf is true then the
* starting node, @p astNode, is returned if its type matches, otherwise the search starts at the parent of @p astNode.
*
* For the purposes of this function, the parent (P) of an SgDeclarationStatement node (N) is considered to be the first
* non-defining declaration of N if N has both a defining declaration and a first non-defining declaration and the defining
* declaration is different than the first non-defining declaration.
*
* If no ancestor of the requisite type of subtypes is found then this function returns a null pointer.
*
* If @p astNode is the null pointer, then the return value is a null pointer. That is, if there is no node, then there cannot
* be an enclosing node of the specified type. */
template <typename NodeType>
NodeType* getEnclosingNode(const SgNode* astNode, const bool includingSelf = false)
{
#if 1
// DQ (10/20/2012): This is the older version of this implementation. Until I am sure that
// the newer version (below) is what we want to use I will resolve this conflict by keeping
// the previous version in place.
// A null starting node has no enclosing node of any type.
if (NULL == astNode)
{
return NULL;
}
// Optionally accept the starting node itself when it already has the requested type.
if ( (includingSelf ) && (dynamic_cast<const NodeType*>(astNode)) )
{
return const_cast<NodeType*>(dynamic_cast<const NodeType*> (astNode));
}
// DQ (3/5/2012): Check for reference to self...
ROSE_ASSERT(astNode->get_parent() != astNode);
SgNode* parent = astNode->get_parent();
// DQ (3/5/2012): Check for loops that will cause infinite loops.
SgNode* previouslySeenParent = parent;
bool foundCycle = false;
// First pass: walk the raw parent chain looking for a NodeType, while detecting
// any cycle in the parent pointers (which would otherwise loop forever).
while ( (foundCycle == false) && (parent != NULL) && (!dynamic_cast<const NodeType*>(parent)) )
{
ROSE_ASSERT(parent->get_parent() != parent);
#if 0
printf ("In getEnclosingNode(): parent = %p = %s \n",parent,parent->class_name().c_str());
#endif
parent = parent->get_parent();
// DQ (3/5/2012): Check for loops that will cause infinite loops.
// ROSE_ASSERT(parent != previouslySeenParent);
if (parent == previouslySeenParent)
{
foundCycle = true;
}
}
#if 0
printf ("previouslySeenParent = %p = %s \n",previouslySeenParent,previouslySeenParent->class_name().c_str());
#endif
// Restart from the first recorded parent for the second pass below.
parent = previouslySeenParent;
// If the restart point is a declaration, chase it to its defining declaration so
// the upward walk follows the defining (rather than nondefining) parent chain.
SgDeclarationStatement* declarationStatement = isSgDeclarationStatement(parent);
if (declarationStatement != NULL)
{
#if 0
printf ("Found a SgDeclarationStatement \n");
#endif
SgDeclarationStatement* definingDeclaration = declarationStatement->get_definingDeclaration();
SgDeclarationStatement* firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
#if 0
printf (" --- declarationStatement = %p \n",declarationStatement);
printf (" --- definingDeclaration = %p \n",definingDeclaration);
if (definingDeclaration != NULL && definingDeclaration->get_parent() != NULL)
printf (" --- definingDeclaration ->get_parent() = %p = %s \n",definingDeclaration->get_parent(),definingDeclaration->get_parent()->class_name().c_str());
printf (" --- firstNondefiningDeclaration = %p \n",firstNondefiningDeclaration);
if (firstNondefiningDeclaration != NULL && firstNondefiningDeclaration->get_parent() != NULL)
printf (" --- firstNondefiningDeclaration ->get_parent() = %p = %s \n",firstNondefiningDeclaration->get_parent(),firstNondefiningDeclaration->get_parent()->class_name().c_str());
#endif
if (definingDeclaration != NULL && declarationStatement != firstNondefiningDeclaration)
{
#if 0
printf ("Found a nondefining declaration so use the non-defining declaration instead \n");
#endif
// DQ (10/19/2012): Use the defining declaration instead.
// parent = firstNondefiningDeclaration;
parent = definingDeclaration;
}
}
#if 0
printf ("reset: previouslySeenParent = %p = %s \n",previouslySeenParent,previouslySeenParent->class_name().c_str());
#endif
// DQ (10/19/2012): This branch is just to document the cycle that was previously detected, it is for
// debugging only. Thus it only makes sense for it to be executed when "(foundCycle == true)". However,
// this will have to be revisited later since it appears clear that it is a problem for the binary analysis
// work when it is visited for this case. Since the cycle is detected, but there is no assertion on the
// cycle, we don't exit when a cycle is identified (which is the point of the code below).
// Note also that I have fixed the code (above and below) to only chase pointers through defining
// declarations (where they exist), this is important since non-defining declarations can be almost
// anywhere (and thus chasing them can make it appear that there are cycles where there are none
// (I think); test2012_234.C demonstrates an example of this.
// DQ (10/9/2012): Robb has suggested this change to fix the binary analysis work.
// if (foundCycle == true)
if (foundCycle == false)
{
// Second pass: walk up again, this time redirecting every nondefining declaration
// to its defining declaration before stepping to the parent.
while ( (parent != NULL) && (!dynamic_cast<const NodeType*>(parent)) )
{
ROSE_ASSERT(parent->get_parent() != parent);
#if 0
printf ("In getEnclosingNode() (2nd try): parent = %p = %s \n",parent,parent->class_name().c_str());
if (parent->get_file_info() != NULL)
parent->get_file_info()->display("In getEnclosingNode() (2nd try): debug");
#endif
SgDeclarationStatement* declarationStatement = isSgDeclarationStatement(parent);
if (declarationStatement != NULL)
{
#if 0
printf ("Found a SgDeclarationStatement \n");
#endif
SgDeclarationStatement* definingDeclaration = declarationStatement->get_definingDeclaration();
SgDeclarationStatement* firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
#if 0
printf (" --- declarationStatement = %p = %s \n",declarationStatement,(declarationStatement != NULL) ? declarationStatement->class_name().c_str() : "null");
printf (" --- definingDeclaration = %p \n",definingDeclaration);
if (definingDeclaration != NULL && definingDeclaration->get_parent() != NULL)
printf (" --- definingDeclaration ->get_parent() = %p = %s \n",definingDeclaration->get_parent(),definingDeclaration->get_parent()->class_name().c_str());
printf (" --- firstNondefiningDeclaration = %p \n",firstNondefiningDeclaration);
if (firstNondefiningDeclaration != NULL && firstNondefiningDeclaration->get_parent() != NULL)
printf (" --- firstNondefiningDeclaration ->get_parent() = %p = %s \n",firstNondefiningDeclaration->get_parent(),firstNondefiningDeclaration->get_parent()->class_name().c_str());
#endif
if (definingDeclaration != NULL && declarationStatement != firstNondefiningDeclaration)
{
#if 0
printf ("Found a nondefining declaration so use the firstNondefining declaration instead \n");
#endif
// DQ (10/19/2012): Use the defining declaration instead.
// parent = firstNondefiningDeclaration;
parent = definingDeclaration;
}
}
parent = parent->get_parent();
#if 1
// DQ (3/5/2012): Check for loops that will cause infinite loops.
ROSE_ASSERT(parent != previouslySeenParent);
#else
printf ("WARNING::WARNING::WARNING commented out assertion for parent != previouslySeenParent \n");
if (parent == previouslySeenParent)
break;
#endif
}
}
// Either a matching ancestor (returned downcast) or NULL when the chain ends.
return const_cast<NodeType*>(dynamic_cast<const NodeType*> (parent));
#else
// DQ (10/20/2012): Using Robb's newer version with my modification to use the definingDeclaration rather than firstNondefiningDeclaration (below).
// Find the parent of specified type, but watch out for cycles in the ancestry (which would cause an infinite loop).
// Cast away const because isSg* functions aren't defined for const node pointers; and our return is not const.
SgNode *node = const_cast<SgNode*>(!astNode || includingSelf ? astNode : astNode->get_parent());
std::set<const SgNode*> seen; // nodes we've seen, in order to detect cycles
while (node) {
if (NodeType *found = dynamic_cast<NodeType*>(node))
return found;
// FIXME: Cycle detection could be moved elsewhere so we don't need to do it on every call. [RPM 2012-10-09]
ROSE_ASSERT(seen.insert(node).second);
// Traverse to parent (declaration statements are a special case)
if (SgDeclarationStatement *declarationStatement = isSgDeclarationStatement(node)) {
SgDeclarationStatement *definingDeclaration = declarationStatement->get_definingDeclaration();
SgDeclarationStatement *firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
if (definingDeclaration && firstNondefiningDeclaration && declarationStatement != firstNondefiningDeclaration) {
// DQ (10/19/2012): Use the defining declaration instead.
// node = firstNondefiningDeclaration;
node = definingDeclaration;
}
} else {
node = node->get_parent();
}
}
return NULL;
#endif
}
//! Find enclosing source file node
ROSE_DLL_API SgSourceFile* getEnclosingSourceFile(SgNode* n, const bool includingSelf=false);
//! Get the closest scope from astNode. Return astNode if it is already a scope.
ROSE_DLL_API SgScopeStatement* getScope(const SgNode* astNode);
//! Get the enclosing scope from a node n
ROSE_DLL_API SgScopeStatement* getEnclosingScope(SgNode* n, const bool includingSelf=false);
//! Traverse back through a node's parents to find the enclosing global scope
ROSE_DLL_API SgGlobal* getGlobalScope( const SgNode* astNode);
//! Find the function definition
ROSE_DLL_API SgFunctionDefinition* getEnclosingProcedure(SgNode* n, const bool includingSelf=false);
ROSE_DLL_API SgFunctionDefinition* getEnclosingFunctionDefinition(SgNode* astNode, const bool includingSelf=false);
//! Find the closest enclosing statement, including the given node
ROSE_DLL_API SgStatement* getEnclosingStatement(SgNode* n);
//! Find the closest switch outside a given statement (normally used for case and default statements)
ROSE_DLL_API SgSwitchStatement* findEnclosingSwitch(SgStatement* s);
//! Find enclosing OpenMP clause body statement from s. If s is already one, return it directly.
ROSE_DLL_API SgOmpClauseBodyStatement* findEnclosingOmpClauseBodyStatement(SgStatement* s);
//! Find the closest loop outside the given statement; if fortranLabel is not empty, the Fortran label of the loop must be equal to it
ROSE_DLL_API SgScopeStatement* findEnclosingLoop(SgStatement* s, const std::string& fortranLabel = "", bool stopOnSwitches = false);
//! Find the enclosing function declaration, including its derived instances like isSgProcedureHeaderStatement, isSgProgramHeaderStatement, and isSgMemberFunctionDeclaration.
ROSE_DLL_API SgFunctionDeclaration * getEnclosingFunctionDeclaration (SgNode * astNode, const bool includingSelf=false);
//roseSupport/utility_functions.h
//! get the SgFile node from current node
ROSE_DLL_API SgFile* getEnclosingFileNode (SgNode* astNode );
//! Get the initializer containing an expression if it is within an initializer.
ROSE_DLL_API SgInitializer* getInitializerOfExpression(SgExpression* n);
//! Get the closest class definition enclosing the specified AST node,
ROSE_DLL_API SgClassDefinition* getEnclosingClassDefinition(SgNode* astnode, const bool includingSelf=false);
// TODO
#if 0
SgNode * getEnclosingSgNode(SgNode* source,VariantT, SgNode* endNode=NULL);
std::vector<SgNode *> getAllEnclosingSgNode(SgNode* source,VariantT, SgNode* endNode=NULL);
SgVariableDeclaration* findVariableDeclaratin( const string& varname)
SgClassDeclaration* getEnclosingClassDeclaration( const SgNode* astNode);
// e.g. for some expression, find its parent statement
SgStatement* getEnclosingStatement(const SgNode* astNode);
SgSwitchStatement* getEnclosingSwitch(SgStatement* s);
SgModuleStatement* getEnclosingModuleStatement( const SgNode* astNode);
// used to build a variable reference for compiler generated code in current scope
SgSymbol * findReachingDefinition (SgScopeStatement* startScope, SgName &name);
#endif
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST Walk and Traversal
\brief
*/
// Liao, 1/9/2008
/*!
\brief return the first global scope under current project
*/
ROSE_DLL_API SgGlobal * getFirstGlobalScope(SgProject *project);
/*!
\brief get the last statement within a scope, return NULL if it does not exist
*/
ROSE_DLL_API SgStatement* getLastStatement(SgScopeStatement *scope);
//! Get the first statement within a scope, return NULL if it does not exist. Skip compiler-generated statement by default. Count transformation-generated ones, but excluding those which are not to be outputted in unparsers.
ROSE_DLL_API SgStatement* getFirstStatement(SgScopeStatement *scope,bool includingCompilerGenerated=false);
//!Find the first defining function declaration statement in a scope
ROSE_DLL_API SgFunctionDeclaration* findFirstDefiningFunctionDecl(SgScopeStatement* scope);
//! Get next statement within the same scope of current statement
ROSE_DLL_API SgStatement* getNextStatement(SgStatement * currentStmt);
//! Get previous statement of the current statement. It may return a previous statement of a parent scope by default (climbOutScope is true), otherwise only a previous statement of the same scope is returned.
ROSE_DLL_API SgStatement* getPreviousStatement(SgStatement * currentStmt, bool climbOutScope = true);
#if 0 //TODO
// preorder traversal from current SgNode till find next SgNode of type V_SgXXX
SgNode* getNextSgNode( const SgNode* currentNode, VariantT=V_SgNode);
#endif
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST Comparison
\brief Compare AST nodes, subtree, etc
*/
//! Check if a SgIntVal node has a given value
ROSE_DLL_API bool isEqualToIntConst(SgExpression* e, int value);
//! Check if two function declarations refer to the same one. Two function declarations are the same when they are a) identical, b) same name in C c) same qualified named and mangled name in C++. A nondefining (prototype) declaration and a defining declaration of a same function are treated as the same.
/*!
* There is a similar function bool compareFunctionDeclarations(SgFunctionDeclaration *f1, SgFunctionDeclaration *f2) from Classhierarchy.C
*/
ROSE_DLL_API bool isSameFunction(SgFunctionDeclaration* func1, SgFunctionDeclaration* func2);
//! Check if a statement is the last statement within its closed scope
ROSE_DLL_API bool isLastStatement(SgStatement* stmt);
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST insert, removal, and replacement
\brief Add, remove,and replace AST
scope->append_statement(), exprListExp->append_expression() etc. are not enough to handle side effect of parent pointers, symbol tables, preprocessing info, defining/nondefining pointers etc.
*/
// DQ (2/24/2009): Simple function to delete an AST subtree (used in outlining).
//! Function to delete AST subtree's nodes only, users must take care of any dangling pointers, symbols or types that result.
ROSE_DLL_API void deleteAST(SgNode* node);
//! Special purpose function for deleting AST expression tress containing valid original expression trees in constant folded expressions (for internal use only).
ROSE_DLL_API void deleteExpressionTreeWithOriginalExpressionSubtrees(SgNode* root);
// DQ (2/25/2009): Added new function to support outliner.
//! Move statements in first block to the second block (preserves order and rebuilds the symbol table).
ROSE_DLL_API void moveStatementsBetweenBlocks ( SgBasicBlock* sourceBlock, SgBasicBlock* targetBlock );
//! Move a variable declaration to a new scope, handle symbol, special scopes like For loop, etc.
ROSE_DLL_API void moveVariableDeclaration(SgVariableDeclaration* decl, SgScopeStatement* target_scope);
//! Append a statement to the end of the current scope, handle side effect of appending statements, e.g. preprocessing info, defining/nondefining pointers etc.
ROSE_DLL_API void appendStatement(SgStatement *stmt, SgScopeStatement* scope=NULL);
//! Append a statement to the end of SgForInitStatement
ROSE_DLL_API void appendStatement(SgStatement *stmt, SgForInitStatement* for_init_stmt);
//! Append a list of statements to the end of the current scope, handle side effect of appending statements, e.g. preprocessing info, defining/nondefining pointers etc.
ROSE_DLL_API void appendStatementList(const std::vector<SgStatement*>& stmt, SgScopeStatement* scope=NULL);
// DQ (2/6/2009): Added function to support outlining into separate file.
//! Append a copy ('decl') of a function ('original_statement') into a 'scope', include any referenced declarations required if the scope is within a compiler generated file. All referenced declarations, including those from headers, are inserted if excludeHeaderFiles is set to true (the new file will not have any headers).
ROSE_DLL_API void appendStatementWithDependentDeclaration( SgDeclarationStatement* decl, SgGlobal* scope, SgStatement* original_statement, bool excludeHeaderFiles );
//! Prepend a statement to the beginning of the current scope, handling side
//! effects as appropriate
ROSE_DLL_API void prependStatement(SgStatement *stmt, SgScopeStatement* scope=NULL);
//! Prepend a statement to the beginning of SgForInitStatement
ROSE_DLL_API void prependStatement(SgStatement *stmt, SgForInitStatement* for_init_stmt);
//! prepend a list of statements to the beginning of the current scope,
//! handling side effects as appropriate
ROSE_DLL_API void prependStatementList(const std::vector<SgStatement*>& stmt, SgScopeStatement* scope=NULL);
//! Check if a scope statement has a simple children statement list
//! so that inserting additional statements under the scope is straightforward and unambiguous.
//! For example, SgBasicBlock has a simple statement list while IfStmt does not.
ROSE_DLL_API bool hasSimpleChildrenList (SgScopeStatement* scope);
//! Insert a statement before or after the target statement within the target's scope. Move around preprocessing info automatically
ROSE_DLL_API void insertStatement(SgStatement *targetStmt, SgStatement* newStmt, bool insertBefore= true, bool autoMovePreprocessingInfo = true);
//! Insert a list of statements before or after the target statement within the
//target's scope
ROSE_DLL_API void insertStatementList(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmts, bool insertBefore= true);
//! Insert a statement before a target statement
ROSE_DLL_API void insertStatementBefore(SgStatement *targetStmt, SgStatement* newStmt, bool autoMovePreprocessingInfo = true);
//! Insert a list of statements before a target statement
ROSE_DLL_API void insertStatementListBefore(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmts);
//! Insert a statement after a target statement, Move around preprocessing info automatically by default
ROSE_DLL_API void insertStatementAfter(SgStatement *targetStmt, SgStatement* newStmt, bool autoMovePreprocessingInfo = true);
//! Insert a list of statements after a target statement
ROSE_DLL_API void insertStatementListAfter(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmt);
//! Insert a statement after the last declaration within a scope. The statement will be prepended to the scope if there is no declaration statement found
ROSE_DLL_API void insertStatementAfterLastDeclaration(SgStatement* stmt, SgScopeStatement* scope);
//! Insert a list of statements after the last declaration within a scope. The statement will be prepended to the scope if there is no declaration statement found
ROSE_DLL_API void insertStatementAfterLastDeclaration(std::vector<SgStatement*> stmt_list, SgScopeStatement* scope);
//! Insert a statement before the first non-declaration statement in a scope. If the scope has no non-declaration statements
// then the statement is inserted at the end of the scope.
ROSE_DLL_API void insertStatementBeforeFirstNonDeclaration(SgStatement *newStmt, SgScopeStatement *scope,
bool movePreprocessingInfo=true);
//! Insert statements before the first non-declaration statement in a scope. If the scope has no non-declaration statements
//then the new statements are inserted at the end of the scope.
ROSE_DLL_API void insertStatementListBeforeFirstNonDeclaration(const std::vector<SgStatement*> &newStmts,
SgScopeStatement *scope);
//! Remove a statement from its attach point of the AST. Automatically keep its associated preprocessing information at the original place after the removal. The statement is still in memory and it is up to the users to decide if the removed one will be inserted somewhere else or released from memory (deleteAST()).
ROSE_DLL_API void removeStatement(SgStatement* stmt, bool autoRelocatePreprocessingInfo = true);
//! Deep delete a sub AST tree. It uses postorder traversal to delete each child node. Users must take care of any dangling pointers, symbols or types that result. This is identical to deleteAST()
ROSE_DLL_API void deepDelete(SgNode* root);
//! Replace a statement with another. Move preprocessing information from oldStmt to newStmt if requested.
ROSE_DLL_API void replaceStatement(SgStatement* oldStmt, SgStatement* newStmt, bool movePreprocessinInfo = false);
//! Replace an anchor node with a specified pattern subtree with optional SgVariantExpression. All SgVariantExpression in the pattern will be replaced with copies of the anchor node.
ROSE_DLL_API SgNode* replaceWithPattern (SgNode * anchor, SgNode* new_pattern);
//! Replace all variable references to an old symbol in a scope to being references to a new symbol.
// Essentially replace variable a with b.
ROSE_DLL_API void replaceVariableReferences(SgVariableSymbol* old_sym, SgVariableSymbol* new_sym, SgScopeStatement * scope );
/** Given an expression, generates a temporary variable whose initializer optionally evaluates
* that expression. Then, the var reference expression returned can be used instead of the original
* expression. The temporary variable created can be reassigned to the expression by the returned SgAssignOp;
* this can be used when the expression the variable represents needs to be evaluated. NOTE: This handles
* reference types correctly by using pointer types for the temporary.
* @param expression Expression which will be replaced by a variable
* @param scope scope in which the temporary variable will be generated
* @param reEvaluate an assignment op to reevaluate the expression. Leave NULL if not needed
* @return declaration of the temporary variable, and a variable reference expression to use instead of
* the original expression. */
std::pair<SgVariableDeclaration*, SgExpression* > createTempVariableForExpression(SgExpression* expression,
SgScopeStatement* scope, bool initializeInDeclaration, SgAssignOp** reEvaluate = NULL);
/* This function creates a temporary variable for a given expression in the given scope
This is different from SageInterface::createTempVariableForExpression in that it does not
try to be smart to create pointers to reference types and so on. The temp is initialized to the expression.
The caller is responsible for setting the parent of SgVariableDeclaration since buildVariableDeclaration
may not set_parent() when the scope stack is empty. See programTransformation/extractFunctionArgumentsNormalization/ExtractFunctionArguments.C for sample usage.
@param expression Expression which will be replaced by a variable
@param scope scope in which the temporary variable will be generated
*/
std::pair<SgVariableDeclaration*, SgExpression*> createTempVariableAndReferenceForExpression
(SgExpression* expression, SgScopeStatement* scope);
//! Append an argument to SgFunctionParameterList, transparently set parent,scope, and symbols for arguments when possible
/*! We recommend to build SgFunctionParameterList before building a function declaration
However, it is still allowed to append new arguments for existing function declarations.
\todo function type , function symbol also need attention.
*/
ROSE_DLL_API SgVariableSymbol* appendArg(SgFunctionParameterList *, SgInitializedName*);
//!Prepend an argument to SgFunctionParameterList
ROSE_DLL_API SgVariableSymbol* prependArg(SgFunctionParameterList *, SgInitializedName*);
//! Append an expression to a SgExprListExp, set the parent pointer also
ROSE_DLL_API void appendExpression(SgExprListExp *, SgExpression*);
//! Append an expression list to a SgExprListExp, set the parent pointers also
ROSE_DLL_API void appendExpressionList(SgExprListExp *, const std::vector<SgExpression*>&);
//! Set parameter list for a function declaration, considering existing parameter list etc.
template <class actualFunction>
void setParameterList(actualFunction *func,SgFunctionParameterList *paralist) {
// TODO consider the difference between C++ and Fortran
// fixup the scope of arguments,no symbols for nondefining function declaration's arguments
// DQ (11/25/2011): templated function so that we can handle both
// SgFunctionDeclaration and SgTemplateFunctionDeclaration (and their associated member
// function derived classes).
ROSE_ASSERT(func != NULL);
ROSE_ASSERT(paralist != NULL);
#if 0
// At this point we don't have cerr and endl defined, so comment this code out.
// Warn to users if a paralist is being shared
if (paralist->get_parent() !=NULL)
{
cerr << "Waring! Setting a used SgFunctionParameterList to function: "
<< (func->get_name()).getString()<<endl
<< " Sharing parameter lists can corrupt symbol tables!"<<endl
<< " Please use deepCopy() to get an exclusive parameter list for each function declaration!"<<endl;
// ROSE_ASSERT(false);
}
#endif
// Liao,2/5/2008 constructor of SgFunctionDeclaration will automatically generate SgFunctionParameterList, so be cautious when set new paralist!!
if (func->get_parameterList() != NULL)
{
if (func->get_parameterList() != paralist)
{
delete func->get_parameterList();
}
}
func->set_parameterList(paralist);
paralist->set_parent(func);
// DQ (5/15/2012): Need to set the declptr in each SgInitializedName IR node.
// This is needed to support the AST Copy mechanism (at least). The files: test2005_150.C,
// test2012_81.C and testcode2012_82.C demonstrate this problem.
SgInitializedNamePtrList & args = paralist->get_args();
for (SgInitializedNamePtrList::iterator i = args.begin(); i != args.end(); i++)
{
(*i)->set_declptr(func);
}
}
//! Set a pragma of a pragma declaration. handle memory release for preexisting pragma, and set parent pointer.
ROSE_DLL_API void setPragma(SgPragmaDeclaration* decl, SgPragma *pragma);
//! Replace an expression with another, used for variable reference substitution and others. the old expression can be deleted (default case) or kept.
ROSE_DLL_API void replaceExpression(SgExpression* oldExp, SgExpression* newExp, bool keepOldExp=false);
//! Replace a given expression with a list of statements produced by a generator
ROSE_DLL_API void replaceExpressionWithStatement(SgExpression* from,
SageInterface::StatementGenerator* to);
//! Similar to replaceExpressionWithStatement, but with more restrictions.
//! Assumptions: from is not within the test of a loop or ifStmt, not currently traversing from or the statement it is in
ROSE_DLL_API void replaceSubexpressionWithStatement(SgExpression* from,
SageInterface::StatementGenerator* to);
//! Set operands for expressions with single operand, such as unary expressions. handle file info, lvalue, pointer downcasting, parent pointer etc.
ROSE_DLL_API void setOperand(SgExpression* target, SgExpression* operand);
//!set left hand operand for binary expressions, transparently downcasting target expressions when necessary
ROSE_DLL_API void setLhsOperand(SgExpression* target, SgExpression* lhs);
//!set left hand operand for binary expression
ROSE_DLL_API void setRhsOperand(SgExpression* target, SgExpression* rhs);
//! Set original expression trees to NULL for SgValueExp or SgCastExp expressions, so you can change the value and have it unparsed correctly.
ROSE_DLL_API void removeAllOriginalExpressionTrees(SgNode* top);
// DQ (1/25/2010): Added support for directories
//! Move file to be generated in a subdirectory (will be generated by the unparser).
ROSE_DLL_API void moveToSubdirectory ( std::string directoryName, SgFile* file );
//! Supporting function to comment relocation in insertStatement() and removeStatement().
ROSE_DLL_API SgStatement* findSurroundingStatementFromSameFile(SgStatement* targetStmt, bool & surroundingStatementPreceedsTargetStatement);
//! Relocate comments and CPP directives from one statement to another.
ROSE_DLL_API void moveCommentsToNewStatement(SgStatement* sourceStatement, const std::vector<int> & indexList, SgStatement* targetStatement, bool surroundingStatementPreceedsTargetStatement);
// DQ (7/19/2015): This is required to support general unparsing of template instantations for the GNU g++
// compiler which does not permit name qualification to be used to support the expression of the namespace
// where a template instantiation would be placed. Such name qualification would also sometimes require
// global qualification which is also not allowed by the GNU g++ compiler. These issues appear to be
// specific to the GNU compiler versions, at least versions 4.4 through 4.8.
//! Relocate the declaration to be explicitly represented in its associated namespace (required for some backend compilers to process template instantiations).
ROSE_DLL_API void moveDeclarationToAssociatedNamespace ( SgDeclarationStatement* declarationStatement );
ROSE_DLL_API bool isTemplateInstantiationNode(SgNode* node);
ROSE_DLL_API void wrapAllTemplateInstantiationsInAssociatedNamespaces(SgProject* root);
// DQ (12/1/2015): Adding support for fixing up internal data structures that have references to statements (e.g. macro expansions).
ROSE_DLL_API void resetInternalMapsForTargetStatement(SgStatement* sourceStatement);
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST repair, fix, and postprocessing.
\brief Mostly used internally when some AST pieces are built without knowing their target
scope/parent, especially during bottom-up construction of AST. The associated symbols,
parent and scope pointers cannot be set on construction then.
A set of utility functions are provided to
patch up scope, parent, and symbol for them when the target scope/parent becomes known.
*/
//! Connect variable reference to the right variable symbols when feasible, return the number of references being fixed.
/*! In AST translation, it is possible to build a variable reference before the variable
is being declared. buildVarRefExp() will use fake initialized name and symbol as placeholders
to get the work done. Users should call fixVariableReference() when AST is complete and all
variable declarations are in place.
*/
ROSE_DLL_API int fixVariableReferences(SgNode* root);
//!Patch up symbol, scope, and parent information when a SgVariableDeclaration's scope is known.
/*!
It is possible to build a variable declaration without knowing its scope information during bottom-up construction of AST, though top-down construction is recommended in general.
In this case, we have to patch up symbol table, scope and parent information when the scope is known. This function is usually used internally within appendStatement(), insertStatement().
*/
ROSE_DLL_API void fixVariableDeclaration(SgVariableDeclaration* varDecl, SgScopeStatement* scope);
//! Fix symbols, parent and scope pointers. Used internally within appendStatement(), insertStatement() etc when a struct declaration was built without knowing its target scope.
ROSE_DLL_API void fixStructDeclaration(SgClassDeclaration* structDecl, SgScopeStatement* scope);
//! Fix symbols, parent and scope pointers. Used internally within appendStatement(), insertStatement() etc when a class declaration was built without knowing its target scope.
ROSE_DLL_API void fixClassDeclaration(SgClassDeclaration* classDecl, SgScopeStatement* scope);
//! Fix symbols, parent and scope pointers. Used internally within appendStatement(), insertStatement() etc when a namespace declaration was built without knowing its target scope.
ROSE_DLL_API void fixNamespaceDeclaration(SgNamespaceDeclarationStatement* structDecl, SgScopeStatement* scope);
//! Fix symbol table for SgLabelStatement. Used Internally when the label is built without knowing its target scope. Both parameters cannot be NULL.
ROSE_DLL_API void fixLabelStatement(SgLabelStatement* label_stmt, SgScopeStatement* scope);
//! Set a numerical label for a Fortran statement. The statement should have a enclosing function definition already. SgLabelSymbol and SgLabelRefExp are created transparently as needed.
ROSE_DLL_API void setFortranNumericLabel(SgStatement* stmt, int label_value);
//! Suggest next usable (non-conflicting) numeric label value for a Fortran function definition scope
ROSE_DLL_API int suggestNextNumericLabel(SgFunctionDefinition* func_def);
//! Fix the symbol table and set scope (only if scope in declaration is not already set).
ROSE_DLL_API void fixFunctionDeclaration(SgFunctionDeclaration* stmt, SgScopeStatement* scope);
//! Fix the symbol table and set scope (only if scope in declaration is not already set).
ROSE_DLL_API void fixTemplateDeclaration(SgTemplateDeclaration* stmt, SgScopeStatement* scope);
//! A wrapper containing fixes (fixVariableDeclaration(), fixStructDeclaration(), fixLabelStatement(), etc.) for all kinds of statements. Should be used before attaching the statement into the AST.
ROSE_DLL_API void fixStatement(SgStatement* stmt, SgScopeStatement* scope);
// DQ (6/11/2015): This reports the statements that are marked as transformed (used to debug the token-based unparsing).
//! This collects the statements that are marked as transformed (useful in debugging).
ROSE_DLL_API std::set<SgStatement*> collectTransformedStatements( SgNode* node );
//! This collects the statements that are marked as modified (a flag automatically set by all set_* generated functions) (useful in debugging).
ROSE_DLL_API std::set<SgStatement*> collectModifiedStatements( SgNode* node );
//! This collects the SgLocatedNodes that are marked as modified (a flag automatically set by all set_* generated functions) (useful in debugging).
ROSE_DLL_API std::set<SgLocatedNode*> collectModifiedLocatedNodes( SgNode* node );
//@}
//! Update defining and nondefining links due to a newly introduced function declaration. Should be used after inserting the function into a scope.
/*! This function not only set the defining and nondefining links of the newly introduced
* function declaration inside a scope, but also update other same function declarations' links
* accordingly if there are any.
* Assumption: The function has already inserted/appended/prepended into the scope before calling this function.
*/
ROSE_DLL_API void updateDefiningNondefiningLinks(SgFunctionDeclaration* func, SgScopeStatement* scope);
//------------------------------------------------------------------------
//@{
/*! @name Advanced AST transformations, analyses, and optimizations
\brief Some complex but commonly used AST transformations.
*/
//! Collect all read and write references within stmt, which can be a function, a scope statement, or a single statement. Note that a reference can be both read and written, like i++
ROSE_DLL_API bool
collectReadWriteRefs(SgStatement* stmt, std::vector<SgNode*>& readRefs, std::vector<SgNode*>& writeRefs, bool useCachedDefUse=false);
//!Collect unique variables which are read or written within a statement. Note that a variable can be both read and written. The statement can be either of a function, a scope, or a single line statement. For accesses to members of aggregate data, we return the coarse grain aggregate mem obj by default.
ROSE_DLL_API bool collectReadWriteVariables(SgStatement* stmt, std::set<SgInitializedName*>& readVars, std::set<SgInitializedName*>& writeVars, bool coarseGrain=true);
//!Collect read only variables within a statement. The statement can be either of a function, a scope, or a single line statement. For accesses to members of aggregate data, we return the coarse grain aggregate mem obj by default.
ROSE_DLL_API void collectReadOnlyVariables(SgStatement* stmt, std::set<SgInitializedName*>& readOnlyVars, bool coarseGrain=true);
//!Collect read only variable symbols within a statement. The statement can be either of a function, a scope, or a single line statement. For accesses to members of aggregate data, we return the coarse grain aggregate mem obj by default.
ROSE_DLL_API void collectReadOnlySymbols(SgStatement* stmt, std::set<SgVariableSymbol*>& readOnlySymbols, bool coarseGrain=true);
//! Check if a variable reference is used by its address: including &a expression and foo(a) when type2 foo(Type& parameter) in C++
ROSE_DLL_API bool isUseByAddressVariableRef(SgVarRefExp* ref);
//! Collect variable references involving use by address: including &a expression and foo(a) when type2 foo(Type& parameter) in C++
ROSE_DLL_API void collectUseByAddressVariableRefs (const SgStatement* s, std::set<SgVarRefExp* >& varSetB);
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
//!Call liveness analysis on an entire project
ROSE_DLL_API LivenessAnalysis * call_liveness_analysis(SgProject* project, bool debug=false);
//!get liveIn and liveOut variables for a for loop from liveness analysis result liv.
ROSE_DLL_API void getLiveVariables(LivenessAnalysis * liv, SgForStatement* loop, std::set<SgInitializedName*>& liveIns, std::set<SgInitializedName*> & liveOuts);
#endif
//!Recognize and collect reduction variables and operations within a C/C++ loop, following OpenMP 3.0 specification for allowed reduction variable types and operation types.
ROSE_DLL_API void ReductionRecognition(SgForStatement* loop, std::set< std::pair <SgInitializedName*, OmpSupport::omp_construct_enum> > & results);
//! Constant folding an AST subtree rooted at 'r' (replacing its children with their constant values, if applicable). Please be advised that constant folding on floating point computation may decrease the accuracy of floating point computations!
/*! It is a wrapper function for ConstantFolding::constantFoldingOptimization(). Note that only r's children are replaced with their corresponding constant values, not the input SgNode r itself. You have to call this upon an expression's parent node if you want to fold the expression. */
ROSE_DLL_API void constantFolding(SgNode* r);
//!Instrument(Add a statement, often a function call) into a function right before the return points, handle multiple return statements (with duplicated statement s) and return expressions with side effects. Return the number of statements inserted.
/*! Useful when adding a runtime library call to terminate the runtime system right before the end of a program, especially for OpenMP and UPC runtime systems. Return with complex expressions with side effects are rewritten using an additional assignment statement.
*/
ROSE_DLL_API int instrumentEndOfFunction(SgFunctionDeclaration * func, SgStatement* s);
//! Remove jumps whose label is immediately after the jump. Used to clean up inlined code fragments.
ROSE_DLL_API void removeJumpsToNextStatement(SgNode*);
//! Remove labels which are not targets of any goto statements
ROSE_DLL_API void removeUnusedLabels(SgNode* top);
//! Remove consecutive labels
ROSE_DLL_API void removeConsecutiveLabels(SgNode* top);
//! Merge a variable assignment statement into a matching variable declaration statement. Callers should make sure the merge is semantically correct (by not introducing compilation errors). This function simply does the merge transformation, without eligibility check.
/*!
* e.g. int i; i=10; becomes int i=10; the original i=10 will be deleted after the merge
* if success, return true, otherwise return false (e.g. variable declaration does not match or already has an initializer)
* The original assignment stmt will be removed by default
* This function is a bit ambiguous about the merge direction, to be phased out.
*/
ROSE_DLL_API bool mergeDeclarationAndAssignment (SgVariableDeclaration* decl, SgExprStatement* assign_stmt, bool removeAssignStmt = true);
//! Merge an assignment into its upstream declaration statement. Callers should make sure the merge is semantically correct.
ROSE_DLL_API bool mergeAssignmentWithDeclaration (SgExprStatement* assign_stmt, SgVariableDeclaration* decl, bool removeAssignStmt = true);
//! Merge a declaration statement into a matching followed variable assignment. Callers should make sure the merge is semantically correct (by not introducing compilation errors). This function simply does the merge transformation, without eligibility check.
/*!
* e.g. int i; i=10; becomes int i=10; the original int i; will be deleted after the merge
*/
ROSE_DLL_API bool mergeDeclarationWithAssignment (SgVariableDeclaration* decl, SgExprStatement* assign_stmt);
//! Split a variable declaration with an rhs assignment into two statements: a declaration and an assignment.
/*! Return the generated assignment statement, if any
* e.g. int i =10; becomes int i; i=10;
* This can be seen as a normalization of declarations
*/
ROSE_DLL_API SgExprStatement* splitVariableDeclaration (SgVariableDeclaration* decl);
//! Split declarations within a scope into declarations and assignment statements, by default only top level declarations are considered. Return the number of declarations split.
ROSE_DLL_API int splitVariableDeclaration (SgScopeStatement* scope, bool topLevelOnly = true);
//! Replace an expression with a temporary variable and an assignment statement
/*!
Add a new temporary variable to contain the value of 'from'
Change reference to 'from' to use this new variable
Assumptions: 'from' is not within the test of a loop or 'if'
not currently traversing 'from' or the statement it is in
*/
ROSE_DLL_API SgAssignInitializer* splitExpression(SgExpression* from, std::string newName = "");
//! Split long expressions into blocks of statements
ROSE_DLL_API void splitExpressionIntoBasicBlock(SgExpression* expr);
//! Remove labeled goto statements
ROSE_DLL_API void removeLabeledGotos(SgNode* top);
//! If the given statement contains any break statements in its body, add a new label below the statement and change the breaks into gotos to that new label.
ROSE_DLL_API void changeBreakStatementsToGotos(SgStatement* loopOrSwitch);
//! Check if the body of a 'for' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfFor(SgForStatement* fs);
//! Check if the body of a 'upc_forall' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfUpcForAll(SgUpcForAllStatement* fs);
//! Check if the body of a 'while' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfWhile(SgWhileStmt* ws);
//! Check if the body of a 'do .. while' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfDoWhile(SgDoWhileStmt* ws);
//! Check if the body of a 'switch' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfSwitch(SgSwitchStatement* ws);
//! Check if the body of a 'case option' statement is a SgBasicBlock, create one if not.
SgBasicBlock* ensureBasicBlockAsBodyOfCaseOption(SgCaseOptionStmt* cs);
//! Check if the body of a 'default option' statement is a SgBasicBlock, create one if not.
SgBasicBlock* ensureBasicBlockAsBodyOfDefaultOption(SgDefaultOptionStmt * cs);
//! Check if the true body of a 'if' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsTrueBodyOfIf(SgIfStmt* ifs);
//! Check if the false body of a 'if' statement is a SgBasicBlock, create one if not when the flag is true.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsFalseBodyOfIf(SgIfStmt* ifs, bool createEmptyBody = true);
//! Check if the body of a 'catch' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfCatch(SgCatchOptionStmt* cos);
//! Check if the body of a SgOmpBodyStatement is a SgBasicBlock, create one if not
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfOmpBodyStmt(SgOmpBodyStatement* ompbodyStmt);
// DQ (1/18/2015): This is added to support better quality token-based unparsing.
//! Remove unused basic block IR nodes added as part of normalization.
ROSE_DLL_API void cleanupNontransformedBasicBlockNode();
// DQ (1/18/2015): This is added to support better quality token-based unparsing.
//! Record where normalization have been done so that we can preform denormalizations as required for the token-based unparsing to generate minimal diffs.
ROSE_DLL_API void recordNormalizations(SgStatement* s);
//! Check if a statement is a (true or false) body of a container-like parent, such as For, Upc_forall, Do-while,
//! switch, If, Catch, OmpBodyStmt, etc
bool isBodyStatement (SgStatement* s);
//! Fix up ifs, loops, while, switch, Catch, OmpBodyStatement, etc. to have blocks as body components. It also adds an empty else body to if statements that don't have them.
void changeAllBodiesToBlocks(SgNode* top, bool createEmptyBody = true);
// The same as changeAllBodiesToBlocks(SgNode* top). Phased out.
//void changeAllLoopBodiesToBlocks(SgNode* top);
//! Make a single statement body to be a basic block. Its parent is if, while, catch, or upc_forall etc.
SgBasicBlock * makeSingleStatementBodyToBlock(SgStatement* singleStmt);
#if 0
/** If s is the body of a loop, catch, or if statement and is already a basic block,
* s is returned unmodified. Otherwise generate a SgBasicBlock between s and its parent
* (a loop, catch, or if statement, etc). */
SgLocatedNode* ensureBasicBlockAsParent(SgStatement* s);
#endif
//! Get the constant value from a constant integer expression; abort on
//! everything else. Note that signed long longs are converted to unsigned.
unsigned long long getIntegerConstantValue(SgValueExp* expr);
//! Get a statement's dependent declarations which declares the types used in the statement. The returned vector of declaration statements are sorted according to their appearance order in the original AST. Any reference to a class or template class from a namespace will treated as a reference to the enclosing namespace.
std::vector<SgDeclarationStatement*> getDependentDeclarations (SgStatement* stmt );
//! Insert an expression (new_exp )before another expression (anchor_exp) has possible side effects, without changing the original semantics. This is achieved by using a comma operator: (new_exp, anchor_exp). The comma operator is returned.
SgCommaOpExp *insertBeforeUsingCommaOp (SgExpression* new_exp, SgExpression* anchor_exp);
//! Insert an expression (new_exp ) after another expression (anchor_exp) has possible side effects, without changing the original semantics. This is done by using two comma operators: type T1; ... ((T1 = anchor_exp, new_exp),T1) )... , where T1 is a temp variable saving the possible side effect of anchor_exp. The top level comma op exp is returned. The reference to T1 in T1 = anchor_exp is saved in temp_ref.
SgCommaOpExp *insertAfterUsingCommaOp (SgExpression* new_exp, SgExpression* anchor_exp, SgStatement** temp_decl = NULL, SgVarRefExp** temp_ref = NULL);
/// \brief moves the body of a function f to a new function f`;
/// f's body is replaced with code that forwards the call to f`.
/// \return a pair indicating the statement containing the call of f`
/// and an initialized name refering to the temporary variable
/// holding the result of f`. In case f returns void
/// the initialized name is NULL.
/// \param definingDeclaration the defining function declaration of f
/// \param newName the name of function f`
/// \details f's new body becomes { f`(...); } and { int res = f`(...); return res; }
/// for functions returning void and a value, respectively.
/// two function declarations are inserted in f's enclosing scope
/// \code
/// result_type f`(...); <--- (1)
/// result_type f (...) { forward call to f` }
/// result_type f`(...) { original code } <--- (2)
/// \endcode
/// Calls to f are not updated, thus in the transformed code all
/// calls will continue calling f (this is also true for
/// recursive function calls from within the body of f`).
/// After the function has created the wrapper,
/// definingDeclaration becomes the wrapper function
/// The definition of f` is the next entry in the
/// statement list; the forward declaration of f` is the previous
/// entry in the statement list.
/// \pre definingDeclaration must be a defining declaration of a
/// free standing function.
/// typeid(SgFunctionDeclaration) == typeid(definingDeclaration)
/// i.e., this function is NOT implemented for class member functions,
/// template functions, procedures, etc.
std::pair<SgStatement*, SgInitializedName*>
wrapFunction(SgFunctionDeclaration& definingDeclaration, SgName newName);
/// \overload
/// \tparam NameGen functor that generates a new name based on the old name.
/// interface: SgName nameGen(const SgName&)
/// \param nameGen name generator
/// \brief see wrapFunction for details
template <class NameGen>
std::pair<SgStatement*, SgInitializedName*>
wrapFunction(SgFunctionDeclaration& definingDeclaration, NameGen nameGen)
{
  // Derive the wrapper's name from the original function's name, then
  // delegate to the non-template overload (see wrapFunction above).
  const SgName generatedName = nameGen(definingDeclaration.get_name());
  return wrapFunction(definingDeclaration, generatedName);
}
/// \brief convenience function that returns the first initialized name in a
/// list of variable declarations.
SgInitializedName& getFirstVariable(SgVariableDeclaration& vardecl);
//@}
// DQ (6/7/2012): Unclear where this function should go...
bool hasTemplateSyntax( const SgName & name );
#if 0
//------------------------AST dump, stringify-----------------------------
//------------------------------------------------------------------------
std::string buildOperatorString ( SgNode* astNode ); //transformationSupport.h
// do we need these?
std::string dump_node(const SgNode* astNode);
std::string dump_tree(const SgNode* astNode);
// or a friendly version of unparseToString(), as a memeber function
std::string SgNode::toString(bool asSubTree=true); // dump node or subtree
//----------------------------AST comparison------------------------------
//------------------------------------------------------------------------
// How to get generic functions for comparison?
bool isNodeEqual(SgNode* node1, SgNode* node2); //?
bool isTreeEqual(SgNode* tree1, SgNode* tree2);
//! Are two expressions equal (using a deep comparison)?
bool expressionTreeEqual(SgExpression*, SgExpression*);
//! Are corresponding expressions in two lists equal (using a deep comparison)?
bool expressionTreeEqualStar(const SgExpressionPtrList&,
const SgExpressionPtrList&);
//----------------------AST verfication/repair----------------------------
//------------------------------------------------------------------------
// sanity check of AST subtree, any suggestions?
// TODO
verifySgNode(SgNode* node, bool subTree=true);
//src/midend/astDiagnostics/AstConsistencyTests.h
// AstTests::runAllTests(SgProject * )
//src/midend/astUtil/astInterface/AstInterface.h.C
//FixSgProject(SgProject &project)
//FixSgTree(SgNode* r)
//src/frontend/SageIII/astPostProcessing
//AstPostProcessing(SgNode * node)
//--------------------------AST modification------------------------------
//------------------------------------------------------------------------
// any operations changing AST tree, including
// insert, copy, delete(remove), replace
// insert before or after some point, argument list is consistent with LowLevelRewrite
void insertAst(SgNode* targetPosition, SgNode* newNode, bool insertBefore=true);
// previous examples
//void myStatementInsert(SgStatement* target,...)
// void AstInterfaceBase::InsertStmt(AstNodePtr const & orig, AstNodePtr const &n, bool insertbefore, bool extractfromBasicBlock)
// copy
// copy children of one basic block to another basic block
//void appendStatementCopy (const SgBasicBlock* a, SgBasicBlock* b);
void copyStatements (const SgBasicBlock* src, SgBasicBlock* dst);
// delete (remove) a node or a whole subtree
void removeSgNode(SgNode* targetNode); // need this?
void removeSgNodeTree(SgNode* subtree); // need this?
void removeStatement( SgStatement* targetStmt);
//Move = delete + insert
void moveAst (SgNode* src, SgNode* target); // need this?
// similar to
void moveStatements (SgBasicBlock* src, SgBasicBlock* target);
// replace= delete old + insert new (via building or copying)
// DQ (1/25/2010): This does not appear to exist as a definition anywhere in ROSE.
// void replaceAst(SgNode* oldNode, SgNode* newNode);
//void replaceChild(SgNode* parent, SgNode* from, SgNode* to);
//bool AstInterface::ReplaceAst( const AstNodePtr& orig, const AstNodePtr& n)
//--------------------------AST transformations---------------------------
//------------------------------------------------------------------------
// Advanced AST modifications through basic AST modifications
// Might not be included in AST utitlity list, but listed here for the record.
// extract statements/content from a scope
void flattenBlocks(SgNode* n);
//src/midend/astInlining/inlinerSupport.h
void renameVariables(SgNode* n);
void renameLabels(SgNode* n, SgFunctionDefinition* enclosingFunctionDefinition);
void simpleCopyAndConstantPropagation(SgNode* top);
void changeAllMembersToPublic(SgNode* n);
void removeVariableDeclaration(SgInitializedName* initname);
//! Convert something like "int a = foo();" into "int a; a = foo();"
SgAssignOp* convertInitializerIntoAssignment(SgAssignInitializer* init);
//! Rewrites a while or for loop so that the official test is changed to
//! "true" and what had previously been the test is now an if-break
//! combination (with an inverted condition) at the beginning of the loop
//! body
void pushTestIntoBody(LoopStatement* loopStmt);
//programTransformation/finiteDifferencing/finiteDifferencing.h
//! Move variables declared in a for statement to just outside that statement.
void moveForDeclaredVariables(SgNode* root);
//------------------------ Is/Has functions ------------------------------
//------------------------------------------------------------------------
// misc. boolean functions
// some of them could moved to SgXXX class as a member function
bool isOverloaded (SgFunctionDeclaration * functionDeclaration);
bool isSwitchCond (const SgStatement* s);
bool isIfCond (const SgStatement* s);
bool isWhileCond (const SgStatement* s);
bool isStdNamespace (const SgScopeStatement* scope);
bool isTemplateInst (const SgDeclarationStatement* decl);
bool isCtor (const SgFunctionDeclaration* func);
bool isDtor (const SgFunctionDeclaration* func);
// src/midend/astInlining/typeTraits.h
bool hasTrivialDestructor(SgType* t);
ROSE_DLL_API bool isNonconstReference(SgType* t);
ROSE_DLL_API bool isReferenceType(SgType* t);
// generic ones, or move to the SgXXX class as a member function
bool isConst(SgNode* node); // const type, variable, function, etc.
// .... and more
bool isConstType (const SgType* type);
bool isConstFunction (const SgFunctionDeclaration* decl);
bool isMemberVariable(const SgInitializedName & var);
//bool isMemberVariable(const SgNode& in);
bool isPrototypeInScope (SgScopeStatement * scope,
SgFunctionDeclaration * functionDeclaration,
SgDeclarationStatement * startingAtDeclaration);
bool MayRedefined(SgExpression* expr, SgNode* root);
// bool isPotentiallyModified(SgExpression* expr, SgNode* root); // inlinderSupport.h
bool hasAddressTaken(SgExpression* expr, SgNode* root);
//src/midend/astInlining/inlinerSupport.C
// can also classified as topdown search
bool containsVariableReference(SgNode* root, SgInitializedName* var);
bool isDeclarationOf(SgVariableDeclaration* decl, SgInitializedName* var);
bool isPotentiallyModifiedDuringLifeOf(SgBasicBlock* sc,
SgInitializedName* toCheck,
SgInitializedName* lifetime)
//src/midend/programTransformation/partialRedundancyElimination/pre.h
bool anyOfListPotentiallyModifiedIn(const std::vector<SgVariableSymbol*>& syms, SgNode* n);
//------------------------ loop handling ---------------------------------
//------------------------------------------------------------------------
//get and set loop control expressions
// 0: init expr, 1: condition expr, 2: stride expr
SgExpression* getForLoopTripleValues(int valuetype,SgForStatement* forstmt );
int setForLoopTripleValues(int valuetype,SgForStatement* forstmt, SgExpression* exp);
bool isLoopIndexVarRef(SgForStatement* forstmt, SgVarRefExp *varref);
SgInitializedName * getLoopIndexVar(SgForStatement* forstmt);
//------------------------expressions-------------------------------------
//------------------------------------------------------------------------
//src/midend/programTransformation/partialRedundancyElimination/pre.h
int countComputationsOfExpressionIn(SgExpression* expr, SgNode* root);
//src/midend/astInlining/replaceExpressionWithStatement.h
void replaceAssignmentStmtWithStatement(SgExprStatement* from, StatementGenerator* to);
void replaceSubexpressionWithStatement(SgExpression* from,
StatementGenerator* to);
SgExpression* getRootOfExpression(SgExpression* n);
//--------------------------preprocessing info. -------------------------
//------------------------------------------------------------------------
//! Removes all preprocessing information at a given position.
void cutPreprocInfo (SgBasicBlock* b,
PreprocessingInfo::RelativePositionType pos,
AttachedPreprocessingInfoType& save_buf);
//! Pastes preprocessing information at the front of a statement.
void pastePreprocInfoFront (AttachedPreprocessingInfoType& save_buf,
SgStatement* s);
//! Pastes preprocessing information at the back of a statement.
void pastePreprocInfoBack (AttachedPreprocessingInfoType& save_buf,
SgStatement* s);
/*!
* \brief Moves 'before' preprocessing information.
* Moves all preprocessing information attached 'before' the source
* statement to the front of the destination statement.
*/
// a generic one for all
/// void movePreprocessingInfo(src, dest, RelativePositionType);
void moveBeforePreprocInfo (SgStatement* src, SgStatement* dest);
void moveInsidePreprocInfo (SgBasicBlock* src, SgBasicBlock* dest);
void moveAfterPreprocInfo (SgStatement* src, SgStatement* dest);
//--------------------------------operator--------------------------------
//------------------------------------------------------------------------
from transformationSupport.h, not sure if they should be included here
/* return enum code for SAGE operators */
operatorCodeType classifyOverloadedOperator(); // transformationSupport.h
/*! \brief generates a source code string from operator name.
This function returns a string representing the elementwise operator (for primative types)
that would be match that associated with the overloaded operator for a user-defined
abstractions (e.g. identifyOperator("operator+()") returns "+").
*/
std::string stringifyOperator (std::string name);
//--------------------------------macro ----------------------------------
//------------------------------------------------------------------------
std::string buildMacro ( std::string s ); //transformationSupport.h
//--------------------------------access functions---------------------------
//----------------------------------get/set sth.-----------------------------
// several categories:
* get/set a direct child/grandchild node or fields
* get/set a property flag value
* get a descendent child node using preorder searching
* get an ancestor node using bottomup/reverse searching
// SgName or string?
std::string getFunctionName (SgFunctionCallExp* functionCallExp);
std::string getFunctionTypeName ( SgFunctionCallExp* functionCallExpression );
// do we need them anymore? or existing member functions are enought?
// a generic one:
std::string get_name (const SgNode* node);
std::string get_name (const SgDeclarationStatement * declaration);
// get/set some property: should moved to SgXXX as an inherent memeber function?
// access modifier
void setExtern (SgFunctionDeclartion*)
void clearExtern()
// similarly for other declarations and other properties
void setExtern (SgVariableDeclaration*)
void setPublic()
void setPrivate()
#endif
// DQ (1/23/2013): Added support for generated a set of source sequence entries.
std::set<unsigned int> collectSourceSequenceNumbers( SgNode* astNode );
//--------------------------------Type Traits (C++)---------------------------
bool HasNoThrowAssign(const SgType * const inputType);
bool HasNoThrowCopy(const SgType * const inputType);
bool HasNoThrowConstructor(const SgType * const inputType);
bool HasTrivialAssign(const SgType * const inputType);
bool HasTrivialCopy(const SgType * const inputType);
bool HasTrivialConstructor(const SgType * const inputType);
bool HasTrivialDestructor(const SgType * const inputType);
bool HasVirtualDestructor(const SgType * const inputType);
bool IsBaseOf(const SgType * const inputBaseType, const SgType * const inputDerivedType);
bool IsAbstract(const SgType * const inputType);
bool IsClass(const SgType * const inputType);
bool IsEmpty(const SgType * const inputType);
bool IsEnum(const SgType * const inputType);
bool IsPod(const SgType * const inputType);
bool IsPolymorphic(const SgType * const inputType);
bool IsStandardLayout(const SgType * const inputType);
bool IsLiteralType(const SgType * const inputType);
bool IsTrivial(const SgType * const inputType);
bool IsUnion(const SgType * const inputType);
SgType * UnderlyingType(SgType *type);
// DQ (3/2/2014): Added a new interface function (used in the snippet insertion support).
// void supportForInitializedNameLists ( SgScopeStatement* scope, SgInitializedNamePtrList & variableList );
// DQ (3/4/2014): Added support for testing two trees for equivalents using the AST iterators.
bool isStructurallyEquivalentAST( SgNode* tree1, SgNode* tree2 );
// JP (10/14/24): Moved code to evaluate a const integer expression (like in array size definitions) to SageInterface
/*! The datastructure is used as the return type for SageInterface::evaluateConstIntegerExpression(). One needs to always check whether hasValue_ is true before accessing value_ */
// Result of SageInterface::evaluateConstIntegerExpression().
// Callers must check hasValue_ before reading value_.
struct const_int_expr_t {
size_t value_;   // the evaluated constant; meaningful only when hasValue_ is true
bool hasValue_;  // true iff the expression could be evaluated to a constant
};
/*! \brief The function tries to evaluate const integer expressions (such as are used in array dimension sizes). It follows variable symbols, and requires constness. */
struct const_int_expr_t evaluateConstIntegerExpression(SgExpression *expr);
// JP (9/17/14): Added function to test whether two SgType* are equivalent or not
bool checkTypesAreEqual(SgType *typeA, SgType *typeB);
//--------------------------------Java interface functions ---------------------
#ifdef ROSE_BUILD_JAVA_LANGUAGE_SUPPORT
ROSE_DLL_API std::string getTempDirectory(SgProject *project);
ROSE_DLL_API void destroyTempDirectory(std::string);
ROSE_DLL_API SgFile *processFile(SgProject *, std::string, bool unparse = false);
ROSE_DLL_API std::string preprocessPackage(SgProject *, std::string);
ROSE_DLL_API std::string preprocessImport(SgProject *, std::string);
ROSE_DLL_API SgFile* preprocessCompilationUnit(SgProject *, std::string, std::string, bool unparse = true);
ROSE_DLL_API SgClassDefinition *findJavaPackage(SgScopeStatement *, std::string);
ROSE_DLL_API SgClassDefinition *findOrInsertJavaPackage(SgProject *, std::string, bool create_directory = false);
ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, SgClassDefinition *package_definition, std::string);
ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, std::string, std::string);
ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, SgClassType *);
ROSE_DLL_API SgMemberFunctionDeclaration *findJavaMain(SgClassDefinition *);
ROSE_DLL_API SgMemberFunctionDeclaration *findJavaMain(SgClassType *);
#endif // ROSE_BUILD_JAVA_LANGUAGE_SUPPORT
// DQ (8/31/2016): Making this a template function so that we can have it work with user defined filters.
//! This function detects template instantiations that are relevant when filters are used.
/*!
EDG normalizes some in-class template functions and member functions to be redefined outside of a class. this causes the associated template instantiations
to be declared outside of the class, and to be marked as compiler generated (since the compiler generated form outside of the class declaration).
ROSE captures the function definitions, but in the new location (defined outside of the class declaration). This can confuse some simple tests
for template instantiations that are a part of definitions in a file, thus we have this function to detect this specific normalization.
*/
template < class T >
bool isTemplateInstantiationFromTemplateDeclarationSatisfyingFilter (SgFunctionDeclaration* function, T* filter )
   {
  // DQ (9/1/2016): This function is called in the Call Graph generation to avoid filtering out
  // EDG-normalized function template instantiations (which come from normalized template
  // functions and member functions).  Because of the EDG normalization the member function is
  // moved outside of the class and thus marked as compiler generated.  Template instantiations
  // are always marked as compiler generated (if not specializations), so we want to include a
  // template instantiation that is marked as compiler generated but whose template declaration
  // satisfies the user-defined filter.
  // Returns true iff the instantiation's originating template declaration passes *filter.
  // This function is called in the CG.C file of tests/nonsmoke/functional/roseTests/programAnalysisTests/testCallGraphAnalysis.
     bool retval = false;

#define DEBUG_TEMPLATE_NORMALIZATION_DETECTION 0

#if DEBUG_TEMPLATE_NORMALIZATION_DETECTION
     printf ("In isNormalizedTemplateInstantiation(): function = %p = %s = %s \n",function,function->class_name().c_str(),function->get_name().str());
#endif

  // Test for this to be a template instantiation (in which case it was marked as
  // compiler generated but we may want to allow it to be used in the call graph,
  // if its template was defined in the current directory).
     SgTemplateInstantiationFunctionDecl* templateInstantiationFunction = isSgTemplateInstantiationFunctionDecl(function);
     SgTemplateInstantiationMemberFunctionDecl* templateInstantiationMemberFunction = isSgTemplateInstantiationMemberFunctionDecl(function);

     if (templateInstantiationFunction != NULL)
        {
       // When the defining function has been normalized by EDG, only the non-defining declaration will have a source position.
          templateInstantiationFunction = isSgTemplateInstantiationFunctionDecl(templateInstantiationFunction->get_firstNondefiningDeclaration());

       // BUGFIX: the cast above can return NULL (the first non-defining declaration is not
       // guaranteed to be a SgTemplateInstantiationFunctionDecl); the original code
       // dereferenced it unconditionally.
          if (templateInstantiationFunction != NULL)
             {
               SgTemplateFunctionDeclaration* templateFunctionDeclaration = templateInstantiationFunction->get_templateDeclaration();
               if (templateFunctionDeclaration != NULL)
                  {
                    retval = filter->operator()(templateFunctionDeclaration);
                  }
            // else: no template declaration available, retval stays false.
             }

#if DEBUG_TEMPLATE_NORMALIZATION_DETECTION
          printf (" --- case of templateInstantiationFunction: retval = %s \n",retval ? "true" : "false");
#endif
        }
       else
        {
          if (templateInstantiationMemberFunction != NULL)
             {
            // When the defining function has been normalized by EDG, only the non-defining declaration will have a source position.
               templateInstantiationMemberFunction = isSgTemplateInstantiationMemberFunctionDecl(templateInstantiationMemberFunction->get_firstNondefiningDeclaration());

            // BUGFIX: same NULL guard as the non-member case above.
               if (templateInstantiationMemberFunction != NULL)
                  {
                    SgTemplateMemberFunctionDeclaration* templateMemberFunctionDeclaration = templateInstantiationMemberFunction->get_templateDeclaration();
                    if (templateMemberFunctionDeclaration != NULL)
                       {
                         retval = filter->operator()(templateMemberFunctionDeclaration);
                       }
                 // else: no template declaration available, retval stays false.
                  }

#if DEBUG_TEMPLATE_NORMALIZATION_DETECTION
               printf (" --- case of templateInstantiationMemberFunction: retval = %s \n",retval ? "true" : "false");
#endif
             }
        }

     return retval;
   }
}// end of namespace
#endif
|
generator_3177.c | /*
Author: David Zhu (P1703177)
Class: DISM/FT/1A/21
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include <ctype.h>
#include <crypt.h>
#include <omp.h>
#include "functions_3177.h"
void printHelp(char *);
int isAllNumeric(char *);
void writefile(char *, Hash *, int);
void readfile(FILE *, Hash *, int, int);
unsigned long long countLines(FILE *, int, int);
int main(int argc, char *argv[]) {
if (argc != 4) {
printHelp(argv[0]);
return 1;
} else if (isAllNumeric(argv[2]) || isAllNumeric(argv[3])) {
printHelp(argv[0]);
return 1;
} else if (atoi(argv[2]) > atoi(argv[3]) || atoi(argv[2]) < 1) {
printHelp(argv[0]);
return 1;
}
FILE *fp = is_valid_file(argv[1]);
/* print program start time */
char buf[26];
time_t rawtime;
struct tm * timeinfo;
time (&rawtime);
timeinfo = localtime(&rawtime);
strftime(buf, 26, "%Y:%m:%d %H:%M:%S\n", timeinfo);
printf("Program started at %s", buf);
clock_t start = clock();
int min = atoi(argv[2]);
int max = atoi(argv[3]);
unsigned long long count = countLines(fp, min, max); // get number of lines
Hash hashes[count];
readfile(fp, hashes, min, max);
printf("Total number of words processed => %llu\n", count);
int nProcessors = omp_get_max_threads(); // get number of threads avaliable
omp_set_num_threads(nProcessors); // set number of threads to max avaliable
#pragma omp parallel for
for (int i = 0; i < count; i++) {
/* https://stackoverflow.com/questions/9335777/crypt-r-example/9335810#9335810 */
struct crypt_data data; // storage space for crypt_r
data.initialized = 0;
hashes[i].md5 = strdup(crypt_r(hashes[i].plaintext, "$1$$", &data)); // MD5 Hash
hashes[i].sha512 = strdup(crypt_r(hashes[i].plaintext, "$6$$", &data)); // SHA-512 Hash
}
/* https://stackoverflow.com/questions/5248915/execution-time-of-c-program/5249150#5249150 */
clock_t end = clock();
double cpuTime = (double)(end - start) / CLOCKS_PER_SEC;
writefile("mytab2411.txt", hashes, count); // write hashes out to disk
printf("Total number of generated entries => %llu\n", count << 1);
/* Print program end time */
time(&rawtime);
timeinfo = localtime(&rawtime);
/* https://stackoverflow.com/questions/3673226/how-to-print-time-in-format-2009-08-10-181754-811/3673291#3673291 */
strftime(buf, 26, "%Y:%m:%d %H:%M:%S\n", timeinfo);
printf("Program ended at %s", buf);
printf("CPU time: %lf\n", cpuTime);
fclose(fp);
return 0;
}
/* Prints out help menu
takes program name as arg*/
/* Print the usage banner; `name` is the program name (argv[0]). */
void printHelp(char *name) {
    printf("Usage: %s <wordlist> <min> <max>\n\n", name);
    /* adjacent string literals concatenate, so output bytes are unchanged */
    printf("\t<wordlist> : A file path/name in which contains the password dictonary\n"
           "\t<min> : An integer value greater than 0.\n\t\tThis value represents the minimum length of the password.\n"
           "\t<max> : An integer value greater than or equals to <min>.\n\t\t<max> represents the maximum length of the password\n");
}
/* takes an commandline arg as arg
returns 1 when non-numeric character is detected
returns 0 when all characters are numeric */
/* Returns 1 when a non-numeric character is detected, 0 when all characters
 * are decimal digits.  (An empty string yields 0; main() additionally
 * range-checks the parsed value with atoi.) */
int isAllNumeric(char *arg) {
    for (size_t i = 0; arg[i] != '\0'; i++) {
        /* BUGFIX: cast to unsigned char — passing a negative plain char
         * (e.g. from a high-bit byte) to isdigit() is undefined behavior */
        if (!isdigit((unsigned char)arg[i])) return 1;
    }
    return 0;
}
/* takes file pointer, min length and max length as args
returns number of lines with length matching the min max in file */
/* Takes a file pointer, min length and max length as args; returns the number
 * of lines whose (newline-stripped) length lies within [min, max].
 * Rewinds the stream before counting, so it can be called repeatedly. */
unsigned long long countLines(FILE *fp, int min, int max) {
    rewind(fp);
    char *line = NULL;
    size_t len = 0;
    unsigned long long count = 0;
    /* https://stackoverflow.com/questions/3501338/c-read-file-line-by-line/3501681#3501681 */
    while (getline(&line, &len, fp) != -1) {
        /* https://stackoverflow.com/questions/2693776/removing-trailing-newline-character-from-fgets-input/28462221#28462221 */
        line[strcspn(line, "\n")] = 0; // strip new line
        int length = (int)strlen(line);
        if (length < min || length > max) continue;
        count++;
    }
    /* BUGFIX: getline() allocates the buffer; the original leaked it here
     * (readfile() frees its buffer, this function did not). */
    free(line);
    return count;
}
/* takes output file name, array of Hash structs and length of array as args
writes hashes out to disk */
/* Takes the output file name, an array of Hash structs and its length;
 * writes "plaintext:md5" and "plaintext:sha512" lines for every entry. */
void writefile(char *name, Hash *array, int len) {
    FILE *fp = fopen(name, "w");
    /* BUGFIX: fopen can fail (permissions, read-only fs); the original
     * dereferenced the NULL stream in fprintf. */
    if (fp == NULL) {
        perror(name);
        return;
    }
    for (int i = 0; i < len; i++) {
        fprintf(fp, "%s:%s\n%s:%s\n", array[i].plaintext, array[i].md5, array[i].plaintext, array[i].sha512);
    }
    fclose(fp);
}
/* takes file pointer, array of Hash structs and min and max length as args
reads the wordlist into the array of Hash structs */
/* Populate `array` with strdup'd copies of every wordlist line whose
 * newline-stripped length lies within [min, max].  The caller sizes
 * `array` using countLines() with the same bounds. */
void readfile(FILE *fp, Hash *array, int min, int max) {
    rewind(fp); // start from the beginning of the wordlist
    char *buf = NULL;
    size_t cap = 0;
    int next = 0;
    /* https://stackoverflow.com/questions/3501338/c-read-file-line-by-line/3501681#3501681 */
    while (getline(&buf, &cap, fp) != -1) {
        /* https://stackoverflow.com/questions/2693776/removing-trailing-newline-character-from-fgets-input/28462221#28462221 */
        buf[strcspn(buf, "\n")] = 0; // strip new line
        int length = (int)strlen(buf);
        if (length >= min && length <= max) {
            array[next].plaintext = strdup(buf);
            next++;
        }
    }
    free(buf); // release getline's buffer
}
hdp.c | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <float.h>
#include <inttypes.h>
#include "hdp.h"
#include "hdp_math_utils.h"
#include "sonLib.h"
#include "ranlib.h"
#define N_IG_NUM_PARAMS 4
#ifndef MINUS_INF
#define MINUS_INF -0.5 * DBL_MAX
#endif
#ifndef M_PI
#define M_PI 3.14159265358979323846264338
#endif
// Forward declarations so the two structs below can reference each other.
typedef struct Factor Factor;
typedef struct DirichletProcess DirichletProcess;
// Role of a factor within the HDP's factor tree: the root (BASE), an
// internal node (MIDDLE), or a leaf wrapping one data point (DATA_PT).
typedef enum FactorType {
BASE,
MIDDLE,
DATA_PT
} FactorType;
// A node in the HDP's factor tree (see FactorType for the three roles).
struct Factor {
FactorType factor_type;       // BASE (root), MIDDLE (internal), or DATA_PT (leaf)
struct Factor* parent;        // NULL for BASE factors (set in new_base_factor)
stSet* children;              // child factors; NULL for DATA_PT factors
double* factor_data;          // BASE: malloc'd parameter cache; DATA_PT: pointer into hdp->data
struct DirichletProcess* dp;  // owning Dirichlet process; NULL for DATA_PT factors
};
// One node of the Dirichlet-process hierarchy.
struct DirichletProcess {
int64_t id;                              // index of this DP in hdp->dps
struct HierarchicalDirichletProcess* hdp; // owning HDP
double* gamma;                           // concentration parameter (presumably points into hdp->gamma per depth -- TODO confirm)
int64_t depth;                           // level in the DP tree
struct DirichletProcess* parent;         // NULL for the base DP (see get_dir_proc_parent_id)
stList* children;                        // child Dirichlet processes
stSet* factors;                          // factors currently instantiated in this DP
int64_t num_factor_children;             // decremented when a child factor is destroyed (see destroy_factor)
double base_factor_wt;
double* posterior_predictive;            // presumably evaluated over hdp->sampling_grid -- TODO confirm
double* spline_slopes;                   // presumably spline coefficients for posterior_predictive -- TODO confirm
// scratch values cached during computation -- exact use not visible here
double cached_factor_mean;
double cached_factor_sum_sq_dev;
int64_t cached_factor_size;
bool observed;                           // queried by hdp_check_for_observed()
};
// Top-level handle for the hierarchical Dirichlet process model.
struct HierarchicalDirichletProcess {
bool finalized;              // tree structure frozen (see is_structure_finalized)
double* data;                // observed data points, length data_length
int64_t* data_pt_dp_id;      // data[i] is assigned to the DP with this id
int64_t data_length;
struct DirichletProcess* base_dp;  // root of the DP tree
struct DirichletProcess** dps;     // all DPs, indexed by id; length num_dps
int64_t num_dps;
// normal-inverse gamma parameters
double mu;
double nu;
double two_alpha;            // stored doubled; get_alpha() returns two_alpha / 2
double beta;
double* sampling_grid;       // grid of length grid_length (copied out by get_sampling_grid_copy)
int64_t grid_length;
int64_t samples_taken;
bool splines_finalized;      // sampling finished (see is_sampling_finalized)
// TODO: replace this with my new offset log gamma memo
//struct SumOfLogsMemo* log_sum_memo;
int64_t depth;               // depth of the DP tree; per-depth arrays below have this length
bool sample_gamma;           // when true, gamma is random; the arrays below must be non-NULL
double* gamma;               // per-depth concentration parameters
double* gamma_alpha;         // per-depth gamma-prior alpha (guarded by sample_gamma)
double* gamma_beta;          // per-depth gamma-prior beta (guarded by sample_gamma)
double* w_aux_vector;        // presumably auxiliary variables for gamma resampling -- TODO confirm
bool* s_aux_vector;
stSet* distr_metric_memos;   // DistributionMetricMemo objects registered by new_distr_metric_memo
};
// Lazily-computed table of pairwise metric values between the distributions
// of this HDP's Dirichlet processes.
struct DistributionMetricMemo {
int64_t num_distrs;          // number of Dirichlet processes covered (hdp->num_dps)
double* memo_matrix;         // packed triangle of (n-1)n/2 entries; -1.0 marks "not yet computed"
HierarchicalDirichletProcess* hdp;   // HDP the metric is evaluated on
// metric evaluated on demand for a pair of DP ids
double (*metric_func) (HierarchicalDirichletProcess*, int64_t, int64_t);
};
// True once the HDP's tree structure has been finalized.
bool is_structure_finalized(HierarchicalDirichletProcess* hdp) {
    return hdp->finalized;
}
// True when the concentration (gamma) parameters are sampled rather than fixed.
bool is_gamma_random(HierarchicalDirichletProcess* hdp) {
    return hdp->sample_gamma;
}
// True once sampling is finished and the predictive splines are fixed.
bool is_sampling_finalized(HierarchicalDirichletProcess* hdp) {
    return hdp->splines_finalized;
}
// True if the DP at kmerIndex has been marked observed.
// NOTE(review): kmerIndex is not bounds-checked here, unlike the other
// id-based accessors below -- confirm callers guarantee a valid index.
bool hdp_check_for_observed(HierarchicalDirichletProcess *hdp, int64_t kmerIndex) {
    return hdp->dps[kmerIndex]->observed;
}
// Number of Dirichlet processes in the hierarchy.
int64_t get_num_dir_proc(HierarchicalDirichletProcess* hdp) {
    return hdp->num_dps;
}
// Depth of the Dirichlet-process tree.
int64_t get_depth(HierarchicalDirichletProcess* hdp) {
    return hdp->depth;
}
// Number of data points loaded into the HDP.
int64_t get_num_data(HierarchicalDirichletProcess* hdp) {
    return hdp->data_length;
}
/* Return a freshly malloc'd copy of the observed data array (caller frees). */
double* get_data_copy(HierarchicalDirichletProcess* hdp) {
    int64_t n = hdp->data_length;
    double* copy = (double*) malloc(sizeof(double) * n);
    for (int64_t j = 0; j < n; j++) {
        copy[j] = hdp->data[j];
    }
    return copy;
}
/* Return a freshly malloc'd copy of the per-datum DP-id assignments (caller frees). */
int64_t* get_data_pt_dp_ids_copy(HierarchicalDirichletProcess* hdp) {
    int64_t n = hdp->data_length;
    int64_t* ids = (int64_t*) malloc(sizeof(int64_t) * n);
    for (int64_t j = 0; j < n; j++) {
        ids[j] = hdp->data_pt_dp_id[j];
    }
    return ids;
}
/* Return a freshly malloc'd copy of the per-depth gamma parameters (caller frees). */
double* get_gamma_params_copy(HierarchicalDirichletProcess* hdp) {
    int64_t n = hdp->depth;
    double* out = (double*) malloc(sizeof(double) * n);
    for (int64_t j = 0; j < n; j++) {
        out[j] = hdp->gamma[j];
    }
    return out;
}
// Normal-inverse-gamma prior mean.
double get_mu(HierarchicalDirichletProcess* hdp) {
    return hdp->mu;
}
// Normal-inverse-gamma prior nu.
double get_nu(HierarchicalDirichletProcess* hdp) {
    return hdp->nu;
}
// Alpha is stored internally doubled (two_alpha), so halve it on the way out.
double get_alpha(HierarchicalDirichletProcess* hdp) {
    return hdp->two_alpha / 2.0;
}
// Normal-inverse-gamma prior beta.
double get_beta(HierarchicalDirichletProcess* hdp) {
    return hdp->beta;
}
// Number of points in the sampling grid.
int64_t get_grid_length(HierarchicalDirichletProcess* hdp) {
    return hdp->grid_length;
}
/* Return a freshly malloc'd copy of the sampling grid (caller frees). */
double* get_sampling_grid_copy(HierarchicalDirichletProcess* hdp) {
    int64_t n = hdp->grid_length;
    double* grid = (double*) malloc(sizeof(double) * n);
    for (int64_t j = 0; j < n; j++) {
        grid[j] = hdp->sampling_grid[j];
    }
    return grid;
}
/* Return a malloc'd copy of the per-depth gamma-prior alpha parameters.
 * Exits with an error if the HDP is not configured to sample gamma. */
double* get_gamma_alpha_params_copy(HierarchicalDirichletProcess* hdp) {
    if (!hdp->sample_gamma) {
        fprintf(stderr, "Hierarchical Dirichlet process is not sampling gamma parameters.");
        exit(EXIT_FAILURE);
    }
    int64_t n = hdp->depth;
    double* out = (double*) malloc(sizeof(double) * n);
    for (int64_t j = 0; j < n; j++) {
        out[j] = hdp->gamma_alpha[j];
    }
    return out;
}
/* Return a malloc'd copy of the per-depth gamma-prior beta parameters.
 * Exits with an error if the HDP is not configured to sample gamma. */
double* get_gamma_beta_params_copy(HierarchicalDirichletProcess* hdp) {
    if (!hdp->sample_gamma) {
        fprintf(stderr, "Hierarchical Dirichlet process is not sampling gamma parameters.");
        exit(EXIT_FAILURE);
    }
    int64_t n = hdp->depth;
    double* out = (double*) malloc(sizeof(double) * n);
    for (int64_t j = 0; j < n; j++) {
        out[j] = hdp->gamma_beta[j];
    }
    return out;
}
/* Number of factors currently instantiated in the DP with id dp_id.
 * Exits with an error for an out-of-range id. */
int64_t get_dir_proc_num_factors(HierarchicalDirichletProcess* hdp, int64_t dp_id) {
    if (dp_id < 0 || dp_id >= hdp->num_dps) {
        fprintf(stderr, "Hierarchical Dirichlet process has no Dirichlet process with this ID.\n");
        exit(EXIT_FAILURE);
    }
    return stSet_size(hdp->dps[dp_id]->factors);
}
/* Id of the parent of the DP with id dp_id, or -1 for the base (root) DP.
 * Exits with an error for an out-of-range id. */
int64_t get_dir_proc_parent_id(HierarchicalDirichletProcess* hdp, int64_t dp_id) {
    if (dp_id < 0 || dp_id >= hdp->num_dps) {
        fprintf(stderr, "Hierarchical Dirichlet process has no Dirichlet process with this ID.\n");
        exit(EXIT_FAILURE);
    }
    DirichletProcess* parent = hdp->dps[dp_id]->parent;
    return (parent == NULL) ? -1 : parent->id;
}
/* Allocate and register a memo table for a pairwise distribution metric.
 * All (num_dps choose 2) packed-triangle entries start at -1.0, which
 * marks them as "not yet computed". */
DistributionMetricMemo* new_distr_metric_memo(HierarchicalDirichletProcess* hdp,
                                              double (*metric_func) (HierarchicalDirichletProcess*, int64_t, int64_t)) {
    DistributionMetricMemo* memo = (DistributionMetricMemo*) malloc(sizeof(DistributionMetricMemo));
    int64_t num_dps = hdp->num_dps;
    memo->num_distrs = num_dps;
    int64_t num_entries = (num_dps * (num_dps - 1)) / 2;
    memo->memo_matrix = (double*) malloc(sizeof(double) * num_entries);
    for (int64_t j = 0; j < num_entries; j++) {
        memo->memo_matrix[j] = -1.0;
    }
    memo->hdp = hdp;
    memo->metric_func = metric_func;
    stSet_insert(hdp->distr_metric_memos, memo);
    return memo;
}
/* Destructor callback (void* signature for stSet): frees a
 * DistributionMetricMemo together with its memo matrix. */
void destroy_distr_metric_memo(void* memo) {
    DistributionMetricMemo* m = (DistributionMetricMemo*) memo;
    free(m->memo_matrix);
    free(m);
}
/* Store normal-inverse-gamma parameters plus a cached log-posterior term
 * into a BASE factor's parameter array (slots 0..4: mu, nu, two_alpha,
 * beta, log_post_term).  Exits if the factor is not a BASE factor. */
void cache_base_factor_params(Factor* fctr, double mu, double nu, double two_alpha, double beta, double log_post_term) {
    if (fctr->factor_type != BASE) {
        fprintf(stderr, "Can only cache parameters for base factors.\n");
        exit(EXIT_FAILURE);
    }
    double* slots = fctr->factor_data;
    slots[0] = mu;
    slots[1] = nu;
    slots[2] = two_alpha;
    slots[3] = beta;
    slots[4] = log_post_term;
}
/* Allocate the root (BASE) factor, seed its parameter cache from the HDP's
 * prior (with log_post_term = 1.0), and register it with the base DP. */
Factor* new_base_factor(HierarchicalDirichletProcess* hdp) {
    Factor* fctr = (Factor*) malloc(sizeof(Factor));
    fctr->factor_type = BASE;
    fctr->factor_data = (double*) malloc(sizeof(double) * (N_IG_NUM_PARAMS + 1));
    fctr->parent = NULL;
    fctr->children = stSet_construct();
    fctr->dp = hdp->base_dp;
    cache_base_factor_params(fctr, hdp->mu, hdp->nu, hdp->two_alpha, hdp->beta, 1.0);
    stSet_insert(hdp->base_dp->factors, (void*) fctr);
    return fctr;
}
/* Allocate a MIDDLE (internal) factor in a non-root DP and register it
 * with that DP.  The parent link is assigned by the caller.
 * Exits if dp is the root Dirichlet process. */
Factor* new_middle_factor(DirichletProcess* dp) {
    if (dp->parent == NULL) {
        fprintf(stderr, "Attempted to create middle factor in root Dirichlet process.\n");
        exit(EXIT_FAILURE);
    }
    Factor* fctr = (Factor*) malloc(sizeof(Factor));
    fctr->factor_type = MIDDLE;
    fctr->factor_data = NULL;
    fctr->parent = NULL; // note: assigning to parent handled externally
    fctr->children = stSet_construct();
    fctr->dp = dp;
    stSet_insert(dp->factors, (void*) fctr);
    return fctr;
}
// Create a leaf factor wrapping the data point at data_pt_idx.  Its
// factor_data points into hdp->data (not owned); parent is assigned
// externally and leaves have neither children nor an owning DP.
Factor* new_data_pt_factor(HierarchicalDirichletProcess* hdp, int64_t data_pt_idx) {
    Factor* leaf = (Factor*) malloc(sizeof(Factor));
    leaf->factor_type = DATA_PT;
    leaf->factor_data = &(hdp->data[data_pt_idx]);
    leaf->parent = NULL;  // note: assigning to parent handled externally
    leaf->children = NULL;
    leaf->dp = NULL;
    return leaf;
}
// Tear down a single factor: release its (empty) child set, detach it from
// its parent -- recursively destroying the parent if this was its last child
// -- free a BASE factor's owned parameter array, and unregister the factor
// from its Dirichlet process.  It is an error to destroy a factor that still
// has children.
void destroy_factor(Factor* fctr) {
stSet* children = fctr->children;
if (children != NULL) {
if (stSet_size(children) > 0) {
fprintf(stderr, "Attempted to destroy factor that still has children.\n");
exit(EXIT_FAILURE);
}
stSet_destruct(children);
}
Factor* parent = fctr->parent;
if (parent != NULL) {
stSet_remove(parent->children, (void*) fctr);
// the parent's DP loses one factor child
(parent->dp->num_factor_children)--;
// factors with no remaining children are garbage-collected up the chain
if (stSet_size(parent->children) == 0) {
destroy_factor(parent);
}
}
if (fctr->factor_type == BASE) {
// only BASE factors own their factor_data array (leaves alias hdp->data)
free(fctr->factor_data);
}
DirichletProcess* dp = fctr->dp;
if (dp != NULL) {
stSet_remove(dp->factors, (void*) fctr);
}
free(fctr);
}
// Walk up the parent chain until the BASE factor is reached; returns NULL if
// the chain ends without finding one.
Factor* get_base_factor(Factor* fctr) {
    Factor* cur = fctr;
    while (cur != NULL && cur->factor_type != BASE) {
        cur = cur->parent;
    }
    return cur;
}
// Return the observed value stored in a DATA_PT factor; aborts on any other
// factor type.
double get_factor_data_pt(Factor* fctr) {
    if (fctr->factor_type == DATA_PT) {
        return *(fctr->factor_data);
    }
    fprintf(stderr, "Attempted to access data point from non-leaf factor.\n");
    exit(EXIT_FAILURE);
}
// Accumulate the sum of all data point values below fctr into *sum and the
// number of data points into *num_data.
void get_factor_sum_internal(Factor* fctr, double* sum, int64_t* num_data) {
    if (fctr->factor_type != DATA_PT) {
        stSetIterator* iter = stSet_getIterator(fctr->children);
        for (Factor* child = (Factor*) stSet_getNext(iter); child != NULL;
             child = (Factor*) stSet_getNext(iter)) {
            get_factor_sum_internal(child, sum, num_data);
        }
        stSet_destructIterator(iter);
        return;
    }
    // leaf: contribute exactly one observation
    // TODO: there should be a way to use the parent's counters instead of recounting the data pts
    *sum += get_factor_data_pt(fctr);
    (*num_data)++;
}
// Accumulate into *sum_sq_dev the squared deviation from `center` of every
// data point below fctr.
void get_factor_ssd_internal(Factor* fctr, double center, double* sum_sq_dev) {
    if (fctr->factor_type != DATA_PT) {
        stSetIterator* iter = stSet_getIterator(fctr->children);
        for (Factor* child = (Factor*) stSet_getNext(iter); child != NULL;
             child = (Factor*) stSet_getNext(iter)) {
            get_factor_ssd_internal(child, center, sum_sq_dev);
        }
        stSet_destructIterator(iter);
        return;
    }
    double dev = get_factor_data_pt(fctr) - center;
    *sum_sq_dev += dev * dev;
}
// Compute, in two passes over the subtree, the mean of all data points below
// fctr, their sum of squared deviations from that mean, and their count.
void get_factor_stats(Factor* fctr, double* mean_out, double* sum_sq_dev_out, int64_t* num_data_out) {
    double total = 0.0;
    int64_t count = 0;
    get_factor_sum_internal(fctr, &total, &count);
    double mean = total / (double) count;
    double ssd = 0.0;
    get_factor_ssd_internal(fctr, mean, &ssd);
    *mean_out = mean;
    *sum_sq_dev_out = ssd;
    *num_data_out = count;
}
// Conjugate Normal-Inverse Gamma update: fold a block of num_data
// observations -- summarized by their mean and sum of squared deviations --
// into the base factor's cached posterior parameters, then refresh the
// cached log posterior normalization term.
void add_update_base_factor_params(Factor* fctr, double mean, double sum_sq_devs, double num_data) {
double* param_array = fctr->factor_data;
double mu_prev = param_array[0];
double nu_prev = param_array[1];
double two_alpha_prev = param_array[2];
double beta_prev = param_array[3];
// standard NIG posterior updates for n observations
double nu_post = nu_prev + num_data;
double mu_post = (mu_prev * nu_prev + mean * num_data) / nu_post;
double two_alpha_post = two_alpha_prev + num_data;
// cross term: nu * n * (mean - mu)^2 / (nu + n)
double mean_dev = mean - mu_prev;
double sq_mean_dev = nu_prev * num_data * mean_dev * mean_dev / nu_post;
double beta_post = beta_prev + .5 * (sum_sq_devs + sq_mean_dev);
double log_post_term = log_posterior_conditional_term(nu_post, two_alpha_post, beta_post);//,
//fctr->dp->hdp->log_sum_memo);
cache_base_factor_params(fctr, mu_post, nu_post, two_alpha_post, beta_post, log_post_term);
}
// Exact inverse of add_update_base_factor_params: remove a block of num_data
// observations from the cached posterior, recovering the parameters as they
// were before the block was added.  Note the cross term divides by nu_post,
// mirroring the add path so the round trip cancels exactly.
void remove_update_base_factor_params(Factor* fctr, double mean, double sum_sq_devs, double num_data) {
double* param_array = fctr->factor_data;
double mu_post = param_array[0];
double nu_post = param_array[1];
double two_alpha_post = param_array[2];
double beta_post = param_array[3];
double nu_prev = nu_post - num_data;
double mu_prev = (mu_post * nu_post - mean * num_data) / nu_prev;
double two_alpha_prev = two_alpha_post - num_data;
double mean_dev = mean - mu_prev;
double sq_mean_dev = nu_prev * num_data * mean_dev * mean_dev / nu_post;
double beta_prev = beta_post - 0.5 * (sum_sq_devs + sq_mean_dev);
double log_post_term = log_posterior_conditional_term(nu_prev, two_alpha_prev, beta_prev);//,
//fctr->dp->hdp->log_sum_memo);
cache_base_factor_params(fctr, mu_prev, nu_prev, two_alpha_prev, beta_prev, log_post_term);
}
// Joint log likelihood of reassigning the block of data points cached on
// fctr's DP (count / mean / sum of squared deviations stashed in the DP's
// cached_factor_* fields) to the base factor above `parent`: the log ratio
// of the NIG posterior normalizers with and without that block, plus the
// Gaussian constant -n/2 * log(2*pi).
double factor_parent_joint_log_likelihood(Factor* fctr, Factor* parent) {
Factor* base_fctr = get_base_factor(parent);
DirichletProcess* dp = fctr->dp;
double num_reassign = (double) dp->cached_factor_size;
double mean_reassign = dp->cached_factor_mean;
double sum_sq_devs = dp->cached_factor_sum_sq_dev;
// current (denominator) posterior parameters of the base factor
double* param_array = base_fctr->factor_data;
double mu_denom = param_array[0];
double nu_denom = param_array[1];
double two_alpha_denom = param_array[2];
double beta_denom = param_array[3];
// hypothetical (numerator) posterior after absorbing the cached block
double nu_numer = nu_denom + num_reassign;
double two_alpha_numer = two_alpha_denom + num_reassign;
double mean_dev = mean_reassign - mu_denom;
double sq_mean_dev = nu_denom * num_reassign * mean_dev * mean_dev / nu_numer;
double beta_numer = beta_denom + 0.5 * (sum_sq_devs + sq_mean_dev);
// denominator term was cached when the base factor's params were updated
double log_denom = param_array[4];
double log_numer = log_posterior_conditional_term(nu_numer, two_alpha_numer, beta_numer);//,
//dp->hdp->log_sum_memo);
return -0.5 * num_reassign * log(2.0 * M_PI) + log_numer - log_denom;
}
// Predictive likelihood of a single data point under the base factor above
// `parent`: the ratio of the NIG posterior normalizer with the point added
// to the cached normalizer without it, scaled by 1/sqrt(2*pi).
double data_pt_factor_parent_likelihood(Factor* data_pt_fctr, Factor* parent) {
if (data_pt_fctr->factor_type != DATA_PT) {
fprintf(stderr, "Can only access data point likelihood for data point factors.\n");
exit(EXIT_FAILURE);
}
double data_pt = get_factor_data_pt(data_pt_fctr);
Factor* base_fctr = get_base_factor(parent);
double* param_array = base_fctr->factor_data;
double mu_denom = param_array[0];
double nu_denom = param_array[1];
double two_alpha_denom = param_array[2];
double beta_denom = param_array[3];
// posterior parameters after absorbing this single observation
double nu_numer = nu_denom + 1.0;
double mean_dev = data_pt - mu_denom;
double sq_mean_dev = nu_denom * mean_dev * mean_dev / nu_numer;
double two_alpha_numer = two_alpha_denom + 1.0;
double beta_numer = beta_denom + 0.5 * sq_mean_dev;
double log_denom = param_array[4];
double log_numer = log_posterior_conditional_term(nu_numer, two_alpha_numer, beta_numer);//,
//base_fctr->dp->hdp->log_sum_memo);
return (1.0 / sqrt(2.0 * M_PI)) * exp(log_numer - log_denom);
}
// Evaluate the posterior predictive density of a BASE factor at each of the
// `length` points in x, writing the results into pdf_out.  Each value is the
// ratio of the NIG posterior normalizer with a hypothetical observation at
// x[i] added to the cached normalizer without it, scaled by 1/sqrt(2*pi).
void evaluate_posterior_predictive(Factor* base_fctr, double* x, double* pdf_out, int64_t length) {
    if (base_fctr->factor_type != BASE) {
        fprintf(stderr, "Can only evaluate posterior predictive of base factors.\n");
        exit(EXIT_FAILURE);
    }
    double* params = base_fctr->factor_data;
    double mu_denom = params[0];
    double nu_denom = params[1];
    double two_alpha_denom = params[2];
    double beta_denom = params[3];
    double log_denom = params[4];
    // posterior parameters after one additional observation
    double nu_numer = nu_denom + 1.0;
    double two_alpha_numer = two_alpha_denom + 1.0;
    double nu_ratio = nu_denom / nu_numer;
    double pi_factor = 1.0 / sqrt(2.0 * M_PI);
    for (int64_t i = 0; i < length; i++) {
        double mean_dev = x[i] - mu_denom;
        double sq_mean_dev = nu_ratio * mean_dev * mean_dev;
        double beta_numer = beta_denom + 0.5 * sq_mean_dev;
        double log_numer = log_posterior_conditional_term(nu_numer, two_alpha_numer, beta_numer);
        pdf_out[i] = pi_factor * exp(log_numer - log_denom);
    }
}
// Evaluate the HDP's prior predictive density at each of the `length` grid
// points in x, writing the results into pdf_out.
void evaluate_prior_predictive(HierarchicalDirichletProcess* hdp,
                               double* x, double* pdf_out, int64_t length) {
    //TODO: this could be made more efficient with some precomputed variables stashed in HDP
    const double mu = hdp->mu;
    const double nu = hdp->nu;
    const double two_alpha = hdp->two_alpha;
    const double beta = hdp->beta;
    const double nu_factor = nu / (2.0 * (nu + 1.0) * beta);
    // Gamma-function ratio term computed via lgamma for numerical stability
    const double alpha_term = exp(lgamma(.5 * (two_alpha + 1.0)) - lgamma(.5 * two_alpha));
    const double beta_term = sqrt(nu_factor / M_PI);
    const double constant_term = alpha_term * beta_term;
    const double alpha_power = -0.5 * (two_alpha + 1.0);
    for (int64_t i = 0; i < length; i++) {
        const double dev = x[i] - mu;
        const double var_term = pow(1.0 + nu_factor * dev * dev, alpha_power);
        pdf_out[i] = constant_term * var_term;
    }
}
// Prior predictive density of a single data point under the HDP's
// Normal-Inverse Gamma hyperparameters, i.e. the likelihood of seating the
// point at a brand-new base-level table.
//
// fctr must be a DATA_PT factor.
double prior_likelihood(HierarchicalDirichletProcess* hdp, Factor* fctr) {
    if (fctr->factor_type != DATA_PT) {
        fprintf(stderr, "Cannot calculate point prior likelihood from non-data point factor.\n");
        // Previously this only warned and fell through (get_factor_data_pt
        // below would then abort anyway); exit here for consistency with the
        // other factor-type guards in this file.
        exit(EXIT_FAILURE);
    }
    //TODO: this could be made more efficient with some precomputed variables stashed in HDP
    double mu = hdp->mu;
    double nu = hdp->nu;
    double dbl_two_alpha = hdp->two_alpha;
    double beta = hdp->beta;
    double data_pt = get_factor_data_pt(fctr);
    double dev = data_pt - mu;
    // Gamma-function ratio computed via lgamma for numerical stability
    double alpha_term = exp(lgamma(.5 * (dbl_two_alpha + 1.0)) - lgamma(.5 * dbl_two_alpha));
    double nu_term = nu / (2.0 * (nu + 1.0) * beta);
    double beta_term = pow(1.0 + nu_term * dev * dev, -0.5 * (dbl_two_alpha + 1.0));
    return alpha_term * sqrt(nu_term / M_PI) * beta_term;
}
// Joint log marginal likelihood, under the HDP's Normal-Inverse Gamma prior,
// of the block of data points cached on fctr's DP (cached_factor_size /
// cached_factor_mean / cached_factor_sum_sq_dev), i.e. the log likelihood of
// assigning the whole block to a brand-new base-level table.
//
// fctr must be a MIDDLE factor: only middle factors carry a DP with cached
// block statistics.
double prior_joint_log_likelihood(HierarchicalDirichletProcess* hdp, Factor* fctr) {
    if (fctr->factor_type != MIDDLE) {
        fprintf(stderr, "Cannot calculate joint prior likelihood from non-middle factor.\n");
        // Previously this only warned and fell through; a DATA_PT factor has
        // dp == NULL, so continuing would dereference NULL below.  Fail fast
        // like the other factor-type guards in this file.
        exit(EXIT_FAILURE);
    }
    double mu = hdp->mu;
    double nu = hdp->nu;
    double dbl_two_alpha = hdp->two_alpha;
    double beta = hdp->beta;
    DirichletProcess* dp = fctr->dp;
    int64_t num_reassign = dp->cached_factor_size;
    double dbl_reassign = (double) num_reassign;
    double mean_reassign = dp->cached_factor_mean;
    double sum_sq_devs = dp->cached_factor_sum_sq_dev;
    // cross term of the NIG update: nu * n * (mean - mu)^2 / (nu + n)
    double mean_dev = mean_reassign - mu;
    double sq_mean_dev = nu * dbl_reassign * mean_dev * mean_dev / (nu + dbl_reassign);
    // Gamma-function ratio computed via lgamma for numerical stability
    double log_alpha_term = lgamma(.5 * (dbl_two_alpha + dbl_reassign)) - lgamma(.5 * dbl_two_alpha);
    double log_nu_term = 0.5 * (log(nu) - log(nu + dbl_reassign));
    double log_pi_term = 0.5 * dbl_reassign * log(2.0 * M_PI);
    double log_beta_term_1 = dbl_two_alpha * log(beta);
    double log_beta_term_2 = (dbl_two_alpha + dbl_reassign)
                             * log(beta + 0.5 * (sum_sq_devs + sq_mean_dev));
    return log_alpha_term + log_nu_term - log_pi_term + 0.5 * (log_beta_term_1 - log_beta_term_2);
}
// TODO: figure out how to break into chunks and spin up threads to reduce the sum behind the iterator
// Likelihood of attaching the data point factor `fctr` to a new (unobserved)
// table in `dp`: a Chinese-restaurant mixture over the parent DP's existing
// tables plus gamma times the recursive unobserved term one level up,
// normalized by (gamma + number of factor children).
double unobserved_factor_likelihood(Factor* fctr, DirichletProcess* dp) {
DirichletProcess* parent_dp = dp->parent;
if (parent_dp == NULL) {
// at the root, fall back to the prior predictive
return prior_likelihood(dp->hdp, fctr);
}
else {
double parent_gamma = *(parent_dp->gamma);
double likelihood = 0.0;
double next_height_unobs_likelihood;
int64_t num_parent_fctrs = stSet_size(parent_dp->factors);
Factor** parent_fctrs = (Factor**) malloc(sizeof(Factor*) * num_parent_fctrs);
// One thread recurses up the tree (single nowait) while the others sum over
// the parent's factors; the implicit barrier at the end of the parallel
// region guarantees both results are ready before they are combined below.
#pragma omp parallel shared(likelihood,next_height_unobs_likelihood,parent_dp,num_parent_fctrs,parent_fctrs)
{
#pragma omp single nowait
next_height_unobs_likelihood = unobserved_factor_likelihood(fctr, parent_dp);
// materialize the factor set into an array so `omp for` can split it
#pragma omp single
{
stSetIterator* parent_fctr_iter = stSet_getIterator(parent_dp->factors);
for (int64_t i = 0; i < num_parent_fctrs; i++) {
parent_fctrs[i] = (Factor*) stSet_getNext(parent_fctr_iter);
}
stSet_destructIterator(parent_fctr_iter);
}
double local_likelihood = 0.0;
Factor* parent_fctr;
// each table contributes (table size) * p(data point | table's base factor)
#pragma omp for nowait
for (int64_t i = 0; i < num_parent_fctrs; i++) {
parent_fctr = parent_fctrs[i];
local_likelihood += stSet_size(parent_fctr->children) * data_pt_factor_parent_likelihood(fctr, parent_fctr);
}
#pragma omp critical
likelihood += local_likelihood;
}
free(parent_fctrs);
likelihood += parent_gamma * next_height_unobs_likelihood;
likelihood /= (parent_gamma + (double) parent_dp->num_factor_children);
return likelihood;
}
}
//double unobserved_factor_likelihood(Factor* fctr, DirichletProcess* dp) {
// DirichletProcess* parent_dp = dp->parent;
// if (parent_dp == NULL) {
// return prior_likelihood(dp->hdp, fctr);
// }
// else {
// double parent_gamma = *(parent_dp->gamma);
// double likelihood = 0.0;
//
// stSetIterator* parent_fctr_iter = stSet_getIterator(parent_dp->factors);
//
// Factor* parent_fctr = (Factor*) stSet_getNext(parent_fctr_iter);
// double fctr_size;
// while (parent_fctr != NULL) {
// fctr_size = (double) stSet_size(parent_fctr->children);
// likelihood += fctr_size * data_pt_factor_parent_likelihood(fctr, parent_fctr);
// parent_fctr = (Factor*) stSet_getNext(parent_fctr_iter);
// }
// stSet_destructIterator(parent_fctr_iter);
//
// likelihood += parent_gamma * unobserved_factor_likelihood(fctr, parent_dp);
//
// likelihood /= (parent_gamma + (double) parent_dp->num_factor_children);
//
// return likelihood;
// }
//}
// Log-space analogue of unobserved_factor_likelihood for a whole block of
// data points: joint log likelihood of attaching `fctr` (whose DP caches the
// block statistics) to a new table in `dp`, combining contributions with
// add_logs for numerical stability.
double unobserved_factor_joint_log_likelihood(Factor* fctr, DirichletProcess* dp) {
DirichletProcess* parent_dp = dp->parent;
if (parent_dp == NULL) {
// at the root, fall back to the joint prior
return prior_joint_log_likelihood(dp->hdp, fctr);
}
else {
double parent_gamma = *(parent_dp->gamma);
double log_likelihood = MINUS_INF;
int64_t num_parent_fctrs = stSet_size(parent_dp->factors);
Factor** parent_fctrs = (Factor**) malloc(sizeof(Factor*) * num_parent_fctrs);
double next_height_unobs_log_likelihood;
// One thread recurses up the tree (single nowait) while the others sum over
// the parent's factors; the implicit barrier at the end of the parallel
// region guarantees both results are ready before they are combined below.
#pragma omp parallel shared(log_likelihood,next_height_unobs_log_likelihood,parent_dp,num_parent_fctrs,parent_fctrs)
{
#pragma omp single nowait
next_height_unobs_log_likelihood = unobserved_factor_joint_log_likelihood(fctr, parent_dp);
// materialize the factor set into an array so `omp for` can split it
#pragma omp single
{
stSetIterator* parent_fctr_iter = stSet_getIterator(parent_dp->factors);
for (int64_t i = 0; i < num_parent_fctrs; i++) {
parent_fctrs[i] = (Factor*) stSet_getNext(parent_fctr_iter);
}
stSet_destructIterator(parent_fctr_iter);
}
double local_log_likelihood = MINUS_INF;
double log_fctr_size;
Factor* parent_fctr;
// each table contributes log(table size) + joint log likelihood of the block
#pragma omp for nowait
for (int64_t i = 0; i < num_parent_fctrs; i++) {
parent_fctr = parent_fctrs[i];
log_fctr_size = log(stSet_size(parent_fctr->children));
local_log_likelihood = add_logs(local_log_likelihood,
log_fctr_size + factor_parent_joint_log_likelihood(fctr, parent_fctr));
}
#pragma omp critical
log_likelihood = add_logs(log_likelihood, local_log_likelihood);
}
free(parent_fctrs);
log_likelihood = add_logs(log_likelihood,
log(parent_gamma) + next_height_unobs_log_likelihood);
log_likelihood -= log(parent_gamma + parent_dp->num_factor_children);
return log_likelihood;
}
}
//double unobserved_factor_joint_log_likelihood(Factor* fctr, DirichletProcess* dp) {
// DirichletProcess* parent_dp = dp->parent;
// if (parent_dp == NULL) {
// return prior_joint_log_likelihood(dp->hdp, fctr);
// }
// else {
// double parent_gamma = *(parent_dp->gamma);
//
// double log_likelihood = MINUS_INF;
// stSetIterator* parent_fctr_iter = stSet_getIterator(parent_dp->factors);
// Factor* parent_fctr = (Factor*) stSet_getNext(parent_fctr_iter);
// double log_fctr_size;
// while (parent_fctr != NULL) {
// log_fctr_size = log((double) stSet_size(parent_fctr->children));
// log_likelihood = add_logs(log_likelihood,
// log_fctr_size + factor_parent_joint_log_likelihood(fctr, parent_fctr));
// parent_fctr = (Factor*) stSet_getNext(parent_fctr_iter);
// }
// stSet_destructIterator(parent_fctr_iter);
//
// log_likelihood = add_logs(log_likelihood,
// log(parent_gamma) + unobserved_factor_joint_log_likelihood(fctr, parent_dp));
//
// log_likelihood -= log(parent_gamma + (double) parent_dp->num_factor_children);
//
// return log_likelihood;
// }
//}
// Allocate a Dirichlet process with empty child/factor collections and all
// scalar fields zeroed.  The caller is responsible for setting id and hdp
// and for linking the DP into the tree (parent/children); gamma and depth
// are filled in later by verify_tree_depth.
DirichletProcess* new_dir_proc() {
DirichletProcess* dp = (DirichletProcess*) malloc(sizeof(DirichletProcess));
dp->gamma = NULL;
dp->depth = 0;
dp->parent = NULL;
dp->children = stList_construct();
dp->factors = stSet_construct();
dp->num_factor_children = 0;
// cached block statistics used by the *_joint_log_likelihood routines
dp->cached_factor_mean = 0.0;
dp->cached_factor_sum_sq_dev = 0.0;
dp->cached_factor_size = 0;
dp->base_factor_wt = 0.0;
// allocated lazily when the DP is first marked observed
dp->posterior_predictive = NULL;
dp->spline_slopes = NULL;
dp->observed = false;
return dp;
}
// Recursively destroy every leaf (data point) factor below fctr.  Interior
// factors are never freed directly here: destroy_factor() on the last child
// cascades destruction up through the parents automatically.
// NOTE(review): destroying a leaf removes it from the child set currently
// being iterated (and destroying the last leaf destroys fctr itself, child
// set included, mid-iteration) -- this relies on the stSet iterator
// tolerating removal during iteration; verify against the stSet
// implementation.
void clear_factor_tree(Factor* fctr) {
if (fctr->children != NULL) {
stSetIterator* child_fctr_iter = stSet_getIterator(fctr->children);
Factor* child_fctr = (Factor*) stSet_getNext(child_fctr_iter);
while (child_fctr != NULL) {
clear_factor_tree(child_fctr);
child_fctr = (Factor*) stSet_getNext(child_fctr_iter);
}
stSet_destructIterator(child_fctr_iter);
}
else {
// note: this will trigger automatic destruction of parent factors
destroy_factor(fctr);
}
}
// Destroy every factor registered with dp by clearing each factor's subtree
// of data points (which cascades destruction of the factors themselves).
// NOTE(review): clear_factor_tree ultimately removes factors from dp->factors
// while this loop iterates that set -- presumably the stSet iterator
// tolerates removal during iteration; verify.
void destroy_dir_proc_factor_tree(DirichletProcess* dp) {
if (stSet_size(dp->factors) == 0) {
return;
}
stSetIterator* fctr_iter = stSet_getIterator(dp->factors);
Factor* fctr = (Factor*) stSet_getNext(fctr_iter);
while (fctr != NULL) {
clear_factor_tree(fctr);
fctr = (Factor*) stSet_getNext(fctr_iter);
}
stSet_destructIterator(fctr_iter);
}
// Recursively free a DP subtree: its factor tree, its children, its owned
// arrays, and finally the DP itself, unlinking it from its parent's child
// list on the way out.
// NOTE(review): destroy_dir_proc(dp_child) ends by calling
// stList_removeItem(dp->children, ...) while this loop is iterating that
// same list -- presumably the stList iterator tolerates removal during
// iteration; verify.
void destroy_dir_proc(DirichletProcess* dp) {
destroy_dir_proc_factor_tree(dp);
stSet_destruct(dp->factors);
if (dp->children != NULL) {
stListIterator* st_iterator = stList_getIterator(dp->children);
DirichletProcess* dp_child = (DirichletProcess*) stList_getNext(st_iterator);
while (dp_child != NULL) {
destroy_dir_proc(dp_child);
dp_child = (DirichletProcess*) stList_getNext(st_iterator);
}
stList_destructIterator(st_iterator);
stList_destruct(dp->children);
}
if (dp->parent != NULL) {
stList_removeItem(dp->parent->children, (void*) dp);
}
// free(NULL) is a no-op for DPs that were never marked observed
free(dp->posterior_predictive);
free(dp->spline_slopes);
free(dp);
}
// Construct an HDP with fixed concentration parameters (gamma may be NULL,
// in which case the per-depth concentrations are installed later, e.g. by
// the Gamma-prior constructor below).
//
// Validates the Normal-Inverse Gamma hyperparameters (nu, beta > 0; alpha an
// integer or half-integer > 1.0) and any supplied gammas, allocates num_dps
// unlinked Dirichlet processes (the caller builds the tree), and sets up the
// sampling grid.
HierarchicalDirichletProcess* new_hier_dir_proc(int64_t num_dps, int64_t depth, double* gamma, double sampling_grid_start,
                                                double sampling_grid_stop, int64_t sampling_grid_length, double mu,
                                                double nu, double alpha, double beta) {
    if (nu <= 0.0) {
        fprintf(stderr, "nu parameter of Normal-Inverse Gamma distribution must be positive.\n");
        exit(EXIT_FAILURE);
    }
    if (beta <= 0.0) {
        fprintf(stderr, "beta parameter of Normal-Inverse Gamma distribution must be positive.\n");
        exit(EXIT_FAILURE);
    }
    // 2 * alpha must be a whole number.  The original test read
    // `2 * alpha != (int64_t) 2 * alpha`, but the cast binds only to the
    // literal 2 (parsed as ((int64_t) 2) * alpha), so the integrality check
    // compared 2*alpha to itself and could never fire; cast the product.
    double dbl_two_alpha = 2.0 * alpha;
    if (dbl_two_alpha != (double) ((int64_t) dbl_two_alpha) || alpha <= 1.0) {
        fprintf(stderr, "Normal-Inverse Gamma parameter 'alpha' must be integer or half-integer > 1.0.\n");
        exit(EXIT_FAILURE);
    }
    if (gamma != NULL) {
        for (int64_t i = 0; i < depth; i++) {
            if (gamma[i] <= 0) {
                fprintf(stderr, "Concentration parameter gamma must be postive.\n");
                exit(EXIT_FAILURE);
            }
        }
    }
    if (num_dps < 2) {
        fprintf(stderr, "Hierarchical Dirichlet process formalism requires >= 2 Dirichlet Processes.\n");
        exit(EXIT_FAILURE);
    }
    double* grid = linspace(sampling_grid_start, sampling_grid_stop, sampling_grid_length);
    HierarchicalDirichletProcess* hdp = (HierarchicalDirichletProcess*) malloc(sizeof(HierarchicalDirichletProcess));
    // normal-inverse gamma parameters
    hdp->mu = mu;
    hdp->nu = nu;
    hdp->two_alpha = dbl_two_alpha;
    hdp->beta = beta;
    hdp->gamma = gamma;
    hdp->depth = depth;
    hdp->finalized = false;
    hdp->num_dps = num_dps;
    // allocate the DPs; tree links and per-depth gammas are established later
    DirichletProcess** dps = (DirichletProcess**) malloc(sizeof(DirichletProcess*) * num_dps);
    for (int64_t i = 0; i < num_dps; i++) {
        DirichletProcess* dp = new_dir_proc();
        dp->id = i;
        dp->hdp = hdp;
        dps[i] = dp;
    }
    hdp->dps = dps;
    hdp->base_dp = NULL;
    hdp->sampling_grid = grid;
    hdp->grid_length = sampling_grid_length;
    hdp->samples_taken = 0;
    hdp->splines_finalized = false;
    hdp->data = NULL;
    hdp->data_pt_dp_id = NULL;
    hdp->data_length = 0;
    // fields used only when concentration parameters are sampled
    hdp->sample_gamma = false;
    hdp->gamma_alpha = NULL;
    hdp->gamma_beta = NULL;
    hdp->s_aux_vector = NULL;
    hdp->w_aux_vector = NULL;
    hdp->distr_metric_memos = stSet_construct2(&destroy_distr_metric_memo);
    return hdp;
}
// Construct an HDP whose per-depth concentration parameters receive
// Gamma(gamma_alpha[d], gamma_beta[d]) priors and are resampled during
// sampling instead of being fixed.  The concentrations are initialized to
// their prior expected values.
HierarchicalDirichletProcess* new_hier_dir_proc_2(int64_t num_dps, int64_t depth, double* gamma_alpha, double* gamma_beta,
                                                  double sampling_grid_start, double sampling_grid_stop,
                                                  int64_t sampling_grid_length, double mu, double nu, double alpha,
                                                  double beta) {
    for (int64_t d = 0; d < depth; d++) {
        if (gamma_alpha[d] <= 0.0) {
            fprintf(stderr, "alpha parameter of Gamma distribution must be positive.\n");
            exit(EXIT_FAILURE);
        }
        if (gamma_beta[d] <= 0.0) {
            fprintf(stderr, "beta parameter of Gamma distribution must be positive.\n");
            exit(EXIT_FAILURE);
        }
    }
    HierarchicalDirichletProcess* hdp = new_hier_dir_proc(num_dps, depth, NULL, sampling_grid_start, sampling_grid_stop,
                                                          sampling_grid_length, mu, nu, alpha, beta);
    hdp->sample_gamma = true;
    hdp->gamma_alpha = gamma_alpha;
    hdp->gamma_beta = gamma_beta;
    // per-DP auxiliary variables used by the gamma resampling scheme
    double* w_aux = (double*) malloc(sizeof(double) * num_dps);
    bool* s_aux = (bool*) malloc(sizeof(bool) * num_dps);
    for (int64_t i = 0; i < num_dps; i++) {
        w_aux[i] = 1.0;
        s_aux[i] = false;
    }
    hdp->w_aux_vector = w_aux;
    hdp->s_aux_vector = s_aux;
    // init to prior expected value
    double* gamma = (double*) malloc(sizeof(double) * depth);
    for (int64_t d = 0; d < depth; d++) {
        gamma[d] = gamma_alpha[d] / gamma_beta[d];
    }
    hdp->gamma = gamma;
    return hdp;
}
// Free an entire HDP: the DP tree (with all factors), every owned array, and
// the registered metric memos (released via the set's destructor callback).
void destroy_hier_dir_proc(HierarchicalDirichletProcess* hdp) {
// recursively destroys every DP and its factor tree from the base down
destroy_dir_proc(hdp->base_dp);
free(hdp->gamma);
free(hdp->data);
free(hdp->data_pt_dp_id);
free(hdp->dps);
free(hdp->sampling_grid);
//destroy_log_sum_memo(hdp->log_sum_memo);
// NULL unless the Gamma-prior constructor was used; free(NULL) is a no-op
free(hdp->gamma_alpha);
free(hdp->gamma_beta);
free(hdp->w_aux_vector);
free(hdp->s_aux_vector);
// set was constructed with destroy_distr_metric_memo as its destructor
stSet_destruct(hdp->distr_metric_memos);
free(hdp);
}
// Identify the unique parentless DP as the base of the hierarchy.  Aborts if
// more than one DP lacks a parent, or if none does.
void establish_base_dp(HierarchicalDirichletProcess* hdp) {
    DirichletProcess** dps = hdp->dps;
    int64_t num_dps = hdp->num_dps;
    for (int64_t i = 0; i < num_dps; i++) {
        DirichletProcess* dp = dps[i];
        if (dp->parent != NULL) {
            continue;
        }
        if (hdp->base_dp != NULL) {
            // a second root means the tree is disconnected
            fprintf(stderr, "Hierarchical Dirichlet process contains orphaned Dirichlet process.\n");
            exit(EXIT_FAILURE);
        }
        hdp->base_dp = dp;
    }
    if (hdp->base_dp == NULL) {
        fprintf(stderr, "Hierarchical Dirichlet process does not contain base Dirichlet process.\n");
        exit(EXIT_FAILURE);
    }
}
// Depth-first traversal from the base DP; aborts if any DP is reachable more
// than once, i.e. the child links do not form a tree.
void verify_dp_tree(HierarchicalDirichletProcess* hdp) {
    int64_t num_dps = hdp->num_dps;
    bool* visited = (bool*) malloc(sizeof(bool) * num_dps);
    for (int64_t i = 0; i < num_dps; i++) {
        visited[i] = false;
    }
    // explicit DFS stack of DPs still to visit
    stList* stack = stList_construct();
    stList_append(stack, (void*) hdp->base_dp);
    while (stList_length(stack) > 0) {
        DirichletProcess* dp = (DirichletProcess*) stList_pop(stack);
        if (visited[dp->id]) {
            fprintf(stderr, "Hierarchical Dirichlet process does not have tree structure.\n");
            exit(EXIT_FAILURE);
        }
        visited[dp->id] = true;
        stListIterator* child_iter = stList_getIterator(dp->children);
        for (DirichletProcess* child = (DirichletProcess*) stList_getNext(child_iter);
             child != NULL;
             child = (DirichletProcess*) stList_getNext(child_iter)) {
            stList_append(stack, (void*) child);
        }
        stList_destructIterator(child_iter);
    }
    stList_destruct(stack);
    free(visited);
}
// Recursively assign each DP its depth and its pointer into the per-depth
// gamma array, and verify that every leaf sits at exactly leaf_depth.
void verify_tree_depth(HierarchicalDirichletProcess* hdp, DirichletProcess* dp, int64_t current_depth,
                       int64_t leaf_depth) {
    dp->gamma = &(hdp->gamma[current_depth]);
    dp->depth = current_depth;
    if (stList_length(dp->children) == 0) {
        // leaf: must be at the expected depth
        if (current_depth != leaf_depth) {
            fprintf(stderr, "Hierarchical Dirichlet process has leaf Dirichlet process at incorrect depth.\n");
            exit(EXIT_FAILURE);
        }
        return;
    }
    stListIterator* iter = stList_getIterator(dp->children);
    DirichletProcess* child;
    while ((child = (DirichletProcess*) stList_getNext(iter)) != NULL) {
        verify_tree_depth(hdp, child, current_depth + 1, leaf_depth);
    }
    stList_destructIterator(iter);
}
// Check that every data point is assigned to an existing, leaf-level DP;
// aborts otherwise.
void verify_valid_dp_assignments(HierarchicalDirichletProcess* hdp) {
    int64_t length = hdp->data_length;
    int64_t num_dps = hdp->num_dps;
    DirichletProcess** dps = hdp->dps;
    int64_t* dp_ids = hdp->data_pt_dp_id;
    for (int64_t i = 0; i < length; i++) {
        int64_t id = dp_ids[i];
        if (id < 0 || id >= num_dps) {
            fprintf(stderr, "Data point is assigned to non-existent Dirichlet process.\n");
            exit(EXIT_FAILURE);
        }
        if (stList_length(dps[id]->children) > 0) {
            fprintf(stderr, "Data point cannot be assigned to non-leaf Dirichlet process.\n");
            exit(EXIT_FAILURE);
        }
    }
}
// Mark every DP on the path from each data point's leaf DP up to the base as
// observed, allocating a zeroed posterior-predictive grid for each DP the
// first time it is marked.
void mark_observed_dps(HierarchicalDirichletProcess* hdp) {
    // mark newly observed dps
    int64_t length = hdp->data_length;
    DirichletProcess** dps = hdp->dps;
    int64_t* dp_ids = hdp->data_pt_dp_id;
    int64_t grid_length = hdp->grid_length;
    for (int64_t i = 0; i < length; i++) {
        DirichletProcess* dp = dps[dp_ids[i]];
        // walk up until the root or an already-marked ancestor
        while (dp != NULL && !dp->observed) {
            dp->observed = true;
            double* pdf = (double*) malloc(sizeof(double) * grid_length);
            for (int64_t j = 0; j < grid_length; j++) {
                pdf[j] = 0.0;
            }
            dp->posterior_predictive = pdf;
            dp = dp->parent;
        }
    }
}
// Lloyd's k-means in one dimension with random restarts.
//
// For each of num_restarts runs, centroids are seeded with random data
// points (via rand(); seed with srand() externally for reproducibility) and
// refined for at most max_iters iterations or until assignments stop
// changing; empty clusters are re-seeded with a random data point.  The run
// with the smallest total absolute deviation wins.
//
// On return, *assignments_out is a malloc'd array of `length` cluster
// indices and *centroids_out is a malloc'd array of k centroid values; the
// caller owns both.
void k_means(int64_t k, double* data, int64_t length, int64_t max_iters, int64_t num_restarts,
             int64_t** assignments_out, double** centroids_out) {
    if (k > length) {
        fprintf(stderr, "Must have at least as many data points as clusters.\n");
        exit(EXIT_FAILURE);
    }
    if (k <= 0) {
        fprintf(stderr, "Must have at least one cluster.\n");
        exit(EXIT_FAILURE);
    }
    // Guard added: with max_iters < 1 the assignments would remain -1 and the
    // scoring pass below would read centroids[-1] (out of bounds); with
    // num_restarts < 1 the out-params would be returned as NULL.
    if (max_iters < 1 || num_restarts < 1) {
        fprintf(stderr, "Must have at least one iteration and one restart.\n");
        exit(EXIT_FAILURE);
    }
    int64_t* best_assignments = NULL;
    double* best_centroids = NULL;
    double best_sum_dist = DBL_MAX;
    int64_t* centroid_counts = (int64_t*) malloc(sizeof(int64_t) * k);
    for (int64_t restart = 0; restart < num_restarts; restart++) {
        double* centroids = (double*) malloc(sizeof(double) * k);
        int64_t* assignments = (int64_t*) malloc(sizeof(int64_t) * length);
        for (int64_t i = 0; i < length; i++) {
            assignments[i] = -1;  // "unassigned" sentinel; forces >= 1 update pass
        }
        // seed centroids with random data points
        for (int64_t i = 0; i < k; i++) {
            centroids[i] = data[rand() % length];
        }
        for (int64_t iter = 0; iter < max_iters; iter++) {
            // assignment step: move each point to its nearest centroid
            bool converged = true;
            for (int64_t i = 0; i < length; i++) {
                double data_pt = data[i];
                double closest_dist = fabs(data_pt - centroids[0]);
                int64_t closest_centroid = 0;
                for (int64_t j = 1; j < k; j++) {
                    double dist = fabs(data_pt - centroids[j]);
                    if (dist < closest_dist) {
                        closest_centroid = j;
                        closest_dist = dist;
                    }
                }
                if (assignments[i] != closest_centroid) {
                    converged = false;
                }
                assignments[i] = closest_centroid;
            }
            if (converged) {
                break;
            }
            // update step: recompute each centroid as its cluster mean
            for (int64_t i = 0; i < k; i++) {
                centroids[i] = 0.0;
                centroid_counts[i] = 0;
            }
            for (int64_t i = 0; i < length; i++) {
                int64_t assignment = assignments[i];
                centroids[assignment] += data[i];
                centroid_counts[assignment]++;
            }
            for (int64_t i = 0; i < k; i++) {
                if (centroid_counts[i] > 0) {
                    centroids[i] /= centroid_counts[i];
                }
                else {
                    // re-seed empty clusters with a random data point
                    centroids[i] = data[rand() % length];
                }
            }
        }
        // score this restart by total absolute deviation
        double sum_dist = 0.0;
        for (int64_t i = 0; i < length; i++) {
            sum_dist += fabs(data[i] - centroids[assignments[i]]);
        }
        if (sum_dist < best_sum_dist) {
            free(best_centroids);
            free(best_assignments);
            best_centroids = centroids;
            best_assignments = assignments;
            best_sum_dist = sum_dist;
        }
        else {
            free(centroids);
            free(assignments);
        }
    }
    free(centroid_counts);
    *centroids_out = best_centroids;
    *assignments_out = best_assignments;
}
//void fill_k_means_factor_bank(Factor*** fctr_bank, int64_t* dp_depths, DirichletProcess* dp,
// int64_t depth, int64_t* expected_num_factors) {
// int64_t dp_id = dp->id;
// dp_depths[dp_id] = depth;
//
// int64_t expected_num = expected_num_factors[depth];
//
// Factor** dp_fctr_bank = (Factor**) malloc(sizeof(Factor*) * expected_num);
// fctr_bank[dp_id] = dp_fctr_bank;
// for (int64_t i = 0; i < expected_num; i++) {
// dp_fctr_bank[i] = NULL;
// }
//
//
//}
// Recursively record each DP's depth (base DP = 0) into dp_depths, indexed
// by DP id.
void get_dp_depths_internal(int64_t* dp_depths, DirichletProcess* dp, int64_t depth) {
    dp_depths[dp->id] = depth;
    stListIterator* dp_child_iter = stList_getIterator(dp->children);
    // explicit cast from the void* iterator API, consistent with the casts
    // used everywhere else in this file
    DirichletProcess* dp_child = (DirichletProcess*) stList_getNext(dp_child_iter);
    while (dp_child != NULL) {
        get_dp_depths_internal(dp_depths, dp_child, depth + 1);
        dp_child = (DirichletProcess*) stList_getNext(dp_child_iter);
    }
    stList_destructIterator(dp_child_iter);
}
// Build a malloc'd table (owned by the caller) mapping each DP id to its
// depth below the base DP.
int64_t* get_dp_depths(HierarchicalDirichletProcess* hdp) {
    int64_t* depths = (int64_t*) malloc(sizeof(int64_t) * hdp->num_dps);
    get_dp_depths_internal(depths, hdp->base_dp, 0);
    return depths;
}
// Initialize the factor trees by hierarchical k-means: cluster the raw data
// into the expected number of base-level groups, then repeatedly cluster the
// resulting centers at each higher level of the DP tree; the cluster
// assignments are used to wire data-point -> middle -> base factors, after
// which each base factor's posterior parameters are updated from the data
// actually assigned beneath it.
void k_means_init_factors(HierarchicalDirichletProcess* hdp, int64_t max_iters, int64_t num_restarts) {
int64_t tree_depth = hdp->depth;
double* gamma_params = hdp->gamma;
int64_t num_data = hdp->data_length;
int64_t* data_pt_dp_id = hdp->data_pt_dp_id;
int64_t num_dps = hdp->num_dps;
int64_t* dp_depths = get_dp_depths(hdp);
// count how many DPs sit at each depth of the tree
int64_t* depth_dp_counts = (int64_t*) malloc(sizeof(int64_t) * tree_depth);
for (int64_t i = 0; i < tree_depth; i++) {
depth_dp_counts[i] = 0;
}
for (int64_t i = 0; i < num_dps; i++) {
depth_dp_counts[dp_depths[i]]++;
}
// expected factor count per level, from gamma * log(1 + n / gamma) spread
// over the DPs at that level; index 0 is the leaf level, increasing upward
int64_t* expected_num_factors = (int64_t*) malloc(sizeof(int64_t) * tree_depth);
double stat_expect = gamma_params[0] * log(1.0 + num_data / gamma_params[0]);
expected_num_factors[0] = ((int64_t) stat_expect / depth_dp_counts[tree_depth - 1]) + 1;
for (int64_t i = 1; i < tree_depth; i++) {
int64_t num_lower_factors = expected_num_factors[i - 1];
stat_expect = gamma_params[i] * log(1.0 + num_lower_factors / gamma_params[i]);
expected_num_factors[i] = ((int64_t) stat_expect / depth_dp_counts[tree_depth - i - 1]) + 1;
if (expected_num_factors[i] > num_lower_factors) {
expected_num_factors[i] = num_lower_factors;
}
}
// level 0 clusters the data; each higher level clusters the centers below it
int64_t** cluster_assignments = (int64_t**) malloc(sizeof(int64_t*) * tree_depth);
double** factor_centers = (double**) malloc(sizeof(double*) * tree_depth);
k_means(expected_num_factors[0], hdp->data, num_data, max_iters, num_restarts,
&cluster_assignments[0], &factor_centers[0]);
for (int64_t i = 1; i < tree_depth; i++) {
k_means(expected_num_factors[i], factor_centers[i - 1], expected_num_factors[i - 1], max_iters, num_restarts,
&cluster_assignments[i], &factor_centers[i]);
}
// fctr_bank[dp_id][cluster] lazily holds the factor created for that cluster
Factor*** fctr_bank = (Factor***) malloc(sizeof(Factor**) * num_dps);
int64_t num_potential_factors;
int64_t depth;
Factor** dp_fctr_bank;
for (int64_t i = 0; i < num_dps; i++) {
depth = dp_depths[i];
num_potential_factors = expected_num_factors[tree_depth - depth - 1];
dp_fctr_bank = (Factor**) malloc(sizeof(Factor*) * num_potential_factors);
for (int64_t j = 0; j < num_potential_factors; j++) {
dp_fctr_bank[j] = NULL;
}
fctr_bank[i] = dp_fctr_bank;
}
DirichletProcess** dps = hdp->dps;
DirichletProcess* dp;
int64_t dp_id;
Factor* data_pt_fctr;
Factor* parent_fctr;
int64_t parent_fctr_num;
// attach each data point to its assigned cluster's middle factor in its
// leaf DP, creating the middle factor on first use
for (int64_t i = 0; i < num_data; i++) {
data_pt_fctr = new_data_pt_factor(hdp, i);
dp_id = data_pt_dp_id[i];
dp = dps[dp_id];
parent_fctr_num = cluster_assignments[0][i];
parent_fctr = fctr_bank[dp_id][parent_fctr_num];
if (parent_fctr == NULL) {
parent_fctr = new_middle_factor(dps[dp_id]);
fctr_bank[dp_id][parent_fctr_num] = parent_fctr;
}
data_pt_fctr->parent = parent_fctr;
stSet_insert(parent_fctr->children, (void*) data_pt_fctr);
(dp->num_factor_children)++;
}
DirichletProcess* parent_dp;
Factor** parent_dp_fctr_bank;
int64_t* assignments;
int64_t expected_num;
Factor* fctr;
// wire each level's factors to the next level up, creating parents (middle
// factors, or base factors at depth 1) lazily
// could make this faster with recursion instead of multiple passes
for (int64_t depth = tree_depth - 1; depth > 0; depth--) {
assignments = cluster_assignments[tree_depth - depth];
expected_num = expected_num_factors[tree_depth - depth - 1];
for (int64_t i = 0; i < num_dps; i++) {
if (dp_depths[i] != depth) {
continue;
}
dp = dps[i];
parent_dp = dp->parent;
dp_fctr_bank = fctr_bank[i];
parent_dp_fctr_bank = fctr_bank[parent_dp->id];
for (int64_t j = 0; j < expected_num; j++) {
fctr = dp_fctr_bank[j];
if (fctr == NULL) {
continue;
}
parent_fctr_num = assignments[j];
parent_fctr = parent_dp_fctr_bank[parent_fctr_num];
if (parent_fctr == NULL) {
if (depth > 1) {
parent_fctr = new_middle_factor(parent_dp);
}
else {
parent_fctr = new_base_factor(hdp);
}
parent_dp_fctr_bank[parent_fctr_num] = parent_fctr;
}
fctr->parent = parent_fctr;
stSet_insert(parent_fctr->children, (void*) fctr);
(parent_dp->num_factor_children)++;
}
}
}
// fold each base factor's assigned data into its posterior parameters
double mean, sum_sq_devs;
int64_t num_fctr_data;
stSetIterator* base_fctr_iter = stSet_getIterator(hdp->base_dp->factors);
Factor* base_fctr = stSet_getNext(base_fctr_iter);
while (base_fctr != NULL) {
get_factor_stats(base_fctr, &mean, &sum_sq_devs, &num_fctr_data);
add_update_base_factor_params(base_fctr, mean, sum_sq_devs, (double) num_fctr_data);
base_fctr = stSet_getNext(base_fctr_iter);
}
stSet_destructIterator(base_fctr_iter);
// release all temporary clustering structures
for (int64_t i = 0; i < num_dps; i++) {
free(fctr_bank[i]);
}
for (int64_t i = 0; i < tree_depth; i++) {
free(cluster_assignments[i]);
free(factor_centers[i]);
}
free(cluster_assignments);
free(factor_centers);
free(expected_num_factors);
free(fctr_bank);
free(dp_depths);
free(depth_dp_counts);
}
// Recursively builds the initial factor chain through every observed DP:
// one new middle factor per DP, attached under the parent DP's factor.
// Leaf DPs additionally adopt their pre-constructed data point factors.
void init_factors_internal(DirichletProcess* dp, Factor* parent_fctr, stList** data_pt_fctr_lists) {
    if (!dp->observed) {
        return;
    }

    Factor* middle_fctr = new_middle_factor(dp);
    middle_fctr->parent = parent_fctr;
    stSet_insert(parent_fctr->children, (void*) middle_fctr);

    if (stList_length(dp->children) > 0) {
        // internal DP: recurse into each child DP beneath the new factor
        stListIterator* dp_iter = stList_getIterator(dp->children);
        for (DirichletProcess* child_dp = (DirichletProcess*) stList_getNext(dp_iter);
             child_dp != NULL;
             child_dp = (DirichletProcess*) stList_getNext(dp_iter)) {
            init_factors_internal(child_dp, middle_fctr, data_pt_fctr_lists);
        }
        stList_destructIterator(dp_iter);
    }
    else {
        // leaf DP: hang all of its data point factors off the new factor
        stSet* adopted = middle_fctr->children;
        stListIterator* pt_iter = stList_getIterator(data_pt_fctr_lists[dp->id]);
        for (Factor* pt_fctr = (Factor*) stList_getNext(pt_iter);
             pt_fctr != NULL;
             pt_fctr = (Factor*) stList_getNext(pt_iter)) {
            pt_fctr->parent = middle_fctr;
            stSet_insert(adopted, (void*) pt_fctr);
        }
        stList_destructIterator(pt_iter);
    }
}
// Builds the initial factor tree for the data currently attached to the HDP:
// one data point factor per observation, one middle factor per observed DP,
// and a single base (root) factor over everything. Also seeds the base
// factor's posterior parameters and each DP's child-factor count.
void init_factors(HierarchicalDirichletProcess* hdp) {
    int64_t data_length = hdp->data_length;
    int64_t* data_pt_dp_id = hdp->data_pt_dp_id;
    DirichletProcess** dps = hdp->dps;
    int64_t num_dps = hdp->num_dps;
    // per-DP lists of data point factors; NULL for DPs with no data assigned
    stList** data_pt_fctr_lists = (stList**) malloc(sizeof(stList*) * num_dps);
    for (int64_t i = 0; i < num_dps; i++) {
        data_pt_fctr_lists[i] = NULL;
    }
    Factor* data_pt_fctr;
    int64_t dp_id;
    stList* fctr_list;
    // create one factor per data point, bucketed by its assigned DP
    for (int64_t data_pt_idx = 0; data_pt_idx < data_length; data_pt_idx++) {
        dp_id = data_pt_dp_id[data_pt_idx];
        fctr_list = data_pt_fctr_lists[dp_id];
        if (fctr_list == NULL) {
            // lazily create the bucket on first use
            fctr_list = stList_construct();
            data_pt_fctr_lists[dp_id] = fctr_list;
        }
        data_pt_fctr = new_data_pt_factor(hdp, data_pt_idx);
        stList_append(fctr_list, (void*) data_pt_fctr);
    }
    // single root factor; the recursion threads every observed DP beneath it
    DirichletProcess* base_dp = hdp->base_dp;
    Factor* root_factor = new_base_factor(hdp);
    stListIterator* child_dp_iter = stList_getIterator(base_dp->children);
    DirichletProcess* child_dp = (DirichletProcess*) stList_getNext(child_dp_iter);
    while (child_dp != NULL) {
        init_factors_internal(child_dp, root_factor, data_pt_fctr_lists);
        child_dp = (DirichletProcess*) stList_getNext(child_dp_iter);
    }
    stList_destructIterator(child_dp_iter);
    // the lists were only scaffolding; the factors themselves live on in the tree
    for (int64_t i = 0; i < num_dps; i++) {
        if (data_pt_fctr_lists[i] != NULL) {
            stList_destruct(data_pt_fctr_lists[i]);
        }
    }
    free(data_pt_fctr_lists);
    // seed the root factor's posterior parameters from all of its data
    double mean, sum_sq_devs;
    int64_t num_data;
    get_factor_stats(root_factor, &mean, &sum_sq_devs, &num_data);
    add_update_base_factor_params(root_factor, mean, sum_sq_devs, (double) num_data);
    // count each DP's factor children (children of the factors it owns)
    int64_t fctr_child_count;
    DirichletProcess* dp;
    Factor* fctr;
    stSetIterator* fctr_iter;
    for (int64_t i = 0; i < num_dps; i++) {
        dp = dps[i];
        fctr_child_count = 0;
        fctr_iter = stSet_getIterator(dp->factors);
        fctr = (Factor*) stSet_getNext(fctr_iter);
        while (fctr != NULL) {
            fctr_child_count += stSet_size(fctr->children);
            fctr = (Factor*) stSet_getNext(fctr_iter);
        }
        stSet_destructIterator(fctr_iter);
        dp->num_factor_children = fctr_child_count;
    }
}
// Validates the data/DP assignments, marks which DPs are observed, and
// builds the initial factor tree so Gibbs sampling can begin.
void finalize_data(HierarchicalDirichletProcess* hdp) {
    verify_valid_dp_assignments(hdp);
    mark_observed_dps(hdp);
    init_factors(hdp);
    // alternative (k-means based) initialization, currently disabled:
    //k_means_init_factors(hdp, 500, 5);
}
// Wires the DP with id child_id beneath the DP with id parent_id.
// Only legal before the tree structure is finalized, for valid IDs, and
// when the child does not already have a parent; exits on any violation.
void set_dir_proc_parent(HierarchicalDirichletProcess* hdp, int64_t child_id, int64_t parent_id) {
    if (hdp->finalized) {
        fprintf(stderr, "Hierarchical Dirichlet process structure has been finalized. Cannot set new parent.\n");
        exit(EXIT_FAILURE);
    }
    bool child_id_invalid = (child_id < 0) || (child_id >= hdp->num_dps);
    bool parent_id_invalid = (parent_id < 0) || (parent_id >= hdp->num_dps);
    if (child_id_invalid || parent_id_invalid) {
        fprintf(stderr, "Dirichlet process ID does not exist.\n");
        exit(EXIT_FAILURE);
    }
    DirichletProcess* child_dp = hdp->dps[child_id];
    if (child_dp->parent != NULL) {
        fprintf(stderr, "Dirichlet process already has parent.\n");
        exit(EXIT_FAILURE);
    }
    DirichletProcess* parent_dp = hdp->dps[parent_id];
    child_dp->parent = parent_dp;
    stList_append(parent_dp->children, (void*) child_dp);
}
// Attaches a data set (values plus the DP id each value belongs to) to the
// HDP. Exits if data is already attached; if the tree structure has already
// been finalized, the factor tree is built immediately.
void pass_data_to_hdp(HierarchicalDirichletProcess* hdp, double* data, int64_t* dp_ids, int64_t length) {
    if (hdp->data != NULL) {
        fprintf(stderr, "Hierarchical Dirichlet process must be reset before passing new data.\n");
        exit(EXIT_FAILURE);
    }

    hdp->data = data;
    hdp->data_pt_dp_id = dp_ids;
    hdp->data_length = length;

    // structure already finalized: safe to initialize factors right away
    if (hdp->finalized) {
        finalize_data(hdp);
    }
}
// Locks in the DP tree topology: establishes the base DP, verifies the
// structure is a tree of the declared depth, and -- if data has already
// been passed in -- builds the initial factor tree.
void finalize_hdp_structure(HierarchicalDirichletProcess* hdp) {
    establish_base_dp(hdp);
    verify_dp_tree(hdp);
    // leaves must sit at depth hdp->depth - 1, root at depth 0
    verify_tree_depth(hdp, hdp->base_dp, 0, hdp->depth - 1);
    if (hdp->data != NULL) {
        finalize_data(hdp);
    }
    hdp->finalized = true;
}
// Invalidates every cached pairwise-distance entry (-1.0 marks "not yet
// computed"). The memo stores the strictly-lower-triangular part of an
// num_distrs x num_distrs symmetric matrix.
void reset_distr_metric_memo(DistributionMetricMemo* memo) {
    int64_t n = memo->num_distrs;
    int64_t num_entries = ((n - 1) * n) / 2;
    double* entries = memo->memo_matrix;
    for (int64_t idx = 0; idx < num_entries; idx++) {
        entries[idx] = -1.0;
    }
}
// Detaches and frees the current data set and tears down all sampling
// state (factor tree, posterior predictive curves, splines, metric memos,
// gamma auxiliary variables) so a fresh data set can be passed in.
void reset_hdp_data(HierarchicalDirichletProcess* hdp) {
    if (hdp->data == NULL && hdp->data_pt_dp_id == NULL) {
        // nothing attached; nothing to do
        return;
    }
    // the HDP owns these arrays once passed in
    free(hdp->data);
    hdp->data = NULL;
    free(hdp->data_pt_dp_id);
    hdp->data_pt_dp_id = NULL;
    DirichletProcess** dps = hdp->dps;
    int64_t num_dps = hdp->num_dps;
    // destroys every factor reachable from the base DP
    destroy_dir_proc_factor_tree(hdp->base_dp);
    DirichletProcess* dp;
    for (int64_t i = 0; i < num_dps; i++) {
        dp = dps[i];
        free(dp->posterior_predictive);
        dp->posterior_predictive = NULL;
        free(dp->spline_slopes);
        dp->spline_slopes = NULL;
        dp->observed = false;
    }
    // invalidate all cached distribution distances
    stSetIterator* memo_iter = stSet_getIterator(hdp->distr_metric_memos);
    DistributionMetricMemo* memo = stSet_getNext(memo_iter);
    while (memo != NULL) {
        reset_distr_metric_memo(memo);
        memo = stSet_getNext(memo_iter);
    }
    stSet_destructIterator(memo_iter);
    hdp->splines_finalized = false;
    hdp->samples_taken = 0;
    if (hdp->sample_gamma) {
        // reset each gamma to its prior mean and the auxiliary variables
        // to neutral starting values
        double* gamma = hdp->gamma;
        double* gamma_alpha = hdp->gamma_alpha;
        double* gamma_beta = hdp->gamma_beta;
        for (int64_t depth = 0; depth < hdp->depth; depth++) {
            gamma[depth] = gamma_alpha[depth] / gamma_beta[depth];
        }
        double* w = hdp->w_aux_vector;
        bool* s = hdp->s_aux_vector;
        for (int64_t i = 0; i < num_dps; i++) {
            w[i] = 1.0;
            s[i] = false;
        }
    }
}
// Detaches fctr from its parent factor, removing its data's contribution
// from the base factor's posterior parameters, and caches fctr's summary
// statistics on its DP so assign_to_parent() can re-add them cheaply.
// Destroys the parent factor if fctr was its last child.
void unassign_from_parent(Factor* fctr) {
    if (fctr->factor_type == BASE) {
        fprintf(stderr, "Cannot unassign base factor's parent.\n");
        exit(EXIT_FAILURE);
    }
    Factor* parent = fctr->parent;
    // resolve the base factor BEFORE possibly destroying the parent chain
    Factor* base_fctr = get_base_factor(parent);
    DirichletProcess* base_dp = base_fctr->dp;
    stSet_remove(parent->children, (void*) fctr);
    fctr->parent = NULL;
    (parent->dp->num_factor_children)--;
    if (stSet_size(parent->children) == 0) {
        // parent is now empty; destroy_factor may cascade up the tree
        destroy_factor(parent);
    }
    int64_t num_reassign;
    double mean_reassign;
    double sum_sq_devs;
    get_factor_stats(fctr, &mean_reassign, &sum_sq_devs, &num_reassign);
    // check to see if base factor has been destroyed
    if (stSet_search(base_dp->factors, (void*) base_fctr) != NULL) {
        remove_update_base_factor_params(base_fctr, mean_reassign, sum_sq_devs, (double) num_reassign);
    }
    // cache stats for the upcoming reassignment (dp is NULL for data point
    // factors, which carry no DP of their own)
    DirichletProcess* dp = fctr->dp;
    if (dp != NULL) {
        dp->cached_factor_mean = mean_reassign;
        dp->cached_factor_size = num_reassign;
        dp->cached_factor_sum_sq_dev = sum_sq_devs;
    }
}
// Attaches fctr beneath parent and, when update_params is true, folds
// fctr's data into the base factor's posterior parameters (using the
// stats cached on fctr's DP by unassign_from_parent for middle factors).
void assign_to_parent(Factor* fctr, Factor* parent, bool update_params) {
    if (fctr->factor_type == BASE) {
        fprintf(stderr, "Cannot assign base factor to a parent.\n");
        exit(EXIT_FAILURE);
    }
    if (parent->factor_type == DATA_PT) {
        fprintf(stderr, "Cannot assign data point factor to be parent.\n");
        exit(EXIT_FAILURE);
    }

    fctr->parent = parent;
    stSet_insert(parent->children, (void*) fctr);
    (parent->dp->num_factor_children)++;

    Factor* base_fctr = get_base_factor(parent);
    if (!update_params) {
        return;
    }

    if (fctr->factor_type == DATA_PT) {
        // single observation: mean is the point itself, zero squared deviation
        double data_pt = get_factor_data_pt(fctr);
        add_update_base_factor_params(base_fctr, data_pt, 0.0, 1.0);
    }
    else {
        // middle factor: reuse the stats cached on its DP at unassignment
        DirichletProcess* fctr_dp = fctr->dp;
        add_update_base_factor_params(base_fctr, fctr_dp->cached_factor_mean,
                                      fctr_dp->cached_factor_sum_sq_dev,
                                      (double) fctr_dp->cached_factor_size);
    }
}
//Factor* sample_from_data_pt_factor(Factor* fctr, DirichletProcess* dp) {
// if (fctr->factor_type != DATA_PT) {
// fprintf(stderr, "Attempted a data point factor sample from non-data point factor.\n");
// exit(EXIT_FAILURE);
// }
//
// stSet* pool = dp->factors;
// int64_t num_fctrs = stSet_size(pool);
//
// Factor** fctr_order = (Factor**) malloc(sizeof(Factor*) * num_fctrs);
//
// double* cdf = (double*) malloc(sizeof(double) * (num_fctrs + 1));
// double cumul = 0.0;
//
// stSetIterator* pool_iter = stSet_getIterator(pool);
// Factor* fctr_option;
// double fctr_size;
// for (int64_t i = 0; i < num_fctrs; i++) {
// fctr_option = (Factor*) stSet_getNext(pool_iter);
// fctr_order[i] = fctr_option;
//
// fctr_size = (double) stSet_size(fctr_option->children);
// cumul += fctr_size * data_pt_factor_parent_likelihood(fctr, fctr_option);
// cdf[i] = cumul;
// }
// stSet_destructIterator(pool_iter);
//
// double gamma_param = *(dp->gamma);
// cumul += gamma_param * unobserved_factor_likelihood(fctr, dp);
// cdf[num_fctrs] = cumul;
//
// int64_t choice_idx = bisect_left(rand_uniform(cumul), cdf, num_fctrs + 1);
//
// Factor* fctr_choice;
// if (choice_idx == num_fctrs) {
// free(fctr_order);
// DirichletProcess* parent_dp = dp->parent;
// if (parent_dp == NULL) {
// fctr_choice = new_base_factor(dp->hdp);
// }
// else {
// fctr_choice = new_middle_factor(dp);
// Factor* new_fctr_parent = sample_from_data_pt_factor(fctr, parent_dp);
// assign_to_parent(fctr_choice, new_fctr_parent, false);
// }
// }
// else {
// fctr_choice = fctr_order[choice_idx];
// free(fctr_order);
// }
//
// return fctr_choice;
//}
// Samples a new parent factor for a data point factor from dp's pool,
// proportional to each candidate's child count times its likelihood of
// generating the point, plus gamma * (unobserved likelihood) for opening
// a new factor. Opening a new factor recurses up the DP tree.
Factor* sample_from_data_pt_factor(Factor* fctr, DirichletProcess* dp) {
    if (fctr->factor_type != DATA_PT) {
        fprintf(stderr, "Attempted a data point factor sample from non-data point factor.\n");
        exit(EXIT_FAILURE);
    }

    stSet* pool = dp->factors;
    int64_t num_fctrs = stSet_size(pool);

    // snapshot the pool so the sampled index can be mapped back to a factor
    Factor** fctr_order = (Factor**) malloc(sizeof(Factor*) * num_fctrs);
    stSetIterator* pool_iter = stSet_getIterator(pool);
    for (int64_t i = 0; i < num_fctrs; i++) {
        Factor* fctr_option = (Factor*) stSet_getNext(pool_iter);
        fctr_order[i] = fctr_option;
    }
    stSet_destructIterator(pool_iter);

    double* probs = (double*) malloc(sizeof(double) * num_fctrs);
    double new_fctr_prob;
    #pragma omp parallel shared(new_fctr_prob,probs)
    {
        // one thread scores the "new factor" option while the rest score the pool
        #pragma omp single nowait
        new_fctr_prob = (*(dp->gamma)) * unobserved_factor_likelihood(fctr, dp);
        Factor* fctr_option;
        #pragma omp for
        for (int64_t i = 0; i < num_fctrs; i++) {
            fctr_option = fctr_order[i];
            probs[i] = stSet_size(fctr_option->children) * data_pt_factor_parent_likelihood(fctr, fctr_option);
        }
    }

    double* cdf = (double*) malloc(sizeof(double) * (num_fctrs + 1));
    parallel_cdf(cdf, probs, num_fctrs, 10);
    free(probs); // fix: was leaked
    // fix: guard the cdf[-1] out-of-bounds read when the pool is empty
    cdf[num_fctrs] = (num_fctrs > 0 ? cdf[num_fctrs - 1] : 0.0) + new_fctr_prob;

    int64_t choice_idx = bisect_left(rand_uniform(cdf[num_fctrs]), cdf, num_fctrs + 1);
    free(cdf); // fix: was leaked

    Factor* fctr_choice;
    if (choice_idx == num_fctrs) {
        // chose to open a new factor; recurse to pick its parent in turn
        free(fctr_order);
        DirichletProcess* parent_dp = dp->parent;
        if (parent_dp == NULL) {
            fctr_choice = new_base_factor(dp->hdp);
        }
        else {
            fctr_choice = new_middle_factor(dp);
            Factor* new_fctr_parent = sample_from_data_pt_factor(fctr, parent_dp);
            assign_to_parent(fctr_choice, new_fctr_parent, false);
        }
    }
    else {
        fctr_choice = fctr_order[choice_idx];
        free(fctr_order);
    }
    return fctr_choice;
}
//Factor* sample_from_middle_factor(Factor* fctr, DirichletProcess* dp) {
// if (fctr->factor_type != MIDDLE) {
// fprintf(stderr, "Attempted a middle factor sample from non-middle factor.\n");
// exit(EXIT_FAILURE);
// }
//
// stSet* pool = dp->factors;
// int64_t num_fctrs = stSet_size(pool);
// int64_t num_choices = num_fctrs + 1;
//
// Factor** fctr_order = (Factor**) malloc(sizeof(Factor*) * num_fctrs);
// double* log_probs = (double*) malloc(sizeof(double) * num_choices);
//
// stSetIterator* pool_iter = stSet_getIterator(pool);
// for (int64_t i = 0; i < num_fctrs; i++) {
// Factor* fctr_option = (Factor*) stSet_getNext(pool_iter);
// fctr_order[i] = fctr_option;
// log_probs[i] = log((double) stSet_size(fctr_option->children))
// + factor_parent_joint_log_likelihood(fctr, fctr_option);
// }
// stSet_destructIterator(pool_iter);
//
// log_probs[num_fctrs] = log(*(dp->gamma)) + unobserved_factor_joint_log_likelihood(fctr, dp);
//
// double* cdf = (double*) malloc(sizeof(double) * num_choices);
// double cumul = 0.0;
// double normalizing_const = max(log_probs, num_choices);
//
// for (int64_t i = 0; i < num_choices; i++) {
// cumul += exp(log_probs[i] - normalizing_const);
// cdf[i] = cumul;
// }
//
// free(log_probs);
//
// int64_t choice_idx = bisect_left(rand_uniform(cumul), cdf, num_choices);
// free(cdf);
//
// Factor* fctr_choice;
// if (choice_idx == num_fctrs) {
// free(fctr_order);
// DirichletProcess* parent_dp = dp->parent;
// if (parent_dp == NULL) {
// fctr_choice = new_base_factor(dp->hdp);
// }
// else {
// fctr_choice = new_middle_factor(dp);
// Factor* new_fctr_parent = sample_from_middle_factor(fctr, parent_dp);
// assign_to_parent(fctr_choice, new_fctr_parent, false);
// }
// }
// else {
// fctr_choice = fctr_order[choice_idx];
// free(fctr_order);
// }
//
// return fctr_choice;
//}
// Samples a new parent factor for a middle factor from dp's pool, working
// in log space for numerical stability: log(child count) + joint log
// likelihood per candidate, plus log(gamma) + unobserved joint log
// likelihood for opening a new factor. Recurses up the tree on a new factor.
Factor* sample_from_middle_factor(Factor* fctr, DirichletProcess* dp) {
    if (fctr->factor_type != MIDDLE) {
        fprintf(stderr, "Attempted a middle factor sample from non-middle factor.\n");
        exit(EXIT_FAILURE);
    }
    stSet* pool = dp->factors;
    int64_t num_fctrs = stSet_size(pool);
    // one extra slot for the "open a new factor" option
    int64_t num_choices = num_fctrs + 1;
    // snapshot the pool so the sampled index maps back to a factor
    Factor** fctr_order = (Factor**) malloc(sizeof(Factor*) * num_fctrs);
    double* log_probs = (double*) malloc(sizeof(double) * num_choices);
    stSetIterator* pool_iter = stSet_getIterator(pool);
    for (int64_t i = 0; i < num_fctrs; i++) {
        fctr_order[i] = (Factor*) stSet_getNext(pool_iter);
    }
    stSet_destructIterator(pool_iter);
    double new_fctr_log_prob;
    // one thread scores the new-factor option while the others score the
    // pool; the implicit barrier at the region's end orders both before
    // the reads below
    #pragma omp parallel shared(new_fctr_log_prob,log_probs)
    {
        #pragma omp single nowait
        new_fctr_log_prob = log(*(dp->gamma)) + unobserved_factor_joint_log_likelihood(fctr, dp);
        #pragma omp for
        for (int64_t i = 0; i < num_fctrs; i++) {
            Factor* fctr_option = fctr_order[i];
            log_probs[i] = log((double) stSet_size(fctr_option->children))
                           + factor_parent_joint_log_likelihood(fctr, fctr_option);
        }
    }
    log_probs[num_fctrs] = new_fctr_log_prob;
    // shift by the max before exponentiating to avoid underflow
    double normalizing_const = parallel_max(log_probs, num_choices);
    parallel_add(-normalizing_const, log_probs, num_choices);
    parallel_exp(log_probs, num_choices);
    double* cdf = (double*) malloc(sizeof(double) * num_choices);
    parallel_cdf(cdf, log_probs, num_choices, 10);
    free(log_probs);
    int64_t choice_idx = bisect_left(rand_uniform(cdf[num_fctrs]), cdf, num_choices);
    free(cdf);
    Factor* fctr_choice;
    if (choice_idx == num_fctrs) {
        // chose to open a new factor; recurse to choose its parent in turn
        free(fctr_order);
        DirichletProcess* parent_dp = dp->parent;
        if (parent_dp == NULL) {
            fctr_choice = new_base_factor(dp->hdp);
        }
        else {
            fctr_choice = new_middle_factor(dp);
            Factor* new_fctr_parent = sample_from_middle_factor(fctr, parent_dp);
            assign_to_parent(fctr_choice, new_fctr_parent, false);
        }
    }
    else {
        fctr_choice = fctr_order[choice_idx];
        free(fctr_order);
    }
    return fctr_choice;
}
// Dispatches a parent-resampling request to the routine matching the
// factor's type; base factors have no parent and cannot be sampled.
Factor* sample_factor(Factor* fctr, DirichletProcess* dp) {
    switch (fctr->factor_type) {
    case DATA_PT:
        return sample_from_data_pt_factor(fctr, dp);
    case MIDDLE:
        return sample_from_middle_factor(fctr, dp);
    default:
        fprintf(stderr, "Cannot sample base factor parent assignments.\n");
        exit(EXIT_FAILURE);
    }
}
// One Gibbs step for a single factor: detach it from its current parent,
// then draw a fresh parent from the conditional distribution and reattach,
// updating the base factor's posterior parameters.
void gibbs_factor_iteration(Factor* fctr) {
    // capture the DP to resample within before the detach clears fctr->parent
    DirichletProcess* resample_dp = fctr->parent->dp;
    unassign_from_parent(fctr);
    Factor* resampled_parent = sample_factor(fctr, resample_dp);
    assign_to_parent(fctr, resampled_parent, true);
}
// Propagates the weight of the "new factor" (prior) path down the observed
// subtree: each DP keeps gamma/(gamma + n) of the weight arriving from its
// parent, accumulating it into base_factor_wt before recursing.
void cache_prior_contribution(DirichletProcess* dp, double parent_prior_prod) {
    if (!(dp->observed)) {
        return;
    }

    double gamma_param = *(dp->gamma);
    double total_children = (double) dp->num_factor_children;
    double prior_prod = (gamma_param / (gamma_param + total_children)) * parent_prior_prod;
    dp->base_factor_wt += prior_prod;

    stListIterator* child_iter = stList_getIterator(dp->children);
    for (DirichletProcess* child = (DirichletProcess*) stList_getNext(child_iter);
         child != NULL;
         child = (DirichletProcess*) stList_getNext(child_iter)) {
        cache_prior_contribution(child, prior_prod);
    }
    stList_destructIterator(child_iter);
}
// Recursively accumulates, on every DP, the posterior weight this factor
// lineage contributes: child count / (gamma + n) at each level, with the
// prior tail pushed into unvisited child DPs via cache_prior_contribution.
void cache_base_factor_weight(Factor* fctr) {
    DirichletProcess* dp = fctr->dp;
    double gamma_param = *(dp->gamma);
    double total_children = (double) dp->num_factor_children;
    double wt = ((double) stSet_size(fctr->children)) / (gamma_param + total_children);
    dp->base_factor_wt += wt;

    // leaf DPs (no child DPs) stop here; their factor children are data points
    if (stList_length(dp->children) > 0) {
        // recurse into this factor's child factors
        stSetIterator* fctr_iter = stSet_getIterator(fctr->children);
        for (Factor* child = (Factor*) stSet_getNext(fctr_iter);
             child != NULL;
             child = (Factor*) stSet_getNext(fctr_iter)) {
            cache_base_factor_weight(child);
        }
        stSet_destructIterator(fctr_iter);

        // push the prior-path share of this weight into every child DP
        stListIterator* dp_iter = stList_getIterator(dp->children);
        for (DirichletProcess* child_dp = (DirichletProcess*) stList_getNext(dp_iter);
             child_dp != NULL;
             child_dp = (DirichletProcess*) stList_getNext(dp_iter)) {
            cache_prior_contribution(child_dp, wt);
        }
        stList_destructIterator(dp_iter);
    }
}
// Adds wt * distr into dp's running posterior predictive accumulator,
// clears the cached weight, and recurses into observed child DPs.
void push_factor_distr(DirichletProcess* dp, double* distr, int64_t length) {
    double* sample_collector = dp->posterior_predictive;
    double wt = dp->base_factor_wt;
    for (int64_t i = 0; i < length; i++) {
        sample_collector[i] += wt * distr[i];
    }
    // weight has been consumed; reset for the next base factor
    dp->base_factor_wt = 0.0;

    stListIterator* child_iter = stList_getIterator(dp->children);
    for (DirichletProcess* child = (DirichletProcess*) stList_getNext(child_iter);
         child != NULL;
         child = (DirichletProcess*) stList_getNext(child_iter)) {
        if (child->observed) {
            push_factor_distr(child, distr, length);
        }
    }
    stList_destructIterator(child_iter);
}
// Accumulates one posterior predictive sample on every observed DP's grid:
// for each base factor, weight its predictive density through the DP tree
// (cache_base_factor_weight) and push it into the accumulators; finally add
// the prior predictive term weighted by the leftover prior-path mass.
void take_distr_sample(HierarchicalDirichletProcess* hdp) {
    DirichletProcess* base_dp = hdp->base_dp;
    double* grid = hdp->sampling_grid;
    int64_t length = hdp->grid_length;
    // scratch buffer for one density curve at a time
    double* pdf = (double*) malloc(sizeof(double) * length);
    //SumOfLogsMemo* log_sum_memo = hdp->log_sum_memo;
    stSetIterator* base_fctr_iter = stSet_getIterator(base_dp->factors);
    Factor* base_fctr = (Factor*) stSet_getNext(base_fctr_iter);
    while (base_fctr != NULL) {
        cache_base_factor_weight(base_fctr);
        evaluate_posterior_predictive(base_fctr, grid, pdf, length);//, log_sum_memo);
        push_factor_distr(base_dp, pdf, length);
        base_fctr = (Factor*) stSet_getNext(base_fctr_iter);
    }
    stSet_destructIterator(base_fctr_iter);
    // remaining mass corresponds to "new factor" paths: use the prior predictive
    cache_prior_contribution(base_dp, 1.0);
    evaluate_prior_predictive(hdp, grid, pdf, length);
    push_factor_distr(base_dp, pdf, length);
    (hdp->samples_taken)++;
    free(pdf);
}
// Knuth shuffle algorithm
// Returns a freshly malloc'ed array containing hdp's Dirichlet processes in
// uniformly random order (Fisher-Yates, inside-out variant). Caller frees it.
DirichletProcess** get_shuffled_dps(HierarchicalDirichletProcess* hdp) {
    int64_t num_dps = hdp->num_dps;
    DirichletProcess** dps = hdp->dps;
    DirichletProcess** shuffled_dps = (DirichletProcess**) malloc(sizeof(DirichletProcess*) * num_dps);
    int64_t pos;
    for (int64_t i = 0; i < num_dps; i++) {
        pos = rand() % (i + 1);
        // fix: only relocate when pos != i -- the unconditional self-copy
        // read an uninitialized slot the first time each index was placed
        if (pos != i) {
            shuffled_dps[i] = shuffled_dps[pos];
        }
        shuffled_dps[pos] = dps[i];
    }
    return shuffled_dps;
}
// Runs one Gibbs pass over every factor child of dp's factors, advancing
// the shared iteration counter, taking a distribution sample every
// `thinning` iterations once past `burn_in`, and stopping early when
// `num_samples` have been collected. Counters are read/written through
// pointers so progress persists across DPs within a sweep.
void sample_dp_factors(DirichletProcess* dp, int64_t* iter_counter, int64_t burn_in, int64_t thinning,
                       int64_t* sample_counter, int64_t num_samples) {
    if (!dp->observed) {
        return;
    }
    int64_t iter = *iter_counter;
    int64_t samples_taken = *sample_counter;
    // have to pre-allocate the array of sampling factors in case reassignment triggers
    // destruction of the set the iterator is iterating through
    int64_t num_factor_children = dp->num_factor_children;
    Factor** sampling_fctrs = (Factor**) malloc(sizeof(Factor*) * num_factor_children);
    int64_t i = 0;
    stSetIterator* fctr_iter = stSet_getIterator(dp->factors);
    Factor* fctr = (Factor*) stSet_getNext(fctr_iter);
    stSetIterator* child_fctr_iter;
    Factor* child_fctr;
    // collect every child of every factor owned by this DP
    while (fctr != NULL) {
        child_fctr_iter = stSet_getIterator(fctr->children);
        child_fctr = (Factor*) stSet_getNext(child_fctr_iter);
        while (child_fctr != NULL) {
            sampling_fctrs[i] = child_fctr;
            i++;
            child_fctr = (Factor*) stSet_getNext(child_fctr_iter);
        }
        stSet_destructIterator(child_fctr_iter);
        fctr = (Factor*) stSet_getNext(fctr_iter);
    }
    stSet_destructIterator(fctr_iter);
    for (int64_t j = 0; j < num_factor_children; j++) {
        gibbs_factor_iteration(sampling_fctrs[j]);
        iter++;
        // sample on the thinning schedule, but only after burn-in
        if (iter % thinning == 0) {
            if (iter > burn_in) {
                take_distr_sample(dp->hdp);
                samples_taken++;
                if (samples_taken >= num_samples) {
                    break;
                }
            }
        }
    }
    free(sampling_fctrs);
    *sample_counter = samples_taken;
    *iter_counter = iter;
}
// Draws the auxiliary variable w ~ Beta(gamma + 1, n) used when resampling
// the concentration parameters (n = this DP's factor-child count).
// NOTE(review): the (float) cast binds only to *(dp->gamma); adding 1.0 then
// promotes to double before the implicit narrowing at the call -- confirm
// genbet's float parameters receive the intended Beta(gamma + 1, n) values.
double sample_auxilliary_w(DirichletProcess* dp) {
    return (double) genbet((float) *(dp->gamma) + 1.0, (float) dp->num_factor_children);
}
// Draws the auxiliary indicator s ~ Bernoulli(n / (n + gamma)) used when
// resampling the concentration parameters (n = this DP's factor-child count).
bool sample_auxilliary_s(DirichletProcess* dp) {
    double num_children = (double) dp->num_factor_children;
    double success_prob = num_children / (num_children + *(dp->gamma));
    return rand_bernoulli(success_prob);
}
// Refreshes the (w, s) auxiliary variables for every observed DP ahead of
// the concentration-parameter Gibbs updates; unobserved DPs are skipped.
void sample_gamma_aux_vars(HierarchicalDirichletProcess* hdp) {
    double* w_vec = hdp->w_aux_vector;
    bool* s_vec = hdp->s_aux_vector;
    int64_t num_dps = hdp->num_dps;
    for (int64_t id = 0; id < num_dps; id++) {
        DirichletProcess* dp = hdp->dps[id];
        if (dp->observed) {
            w_vec[id] = sample_auxilliary_w(dp);
            s_vec[id] = sample_auxilliary_s(dp);
        }
    }
}
// Gibbs update for the base DP's concentration parameter, following the
// auxiliary-variable scheme of Escobar and West (1995); log_w = log(eta)
// for the Beta auxiliary variable eta, num_factors = current base factor count.
void sample_base_gamma_internal(HierarchicalDirichletProcess* hdp, double log_w, int64_t num_factors) {
    // Escobar and West's (1995) algorithm
    DirichletProcess* base_dp = hdp->base_dp;
    double gamma_alpha = hdp->gamma_alpha[0]; // prior shape
    double gamma_beta = hdp->gamma_beta[0];   // prior rate
    double num_children = (double) base_dp->num_factor_children;
    // posterior Gamma parameters given the auxiliary variable
    double gamma_beta_post = gamma_beta - log_w;
    double gamma_alpha_post = gamma_alpha + (double) num_factors;
    // mixture weight: wt / (1 - wt) = (alpha_post - 1) / (n * beta_post)
    double frac = (gamma_alpha_post - 1.0)
                  / (num_children * gamma_beta_post);
    double wt = frac / (1.0 + frac);
    // note: different parameterization switches alpha and beta
    // NOTE(review): Escobar & West sample from ONE of the two Gamma
    // components chosen with probability wt; this instead returns the
    // wt-weighted average of two independent draws -- confirm this
    // approximation is intentional.
    float sample_gamma = wt * gengam(gamma_beta_post, gamma_alpha_post)
                         + (1 - wt) * gengam(gamma_beta_post, gamma_alpha_post - 1.0);
    hdp->gamma[0] = (double) sample_gamma;
}
// Gibbs update for the concentration parameter shared by all DPs at one
// (non-base) depth, from the aggregated auxiliary variables of that depth.
void sample_middle_gammas_internal(HierarchicalDirichletProcess* hdp, int64_t depth,
                                   double sum_log_w, int64_t sum_s, int64_t num_depth_fctrs) {
    float gamma_alpha_post = (float) (hdp->gamma_alpha[depth] + (double) (num_depth_fctrs - sum_s));
    float gamma_beta_post = (float) (hdp->gamma_beta[depth] - sum_log_w);
    // note: different parameterization switches alpha and beta
    hdp->gamma[depth] = (double) gengam(gamma_beta_post, gamma_alpha_post);
}
// Gibbs updates for the concentration parameter at every depth of the tree,
// using the (w, s) auxiliary variables drawn by sample_gamma_aux_vars.
// Advances the shared iteration counter and takes distribution samples on
// the same thinning/burn-in schedule as the factor sweeps.
void sample_gammas(HierarchicalDirichletProcess* hdp, int64_t* iter_counter, int64_t burn_in,
                   int64_t thinning, int64_t* sample_counter, int64_t num_samples) {
    int64_t iter = *iter_counter;
    int64_t samples_taken = *sample_counter;
    int64_t tree_depth = hdp->depth;
    double* w = hdp->w_aux_vector;
    bool* s = hdp->s_aux_vector;

    // per-depth sufficient statistics aggregated over observed DPs
    int64_t* num_depth_fctrs = (int64_t*) malloc(sizeof(int64_t) * tree_depth);
    double* sum_log_w = (double*) malloc(sizeof(double) * tree_depth);
    int64_t* sum_s = (int64_t*) malloc(sizeof(int64_t) * tree_depth);
    for (int64_t depth = 0; depth < tree_depth; depth++) {
        num_depth_fctrs[depth] = 0;
        sum_log_w[depth] = 0.0;
        sum_s[depth] = 0;
    }

    int64_t num_dps = hdp->num_dps;
    DirichletProcess** dps = hdp->dps;
    for (int64_t id = 0; id < num_dps; id++) {
        DirichletProcess* dp = dps[id];
        if (!dp->observed) {
            continue;
        }
        int64_t dp_depth = dp->depth;
        num_depth_fctrs[dp_depth] += stSet_size(dp->factors);
        sum_log_w[dp_depth] += log(w[id]);
        if (s[id]) sum_s[dp_depth]++;
    }

    for (int64_t depth = 0; depth < tree_depth; depth++) {
        if (depth == 0) {
            sample_base_gamma_internal(hdp, sum_log_w[depth], num_depth_fctrs[depth]);
        }
        else {
            sample_middle_gammas_internal(hdp, depth, sum_log_w[depth],
                                          sum_s[depth], num_depth_fctrs[depth]);
        }
        iter++;
        if (iter % thinning == 0) {
            if (iter > burn_in) {
                // fix: call with hdp directly -- previous code dereferenced a
                // loop-carried dp pointer that is NULL when no DP is observed
                take_distr_sample(hdp);
                samples_taken++;
                if (samples_taken >= num_samples) {
                    break;
                }
            }
        }
    }

    free(sum_log_w);
    free(sum_s);
    free(num_depth_fctrs);
    *iter_counter = iter;
    *sample_counter = samples_taken;
}
// One full concentration-parameter update: refresh the auxiliary variables,
// then Gibbs-sample the gamma at every depth.
void sample_gamma_params(HierarchicalDirichletProcess* hdp, int64_t* iter_counter, int64_t burn_in,
                         int64_t thinning, int64_t* sample_counter, int64_t num_samples) {
    sample_gamma_aux_vars(hdp);
    sample_gammas(hdp, iter_counter, burn_in, thinning, sample_counter, num_samples);
}
// Recursively sums log likelihoods over a factor subtree: a data point leaf
// contributes the log likelihood of its point under its parent; any other
// factor contributes the sum over its children.
double snapshot_joint_log_density_internal(Factor* fctr) {
    if (fctr->factor_type == DATA_PT) {
        return log(data_pt_factor_parent_likelihood(fctr, fctr->parent));
    }
    double total = 0.0;
    stSetIterator* child_iter = stSet_getIterator(fctr->children);
    for (Factor* child = stSet_getNext(child_iter); child != NULL;
         child = stSet_getNext(child_iter)) {
        total += snapshot_joint_log_density_internal(child);
    }
    stSet_destructIterator(child_iter);
    return total;
}
// Joint log density of the current factor state: sum of the recursive log
// densities of every base factor's subtree.
double snapshot_joint_log_density(HierarchicalDirichletProcess* hdp) {
    double total = 0.0;
    stSetIterator* iter = stSet_getIterator(hdp->base_dp->factors);
    for (Factor* base_fctr = stSet_getNext(iter); base_fctr != NULL;
         base_fctr = stSet_getNext(iter)) {
        total += snapshot_joint_log_density_internal(base_fctr);
    }
    stSet_destructIterator(iter);
    return total;
}
// Returns a malloc'ed array with the current factor count of each DP,
// indexed by DP id; *length_out receives the number of DPs. Caller frees.
int64_t* snapshot_num_factors(HierarchicalDirichletProcess* hdp, int64_t* length_out) {
    int64_t num_dps = hdp->num_dps;
    int64_t* counts = (int64_t*) malloc(sizeof(int64_t) * num_dps);
    DirichletProcess** dps = hdp->dps;
    for (int64_t i = 0; i < num_dps; i++) {
        counts[i] = (int64_t) stSet_size(dps[i]->factors);
    }
    *length_out = num_dps;
    return counts;
}
// Returns a malloc'ed copy of the current concentration parameters, one per
// tree depth; *length_out receives the tree depth. Caller frees.
double* snapshot_gamma_params(HierarchicalDirichletProcess* hdp, int64_t* length_out) {
    int64_t depth = hdp->depth;
    double* gamma_copy = (double*) malloc(sizeof(double) * depth);
    double* gammas = hdp->gamma;
    for (int64_t d = 0; d < depth; d++) {
        gamma_copy[d] = gammas[d];
    }
    *length_out = depth;
    return gamma_copy;
}
// Log likelihood of a factor's current parent assignment relative to all
// candidate assignments in its DP's pool (including the "new factor"
// option): log(P(parent) / sum over candidates), scaled down by 1000.
// Data point factors work in probability space; middle factors in log
// space with max-shift normalization.
double snapshot_factor_log_likelihood(Factor* fctr) {
    //double parent_prob;
    double parent_prob = 0.0;
    double cumul = 0.0;
    if (fctr->factor_type == BASE) {
        fprintf(stderr, "Cannot snapshot base factor log likelihood.\n");
        exit(EXIT_FAILURE);
    }
    else if (fctr->factor_type == DATA_PT) {
        Factor* parent_fctr = fctr->parent;
        DirichletProcess* parent_dp = parent_fctr->dp;
        stSet* pool = parent_dp->factors;
        int64_t num_fctrs = stSet_size(pool);
        stSetIterator* pool_iter = stSet_getIterator(pool);
        Factor* fctr_option;
        double fctr_size;
        double prob;
        // unnormalized probability of each candidate; remember the parent's
        for (int64_t i = 0; i < num_fctrs; i++) {
            fctr_option = (Factor*) stSet_getNext(pool_iter);
            fctr_size = (double) stSet_size(fctr_option->children);
            prob = fctr_size * data_pt_factor_parent_likelihood(fctr, fctr_option);
            cumul += prob;
            if (fctr_option == parent_fctr) {
                parent_prob = prob;
            }
        }
        stSet_destructIterator(pool_iter);
        // include the "open a new factor" option in the normalizer
        double gamma_param = *(parent_dp->gamma);
        cumul += gamma_param * unobserved_factor_likelihood(fctr, parent_dp);
    }
    else {
        DirichletProcess* dp = fctr->dp;
        DirichletProcess* parent_dp = dp->parent;
        Factor* parent_fctr = fctr->parent;
        // cache this factor's stats; the joint likelihood routines read them
        double mean, sum_sq_devs;
        int64_t num_data;
        get_factor_stats(fctr, &mean, &sum_sq_devs, &num_data);
        dp->cached_factor_mean = mean;
        dp->cached_factor_size = num_data;
        dp->cached_factor_sum_sq_dev = sum_sq_devs;
        stSet* pool = parent_dp->factors;
        int64_t num_fctrs = stSet_size(pool);
        int64_t num_choices = num_fctrs + 1;
        double* log_probs = (double*) malloc(sizeof(double) * num_choices);
        stSetIterator* pool_iter = stSet_getIterator(pool);
        Factor* fctr_option;
        double log_prob;
        //double parent_log_prob;
        double parent_log_prob = 0.0;
        double fctr_size;
        // unnormalized log probability of each candidate; remember the parent's
        for (int64_t i = 0; i < num_fctrs; i++) {
            fctr_option = (Factor *)stSet_getNext(pool_iter);
            fctr_size = (double )stSet_size(fctr_option->children);
            log_prob = factor_parent_joint_log_likelihood(fctr, fctr_option) + log(fctr_size);
            log_probs[i] = log_prob;
            if (fctr_option == parent_fctr) {
                parent_log_prob = log_prob;
            }
        }
        stSet_destructIterator(pool_iter);
        double gamma_param = *(dp->gamma);
        log_probs[num_fctrs] = unobserved_factor_joint_log_likelihood(fctr, parent_dp) + log(gamma_param);
        // shift by the max before exponentiating to avoid underflow
        double normalizing_const = max(log_probs, num_choices);
        parent_prob = exp(parent_log_prob - normalizing_const);
        for (int64_t i = 0; i < num_choices; i++) {
            cumul += exp(log_probs[i] - normalizing_const);;
        }
        free(log_probs);
    }
    // TODO: this is a hack, makes it inaccurate for the early iterations
    if (parent_prob == 0.0) {
        return 0.0;
    }
    return (log(parent_prob) - log(cumul)) / 1000.0;
}
// Sums the snapshot log likelihood of every factor child of every factor
// owned by this DP.
double snapshot_dir_proc_log_likelihood(DirichletProcess* dp) {
    double total = 0.0;
    stSetIterator* fctr_iter = stSet_getIterator(dp->factors);
    for (Factor* fctr = (Factor*) stSet_getNext(fctr_iter); fctr != NULL;
         fctr = (Factor*) stSet_getNext(fctr_iter)) {
        stSetIterator* child_iter = stSet_getIterator(fctr->children);
        for (Factor* child_fctr = (Factor*) stSet_getNext(child_iter); child_fctr != NULL;
             child_fctr = (Factor*) stSet_getNext(child_iter)) {
            total += snapshot_factor_log_likelihood(child_fctr);
        }
        stSet_destructIterator(child_iter);
    }
    stSet_destructIterator(fctr_iter);
    return total;
}
// Total snapshot log likelihood over all observed DPs in the hierarchy.
double snapshot_log_likelihood(HierarchicalDirichletProcess* hdp) {
    double total = 0.0;
    int64_t num_dps = hdp->num_dps;
    DirichletProcess** dps = hdp->dps;
    for (int64_t id = 0; id < num_dps; id++) {
        DirichletProcess* dp = dps[id];
        if (dp->observed) {
            total += snapshot_dir_proc_log_likelihood(dp);
        }
    }
    return total;
}
// Captures the sampler's current state for diagnostics: per-DP factor
// counts, concentration parameters, and two scalar summaries of fit.
// The two arrays are malloc'ed; caller frees them.
void take_snapshot(HierarchicalDirichletProcess* hdp, int64_t** num_dp_fctrs_out, int64_t* num_dps_out,
                   double** gamma_params_out, int64_t* num_gamma_params_out, double* log_likelihood_out,
                   double* log_density_out) {
    *num_dp_fctrs_out = snapshot_num_factors(hdp, num_dps_out);
    *gamma_params_out = snapshot_gamma_params(hdp, num_gamma_params_out);
    *log_likelihood_out = snapshot_log_likelihood(hdp);
    *log_density_out = snapshot_joint_log_density(hdp);
}
// Convenience wrapper: Gibbs sampling without a per-sweep snapshot callback.
void execute_gibbs_sampling(HierarchicalDirichletProcess* hdp, int64_t num_samples, int64_t burn_in,
                            int64_t thinning, bool verbose) {
    execute_gibbs_sampling_with_snapshots(hdp, num_samples, burn_in, thinning, NULL, NULL, verbose);
}
// Main Gibbs sampling loop: repeatedly sweeps all DPs in shuffled order,
// resampling factor assignments (and optionally the concentration
// parameters) until num_samples posterior predictive samples have been
// collected on the burn_in/thinning schedule. snapshot_func, if non-NULL,
// is invoked once per sweep with snapshot_func_args.
void execute_gibbs_sampling_with_snapshots(HierarchicalDirichletProcess* hdp, int64_t num_samples, int64_t burn_in, int64_t thinning,
                                           void (*snapshot_func)(HierarchicalDirichletProcess*, void*),
                                           void* snapshot_func_args, bool verbose) {
    if (hdp->data == NULL || hdp->data_pt_dp_id == NULL) {
        fprintf(stderr, "Cannot perform Gibbs sampling before passing data to HDP.\n");
        exit(EXIT_FAILURE);
    }
    if (!hdp->finalized) {
        fprintf(stderr, "Cannot perform Gibbs sampling before finalizing HDP structure.\n");
        exit(EXIT_FAILURE);
    }
    int64_t prev_sweep_iter_count = 0;
    int64_t sweep_counter = 1;
    int64_t iter_counter = 0;
    int64_t sample_counter = 0;
    int64_t num_dps = hdp->num_dps;
    int64_t non_data_pt_samples = 0;
    DirichletProcess** sampling_dps;
    while (sample_counter < num_samples) {
        if (verbose) {
            if (sweep_counter > 1) {
                // iterations beyond one-per-data-point came from middle factors
                non_data_pt_samples = iter_counter - prev_sweep_iter_count - hdp->data_length;
            }
            fprintf(stderr, "Beginning sweep %"PRId64". Performed %"PRId64" sampling iterations. Previous sweep sampled from ~%"PRId64" non-data point factors. Collected %"PRId64" of %"PRId64" distribution samples.\n", sweep_counter, iter_counter, non_data_pt_samples, sample_counter, num_samples);
            prev_sweep_iter_count = iter_counter;
            sweep_counter++;
        }
        if (snapshot_func != NULL) {
            snapshot_func(hdp, snapshot_func_args);
        }
        // visit DPs in a fresh random order each sweep
        sampling_dps = get_shuffled_dps(hdp);
        for (int64_t i = 0; i < num_dps; i++) {
            sample_dp_factors(sampling_dps[i], &iter_counter, burn_in, thinning,
                              &sample_counter, num_samples);
            if (sample_counter >= num_samples) {
                break;
            }
        }
        free(sampling_dps);
        if (hdp->sample_gamma && sample_counter < num_samples) {
            sample_gamma_params(hdp, &iter_counter, burn_in, thinning, &sample_counter,
                                num_samples);
        }
    }
}
// Convert the posterior-predictive sums accumulated during Gibbs sampling into
// averages over the collected samples, then fit spline slopes for each
// observed DP so densities can be interpolated later.  May only be called
// once, after at least one sample has been taken.
void finalize_distributions(HierarchicalDirichletProcess* hdp) {
    if (hdp->samples_taken <= 0) {
        fprintf(stderr, "Must perform Gibbs sampling before finalizing sampled distributions.\n");
        exit(EXIT_FAILURE);
    }
    if (hdp->splines_finalized) {
        fprintf(stderr, "Distributions have already been finalized.\n");
        exit(EXIT_FAILURE);
    }
    const double inv_sample_size = 1.0 / ((double) hdp->samples_taken);
    const int64_t grid_length = hdp->grid_length;
    for (int64_t id = 0; id < hdp->num_dps; id++) {
        DirichletProcess* dp = hdp->dps[id];
        // Only observed DPs accumulate their own distribution.
        if (!dp->observed) {
            continue;
        }
        double* distr = dp->posterior_predictive;
        for (int64_t i = 0; i < grid_length; i++) {
            distr[i] *= inv_sample_size;
        }
        dp->spline_slopes = spline_knot_slopes(hdp->sampling_grid, distr, grid_length);
    }
    hdp->splines_finalized = true;
}
// Interpolated posterior-predictive density of DP dp_id at point x.
// Unobserved DPs defer to their nearest observed ancestor; negative values
// produced by spline interpolation artifacts are clamped to zero.
double dir_proc_density(HierarchicalDirichletProcess* hdp, double x, int64_t dp_id) {
    if (!hdp->splines_finalized) {
        fprintf(stderr, "Must finalize distributions before querying densities.\n");
        exit(EXIT_FAILURE);
    }
    if (dp_id < 0 || dp_id >= hdp->num_dps) {
        fprintf(stderr, "Hierarchical Dirichlet process has no Dirichlet process with this ID.\n");
        exit(EXIT_FAILURE);
    }
    DirichletProcess* dp = hdp->dps[dp_id];
    // Walk up to the nearest ancestor that has its own sampled distribution.
    while (!dp->observed) {
        dp = dp->parent;
    }
    double density = grid_spline_interp(x, hdp->sampling_grid, dp->posterior_predictive,
                                        dp->spline_slopes, hdp->grid_length);
    return density > 0.0 ? density : 0.0;
}
// Memoized pairwise distance lookup.  The memo matrix stores the strict lower
// triangle (row-major), initialized to a negative sentinel; entries are
// computed lazily on first request via memo->metric_func.
double get_dir_proc_distance(DistributionMetricMemo* memo, int64_t dp_id_1, int64_t dp_id_2) {
    int64_t num_dps = memo->num_distrs;
    if (dp_id_1 < 0 || dp_id_2 < 0 || dp_id_1 >= num_dps || dp_id_2 >= num_dps) {
        fprintf(stderr, "Invalid Dirchlet process ID.\n");
        exit(EXIT_FAILURE);
    }
    if (dp_id_1 == dp_id_2) {
        return 0.0;
    }
    // Canonicalize the pair so that the larger id indexes the row.
    int64_t row = dp_id_1 > dp_id_2 ? dp_id_1 : dp_id_2;
    int64_t col = dp_id_1 > dp_id_2 ? dp_id_2 : dp_id_1;
    int64_t idx = ((row - 1) * row) / 2 + col;
    double* matrix = memo->memo_matrix;
    if (matrix[idx] < 0) {
        // Negative sentinel means "not computed yet".
        matrix[idx] = memo->metric_func(memo->hdp, row, col);
    }
    return matrix[idx];
}
// Distance between the posterior-predictive distributions of two DPs under an
// arbitrary grid-based metric.  Unobserved DPs inherit the distribution of
// their nearest observed ancestor.
// Fixes: the old error message claimed a Shannon-Jensen divergence even though
// this routine serves every metric, and dp ids were not range-checked
// (get_dir_proc_distance checks them; direct callers were unprotected).
double dir_proc_distance(HierarchicalDirichletProcess* hdp, int64_t dp_id_1, int64_t dp_id_2,
                         double (*dist_func)(double*, double*, double*, int64_t)) {
    if (!hdp->splines_finalized) {
        fprintf(stderr, "Cannot compute a distribution distance before finalizing distributions.\n");
        exit(EXIT_FAILURE);
    }
    if (dp_id_1 < 0 || dp_id_2 < 0 || dp_id_1 >= hdp->num_dps || dp_id_2 >= hdp->num_dps) {
        fprintf(stderr, "Hierarchical Dirichlet process has no Dirichlet process with this ID.\n");
        exit(EXIT_FAILURE);
    }
    int64_t grid_length = hdp->grid_length;
    double* grid = hdp->sampling_grid;
    DirichletProcess* dp_1 = hdp->dps[dp_id_1];
    DirichletProcess* dp_2 = hdp->dps[dp_id_2];
    // Unobserved DPs share their nearest observed ancestor's distribution.
    while (!dp_1->observed) {
        dp_1 = dp_1->parent;
    }
    while (!dp_2->observed) {
        dp_2 = dp_2->parent;
    }
    double* distr_1 = dp_1->posterior_predictive;
    double* distr_2 = dp_2->posterior_predictive;
    return dist_func(grid, distr_1, distr_2, grid_length);
}
double kl_divergence(double* x, double* distr_1, double* distr_2, int64_t length) {
double divergence = 0.0;
double left_pt = distr_1[0] * log(distr_1[0] / distr_2[0]) + distr_2[0] * log(distr_2[0] / distr_1[0]);
double right_pt;
double dx;
for (int64_t i = 1; i < length; i++) {
right_pt = distr_1[i] * log(distr_1[i] / distr_2[i]) + distr_2[i] * log(distr_2[i] / distr_1[i]);
dx = x[i] - x[i - 1];
divergence += 0.5 * (left_pt + right_pt) * dx;
left_pt = right_pt;
}
return divergence;
}
// Symmetrized KL divergence between the posterior predictives of two DPs.
double dir_proc_kl_divergence(HierarchicalDirichletProcess* hdp, int64_t dp_id_1, int64_t dp_id_2) {
    return dir_proc_distance(hdp, dp_id_1, dp_id_2, kl_divergence);
}
// Memo table of pairwise symmetrized KL divergences, filled lazily.
DistributionMetricMemo* new_kl_divergence_memo(HierarchicalDirichletProcess* hdp) {
    return new_distr_metric_memo(hdp, dir_proc_kl_divergence);
}
double hellinger_distance(double* x, double* distr_1, double* distr_2, int64_t length) {
double integral = 0.0;
double left_pt = sqrt(distr_1[0] * distr_2[0]);
double right_pt;
double dx;
for (int64_t i = 1; i < length; i++) {
right_pt = sqrt(distr_1[i] * distr_2[i]);
dx = x[i] - x[i - 1];
integral += 0.5 * (left_pt + right_pt) * dx;
left_pt = right_pt;
}
return sqrt(1.0 - integral);
}
// Hellinger distance between the posterior predictives of two DPs.
double dir_proc_hellinger_distance(HierarchicalDirichletProcess* hdp, int64_t dp_id_1, int64_t dp_id_2) {
    return dir_proc_distance(hdp, dp_id_1, dp_id_2, hellinger_distance);
}
// Memo table of pairwise Hellinger distances, filled lazily.
DistributionMetricMemo* new_hellinger_distance_memo(HierarchicalDirichletProcess* hdp) {
    return new_distr_metric_memo(hdp, dir_proc_hellinger_distance);
}
double l2_distance(double* x, double* distr_1, double* distr_2, int64_t length) {
double integral = 0.0;
double diff = distr_1[0] - distr_2[0];
double left_pt = diff * diff;
double right_pt;
double dx;
for (int64_t i = 1; i < length; i++) {
diff = distr_1[i] - distr_2[i];
right_pt = diff * diff;
dx = x[i] - x[i - 1];
integral += 0.5 * (left_pt + right_pt) * dx;
left_pt = right_pt;
}
return sqrt(integral);
}
// L2 distance between the posterior predictives of two DPs.
double dir_proc_l2_distance(HierarchicalDirichletProcess* hdp, int64_t dp_id_1, int64_t dp_id_2) {
    return dir_proc_distance(hdp, dp_id_1, dp_id_2, l2_distance);
}
// Memo table of pairwise L2 distances, filled lazily.
DistributionMetricMemo* new_l2_distance_memo(HierarchicalDirichletProcess* hdp) {
    return new_distr_metric_memo(hdp, dir_proc_l2_distance);
}
double shannon_jensen_distance(double* x, double* distr_1, double* distr_2, int64_t length) {
double divergence = 0.0;
double mean_distr_pt = 0.5 * (distr_1[0] + distr_2[0]);
double left_pt = 0.5 * (distr_1[0] * log(distr_1[0] / mean_distr_pt) + distr_2[0] * log(distr_2[0] / mean_distr_pt));
double right_pt;
double dx;
for (int64_t i = 1; i < length; i++) {
mean_distr_pt = 0.5 * (distr_1[i] + distr_2[i]);
right_pt = 0.5 * (distr_1[i] * log(distr_1[i] / mean_distr_pt) + distr_2[i] * log(distr_2[i] / mean_distr_pt));
dx = x[i] - x[i - 1];
divergence += 0.5 * (left_pt + right_pt) * dx;
left_pt = right_pt;
}
return sqrt(divergence);
}
// Jensen-Shannon distance between the posterior predictives of two DPs.
double dir_proc_shannon_jensen_distance(HierarchicalDirichletProcess* hdp, int64_t dp_id_1, int64_t dp_id_2) {
    return dir_proc_distance(hdp, dp_id_1, dp_id_2, shannon_jensen_distance);
}
// Memo table of pairwise Jensen-Shannon distances, filled lazily.
DistributionMetricMemo* new_shannon_jensen_distance_memo(HierarchicalDirichletProcess* hdp) {
    return new_distr_metric_memo(hdp, dir_proc_shannon_jensen_distance);
}
// Mean of DP dp_id's posterior predictive, integrated over the sampling grid
// with a right-endpoint rectangle rule.
// NOTE(review): dp_id is not range-checked here and posterior_predictive is
// assumed non-NULL (i.e. the DP is observed) -- confirm with callers.
double dir_proc_expected_val(HierarchicalDirichletProcess* hdp, int64_t dp_id) {
    const double* grid = hdp->sampling_grid;
    const double* density = hdp->dps[dp_id]->posterior_predictive;
    int64_t grid_length = hdp->grid_length;
    double mean = 0.0;
    for (int64_t i = 1; i < grid_length; i++) {
        mean += grid[i] * density[i] * (grid[i] - grid[i - 1]);
    }
    return mean;
}
// Variance of DP dp_id's posterior predictive about its expected value,
// integrated over the sampling grid with a right-endpoint rectangle rule.
// NOTE(review): same unchecked-dp_id assumption as dir_proc_expected_val.
double dir_proc_variance(HierarchicalDirichletProcess* hdp, int64_t dp_id) {
    const double* grid = hdp->sampling_grid;
    const double* density = hdp->dps[dp_id]->posterior_predictive;
    int64_t grid_length = hdp->grid_length;
    double mean = dir_proc_expected_val(hdp, dp_id);
    double variance = 0.0;
    for (int64_t i = 1; i < grid_length; i++) {
        double deviation = grid[i] - mean;
        variance += deviation * deviation * density[i] * (grid[i] - grid[i - 1]);
    }
    return variance;
}
// Distance between one DP from each of two different HDPs, under an arbitrary
// grid-based metric.  hdp_1 is the master: hdp_2's distribution is resampled
// onto hdp_1's grid via spline interpolation (dir_proc_density).
// Fix: the resampled buffer distr_2 was malloc'd and never freed (leaked on
// every call); it is now released before returning.
double compare_hdp_distrs(HierarchicalDirichletProcess* hdp_1, int64_t dp_id_1, // this HDP is the master for grid samples
                          HierarchicalDirichletProcess* hdp_2, int64_t dp_id_2,
                          double (*dist_func)(double*, double*, double*, int64_t)) {
    if (!hdp_1->splines_finalized || !hdp_2->splines_finalized) {
        fprintf(stderr, "Must finalize distributions of both hierarchical Dirichelt processes before comparing.\n");
        exit(EXIT_FAILURE);
    }
    int64_t num_dps_1 = hdp_1->num_dps;
    int64_t num_dps_2 = hdp_2->num_dps;
    if (dp_id_1 < 0 || dp_id_2 < 0 || dp_id_1 >= num_dps_1 || dp_id_2 >= num_dps_2) {
        fprintf(stderr, "Invalid Dirchlet process ID.\n");
        exit(EXIT_FAILURE);
    }
    double* grid = hdp_1->sampling_grid;
    int64_t grid_length = hdp_1->grid_length;
    DirichletProcess* dp_1 = hdp_1->dps[dp_id_1];
    // Unobserved DPs share their nearest observed ancestor's distribution.
    while (!dp_1->observed) {
        dp_1 = dp_1->parent;
    }
    double* distr_1 = dp_1->posterior_predictive;
    // Resample hdp_2's distribution onto hdp_1's grid.
    double* distr_2 = (double*) malloc(sizeof(double) * grid_length);
    for (int64_t i = 0; i < grid_length; i++) {
        distr_2[i] = dir_proc_density(hdp_2, grid[i], dp_id_2);
    }
    double dist = dist_func(grid, distr_1, distr_2, grid_length);
    free(distr_2); // previously leaked
    return dist;
}
// Cross-HDP comparison using the symmetrized KL divergence.
double compare_hdp_distrs_kl_divergence(HierarchicalDirichletProcess* hdp_1, int64_t dp_id_1,
                                        HierarchicalDirichletProcess* hdp_2, int64_t dp_id_2) {
    return compare_hdp_distrs(hdp_1, dp_id_1, hdp_2, dp_id_2, kl_divergence);
}
// Cross-HDP comparison using the L2 distance.
double compare_hdp_distrs_l2_distance(HierarchicalDirichletProcess* hdp_1, int64_t dp_id_1,
                                      HierarchicalDirichletProcess* hdp_2, int64_t dp_id_2) {
    return compare_hdp_distrs(hdp_1, dp_id_1, hdp_2, dp_id_2, l2_distance);
}
// Cross-HDP comparison using the Jensen-Shannon distance.
double compare_hdp_distrs_shannon_jensen_distance(HierarchicalDirichletProcess* hdp_1, int64_t dp_id_1,
                                                  HierarchicalDirichletProcess* hdp_2, int64_t dp_id_2) {
    return compare_hdp_distrs(hdp_1, dp_id_1, hdp_2, dp_id_2, shannon_jensen_distance);
}
// Cross-HDP comparison using the Hellinger distance.
double compare_hdp_distrs_hellinger_distance(HierarchicalDirichletProcess* hdp_1, int64_t dp_id_1,
                                             HierarchicalDirichletProcess* hdp_2, int64_t dp_id_2) {
    return compare_hdp_distrs(hdp_1, dp_id_1, hdp_2, dp_id_2, hellinger_distance);
}
// Writes one factor and, recursively, its whole subtree to `out`, one factor
// per tab-separated line: <type>\t<parent id>\t<type-specific payload>.
// Factors are numbered in emission (pre-order) order via *next_fctr_id, so a
// child line can reference its parent by the id assigned to the parent's line.
// Types: 0 = BASE (payload: ';'-separated cached parameter array),
// 1 = MIDDLE (payload: owning DP id), 2 = data point (payload: index of the
// data point within hdp->data, recovered from the pointer offset data_start).
void serialize_factor_tree_internal(FILE* out, Factor* fctr, int64_t parent_id, int64_t* next_fctr_id, uintptr_t data_start) {
    int64_t id = *next_fctr_id;
    (*next_fctr_id)++;
    // factor type
    if (fctr->factor_type == BASE) {
        fprintf(out, "0\t");
    }
    else if (fctr->factor_type == MIDDLE) {
        fprintf(out, "1\t");
    }
    else {
        fprintf(out, "2\t");
    }
    // parent id ("-" marks the root of a factor tree)
    if (fctr->factor_type == BASE) {
        fprintf(out, "-\t");
    }
    else {
        fprintf(out, "%"PRId64"\t", parent_id);
    }
    // extra data based on type
    if (fctr->factor_type == BASE) {
        // cached params: N_IG_NUM_PARAMS + 1 values, ';'-separated
        double* param_array = fctr->factor_data;
        for (int64_t i = 0; i < N_IG_NUM_PARAMS; i++) {
            fprintf(out, "%.17lg;", param_array[i]);
        }
        fprintf(out, "%.17lg", param_array[N_IG_NUM_PARAMS]);
    }
    else if (fctr->factor_type == MIDDLE) {
        // dp id
        fprintf(out, "%"PRId64, fctr->dp->id);
    }
    else {
        // data index, recovered from the byte offset into hdp->data.
        // NOTE(review): divides by sizeof(int64_t) although the data array
        // holds doubles; the sizes coincide on common platforms -- confirm.
        uintptr_t data_pos = (uintptr_t) fctr->factor_data;
        fprintf(out, "%"PRId64, ((int64_t) (data_pos - data_start)) / sizeof(int64_t));
    }
    fprintf(out, "\n");
    // pre-order recursion into the children set
    if (fctr->children != NULL) {
        stSetIterator* iter = stSet_getIterator(fctr->children);
        Factor* child_fctr = (Factor*) stSet_getNext(iter);
        while (child_fctr != NULL) {
            serialize_factor_tree_internal(out, child_fctr, id, next_fctr_id, data_start);
            child_fctr = (Factor*) stSet_getNext(iter);
        }
        stSet_destructIterator(iter);
    }
}
// Writes the full state of a structure-finalized HDP to `out` in the
// line-oriented text format read back by deserialize_hdp: flags, data and DP
// assignments, base distribution params, sampling grid, gamma parameters
// (plus auxiliary gamma-sampling state), the DP tree, the sampled
// distributions and spline slopes, and finally the factor trees.
// Fix: in the spline-slope section the inner grid loop reused `i`, shadowing
// the outer DP index; it now uses `j`, matching the post-pred section.
void serialize_hdp(HierarchicalDirichletProcess* hdp, FILE* out) {
    int64_t num_dps = hdp->num_dps;
    int64_t num_data = hdp->data_length;
    double* data = hdp->data;
    int64_t* dp_ids = hdp->data_pt_dp_id;
    int64_t grid_length = hdp->grid_length;
    double* grid = hdp->sampling_grid;
    int64_t depth = hdp->depth;
    double* gamma_params = hdp->gamma;
    double* gamma_alpha = hdp->gamma_alpha;
    double* gamma_beta = hdp->gamma_beta;
    double* w_aux_vector = hdp->w_aux_vector;
    bool* s_aux_vector = hdp->s_aux_vector;
    DirichletProcess** dps = hdp->dps;
    DirichletProcess* base_dp = hdp->base_dp;
    bool has_data = hdp->data != NULL;
    if (!hdp->finalized) {
        fprintf(stderr, "Can only serialize HierarchicalDirichletProcess with finalized structure");
        exit(EXIT_FAILURE);
    }
    // splines finalized
    fprintf(out, "%"PRId64"\n", (int64_t) hdp->splines_finalized);
    // has data
    fprintf(out, "%"PRId64"\n", (int64_t) has_data);
    // sample gamma
    fprintf(out, "%"PRId64"\n", (int64_t) hdp->sample_gamma);
    // num dps
    fprintf(out, "%"PRId64"\n", num_dps);
    // data (one tab-separated line, then the matching DP assignment line)
    // NOTE(review): assumes num_data > 0 whenever data != NULL -- confirm.
    if (has_data) {
        for (int64_t i = 0; i < num_data - 1; i++) {
            fprintf(out, "%.17lg\t", data[i]);
        }
        fprintf(out, "%.17lg\n", data[num_data - 1]);
        // dp ids
        for (int64_t i = 0; i < num_data - 1; i++) {
            fprintf(out, "%"PRId64"\t", dp_ids[i]);
        }
        fprintf(out, "%"PRId64"\n", dp_ids[num_data - 1]);
    }
    // base params (alpha is stored doubled internally, so halve it here)
    fprintf(out, "%.17lg\t%.17lg\t%.17lg\t%.17lg\n", hdp->mu, hdp->nu, (hdp->two_alpha) / 2.0, hdp->beta);
    // sampling grid: endpoints and length only (the grid is uniform enough to rebuild)
    fprintf(out, "%.17lg\t%.17lg\t%"PRId64"\n", grid[0], grid[grid_length - 1], grid_length);
    // gamma, one value per tree depth
    for (int64_t i = 0; i < depth - 1; i++) {
        fprintf(out, "%.17lg\t", gamma_params[i]);
    }
    fprintf(out, "%.17lg\n", gamma_params[depth - 1]);
    // gamma distr params (only present when gamma is itself sampled)
    if (hdp->sample_gamma) {
        // alpha
        for (int64_t i = 0; i < depth - 1; i++) {
            fprintf(out, "%.17lg\t", gamma_alpha[i]);
        }
        fprintf(out, "%.17lg\n", gamma_alpha[depth - 1]);
        // beta
        for (int64_t i = 0; i < depth - 1; i++) {
            fprintf(out, "%.17lg\t", gamma_beta[i]);
        }
        fprintf(out, "%.17lg\n", gamma_beta[depth - 1]);
        // w auxiliary variables, one per DP
        for (int64_t i = 0; i < num_dps - 1; i++) {
            fprintf(out, "%.17lg\t", w_aux_vector[i]);
        }
        fprintf(out, "%.17lg\n", w_aux_vector[num_dps - 1]);
        // s auxiliary variables, one per DP
        for (int64_t i = 0; i < num_dps - 1; i++) {
            fprintf(out, "%"PRId64"\t", (int64_t) s_aux_vector[i]);
        }
        fprintf(out, "%"PRId64"\n", (int64_t) s_aux_vector[num_dps - 1]);
    }
    // dp parents: one line per DP, "<parent id or -> \t <num factor children>"
    DirichletProcess* dp;
    for (int64_t i = 0; i < num_dps; i++) {
        dp = dps[i];
        // parent ("-" marks the base DP)
        if (dp == base_dp) {
            fprintf(out, "-\t%"PRId64"\n", dp->num_factor_children);
        }
        else {
            fprintf(out, "%"PRId64"\t%"PRId64"\n", dp->parent->id, dp->num_factor_children);
        }
    }
    // post preds: one (possibly empty) line per DP
    if (has_data) {
        double* post_pred;
        for (int64_t i = 0; i < num_dps; i++) {
            dp = dps[i];
            post_pred = dp->posterior_predictive;
            if (post_pred != NULL) {
                for (int64_t j = 0; j < grid_length - 1; j++) {
                    fprintf(out, "%.17lg\t", post_pred[j]);
                }
                fprintf(out, "%.17lg", post_pred[grid_length - 1]);
            }
            fprintf(out, "\n");
        }
    }
    // spline slopes: one (possibly empty) line per DP
    if (hdp->splines_finalized) {
        double* slopes;
        for (int64_t i = 0; i < num_dps; i++) {
            dp = dps[i];
            slopes = dp->spline_slopes;
            if (slopes != NULL) {
                // use j so the grid index does not shadow the DP index i
                for (int64_t j = 0; j < grid_length - 1; j++) {
                    fprintf(out, "%.17lg\t", slopes[j]);
                }
                fprintf(out, "%.17lg", slopes[grid_length - 1]);
            }
            fprintf(out, "\n");
        }
    }
    // factors: every factor tree rooted at the base DP, pre-order
    if (has_data) {
        int64_t next_fctr_id = 0;
        uintptr_t data_start = (uintptr_t) hdp->data;
        stSetIterator* iter = stSet_getIterator(base_dp->factors);
        Factor* fctr = (Factor*) stSet_getNext(iter);
        while (fctr != NULL) {
            serialize_factor_tree_internal(out, fctr, -1, &next_fctr_id, data_start);
            fctr = (Factor*) stSet_getNext(iter);
        }
        stSet_destructIterator(iter);
    }
}
// Reconstructs an HDP from the text format produced by serialize_hdp, reading
// the sections in the same order they were written: flags, data and DP
// assignments, base distribution params, sampling grid, gamma (plus auxiliary
// gamma-sampling state), the DP tree, sampled distributions, spline slopes,
// and finally the factor trees.
// Fixes: the token list built for each factor line was never destructed
// (leaked once per factor); a mojibake'd "&param_array[i]" and a stray ";;"
// are repaired; data/data_length are now initialized.
HierarchicalDirichletProcess* deserialize_hdp(FILE* in) {
    // splines finalized
    char* end;
    char* line = stFile_getLineFromFile(in);
    bool splines_finalized = (bool) strtol(line, &end, 10);
    free(line);
    // has data
    line = stFile_getLineFromFile(in);
    bool has_data = (bool) strtol(line, &end, 10);
    free(line);
    // sample gamma
    line = stFile_getLineFromFile(in);
    bool sample_gamma = (bool) strtol(line, &end, 10);
    free(line);
    // num dps
    line = stFile_getLineFromFile(in);
    int64_t num_dps = (int64_t) strtol(line, &end, 10);
    free(line);
    double* data = NULL;
    int64_t* dp_ids = NULL;
    int64_t data_length = 0;
    stList* tokens;
    if (has_data) {
        // data values, one tab-separated line
        line = stFile_getLineFromFile(in);
        tokens = stString_split(line);
        data_length = stList_length(tokens);
        data = (double*) malloc(sizeof(double) * data_length);
        for (int64_t i = 0; i < data_length; i++) {
            sscanf(stList_get(tokens, i), "%lf", &(data[i]));
        }
        free(line);
        stList_destruct(tokens);
        // dp ids, one per data point
        line = stFile_getLineFromFile(in);
        tokens = stString_split(line);
        dp_ids = (int64_t*) malloc(sizeof(int64_t) * data_length);
        for (int64_t i = 0; i < data_length; i++) {
            sscanf((char*) stList_get(tokens, i), "%"SCNd64, &(dp_ids[i]));
        }
        free(line);
        stList_destruct(tokens);
    }
    // base params
    line = stFile_getLineFromFile(in);
    double mu, nu, alpha, beta;
    sscanf(line, "%lg\t%lg\t%lg\t%lg", &mu, &nu, &alpha, &beta);
    free(line);
    // sampling grid (endpoints and length; rebuilt by the constructor)
    line = stFile_getLineFromFile(in);
    double grid_start, grid_stop;
    int64_t grid_length;
    sscanf(line, "%lg\t%lg\t%"SCNd64, &grid_start, &grid_stop, &grid_length);
    free(line);
    // gamma; the token count determines the tree depth
    line = stFile_getLineFromFile(in);
    tokens = stString_split(line);
    int64_t depth = stList_length(tokens);
    double* gamma_params = (double*) malloc(sizeof(double) * depth);
    for (int64_t i = 0; i < depth; i++) {
        sscanf((char*) stList_get(tokens, i), "%lf", &(gamma_params[i]));
    }
    free(line);
    stList_destruct(tokens);
    // gamma distr params (only present when gamma was sampled)
    double* gamma_alpha;
    double* gamma_beta;
    double* w;
    bool* s;
    int64_t s_int;
    if (sample_gamma) {
        line = stFile_getLineFromFile(in);
        tokens = stString_split(line);
        // gamma alpha
        gamma_alpha = (double*) malloc(sizeof(double) * depth);
        for (int64_t i = 0; i < depth; i++) {
            sscanf((char*) stList_get(tokens, i), "%lf", &(gamma_alpha[i]));
        }
        free(line);
        stList_destruct(tokens);
        // gamma beta
        line = stFile_getLineFromFile(in);
        tokens = stString_split(line);
        gamma_beta = (double*) malloc(sizeof(double) * depth);
        for (int64_t i = 0; i < depth; i++) {
            sscanf((char*) stList_get(tokens, i), "%lf", &(gamma_beta[i]));
        }
        free(line);
        stList_destruct(tokens);
        // w auxiliary variables, one per DP
        line = stFile_getLineFromFile(in);
        tokens = stString_split(line);
        w = (double*) malloc(sizeof(double) * num_dps);
        for (int64_t i = 0; i < num_dps; i++) {
            sscanf((char*) stList_get(tokens, i), "%lf", &(w[i]));
        }
        free(line);
        stList_destruct(tokens);
        // s auxiliary variables, one per DP
        line = stFile_getLineFromFile(in);
        tokens = stString_split(line);
        s = (bool*) malloc(sizeof(bool) * num_dps);
        for (int64_t i = 0; i < num_dps; i++) {
            sscanf((char*) stList_get(tokens, i), "%"SCNd64, &s_int);
            s[i] = (bool) s_int;
        }
        free(line);
        stList_destruct(tokens);
    }
    // construct hdp and restore the sampled state the constructor cannot know
    HierarchicalDirichletProcess* hdp;
    if (sample_gamma) {
        hdp = new_hier_dir_proc_2(num_dps, depth, gamma_alpha, gamma_beta, grid_start,
                                  grid_stop, grid_length, mu, nu, alpha, beta);
        for (int64_t i = 0; i < depth; i++) {
            hdp->gamma[i] = gamma_params[i];
        }
        free(gamma_params);
        for (int64_t i = 0; i < num_dps; i++) {
            hdp->w_aux_vector[i] = w[i];
            hdp->s_aux_vector[i] = s[i];
        }
        free(w);
        free(s);
    }
    else {
        // NOTE(review): gamma_params ownership appears to pass to the
        // constructor in this branch -- confirm new_hier_dir_proc keeps it.
        hdp = new_hier_dir_proc(num_dps, depth, gamma_params, grid_start, grid_stop,
                                grid_length, mu, nu, alpha, beta);
    }
    DirichletProcess** dps = hdp->dps;
    DirichletProcess* dp;
    // dp parents and num children; "-" marks the base DP
    int64_t parent_id;
    int64_t num_factor_children;
    for (int64_t id = 0; id < num_dps; id++) {
        line = stFile_getLineFromFile(in);
        if (line[0] != '-') {
            sscanf(line, "%"SCNd64"\t%"SCNd64, &parent_id, &num_factor_children);
            set_dir_proc_parent(hdp, id, parent_id);
            (dps[id])->num_factor_children = num_factor_children;
        }
        else {
            sscanf(line, "-\t%"SCNd64, &num_factor_children);
            (dps[id])->num_factor_children = num_factor_children;
        }
        free(line);
    }
    finalize_hdp_structure(hdp);
    // give it data
    if (has_data) {
        // note: don't use pass_hdp_data because want to manually init factors
        hdp->data = data;
        hdp->data_pt_dp_id = dp_ids;
        hdp->data_length = data_length;
        verify_valid_dp_assignments(hdp);
        mark_observed_dps(hdp);
        // post predictives: one (possibly empty) line per DP
        double* post_pred;
        for (int64_t id = 0; id < num_dps; id++) {
            dp = dps[id];
            line = stFile_getLineFromFile(in);
            stList* tokens = stString_split(line);
            if (stList_length(tokens) != 0) {
                free(dp->posterior_predictive);
                dp->posterior_predictive = (double*) malloc(sizeof(double) * grid_length);
                post_pred = dp->posterior_predictive;
                for (int64_t i = 0; i < grid_length; i++) {
                    sscanf((char*) stList_get(tokens, i), "%lf\n", &(post_pred[i]));
                }
            }
            free(line);
            stList_destruct(tokens);
        }
    }
    // spline slopes: one (possibly empty) line per DP
    double* spline_slopes;
    if (splines_finalized) {
        hdp->splines_finalized = true;
        for (int64_t id = 0; id < num_dps; id++) {
            dp = dps[id];
            line = stFile_getLineFromFile(in);
            stList* tokens = stString_split(line);
            if (stList_length(tokens) != 0) {
                spline_slopes = (double*) malloc(sizeof(double) * grid_length);
                dp->spline_slopes = spline_slopes;
                for (int64_t i = 0; i < grid_length; i++) {
                    sscanf((char*) stList_get(tokens, i), "%lf", &(spline_slopes[i]));
                }
            }
            free(line);
            stList_destruct(tokens);
        }
    }
    // factor trees: one factor per remaining line, parents referenced by the
    // pre-order index assigned during serialization
    if (has_data) {
        char* type_str;
        char* parent_str;
        char* dp_str;
        char* idx_str;
        char* params_str;
        int64_t type_int;
        int64_t dp_id;
        int64_t data_pt_idx;
        int64_t parent_idx;
        double* param_array;
        stList* params_list;
        Factor* fctr;
        Factor* parent_fctr;
        stList* fctr_list = stList_construct();
        line = stFile_getLineFromFile(in);
        while (line != NULL) {
            tokens = stString_split(line);
            type_str = (char*) stList_get(tokens, 0);
            sscanf(type_str, "%"SCNd64, &type_int);
            if (type_int == 0) {
                // base factor: restore the ';'-separated cached params
                fctr = new_base_factor(hdp);
                params_str = (char*) stList_get(tokens, 2);
                params_list = stString_splitByString(params_str, ";");
                param_array = fctr->factor_data;
                for (int64_t i = 0; i < N_IG_NUM_PARAMS + 1; i++) {
                    sscanf((char*) stList_get(params_list, i), "%lf", &param_array[i]);
                }
                stList_destruct(params_list);
            }
            else if (type_int == 1) {
                // middle factor: payload is the owning DP id
                dp_str = (char*) stList_get(tokens, 2);
                sscanf(dp_str, "%"SCNd64, &dp_id);
                fctr = new_middle_factor(dps[dp_id]);
            }
            else if (type_int == 2) {
                // data point factor: payload is the data index
                idx_str = (char*) stList_get(tokens, 2);
                sscanf(idx_str, "%"SCNd64, &data_pt_idx);
                fctr = new_data_pt_factor(hdp, data_pt_idx);
            }
            else {
                fprintf(stderr, "Deserialization error");
                exit(EXIT_FAILURE);
            }
            stList_append(fctr_list, (void*) fctr);
            // set parent if appicable ("-" marks a root factor)
            parent_str = (char*) stList_get(tokens, 1);
            if (parent_str[0] != '-') {
                sscanf(parent_str, "%"SCNd64, &parent_idx);
                parent_fctr = (Factor*) stList_get(fctr_list, parent_idx);
                fctr->parent = parent_fctr;
                stSet_insert(parent_fctr->children, (void*) fctr);
            }
            stList_destruct(tokens); // previously leaked once per factor line
            free(line);
            line = stFile_getLineFromFile(in);
        }
        stList_destruct(fctr_list);
    }
    return hdp;
}
|
udr-2.c | /* { dg-do run } */
extern void abort ();
struct S { int s; };
#pragma omp declare reduction (+:struct S:omp_out.s += omp_in.s)
#pragma omp declare reduction (foo:struct S:omp_out.s += omp_in.s)
#pragma omp declare reduction (foo:int:omp_out += omp_in)
int
main ()
{
  /* GCC OpenMP testsuite case for user-defined reductions: '+' and 'foo' are
     declared above for struct S, and 'foo' also for int.  Code is kept
     byte-identical so the test still exercises the same compiler paths.  */
  int u = 0, q = 0;
  struct S s, t;
  s.s = 0; t.s = 0;
  #pragma omp parallel reduction(+:s, q) reduction(foo:t, u)
  {
    /* Each thread's private copy is expected to start from zero.  */
    if (s.s != 0 || t.s != 0 || u != 0 || q != 0) abort ();
    s.s = 6;
    t.s = 8;
    u = 9;
    q++;
  }
  /* q counts the participating threads, so every combined total must scale
     with it.  */
  if (s.s != 6 * q || t.s != 8 * q || u != 9 * q) abort ();
  return 0;
}
|
crop_and_resize.c | #include <TH/TH.h>
#include <stdio.h>
#include <math.h>
// Bilinearly samples boxes [start_box, limit_box) out of a batch of images.
// image_data: [batch, depth, image_height, image_width], channel-major;
// boxes_data: per-box normalized [y1, x1, y2, x2]; box_index_data: source
// image index per box; corps_data (sic, "crops"): output laid out as
// [num_boxes, depth, crop_height, crop_width].  Sample points that fall
// outside the source image are filled with extrapolation_value.  Boxes are
// processed in parallel with OpenMP.
void CropAndResizePerBox(
    const float * image_data,
    const int batch_size,
    const int depth,
    const int image_height,
    const int image_width,
    const float * boxes_data,
    const int * box_index_data,
    const int start_box,
    const int limit_box,
    float * corps_data,
    const int crop_height,
    const int crop_width,
    const float extrapolation_value
) {
    // Element counts used to step between images, channels, and crops.
    const int image_channel_elements = image_height * image_width;
    const int image_elements = depth * image_channel_elements;
    const int channel_elements = crop_height * crop_width;
    const int crop_elements = depth * channel_elements;
    int b;
    #pragma omp parallel for
    for (b = start_box; b < limit_box; ++b) {
        const float * box = boxes_data + b * 4;
        const float y1 = box[0];
        const float x1 = box[1];
        const float y2 = box[2];
        const float x2 = box[3];
        const int b_in = box_index_data[b];
        if (b_in < 0 || b_in >= batch_size) {
            // NOTE(review): exit() from inside an OpenMP parallel region, and
            // the error goes to stdout -- consider validating box_index
            // before the loop and reporting on stderr.
            printf("Error: batch_index %d out of range [0, %d)\n", b_in, batch_size);
            exit(-1);
        }
        // Source pixels advanced per crop pixel (0 for 1-wide/-high crops).
        const float height_scale =
            (crop_height > 1)
                ? (y2 - y1) * (image_height - 1) / (crop_height - 1)
                : 0;
        const float width_scale =
            (crop_width > 1) ? (x2 - x1) * (image_width - 1) / (crop_width - 1)
                             : 0;
        for (int y = 0; y < crop_height; ++y)
        {
            // Source row; a 1-pixel-high crop samples the box's vertical center.
            const float in_y = (crop_height > 1)
                                   ? y1 * (image_height - 1) + y * height_scale
                                   : 0.5 * (y1 + y2) * (image_height - 1);
            if (in_y < 0 || in_y > image_height - 1)
            {
                // Entire output row falls outside the image: extrapolate.
                for (int x = 0; x < crop_width; ++x)
                {
                    for (int d = 0; d < depth; ++d)
                    {
                        // crops(b, y, x, d) = extrapolation_value;
                        corps_data[crop_elements * b + channel_elements * d + y * crop_width + x] = extrapolation_value;
                    }
                }
                continue;
            }
            const int top_y_index = floorf(in_y);
            const int bottom_y_index = ceilf(in_y);
            const float y_lerp = in_y - top_y_index;
            for (int x = 0; x < crop_width; ++x)
            {
                // Source column; a 1-pixel-wide crop samples the horizontal center.
                const float in_x = (crop_width > 1)
                                       ? x1 * (image_width - 1) + x * width_scale
                                       : 0.5 * (x1 + x2) * (image_width - 1);
                if (in_x < 0 || in_x > image_width - 1)
                {
                    // Sample point outside the image: extrapolate.
                    for (int d = 0; d < depth; ++d)
                    {
                        corps_data[crop_elements * b + channel_elements * d + y * crop_width + x] = extrapolation_value;
                    }
                    continue;
                }
                const int left_x_index = floorf(in_x);
                const int right_x_index = ceilf(in_x);
                const float x_lerp = in_x - left_x_index;
                for (int d = 0; d < depth; ++d)
                {
                    // Bilinear interpolation of the four neighboring pixels,
                    // done per channel.
                    const float *pimage = image_data + b_in * image_elements + d * image_channel_elements;
                    const float top_left = pimage[top_y_index * image_width + left_x_index];
                    const float top_right = pimage[top_y_index * image_width + right_x_index];
                    const float bottom_left = pimage[bottom_y_index * image_width + left_x_index];
                    const float bottom_right = pimage[bottom_y_index * image_width + right_x_index];
                    const float top = top_left + (top_right - top_left) * x_lerp;
                    const float bottom =
                        bottom_left + (bottom_right - bottom_left) * x_lerp;
                    corps_data[crop_elements * b + channel_elements * d + y * crop_width + x] = top + (bottom - top) * y_lerp;
                }
            } // end for x
        } // end for y
    } // end for b
}
// Forward pass: resample the regions described by boxes/box_index out of
// `image` (NCHW) into `crops`, which is resized here to
// [num_boxes, depth, crop_height, crop_width] and zeroed before filling.
void crop_and_resize_forward(
    THFloatTensor * image,
    THFloatTensor * boxes, // [y1, x1, y2, x2]
    THIntTensor * box_index, // range in [0, batch_size)
    const float extrapolation_value,
    const int crop_height,
    const int crop_width,
    THFloatTensor * crops
) {
    const int batch_size = THFloatTensor_size(image, 0);
    const int depth = THFloatTensor_size(image, 1);
    const int image_height = THFloatTensor_size(image, 2);
    const int image_width = THFloatTensor_size(image, 3);
    const int num_boxes = THFloatTensor_size(boxes, 0);

    // Shape and clear the output, then fill it box by box.
    THFloatTensor_resize4d(crops, num_boxes, depth, crop_height, crop_width);
    THFloatTensor_zero(crops);

    CropAndResizePerBox(
        THFloatTensor_data(image), batch_size, depth, image_height, image_width,
        THFloatTensor_data(boxes), THIntTensor_data(box_index),
        0, num_boxes,
        THFloatTensor_data(crops), crop_height, crop_width,
        extrapolation_value
    );
}
// Backward pass: scatter the crop gradients back into the image gradient
// tensor.  Each crop pixel's gradient is split among the four source pixels
// it interpolated from, weighted by the same bilinear coefficients as the
// forward pass; sample points outside the image contribute nothing.
// NOTE(review): unlike the forward pass this loop is serial -- parallelizing
// it over boxes would race on grads_image accumulation.
void crop_and_resize_backward(
    THFloatTensor * grads,
    THFloatTensor * boxes, // [y1, x1, y2, x2]
    THIntTensor * box_index, // range in [0, batch_size)
    THFloatTensor * grads_image // resize to [bsize, c, hc, wc]
)
{
    // shape
    const int batch_size = THFloatTensor_size(grads_image, 0);
    const int depth = THFloatTensor_size(grads_image, 1);
    const int image_height = THFloatTensor_size(grads_image, 2);
    const int image_width = THFloatTensor_size(grads_image, 3);
    const int num_boxes = THFloatTensor_size(grads, 0);
    const int crop_height = THFloatTensor_size(grads,2);
    const int crop_width = THFloatTensor_size(grads,3);
    // n_elements: strides between images, channels, and crops
    const int image_channel_elements = image_height * image_width;
    const int image_elements = depth * image_channel_elements;
    const int channel_elements = crop_height * crop_width;
    const int crop_elements = depth * channel_elements;
    // init output space (gradients are accumulated with +=)
    THFloatTensor_zero(grads_image);
    // data pointer
    const float * grads_data = THFloatTensor_data(grads);
    const float * boxes_data = THFloatTensor_data(boxes);
    const int * box_index_data = THIntTensor_data(box_index);
    float * grads_image_data = THFloatTensor_data(grads_image);
    for (int b = 0; b < num_boxes; ++b) {
        const float * box = boxes_data + b * 4;
        const float y1 = box[0];
        const float x1 = box[1];
        const float y2 = box[2];
        const float x2 = box[3];
        const int b_in = box_index_data[b];
        if (b_in < 0 || b_in >= batch_size) {
            printf("Error: batch_index %d out of range [0, %d)\n", b_in, batch_size);
            exit(-1);
        }
        // Same sampling geometry as the forward pass.
        const float height_scale =
            (crop_height > 1) ? (y2 - y1) * (image_height - 1) / (crop_height - 1)
                              : 0;
        const float width_scale =
            (crop_width > 1) ? (x2 - x1) * (image_width - 1) / (crop_width - 1)
                             : 0;
        for (int y = 0; y < crop_height; ++y)
        {
            const float in_y = (crop_height > 1)
                                   ? y1 * (image_height - 1) + y * height_scale
                                   : 0.5 * (y1 + y2) * (image_height - 1);
            if (in_y < 0 || in_y > image_height - 1)
            {
                // Extrapolated pixels receive no gradient.
                continue;
            }
            const int top_y_index = floorf(in_y);
            const int bottom_y_index = ceilf(in_y);
            const float y_lerp = in_y - top_y_index;
            for (int x = 0; x < crop_width; ++x)
            {
                const float in_x = (crop_width > 1)
                                       ? x1 * (image_width - 1) + x * width_scale
                                       : 0.5 * (x1 + x2) * (image_width - 1);
                if (in_x < 0 || in_x > image_width - 1)
                {
                    continue;
                }
                const int left_x_index = floorf(in_x);
                const int right_x_index = ceilf(in_x);
                const float x_lerp = in_x - left_x_index;
                for (int d = 0; d < depth; ++d)
                {
                    // Distribute the gradient over the four source pixels with
                    // the forward pass's bilinear weights.
                    float *pimage = grads_image_data + b_in * image_elements + d * image_channel_elements;
                    const float grad_val = grads_data[crop_elements * b + channel_elements * d + y * crop_width + x];
                    const float dtop = (1 - y_lerp) * grad_val;
                    pimage[top_y_index * image_width + left_x_index] += (1 - x_lerp) * dtop;
                    pimage[top_y_index * image_width + right_x_index] += x_lerp * dtop;
                    const float dbottom = y_lerp * grad_val;
                    pimage[bottom_y_index * image_width + left_x_index] += (1 - x_lerp) * dbottom;
                    pimage[bottom_y_index * image_width + right_x_index] += x_lerp * dbottom;
                } // end d
            } // end x
        } // end y
    } // end b
}
loops.c | /**
* loops.c
*
* Counting loops with and without OpenMP
*
* @author pikryukov
* @version 2.5
*
* e-mail: kryukov@frtk.ru
*
* Copyright (C) Kryukov Pavel 2012
* for MIPT Parallel Algorithms course.
*/
#include <stdio.h> /* fprintf, fopen, fclose */
#include <stdlib.h> /* malloc, free */
#include <math.h> /* sin */
/* Sizes of array */
#define ISIZE 10000
#define JSIZE 10000
/* Amount of diagonals in array */
#define DSIZE (ISIZE + JSIZE - 1)
/* Size of largest diagonal */
#define HIPOTEN (JSIZE < ISIZE ? JSIZE : ISIZE)
/* Counting function */
#define FUNC(x) (sin(0.00001 * (x)))
/* Name of outputfile */
#ifdef DIAGMODE
# define FILENAME "new"
#else
# define FILENAME "old"
#endif
/* Check of compiling flags */
#ifndef LOOP
# error Please define loop number with LOOP define
#elif LOOP > 3 || LOOP < 1
# error Incorrect loop number, should be from 1 to 3
#endif
/**
 * Allocates an ISIZE x JSIZE matrix of doubles as an array of row pointers.
 * @return array pointer
 */
double** init()
{
    int row;
    double** rows = (double**)malloc(sizeof(double*) * ISIZE);
    for (row = 0; row < ISIZE; ++row)
        rows[row] = (double*)malloc(sizeof(double) * JSIZE);
    return rows;
}
/**
 * Releases every row of a 2-dim array, then the row-pointer array itself.
 * @param ptr array pointer
 */
void destroy(double** ptr)
{
    int row;
    for (row = 0; row < ISIZE; ++row)
        free(ptr[row]);
    free(ptr);
}
/**
 * Initializes every cell to a[i][j] = 10*i + j.
 * @param a array pointer
 */
void fill(double** a)
{
    int i, j;
    for (i = 0; i < ISIZE; i++)
    {
        for (j = 0; j < JSIZE; j++)
            a[i][j] = 10 * i + j;
    }
}
/**
 * Prints the main diagonal of the array into "<old|new>result<LOOP>".
 * Fix: the fopen result was previously used unchecked, dereferencing NULL
 * if the output file could not be created.
 * @param a array pointer
 */
void print(double** a)
{
    int i;
    FILE* ff;
    char filename[20];
    sprintf(filename, "%sresult%d", FILENAME, LOOP);
    ff = fopen(filename,"w");
    if (ff == NULL)
    {
        fprintf(stderr, "Cannot open output file %s\n", filename);
        return;
    }
    for (i = 0; i < HIPOTEN; ++i)
        fprintf(ff, "%f ", a[i][i]);
    fclose(ff);
}
/**
 * Recounts 2-dim array of doubles with integer numbers iteratively.
 * The bounds depend on the compile-time LOOP selection: LOOP 1 touches every
 * cell; LOOP 2 reads the previous row (i-1, j+1), so i starts at 1 and j
 * stops one column early; LOOP 3 reads the next row (i+1, j-1), so i stops
 * one row early and j starts at 1.
 * @param a array pointer
 */
void process(double** a)
{
    int i;
    /* We need to be inside the array in any case! */
    for (i = (LOOP == 2); i < ISIZE - (LOOP == 3); ++i)
    {
        int j;
        for (j = (LOOP == 3); j < JSIZE - (LOOP == 2); ++j)
#if LOOP == 1
            a[i][j] = FUNC(a[i][j]);
#elif LOOP == 2
            a[i][j] = FUNC(a[i - 1][j + 1]);
#else /* LOOP == 3 */
            a[i][j] = FUNC(a[i + 1][j - 1]);
#endif
    }
}
#ifdef DIAGMODE
/* We are splitting array into diagonals
* -> i
* | #///////////
* j V #///////////
* #///////////
* ############
*
* Examples of diagonals:
* a[0][0]
* a[0][1] -> a[1][0]
* a[0][3] -> a[1][2] -> a[1][2] -> a[3][0]
* a[1][3] -> a[2][2] -> a[3][1] -> a[4][0]
* Every diagonal is counted independently, so we can separate it to threads
*
* Enumerating diagonal points.
* d[0] -> d[1] -> d[2] -> d[3]
* Now loops can be easy rewritten:
* loop1: d'[k] = f(d[k])
* loop2: d'[k] = f(d[k - 1])
* loop3: d'[k - 1] = f(d[k])
*/
/**
 * Recounts 2-dim array of doubles with integer numbers
 * using diagonal computing.  Under the LOOP 2/3 dependency pattern every
 * anti-diagonal can be recomputed independently, so the diagonals are
 * distributed across OpenMP threads.
 * @param a array pointer
 */
void diagprocess(double** a)
{
    int d;
#pragma omp parallel for shared(a) private(d)
    for (d = 0; d < DSIZE; ++d)
    {
        /* Counting coordinates of 1st point of diagonal */
        const int istart = d < JSIZE ? 0 : d - JSIZE + 1;
        const int jstart = d < JSIZE ? d : JSIZE - 1;
        int size, x;
        /* Counting size of diagonal */
        if (d < JSIZE && d < ISIZE) {
            /* Diagonals that starts on I axis and ends on J axis */
            size = d + 1;
        }
        else if ((d < JSIZE) != (d < ISIZE)) {
            /* Diagonals with ends on opposite sides of array */
            size = HIPOTEN;
        }
        else {
            /* Diagonals with ends on opposite to axis sides of array */
            size = DSIZE - d;
        }
        /* LOOP 2/3 reference the neighbor at x-1, so they skip the first point. */
        for (x = (LOOP != 1); x < size; ++x) {
#if LOOP == 1
            a[istart + x][jstart - x] = FUNC(a[istart + x][jstart - x]);
#elif LOOP == 2
            a[istart + x][jstart - x] = FUNC(a[istart + x - 1][jstart - x + 1]);
#else /* LOOP == 3 */
            a[istart + x - 1][jstart - x + 1] = FUNC(a[istart + x][jstart - x]);
#endif
        }
    }
}
#endif /* DIAGMODE */
/**
 * Entry point.
 * @param argc arguments counter
 * @param argv arguments. They are ignored.
 * @return 0
 */
int main(int argc, char **argv)
{
    /* Allocate, populate, transform, dump, release. */
    double** data = init();
    fill(data);
#ifdef DIAGMODE
    diagprocess(data);   /* variant parallelized over diagonals */
#else
    process(data);       /* plain row-by-row variant */
#endif
    print(data);
    destroy(data);
    return 0;
}
|
GB_unop__identity_uint8_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint8_int8)
// op(A') function: GB (_unop_tran__identity_uint8_int8)
// C type: uint8_t
// A type: int8_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint8_t z = (uint8_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint8_t z = (uint8_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Entrywise apply: Cx [p] = (uint8_t) Ax [p] for anz entries,
// parallelized across nthreads with OpenMP.  (Auto-generated file:
// the loop bodies are the expansion of the GB_* macros above.)
GrB_Info GB (_unop_apply__identity_uint8_int8)
(
    uint8_t *Cx,                // Cx and Ax may be aliased
    const int8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // sparse/full case: all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int8_t aij = Ax [p] ;
            uint8_t z = (uint8_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // Ab [p] == 0 means entry p is absent: leave Cx [p] alone
            if (!Ab [p]) continue ;
            int8_t aij = Ax [p] ;
            uint8_t z = (uint8_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose + typecast + apply: the actual work is the textual
// inclusion of GB_unop_transpose.c, which expands in terms of the
// GB_* macros defined earlier in this file.
GrB_Info GB (_unop_tran__identity_uint8_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
ast-dump-openmp-target-simd.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) { // single canonical loop; NOTE: comments in this lit test are end-of-line only, since the CHECK lines pin exact line:col locations
#pragma omp target simd
  for (int i = 0; i < x; i++)
    ; // NullStmt body; 'x' is captured as implicit firstprivate (see CHECK dump)
}
void test_two(int x, int y) { // two nested loops, no collapse clause
#pragma omp target simd
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ; // inner loop shadows 'i'; both 'x' and 'y' appear in the implicit firstprivate clause
}
void test_three(int x, int y) { // like test_two, but with an explicit OMPCollapseClause of 1
#pragma omp target simd collapse(1)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}
void test_four(int x, int y) { // collapse(2) over the two nested loops
#pragma omp target simd collapse(2)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}
void test_five(int x, int y, int z) { // collapse(2) with a third nested loop
#pragma omp target simd collapse(2)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      for (int i = 0; i < z; i++)
        ; // 'z' also appears in the implicit firstprivate clause (see CHECK dump)
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPTargetSimdDirective {{.*}} <line:4:1, col:24>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:5:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1>
// CHECK-NEXT: | `-OMPTargetSimdDirective {{.*}} <line:10:1, col:24>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1>
// CHECK-NEXT: | `-OMPTargetSimdDirective {{.*}} <line:17:1, col:36>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:25, col:35>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:34> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:34> 'int' 1
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1>
// CHECK-NEXT: | `-OMPTargetSimdDirective {{.*}} <line:24:1, col:36>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:25, col:35>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:34> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:34> 'int' 2
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1>
// CHECK-NEXT: `-OMPTargetSimdDirective {{.*}} <line:31:1, col:36>
// CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:25, col:35>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:34> 'int'
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:34> 'int' 2
// CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:31:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-simd.c:31:1) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
DRB028-privatemissing-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
tmp should be annotated as private to avoid race condition.
Data race pairs: tmp@65:5 vs. tmp@66:12
tmp@65:5 vs. tmp@65:5
*/
#include <stdlib.h>
#include <stdio.h>
int main(int argc, char* argv[])
{
  int i;
  int tmp;               /* per-iteration scratch; see NOTE on the second pragma below */
  int len=100;
  int a[100];
/* Initialization loop: each iteration writes a distinct a[i], so this
   parallel for is race-free. */
#pragma omp parallel for
  for (i=0;i<len;i++)
    a[i]=i;
/* NOTE(review): the file header documents a data race on a shared `tmp`
   ("-orig-yes" = race expected), but this pragma already declares
   private(tmp), which removes the race.  Verify against the upstream
   DataRaceBench source whether private(tmp) belongs here — the header's
   "Data race pairs: tmp@65:5 ..." suggests the original omits it. */
#pragma omp parallel for private(tmp)
  for (i=0;i<len;i++)
  {
    tmp =a[i]+i;
    a[i] = tmp;
  }
  /* With a[i]=i from the first loop, a[50] = 50 + 50 = 100. */
  printf("a[50]=%d\n", a[50]);
  return 0;
}
|
statistical_analysis.c | #include<stdio.h>
#include<omp.h>
#include<math.h>
#define INT_MAX 9999999
#define INT_MIN -999999
/* Reads a count n followed by n integer marks from stdin, then prints the
 * maximum, minimum, mean, and standard deviation of the marks.
 * Returns 0 on success, 1 on malformed input. */
int main() {
    int n;
    /* Validate input: an unchecked scanf leaves n indeterminate, and a
     * non-positive n makes the VLA size below undefined behavior and the
     * later division by n a divide-by-zero. */
    if (scanf("%d", &n) != 1 || n <= 0)
    {
        fprintf(stderr, "invalid count\n");
        return 1;
    }
    int marks[n];
    for(int i=0; i<n; i++)
    {
        if (scanf("%d", (marks+i)) != 1)
        {
            fprintf(stderr, "invalid mark\n");
            return 1;
        }
    }
    int max = INT_MIN;
    int min = INT_MAX;
    int sum = 0;
    /* sum is combined via reduction; max/min updates are serialized in the
     * critical section so concurrent read-compare-write pairs cannot race. */
    #pragma omp parallel for shared(marks) reduction (+:sum)
    for(int i=0; i<n; i++)
    {
        int current = marks[i];
        sum+=current;
        #pragma omp critical
        {
            if(current>max)
                max = current;
            if(current < min)
                min = current;
        }
    }
    double average = sum/(double)n;
    double sqDiff = 0;
    #pragma omp parallel for reduction (+:sqDiff)
    for(int i=0; i<n; i++)
    {
        /* d*d instead of pow(d, 2): same value, avoids a transcendental
         * call in the loop. */
        double d = average - (double)marks[i];
        sqDiff += d * d;
    }
    double sd = sqrt(sqDiff/n);
    printf("Maximum is : %d\n", max);
    printf("Minimum is : %d\n", min);
    printf("Mean is : %2f\n", average);
    printf("Standard Deviation is : %2f\n", sd);
    return 0;
}
|
NeighborhoodGraph.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifndef _SPTAG_COMMON_NG_H_
#define _SPTAG_COMMON_NG_H_
#include "../VectorIndex.h"
#include "CommonUtils.h"
#include "Dataset.h"
#include "FineGrainedLock.h"
#include "QueryResultSet.h"
#include <chrono>
#if defined(GPU)
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <typeinfo>
#include <cuda_fp16.h>
#include "inc/Core/Common/cuda/KNN.hxx"
#include "inc/Core/Common/cuda/params.h"
#endif
namespace SPTAG
{
namespace COMMON
{
// Approximate neighborhood graph used by SPTAG indexes.  Row i of
// m_pNeighborhoodGraph holds the neighbor ids of vector i (slots past the
// valid neighbors are -1; see GraphAccuracyEstimation).  An initial KNN
// graph is built either on GPU (buildGraph<T> kernel) or on CPU via random
// TP-tree partitions, then refined toward an RNG by repeated re-search.
class NeighborhoodGraph
{
public:
    // Default hyper-parameters; the public members at the bottom of the
    // class can be overwritten from index configuration before BuildGraph.
    NeighborhoodGraph(): m_iTPTNumber(32),
        m_iTPTLeafSize(2000),
        m_iSamples(1000),
        m_numTopDimensionTPTSplit(5),
        m_iNeighborhoodSize(32),
        m_iNeighborhoodScale(2),
        m_iCEFScale(2),
        m_iRefineIter(2),
        m_iCEF(1000),
        m_iAddCEF(500),
        m_iMaxCheckForRefineGraph(10000),
        m_iGPUGraphType(2),
        m_iGPURefineSteps(0),
        m_iGPURefineDepth(2),
        m_iGPULeafSize(500),
        m_iGPUBatches(1)
    {}

    ~NeighborhoodGraph() {}

    // Insert `insertNode` (at distance insertDist) into the neighbor list of
    // `node`.  Implemented by the concrete graph type (e.g. RNG/KNN).
    virtual void InsertNeighbors(VectorIndex* index, const SizeType node, SizeType insertNode, float insertDist) = 0;

    // Rebuild the neighbor list `nodes` of `node` from a sorted candidate
    // list `queryResults` of length numResults.
    virtual void RebuildNeighbors(VectorIndex* index, const SizeType node, SizeType* nodes, const BasicResult* queryResults, const int numResults) = 0;

    // Estimate graph accuracy: for `samples` random vertices, rebuild the
    // exact neighbor list by brute-force distance computation and count how
    // many stored neighbors match.  Vertices present in idmap are skipped as
    // candidates (they have been remapped/deleted).  Returns a value in [0,1].
    virtual float GraphAccuracyEstimation(VectorIndex* index, const SizeType samples, const std::unordered_map<SizeType, SizeType>* idmap = nullptr)
    {
        DimensionType* correct = new DimensionType[samples];

#pragma omp parallel for schedule(dynamic)
        for (SizeType i = 0; i < samples; i++)
        {
            SizeType x = COMMON::Utils::rand(m_iGraphSize);
            //int x = i;
            COMMON::QueryResultSet<void> query(nullptr, m_iCEF);
            // Brute-force scan over all vertices to get the ground-truth
            // candidate set for vertex x.
            for (SizeType y = 0; y < m_iGraphSize; y++)
            {
                if ((idmap != nullptr && idmap->find(y) != idmap->end())) continue;
                float dist = index->ComputeDistance(index->GetSample(x), index->GetSample(y));
                query.AddPoint(y, dist);
            }
            query.SortResult();
            SizeType * exact_rng = new SizeType[m_iNeighborhoodSize];
            RebuildNeighbors(index, x, exact_rng, query.GetResults(), m_iCEF);

            correct[i] = 0;
            for (DimensionType j = 0; j < m_iNeighborhoodSize; j++) {
                if (exact_rng[j] == -1) {
                    // -1 terminates the exact list; the remaining slots are
                    // counted as trivially correct.
                    correct[i] += m_iNeighborhoodSize - j;
                    break;
                }
                for (DimensionType k = 0; k < m_iNeighborhoodSize; k++)
                    if ((m_pNeighborhoodGraph)[x][k] == exact_rng[j]) {
                        correct[i]++;
                        break;
                    }
            }
            delete[] exact_rng;
        }

        float acc = 0;
        for (SizeType i = 0; i < samples; i++) acc += float(correct[i]);
        acc = acc / samples / m_iNeighborhoodSize;
        delete[] correct;
        return acc;
    }

#if defined(GPU)
    // GPU path: delegate the whole KNN build + RNG refinement to the CUDA
    // buildGraph kernel, then remap any ids present in idmap.
    template <typename T>
    void BuildInitKNNGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap)
    {
        SizeType initSize;
        SPTAG::Helper::Convert::ConvertStringTo(index->GetParameter("NumberOfInitialDynamicPivots").c_str(), initSize);

        // Build the entire RNG graph, both builds the KNN and refines it to RNG
        buildGraph<T>(index, m_iGraphSize, m_iNeighborhoodSize, m_iTPTNumber, (int*)m_pNeighborhoodGraph[0], m_iGPURefineSteps, m_iGPURefineDepth, m_iGPUGraphType, m_iGPULeafSize, initSize);

        // Translate neighbor ids through idmap (remapped/deleted vectors).
        if (idmap != nullptr) {
            std::unordered_map<SizeType, SizeType>::const_iterator iter;
            for (SizeType i = 0; i < m_iGraphSize; i++) {
                for (DimensionType j = 0; j < m_iNeighborhoodSize; j++) {
                    if ((iter = idmap->find(m_pNeighborhoodGraph[i][j])) != idmap->end())
                        m_pNeighborhoodGraph[i][j] = iter->second;
                }
            }
        }
    }
#else
    // Recursively split indices[first..last] with a random-projection
    // trinary-projection tree; ranges of size <= m_iTPTLeafSize are emitted
    // into `leaves` as (first,last) pairs (inclusive bounds).
    template <typename T>
    void PartitionByTptree(VectorIndex* index, std::vector<SizeType>& indices, const SizeType first, const SizeType last,
        std::vector<std::pair<SizeType, SizeType>> & leaves)
    {
        if (last - first <= m_iTPTLeafSize)
        {
            leaves.emplace_back(first, last);
        }
        else
        {
            std::vector<float> Mean(index->GetFeatureDim(), 0);

            int iIteration = 100;
            // Statistics are computed on a sample of at most m_iSamples
            // points, not the whole range.
            SizeType end = min(first + m_iSamples, last);
            SizeType count = end - first + 1;
            // calculate the mean of each dimension
            for (SizeType j = first; j <= end; j++)
            {
                const T* v = (const T*)index->GetSample(indices[j]);
                for (DimensionType k = 0; k < index->GetFeatureDim(); k++)
                {
                    Mean[k] += v[k];
                }
            }
            for (DimensionType k = 0; k < index->GetFeatureDim(); k++)
            {
                Mean[k] /= count;
            }
            std::vector<BasicResult> Variance;
            Variance.reserve(index->GetFeatureDim());
            for (DimensionType j = 0; j < index->GetFeatureDim(); j++)
            {
                Variance.emplace_back(j, 0.0f);
            }
            // calculate the variance of each dimension
            for (SizeType j = first; j <= end; j++)
            {
                const T* v = (const T*)index->GetSample(indices[j]);
                for (DimensionType k = 0; k < index->GetFeatureDim(); k++)
                {
                    float dist = v[k] - Mean[k];
                    Variance[k].Dist += dist*dist;
                }
            }
            // Sort dimensions by variance and keep the top
            // m_numTopDimensionTPTSplit ones as the projection basis.
            std::sort(Variance.begin(), Variance.end(), COMMON::Compare);
            std::vector<SizeType> indexs(m_numTopDimensionTPTSplit);
            std::vector<float> weight(m_numTopDimensionTPTSplit), bestweight(m_numTopDimensionTPTSplit);
            float bestvariance = Variance[index->GetFeatureDim() - 1].Dist;
            for (int i = 0; i < m_numTopDimensionTPTSplit; i++)
            {
                indexs[i] = Variance[index->GetFeatureDim() - 1 - i].VID;
                bestweight[i] = 0;
            }
            // Start from an axis-aligned split on the highest-variance dim.
            bestweight[0] = 1;
            float bestmean = Mean[indexs[0]];

            // Randomized search for a projection direction that maximizes
            // the variance of the projected sample.
            std::vector<float> Val(count);
            for (int i = 0; i < iIteration; i++)
            {
                float sumweight = 0;
                for (int j = 0; j < m_numTopDimensionTPTSplit; j++)
                {
                    weight[j] = float(rand() % 10000) / 5000.0f - 1.0f;
                    sumweight += weight[j] * weight[j];
                }
                sumweight = sqrt(sumweight);
                // Normalize the random direction to unit length.
                for (int j = 0; j < m_numTopDimensionTPTSplit; j++)
                {
                    weight[j] /= sumweight;
                }
                float mean = 0;
                for (SizeType j = 0; j < count; j++)
                {
                    Val[j] = 0;
                    const T* v = (const T*)index->GetSample(indices[first + j]);
                    for (int k = 0; k < m_numTopDimensionTPTSplit; k++)
                    {
                        Val[j] += weight[k] * v[indexs[k]];
                    }
                    mean += Val[j];
                }
                mean /= count;
                float var = 0;
                for (SizeType j = 0; j < count; j++)
                {
                    float dist = Val[j] - mean;
                    var += dist * dist;
                }
                if (var > bestvariance)
                {
                    bestvariance = var;
                    bestmean = mean;
                    for (int j = 0; j < m_numTopDimensionTPTSplit; j++)
                    {
                        bestweight[j] = weight[j];
                    }
                }
            }
            SizeType i = first;
            SizeType j = last;
            // decide which child one point belongs
            // (Hoare-style partition around the projected mean `bestmean`.)
            while (i <= j)
            {
                float val = 0;
                const T* v = (const T*)index->GetSample(indices[i]);
                for (int k = 0; k < m_numTopDimensionTPTSplit; k++)
                {
                    val += bestweight[k] * v[indexs[k]];
                }
                if (val < bestmean)
                {
                    i++;
                }
                else
                {
                    std::swap(indices[i], indices[j]);
                    j--;
                }
            }
            // if all the points in the node are equal,equally split the node into 2
            if ((i == first) || (i == last + 1))
            {
                i = (first + last + 1) / 2;
            }

            // Release temporaries before recursing to bound peak memory.
            Mean.clear();
            Variance.clear();
            Val.clear();
            indexs.clear();
            weight.clear();
            bestweight.clear();

            PartitionByTptree<T>(index, indices, first, i - 1, leaves);
            PartitionByTptree<T>(index, indices, i, last, leaves);
        }
    }

    // CPU path: build m_iTPTNumber random TP-trees in parallel, then for
    // each leaf compute all pairwise distances and merge them into each
    // endpoint's current neighbor list (AddNeighbor keeps the closest
    // m_iNeighborhoodSize entries per vertex).
    template <typename T>
    void BuildInitKNNGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap)
    {
        COMMON::Dataset<float> NeighborhoodDists(m_iGraphSize, m_iNeighborhoodSize, index->m_iDataBlockSize, index->m_iDataCapacity);
        std::vector<std::vector<SizeType>> TptreeDataIndices(m_iTPTNumber, std::vector<SizeType>(m_iGraphSize));
        std::vector<std::vector<std::pair<SizeType, SizeType>>> TptreeLeafNodes(m_iTPTNumber, std::vector<std::pair<SizeType, SizeType>>());
        for (SizeType i = 0; i < m_iGraphSize; i++)
            for (DimensionType j = 0; j < m_iNeighborhoodSize; j++)
                (NeighborhoodDists)[i][j] = MaxDist;

        auto t1 = std::chrono::high_resolution_clock::now();
        LOG(Helper::LogLevel::LL_Info, "Parallel TpTree Partition begin\n");
#pragma omp parallel for schedule(dynamic)
        for (int i = 0; i < m_iTPTNumber; i++)
        {
            // Staggered sleep so each thread seeds rand() with a different
            // clock() value (rand() is the tree's randomness source).
            // NOTE(review): Sleep() looks like a Win32-style API —
            // presumably provided by CommonUtils.h; confirm on non-Windows.
            Sleep(i * 100); std::srand(clock());
            for (SizeType j = 0; j < m_iGraphSize; j++) TptreeDataIndices[i][j] = j;
            std::random_shuffle(TptreeDataIndices[i].begin(), TptreeDataIndices[i].end());
            PartitionByTptree<T>(index, TptreeDataIndices[i], 0, m_iGraphSize - 1, TptreeLeafNodes[i]);
            LOG(Helper::LogLevel::LL_Info, "Finish Getting Leaves for Tree %d\n", i);
        }
        LOG(Helper::LogLevel::LL_Info, "Parallel TpTree Partition done\n");
        auto t2 = std::chrono::high_resolution_clock::now();
        LOG(Helper::LogLevel::LL_Info, "Build TPTree time (s): %lld\n", std::chrono::duration_cast<std::chrono::seconds>(t2 - t1).count());

        for (int i = 0; i < m_iTPTNumber; i++)
        {
#pragma omp parallel for schedule(dynamic)
            for (SizeType j = 0; j < (SizeType)TptreeLeafNodes[i].size(); j++)
            {
                SizeType start_index = TptreeLeafNodes[i][j].first;
                SizeType end_index = TptreeLeafNodes[i][j].second;
                // Coarse progress logging (roughly every 20% of leaves).
                if ((j * 5) % TptreeLeafNodes[i].size() == 0) LOG(Helper::LogLevel::LL_Info, "Processing Tree %d %d%%\n", i, static_cast<int>(j * 1.0 / TptreeLeafNodes[i].size() * 100));
                // All-pairs distances within the leaf; end_index is inclusive.
                for (SizeType x = start_index; x < end_index; x++)
                {
                    for (SizeType y = x + 1; y <= end_index; y++)
                    {
                        SizeType p1 = TptreeDataIndices[i][x];
                        SizeType p2 = TptreeDataIndices[i][y];
                        float dist = index->ComputeDistance(index->GetSample(p1), index->GetSample(p2));
                        if (idmap != nullptr) {
                            p1 = (idmap->find(p1) == idmap->end()) ? p1 : idmap->at(p1);
                            p2 = (idmap->find(p2) == idmap->end()) ? p2 : idmap->at(p2);
                        }
                        COMMON::Utils::AddNeighbor(p2, dist, (m_pNeighborhoodGraph)[p1], (NeighborhoodDists)[p1], m_iNeighborhoodSize);
                        COMMON::Utils::AddNeighbor(p1, dist, (m_pNeighborhoodGraph)[p2], (NeighborhoodDists)[p2], m_iNeighborhoodSize);
                    }
                }
            }
            TptreeDataIndices[i].clear();
            TptreeLeafNodes[i].clear();
        }
        TptreeDataIndices.clear();
        TptreeLeafNodes.clear();

        auto t3 = std::chrono::high_resolution_clock::now();
        LOG(Helper::LogLevel::LL_Info, "Process TPTree time (s): %lld\n", std::chrono::duration_cast<std::chrono::seconds>(t3 - t2).count());
    }
#endif

    // Full graph build: temporarily enlarges the neighborhood by
    // m_iNeighborhoodScale, builds the initial KNN graph (skipped for tiny
    // graphs), then refines it (RefineGraph shrinks the neighborhood back).
    // Finally, negative idmap keys encode tail links written into the last
    // neighbor slot of the corresponding row.
    template <typename T>
    void BuildGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap = nullptr)
    {
        LOG(Helper::LogLevel::LL_Info, "build RNG graph!\n");

        m_iGraphSize = index->GetNumSamples();
        m_iNeighborhoodSize = (DimensionType)(ceil(m_iNeighborhoodSize * m_iNeighborhoodScale));
        m_pNeighborhoodGraph.Initialize(m_iGraphSize, m_iNeighborhoodSize, index->m_iDataBlockSize, index->m_iDataCapacity);

        if (m_iGraphSize < 1000) {
            // Small graph: refinement alone (brute-force searches) suffices.
            RefineGraph<T>(index, idmap);
            LOG(Helper::LogLevel::LL_Info, "Build RNG Graph end!\n");
            return;
        }

        auto t1 = std::chrono::high_resolution_clock::now();
        BuildInitKNNGraph<T>(index, idmap);
        auto t2 = std::chrono::high_resolution_clock::now();
        LOG(Helper::LogLevel::LL_Info, "BuildInitKNNGraph time (s): %lld\n", std::chrono::duration_cast<std::chrono::seconds>(t2 - t1).count());

        RefineGraph<T>(index, idmap);

        if (idmap != nullptr) {
            for (auto iter = idmap->begin(); iter != idmap->end(); iter++)
                if (iter->first < 0)
                {
                    // Key -1-i refers to row i; value is stored encoded as
                    // -2-second in the last neighbor slot.
                    m_pNeighborhoodGraph[-1 - iter->first][m_iNeighborhoodSize - 1] = -2 - iter->second;
                }
        }
        auto t3 = std::chrono::high_resolution_clock::now();
        LOG(Helper::LogLevel::LL_Info, "BuildGraph time (s): %lld\n", std::chrono::duration_cast<std::chrono::seconds>(t3 - t1).count());
    }

    // In-place refinement: m_iRefineIter-1 passes with an enlarged candidate
    // budget (m_iCEF * m_iCEFScale), then one final pass at m_iCEF after
    // shrinking m_iNeighborhoodSize back by m_iNeighborhoodScale.
    template <typename T>
    void RefineGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap = nullptr)
    {
        for (int iter = 0; iter < m_iRefineIter - 1; iter++)
        {
            auto t1 = std::chrono::high_resolution_clock::now();
#pragma omp parallel for schedule(dynamic)
            for (SizeType i = 0; i < m_iGraphSize; i++)
            {
                RefineNode<T>(index, i, false, false, (int)(m_iCEF * m_iCEFScale));
                if ((i * 5) % m_iGraphSize == 0) LOG(Helper::LogLevel::LL_Info, "Refine %d %d%%\n", iter, static_cast<int>(i * 1.0 / m_iGraphSize * 100));
            }
            auto t2 = std::chrono::high_resolution_clock::now();
            LOG(Helper::LogLevel::LL_Info, "Refine RNG time (s): %lld Graph Acc: %f\n", std::chrono::duration_cast<std::chrono::seconds>(t2 - t1).count(), GraphAccuracyEstimation(index, 100, idmap));
        }

        // Restore the configured neighborhood size for the final pass.
        m_iNeighborhoodSize = (DimensionType)(m_iNeighborhoodSize / m_iNeighborhoodScale);

        if (m_iRefineIter > 0) {
            auto t1 = std::chrono::high_resolution_clock::now();
#pragma omp parallel for schedule(dynamic)
            for (SizeType i = 0; i < m_iGraphSize; i++)
            {
                RefineNode<T>(index, i, false, false, m_iCEF);
                if ((i * 5) % m_iGraphSize == 0) LOG(Helper::LogLevel::LL_Info, "Refine %d %d%%\n", m_iRefineIter - 1, static_cast<int>(i * 1.0 / m_iGraphSize * 100));
            }
            auto t2 = std::chrono::high_resolution_clock::now();
            LOG(Helper::LogLevel::LL_Info, "Refine RNG time (s): %lld Graph Acc: %f\n", std::chrono::duration_cast<std::chrono::seconds>(t2 - t1).count(), GraphAccuracyEstimation(index, 100, idmap));
        }
    }

    // Refine the subgraph induced by `indices` into `newGraph` (created here
    // when the caller passes nullptr).  Neighbor ids are translated back to
    // the caller's id space via reverseIndices and then idmap; row i's tail
    // slot may again carry an encoded link (-2-mapped) for key -1-i.
    // Optionally serializes the new graph to `output`.
    template <typename T>
    ErrorCode RefineGraph(VectorIndex* index, std::vector<SizeType>& indices, std::vector<SizeType>& reverseIndices,
        std::shared_ptr<Helper::DiskPriorityIO> output, NeighborhoodGraph* newGraph, const std::unordered_map<SizeType, SizeType>* idmap = nullptr)
    {
        std::shared_ptr<NeighborhoodGraph> tmp;
        if (newGraph == nullptr) {
            tmp = NeighborhoodGraph::CreateInstance(Type());
            newGraph = tmp.get();
        }

        SizeType R = (SizeType)indices.size();
        newGraph->m_pNeighborhoodGraph.Initialize(R, m_iNeighborhoodSize, index->m_iDataBlockSize, index->m_iDataCapacity);
        newGraph->m_iGraphSize = R;
        newGraph->m_iNeighborhoodSize = m_iNeighborhoodSize;

#pragma omp parallel for schedule(dynamic)
        for (SizeType i = 0; i < R; i++)
        {
            if ((i * 5) % R == 0) LOG(Helper::LogLevel::LL_Info, "Refine %d%%\n", static_cast<int>(i * 1.0 / R * 100));

            SizeType* outnodes = newGraph->m_pNeighborhoodGraph[i];

            COMMON::QueryResultSet<T> query((const T*)index->GetSample(indices[i]), m_iCEF + 1);
            index->RefineSearchIndex(query, false);
            RebuildNeighbors(index, indices[i], outnodes, query.GetResults(), m_iCEF + 1);

            std::unordered_map<SizeType, SizeType>::const_iterator iter;
            for (DimensionType j = 0; j < m_iNeighborhoodSize; j++)
            {
                if (outnodes[j] >= 0 && outnodes[j] < reverseIndices.size()) outnodes[j] = reverseIndices[outnodes[j]];
                if (idmap != nullptr && (iter = idmap->find(outnodes[j])) != idmap->end()) outnodes[j] = iter->second;
            }
            if (idmap != nullptr && (iter = idmap->find(-1 - i)) != idmap->end())
                outnodes[m_iNeighborhoodSize - 1] = -2 - iter->second;
        }

        if (output != nullptr) newGraph->SaveGraph(output);
        return ErrorCode::Success;
    }

    // Re-search the index around `node` with a budget of CEF+1 candidates
    // and rebuild its neighbor list.  When updateNeighbors is set, also push
    // `node` into each found neighbor's list (used for incremental inserts).
    template <typename T>
    void RefineNode(VectorIndex* index, const SizeType node, bool updateNeighbors, bool searchDeleted, int CEF)
    {
        COMMON::QueryResultSet<T> query((const T*)index->GetSample(node), CEF + 1);
        index->RefineSearchIndex(query, searchDeleted);
        RebuildNeighbors(index, node, m_pNeighborhoodGraph[node], query.GetResults(), CEF + 1);
        if (updateNeighbors) {
            // update neighbors
            for (int j = 0; j <= CEF; j++)
            {
                BasicResult* item = query.GetResult(j);
                if (item->VID < 0) break;      // candidate list terminator
                if (item->VID == node) continue; // skip self-match
                InsertNeighbors(index, item->VID, node, item->Dist);
            }
        }
    }

    // Serialized size of the underlying neighbor matrix, in bytes.
    inline std::uint64_t BufferSize() const
    {
        return m_pNeighborhoodGraph.BufferSize();
    }

    // Load the graph from an open IO handle and sync the cached dimensions.
    ErrorCode LoadGraph(std::shared_ptr<Helper::DiskPriorityIO> input, SizeType blockSize, SizeType capacity)
    {
        ErrorCode ret = ErrorCode::Success;
        if ((ret = m_pNeighborhoodGraph.Load(input, blockSize, capacity)) != ErrorCode::Success) return ret;

        m_iGraphSize = m_pNeighborhoodGraph.R();
        m_iNeighborhoodSize = m_pNeighborhoodGraph.C();
        return ret;
    }

    // Load the graph from a file path and sync the cached dimensions.
    ErrorCode LoadGraph(std::string sGraphFilename, SizeType blockSize, SizeType capacity)
    {
        ErrorCode ret = ErrorCode::Success;
        if ((ret = m_pNeighborhoodGraph.Load(sGraphFilename, blockSize, capacity)) != ErrorCode::Success) return ret;

        m_iGraphSize = m_pNeighborhoodGraph.R();
        m_iNeighborhoodSize = m_pNeighborhoodGraph.C();
        return ret;
    }

    // Load the graph from an in-memory image and sync the cached dimensions.
    ErrorCode LoadGraph(char* pGraphMemFile, SizeType blockSize, SizeType capacity)
    {
        ErrorCode ret = ErrorCode::Success;
        if ((ret = m_pNeighborhoodGraph.Load(pGraphMemFile, blockSize, capacity)) != ErrorCode::Success) return ret;

        m_iGraphSize = m_pNeighborhoodGraph.R();
        m_iNeighborhoodSize = m_pNeighborhoodGraph.C();
        // NOTE(review): returns Success literally while the siblings return
        // `ret` — equivalent here (ret == Success on this path), but
        // inconsistent style.
        return ErrorCode::Success;
    }

    // Save the graph to a newly created binary file.
    ErrorCode SaveGraph(std::string sGraphFilename) const
    {
        LOG(Helper::LogLevel::LL_Info, "Save %s To %s\n", m_pNeighborhoodGraph.Name().c_str(), sGraphFilename.c_str());
        auto ptr = f_createIO();
        if (ptr == nullptr || !ptr->Initialize(sGraphFilename.c_str(), std::ios::binary | std::ios::out)) return ErrorCode::FailedCreateFile;
        return SaveGraph(ptr);
    }

    // Binary layout: m_iGraphSize, m_iNeighborhoodSize, then one row of
    // neighbor ids per vertex.
    ErrorCode SaveGraph(std::shared_ptr<Helper::DiskPriorityIO> output) const
    {
        IOBINARY(output, WriteBinary, sizeof(SizeType), (char*)&m_iGraphSize);
        IOBINARY(output, WriteBinary, sizeof(DimensionType), (char*)&m_iNeighborhoodSize);

        for (int i = 0; i < m_iGraphSize; i++)
            IOBINARY(output, WriteBinary, sizeof(SizeType) * m_iNeighborhoodSize, (char*)m_pNeighborhoodGraph[i]);
        LOG(Helper::LogLevel::LL_Info, "Save %s (%d,%d) Finish!\n", m_pNeighborhoodGraph.Name().c_str(), m_iGraphSize, m_iNeighborhoodSize);
        return ErrorCode::Success;
    }

    // Grow the graph by `num` rows (for newly added vectors).
    inline ErrorCode AddBatch(SizeType num)
    {
        ErrorCode ret = m_pNeighborhoodGraph.AddBatch(num);
        if (ret != ErrorCode::Success) return ret;

        m_iGraphSize += num;
        return ErrorCode::Success;
    }

    // Unsynchronized row access; see Update() for the locked write path.
    inline SizeType* operator[](SizeType index) { return m_pNeighborhoodGraph[index]; }

    inline const SizeType* operator[](SizeType index) const { return m_pNeighborhoodGraph[index]; }

    // Locked single-cell write (per-row lock from m_dataUpdateLock).
    void Update(SizeType row, DimensionType col, SizeType val) {
        std::lock_guard<std::mutex> lock(m_dataUpdateLock[row]);
        m_pNeighborhoodGraph[row][col] = val;
    }

    inline void SetR(SizeType rows) {
        m_pNeighborhoodGraph.SetR(rows);
        m_iGraphSize = rows;
    }

    inline SizeType R() const { return m_iGraphSize; }

    inline std::string Type() const { return m_pNeighborhoodGraph.Name(); }

    // Factory for concrete graph types keyed by name (defined elsewhere).
    static std::shared_ptr<NeighborhoodGraph> CreateInstance(std::string type);

protected:
    // Graph structure
    SizeType m_iGraphSize;                           // number of vertices (rows)
    COMMON::Dataset<SizeType> m_pNeighborhoodGraph;  // row i = neighbor ids of vertex i
    FineGrainedLock m_dataUpdateLock;                // per-row locks used by Update()

public:
    int m_iTPTNumber, m_iTPTLeafSize, m_iSamples, m_numTopDimensionTPTSplit;
    DimensionType m_iNeighborhoodSize;
    float m_iNeighborhoodScale, m_iCEFScale;
    int m_iRefineIter, m_iCEF, m_iAddCEF, m_iMaxCheckForRefineGraph, m_iGPUGraphType, m_iGPURefineSteps, m_iGPURefineDepth, m_iGPULeafSize, m_iGPUBatches;
};
}
}
#endif
|
GB_unop_transpose.c | //------------------------------------------------------------------------------
// GB_unop_transpose: C=op(cast(A')), transpose, typecast, and apply op
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
{

// This template is #include'd into a GB_unop__*.c file, which defines
// GB_CAST_OP, GB_ATYPE, GB_CTYPE, and (optionally) GB_ISO_TRANSPOSE, and
// provides the variables C, A, Workspaces, A_slice, nworkspaces, nthreads.
// Ax unused for some uses of this template
#include "GB_unused.h"
//--------------------------------------------------------------------------
// get A and C
//--------------------------------------------------------------------------
#ifndef GB_ISO_TRANSPOSE
const GB_ATYPE *restrict Ax = (GB_ATYPE *) A->x ;
GB_CTYPE *restrict Cx = (GB_CTYPE *) C->x ;
#endif
//--------------------------------------------------------------------------
// C = op (cast (A'))
//--------------------------------------------------------------------------
// Workspaces == NULL means no symbolic phase is required: A is full or
// bitmap, so the transpose is a pure index permutation of a dense array.
if (Workspaces == NULL)
{
//----------------------------------------------------------------------
// A and C are both full or both bitmap
//----------------------------------------------------------------------
// A is avlen-by-avdim; C is avdim-by-avlen
int64_t avlen = A->vlen ;
int64_t avdim = A->vdim ;
int64_t anz = avlen * avdim ; // ignore integer overflow
const int8_t *restrict Ab = A->b ;
int8_t *restrict Cb = C->b ;
ASSERT ((Cb == NULL) == (Ab == NULL)) ;
// TODO: it would be faster to do this by tiles, not rows/columns, for
// large matrices, but in most of the cases in GraphBLAS, A and C will
// be tall-and-thin or short-and-fat.
if (Ab == NULL)
{
//------------------------------------------------------------------
// A and C are both full (no work if A and C are iso)
//------------------------------------------------------------------
#ifndef GB_ISO_TRANSPOSE
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
// each thread handles one contiguous slice of C's entries
int64_t pC_start, pC_end ;
GB_PARTITION (pC_start, pC_end, anz, tid, nthreads) ;
for (int64_t pC = pC_start ; pC < pC_end ; pC++)
{
// get i and j of the entry C(i,j)
// i = (pC % avdim) ;
// j = (pC / avdim) ;
// find the position of the entry A(j,i)
// pA = j + i * avlen
// Cx [pC] = op (Ax [pA])
GB_CAST_OP (pC, ((pC/avdim) + (pC%avdim) * avlen)) ;
}
}
#endif
}
else
{
//------------------------------------------------------------------
// A and C are both bitmap
//------------------------------------------------------------------
// the bitmap must be permuted even in the iso case; only the
// numeric work is skipped when GB_ISO_TRANSPOSE is defined
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
int64_t pC_start, pC_end ;
GB_PARTITION (pC_start, pC_end, anz, tid, nthreads) ;
for (int64_t pC = pC_start ; pC < pC_end ; pC++)
{
// get i and j of the entry C(i,j)
// i = (pC % avdim) ;
// j = (pC / avdim) ;
// find the position of the entry A(j,i)
// pA = j + i * avlen
int64_t pA = ((pC / avdim) + (pC % avdim) * avlen) ;
int8_t cij_exists = Ab [pA] ;
Cb [pC] = cij_exists ;
#ifndef GB_ISO_TRANSPOSE
if (cij_exists)
{
// Cx [pC] = op (Ax [pA])
GB_CAST_OP (pC, pA) ;
}
#endif
}
}
}
}
else
{
//----------------------------------------------------------------------
// A is sparse or hypersparse; C is sparse
//----------------------------------------------------------------------
const int64_t *restrict Ap = A->p ;
const int64_t *restrict Ah = A->h ;
const int64_t *restrict Ai = A->i ;
const int64_t anvec = A->nvec ;
int64_t *restrict Ci = C->i ;
if (nthreads == 1)
{
//------------------------------------------------------------------
// sequential method
//------------------------------------------------------------------
// workspace [i] holds the next free position in C(:,i); it is
// advanced as each entry is placed (no atomics needed)
int64_t *restrict workspace = Workspaces [0] ;
for (int64_t k = 0 ; k < anvec ; k++)
{
// iterate over the entries in A(:,j)
int64_t j = GBH (Ah, k) ;
int64_t pA_start = Ap [k] ;
int64_t pA_end = Ap [k+1] ;
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
// C(j,i) = A(i,j)
int64_t i = Ai [pA] ;
int64_t pC = workspace [i]++ ;
Ci [pC] = j ;
#ifndef GB_ISO_TRANSPOSE
// Cx [pC] = op (Ax [pA])
GB_CAST_OP (pC, pA) ;
#endif
}
}
}
else if (nworkspaces == 1)
{
//------------------------------------------------------------------
// atomic method
//------------------------------------------------------------------
// all threads share a single workspace; the per-column counters
// are advanced with an atomic capture-and-increment
int64_t *restrict workspace = Workspaces [0] ;
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
// A_slice partitions A's vectors across the threads
for (int64_t k = A_slice [tid] ; k < A_slice [tid+1] ; k++)
{
// iterate over the entries in A(:,j)
int64_t j = GBH (Ah, k) ;
int64_t pA_start = Ap [k] ;
int64_t pA_end = Ap [k+1] ;
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
// C(j,i) = A(i,j)
int64_t i = Ai [pA] ;
// do this atomically: pC = workspace [i]++
int64_t pC ;
GB_ATOMIC_CAPTURE_INC64 (pC, workspace [i]) ;
Ci [pC] = j ;
#ifndef GB_ISO_TRANSPOSE
// Cx [pC] = op (Ax [pA])
GB_CAST_OP (pC, pA) ;
#endif
}
}
}
}
else
{
//------------------------------------------------------------------
// non-atomic method
//------------------------------------------------------------------
// each thread has its own pre-offset workspace, so the counters
// can be advanced without atomics
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
int64_t *restrict workspace = Workspaces [tid] ;
for (int64_t k = A_slice [tid] ; k < A_slice [tid+1] ; k++)
{
// iterate over the entries in A(:,j)
int64_t j = GBH (Ah, k) ;
int64_t pA_start = Ap [k] ;
int64_t pA_end = Ap [k+1] ;
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
// C(j,i) = A(i,j)
int64_t i = Ai [pA] ;
int64_t pC = workspace [i]++ ;
Ci [pC] = j ;
#ifndef GB_ISO_TRANSPOSE
// Cx [pC] = op (Ax [pA])
GB_CAST_OP (pC, pA) ;
#endif
}
}
}
}
}
}
#undef GB_ISO_TRANSPOSE
|
dz1z1.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
void Usage(char *prog_name);
#define ACCURACY 0.01
/*
* Sequential ("rucno" = manual) reference implementation.
*/
/*
 * Sequentially estimates pi with the Leibniz series:
 *   pi = 4 * sum_{i=0..n-1} (-1)^i / (2i + 1).
 * argv[1] must hold n, the number of terms (>= 1); otherwise Usage() is
 * called and the process exits. Returns the estimate and prints it next
 * to an atan-based reference value.
 */
double sequential_solution(int argc, char *argv[])
{
    long long n, i;
    /* factor was printed uninitialized below (undefined behavior);
       start it at a defined value */
    double factor = 0.0;
    double sum = 0.0;

    if (argc != 2)
        Usage(argv[0]);
    n = strtoll(argv[1], NULL, 10);
    if (n < 1)
        Usage(argv[0]);

    printf("Before for loop, factor = %f.\n", factor);
    for (i = 0; i < n; i++)
    {
        /* alternating sign: +1 for even i, -1 for odd i */
        factor = (i % 2 == 0) ? 1.0 : -1.0;
        sum += factor / (2 * i + 1);
    }
    printf("After for loop, factor = %f.\n", factor);

    sum = 4.0 * sum;
    printf("With n = %lld terms\n", n);
    printf(" Our estimate of pi = %.14f\n", sum);
    printf(" Ref estimate of pi = %.14f\n", 4.0 * atan(1.0));
    return sum;
}
/*
 * Parallel Leibniz-series estimate of pi. Each thread sums a contiguous
 * chunk of n/num_threads terms (the last thread also takes the remainder)
 * and partial sums are combined with an OpenMP reduction. argv[1] must
 * hold n (>= 1); otherwise Usage() exits. Returns the estimate and prints
 * it next to an atan-based reference value.
 */
double parallel_solution(int argc, char *argv[])
{
    long long n, i;
    /* factor was printed uninitialized below (undefined behavior);
       start it at a defined value */
    double factor = 0.0;
    double sum = 0.0;

    if (argc != 2)
        Usage(argv[0]);
    n = strtoll(argv[1], NULL, 10);
    if (n < 1)
        Usage(argv[0]);

    printf("Before for loop, factor = %f.\n", factor);
    /* initialized defensively; the last thread always runs at least one
       iteration when n >= 1, but an uninitialized read here would be UB */
    double last_iteration_factor = 0.0;
#pragma omp parallel reduction(+ \
                               : sum) private(factor, i) shared(last_iteration_factor)
    {
        long long chunk = n / omp_get_num_threads();
        long long start_i = omp_get_thread_num() * chunk;
        long long end_i = start_i + chunk;
        int is_last_chunk = 0;
        if (omp_get_thread_num() + 1 == omp_get_num_threads())
        {
            /* the last thread also sums the n % num_threads leftover terms */
            end_i += n % omp_get_num_threads();
            is_last_chunk = 1;
        }
        // for debug purposes
        // printf("id: %2d, start: %10lld, end: %11lld, chunk: %10lld\n", omp_get_thread_num(), start_i, end_i, chunk);
        for (i = start_i; i < end_i; i++)
        {
            factor = (i % 2 == 0) ? 1.0 : -1.0;
            sum += factor / (2 * i + 1);
        }
        // substitute for lastprivate(factor): only the thread owning the
        // final chunk publishes its private factor
        if (is_last_chunk)
            last_iteration_factor = factor;
    } // parallel
    factor = last_iteration_factor;
    printf("After for loop, factor = %f.\n", factor);

    sum = 4.0 * sum;
    printf("With n = %lld terms\n", n);
    printf(" Our estimate of pi = %.14f\n", sum);
    printf(" Ref estimate of pi = %.14f\n", 4.0 * atan(1.0));
    return sum;
}
/*
 * Driver: runs the sequential and parallel pi estimates on the same
 * command-line arguments, times both with omp_get_wtime(), and checks
 * that the two results agree to within ACCURACY.
 */
int main(int argc, char *argv[])
{
    printf("---------------------Sequential execution---------------------\n");
    const double seq_begin = omp_get_wtime();
    const double seq_pi = sequential_solution(argc, argv);
    const double seq_end = omp_get_wtime();

    printf("----------------------Parallel execution----------------------\n");
    const double par_begin = omp_get_wtime();
    const double par_pi = parallel_solution(argc, argv);
    const double par_end = omp_get_wtime();

    printf("\nSequential elapsed time: %lfs\n", seq_end - seq_begin);
    printf("Parallel elapsed time: %lfs\n", par_end - par_begin);

    /* the two estimates must match within the configured tolerance */
    printf(fabs(seq_pi - par_pi) < ACCURACY ? "Test PASSED\n" : "Test FAILED\n");
    return 0;
}
/*
 * Prints a usage message to stderr and terminates the process.
 * Fixes: the old message advertised a <thread_count> argument that the
 * program never accepts (both solutions require exactly one argument, n),
 * and the process exited with status 0 even though this is an error path.
 */
void Usage(char *prog_name)
{
    fprintf(stderr, "usage: %s <n>\n", prog_name);
    fprintf(stderr, " n is the number of terms and should be >= 1\n");
    exit(1);
}
|
test.c | #include <stdio.h>
#include "../utilities/check.h"
#define N 100
// Offloading regression test: computes a[k] = 2*k on the device with a
// `simd linear` clause and verifies the result against a host-side copy.
int main()
{
// verify that device offloading is available (from ../utilities/check.h)
check_offloading();
int a[N], aa[N];
int i, error = 0;
// initialize
for(i=0; i<N; i++)
aa[i] = a[i] = -1;
// offload
#pragma omp target map(tofrom: a[0:100])
{
int k, l;
// l is declared linear in the SIMD loop with step 2: each lane's copy
// of l advances by 2 per iteration, matching the l = 2*k assignment
#pragma omp simd linear(l: 2)
for(k=0; k<N; k++) {
l = 2*k;
a[k] = l;
}
}
// host
for(i=0; i<N; i++)
aa[i] = 2*i;
// check
for(i=0; i<N; i++) {
if (a[i] != aa[i])
printf("%d: a %d != %d (error %d)\n", i, a[i], aa[i], ++error);
// bail out early so a broken run does not spam thousands of lines
if (error > 10) {
printf("abort\n");
return 0;
}
}
// report
printf("done with %d errors\n", error);
return error;
}
|
GB_unop__cosh_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__cosh_fc32_fc32)
// op(A') function: GB (_unop_tran__cosh_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = ccoshf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = ccoshf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = ccoshf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_COSH || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): applies cij = ccoshf (aij) elementwise, either over
// every entry (full case) or only where the bitmap Ab is set (bitmap case).
GrB_Info GB (_unop_apply__cosh_fc32_fc32)
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// A is full: apply the operator to all anz entries
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = ccoshf (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = ccoshf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose + apply; the whole algorithm lives in the
// GB_unop_transpose.c template, instantiated with this file's GB_CAST_OP.
GrB_Info GB (_unop_tran__cosh_fc32_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces, // NULL for full/bitmap A; else position counters
const int64_t *restrict A_slice, // partition of A's vectors across threads
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
effect.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% EEEEE FFFFF FFFFF EEEEE CCCC TTTTT %
% E F F E C T %
% EEE FFF FFF EEE C T %
% E F F E C T %
% EEEEE F F EEEEE CCCC T %
% %
% %
% MagickCore Image Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/accelerate-private.h"
#include "magick/blob.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/constitute.h"
#include "magick/decorate.h"
#include "magick/distort.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/effect.h"
#include "magick/fx.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/matrix.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/montage.h"
#include "magick/morphology.h"
#include "magick/morphology-private.h"
#include "magick/opencl-private.h"
#include "magick/paint.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/random_.h"
#include "magick/random-private.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/resize.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/shear.h"
#include "magick/signature-private.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/transform.h"
#include "magick/threshold.h"
#ifdef MAGICKCORE_CLPERFMARKER
#include "CLPerfMarker.h"
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveBlurImage() adaptively blurs the image by blurring less
% intensely near image edges and more intensely far from edges. We blur the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and AdaptiveBlurImage() selects a suitable radius for you.
%
% The format of the AdaptiveBlurImage method is:
%
% Image *AdaptiveBlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
% Image *AdaptiveBlurImageChannel(const Image *image,
% const ChannelType channel,double radius,const double sigma,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Laplacian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: adaptively blur all channels in DefaultChannels.
  */
  return(AdaptiveBlurImageChannel(image,DefaultChannels,radius,sigma,
    exception));
}
// Adaptively blurs: builds a pyramid of Gaussian kernels of decreasing
// width and, per pixel, selects a kernel based on the local edge strength
// (stronger edge => larger i => smaller kernel => less blur near edges).
MagickExport Image *AdaptiveBlurImageChannel(const Image *image,
const ChannelType channel,const double radius,const double sigma,
ExceptionInfo *exception)
{
#define AdaptiveBlurImageTag "Convolve/Image"
#define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)
CacheView
*blur_view,
*edge_view,
*image_view;
double
**kernel,
normalize;
Image
*blur_image,
*edge_image,
*gaussian_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
bias;
register ssize_t
i;
size_t
width;
ssize_t
j,
k,
u,
v,
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
blur_image=CloneImage(image,0,0,MagickTrue,exception);
if (blur_image == (Image *) NULL)
return((Image *) NULL);
// sigma of ~0 means no blur at all: return the clone unchanged
if (fabs(sigma) <= MagickEpsilon)
return(blur_image);
if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse)
{
InheritException(exception,&blur_image->exception);
blur_image=DestroyImage(blur_image);
return((Image *) NULL);
}
/*
Edge detect the image brightness channel, level, blur, and level again.
*/
edge_image=EdgeImage(image,radius,exception);
if (edge_image == (Image *) NULL)
{
blur_image=DestroyImage(blur_image);
return((Image *) NULL);
}
(void) AutoLevelImage(edge_image);
gaussian_image=BlurImage(edge_image,radius,sigma,exception);
if (gaussian_image != (Image *) NULL)
{
edge_image=DestroyImage(edge_image);
edge_image=gaussian_image;
}
(void) AutoLevelImage(edge_image);
/*
Create a set of kernels from maximum (radius,sigma) to minimum.
*/
// kernel[i] (even i only) is a (width-i)x(width-i) normalized Gaussian;
// odd slots stay NULL (the per-pixel index below is forced even)
width=GetOptimalKernelWidth2D(radius,sigma);
kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
sizeof(*kernel)));
if (kernel == (double **) NULL)
{
edge_image=DestroyImage(edge_image);
blur_image=DestroyImage(blur_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
(void) memset(kernel,0,(size_t) width*sizeof(*kernel));
for (i=0; i < (ssize_t) width; i+=2)
{
kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
(width-i),(width-i)*sizeof(**kernel)));
if (kernel[i] == (double *) NULL)
break;
normalize=0.0;
j=(ssize_t) (width-i-1)/2;
k=0;
for (v=(-j); v <= j; v++)
{
for (u=(-j); u <= j; u++)
{
kernel[i][k]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
normalize+=kernel[i][k];
k++;
}
}
// fold any normalization remainder into the center tap
kernel[i][(k-1)/2]+=(1.0-normalize);
if (sigma < MagickEpsilon)
kernel[i][(k-1)/2]=1.0;
}
// i < width here means an allocation above failed: unwind everything
if (i < (ssize_t) width)
{
for (i-=2; i >= 0; i-=2)
kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
kernel=(double **) RelinquishAlignedMemory(kernel);
edge_image=DestroyImage(edge_image);
blur_image=DestroyImage(blur_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Adaptively blur image.
*/
status=MagickTrue;
progress=0;
GetMagickPixelPacket(image,&bias);
SetMagickPixelPacketBias(image,&bias);
image_view=AcquireVirtualCacheView(image,exception);
edge_view=AcquireVirtualCacheView(edge_image,exception);
blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,blur_image,blur_image->rows,1)
#endif
for (y=0; y < (ssize_t) blur_image->rows; y++)
{
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p,
*magick_restrict r;
register IndexPacket
*magick_restrict blur_indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
// another row already failed: skip (cannot `break` out of omp for)
if (status == MagickFalse)
continue;
r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
exception);
if ((r == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view);
for (x=0; x < (ssize_t) blur_image->columns; x++)
{
double
alpha,
gamma;
DoublePixelPacket
pixel;
register const double
*magick_restrict k;
register ssize_t
i,
u,
v;
gamma=0.0;
// map edge intensity to a kernel index; clamp to [0,width] and force
// even so kernel[i] is one of the allocated kernels
i=(ssize_t) ceil((double) width*QuantumScale*
GetPixelIntensity(edge_image,r)-0.5);
if (i < 0)
i=0;
else
if (i > (ssize_t) width)
i=(ssize_t) width;
if ((i & 0x01) != 0)
i--;
// fetch the (width-i)x(width-i) neighborhood centered on (x,y)
p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-i)/2L),y-
(ssize_t) ((width-i)/2L),width-i,width-i,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetCacheViewVirtualIndexQueue(image_view);
pixel.red=bias.red;
pixel.green=bias.green;
pixel.blue=bias.blue;
pixel.opacity=bias.opacity;
pixel.index=bias.index;
k=kernel[i];
for (v=0; v < (ssize_t) (width-i); v++)
{
for (u=0; u < (ssize_t) (width-i); u++)
{
// alpha-weight color channels so transparent pixels contribute less
alpha=1.0;
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p));
if ((channel & RedChannel) != 0)
pixel.red+=(*k)*alpha*GetPixelRed(p);
if ((channel & GreenChannel) != 0)
pixel.green+=(*k)*alpha*GetPixelGreen(p);
if ((channel & BlueChannel) != 0)
pixel.blue+=(*k)*alpha*GetPixelBlue(p);
if ((channel & OpacityChannel) != 0)
pixel.opacity+=(*k)*GetPixelOpacity(p);
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.index+=(*k)*alpha*GetPixelIndex(indexes+x+(width-i)*v+u);
gamma+=(*k)*alpha;
k++;
p++;
}
}
// renormalize by the accumulated alpha-weighted kernel sum
gamma=PerceptibleReciprocal(gamma);
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
if ((channel & OpacityChannel) != 0)
SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*pixel.index));
q++;
r++;
}
if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,AdaptiveBlurImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
blur_image->type=image->type;
blur_view=DestroyCacheView(blur_view);
edge_view=DestroyCacheView(edge_view);
image_view=DestroyCacheView(image_view);
edge_image=DestroyImage(edge_image);
for (i=0; i < (ssize_t) width; i+=2)
kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
kernel=(double **) RelinquishAlignedMemory(kernel);
if (status == MagickFalse)
blur_image=DestroyImage(blur_image);
return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e S h a r p e n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveSharpenImage() adaptively sharpens the image by sharpening more
% intensely near image edges and less intensely far from edges. We sharpen the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and AdaptiveSharpenImage() selects a suitable radius for you.
%
% The format of the AdaptiveSharpenImage method is:
%
% Image *AdaptiveSharpenImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
% Image *AdaptiveSharpenImageChannel(const Image *image,
% const ChannelType channel,double radius,const double sigma,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Laplacian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveSharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: adaptively sharpen all channels in DefaultChannels.
  */
  return(AdaptiveSharpenImageChannel(image,DefaultChannels,radius,sigma,
    exception));
}
// Adaptively sharpens: builds a pyramid of negative-Gaussian (unsharp)
// kernels of decreasing width and, per pixel, selects one by edge strength
// (stronger edge => smaller i => larger kernel => more sharpening at edges).
MagickExport Image *AdaptiveSharpenImageChannel(const Image *image,
const ChannelType channel,const double radius,const double sigma,
ExceptionInfo *exception)
{
#define AdaptiveSharpenImageTag "Convolve/Image"
#define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)
CacheView
*sharp_view,
*edge_view,
*image_view;
double
**kernel,
normalize;
Image
*sharp_image,
*edge_image,
*gaussian_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
bias;
register ssize_t
i;
size_t
width;
ssize_t
j,
k,
u,
v,
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
sharp_image=CloneImage(image,0,0,MagickTrue,exception);
if (sharp_image == (Image *) NULL)
return((Image *) NULL);
// sigma of ~0 means no sharpening at all: return the clone unchanged
if (fabs(sigma) <= MagickEpsilon)
return(sharp_image);
if (SetImageStorageClass(sharp_image,DirectClass) == MagickFalse)
{
InheritException(exception,&sharp_image->exception);
sharp_image=DestroyImage(sharp_image);
return((Image *) NULL);
}
/*
Edge detect the image brightness channel, level, sharp, and level again.
*/
edge_image=EdgeImage(image,radius,exception);
if (edge_image == (Image *) NULL)
{
sharp_image=DestroyImage(sharp_image);
return((Image *) NULL);
}
(void) AutoLevelImage(edge_image);
gaussian_image=BlurImage(edge_image,radius,sigma,exception);
if (gaussian_image != (Image *) NULL)
{
edge_image=DestroyImage(edge_image);
edge_image=gaussian_image;
}
(void) AutoLevelImage(edge_image);
/*
Create a set of kernels from maximum (radius,sigma) to minimum.
*/
// kernel[i] (even i only) is a (width-i)x(width-i) sharpening kernel;
// odd slots stay NULL (the per-pixel index below is forced even)
width=GetOptimalKernelWidth2D(radius,sigma);
kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
sizeof(*kernel)));
if (kernel == (double **) NULL)
{
edge_image=DestroyImage(edge_image);
sharp_image=DestroyImage(sharp_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
(void) memset(kernel,0,(size_t) width*sizeof(*kernel));
for (i=0; i < (ssize_t) width; i+=2)
{
kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
(width-i),(width-i)*sizeof(**kernel)));
if (kernel[i] == (double *) NULL)
break;
normalize=0.0;
j=(ssize_t) (width-i-1)/2;
k=0;
for (v=(-j); v <= j; v++)
{
for (u=(-j); u <= j; u++)
{
// negative Gaussian taps; the center tap below makes the sum positive
kernel[i][k]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma*
MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
normalize+=kernel[i][k];
k++;
}
}
kernel[i][(k-1)/2]=(double) ((-2.0)*normalize);
if (sigma < MagickEpsilon)
kernel[i][(k-1)/2]=1.0;
}
// i < width here means an allocation above failed: unwind everything
if (i < (ssize_t) width)
{
for (i-=2; i >= 0; i-=2)
kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
kernel=(double **) RelinquishAlignedMemory(kernel);
edge_image=DestroyImage(edge_image);
sharp_image=DestroyImage(sharp_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Adaptively sharpen image.
*/
status=MagickTrue;
progress=0;
GetMagickPixelPacket(image,&bias);
SetMagickPixelPacketBias(image,&bias);
image_view=AcquireVirtualCacheView(image,exception);
edge_view=AcquireVirtualCacheView(edge_image,exception);
sharp_view=AcquireAuthenticCacheView(sharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,sharp_image,sharp_image->rows,1)
#endif
for (y=0; y < (ssize_t) sharp_image->rows; y++)
{
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p,
*magick_restrict r;
register IndexPacket
*magick_restrict sharp_indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
// another row already failed: skip (cannot `break` out of omp for)
if (status == MagickFalse)
continue;
r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(sharp_view,0,y,sharp_image->columns,1,
exception);
if ((r == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
sharp_indexes=GetCacheViewAuthenticIndexQueue(sharp_view);
for (x=0; x < (ssize_t) sharp_image->columns; x++)
{
double
alpha,
gamma;
DoublePixelPacket
pixel;
register const double
*magick_restrict k;
register ssize_t
i,
u,
v;
gamma=0.0;
// map (inverted) edge intensity to a kernel index; clamp to [0,width]
// and force even so kernel[i] is one of the allocated kernels
i=(ssize_t) ceil((double) width*(1.0-QuantumScale*
GetPixelIntensity(edge_image,r))-0.5);
if (i < 0)
i=0;
else
if (i > (ssize_t) width)
i=(ssize_t) width;
if ((i & 0x01) != 0)
i--;
// fetch the (width-i)x(width-i) neighborhood centered on (x,y)
p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-i)/2L),y-
(ssize_t) ((width-i)/2L),width-i,width-i,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetCacheViewVirtualIndexQueue(image_view);
k=kernel[i];
pixel.red=bias.red;
pixel.green=bias.green;
pixel.blue=bias.blue;
pixel.opacity=bias.opacity;
pixel.index=bias.index;
for (v=0; v < (ssize_t) (width-i); v++)
{
for (u=0; u < (ssize_t) (width-i); u++)
{
// alpha-weight color channels so transparent pixels contribute less
alpha=1.0;
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p));
if ((channel & RedChannel) != 0)
pixel.red+=(*k)*alpha*GetPixelRed(p);
if ((channel & GreenChannel) != 0)
pixel.green+=(*k)*alpha*GetPixelGreen(p);
if ((channel & BlueChannel) != 0)
pixel.blue+=(*k)*alpha*GetPixelBlue(p);
if ((channel & OpacityChannel) != 0)
pixel.opacity+=(*k)*GetPixelOpacity(p);
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
pixel.index+=(*k)*alpha*GetPixelIndex(indexes+x+(width-i)*v+u);
gamma+=(*k)*alpha;
k++;
p++;
}
}
// renormalize by the accumulated alpha-weighted kernel sum
gamma=PerceptibleReciprocal(gamma);
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
if ((channel & OpacityChannel) != 0)
SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(sharp_indexes+x,ClampToQuantum(gamma*pixel.index));
q++;
r++;
}
if (SyncCacheViewAuthenticPixels(sharp_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,AdaptiveSharpenImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
sharp_image->type=image->type;
sharp_view=DestroyCacheView(sharp_view);
edge_view=DestroyCacheView(edge_view);
image_view=DestroyCacheView(image_view);
edge_image=DestroyImage(edge_image);
for (i=0; i < (ssize_t) width; i+=2)
kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
kernel=(double **) RelinquishAlignedMemory(kernel);
if (status == MagickFalse)
sharp_image=DestroyImage(sharp_image);
return(sharp_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlurImage() blurs an image. We convolve the image with a Gaussian operator
% of the given radius and standard deviation (sigma). For reasonable results,
% the radius should be larger than sigma. Use a radius of 0 and BlurImage()
% selects a suitable radius for you.
%
% The format of the BlurImage method is:
%
% Image *BlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
% Image *BlurImageChannel(const Image *image,const ChannelType channel,
% const double radius,const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: Gaussian-blur all channels in DefaultChannels.
  */
  return(BlurImageChannel(image,DefaultChannels,radius,sigma,exception));
}
MagickExport Image *BlurImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  ExceptionInfo *exception)
{
  char
    blur_geometry[MaxTextExtent];

  Image
    *blur_image;

  KernelInfo
    *kernel;

  /*
    Blur via a separable Gaussian convolution: one horizontal pass and one
    90-degree-rotated (vertical) pass, delegated to the morphology engine.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  blur_image=(Image *) NULL;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Prefer the OpenCL-accelerated path when it is available and succeeds.
  */
  blur_image=AccelerateBlurImage(image,channel,radius,sigma,exception);
  if (blur_image != (Image *) NULL)
    return(blur_image);
#endif
  (void) FormatLocaleString(blur_geometry,MaxTextExtent,
    "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
  kernel=AcquireKernelInfo(blur_geometry);
  if (kernel == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  blur_image=MorphologyImageChannel(image,channel,ConvolveMorphology,1,
    kernel,exception);
  kernel=DestroyKernelInfo(kernel);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n v o l v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvolveImage() applies a custom convolution kernel to the image.
%
% The format of the ConvolveImage method is:
%
% Image *ConvolveImage(const Image *image,const size_t order,
% const double *kernel,ExceptionInfo *exception)
% Image *ConvolveImageChannel(const Image *image,const ChannelType channel,
% const size_t order,const double *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o order: the number of columns and rows in the filter kernel.
%
% o kernel: An array of double representing the convolution kernel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConvolveImage(const Image *image,const size_t order,
  const double *kernel,ExceptionInfo *exception)
{
  Image
    *result;

  /*
    Convolve every default channel with the caller-supplied order x order
    kernel; AMD profiling markers bracket the call when enabled.
  */
#ifdef MAGICKCORE_CLPERFMARKER
  clBeginPerfMarkerAMD(__FUNCTION__,"");
#endif
  result=ConvolveImageChannel(image,DefaultChannels,order,kernel,exception);
#ifdef MAGICKCORE_CLPERFMARKER
  clEndPerfMarkerAMD();
#endif
  return(result);
}
MagickExport Image *ConvolveImageChannel(const Image *image,
  const ChannelType channel,const size_t order,const double *kernel,
  ExceptionInfo *exception)
{
  Image
    *convolve_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  /*
    Validate arguments -- consistent with the sibling *Channel methods in
    this file, which all assert image/exception before use; kernel is
    dereferenced below, so it must not be NULL.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(kernel != (const double *) NULL);
  /*
    Copy the user-supplied order x order kernel into a KernelInfo structure.
  */
  kernel_info=AcquireKernelInfo((const char *) NULL);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  kernel_info->width=order;
  kernel_info->height=order;
  kernel_info->x=(ssize_t) (order-1)/2;
  kernel_info->y=(ssize_t) (order-1)/2;
  kernel_info->signature=MagickCoreSignature;
  /*
    width x height elements (width == height == order), matching the
    allocation style used by EdgeImage().
  */
  kernel_info->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel_info->width,kernel_info->height*sizeof(*kernel_info->values)));
  if (kernel_info->values == (double *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  for (i=0; i < (ssize_t) (order*order); i++)
    kernel_info->values[i]=kernel[i];
  convolve_image=(Image *) NULL;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  convolve_image=AccelerateConvolveImageChannel(image,channel,kernel_info,
    exception);
#endif
  if (convolve_image == (Image *) NULL)
    convolve_image=MorphologyImageChannel(image,channel,ConvolveMorphology,1,
      kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(convolve_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s p e c k l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DespeckleImage() reduces the speckle noise in an image while preserving the
% edges of the original image. A speckle removing filter uses a complementary
% hulling technique (raising pixels that are darker than their surrounding
% neighbors, then complementarily lowering pixels that are brighter than their
% surrounding neighbors) to reduce the speckle index of that image (reference
% Crimmins speckle removal).
%
% The format of the DespeckleImage method is:
%
% Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static void Hull(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,const size_t columns,const size_t rows,
  const int polarity,Quantum *magick_restrict f,Quantum *magick_restrict g)
{
  /*
    One step of the Crimmins complementary hulling operator.  f and g are
    (columns+2) x (rows+2) buffers with a one-pixel border; the neighbor
    probed is at offset (x_offset,y_offset).  polarity > 0 raises pixels
    that are at least two levels darker than the neighbor; otherwise it
    lowers pixels that are at least two levels brighter.
  */
  register Quantum
    *p,
    *q,
    *r,
    *s;
  ssize_t
    y;
  assert(f != (Quantum *) NULL);
  assert(g != (Quantum *) NULL);
  /*
    First pass: compare f against its shifted neighbor r; results go to g.
    p and q skip the top border row of their buffers.
  */
  p=f+(columns+2);
  q=g+(columns+2);
  r=p+(y_offset*((ssize_t) columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    register ssize_t
      i,
      x;
    SignedQuantum
      v;
    /*
      (2*y+1)+y*columns == y*(columns+2)+1: first interior pixel of row y,
      relative to p/q (which already skip one border row).
    */
    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        /* Raise by one level if the neighbor is >= 2 levels brighter. */
        v=(SignedQuantum) p[i];
        if ((SignedQuantum) r[i] >= (v+ScaleCharToQuantum(2)))
          v+=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        /* Lower by one level if the neighbor is >= 2 levels darker. */
        v=(SignedQuantum) p[i];
        if ((SignedQuantum) r[i] <= (v-ScaleCharToQuantum(2)))
          v-=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
  }
  /*
    Second pass: compare g against both the forward (r) and backward (s)
    neighbors and write the results back into f.
  */
  p=f+(columns+2);
  q=g+(columns+2);
  r=q+(y_offset*((ssize_t) columns+2)+x_offset);
  s=q-(y_offset*((ssize_t) columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    register ssize_t
      i,
      x;
    SignedQuantum
      v;
    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(SignedQuantum) q[i];
        if (((SignedQuantum) s[i] >= (v+ScaleCharToQuantum(2))) &&
            ((SignedQuantum) r[i] > v))
          v+=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(SignedQuantum) q[i];
        if (((SignedQuantum) s[i] <= (v-ScaleCharToQuantum(2))) &&
            ((SignedQuantum) r[i] < v))
          v-=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
  }
}
MagickExport Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
{
#define DespeckleImageTag "Despeckle/Image"
  CacheView
    *despeckle_view,
    *image_view;
  Image
    *despeckle_image;
  MagickBooleanType
    status;
  MemoryInfo
    *buffer_info,
    *pixel_info;
  register ssize_t
    i;
  Quantum
    *magick_restrict buffer,
    *magick_restrict pixels;
  size_t
    length,
    number_channels;
  /*
    The four hull probe directions; their negations and both polarities are
    applied inside the Hull() loop below (Crimmins speckle removal).
  */
  static const ssize_t
    X[4] = {0, 1, 1,-1},
    Y[4] = {1, 0, 1, 1};
  /*
    Allocate despeckled image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  despeckle_image=AccelerateDespeckleImage(image, exception);
  if (despeckle_image != (Image *) NULL)
    return(despeckle_image);
#endif
  despeckle_image=CloneImage(image,0,0,MagickTrue,exception);
  if (despeckle_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(despeckle_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&despeckle_image->exception);
      despeckle_image=DestroyImage(despeckle_image);
      return((Image *) NULL);
    }
  /*
    Allocate image buffer.  Each buffer is the image plus a one-pixel
    border on every side, which lets Hull() probe neighbors without bounds
    checks.
  */
  length=(size_t) ((image->columns+2)*(image->rows+2));
  pixel_info=AcquireVirtualMemory(length,sizeof(*pixels));
  buffer_info=AcquireVirtualMemory(length,sizeof(*buffer));
  if ((pixel_info == (MemoryInfo *) NULL) ||
      (buffer_info == (MemoryInfo *) NULL))
    {
      if (buffer_info != (MemoryInfo *) NULL)
        buffer_info=RelinquishVirtualMemory(buffer_info);
      if (pixel_info != (MemoryInfo *) NULL)
        pixel_info=RelinquishVirtualMemory(pixel_info);
      despeckle_image=DestroyImage(despeckle_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(Quantum *) GetVirtualMemoryBlob(pixel_info);
  buffer=(Quantum *) GetVirtualMemoryBlob(buffer_info);
  /*
    Reduce speckle in the image, one channel at a time (RGB + opacity, plus
    black for CMYK).
  */
  status=MagickTrue;
  number_channels=(size_t) (image->colorspace == CMYKColorspace ? 5 : 4);
  image_view=AcquireVirtualCacheView(image,exception);
  despeckle_view=AcquireAuthenticCacheView(despeckle_image,exception);
  for (i=0; i < (ssize_t) number_channels; i++)
  {
    register ssize_t
      k,
      x;
    ssize_t
      j,
      y;
    if (status == MagickFalse)
      continue;
    /*
      Skip the opacity channel when the image carries no alpha data.
    */
    if ((image->matte == MagickFalse) && (i == 3))
      continue;
    /*
      Extract channel i into the bordered pixel buffer; j indexes the
      buffer, starting past the first (border) row, and the extra j++ on
      each side of the inner loop accounts for the border columns.
    */
    (void) memset(pixels,0,length*sizeof(*pixels));
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const IndexPacket
        *magick_restrict indexes;
      register const PixelPacket
        *magick_restrict p;
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        switch (i)
        {
          case 0: pixels[j]=GetPixelRed(p); break;
          case 1: pixels[j]=GetPixelGreen(p); break;
          case 2: pixels[j]=GetPixelBlue(p); break;
          case 3: pixels[j]=GetPixelOpacity(p); break;
          case 4: pixels[j]=GetPixelBlack(indexes+x); break;
          default: break;
        }
        p++;
        j++;
      }
      j++;
    }
    /*
      Run the complementary hulling passes: each of the four directions is
      applied with both offsets and both polarities (raise dark speckles,
      then lower bright ones).
    */
    (void) memset(buffer,0,length*sizeof(*buffer));
    for (k=0; k < 4; k++)
    {
      Hull(image,X[k],Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,-1,pixels,buffer);
      Hull(image,X[k],Y[k],image->columns,image->rows,-1,pixels,buffer);
    }
    /*
      Write the despeckled channel back into the output image.
    */
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;
      register IndexPacket
        *magick_restrict indexes;
      register PixelPacket
        *magick_restrict q;
      q=GetCacheViewAuthenticPixels(despeckle_view,0,y,despeckle_image->columns,
        1,exception);
      if (q == (PixelPacket *) NULL)
        break;
      indexes=GetCacheViewAuthenticIndexQueue(despeckle_view);
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        switch (i)
        {
          case 0: SetPixelRed(q,pixels[j]); break;
          case 1: SetPixelGreen(q,pixels[j]); break;
          case 2: SetPixelBlue(q,pixels[j]); break;
          case 3: SetPixelOpacity(q,pixels[j]); break;
          case 4: SetPixelIndex(indexes+x,pixels[j]); break;
          default: break;
        }
        q++;
        j++;
      }
      sync=SyncCacheViewAuthenticPixels(despeckle_view,exception);
      if (sync == MagickFalse)
        {
          status=MagickFalse;
          break;
        }
      j++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
        proceed=SetImageProgress(image,DespeckleImageTag,(MagickOffsetType) i,
          number_channels);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  despeckle_view=DestroyCacheView(despeckle_view);
  image_view=DestroyCacheView(image_view);
  buffer_info=RelinquishVirtualMemory(buffer_info);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  despeckle_image->type=image->type;
  if (status == MagickFalse)
    despeckle_image=DestroyImage(despeckle_image);
  return(despeckle_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E d g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EdgeImage() finds edges in an image. Radius defines the radius of the
% convolution filter. Use a radius of 0 and EdgeImage() selects a suitable
% radius for you.
%
% The format of the EdgeImage method is:
%
% Image *EdgeImage(const Image *image,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EdgeImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  Image
    *edge_image;
  KernelInfo
    *kernel_info;
  register ssize_t
    i;
  size_t
    width;
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,0.5);
  kernel_info=AcquireKernelInfo((const char *) NULL);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Build a width x width edge-detection kernel: every tap is -1 except the
    center, which is width*height-1, so the taps sum to zero.  The memset
    clears the acquired structure before the fields are filled in again.
  */
  (void) memset(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (kernel_info->width-1)/2;
  kernel_info->y=(ssize_t) (kernel_info->height-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel_info->width,kernel_info->height*sizeof(*kernel_info->values)));
  if (kernel_info->values == (double *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]=(-1.0);
  /*
    After the loop i == width*height, so i/2 indexes the center tap.
  */
  kernel_info->values[i/2]=(double) kernel_info->width*kernel_info->height-1.0;
  edge_image=(Image *) NULL;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  edge_image=AccelerateConvolveImageChannel(image,DefaultChannels,kernel_info,
    exception);
#endif
  if (edge_image == (Image *) NULL)
    edge_image=MorphologyImageChannel(image,DefaultChannels,ConvolveMorphology,
      1,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(edge_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E m b o s s I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EmbossImage() returns a grayscale image with a three-dimensional effect.
% We convolve the image with a Gaussian operator of the given radius and
% standard deviation (sigma). For reasonable results, radius should be
% larger than sigma. Use a radius of 0 and Emboss() selects a suitable
% radius for you.
%
% The format of the EmbossImage method is:
%
% Image *EmbossImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EmbossImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;
  Image
    *emboss_image;
  KernelInfo
    *kernel_info;
  register ssize_t
    i;
  size_t
    width;
  ssize_t
    j,
    k,
    u,
    v;
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel_info->width,kernel_info->width*sizeof(*kernel_info->values)));
  if (kernel_info->values == (double *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Populate the emboss kernel: Gaussian-weighted taps, positive (+8*G) for
    u,v >= 0 and negative (-8*G) when either coordinate is negative.  The
    u != k test then zeroes everything except one tap per row; since k
    starts at j and decrements each row, the surviving taps form a single
    diagonal, which produces the directional relief.
  */
  j=(ssize_t) (kernel_info->width-1)/2;
  k=j;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel_info->values[i]=(double) (((u < 0) || (v < 0) ? -8.0 :
        8.0)*exp(-((double) u*u+v*v)/(2.0*MagickSigma*MagickSigma))/
        (2.0*MagickPI*MagickSigma*MagickSigma));
      if (u != k)
        kernel_info->values[i]=0.0;
      i++;
    }
    k--;
  }
  /*
    Normalize the kernel; PerceptibleReciprocal() guards against a zero sum.
  */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  emboss_image=(Image *) NULL;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  emboss_image=AccelerateConvolveImageChannel(image,DefaultChannels,kernel_info,
    exception);
#endif
  if (emboss_image == (Image *) NULL)
    emboss_image=MorphologyImageChannel(image,DefaultChannels,
      ConvolveMorphology,1,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  /*
    Equalize each channel independently to accentuate the relief.
  */
  if (emboss_image != (Image *) NULL)
    (void) EqualizeImageChannel(emboss_image,(ChannelType)
      (AllChannels &~ SyncChannels));
  return(emboss_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F i l t e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FilterImage() applies a custom convolution kernel to the image.
%
% The format of the FilterImage method is:
%
% Image *FilterImage(const Image *image,const KernelInfo *kernel,
% ExceptionInfo *exception)
% Image *FilterImageChannel(const Image *image,const ChannelType channel,
% const KernelInfo *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o kernel: the filtering kernel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FilterImage(const Image *image,const KernelInfo *kernel,
  ExceptionInfo *exception)
{
  /*
    Apply the convolution kernel to every default channel.
  */
  return(FilterImageChannel(image,DefaultChannels,kernel,exception));
}
MagickExport Image *FilterImageChannel(const Image *image,
  const ChannelType channel,const KernelInfo *kernel,ExceptionInfo *exception)
{
#define FilterImageTag "Filter/Image"
  CacheView
    *filter_view,
    *image_view;
  Image
    *filter_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  MagickPixelPacket
    bias;
  MagickRealType
    *filter_kernel;
  register ssize_t
    i;
  ssize_t
    y;
#ifdef MAGICKCORE_CLPERFMARKER
  clBeginPerfMarkerAMD(__FUNCTION__,"");
#endif
  /*
    Initialize filter image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((kernel->width % 2) == 0)
    ThrowImageException(OptionError,"KernelWidthMustBeAnOddNumber");
  if (image->debug != MagickFalse)
    {
      /*
        Log the kernel, one row per line, for debugging.
      */
      char
        format[MaxTextExtent],
        *message;
      register const double
        *k;
      ssize_t
        u,
        v;
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " FilterImage with %.20gx%.20g kernel:",(double) kernel->width,(double)
        kernel->height);
      message=AcquireString("");
      k=kernel->values;
      for (v=0; v < (ssize_t) kernel->height; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MaxTextExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < (ssize_t) kernel->width; u++)
        {
          (void) FormatLocaleString(format,MaxTextExtent,"%g ",*k++);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  filter_image=AccelerateConvolveImageChannel(image,channel,kernel,exception);
  if (filter_image != (Image *) NULL)
    {
#ifdef MAGICKCORE_CLPERFMARKER
      clEndPerfMarkerAMD();
#endif
      return(filter_image);
    }
#endif
  filter_image=CloneImage(image,0,0,MagickTrue,exception);
  if (filter_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(filter_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&filter_image->exception);
      filter_image=DestroyImage(filter_image);
      return((Image *) NULL);
    }
  /*
    Copy the kernel taps into an aligned MagickRealType working buffer.
  */
  filter_kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel->width,kernel->height*sizeof(*filter_kernel)));
  if (filter_kernel == (MagickRealType *) NULL)
    {
      filter_image=DestroyImage(filter_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    filter_kernel[i]=(MagickRealType) kernel->values[i];
  /*
    Filter image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  SetMagickPixelPacketBias(image,&bias);
  image_view=AcquireVirtualCacheView(image,exception);
  filter_view=AcquireAuthenticCacheView(filter_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,filter_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;
    register const IndexPacket
      *magick_restrict indexes;
    register const PixelPacket
      *magick_restrict p;
    register IndexPacket
      *magick_restrict filter_indexes;
    register PixelPacket
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    /*
      Fetch a virtual neighborhood wide and tall enough for the kernel
      centered on output row y.
    */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (kernel->width-1)/2L),y-
      (ssize_t) ((kernel->height-1)/2L),image->columns+kernel->width,
      kernel->height,exception);
    q=GetCacheViewAuthenticPixels(filter_view,0,y,filter_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    filter_indexes=GetCacheViewAuthenticIndexQueue(filter_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      DoublePixelPacket
        pixel;
      register const MagickRealType
        *magick_restrict k;
      register const PixelPacket
        *magick_restrict kernel_pixels;
      register ssize_t
        u;
      ssize_t
        v;
      pixel.red=bias.red;
      pixel.green=bias.green;
      pixel.blue=bias.blue;
      pixel.opacity=bias.opacity;
      pixel.index=bias.index;
      k=filter_kernel;
      kernel_pixels=p;
      /*
        NOTE(review): throughout this function the outer loop runs
        kernel->width times while advancing kernel_pixels by one image row
        per iteration, and the inner loop runs kernel->height times across
        columns.  This is only correct when width == height (enforced odd
        square kernels appear to be the only callers) -- confirm before
        passing a non-square kernel.
      */
      if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
        {
          /*
            Fast path: no alpha weighting required.
          */
          for (v=0; v < (ssize_t) kernel->width; v++)
          {
            for (u=0; u < (ssize_t) kernel->height; u++)
            {
              pixel.red+=(*k)*kernel_pixels[u].red;
              pixel.green+=(*k)*kernel_pixels[u].green;
              pixel.blue+=(*k)*kernel_pixels[u].blue;
              k++;
            }
            kernel_pixels+=image->columns+kernel->width;
          }
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(pixel.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(pixel.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(pixel.blue));
          if ((channel & OpacityChannel) != 0)
            {
              k=filter_kernel;
              kernel_pixels=p;
              for (v=0; v < (ssize_t) kernel->width; v++)
              {
                for (u=0; u < (ssize_t) kernel->height; u++)
                {
                  pixel.opacity+=(*k)*kernel_pixels[u].opacity;
                  k++;
                }
                kernel_pixels+=image->columns+kernel->width;
              }
              SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              register const IndexPacket
                *magick_restrict kernel_indexes;
              k=filter_kernel;
              kernel_indexes=indexes;
              for (v=0; v < (ssize_t) kernel->width; v++)
              {
                for (u=0; u < (ssize_t) kernel->height; u++)
                {
                  pixel.index+=(*k)*GetPixelIndex(kernel_indexes+u);
                  k++;
                }
                kernel_indexes+=image->columns+kernel->width;
              }
              SetPixelIndex(filter_indexes+x,ClampToQuantum(pixel.index));
            }
        }
      else
        {
          /*
            Alpha-weighted path: weight each tap by the pixel's alpha and
            renormalize by the accumulated alpha weight (gamma).
          */
          double
            alpha,
            gamma;
          gamma=0.0;
          for (v=0; v < (ssize_t) kernel->width; v++)
          {
            for (u=0; u < (ssize_t) kernel->height; u++)
            {
              alpha=(MagickRealType) (QuantumScale*(QuantumRange-
                GetPixelOpacity(kernel_pixels+u)));
              pixel.red+=(*k)*alpha*GetPixelRed(kernel_pixels+u);
              pixel.green+=(*k)*alpha*GetPixelGreen(kernel_pixels+u);
              pixel.blue+=(*k)*alpha*GetPixelBlue(kernel_pixels+u);
              gamma+=(*k)*alpha;
              k++;
            }
            kernel_pixels+=image->columns+kernel->width;
          }
          gamma=PerceptibleReciprocal(gamma);
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
          if ((channel & OpacityChannel) != 0)
            {
              /* Opacity itself is convolved unweighted. */
              k=filter_kernel;
              kernel_pixels=p;
              for (v=0; v < (ssize_t) kernel->width; v++)
              {
                for (u=0; u < (ssize_t) kernel->height; u++)
                {
                  pixel.opacity+=(*k)*GetPixelOpacity(kernel_pixels+u);
                  k++;
                }
                kernel_pixels+=image->columns+kernel->width;
              }
              SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              register const IndexPacket
                *magick_restrict kernel_indexes;
              k=filter_kernel;
              kernel_pixels=p;
              kernel_indexes=indexes;
              for (v=0; v < (ssize_t) kernel->width; v++)
              {
                for (u=0; u < (ssize_t) kernel->height; u++)
                {
                  alpha=(MagickRealType) (QuantumScale*(QuantumRange-
                    kernel_pixels[u].opacity));
                  pixel.index+=(*k)*alpha*GetPixelIndex(kernel_indexes+u);
                  k++;
                }
                kernel_pixels+=image->columns+kernel->width;
                kernel_indexes+=image->columns+kernel->width;
              }
              SetPixelIndex(filter_indexes+x,ClampToQuantum(gamma*pixel.index));
            }
        }
      indexes++;
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(filter_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FilterImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  filter_image->type=image->type;
  filter_view=DestroyCacheView(filter_view);
  image_view=DestroyCacheView(image_view);
  filter_kernel=(MagickRealType *) RelinquishAlignedMemory(filter_kernel);
  if (status == MagickFalse)
    filter_image=DestroyImage(filter_image);
#ifdef MAGICKCORE_CLPERFMARKER
  clEndPerfMarkerAMD();
#endif
  return(filter_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a u s s i a n B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GaussianBlurImage() blurs an image. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma).
% For reasonable results, the radius should be larger than sigma. Use a
% radius of 0 and GaussianBlurImage() selects a suitable radius for you
%
% The format of the GaussianBlurImage method is:
%
%      Image *GaussianBlurImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
% Image *GaussianBlurImageChannel(const Image *image,
% const ChannelType channel,const double radius,const double sigma,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *GaussianBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  /*
    Gaussian-blur every default channel of the image.
  */
  return(GaussianBlurImageChannel(image,DefaultChannels,radius,sigma,
    exception));
}
MagickExport Image *GaussianBlurImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  ExceptionInfo *exception)
{
  char
    geometry[MaxTextExtent];
  KernelInfo
    *kernel_info;
  Image
    *blur_image;
  /*
    Blur the selected channels by convolving with a Gaussian kernel built
    from the "gaussian:<radius>x<sigma>" geometry string.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  (void) FormatLocaleString(geometry,MaxTextExtent,"gaussian:%.20gx%.20g",
    radius,sigma);
  kernel_info=AcquireKernelInfo(geometry);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  blur_image=(Image *) NULL;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Prefer the OpenCL-accelerated convolution when available; fall through
    to the CPU morphology path on failure.
  */
  blur_image=AccelerateConvolveImageChannel(image,channel,kernel_info,
    exception);
#endif
  if (blur_image == (Image *) NULL)
    blur_image=MorphologyImageChannel(image,channel,ConvolveMorphology,1,
      kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o t i o n B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MotionBlurImage() simulates motion blur. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma).
% For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and MotionBlurImage() selects a suitable radius for you.
% Angle gives the angle of the blurring motion.
%
% Andrew Protano contributed this effect.
%
% The format of the MotionBlurImage method is:
%
% Image *MotionBlurImage(const Image *image,const double radius,
% const double sigma,const double angle,ExceptionInfo *exception)
% Image *MotionBlurImageChannel(const Image *image,const ChannelType channel,
% const double radius,const double sigma,const double angle,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o angle: Apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
*/
static double *GetMotionBlurKernel(const size_t width,const double sigma)
{
  double
    *kernel,
    sum;

  register ssize_t
    i;

  /*
    Build a 1-D half-Gaussian convolution kernel of the given width and
    normalize it to unit sum.  Returns NULL on allocation failure.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  kernel=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
    sizeof(*kernel)));
  if (kernel == (double *) NULL)
    return((double *) NULL);
  sum=0.0;
  for (i=0; i < (ssize_t) width; i++)
  {
    kernel[i]=(double) (exp((-((double) i*i)/(double) (2.0*MagickSigma*
      MagickSigma)))/(MagickSQ2PI*MagickSigma));
    sum+=kernel[i];
  }
  /*
    Normalize; the sum is strictly positive since every tap is positive.
  */
  for (i=0; i < (ssize_t) width; i++)
    kernel[i]/=sum;
  return(kernel);
}
MagickExport Image *MotionBlurImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
  /*
    Apply motion blur along the given angle to every default channel.
  */
  return(MotionBlurImageChannel(image,DefaultChannels,radius,sigma,angle,
    exception));
}
MagickExport Image *MotionBlurImageChannel(const Image *image,
const ChannelType channel,const double radius,const double sigma,
const double angle,ExceptionInfo *exception)
{
#define BlurImageTag "Blur/Image"
CacheView
*blur_view,
*image_view;
double
*kernel;
Image
*blur_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
bias;
OffsetInfo
*offset;
PointInfo
point;
register ssize_t
i;
size_t
width;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
width=GetOptimalKernelWidth1D(radius,sigma);
kernel=GetMotionBlurKernel(width,sigma);
if (kernel == (double *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
offset=(OffsetInfo *) AcquireQuantumMemory(width,sizeof(*offset));
if (offset == (OffsetInfo *) NULL)
{
kernel=(double *) RelinquishAlignedMemory(kernel);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
point.x=(double) width*sin(DegreesToRadians(angle));
point.y=(double) width*cos(DegreesToRadians(angle));
for (i=0; i < (ssize_t) width; i++)
{
offset[i].x=(ssize_t) ceil((double) (i*point.y)/hypot(point.x,point.y)-0.5);
offset[i].y=(ssize_t) ceil((double) (i*point.x)/hypot(point.x,point.y)-0.5);
}
/*
Motion blur image.
*/
#if defined(MAGICKCORE_OPENCL_SUPPORT)
blur_image=AccelerateMotionBlurImage(image,channel,kernel,width,offset,
exception);
if (blur_image != (Image *) NULL)
return blur_image;
#endif
blur_image=CloneImage(image,0,0,MagickTrue,exception);
if (blur_image == (Image *) NULL)
{
kernel=(double *) RelinquishAlignedMemory(kernel);
offset=(OffsetInfo *) RelinquishMagickMemory(offset);
return((Image *) NULL);
}
if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse)
{
kernel=(double *) RelinquishAlignedMemory(kernel);
offset=(OffsetInfo *) RelinquishMagickMemory(offset);
InheritException(exception,&blur_image->exception);
blur_image=DestroyImage(blur_image);
return((Image *) NULL);
}
status=MagickTrue;
progress=0;
GetMagickPixelPacket(image,&bias);
image_view=AcquireVirtualCacheView(image,exception);
blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,blur_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*magick_restrict blur_indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickPixelPacket
qixel;
PixelPacket
pixel;
register const IndexPacket
*magick_restrict indexes;
register double
*magick_restrict k;
register ssize_t
i;
k=kernel;
qixel=bias;
if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
{
for (i=0; i < (ssize_t) width; i++)
{
(void) GetOneCacheViewVirtualPixel(image_view,x+offset[i].x,y+
offset[i].y,&pixel,exception);
qixel.red+=(*k)*pixel.red;
qixel.green+=(*k)*pixel.green;
qixel.blue+=(*k)*pixel.blue;
qixel.opacity+=(*k)*pixel.opacity;
if (image->colorspace == CMYKColorspace)
{
indexes=GetCacheViewVirtualIndexQueue(image_view);
qixel.index+=(*k)*(*indexes);
}
k++;
}
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(qixel.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(qixel.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(qixel.blue));
if ((channel & OpacityChannel) != 0)
SetPixelOpacity(q,ClampToQuantum(qixel.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(blur_indexes+x,ClampToQuantum(qixel.index));
}
else
{
double
alpha,
gamma;
alpha=0.0;
gamma=0.0;
for (i=0; i < (ssize_t) width; i++)
{
(void) GetOneCacheViewVirtualPixel(image_view,x+offset[i].x,y+
offset[i].y,&pixel,exception);
alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(&pixel));
qixel.red+=(*k)*alpha*pixel.red;
qixel.green+=(*k)*alpha*pixel.green;
qixel.blue+=(*k)*alpha*pixel.blue;
qixel.opacity+=(*k)*pixel.opacity;
if (image->colorspace == CMYKColorspace)
{
indexes=GetCacheViewVirtualIndexQueue(image_view);
qixel.index+=(*k)*alpha*GetPixelIndex(indexes);
}
gamma+=(*k)*alpha;
k++;
}
gamma=PerceptibleReciprocal(gamma);
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(gamma*qixel.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(gamma*qixel.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(gamma*qixel.blue));
if ((channel & OpacityChannel) != 0)
SetPixelOpacity(q,ClampToQuantum(qixel.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*qixel.index));
}
q++;
}
if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,BlurImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
blur_view=DestroyCacheView(blur_view);
image_view=DestroyCacheView(image_view);
kernel=(double *) RelinquishAlignedMemory(kernel);
offset=(OffsetInfo *) RelinquishMagickMemory(offset);
if (status == MagickFalse)
blur_image=DestroyImage(blur_image);
return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% K u w a h a r a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% KuwaharaImage() is an edge preserving noise reduction filter.
%
% The format of the KuwaharaImage method is:
%
% Image *KuwaharaImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
% Image *KuwaharaImageChannel(const Image *image,const ChannelType channel,
% const double radius,const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the square window radius.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *KuwaharaImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  /*
    Convenience entry point: apply the Kuwahara edge-preserving noise
    reduction filter to all default channels.
  */
  return(KuwaharaImageChannel(image,DefaultChannels,radius,sigma,exception));
}
MagickExport Image *KuwaharaImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  ExceptionInfo *exception)
{
/* Fixed: progress tag previously misspelled as "Kiwahara/Image". */
#define KuwaharaImageTag "Kuwahara/Image"

  CacheView
    *image_view,
    *kuwahara_view;

  Image
    *gaussian_image,
    *kuwahara_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    width;

  ssize_t
    y;

  /*
    Initialize Kuwahara image attributes.  For each pixel, the filter
    examines the four width x width windows touching it, and replaces the
    pixel with a sample taken from the center of the window whose luma
    variance is smallest (an edge-preserving smoothing).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  (void) channel;  /* channel selection is not honored by this implementation */
  width=(size_t) radius+1;
  /*
    Pre-smooth the input; quadrant statistics and the final interpolated
    sample are both taken from this blurred copy.
  */
  gaussian_image=BlurImage(image,radius,sigma,exception);
  if (gaussian_image == (Image *) NULL)
    return((Image *) NULL);
  kuwahara_image=CloneImage(image,0,0,MagickTrue,exception);
  if (kuwahara_image == (Image *) NULL)
    {
      gaussian_image=DestroyImage(gaussian_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(kuwahara_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&kuwahara_image->exception);
      gaussian_image=DestroyImage(gaussian_image);
      kuwahara_image=DestroyImage(kuwahara_image);
      return((Image *) NULL);
    }
  /*
    Edge preserving noise reduction filter.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(gaussian_image,exception);
  kuwahara_view=AcquireAuthenticCacheView(kuwahara_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,kuwahara_image,kuwahara_image->rows,1)
#endif
  for (y=0; y < (ssize_t) kuwahara_image->rows; y++)
  {
    register IndexPacket
      *magick_restrict kuwahara_indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(kuwahara_view,0,y,kuwahara_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    kuwahara_indexes=GetCacheViewAuthenticIndexQueue(kuwahara_view);
    for (x=0; x < (ssize_t) kuwahara_image->columns; x++)
    {
      double
        min_variance;

      MagickPixelPacket
        pixel;

      RectangleInfo
        quadrant,
        target;

      register ssize_t
        i;

      min_variance=MagickMaximumValue;
      SetGeometry(gaussian_image,&target);
      quadrant.width=width;
      quadrant.height=width;
      /*
        Evaluate the four quadrants (NW, NE, SW, SE of (x,y)) and remember
        the one with the smallest luma variance.
      */
      for (i=0; i < 4; i++)
      {
        const PixelPacket
          *magick_restrict p;

        double
          variance;

        MagickPixelPacket
          mean;

        register const PixelPacket
          *magick_restrict k;

        register ssize_t
          n;

        quadrant.x=x;
        quadrant.y=y;
        switch (i)
        {
          case 0:
          {
            quadrant.x=x-(ssize_t) (width-1);
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 1:
          {
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 2:
          {
            quadrant.x=x-(ssize_t) (width-1);
            break;
          }
          default:
            break;
        }
        p=GetCacheViewVirtualPixels(image_view,quadrant.x,quadrant.y,
          quadrant.width,quadrant.height,exception);
        if (p == (const PixelPacket *) NULL)
          break;
        GetMagickPixelPacket(image,&mean);
        k=p;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          mean.red+=(double) k->red;
          mean.green+=(double) k->green;
          mean.blue+=(double) k->blue;
          k++;
        }
        mean.red/=(double) (width*width);
        mean.green/=(double) (width*width);
        mean.blue/=(double) (width*width);
        /*
          Variance of luma about the window mean.
          NOTE(review): luma uses `image` attributes while the pixels come
          from gaussian_image -- confirm this mix is intended.
        */
        k=p;
        variance=0.0;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          double
            luma;

          luma=GetPixelLuma(image,k);
          variance+=(luma-MagickPixelLuma(&mean))*(luma-MagickPixelLuma(&mean));
          k++;
        }
        if (variance < min_variance)
          {
            min_variance=variance;
            target=quadrant;
          }
      }
      if (i < 4)
        {
          /* pixel fetch failed for some quadrant: abort this row */
          status=MagickFalse;
          break;
        }
      /*
        Sample the blurred image at the center of the winning quadrant.
      */
      status=InterpolateMagickPixelPacket(gaussian_image,image_view,
        UndefinedInterpolatePixel,(double) target.x+target.width/2.0,
        (double) target.y+target.height/2.0,&pixel,exception);
      if (status == MagickFalse)
        break;
      SetPixelPacket(kuwahara_image,&pixel,q,kuwahara_indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(kuwahara_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,KuwaharaImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  kuwahara_view=DestroyCacheView(kuwahara_view);
  image_view=DestroyCacheView(image_view);
  gaussian_image=DestroyImage(gaussian_image);
  if (status == MagickFalse)
    kuwahara_image=DestroyImage(kuwahara_image);
  return(kuwahara_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L o c a l C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LocalContrastImage() attempts to increase the appearance of large-scale
% light-dark transitions. Local contrast enhancement works similarly to
% sharpening with an unsharp mask, however the mask is instead created using
% an image with a greater blur distance.
%
% The format of the LocalContrastImage method is:
%
% Image *LocalContrastImage(const Image *image, const double radius,
% const double strength, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian blur, in percentage with 100%
% resulting in a blur radius of 20% of largest dimension.
%
% o strength: the strength of the blur mask in percentage.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LocalContrastImage(const Image *image,const double radius,
  const double strength,ExceptionInfo *exception)
{
/* NOTE(review): tag is defined but no progress reporting happens below --
   confirm whether progress callbacks were intended. */
#define LocalContrastImageTag "LocalContrast/Image"

  CacheView
    *image_view,
    *contrast_view;

  float
    *interImage,      /* rows x (columns+2*width) blurred-luma buffer */
    *scanLinePixels,  /* per-thread scanline workspace */
    totalWeight;      /* normalization for the triangular filter */

  Image
    *contrast_image;

  MagickBooleanType
    status;

  MemoryInfo
    *scanLinePixels_info,
    *interImage_info;

  ssize_t
    scanLineSize,
    width;            /* blur half-width in pixels, derived from radius% */

  /*
    Initialize contrast image attributes.  The filter is a separable
    unsharp mask on luma: a triangular blur is run vertically then
    horizontally, and each pixel's RGB is scaled by the ratio of the
    boosted luma to the original luma.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the GPU path when OpenCL is available and applicable. */
  contrast_image=AccelerateLocalContrastImage(image,radius,strength,exception);
  if (contrast_image != (Image *) NULL)
    return(contrast_image);
#endif
  contrast_image=CloneImage(image,0,0,MagickTrue,exception);
  if (contrast_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(contrast_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&contrast_image->exception);
      contrast_image=DestroyImage(contrast_image);
      return((Image *) NULL);
    }
  image_view=AcquireVirtualCacheView(image,exception);
  contrast_view=AcquireAuthenticCacheView(contrast_image,exception);
  /*
    radius is a percentage of the largest dimension: 100% => 20% of it
    (0.002 * 100).  The scanline is padded by width on each side.
  */
  scanLineSize=(ssize_t) MagickMax(image->columns,image->rows);
  width=(ssize_t) scanLineSize*0.002f*fabs(radius);
  scanLineSize+=(2*width);
  scanLinePixels_info=AcquireVirtualMemory(GetOpenMPMaximumThreads()*
    scanLineSize,sizeof(*scanLinePixels));
  if (scanLinePixels_info == (MemoryInfo *) NULL)
    {
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  scanLinePixels=(float *) GetVirtualMemoryBlob(scanLinePixels_info);
  /*
    Create intermediate buffer.
  */
  interImage_info=AcquireVirtualMemory(image->rows*(image->columns+(2*width)),
    sizeof(*interImage));
  if (interImage_info == (MemoryInfo *) NULL)
    {
      scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info);
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  interImage=(float *) GetVirtualMemoryBlob(interImage_info);
  /* Sum of the triangular weights 1..width..1 equals (width+1)^2. */
  totalWeight=(width+1)*(width+1);
  /*
    Vertical pass.
  */
  status=MagickTrue;
  {
    ssize_t
      x;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) \
      magick_number_threads(image,image,image->columns,1)
#endif
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      const int
        id = GetOpenMPThreadId();

      const PixelPacket
        *magick_restrict p;

      float
        *out,
        *pix,
        *pixels;

      register ssize_t
        y;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      /* Each thread owns a disjoint slice of the scanline workspace. */
      pixels=scanLinePixels;
      pixels+=id*scanLineSize;
      pix=pixels;
      p=GetCacheViewVirtualPixels(image_view,x,-width,1,image->rows+(2*width),
        exception);
      if (p == (const PixelPacket *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      /* Cache the padded column's luma values. */
      for (y=0; y < (ssize_t) image->rows+(2*width); y++)
      {
        *pix++=(float)GetPixelLuma(image,p);
        p++;
      }
      out=interImage+x+width;
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        float
          sum,
          weight;

        /* Triangular filter: weights ramp 1..width, then back down. */
        weight=1.0f;
        sum=0;
        pix=pixels+y;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* write to output */
        *out=sum/totalWeight;
        /* mirror into padding */
        if (x <= width && x != 0)
          *(out-(x*2))=*out;
        if ((x > (ssize_t) image->columns-width-2) &&
            (x != (ssize_t) image->columns-1))
          *(out+((image->columns-x-1)*2))=*out;
        out+=image->columns+(width*2);
      }
    }
  }
  /*
    Horizontal pass.
  */
  {
    ssize_t
      y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) \
      magick_number_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const int
        id = GetOpenMPThreadId();

      const PixelPacket
        *magick_restrict p;

      float
        *pix,
        *pixels;

      register PixelPacket
        *magick_restrict q;

      register ssize_t
        x;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      pixels=scanLinePixels;
      pixels+=id*scanLineSize;
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,
        exception);
      q=GetCacheViewAuthenticPixels(contrast_view,0,y,image->columns,1,
        exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      /* Pull the vertically blurred row (with padding) into the workspace. */
      memcpy(pixels,interImage+(y*(image->columns+(2*width))),(image->columns+
        (2*width))*sizeof(float));
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        float
          mult,
          srcVal,
          sum,
          weight;

        weight=1.0f;
        sum=0;
        pix=pixels+x;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* Apply and write */
        srcVal=(float) GetPixelLuma(image,p);
        /* Unsharp-mask on luma; scale RGB by boosted/original luma ratio.
           NOTE(review): srcVal == 0 (pure black pixel) divides by zero
           here -- confirm upstream handling. */
        mult=(srcVal-(sum/totalWeight))*(strength/100.0f);
        mult=(srcVal+mult)/srcVal;
        SetPixelRed(q,ClampToQuantum(GetPixelRed(p)*mult));
        SetPixelGreen(q,ClampToQuantum(GetPixelGreen(p)*mult));
        SetPixelBlue(q,ClampToQuantum(GetPixelBlue(p)*mult));
        p++;
        q++;
      }
      if (SyncCacheViewAuthenticPixels(contrast_view,exception) == MagickFalse)
        status=MagickFalse;
    }
  }
  scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info);
  interImage_info=RelinquishVirtualMemory(interImage_info);
  contrast_view=DestroyCacheView(contrast_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    contrast_image=DestroyImage(contrast_image);
  return(contrast_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P r e v i e w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PreviewImage() tiles 9 thumbnails of the specified image with an image
% processing operation applied with varying parameters. This may be helpful
% in pin-pointing an appropriate parameter for a particular image processing
% operation.
%
% The format of the PreviewImages method is:
%
% Image *PreviewImages(const Image *image,const PreviewType preview,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o preview: the image processing operation.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PreviewImage(const Image *image,const PreviewType preview,
  ExceptionInfo *exception)
{
#define NumberTiles 9
#define PreviewImageTag "Preview/Image"
#define DefaultPreviewGeometry "204x204+10+10"

  char
    factor[MaxTextExtent],  /* textual parameter used in tile labels */
    label[MaxTextExtent];

  double
    degrees,
    gamma,
    percentage,
    radius,
    sigma,
    threshold;

  Image
    *images,
    *montage_image,
    *preview_image,
    *thumbnail;

  ImageInfo
    *preview_info;

  MagickBooleanType
    proceed;

  MontageInfo
    *montage_info;

  QuantizeInfo
    quantize_info;

  RectangleInfo
    geometry;

  register ssize_t
    i,
    x;

  size_t
    colors;

  ssize_t
    y;

  /*
    Open output image file.  Builds NumberTiles thumbnails, applying the
    selected operation with a parameter that ramps per tile; the center
    tile is the unmodified thumbnail.  The tiles are then montaged 3x3.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Per-tile parameter ramps: each iteration below advances these. */
  colors=2;
  degrees=0.0;
  gamma=(-0.2f);
  preview_info=AcquireImageInfo();
  SetGeometry(image,&geometry);
  (void) ParseMetaGeometry(DefaultPreviewGeometry,&geometry.x,&geometry.y,
    &geometry.width,&geometry.height);
  images=NewImageList();
  percentage=12.5;
  GetQuantizeInfo(&quantize_info);
  radius=0.0;
  sigma=1.0;
  threshold=0.0;
  x=0;
  y=0;
  for (i=0; i < NumberTiles; i++)
  {
    thumbnail=ThumbnailImage(image,geometry.width,geometry.height,exception);
    if (thumbnail == (Image *) NULL)
      break;
    (void) SetImageProgressMonitor(thumbnail,(MagickProgressMonitor) NULL,
      (void *) NULL);
    (void) SetImageProperty(thumbnail,"label",DefaultTileLabel);
    if (i == (NumberTiles/2))
      {
        /* Center tile: keep the original thumbnail, highlighted matte. */
        (void) QueryColorDatabase("#dfdfdf",&thumbnail->matte_color,exception);
        AppendImageToList(&images,thumbnail);
        continue;
      }
    switch (preview)
    {
      case RotatePreview:
      {
        degrees+=45.0;
        preview_image=RotateImage(thumbnail,degrees,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"rotate %g",degrees);
        break;
      }
      case ShearPreview:
      {
        degrees+=5.0;
        preview_image=ShearImage(thumbnail,degrees,degrees,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"shear %gx%g",
          degrees,2.0*degrees);
        break;
      }
      case RollPreview:
      {
        x=(ssize_t) ((i+1)*thumbnail->columns)/NumberTiles;
        y=(ssize_t) ((i+1)*thumbnail->rows)/NumberTiles;
        preview_image=RollImage(thumbnail,x,y,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"roll %+.20gx%+.20g",
          (double) x,(double) y);
        break;
      }
      case HuePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MaxTextExtent,"100,100,%g",
          2.0*percentage);
        (void) ModulateImage(preview_image,factor);
        (void) FormatLocaleString(label,MaxTextExtent,"modulate %s",factor);
        break;
      }
      case SaturationPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MaxTextExtent,"100,%g",2.0*percentage);
        (void) ModulateImage(preview_image,factor);
        (void) FormatLocaleString(label,MaxTextExtent,"modulate %s",factor);
        break;
      }
      case BrightnessPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MaxTextExtent,"%g",2.0*percentage);
        (void) ModulateImage(preview_image,factor);
        (void) FormatLocaleString(label,MaxTextExtent,"modulate %s",factor);
        break;
      }
      case GammaPreview:
      default:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        gamma+=0.4f;
        (void) GammaImageChannel(preview_image,DefaultChannels,gamma);
        (void) FormatLocaleString(label,MaxTextExtent,"gamma %g",gamma);
        break;
      }
      case SpiffPreview:
      {
        /* i rounds of contrast enhancement. */
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image != (Image *) NULL)
          for (x=0; x < i; x++)
            (void) ContrastImage(preview_image,MagickTrue);
        (void) FormatLocaleString(label,MaxTextExtent,"contrast (%.20g)",
          (double) i+1);
        break;
      }
      case DullPreview:
      {
        /* i rounds of contrast reduction. */
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        for (x=0; x < i; x++)
          (void) ContrastImage(preview_image,MagickFalse);
        (void) FormatLocaleString(label,MaxTextExtent,"+contrast (%.20g)",
          (double) i+1);
        break;
      }
      case GrayscalePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        colors<<=1;  /* double the palette size per tile */
        quantize_info.number_colors=colors;
        quantize_info.colorspace=GRAYColorspace;
        (void) QuantizeImage(&quantize_info,preview_image);
        (void) FormatLocaleString(label,MaxTextExtent,
          "-colorspace gray -colors %.20g",(double) colors);
        break;
      }
      case QuantizePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        colors<<=1;
        quantize_info.number_colors=colors;
        (void) QuantizeImage(&quantize_info,preview_image);
        (void) FormatLocaleString(label,MaxTextExtent,"colors %.20g",(double)
          colors);
        break;
      }
      case DespecklePreview:
      {
        /* Apply despeckle i times, cascading through the thumbnail. */
        for (x=0; x < (i-1); x++)
        {
          preview_image=DespeckleImage(thumbnail,exception);
          if (preview_image == (Image *) NULL)
            break;
          thumbnail=DestroyImage(thumbnail);
          thumbnail=preview_image;
        }
        preview_image=DespeckleImage(thumbnail,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(label,MaxTextExtent,"despeckle (%.20g)",
          (double) i+1);
        break;
      }
      case ReduceNoisePreview:
      {
        preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) radius,
          (size_t) radius,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"noise %g",radius);
        break;
      }
      case AddNoisePreview:
      {
        /* NOTE(review): case 4 is missing so tile 4 falls through to the
           default, which clobbers thumbnail->magick; also the operation
           uses StatisticImage rather than an add-noise call -- confirm. */
        switch ((int) i)
        {
          case 0:
          {
            (void) CopyMagickString(factor,"uniform",MaxTextExtent);
            break;
          }
          case 1:
          {
            (void) CopyMagickString(factor,"gaussian",MaxTextExtent);
            break;
          }
          case 2:
          {
            (void) CopyMagickString(factor,"multiplicative",MaxTextExtent);
            break;
          }
          case 3:
          {
            (void) CopyMagickString(factor,"impulse",MaxTextExtent);
            break;
          }
          case 5:
          {
            (void) CopyMagickString(factor,"laplacian",MaxTextExtent);
            break;
          }
          case 6:
          {
            (void) CopyMagickString(factor,"poisson",MaxTextExtent);
            break;
          }
          default:
          {
            (void) CopyMagickString(thumbnail->magick,"NULL",MaxTextExtent);
            break;
          }
        }
        preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) i,
          (size_t) i,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"+noise %s",factor);
        break;
      }
      case SharpenPreview:
      {
        preview_image=SharpenImage(thumbnail,radius,sigma,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"sharpen %gx%g",
          radius,sigma);
        break;
      }
      case BlurPreview:
      {
        preview_image=BlurImage(thumbnail,radius,sigma,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"blur %gx%g",radius,
          sigma);
        break;
      }
      case ThresholdPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        /* NOTE(review): BilevelImage is applied to `thumbnail`, not the
           cloned `preview_image`, so the displayed tile appears
           unthresholded -- looks like a bug; confirm against upstream. */
        (void) BilevelImage(thumbnail,
          (double) (percentage*((MagickRealType) QuantumRange+1.0))/100.0);
        (void) FormatLocaleString(label,MaxTextExtent,"threshold %g",
          (double) (percentage*((MagickRealType) QuantumRange+1.0))/100.0);
        break;
      }
      case EdgeDetectPreview:
      {
        preview_image=EdgeImage(thumbnail,radius,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"edge %g",radius);
        break;
      }
      case SpreadPreview:
      {
        preview_image=SpreadImage(thumbnail,radius,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"spread %g",
          radius+0.5);
        break;
      }
      case SolarizePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) SolarizeImage(preview_image,(double) QuantumRange*
          percentage/100.0);
        (void) FormatLocaleString(label,MaxTextExtent,"solarize %g",
          (QuantumRange*percentage)/100.0);
        break;
      }
      case ShadePreview:
      {
        degrees+=10.0;
        preview_image=ShadeImage(thumbnail,MagickTrue,degrees,degrees,
          exception);
        (void) FormatLocaleString(label,MaxTextExtent,"shade %gx%g",
          degrees,degrees);
        break;
      }
      case RaisePreview:
      {
        RectangleInfo
          raise;

        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        raise.width=(size_t) (2*i+2);
        raise.height=(size_t) (2*i+2);
        raise.x=(i-1)/2;
        raise.y=(i-1)/2;
        (void) RaiseImage(preview_image,&raise,MagickTrue);
        (void) FormatLocaleString(label,MaxTextExtent,
          "raise %.20gx%.20g%+.20g%+.20g",(double) raise.width,(double)
          raise.height,(double) raise.x,(double) raise.y);
        break;
      }
      case SegmentPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        threshold+=0.4f;
        (void) SegmentImage(preview_image,sRGBColorspace,MagickFalse,threshold,
          threshold);
        (void) FormatLocaleString(label,MaxTextExtent,"segment %gx%g",
          threshold,threshold);
        break;
      }
      case SwirlPreview:
      {
        preview_image=SwirlImage(thumbnail,degrees,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"swirl %g",degrees);
        degrees+=45.0;
        break;
      }
      case ImplodePreview:
      {
        degrees+=0.1f;
        preview_image=ImplodeImage(thumbnail,degrees,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"implode %g",degrees);
        break;
      }
      case WavePreview:
      {
        degrees+=5.0f;
        preview_image=WaveImage(thumbnail,0.5*degrees,2.0*degrees,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"wave %gx%g",
          0.5*degrees,2.0*degrees);
        break;
      }
      case OilPaintPreview:
      {
        preview_image=OilPaintImage(thumbnail,(double) radius,exception);
        (void) FormatLocaleString(label,MaxTextExtent,"paint %g",radius);
        break;
      }
      case CharcoalDrawingPreview:
      {
        preview_image=CharcoalImage(thumbnail,(double) radius,(double) sigma,
          exception);
        (void) FormatLocaleString(label,MaxTextExtent,"charcoal %gx%g",
          radius,sigma);
        break;
      }
      case JPEGPreview:
      {
        char
          filename[MaxTextExtent];

        int
          file;

        MagickBooleanType
          status;

        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        preview_info->quality=(size_t) percentage;
        (void) FormatLocaleString(factor,MaxTextExtent,"%.20g",(double)
          preview_info->quality);
        /* Round-trip through a temporary JPEG file to show quality loss. */
        file=AcquireUniqueFileResource(filename);
        if (file != -1)
          file=close(file)-1;  /* close handle; file becomes -1 on success */
        (void) FormatLocaleString(preview_image->filename,MaxTextExtent,
          "jpeg:%s",filename);
        status=WriteImage(preview_info,preview_image);
        if (status != MagickFalse)
          {
            Image
              *quality_image;

            (void) CopyMagickString(preview_info->filename,
              preview_image->filename,MaxTextExtent);
            quality_image=ReadImage(preview_info,exception);
            if (quality_image != (Image *) NULL)
              {
                preview_image=DestroyImage(preview_image);
                preview_image=quality_image;
              }
          }
        (void) RelinquishUniqueFileResource(preview_image->filename);
        /* Label with the compressed size in mb/kb/b.
           NOTE(review): the bytes branch measures `thumbnail`, the others
           `preview_image` -- confirm which was intended. */
        if ((GetBlobSize(preview_image)/1024) >= 1024)
          (void) FormatLocaleString(label,MaxTextExtent,"quality %s\n%gmb ",
            factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/
            1024.0/1024.0);
        else
          if (GetBlobSize(preview_image) >= 1024)
            (void) FormatLocaleString(label,MaxTextExtent,
              "quality %s\n%gkb ",factor,(double) ((MagickOffsetType)
              GetBlobSize(preview_image))/1024.0);
          else
            (void) FormatLocaleString(label,MaxTextExtent,"quality %s\n%.20gb ",
              factor,(double) ((MagickOffsetType) GetBlobSize(thumbnail)));
        break;
      }
    }
    thumbnail=DestroyImage(thumbnail);
    /* Advance the per-tile parameter ramps. */
    percentage+=12.5;
    radius+=0.5;
    sigma+=0.25;
    if (preview_image == (Image *) NULL)
      break;
    (void) DeleteImageProperty(preview_image,"label");
    (void) SetImageProperty(preview_image,"label",label);
    AppendImageToList(&images,preview_image);
    proceed=SetImageProgress(image,PreviewImageTag,(MagickOffsetType) i,
      NumberTiles);
    if (proceed == MagickFalse)
      break;
  }
  if (images == (Image *) NULL)
    {
      preview_info=DestroyImageInfo(preview_info);
      return((Image *) NULL);
    }
  /*
    Create the montage.
  */
  montage_info=CloneMontageInfo(preview_info,(MontageInfo *) NULL);
  (void) CopyMagickString(montage_info->filename,image->filename,MaxTextExtent);
  montage_info->shadow=MagickTrue;
  (void) CloneString(&montage_info->tile,"3x3");
  (void) CloneString(&montage_info->geometry,DefaultPreviewGeometry);
  (void) CloneString(&montage_info->frame,DefaultTileFrame);
  montage_image=MontageImages(images,montage_info,exception);
  montage_info=DestroyMontageInfo(montage_info);
  images=DestroyImageList(images);
  if (montage_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  if (montage_image->montage != (char *) NULL)
    {
      /*
        Free image directory.
      */
      montage_image->montage=(char *) RelinquishMagickMemory(
        montage_image->montage);
      if (image->directory != (char *) NULL)
        montage_image->directory=(char *) RelinquishMagickMemory(
          montage_image->directory);
    }
  preview_info=DestroyImageInfo(preview_info);
  return(montage_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o t a t i o n a l B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotationalBlurImage() applies a rotational blur to the image.
%
% Andrew Protano contributed this effect.
%
% The format of the RotationalBlurImage method is:
%
% Image *RotationalBlurImage(const Image *image,const double angle,
% ExceptionInfo *exception)
% Image *RotationalBlurImageChannel(const Image *image,
% const ChannelType channel,const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o angle: the angle of the rotational blur.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotationalBlurImage(const Image *image,const double angle,
  ExceptionInfo *exception)
{
  /*
    Convenience entry point: rotationally blur all default channels.
  */
  return(RotationalBlurImageChannel(image,DefaultChannels,angle,exception));
}
MagickExport Image *RotationalBlurImageChannel(const Image *image,
const ChannelType channel,const double angle,ExceptionInfo *exception)
{
CacheView
*blur_view,
*image_view;
Image
*blur_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
bias;
MagickRealType
blur_radius,
*cos_theta,
offset,
*sin_theta,
theta;
PointInfo
blur_center;
register ssize_t
i;
size_t
n;
ssize_t
y;
/*
Allocate blur image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
blur_image=AccelerateRadialBlurImage(image,channel,angle,exception);
if (blur_image != (Image *) NULL)
return(blur_image);
#endif
blur_image=CloneImage(image,0,0,MagickTrue,exception);
if (blur_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse)
{
InheritException(exception,&blur_image->exception);
blur_image=DestroyImage(blur_image);
return((Image *) NULL);
}
blur_center.x=(double) (image->columns-1)/2.0;
blur_center.y=(double) (image->rows-1)/2.0;
blur_radius=hypot(blur_center.x,blur_center.y);
n=(size_t) fabs(4.0*DegreesToRadians(angle)*sqrt((double) blur_radius)+2UL);
theta=DegreesToRadians(angle)/(MagickRealType) (n-1);
cos_theta=(MagickRealType *) AcquireQuantumMemory((size_t) n,
sizeof(*cos_theta));
sin_theta=(MagickRealType *) AcquireQuantumMemory((size_t) n,
sizeof(*sin_theta));
if ((cos_theta == (MagickRealType *) NULL) ||
(sin_theta == (MagickRealType *) NULL))
{
if (cos_theta != (MagickRealType *) NULL)
cos_theta=(MagickRealType *) RelinquishMagickMemory(cos_theta);
if (sin_theta != (MagickRealType *) NULL)
sin_theta=(MagickRealType *) RelinquishMagickMemory(sin_theta);
blur_image=DestroyImage(blur_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
offset=theta*(MagickRealType) (n-1)/2.0;
for (i=0; i < (ssize_t) n; i++)
{
cos_theta[i]=cos((double) (theta*i-offset));
sin_theta[i]=sin((double) (theta*i-offset));
}
/*
Radial blur image.
*/
status=MagickTrue;
progress=0;
GetMagickPixelPacket(image,&bias);
image_view=AcquireVirtualCacheView(image,exception);
blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,blur_image,blur_image->rows,1)
#endif
for (y=0; y < (ssize_t) blur_image->rows; y++)
{
register const IndexPacket
*magick_restrict indexes;
register IndexPacket
*magick_restrict blur_indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view);
for (x=0; x < (ssize_t) blur_image->columns; x++)
{
MagickPixelPacket
qixel;
MagickRealType
normalize,
radius;
PixelPacket
pixel;
PointInfo
center;
register ssize_t
i;
size_t
step;
center.x=(double) x-blur_center.x;
center.y=(double) y-blur_center.y;
radius=hypot((double) center.x,center.y);
if (radius == 0)
step=1;
else
{
step=(size_t) (blur_radius/radius);
if (step == 0)
step=1;
else
if (step >= n)
step=n-1;
}
normalize=0.0;
qixel=bias;
if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
{
for (i=0; i < (ssize_t) n; i+=(ssize_t) step)
{
(void) GetOneCacheViewVirtualPixel(image_view,(ssize_t)
(blur_center.x+center.x*cos_theta[i]-center.y*sin_theta[i]+0.5),
(ssize_t) (blur_center.y+center.x*sin_theta[i]+center.y*
cos_theta[i]+0.5),&pixel,exception);
qixel.red+=pixel.red;
qixel.green+=pixel.green;
qixel.blue+=pixel.blue;
qixel.opacity+=pixel.opacity;
if (image->colorspace == CMYKColorspace)
{
indexes=GetCacheViewVirtualIndexQueue(image_view);
qixel.index+=(*indexes);
}
normalize+=1.0;
}
normalize=PerceptibleReciprocal(normalize);
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(normalize*qixel.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(normalize*qixel.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(normalize*qixel.blue));
if ((channel & OpacityChannel) != 0)
SetPixelOpacity(q,ClampToQuantum(normalize*qixel.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(blur_indexes+x,ClampToQuantum(normalize*qixel.index));
}
else
{
double
alpha,
gamma;
alpha=1.0;
gamma=0.0;
for (i=0; i < (ssize_t) n; i+=(ssize_t) step)
{
(void) GetOneCacheViewVirtualPixel(image_view,(ssize_t)
(blur_center.x+center.x*cos_theta[i]-center.y*sin_theta[i]+0.5),
(ssize_t) (blur_center.y+center.x*sin_theta[i]+center.y*
cos_theta[i]+0.5),&pixel,exception);
alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(&pixel));
qixel.red+=alpha*pixel.red;
qixel.green+=alpha*pixel.green;
qixel.blue+=alpha*pixel.blue;
qixel.opacity+=pixel.opacity;
if (image->colorspace == CMYKColorspace)
{
indexes=GetCacheViewVirtualIndexQueue(image_view);
qixel.index+=alpha*(*indexes);
}
gamma+=alpha;
normalize+=1.0;
}
gamma=PerceptibleReciprocal(gamma);
normalize=PerceptibleReciprocal(normalize);
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(gamma*qixel.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(gamma*qixel.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(gamma*qixel.blue));
if ((channel & OpacityChannel) != 0)
SetPixelOpacity(q,ClampToQuantum(normalize*qixel.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*qixel.index));
}
q++;
}
if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,BlurImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
blur_view=DestroyCacheView(blur_view);
image_view=DestroyCacheView(image_view);
cos_theta=(MagickRealType *) RelinquishMagickMemory(cos_theta);
sin_theta=(MagickRealType *) RelinquishMagickMemory(sin_theta);
if (status == MagickFalse)
blur_image=DestroyImage(blur_image);
return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e l e c t i v e B l u r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SelectiveBlurImage() selectively blur pixels within a contrast threshold.
% It is similar to the unsharpen mask that sharpens everything with contrast
% above a certain threshold.
%
% The format of the SelectiveBlurImage method is:
%
% Image *SelectiveBlurImage(const Image *image,const double radius,
% const double sigma,const double threshold,ExceptionInfo *exception)
% Image *SelectiveBlurImageChannel(const Image *image,
% const ChannelType channel,const double radius,const double sigma,
% const double threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o threshold: only pixels within this contrast threshold are included
% in the blur operation.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SelectiveBlurImage(const Image *image,const double radius,
  const double sigma,const double threshold,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: selectively blur all default channels with the
    given Gaussian parameters and contrast threshold.
  */
  return(SelectiveBlurImageChannel(image,DefaultChannels,radius,sigma,
    threshold,exception));
}
MagickExport Image *SelectiveBlurImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  const double threshold,ExceptionInfo *exception)
{
#define SelectiveBlurImageTag "SelectiveBlur/Image"

  CacheView
    *blur_view,
    *image_view,
    *luminance_view;

  double
    *kernel;

  Image
    *blur_image,
    *luminance_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    center,
    j,
    u,
    v,
    y;

  /*
    Initialize blur image attributes.  Only neighborhood pixels whose
    luminance is within `threshold' of the center pixel contribute to the
    Gaussian blur, so high-contrast edges are preserved.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Gaussian weighting kernel: width x width taps, width is odd. */
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
    width*sizeof(*kernel)));
  if (kernel == (double *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  j=(ssize_t) (width-1)/2;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
      kernel[i++]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
        MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
  }
  if (image->debug != MagickFalse)
    {
      /* Dump the kernel weights to the log, one row per line. */
      char
        format[MaxTextExtent],
        *message;

      register const double
        *k;

      ssize_t
        u,
        v;

      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " SelectiveBlurImage with %.20gx%.20g kernel:",(double) width,(double)
        width);
      message=AcquireString("");
      k=kernel;
      for (v=0; v < (ssize_t) width; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MaxTextExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < (ssize_t) width; u++)
        {
          (void) FormatLocaleString(format,MaxTextExtent,"%+f ",*k++);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    {
      kernel=(double *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse)
    {
      kernel=(double *) RelinquishAlignedMemory(kernel);
      InheritException(exception,&blur_image->exception);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /* Grayscale copy of the source; used to measure local contrast. */
  luminance_image=CloneImage(image,0,0,MagickTrue,exception);
  if (luminance_image == (Image *) NULL)
    {
      kernel=(double *) RelinquishAlignedMemory(kernel);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  status=TransformImageColorspace(luminance_image,GRAYColorspace);
  if (status == MagickFalse)
    {
      InheritException(exception,&luminance_image->exception);
      kernel=(double *) RelinquishAlignedMemory(kernel);
      blur_image=DestroyImage(blur_image);
      luminance_image=DestroyImage(luminance_image);
      return((Image *) NULL);
    }
  /*
    Threshold blur image.
  */
  status=MagickTrue;
  progress=0;
  /*
    Offset of the center tap inside a (columns+width) x width virtual
    neighborhood row buffer.
  */
  center=(ssize_t) ((image->columns+width)*((width-1)/2L)+((width-1)/2L));
  GetMagickPixelPacket(image,&bias);
  SetMagickPixelPacketBias(image,&bias);
  image_view=AcquireVirtualCacheView(image,exception);
  luminance_view=AcquireVirtualCacheView(luminance_image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      gamma;

    MagickBooleanType
      sync;

    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict l,
      *magick_restrict p;

    register IndexPacket
      *magick_restrict blur_indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      p/l are (columns+width) x width virtual neighborhoods centered
      vertically on row y (source pixels and luminance respectively).
    */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (width-1)/2L),y-(ssize_t)
      ((width-1)/2L),image->columns+width,width,exception);
    l=GetCacheViewVirtualPixels(luminance_view,-((ssize_t) (width-1)/2L),y-
      (ssize_t) ((width-1)/2L),luminance_image->columns+width,width,exception);
    q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) ||
        (l == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        contrast;

      DoublePixelPacket
        pixel;

      MagickRealType
        intensity;

      register const double
        *magick_restrict k;

      register ssize_t
        u;

      ssize_t
        j,
        v;

      pixel.red=bias.red;
      pixel.green=bias.green;
      pixel.blue=bias.blue;
      pixel.opacity=bias.opacity;
      pixel.index=bias.index;
      k=kernel;
      intensity=GetPixelIntensity(image,p+center);
      gamma=0.0;
      j=0;  /* running row offset into the neighborhood buffers */
      if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse))
        {
          /* Opaque path: accumulate unweighted channel values. */
          for (v=0; v < (ssize_t) width; v++)
          {
            for (u=0; u < (ssize_t) width; u++)
            {
              /* A tap contributes only when its luminance is within the
                 contrast threshold of the center pixel. */
              contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity;
              if (fabs(contrast) < threshold)
                {
                  pixel.red+=(*k)*GetPixelRed(p+u+j);
                  pixel.green+=(*k)*GetPixelGreen(p+u+j);
                  pixel.blue+=(*k)*GetPixelBlue(p+u+j);
                  gamma+=(*k);
                }
              k++;
            }
            j+=(ssize_t) (image->columns+width);
          }
          if (gamma != 0.0)
            {
              /* gamma is the sum of participating weights; normalize. */
              gamma=PerceptibleReciprocal(gamma);
              if ((channel & RedChannel) != 0)
                SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
              if ((channel & GreenChannel) != 0)
                SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
              if ((channel & BlueChannel) != 0)
                SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
            }
          if ((channel & OpacityChannel) != 0)
            {
              /* Second pass over the same window for opacity only. */
              gamma=0.0;
              j=0;
              for (v=0; v < (ssize_t) width; v++)
              {
                for (u=0; u < (ssize_t) width; u++)
                {
                  contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity;
                  if (fabs(contrast) < threshold)
                    {
                      pixel.opacity+=(*k)*(p+u+j)->opacity;
                      gamma+=(*k);
                    }
                  k++;
                }
                j+=(ssize_t) (image->columns+width);
              }
              gamma=PerceptibleReciprocal(gamma);
              SetPixelOpacity(q,ClampToQuantum(gamma*pixel.opacity));
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              /* Third pass for the CMYK black (index) channel. */
              gamma=0.0;
              j=0;
              for (v=0; v < (ssize_t) width; v++)
              {
                for (u=0; u < (ssize_t) width; u++)
                {
                  contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity;
                  if (fabs(contrast) < threshold)
                    {
                      pixel.index+=(*k)*GetPixelIndex(indexes+x+u+j);
                      gamma+=(*k);
                    }
                  k++;
                }
                j+=(ssize_t) (image->columns+width);
              }
              gamma=PerceptibleReciprocal(gamma);
              SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*pixel.index));
            }
        }
      else
        {
          MagickRealType
            alpha;

          /*
            Matte path: color channels are alpha-weighted so transparent
            taps do not bleed color into the result.
          */
          for (v=0; v < (ssize_t) width; v++)
          {
            for (u=0; u < (ssize_t) width; u++)
            {
              contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity;
              if (fabs(contrast) < threshold)
                {
                  alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p+u+j));
                  pixel.red+=(*k)*alpha*GetPixelRed(p+u+j);
                  pixel.green+=(*k)*alpha*GetPixelGreen(p+u+j);
                  pixel.blue+=(*k)*alpha*GetPixelBlue(p+u+j);
                  pixel.opacity+=(*k)*GetPixelOpacity(p+u+j);
                  gamma+=(*k)*alpha;
                }
              k++;
            }
            j+=(ssize_t) (image->columns+width);
          }
          if (gamma != 0.0)
            {
              gamma=PerceptibleReciprocal(gamma);
              if ((channel & RedChannel) != 0)
                SetPixelRed(q,ClampToQuantum(gamma*pixel.red));
              if ((channel & GreenChannel) != 0)
                SetPixelGreen(q,ClampToQuantum(gamma*pixel.green));
              if ((channel & BlueChannel) != 0)
                SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue));
            }
          if ((channel & OpacityChannel) != 0)
            {
              j=0;
              for (v=0; v < (ssize_t) width; v++)
              {
                for (u=0; u < (ssize_t) width; u++)
                {
                  contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity;
                  if (fabs(contrast) < threshold)
                    pixel.opacity+=(*k)*GetPixelOpacity(p+u+j);
                  k++;
                }
                j+=(ssize_t) (image->columns+width);
              }
              /* NOTE(review): unlike the opaque path, opacity here is stored
                 without weight normalization -- confirm intended. */
              SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              gamma=0.0;
              j=0;
              for (v=0; v < (ssize_t) width; v++)
              {
                for (u=0; u < (ssize_t) width; u++)
                {
                  contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity;
                  if (fabs(contrast) < threshold)
                    {
                      alpha=(MagickRealType) (QuantumScale*
                        GetPixelAlpha(p+u+j));
                      pixel.index+=(*k)*alpha*GetPixelIndex(indexes+x+u+j);
                      gamma+=(*k);
                    }
                  k++;
                }
                j+=(ssize_t) (image->columns+width);
              }
              gamma=PerceptibleReciprocal(gamma);
              SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*pixel.index));
            }
        }
      p++;
      l++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(blur_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SelectiveBlurImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  luminance_view=DestroyCacheView(luminance_view);
  image_view=DestroyCacheView(image_view);
  luminance_image=DestroyImage(luminance_image);
  kernel=(double *) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShadeImage() shines a distant light on an image to create a
% three-dimensional effect. You control the positioning of the light with
% azimuth and elevation; azimuth is measured in degrees off the x axis
% and elevation is measured in pixels above the Z axis.
%
% The format of the ShadeImage method is:
%
% Image *ShadeImage(const Image *image,const MagickBooleanType gray,
% const double azimuth,const double elevation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o gray: A value other than zero shades the intensity of each pixel.
%
% o azimuth, elevation: Define the light source direction.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadeImage(const Image *image,const MagickBooleanType gray,
  const double azimuth,const double elevation,ExceptionInfo *exception)
{
#define GetShadeIntensity(image,pixel) \
  ClampPixel(GetPixelIntensity((image),(pixel)))
#define ShadeImageTag "Shade/Image"

  CacheView
    *image_view,
    *shade_view;

  Image
    *linear_image,
    *shade_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PrimaryInfo
    light;

  ssize_t
    y;

  /*
    Initialize shaded image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  linear_image=CloneImage(image,0,0,MagickTrue,exception);
  shade_image=CloneImage(image,0,0,MagickTrue,exception);
  if ((linear_image == (Image *) NULL) || (shade_image == (Image *) NULL))
    {
      if (linear_image != (Image *) NULL)
        linear_image=DestroyImage(linear_image);
      if (shade_image != (Image *) NULL)
        shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(shade_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&shade_image->exception);
      linear_image=DestroyImage(linear_image);
      shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  /*
    Compute the light vector from the spherical (azimuth,elevation) angles.
  */
  light.x=(double) QuantumRange*cos(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.y=(double) QuantumRange*sin(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.z=(double) QuantumRange*sin(DegreesToRadians(elevation));
  /*
    Shade image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(linear_image,exception);
  shade_view=AcquireAuthenticCacheView(shade_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(linear_image,shade_image,linear_image->rows,1)
#endif
  for (y=0; y < (ssize_t) linear_image->rows; y++)
  {
    MagickRealType
      distance,
      normal_distance,
      shade;

    PrimaryInfo
      normal;

    register const PixelPacket
      *magick_restrict p,
      *magick_restrict s0,
      *magick_restrict s1,
      *magick_restrict s2;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      p spans rows y-1..y+1 with one extra column each side, so the surface
      normal can be estimated from a 3x3 intensity window.
    */
    p=GetCacheViewVirtualPixels(image_view,-1,y-1,linear_image->columns+2,3,
      exception);
    q=QueueCacheViewAuthenticPixels(shade_view,0,y,shade_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Shade this row of pixels.
    */
    normal.z=2.0*(double) QuantumRange;  /* constant Z of surface normal */
    for (x=0; x < (ssize_t) linear_image->columns; x++)
    {
      /*
        Determine the surface normal and compute shading; s0/s1/s2 point at
        the rows above, at, and below the current pixel.  Note: use
        linear_image->columns for the row stride -- it is the image whose
        pixels p addresses (same extent as image, but keep it consistent).
      */
      s0=p+1;
      s1=s0+linear_image->columns+2;
      s2=s1+linear_image->columns+2;
      normal.x=(double) (GetShadeIntensity(linear_image,s0-1)+
        GetShadeIntensity(linear_image,s1-1)+
        GetShadeIntensity(linear_image,s2-1)-
        GetShadeIntensity(linear_image,s0+1)-
        GetShadeIntensity(linear_image,s1+1)-
        GetShadeIntensity(linear_image,s2+1));
      normal.y=(double) (GetShadeIntensity(linear_image,s2-1)+
        GetShadeIntensity(linear_image,s2)+
        GetShadeIntensity(linear_image,s2+1)-
        GetShadeIntensity(linear_image,s0-1)-
        GetShadeIntensity(linear_image,s0)-
        GetShadeIntensity(linear_image,s0+1));
      if ((fabs(normal.x) <= MagickEpsilon) &&
          (fabs(normal.y) <= MagickEpsilon))
        shade=light.z;  /* flat surface: lit by the Z component only */
      else
        {
          /* Lambertian shading: scaled dot product of normal and light. */
          shade=0.0;
          distance=normal.x*light.x+normal.y*light.y+normal.z*light.z;
          if (distance > MagickEpsilon)
            {
              normal_distance=normal.x*normal.x+normal.y*normal.y+normal.z*
                normal.z;
              if (normal_distance > (MagickEpsilon*MagickEpsilon))
                shade=distance/sqrt((double) normal_distance);
            }
        }
      if (gray != MagickFalse)
        {
          /*
            Clamp before the store: shade is a real value that can fall
            outside [0,QuantumRange] (e.g. negative light.z for a negative
            elevation); an implicit conversion to Quantum would wrap.
          */
          SetPixelRed(q,ClampToQuantum(shade));
          SetPixelGreen(q,ClampToQuantum(shade));
          SetPixelBlue(q,ClampToQuantum(shade));
        }
      else
        {
          SetPixelRed(q,ClampToQuantum(QuantumScale*shade*GetPixelRed(s1)));
          SetPixelGreen(q,ClampToQuantum(QuantumScale*shade*GetPixelGreen(s1)));
          SetPixelBlue(q,ClampToQuantum(QuantumScale*shade*GetPixelBlue(s1)));
        }
      /* Opacity passes through unshaded. */
      SetPixelOpacity(q,GetPixelOpacity(s1));
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(shade_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ShadeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  shade_view=DestroyCacheView(shade_view);
  image_view=DestroyCacheView(image_view);
  linear_image=DestroyImage(linear_image);
  if (status == MagickFalse)
    shade_image=DestroyImage(shade_image);
  return(shade_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a r p e n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SharpenImage() sharpens the image. We convolve the image with a Gaussian
% operator of the given radius and standard deviation (sigma). For
% reasonable results, radius should be larger than sigma. Use a radius of 0
% and SharpenImage() selects a suitable radius for you.
%
% Using a separable kernel would be faster, but the negative weights cancel
% out on the corners of the kernel producing often undesirable ringing in the
% filtered result; this can be avoided by using a 2D gaussian shaped image
% sharpening kernel instead.
%
% The format of the SharpenImage method is:
%
% Image *SharpenImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
% Image *SharpenImageChannel(const Image *image,const ChannelType channel,
% const double radius,const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Laplacian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: sharpen all default channels with the given
    Gaussian parameters.
  */
  return(SharpenImageChannel(image,DefaultChannels,radius,sigma,exception));
}
MagickExport Image *SharpenImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  ExceptionInfo *exception)
{
  double
    scale,
    sum;

  Image
    *sharp_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    k;

  size_t
    width;

  ssize_t
    mid,
    u,
    v;

  /*
    Build a width x width "negative Gaussian" convolution kernel whose
    center weight dominates, then convolve the image with it.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  (void) memset(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel_info->width,kernel_info->height*sizeof(*kernel_info->values)));
  if (kernel_info->values == (double *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /* Fill with negated Gaussian weights and track their sum. */
  sum=0.0;
  mid=(ssize_t) (kernel_info->width-1)/2;
  k=0;
  for (v=(-mid); v <= mid; v++)
  {
    for (u=(-mid); u <= mid; u++)
    {
      kernel_info->values[k]=(double) (-exp(-((double) u*u+v*v)/(2.0*
        MagickSigma*MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
      sum+=kernel_info->values[k];
      k++;
    }
  }
  /*
    Overwrite the center tap so the kernel sharpens rather than blurs,
    then rescale every weight so the kernel sums to unity.
  */
  kernel_info->values[k/2]=(double) ((-2.0)*sum);
  sum=0.0;
  for (k=0; k < (ssize_t) (kernel_info->width*kernel_info->height); k++)
    sum+=kernel_info->values[k];
  scale=PerceptibleReciprocal(sum);
  for (k=0; k < (ssize_t) (kernel_info->width*kernel_info->height); k++)
    kernel_info->values[k]*=scale;
  sharp_image=MorphologyImageChannel(image,channel,ConvolveMorphology,1,
    kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(sharp_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p r e a d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SpreadImage() is a special effects method that randomly displaces each
% pixel in a block defined by the radius parameter.
%
% The format of the SpreadImage method is:
%
% Image *SpreadImage(const Image *image,const double radius,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: Choose a random pixel in a neighborhood of this extent.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpreadImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
#define SpreadImageTag "Spread/Image"

  CacheView
    *image_view,
    *spread_view;

  Image
    *spread_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  RandomInfo
    **magick_restrict random_info;

  size_t
    width;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Initialize spread image attributes.  Each destination pixel is sampled
    from a randomly displaced location in a `width'-sized neighborhood of
    the corresponding source pixel.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  spread_image=CloneImage(image,0,0,MagickTrue,exception);
  if (spread_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(spread_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&spread_image->exception);
      spread_image=DestroyImage(spread_image);
      return((Image *) NULL);
    }
  /*
    Spread image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(spread_image,&bias);
  /* Extent of the random displacement neighborhood. */
  width=GetOptimalKernelWidth1D(radius,0.5);
  /* One PRNG per thread so parallel rows draw independent streams. */
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireVirtualCacheView(image,exception);
  spread_view=AcquireAuthenticCacheView(spread_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  /* NOTE(review): parallelism appears gated on key == ~0UL so explicitly
     seeded runs stay reproducible -- confirm magick_number_threads
     semantics. */
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,spread_image,spread_image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) spread_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickPixelPacket
      pixel;

    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(spread_view,0,y,spread_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(spread_view);
    pixel=bias;
    for (x=0; x < (ssize_t) spread_image->columns; x++)
    {
      PointInfo
        point;

      /*
        Draw two pseudo-random offsets and interpolate the source image at
        the displaced point; (point-0.5) centers the displacement on the
        current pixel.
      */
      point.x=GetPseudoRandomValue(random_info[id]);
      point.y=GetPseudoRandomValue(random_info[id]);
      status=InterpolateMagickPixelPacket(image,image_view,image->interpolate,
        (double) x+width*(point.x-0.5),(double) y+width*(point.y-0.5),&pixel,
        exception);
      if (status == MagickFalse)
        break;
      SetPixelPacket(spread_image,&pixel,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(spread_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SpreadImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  spread_view=DestroyCacheView(spread_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    spread_image=DestroyImage(spread_image);
  return(spread_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n s h a r p M a s k I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnsharpMaskImage() sharpens one or more image channels. We convolve the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and UnsharpMaskImage() selects a suitable radius for you.
%
% The format of the UnsharpMaskImage method is:
%
% Image *UnsharpMaskImage(const Image *image,const double radius,
% const double sigma,const double amount,const double threshold,
% ExceptionInfo *exception)
% Image *UnsharpMaskImageChannel(const Image *image,
% const ChannelType channel,const double radius,const double sigma,
% const double gain,const double threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o radius: the radius of the Gaussian, in pixels, not counting the center
% pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o gain: the percentage of the difference between the original and the
% blur image that is added back into the original.
%
% o threshold: the threshold in pixels needed to apply the difference gain.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *UnsharpMaskImage(const Image *image,const double radius,
  const double sigma,const double gain,const double threshold,
  ExceptionInfo *exception)
{
  /*
    Convenience wrapper: unsharp-mask all default channels.
  */
  return(UnsharpMaskImageChannel(image,DefaultChannels,radius,sigma,gain,
    threshold,exception));
}
MagickExport Image *UnsharpMaskImageChannel(const Image *image,
  const ChannelType channel,const double radius,const double sigma,
  const double gain,const double threshold,ExceptionInfo *exception)
{
#define SharpenImageTag "Sharpen/Image"

  CacheView
    *image_view,
    *unsharp_view;

  Image
    *unsharp_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    bias;

  MagickRealType
    quantum_threshold;

  ssize_t
    y;

  /*
    Blur the selected channels, then add back the thresholded difference
    between the original and the blur, amplified by `gain'.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
/* This kernel appears to be broken.
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  unsharp_image=AccelerateUnsharpMaskImage(image,channel,radius,sigma,gain,
    threshold,exception);
  if (unsharp_image != (Image *) NULL)
    return(unsharp_image);
#endif
*/
  unsharp_image=BlurImageChannel(image,(ChannelType) (channel &~ SyncChannels),
    radius,sigma,exception);
  if (unsharp_image == (Image *) NULL)
    return((Image *) NULL);
  /* threshold is a fraction of the quantum range. */
  quantum_threshold=(MagickRealType) QuantumRange*threshold;
  /*
    Unsharp-mask image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&bias);
  image_view=AcquireVirtualCacheView(image,exception);
  unsharp_view=AcquireAuthenticCacheView(unsharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,unsharp_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    DoublePixelPacket
      pixel;

    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict unsharp_indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* p is the original row; q is the blurred row, updated in place. */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(unsharp_view,0,y,unsharp_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    unsharp_indexes=GetCacheViewAuthenticIndexQueue(unsharp_view);
    pixel.red=bias.red;
    pixel.green=bias.green;
    pixel.blue=bias.blue;
    pixel.opacity=bias.opacity;
    pixel.index=bias.index;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Per channel: difference = original - blurred.  If twice the
        difference is below the threshold keep the original value,
        otherwise add the gain-scaled difference back to the original.
        (Accessor macros used consistently for every channel.)
      */
      if ((channel & RedChannel) != 0)
        {
          pixel.red=GetPixelRed(p)-(MagickRealType) GetPixelRed(q);
          if (fabs(2.0*pixel.red) < quantum_threshold)
            pixel.red=(MagickRealType) GetPixelRed(p);
          else
            pixel.red=(MagickRealType) GetPixelRed(p)+(pixel.red*gain);
          SetPixelRed(q,ClampToQuantum(pixel.red));
        }
      if ((channel & GreenChannel) != 0)
        {
          pixel.green=GetPixelGreen(p)-(MagickRealType) GetPixelGreen(q);
          if (fabs(2.0*pixel.green) < quantum_threshold)
            pixel.green=(MagickRealType) GetPixelGreen(p);
          else
            pixel.green=(MagickRealType) GetPixelGreen(p)+(pixel.green*gain);
          SetPixelGreen(q,ClampToQuantum(pixel.green));
        }
      if ((channel & BlueChannel) != 0)
        {
          pixel.blue=GetPixelBlue(p)-(MagickRealType) GetPixelBlue(q);
          if (fabs(2.0*pixel.blue) < quantum_threshold)
            pixel.blue=(MagickRealType) GetPixelBlue(p);
          else
            pixel.blue=(MagickRealType) GetPixelBlue(p)+(pixel.blue*gain);
          SetPixelBlue(q,ClampToQuantum(pixel.blue));
        }
      if ((channel & OpacityChannel) != 0)
        {
          pixel.opacity=GetPixelOpacity(p)-(MagickRealType)
            GetPixelOpacity(q);
          if (fabs(2.0*pixel.opacity) < quantum_threshold)
            pixel.opacity=(MagickRealType) GetPixelOpacity(p);
          else
            pixel.opacity=(MagickRealType) GetPixelOpacity(p)+
              (pixel.opacity*gain);
          SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          pixel.index=GetPixelIndex(indexes+x)-(MagickRealType)
            GetPixelIndex(unsharp_indexes+x);
          if (fabs(2.0*pixel.index) < quantum_threshold)
            pixel.index=(MagickRealType) GetPixelIndex(indexes+x);
          else
            pixel.index=(MagickRealType) GetPixelIndex(indexes+x)+
              (pixel.index*gain);
          SetPixelIndex(unsharp_indexes+x,ClampToQuantum(pixel.index));
        }
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(unsharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SharpenImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  unsharp_image->type=image->type;
  unsharp_view=DestroyCacheView(unsharp_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    unsharp_image=DestroyImage(unsharp_image);
  return(unsharp_image);
}
//===----- CGOpenMPRuntime.h - Interface to OpenMP Runtimes -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides a class for OpenMP runtime code generation.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
#define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
#include "CGValue.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/GlobalDecl.h"
#include "clang/AST/Type.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/AtomicOrdering.h"
namespace llvm {
class ArrayType;
class Constant;
class FunctionType;
class GlobalVariable;
class StructType;
class Type;
class Value;
class OpenMPIRBuilder;
} // namespace llvm
namespace clang {
class Expr;
class OMPDependClause;
class OMPExecutableDirective;
class OMPLoopDirective;
class VarDecl;
class OMPDeclareReductionDecl;
class IdentifierInfo;
namespace CodeGen {
class Address;
class CodeGenFunction;
class CodeGenModule;
/// A basic class for pre|post-action for advanced codegen sequence for OpenMP
/// region.
class PrePostActionTy {
public:
  explicit PrePostActionTy() = default;
  /// Invoked immediately before the region body is emitted; no-op by default.
  virtual void Enter(CodeGenFunction &CGF) {}
  /// Invoked immediately after the region body is emitted; no-op by default.
  virtual void Exit(CodeGenFunction &CGF) {}
  virtual ~PrePostActionTy() = default;
};
/// Class provides a way to call simple version of codegen for OpenMP region, or
/// an advanced with possible pre|post-actions in codegen.
class RegionCodeGenTy final {
  /// Type-erased address of the user-provided callable. The callable itself
  /// is NOT copied: it must outlive this RegionCodeGenTy object.
  intptr_t CodeGen;
  /// Trampoline signature used to re-invoke the erased callable.
  using CodeGenTy = void (*)(intptr_t, CodeGenFunction &, PrePostActionTy &);
  CodeGenTy Callback;
  /// Optional pre/post action; settable after construction (hence mutable).
  mutable PrePostActionTy *PrePostAction;
  RegionCodeGenTy() = delete;
  RegionCodeGenTy &operator=(const RegionCodeGenTy &) = delete;
  /// Trampoline: casts the erased pointer back to \p Callable and invokes it.
  template <typename Callable>
  static void CallbackFn(intptr_t CodeGen, CodeGenFunction &CGF,
                         PrePostActionTy &Action) {
    return (*reinterpret_cast<Callable *>(CodeGen))(CGF, Action);
  }

public:
  /// Wraps \p CodeGen by address. The enable_if guard keeps this constructor
  /// from hijacking copy construction from another RegionCodeGenTy.
  template <typename Callable>
  RegionCodeGenTy(
      Callable &&CodeGen,
      std::enable_if_t<!std::is_same<std::remove_reference_t<Callable>,
                                     RegionCodeGenTy>::value> * = nullptr)
      : CodeGen(reinterpret_cast<intptr_t>(&CodeGen)),
        Callback(CallbackFn<std::remove_reference_t<Callable>>),
        PrePostAction(nullptr) {}
  /// Registers an action whose Enter/Exit bracket the emitted region.
  void setAction(PrePostActionTy &Action) const { PrePostAction = &Action; }
  void operator()(CodeGenFunction &CGF) const;
};
/// Clause-derived data bundle used when emitting an OpenMP task directive.
struct OMPTaskDataTy final {
  /// 'private' clause: original variables and their copies.
  SmallVector<const Expr *, 4> PrivateVars;
  SmallVector<const Expr *, 4> PrivateCopies;
  /// 'firstprivate' clause: originals, copies and copy-initializers
  /// (lists are parallel by index).
  SmallVector<const Expr *, 4> FirstprivateVars;
  SmallVector<const Expr *, 4> FirstprivateCopies;
  SmallVector<const Expr *, 4> FirstprivateInits;
  /// 'lastprivate' clause: originals and their copies.
  SmallVector<const Expr *, 4> LastprivateVars;
  SmallVector<const Expr *, 4> LastprivateCopies;
  /// 'reduction' clause: variables, origins, copies and combiner operations.
  SmallVector<const Expr *, 4> ReductionVars;
  SmallVector<const Expr *, 4> ReductionOrigs;
  SmallVector<const Expr *, 4> ReductionCopies;
  SmallVector<const Expr *, 4> ReductionOps;
  /// One 'depend' clause: its kind, optional iterator and dependency exprs.
  struct DependData {
    OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
    const Expr *IteratorExpr = nullptr;
    SmallVector<const Expr *, 4> DepExprs;
    explicit DependData() = default;
    DependData(OpenMPDependClauseKind DepKind, const Expr *IteratorExpr)
        : DepKind(DepKind), IteratorExpr(IteratorExpr) {}
  };
  SmallVector<DependData, 4> Dependences;
  /// Emitted clause values packed with a boolean flag
  /// (NOTE(review): flag semantics defined where these are set — confirm).
  llvm::PointerIntPair<llvm::Value *, 1, bool> Final;
  llvm::PointerIntPair<llvm::Value *, 1, bool> Schedule;
  llvm::PointerIntPair<llvm::Value *, 1, bool> Priority;
  /// Emitted task-reduction descriptor, if any.
  llvm::Value *Reductions = nullptr;
  unsigned NumberOfParts = 0;
  /// Task-creation flags.
  bool Tied = true;
  bool Nogroup = false;
  bool IsReductionWithTaskMod = false;
  bool IsWorksharingReduction = false;
};
/// Class intended to support codegen of all kind of the reduction clauses.
class ReductionCodeGen {
private:
  /// Data required for codegen of reduction clauses.
  struct ReductionData {
    /// Reference to the item shared between tasks to reduce into.
    const Expr *Shared = nullptr;
    /// Reference to the original item.
    const Expr *Ref = nullptr;
    /// Helper expression for generation of private copy.
    const Expr *Private = nullptr;
    /// Helper expression for generation of the reduction operation.
    const Expr *ReductionOp = nullptr;
    ReductionData(const Expr *Shared, const Expr *Ref, const Expr *Private,
                  const Expr *ReductionOp)
        : Shared(Shared), Ref(Ref), Private(Private), ReductionOp(ReductionOp) {
    }
  };
  /// List of reduction-based clauses.
  SmallVector<ReductionData, 4> ClausesData;
  /// List of addresses of shared variables/expressions.
  SmallVector<std::pair<LValue, LValue>, 4> SharedAddresses;
  /// List of addresses of original variables/expressions.
  SmallVector<std::pair<LValue, LValue>, 4> OrigAddresses;
  /// Sizes of the reduction items in chars.
  SmallVector<std::pair<llvm::Value *, llvm::Value *>, 4> Sizes;
  /// Base declarations for the reduction items.
  SmallVector<const VarDecl *, 4> BaseDecls;

  /// Emits lvalue for shared expression.
  LValue emitSharedLValue(CodeGenFunction &CGF, const Expr *E);
  /// Emits upper bound for shared expression (if array section).
  LValue emitSharedLValueUB(CodeGenFunction &CGF, const Expr *E);
  /// Performs aggregate initialization.
  /// \param N Number of reduction item in the common list.
  /// \param PrivateAddr Address of the corresponding private item.
  /// \param SharedLVal Address of the original shared variable.
  /// \param DRD Declare reduction construct used for reduction item.
  void emitAggregateInitialization(CodeGenFunction &CGF, unsigned N,
                                   Address PrivateAddr, LValue SharedLVal,
                                   const OMPDeclareReductionDecl *DRD);

public:
  /// Builds the per-clause data from the four parallel expression lists
  /// (one element per reduction item).
  ReductionCodeGen(ArrayRef<const Expr *> Shareds, ArrayRef<const Expr *> Origs,
                   ArrayRef<const Expr *> Privates,
                   ArrayRef<const Expr *> ReductionOps);
  /// Emits lvalue for the shared and original reduction item.
  /// \param N Number of the reduction item.
  void emitSharedOrigLValue(CodeGenFunction &CGF, unsigned N);
  /// Emits the code for the variable-modified type, if required.
  /// \param N Number of the reduction item.
  void emitAggregateType(CodeGenFunction &CGF, unsigned N);
  /// Emits the code for the variable-modified type, if required.
  /// \param N Number of the reduction item.
  /// \param Size Size of the type in chars.
  void emitAggregateType(CodeGenFunction &CGF, unsigned N, llvm::Value *Size);
  /// Performs initialization of the private copy for the reduction item.
  /// \param N Number of the reduction item.
  /// \param PrivateAddr Address of the corresponding private item.
  /// \param DefaultInit Default initialization sequence that should be
  /// performed if no reduction specific initialization is found.
  /// \param SharedLVal Address of the original shared variable.
  void
  emitInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr,
                     LValue SharedLVal,
                     llvm::function_ref<bool(CodeGenFunction &)> DefaultInit);
  /// Returns true if the private copy requires cleanups.
  bool needCleanups(unsigned N);
  /// Emits cleanup code for the reduction item.
  /// \param N Number of the reduction item.
  /// \param PrivateAddr Address of the corresponding private item.
  void emitCleanups(CodeGenFunction &CGF, unsigned N, Address PrivateAddr);
  /// Adjusts \p PrivateAddr for using instead of the original variable
  /// address in normal operations.
  /// \param N Number of the reduction item.
  /// \param PrivateAddr Address of the corresponding private item.
  Address adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
                               Address PrivateAddr);
  /// Returns LValue for the reduction item.
  LValue getSharedLValue(unsigned N) const { return SharedAddresses[N].first; }
  /// Returns LValue for the original reduction item.
  LValue getOrigLValue(unsigned N) const { return OrigAddresses[N].first; }
  /// Returns the size of the reduction item (in chars and total number of
  /// elements in the item), or nullptr, if the size is a constant.
  std::pair<llvm::Value *, llvm::Value *> getSizes(unsigned N) const {
    return Sizes[N];
  }
  /// Returns the base declaration of the reduction item.
  const VarDecl *getBaseDecl(unsigned N) const { return BaseDecls[N]; }
  /// Returns the reference expression (original item) of the reduction item.
  const Expr *getRefExpr(unsigned N) const { return ClausesData[N].Ref; }
  /// Returns true if the initialization of the reduction item uses initializer
  /// from declare reduction construct.
  bool usesReductionInitializer(unsigned N) const;
};
class CGOpenMPRuntime {
public:
/// Allows to disable automatic handling of functions used in target regions
/// as those marked as `omp declare target`.
class DisableAutoDeclareTargetRAII {
  CodeGenModule &CGM;
  /// Saved ShouldMarkAsGlobal value; by RAII convention the destructor
  /// restores it (ctor/dtor are defined out of line — confirm there).
  bool SavedShouldMarkAsGlobal;

public:
  DisableAutoDeclareTargetRAII(CodeGenModule &CGM);
  ~DisableAutoDeclareTargetRAII();
};
/// Manages list of nontemporal decls for the specified directive.
class NontemporalDeclsRAII {
  CodeGenModule &CGM;
  /// NOTE(review): presumably records whether a set was pushed onto
  /// NontemporalDeclsStack so the destructor knows to pop — confirm in impl.
  const bool NeedToPush;

public:
  NontemporalDeclsRAII(CodeGenModule &CGM, const OMPLoopDirective &S);
  ~NontemporalDeclsRAII();
};
/// Maps the expression for the lastprivate variable to the global copy used
/// to store new value because original variables are not mapped in inner
/// parallel regions. Only private copies are captured but we need also to
/// store private copy in shared address.
/// Also, stores the expression for the private loop counter and its
/// threadprivate name.
struct LastprivateConditionalData {
  /// Unique (threadprivate) name for each tracked declaration.
  llvm::MapVector<CanonicalDeclPtr<const Decl>, SmallString<16>>
      DeclToUniqueName;
  /// LValue of the loop counter.
  LValue IVLVal;
  /// Function this data was collected for — confirm against implementation.
  llvm::Function *Fn = nullptr;
  /// True while analysis of inner regions is disabled.
  bool Disabled = false;
};
/// Manages list of lastprivate conditional decls for the specified directive.
class LastprivateConditionalRAII {
  enum class ActionToDo {
    DoNotPush,
    PushAsLastprivateConditional,
    DisableLastprivateConditional,
  };
  CodeGenModule &CGM;
  /// Action chosen at construction; presumably undone by the destructor.
  ActionToDo Action = ActionToDo::DoNotPush;

  /// Check and try to disable analysis of inner regions for changes in
  /// lastprivate conditional.
  void tryToDisableInnerAnalysis(const OMPExecutableDirective &S,
                                 llvm::DenseSet<CanonicalDeclPtr<const Decl>>
                                     &NeedToAddForLPCsAsDisabled) const;

  /// Private constructor; used by the disable() factory below.
  LastprivateConditionalRAII(CodeGenFunction &CGF,
                             const OMPExecutableDirective &S);

public:
  explicit LastprivateConditionalRAII(CodeGenFunction &CGF,
                                      const OMPExecutableDirective &S,
                                      LValue IVLVal);
  /// Creates an RAII object for \p S that disables lastprivate-conditional
  /// handling (see ActionToDo::DisableLastprivateConditional).
  static LastprivateConditionalRAII disable(CodeGenFunction &CGF,
                                            const OMPExecutableDirective &S);
  ~LastprivateConditionalRAII();
};
/// Returns the OpenMPIRBuilder instance owned by this runtime.
llvm::OpenMPIRBuilder &getOMPBuilder() { return OMPBuilder; }

protected:
  CodeGenModule &CGM;
  /// Separators used when composing internal variable names (first component
  /// vs. subsequent components) — see the name-redefining constructor below.
  StringRef FirstSeparator, Separator;

  /// Constructor allowing to redefine the name separator for the variables.
  explicit CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
                           StringRef Separator);

  /// Creates offloading entry for the provided entry ID \a ID,
  /// address \a Addr, size \a Size, and flags \a Flags.
  virtual void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr,
                                  uint64_t Size, int32_t Flags,
                                  llvm::GlobalValue::LinkageTypes Linkage);

  /// Helper to emit outlined function for 'target' directive.
  /// \param D Directive to emit.
  /// \param ParentName Name of the function that encloses the target region.
  /// \param OutlinedFn Outlined function value to be defined by this call.
  /// \param OutlinedFnID Outlined function ID value to be defined by this call.
  /// \param IsOffloadEntry True if the outlined function is an offload entry.
  /// \param CodeGen Lambda codegen specific to an accelerator device.
  /// An outlined function may not be an entry if, e.g. the if clause always
  /// evaluates to false.
  virtual void emitTargetOutlinedFunctionHelper(const OMPExecutableDirective &D,
                                                StringRef ParentName,
                                                llvm::Function *&OutlinedFn,
                                                llvm::Constant *&OutlinedFnID,
                                                bool IsOffloadEntry,
                                                const RegionCodeGenTy &CodeGen);

  /// Emits object of ident_t type with info for source location.
  /// \param Flags Flags for OpenMP location.
  llvm::Value *emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc,
                                  unsigned Flags = 0);

  /// Returns pointer to ident_t type.
  llvm::Type *getIdentTyPointerTy();

  /// Gets thread id value for the current thread.
  llvm::Value *getThreadID(CodeGenFunction &CGF, SourceLocation Loc);

  /// Get the function name of an outlined region.
  // The name can be customized depending on the target.
  virtual StringRef getOutlinedHelperName() const { return ".omp_outlined."; }

  /// Emits \p Callee function call with arguments \p Args with location \p Loc.
  void emitCall(CodeGenFunction &CGF, SourceLocation Loc,
                llvm::FunctionCallee Callee,
                ArrayRef<llvm::Value *> Args = llvm::None) const;

  /// Emits address of the word in a memory where current thread id is
  /// stored.
  virtual Address emitThreadIDAddress(CodeGenFunction &CGF, SourceLocation Loc);

  /// Sets the insertion point for service instructions of \p CGF
  /// (see DebugLocThreadIdTy::ServiceInsertPt).
  void setLocThreadIdInsertPt(CodeGenFunction &CGF,
                              bool AtCurrentPoint = false);
  /// Clears the service-instruction insertion point for \p CGF.
  void clearLocThreadIdInsertPt(CodeGenFunction &CGF);

  /// Check if the default location must be constant.
  /// Default is false to support OMPT/OMPD.
  virtual bool isDefaultLocationConstant() const { return false; }

  /// Returns additional flags that can be stored in reserved_2 field of the
  /// default location.
  virtual unsigned getDefaultLocationReserved2Flags() const { return 0; }

  /// Returns default flags for the barriers depending on the directive, for
  /// which this barrier is going to be emitted.
  static unsigned getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind);

  /// Get the LLVM type for the critical name.
  llvm::ArrayType *getKmpCriticalNameTy() const {return KmpCriticalNameTy;}

  /// Returns corresponding lock object for the specified critical region
  /// name. If the lock object does not exist it is created, otherwise the
  /// reference to the existing copy is returned.
  /// \param CriticalName Name of the critical region.
  llvm::Value *getCriticalRegionLock(StringRef CriticalName);
private:
  /// An OpenMP-IR-Builder instance.
  llvm::OpenMPIRBuilder OMPBuilder;

  /// Default const ident_t object used for initialization of all other
  /// ident_t objects.
  llvm::Constant *DefaultOpenMPPSource = nullptr;
  /// Key type of the default-location map: a pair of location flags.
  using FlagsTy = std::pair<unsigned, unsigned>;
  /// Map of flags and corresponding default locations.
  using OpenMPDefaultLocMapTy = llvm::DenseMap<FlagsTy, llvm::Value *>;
  OpenMPDefaultLocMapTy OpenMPDefaultLocMap;
  /// Returns (creating on first use) the default location for \p Flags.
  Address getOrCreateDefaultLocation(unsigned Flags);

  /// QualType and LLVM struct type corresponding to ident_t.
  QualType IdentQTy;
  llvm::StructType *IdentTy = nullptr;
  /// Map for SourceLocation and OpenMP runtime library debug locations.
  typedef llvm::DenseMap<unsigned, llvm::Value *> OpenMPDebugLocMapTy;
  OpenMPDebugLocMapTy OpenMPDebugLocMap;
  /// The type for a microtask which gets passed to __kmpc_fork_call().
  /// Original representation is:
  /// typedef void (kmpc_micro)(kmp_int32 global_tid, kmp_int32 bound_tid,...);
  llvm::FunctionType *Kmpc_MicroTy = nullptr;
  /// Stores debug location and ThreadID for the function.
  struct DebugLocThreadIdTy {
    llvm::Value *DebugLoc;
    llvm::Value *ThreadID;
    /// Insert point for the service instructions.
    llvm::AssertingVH<llvm::Instruction> ServiceInsertPt = nullptr;
  };
  /// Map of local debug location, ThreadId and functions.
  typedef llvm::DenseMap<llvm::Function *, DebugLocThreadIdTy>
      OpenMPLocThreadIDMapTy;
  OpenMPLocThreadIDMapTy OpenMPLocThreadIDMap;
  /// Map of UDRs and corresponding combiner/initializer.
  typedef llvm::DenseMap<const OMPDeclareReductionDecl *,
                         std::pair<llvm::Function *, llvm::Function *>>
      UDRMapTy;
  UDRMapTy UDRMap;
  /// Map of functions and locally defined UDRs.
  typedef llvm::DenseMap<llvm::Function *,
                         SmallVector<const OMPDeclareReductionDecl *, 4>>
      FunctionUDRMapTy;
  FunctionUDRMapTy FunctionUDRMap;
  /// Map from the user-defined mapper declaration to its corresponding
  /// functions.
  llvm::DenseMap<const OMPDeclareMapperDecl *, llvm::Function *> UDMMap;
  /// Map of functions and their local user-defined mappers.
  using FunctionUDMMapTy =
      llvm::DenseMap<llvm::Function *,
                     SmallVector<const OMPDeclareMapperDecl *, 4>>;
  FunctionUDMMapTy FunctionUDMMap;
  /// Maps local variables marked as lastprivate conditional to their internal
  /// types.
  llvm::DenseMap<llvm::Function *,
                 llvm::DenseMap<CanonicalDeclPtr<const Decl>,
                                std::tuple<QualType, const FieldDecl *,
                                           const FieldDecl *, LValue>>>
      LastprivateConditionalToTypes;
  /// Type kmp_critical_name, originally defined as typedef kmp_int32
  /// kmp_critical_name[8];
  llvm::ArrayType *KmpCriticalNameTy;
  /// An ordered map of auto-generated variables to their unique names.
  /// It stores variables with the following names: 1) ".gomp_critical_user_" +
  /// <critical_section_name> + ".var" for "omp critical" directives; 2)
  /// <mangled_name_for_global_var> + ".cache." for cache for threadprivate
  /// variables.
  llvm::StringMap<llvm::AssertingVH<llvm::Constant>, llvm::BumpPtrAllocator>
      InternalVars;
  /// Type typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *);
  llvm::Type *KmpRoutineEntryPtrTy = nullptr;
  QualType KmpRoutineEntryPtrQTy;
  /// Type typedef struct kmp_task {
  ///    void *              shareds; /**< pointer to block of pointers to
  ///                                      shared vars */
  ///    kmp_routine_entry_t routine; /**< pointer to routine to call for
  ///                                      executing task */
  ///    kmp_int32           part_id; /**< part id for the task */
  ///    kmp_routine_entry_t destructors; /* pointer to function to invoke
  ///                                  deconstructors of firstprivate C++
  ///                                  objects */
  /// } kmp_task_t;
  QualType KmpTaskTQTy;
  /// Saved kmp_task_t for task directive.
  QualType SavedKmpTaskTQTy;
  /// Saved kmp_task_t for taskloop-based directive.
  QualType SavedKmpTaskloopTQTy;
  /// Type typedef struct kmp_depend_info {
  ///    kmp_intptr_t               base_addr;
  ///    size_t                     len;
  ///    struct {
  ///             bool                   in:1;
  ///             bool                   out:1;
  ///    } flags;
  /// } kmp_depend_info_t;
  QualType KmpDependInfoTy;
  /// Type typedef struct kmp_task_affinity_info {
  ///    kmp_intptr_t base_addr;
  ///    size_t len;
  ///    struct {
  ///      bool flag1 : 1;
  ///      bool flag2 : 1;
  ///      kmp_int32 reserved : 30;
  ///   } flags;
  /// } kmp_task_affinity_info_t;
  QualType KmpTaskAffinityInfoTy;
  /// struct kmp_dim {  // loop bounds info casted to kmp_int64
  ///  kmp_int64 lo; // lower
  ///  kmp_int64 up; // upper
  ///  kmp_int64 st; // stride
  /// };
  QualType KmpDimTy;
  /// Type struct __tgt_offload_entry{
  ///   void      *addr;       // Pointer to the offload entry info.
  ///                          // (function or global)
  ///   char      *name;       // Name of the function or global.
  ///   size_t     size;       // Size of the entry info (0 if it a function).
  ///   int32_t    flags;
  ///   int32_t    reserved;
  /// };
  QualType TgtOffloadEntryQTy;
/// Entity that registers the offloading constants that were emitted so
/// far.
class OffloadEntriesInfoManagerTy {
  CodeGenModule &CGM;

  /// Number of entries registered so far.
  unsigned OffloadingEntriesNum = 0;

public:
  /// Base class of the entries info.
  class OffloadEntryInfo {
  public:
    /// Kind of a given entry.
    enum OffloadingEntryInfoKinds : unsigned {
      /// Entry is a target region.
      OffloadingEntryInfoTargetRegion = 0,
      /// Entry is a declare target variable.
      OffloadingEntryInfoDeviceGlobalVar = 1,
      /// Invalid entry info.
      OffloadingEntryInfoInvalid = ~0u
    };

  protected:
    OffloadEntryInfo() = delete;
    explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind) : Kind(Kind) {}
    explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order,
                              uint32_t Flags)
        : Flags(Flags), Order(Order), Kind(Kind) {}
    ~OffloadEntryInfo() = default;

  public:
    /// An entry is valid once an Order has been assigned.
    bool isValid() const { return Order != ~0u; }
    unsigned getOrder() const { return Order; }
    OffloadingEntryInfoKinds getKind() const { return Kind; }
    uint32_t getFlags() const { return Flags; }
    void setFlags(uint32_t NewFlags) { Flags = NewFlags; }
    llvm::Constant *getAddress() const {
      return cast_or_null<llvm::Constant>(Addr);
    }
    /// Sets the entry address; may be set only once.
    void setAddress(llvm::Constant *V) {
      assert(!Addr.pointsToAliveValue() && "Address has been set before!");
      Addr = V;
    }
    /// Root of the classof hierarchy: every entry info matches.
    static bool classof(const OffloadEntryInfo *Info) { return true; }

  private:
    /// Address of the entity that has to be mapped for offloading.
    llvm::WeakTrackingVH Addr;

    /// Flags associated with the device global.
    uint32_t Flags = 0u;

    /// Order this entry was emitted.
    unsigned Order = ~0u;

    /// Kind discriminator for the entry (see classof above).
    OffloadingEntryInfoKinds Kind = OffloadingEntryInfoInvalid;
  };

  /// Return true if there are no entries defined.
  bool empty() const;
  /// Return number of entries defined so far.
  unsigned size() const { return OffloadingEntriesNum; }

  OffloadEntriesInfoManagerTy(CodeGenModule &CGM) : CGM(CGM) {}

  //
  // Target region entries related.
  //

  /// Kind of the target registry entry.
  enum OMPTargetRegionEntryKind : uint32_t {
    /// Mark the entry as target region.
    OMPTargetRegionEntryTargetRegion = 0x0,
    /// Mark the entry as a global constructor.
    OMPTargetRegionEntryCtor = 0x02,
    /// Mark the entry as a global destructor.
    OMPTargetRegionEntryDtor = 0x04,
  };

  /// Target region entries info.
  class OffloadEntryInfoTargetRegion final : public OffloadEntryInfo {
    /// Address that can be used as the ID of the entry.
    llvm::Constant *ID = nullptr;

  public:
    OffloadEntryInfoTargetRegion()
        : OffloadEntryInfo(OffloadingEntryInfoTargetRegion) {}
    explicit OffloadEntryInfoTargetRegion(unsigned Order,
                                          llvm::Constant *Addr,
                                          llvm::Constant *ID,
                                          OMPTargetRegionEntryKind Flags)
        : OffloadEntryInfo(OffloadingEntryInfoTargetRegion, Order, Flags),
          ID(ID) {
      setAddress(Addr);
    }

    llvm::Constant *getID() const { return ID; }
    /// Sets the entry ID; may be set only once.
    void setID(llvm::Constant *V) {
      assert(!ID && "ID has been set before!");
      ID = V;
    }
    static bool classof(const OffloadEntryInfo *Info) {
      return Info->getKind() == OffloadingEntryInfoTargetRegion;
    }
  };

  /// Initialize target region entry.
  void initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                       StringRef ParentName, unsigned LineNum,
                                       unsigned Order);
  /// Register target region entry.
  void registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                     StringRef ParentName, unsigned LineNum,
                                     llvm::Constant *Addr, llvm::Constant *ID,
                                     OMPTargetRegionEntryKind Flags);
  /// Return true if a target region entry with the provided information
  /// exists.
  bool hasTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                StringRef ParentName, unsigned LineNum) const;
  /// Applies action \a Action on all registered entries.
  typedef llvm::function_ref<void(unsigned, unsigned, StringRef, unsigned,
                                  const OffloadEntryInfoTargetRegion &)>
      OffloadTargetRegionEntryInfoActTy;
  void actOnTargetRegionEntriesInfo(
      const OffloadTargetRegionEntryInfoActTy &Action);

  //
  // Device global variable entries related.
  //

  /// Kind of the global variable entry.
  enum OMPTargetGlobalVarEntryKind : uint32_t {
    /// Mark the entry as a 'declare target to' variable.
    OMPTargetGlobalVarEntryTo = 0x0,
    /// Mark the entry as a 'declare target link' variable.
    OMPTargetGlobalVarEntryLink = 0x1,
  };

  /// Device global variable entries info.
  class OffloadEntryInfoDeviceGlobalVar final : public OffloadEntryInfo {
    /// Size of the global variable.
    CharUnits VarSize;
    llvm::GlobalValue::LinkageTypes Linkage;

  public:
    OffloadEntryInfoDeviceGlobalVar()
        : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar) {}
    explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order,
                                             OMPTargetGlobalVarEntryKind Flags)
        : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags) {}
    explicit OffloadEntryInfoDeviceGlobalVar(
        unsigned Order, llvm::Constant *Addr, CharUnits VarSize,
        OMPTargetGlobalVarEntryKind Flags,
        llvm::GlobalValue::LinkageTypes Linkage)
        : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags),
          VarSize(VarSize), Linkage(Linkage) {
      setAddress(Addr);
    }

    CharUnits getVarSize() const { return VarSize; }
    void setVarSize(CharUnits Size) { VarSize = Size; }
    llvm::GlobalValue::LinkageTypes getLinkage() const { return Linkage; }
    void setLinkage(llvm::GlobalValue::LinkageTypes LT) { Linkage = LT; }
    static bool classof(const OffloadEntryInfo *Info) {
      return Info->getKind() == OffloadingEntryInfoDeviceGlobalVar;
    }
  };

  /// Initialize device global variable entry.
  void initializeDeviceGlobalVarEntryInfo(StringRef Name,
                                          OMPTargetGlobalVarEntryKind Flags,
                                          unsigned Order);
  /// Register device global variable entry.
  void
  registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr,
                                   CharUnits VarSize,
                                   OMPTargetGlobalVarEntryKind Flags,
                                   llvm::GlobalValue::LinkageTypes Linkage);
  /// Checks if the variable with the given name has been registered already.
  bool hasDeviceGlobalVarEntryInfo(StringRef VarName) const {
    return OffloadEntriesDeviceGlobalVar.count(VarName) > 0;
  }
  /// Applies action \a Action on all registered entries.
  typedef llvm::function_ref<void(StringRef,
                                  const OffloadEntryInfoDeviceGlobalVar &)>
      OffloadDeviceGlobalVarEntryInfoActTy;
  void actOnDeviceGlobalVarEntriesInfo(
      const OffloadDeviceGlobalVarEntryInfoActTy &Action);

private:
  // Storage for target region entries kind. The storage is to be indexed by
  // file ID, device ID, parent function name and line number.
  typedef llvm::DenseMap<unsigned, OffloadEntryInfoTargetRegion>
      OffloadEntriesTargetRegionPerLine;
  typedef llvm::StringMap<OffloadEntriesTargetRegionPerLine>
      OffloadEntriesTargetRegionPerParentName;
  typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerParentName>
      OffloadEntriesTargetRegionPerFile;
  typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerFile>
      OffloadEntriesTargetRegionPerDevice;
  typedef OffloadEntriesTargetRegionPerDevice OffloadEntriesTargetRegionTy;
  OffloadEntriesTargetRegionTy OffloadEntriesTargetRegion;
  /// Storage for device global variable entries kind. The storage is to be
  /// indexed by mangled name.
  typedef llvm::StringMap<OffloadEntryInfoDeviceGlobalVar>
      OffloadEntriesDeviceGlobalVarTy;
  OffloadEntriesDeviceGlobalVarTy OffloadEntriesDeviceGlobalVar;
};
OffloadEntriesInfoManagerTy OffloadEntriesInfoManager;
bool ShouldMarkAsGlobal = true;
/// List of the emitted declarations.
llvm::DenseSet<CanonicalDeclPtr<const Decl>> AlreadyEmittedTargetDecls;
/// List of the global variables with their addresses that should not be
/// emitted for the target.
llvm::StringMap<llvm::WeakTrackingVH> EmittedNonTargetVariables;
/// List of variables that can become declare target implicitly and, thus,
/// must be emitted.
llvm::SmallDenseSet<const VarDecl *> DeferredGlobalVariables;
using NontemporalDeclsSet = llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>>;
/// Stack for list of declarations in current context marked as nontemporal.
/// The set is the union of all current stack elements.
llvm::SmallVector<NontemporalDeclsSet, 4> NontemporalDeclsStack;
/// Stack for list of addresses of declarations in current context marked as
/// lastprivate conditional. The set is the union of all current stack
/// elements.
llvm::SmallVector<LastprivateConditionalData, 4> LastprivateConditionalStack;
/// Flag for keeping track of weather a requires unified_shared_memory
/// directive is present.
bool HasRequiresUnifiedSharedMemory = false;
/// Atomic ordering from the omp requires directive.
llvm::AtomicOrdering RequiresAtomicOrdering = llvm::AtomicOrdering::Monotonic;
/// Flag for keeping track of weather a target region has been emitted.
bool HasEmittedTargetRegion = false;
/// Flag for keeping track of weather a device routine has been emitted.
/// Device routines are specific to the
bool HasEmittedDeclareTargetRegion = false;
/// Loads all the offload entries information from the host IR
/// metadata.
void loadOffloadInfoMetadata();
/// Returns __tgt_offload_entry type.
QualType getTgtOffloadEntryQTy();
/// Start scanning from statement \a S and and emit all target regions
/// found along the way.
/// \param S Starting statement.
/// \param ParentName Name of the function declaration that is being scanned.
void scanForTargetRegionsFunctions(const Stmt *S, StringRef ParentName);
/// Build type kmp_routine_entry_t (if not built yet).
void emitKmpRoutineEntryT(QualType KmpInt32Ty);
/// Returns pointer to kmpc_micro type.
llvm::Type *getKmpc_MicroPointerTy();
/// Returns __kmpc_for_static_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createForStaticInitFunction(unsigned IVSize,
bool IVSigned);
/// Returns __kmpc_dispatch_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchInitFunction(unsigned IVSize,
bool IVSigned);
/// Returns __kmpc_dispatch_next_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchNextFunction(unsigned IVSize,
bool IVSigned);
/// Returns __kmpc_dispatch_fini_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchFiniFunction(unsigned IVSize,
bool IVSigned);
/// If the specified mangled name is not in the module, create and
/// return threadprivate cache object. This object is a pointer's worth of
/// storage that's reserved for use by the OpenMP runtime.
/// \param VD Threadprivate variable.
/// \return Cache variable for the specified threadprivate.
llvm::Constant *getOrCreateThreadPrivateCache(const VarDecl *VD);
/// Gets (if variable with the given name already exist) or creates
/// internal global variable with the specified Name. The created variable has
/// linkage CommonLinkage by default and is initialized by null value.
/// \param Ty Type of the global variable. If it is exist already the type
/// must be the same.
/// \param Name Name of the variable.
llvm::Constant *getOrCreateInternalVariable(llvm::Type *Ty,
const llvm::Twine &Name,
unsigned AddressSpace = 0);
/// Set of threadprivate variables with the generated initializer.
llvm::StringSet<> ThreadPrivateWithDefinition;
/// Set of declare target variables with the generated initializer.
llvm::StringSet<> DeclareTargetWithDefinition;
/// Emits initialization code for the threadprivate variables.
/// \param VDAddr Address of the global variable \a VD.
/// \param Ctor Pointer to a global init function for \a VD.
/// \param CopyCtor Pointer to a global copy function for \a VD.
/// \param Dtor Pointer to a global destructor function for \a VD.
/// \param Loc Location of threadprivate declaration.
void emitThreadPrivateVarInit(CodeGenFunction &CGF, Address VDAddr,
llvm::Value *Ctor, llvm::Value *CopyCtor,
llvm::Value *Dtor, SourceLocation Loc);
/// Emit the array initialization or deletion portion for user-defined mapper
/// code generation.
void emitUDMapperArrayInitOrDel(CodeGenFunction &MapperCGF,
llvm::Value *Handle, llvm::Value *BasePtr,
llvm::Value *Ptr, llvm::Value *Size,
llvm::Value *MapType, CharUnits ElementSize,
llvm::BasicBlock *ExitBB, bool IsInit);
struct TaskResultTy {
llvm::Value *NewTask = nullptr;
llvm::Function *TaskEntry = nullptr;
llvm::Value *NewTaskNewTaskTTy = nullptr;
LValue TDBase;
const RecordDecl *KmpTaskTQTyRD = nullptr;
llvm::Value *TaskDupFn = nullptr;
};
/// Emit task region for the task directive. The task region is emitted in
/// several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
  /// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
  /// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
TaskResultTy emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const OMPTaskDataTy &Data);
/// Returns default address space for the constant firstprivates, 0 by
/// default.
virtual unsigned getDefaultFirstprivateAddressSpace() const { return 0; }
/// Emit code that pushes the trip count of loops associated with constructs
/// 'target teams distribute' and 'teams distribute parallel for'.
/// \param SizeEmitter Emits the int64 value for the number of iterations of
/// the associated loop.
void emitTargetNumIterationsCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Value *DeviceID,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter);
/// Emit update for lastprivate conditional data.
void emitLastprivateConditionalUpdate(CodeGenFunction &CGF, LValue IVLVal,
StringRef UniqueDeclName, LValue LVal,
SourceLocation Loc);
/// Returns the number of the elements and the address of the depobj
/// dependency array.
/// \return Number of elements in depobj array and the pointer to the array of
/// dependencies.
std::pair<llvm::Value *, LValue> getDepobjElements(CodeGenFunction &CGF,
LValue DepobjLVal,
SourceLocation Loc);
public:
explicit CGOpenMPRuntime(CodeGenModule &CGM)
: CGOpenMPRuntime(CGM, ".", ".") {}
virtual ~CGOpenMPRuntime() {}
virtual void clear();
/// Emits code for OpenMP 'if' clause using specified \a CodeGen
/// function. Here is the logic:
/// if (Cond) {
/// ThenGen();
/// } else {
/// ElseGen();
/// }
void emitIfClause(CodeGenFunction &CGF, const Expr *Cond,
const RegionCodeGenTy &ThenGen,
const RegionCodeGenTy &ElseGen);
/// Checks if the \p Body is the \a CompoundStmt and returns its child
/// statement iff there is only one that is not evaluatable at the compile
/// time.
static const Stmt *getSingleCompoundChild(ASTContext &Ctx, const Stmt *Body);
/// Get the platform-specific name separator.
std::string getName(ArrayRef<StringRef> Parts) const;
/// Emit code for the specified user defined reduction construct.
virtual void emitUserDefinedReduction(CodeGenFunction *CGF,
const OMPDeclareReductionDecl *D);
/// Get combiner/initializer for the specified user-defined reduction, if any.
virtual std::pair<llvm::Function *, llvm::Function *>
getUserDefinedReduction(const OMPDeclareReductionDecl *D);
/// Emit the function for the user defined mapper construct.
void emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
CodeGenFunction *CGF = nullptr);
/// Emits outlined function for the specified OpenMP parallel directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
virtual llvm::Function *emitParallelOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
/// Emits outlined function for the specified OpenMP teams directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
virtual llvm::Function *emitTeamsOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
/// Emits outlined function for the OpenMP task directive \a D. This
/// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
/// TaskT).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param PartIDVar Variable for partition id in the current OpenMP untied
/// task region.
/// \param TaskTVar Variable for task_t argument.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param Tied true if task is generated for tied task, false otherwise.
/// \param NumberOfParts Number of parts in untied task. Ignored for tied
/// tasks.
///
virtual llvm::Function *emitTaskOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
const VarDecl *PartIDVar, const VarDecl *TaskTVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
bool Tied, unsigned &NumberOfParts);
/// Cleans up references to the objects in finished function.
///
virtual void functionFinished(CodeGenFunction &CGF);
/// Emits code for parallel or serial call of the \a OutlinedFn with
/// variables captured in a record which address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run in parallel threads. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
///
virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond);
/// Emits a critical region.
/// \param CriticalName Name of the critical region.
/// \param CriticalOpGen Generator for the statement associated with the given
/// critical region.
/// \param Hint Value of the 'hint' clause (optional).
virtual void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
const RegionCodeGenTy &CriticalOpGen,
SourceLocation Loc,
const Expr *Hint = nullptr);
/// Emits a master region.
/// \param MasterOpGen Generator for the statement associated with the given
/// master region.
virtual void emitMasterRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MasterOpGen,
SourceLocation Loc);
/// Emits code for a taskyield directive.
virtual void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc);
/// Emit a taskgroup region.
/// \param TaskgroupOpGen Generator for the statement associated with the
/// given taskgroup region.
virtual void emitTaskgroupRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &TaskgroupOpGen,
SourceLocation Loc);
/// Emits a single region.
/// \param SingleOpGen Generator for the statement associated with the given
/// single region.
virtual void emitSingleRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &SingleOpGen,
SourceLocation Loc,
ArrayRef<const Expr *> CopyprivateVars,
ArrayRef<const Expr *> DestExprs,
ArrayRef<const Expr *> SrcExprs,
ArrayRef<const Expr *> AssignmentOps);
/// Emit an ordered region.
/// \param OrderedOpGen Generator for the statement associated with the given
/// ordered region.
virtual void emitOrderedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &OrderedOpGen,
SourceLocation Loc, bool IsThreads);
/// Emit an implicit/explicit barrier for OpenMP threads.
/// \param Kind Directive for which this implicit barrier call must be
/// generated. Must be OMPD_barrier for explicit barrier generation.
/// \param EmitChecks true if need to emit checks for cancellation barriers.
  /// \param ForceSimpleCall true if a simple barrier call must be emitted, false if
/// runtime class decides which one to emit (simple or with cancellation
/// checks).
///
virtual void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind Kind,
bool EmitChecks = true,
bool ForceSimpleCall = false);
/// Check if the specified \a ScheduleKind is static non-chunked.
/// This kind of worksharing directive is emitted without outer loop.
/// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is static non-chunked.
/// This kind of distribute directive is emitted without outer loop.
/// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticNonchunked(OpenMPDistScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is static chunked.
/// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticChunked(OpenMPScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is static non-chunked.
/// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticChunked(OpenMPDistScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is dynamic.
/// This kind of worksharing directive is emitted without outer loop.
/// \param ScheduleKind Schedule Kind specified in the 'schedule' clause.
///
virtual bool isDynamic(OpenMPScheduleClauseKind ScheduleKind) const;
  /// Struct with the values to be passed to the dispatch runtime function.
  struct DispatchRTInput {
    /// Loop lower bound.
    llvm::Value *LB = nullptr;
    /// Loop upper bound.
    llvm::Value *UB = nullptr;
    /// Chunk size specified using 'schedule' clause (nullptr if chunk
    /// was not specified).
    llvm::Value *Chunk = nullptr;
    /// Default-constructed input: all values absent (nullptr).
    DispatchRTInput() = default;
    /// \param LB Loop lower bound.
    /// \param UB Loop upper bound.
    /// \param Chunk Chunk size from the 'schedule' clause, or nullptr if no
    /// chunk was specified.
    DispatchRTInput(llvm::Value *LB, llvm::Value *UB, llvm::Value *Chunk)
        : LB(LB), UB(UB), Chunk(Chunk) {}
  };
/// Call the appropriate runtime routine to initialize it before start
/// of loop.
/// This is used for non static scheduled types and when the ordered
/// clause is present on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds \a LB and \a UB and stride \a ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param Ordered true if loop is ordered, false otherwise.
/// \param DispatchValues struct containing llvm values for lower bound, upper
/// bound, and chunk expression.
/// For the default (nullptr) value, the chunk 1 will be used.
///
virtual void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc,
const OpenMPScheduleTy &ScheduleKind,
unsigned IVSize, bool IVSigned, bool Ordered,
const DispatchRTInput &DispatchValues);
  /// Struct with the values to be passed to the static runtime function.
  struct StaticRTInput {
    /// Size of the iteration variable in bits.
    unsigned IVSize = 0;
    /// Sign of the iteration variable.
    bool IVSigned = false;
    /// true if loop is ordered, false otherwise.
    bool Ordered = false;
    /// Address of the output variable in which the flag of the last iteration
    /// is returned.
    Address IL = Address::invalid();
    /// Address of the output variable in which the lower iteration number is
    /// returned.
    Address LB = Address::invalid();
    /// Address of the output variable in which the upper iteration number is
    /// returned.
    Address UB = Address::invalid();
    /// Address of the output variable in which the stride value is returned
    /// necessary to generate the static_chunked scheduled loop.
    Address ST = Address::invalid();
    /// Value of the chunk for the static_chunked scheduled loop. For the
    /// default (nullptr) value, the chunk 1 will be used.
    llvm::Value *Chunk = nullptr;
    /// Bundles all inputs; \p Chunk may be omitted (nullptr) to request the
    /// default chunk of 1.
    StaticRTInput(unsigned IVSize, bool IVSigned, bool Ordered, Address IL,
                  Address LB, Address UB, Address ST,
                  llvm::Value *Chunk = nullptr)
        : IVSize(IVSize), IVSigned(IVSigned), Ordered(Ordered), IL(IL), LB(LB),
          UB(UB), ST(ST), Chunk(Chunk) {}
  };
/// Call the appropriate runtime routine to initialize it before start
/// of loop.
///
/// This is used only in case of static schedule, when the user did not
  /// specify an ordered clause on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds LB and UB and stride ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param Values Input arguments for the construct.
///
virtual void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind,
const OpenMPScheduleTy &ScheduleKind,
const StaticRTInput &Values);
  /// Call the appropriate runtime routine to initialize it before start
  /// of the distribute loop.
  ///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause.
/// \param Values Input arguments for the construct.
///
virtual void emitDistributeStaticInit(CodeGenFunction &CGF,
SourceLocation Loc,
OpenMPDistScheduleClauseKind SchedKind,
const StaticRTInput &Values);
/// Call the appropriate runtime routine to notify that we finished
/// iteration of the ordered loop with the dynamic scheduling.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
///
virtual void emitForOrderedIterationEnd(CodeGenFunction &CGF,
SourceLocation Loc, unsigned IVSize,
bool IVSigned);
/// Call the appropriate runtime routine to notify that we finished
/// all the work with current loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive for which the static finish is emitted.
///
virtual void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind);
/// Call __kmpc_dispatch_next(
/// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
/// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
/// kmp_int[32|64] *p_stride);
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param IL Address of the output variable in which the flag of the
/// last iteration is returned.
/// \param LB Address of the output variable in which the lower iteration
/// number is returned.
/// \param UB Address of the output variable in which the upper iteration
/// number is returned.
/// \param ST Address of the output variable in which the stride value is
/// returned.
virtual llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned,
Address IL, Address LB,
Address UB, Address ST);
/// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
/// clause.
/// \param NumThreads An integer value of threads.
virtual void emitNumThreadsClause(CodeGenFunction &CGF,
llvm::Value *NumThreads,
SourceLocation Loc);
/// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
/// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
virtual void emitProcBindClause(CodeGenFunction &CGF,
llvm::omp::ProcBindKind ProcBind,
SourceLocation Loc);
/// Returns address of the threadprivate variable for the current
/// thread.
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of the reference to threadprivate var.
/// \return Address of the threadprivate variable for the current thread.
virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF,
const VarDecl *VD,
Address VDAddr,
SourceLocation Loc);
/// Returns the address of the variable marked as declare target with link
/// clause OR as declare target with to clause and unified memory.
virtual Address getAddrOfDeclareTargetVar(const VarDecl *VD);
/// Emit a code for initialization of threadprivate variable. It emits
/// a call to runtime library which adds initial value to the newly created
/// threadprivate variable (if it is not constant) and registers destructor
/// for the variable (if any).
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of threadprivate declaration.
/// \param PerformInit true if initialization expression is not constant.
virtual llvm::Function *
emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr,
SourceLocation Loc, bool PerformInit,
CodeGenFunction *CGF = nullptr);
/// Emit a code for initialization of declare target variable.
/// \param VD Declare target variable.
/// \param Addr Address of the global variable \a VD.
/// \param PerformInit true if initialization expression is not constant.
virtual bool emitDeclareTargetVarDefinition(const VarDecl *VD,
llvm::GlobalVariable *Addr,
bool PerformInit);
/// Creates artificial threadprivate variable with name \p Name and type \p
/// VarType.
/// \param VarType Type of the artificial threadprivate variable.
/// \param Name Name of the artificial threadprivate variable.
virtual Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
QualType VarType,
StringRef Name);
/// Emit flush of the variables specified in 'omp flush' directive.
/// \param Vars List of variables to flush.
virtual void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
SourceLocation Loc, llvm::AtomicOrdering AO);
/// Emit task region for the task directive. The task region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
/// kmp_task_t *new_task), where new_task is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
  /// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
  /// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
virtual void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data);
/// Emit task region for the taskloop directive. The taskloop region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t
/// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int
/// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task
/// is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
  /// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
  /// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
virtual void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPLoopDirective &D,
llvm::Function *TaskFunction,
QualType SharedsTy, Address Shareds,
const Expr *IfCond, const OMPTaskDataTy &Data);
/// Emit code for the directive that does not require outlining.
///
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param HasCancel true if region has inner cancel directive, false
/// otherwise.
virtual void emitInlinedDirective(CodeGenFunction &CGF,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen,
bool HasCancel = false);
/// Emits reduction function.
/// \param ArgsType Array type containing pointers to reduction variables.
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
llvm::Function *emitReductionFunction(SourceLocation Loc,
llvm::Type *ArgsType,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps);
/// Emits single reduction combiner
void emitSingleReductionCombiner(CodeGenFunction &CGF,
const Expr *ReductionOp,
const Expr *PrivateRef,
const DeclRefExpr *LHS,
const DeclRefExpr *RHS);
  /// Options controlling reduction codegen, consumed by emitReduction.
  struct ReductionOptionsTy {
    /// True if the parent directive also has a nowait clause.
    bool WithNowait;
    /// Emit the reduction operation only; used for the 'omp simd' directive
    /// on the host.
    bool SimpleReduction;
    /// The kind of reduction to perform.
    OpenMPDirectiveKind ReductionKind;
  };
/// Emit a code for reduction clause. Next code should be emitted for
/// reduction:
/// \code
///
/// static kmp_critical_name lock = { 0 };
///
/// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
/// ...
/// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
/// ...
/// }
///
/// ...
/// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
/// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
/// RedList, reduce_func, &<lock>)) {
/// case 1:
/// ...
/// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
/// ...
/// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
/// break;
/// case 2:
/// ...
/// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
/// ...
/// break;
/// default:;
/// }
/// \endcode
///
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
/// \param Options List of options for reduction codegen:
/// WithNowait true if parent directive has also nowait clause, false
/// otherwise.
/// SimpleReduction Emit reduction operation only. Used for omp simd
/// directive on the host.
/// ReductionKind The kind of reduction to perform.
virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps,
ReductionOptionsTy Options);
/// Emit a code for initialization of task reduction clause. Next code
/// should be emitted for reduction:
/// \code
///
/// _taskred_item_t red_data[n];
/// ...
/// red_data[i].shar = &shareds[i];
/// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_taskred_init(gtid, n, red_data);
/// \endcode
/// For reduction clause with task modifier it emits the next call:
/// \code
///
/// _taskred_item_t red_data[n];
/// ...
/// red_data[i].shar = &shareds[i];
/// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n,
/// red_data);
/// \endcode
/// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates, reductions etc.
virtual llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF,
SourceLocation Loc,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
const OMPTaskDataTy &Data);
/// Emits the following code for reduction clause with task modifier:
/// \code
/// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing);
/// \endcode
virtual void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc,
bool IsWorksharingReduction);
/// Required to resolve existing problems in the runtime. Emits threadprivate
/// variables to store the size of the VLAs/array sections for
/// initializer/combiner/finalizer functions.
/// \param RCG Allows to reuse an existing data for the reductions.
/// \param N Reduction item for which fixups must be emitted.
virtual void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
ReductionCodeGen &RCG, unsigned N);
  /// Get the address of the `void *`-typed private copy of the reduction
  /// item specified by the \p SharedLVal.
/// \param ReductionsPtr Pointer to the reduction data returned by the
/// emitTaskReductionInit function.
/// \param SharedLVal Address of the original reduction item.
virtual Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *ReductionsPtr,
LValue SharedLVal);
/// Emit code for 'taskwait' directive.
virtual void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc);
/// Emit code for 'cancellation point' construct.
/// \param CancelRegion Region kind for which the cancellation point must be
/// emitted.
///
virtual void emitCancellationPointCall(CodeGenFunction &CGF,
SourceLocation Loc,
OpenMPDirectiveKind CancelRegion);
/// Emit code for 'cancel' construct.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
/// \param CancelRegion Region kind for which the cancel must be emitted.
///
virtual void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
const Expr *IfCond,
OpenMPDirectiveKind CancelRegion);
  /// Emit outlined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Code generation sequence for the \a D directive.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
virtual void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen);
/// Emit the target offloading code associated with \a D. The emitted
  /// code attempts offloading the execution to the device; in the event of
/// a failure it executes the host version outlined in \a OutlinedFn.
/// \param D Directive to emit.
/// \param OutlinedFn Host version of the code to be offloaded.
/// \param OutlinedFnID ID of host version of the code to be offloaded.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used and device modifier.
/// \param SizeEmitter Callback to emit number of iterations for loop-based
/// directives.
virtual void emitTargetCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter);
/// Emit the target regions enclosed in \a GD function definition or
/// the function itself in case it is a valid device function. Returns true if
/// \a GD was dealt with successfully.
/// \param GD Function to scan.
virtual bool emitTargetFunctions(GlobalDecl GD);
/// Emit the global variable if it is a valid device global variable.
/// Returns true if \a GD was dealt with successfully.
/// \param GD Variable declaration to emit.
virtual bool emitTargetGlobalVariable(GlobalDecl GD);
/// Checks if the provided global decl \a GD is a declare target variable and
/// registers it when emitting code for the host.
virtual void registerTargetGlobalVariable(const VarDecl *VD,
llvm::Constant *Addr);
/// Registers provided target firstprivate variable as global on the
/// target.
llvm::Constant *registerTargetFirstprivateCopy(CodeGenFunction &CGF,
const VarDecl *VD);
/// Emit the global \a GD if it is meaningful for the target. Returns
/// if it was emitted successfully.
/// \param GD Global to scan.
virtual bool emitTargetGlobal(GlobalDecl GD);
/// Creates and returns a registration function for when at least one
/// requires directives was used in the current module.
llvm::Function *emitRequiresDirectiveRegFun();
/// Creates all the offload entries in the current compilation unit
/// along with the associated metadata.
void createOffloadEntriesAndInfoMetadata();
/// Emits code for teams call of the \a OutlinedFn with
/// variables captured in a record which address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run by team masters. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
///
virtual void emitTeamsCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
SourceLocation Loc, llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars);
/// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code
/// for num_teams clause.
/// \param NumTeams An integer expression of teams.
/// \param ThreadLimit An integer expression of threads.
virtual void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
const Expr *ThreadLimit, SourceLocation Loc);
/// Struct that keeps all the relevant information that should be kept
/// throughout a 'target data' region.
class TargetDataInfo {
  /// Set to true if device pointer information has to be obtained.
  bool RequiresDevicePointerInfo = false;

public:
  /// The array of base pointers passed to the runtime library.
  llvm::Value *BasePointersArray = nullptr;
  /// The array of section pointers passed to the runtime library.
  llvm::Value *PointersArray = nullptr;
  /// The array of sizes passed to the runtime library.
  llvm::Value *SizesArray = nullptr;
  /// The array of map types passed to the runtime library.
  llvm::Value *MapTypesArray = nullptr;
  /// The total number of pointers passed to the runtime library.
  unsigned NumberOfPtrs = 0u;
  /// Map between a declaration of a capture and the corresponding base
  /// pointer address where the runtime returns the device pointers.
  llvm::DenseMap<const ValueDecl *, Address> CaptureDeviceAddrMap;

  explicit TargetDataInfo() = default;
  explicit TargetDataInfo(bool RequiresDevicePointerInfo)
      : RequiresDevicePointerInfo(RequiresDevicePointerInfo) {}

  /// Clear information about the data arrays.
  void clearArrayInfo() {
    BasePointersArray = nullptr;
    PointersArray = nullptr;
    SizesArray = nullptr;
    MapTypesArray = nullptr;
    NumberOfPtrs = 0u;
  }

  /// Return true if the current target data information has valid arrays.
  bool isValid() const {
    return BasePointersArray && PointersArray && SizesArray &&
           MapTypesArray && NumberOfPtrs;
  }

  /// Return true if device pointer information must be obtained.
  bool requiresDevicePointerInfo() const { return RequiresDevicePointerInfo; }
};
/// Emit the target data mapping code associated with \a D.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the
/// target directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
/// \param Info A record used to store information that needs to be preserved
/// until the region is closed.
virtual void emitTargetDataCalls(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
const Expr *IfCond, const Expr *Device,
const RegionCodeGenTy &CodeGen,
TargetDataInfo &Info);
/// Emit the data mapping/movement code associated with the directive
/// \a D that should be of the form 'target [{enter|exit} data | update]'.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
virtual void emitTargetDataStandAloneCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
const Expr *IfCond,
const Expr *Device);
/// Marks function \a Fn with properly mangled versions of vector functions.
/// \param FD Function marked as 'declare simd'.
/// \param Fn LLVM function that must be marked with 'declare simd'
/// attributes.
virtual void emitDeclareSimdFunction(const FunctionDecl *FD,
llvm::Function *Fn);
/// Emit initialization for doacross loop nesting support.
/// \param D Loop-based construct used in doacross nesting construct.
virtual void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D,
ArrayRef<Expr *> NumIterations);
/// Emit code for doacross ordered directive with 'depend' clause.
/// \param C 'depend' clause with 'sink|source' dependency kind.
virtual void emitDoacrossOrdered(CodeGenFunction &CGF,
const OMPDependClause *C);
/// Translate the parameter of an outlined function into the form required by
/// the target, if any translation is needed at all.
/// \param FD Field decl from the captured record for the parameter.
/// \param NativeParam Parameter itself.
/// \return The (possibly translated) parameter declaration. The base
/// implementation performs no translation and returns \p NativeParam.
virtual const VarDecl *translateParameter(const FieldDecl *FD,
                                          const VarDecl *NativeParam) const {
  // Host codegen needs no translation; hand the native parameter back as-is.
  return NativeParam;
}
/// Gets the address of the native argument basing on the address of the
/// target-specific parameter.
/// \param NativeParam Parameter itself.
/// \param TargetParam Corresponding target-specific parameter.
virtual Address getParameterAddress(CodeGenFunction &CGF,
const VarDecl *NativeParam,
const VarDecl *TargetParam) const;
/// Choose default schedule type and chunk value for the
/// dist_schedule clause.
/// \param ScheduleKind [out] Selected dist_schedule kind for directive \p S.
/// \param Chunk [out] Selected chunk value, if any.
/// The base implementation is a no-op: \p ScheduleKind and \p Chunk are left
/// untouched; device-specific runtimes override this to impose their defaults.
virtual void getDefaultDistScheduleAndChunk(CodeGenFunction &CGF,
    const OMPLoopDirective &S, OpenMPDistScheduleClauseKind &ScheduleKind,
    llvm::Value *&Chunk) const {}
/// Choose default schedule type and chunk value for the
/// schedule clause.
virtual void getDefaultScheduleAndChunk(CodeGenFunction &CGF,
const OMPLoopDirective &S, OpenMPScheduleClauseKind &ScheduleKind,
const Expr *&ChunkExpr) const;
/// Emits call of the outlined function with the provided arguments,
/// translating these arguments to correct target-specific arguments.
virtual void
emitOutlinedFunctionCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::FunctionCallee OutlinedFn,
ArrayRef<llvm::Value *> Args = llvm::None) const;
/// Emits OpenMP-specific function prolog.
/// Required for device constructs.
virtual void emitFunctionProlog(CodeGenFunction &CGF, const Decl *D);
/// Gets the OpenMP-specific address of the local variable.
virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF,
const VarDecl *VD);
/// Marks the declaration as already emitted for the device code and returns
/// true, if it was marked already, and false, otherwise.
bool markAsGlobalTarget(GlobalDecl GD);
/// Emit deferred declare target variables marked for deferred emission.
void emitDeferredTargetDecls() const;
/// Adjust some parameters for the target-based directives, like addresses of
/// the variables captured by reference in lambdas.
virtual void
adjustTargetSpecificDataForLambdas(CodeGenFunction &CGF,
const OMPExecutableDirective &D) const;
/// Perform check on requires decl to ensure that target architecture
/// supports unified addressing
virtual void processRequiresDirective(const OMPRequiresDecl *D);
/// Gets default memory ordering as specified in requires directive.
llvm::AtomicOrdering getDefaultMemoryOrdering() const;
/// Checks if the variable has associated OMPAllocateDeclAttr attribute with
/// the predefined allocator and translates it into the corresponding address
/// space.
virtual bool hasAllocateAttributeForGlobalVar(const VarDecl *VD, LangAS &AS);
/// Return whether the unified_shared_memory has been specified.
bool hasRequiresUnifiedSharedMemory() const;
/// Checks if the \p VD variable is marked as nontemporal declaration in
/// current context.
bool isNontemporalDecl(const ValueDecl *VD) const;
/// Create specialized alloca to handle lastprivate conditionals.
Address emitLastprivateConditionalInit(CodeGenFunction &CGF,
const VarDecl *VD);
/// Checks if the provided \p LVal is lastprivate conditional and emits the
/// code to update the value of the original variable.
/// \code
/// lastprivate(conditional: a)
/// ...
/// <type> a;
/// lp_a = ...;
/// #pragma omp critical(a)
/// if (last_iv_a <= iv) {
/// last_iv_a = iv;
/// global_a = lp_a;
/// }
/// \endcode
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
const Expr *LHS);
/// Checks if the lastprivate conditional was updated in inner region and
/// writes the value.
/// \code
/// lastprivate(conditional: a)
/// ...
/// <type> a;bool Fired = false;
/// #pragma omp ... shared(a)
/// {
/// lp_a = ...;
/// Fired = true;
/// }
/// if (Fired) {
/// #pragma omp critical(a)
/// if (last_iv_a <= iv) {
/// last_iv_a = iv;
/// global_a = lp_a;
/// }
/// Fired = false;
/// }
/// \endcode
virtual void checkAndEmitSharedLastprivateConditional(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
const llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> &IgnoredDecls);
/// Gets the address of the global copy used for lastprivate conditional
/// update, if any.
/// \param PrivLVal LValue for the private copy.
/// \param VD Original lastprivate declaration.
virtual void emitLastprivateConditionalFinalUpdate(CodeGenFunction &CGF,
LValue PrivLVal,
const VarDecl *VD,
SourceLocation Loc);
/// Emits list of dependencies based on the provided data (array of
/// dependence/expression pairs).
/// \returns Pointer to the first element of the array casted to VoidPtr type.
std::pair<llvm::Value *, Address>
emitDependClause(CodeGenFunction &CGF,
ArrayRef<OMPTaskDataTy::DependData> Dependencies,
SourceLocation Loc);
/// Emits list of dependencies based on the provided data (array of
/// dependence/expression pairs) for depobj construct. In this case, the
/// variable is allocated dynamically. \returns Pointer to the first
/// element of the array casted to VoidPtr type.
Address emitDepobjDependClause(CodeGenFunction &CGF,
const OMPTaskDataTy::DependData &Dependencies,
SourceLocation Loc);
/// Emits the code to destroy the dependency object provided in depobj
/// directive.
void emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal,
SourceLocation Loc);
/// Updates the dependency kind in the specified depobj object.
/// \param DepobjLVal LValue for the main depobj object.
/// \param NewDepKind New dependency kind.
void emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
OpenMPDependClauseKind NewDepKind, SourceLocation Loc);
/// Initializes user defined allocators specified in the uses_allocators
/// clauses.
void emitUsesAllocatorsInit(CodeGenFunction &CGF, const Expr *Allocator,
const Expr *AllocatorTraits);
/// Destroys user defined allocators specified in the uses_allocators clause.
void emitUsesAllocatorsFini(CodeGenFunction &CGF, const Expr *Allocator);
};
/// Class supports emission of SIMD-only code.
class CGOpenMPSIMDRuntime final : public CGOpenMPRuntime {
public:
/// Construct the SIMD-only runtime; all state lives in the base class.
explicit CGOpenMPSIMDRuntime(CodeGenModule &CGM) : CGOpenMPRuntime(CGM) {}
/// Defaulted: no resources beyond the base class to release.
~CGOpenMPSIMDRuntime() override = default;
/// Emits outlined function for the specified OpenMP parallel directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
llvm::Function *
emitParallelOutlinedFunction(const OMPExecutableDirective &D,
const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen) override;
/// Emits outlined function for the specified OpenMP teams directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
llvm::Function *
emitTeamsOutlinedFunction(const OMPExecutableDirective &D,
const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen) override;
/// Emits outlined function for the OpenMP task directive \a D. This
/// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
/// TaskT).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param PartIDVar Variable for partition id in the current OpenMP untied
/// task region.
/// \param TaskTVar Variable for task_t argument.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param Tied true if task is generated for tied task, false otherwise.
/// \param NumberOfParts Number of parts in untied task. Ignored for tied
/// tasks.
///
llvm::Function *emitTaskOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
const VarDecl *PartIDVar, const VarDecl *TaskTVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
bool Tied, unsigned &NumberOfParts) override;
/// Emits code for parallel or serial call of the \a OutlinedFn with
/// variables captured in a record which address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run in parallel threads. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
///
void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond) override;
/// Emits a critical region.
/// \param CriticalName Name of the critical region.
/// \param CriticalOpGen Generator for the statement associated with the given
/// critical region.
/// \param Hint Value of the 'hint' clause (optional).
void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
const RegionCodeGenTy &CriticalOpGen,
SourceLocation Loc,
const Expr *Hint = nullptr) override;
/// Emits a master region.
/// \param MasterOpGen Generator for the statement associated with the given
/// master region.
void emitMasterRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MasterOpGen,
SourceLocation Loc) override;
/// Emits code for a taskyield directive.
void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc) override;
/// Emit a taskgroup region.
/// \param TaskgroupOpGen Generator for the statement associated with the
/// given taskgroup region.
void emitTaskgroupRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &TaskgroupOpGen,
SourceLocation Loc) override;
/// Emits a single region.
/// \param SingleOpGen Generator for the statement associated with the given
/// single region.
void emitSingleRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &SingleOpGen, SourceLocation Loc,
ArrayRef<const Expr *> CopyprivateVars,
ArrayRef<const Expr *> DestExprs,
ArrayRef<const Expr *> SrcExprs,
ArrayRef<const Expr *> AssignmentOps) override;
/// Emit an ordered region.
/// \param OrderedOpGen Generator for the statement associated with the given
/// ordered region.
void emitOrderedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &OrderedOpGen,
SourceLocation Loc, bool IsThreads) override;
/// Emit an implicit/explicit barrier for OpenMP threads.
/// \param Kind Directive for which this implicit barrier call must be
/// generated. Must be OMPD_barrier for explicit barrier generation.
/// \param EmitChecks true if need to emit checks for cancellation barriers.
/// \param ForceSimpleCall true simple barrier call must be emitted, false if
/// runtime class decides which one to emit (simple or with cancellation
/// checks).
///
void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind Kind, bool EmitChecks = true,
bool ForceSimpleCall = false) override;
/// This is used for non static scheduled types and when the ordered
/// clause is present on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds \a LB and \a UB and stride \a ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param Ordered true if loop is ordered, false otherwise.
/// \param DispatchValues struct containing llvm values for lower bound, upper
/// bound, and chunk expression.
/// For the default (nullptr) value, the chunk 1 will be used.
///
void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc,
const OpenMPScheduleTy &ScheduleKind,
unsigned IVSize, bool IVSigned, bool Ordered,
const DispatchRTInput &DispatchValues) override;
/// Call the appropriate runtime routine to initialize it before start
/// of loop.
///
/// This is used only in case of static schedule, when the user did not
/// specify a ordered clause on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds LB and UB and stride ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param Values Input arguments for the construct.
///
void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind,
const OpenMPScheduleTy &ScheduleKind,
const StaticRTInput &Values) override;
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause.
/// \param Values Input arguments for the construct.
///
void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDistScheduleClauseKind SchedKind,
const StaticRTInput &Values) override;
/// Call the appropriate runtime routine to notify that we finished
/// iteration of the ordered loop with the dynamic scheduling.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
///
void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned) override;
/// Call the appropriate runtime routine to notify that we finished
/// all the work with current loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive for which the static finish is emitted.
///
void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind) override;
/// Call __kmpc_dispatch_next(
/// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
/// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
/// kmp_int[32|64] *p_stride);
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param IL Address of the output variable in which the flag of the
/// last iteration is returned.
/// \param LB Address of the output variable in which the lower iteration
/// number is returned.
/// \param UB Address of the output variable in which the upper iteration
/// number is returned.
/// \param ST Address of the output variable in which the stride value is
/// returned.
llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned, Address IL,
Address LB, Address UB, Address ST) override;
/// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
/// clause.
/// \param NumThreads An integer value of threads.
void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads,
SourceLocation Loc) override;
/// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
/// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
void emitProcBindClause(CodeGenFunction &CGF,
llvm::omp::ProcBindKind ProcBind,
SourceLocation Loc) override;
/// Returns address of the threadprivate variable for the current
/// thread.
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of the reference to threadprivate var.
/// \return Address of the threadprivate variable for the current thread.
Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD,
Address VDAddr, SourceLocation Loc) override;
/// Emit a code for initialization of threadprivate variable. It emits
/// a call to runtime library which adds initial value to the newly created
/// threadprivate variable (if it is not constant) and registers destructor
/// for the variable (if any).
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of threadprivate declaration.
/// \param PerformInit true if initialization expression is not constant.
llvm::Function *
emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr,
SourceLocation Loc, bool PerformInit,
CodeGenFunction *CGF = nullptr) override;
/// Creates artificial threadprivate variable with name \p Name and type \p
/// VarType.
/// \param VarType Type of the artificial threadprivate variable.
/// \param Name Name of the artificial threadprivate variable.
Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
QualType VarType,
StringRef Name) override;
/// Emit flush of the variables specified in 'omp flush' directive.
/// \param Vars List of variables to flush.
void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
SourceLocation Loc, llvm::AtomicOrdering AO) override;
/// Emit task region for the task directive. The task region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
/// kmp_task_t *new_task), where new_task is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data) override;
/// Emit task region for the taskloop directive. The taskloop region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t
/// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int
/// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task
/// is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPLoopDirective &D, llvm::Function *TaskFunction,
QualType SharedsTy, Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data) override;
/// Emit a code for reduction clause. Next code should be emitted for
/// reduction:
/// \code
///
/// static kmp_critical_name lock = { 0 };
///
/// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
/// ...
/// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
/// ...
/// }
///
/// ...
/// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
/// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
/// RedList, reduce_func, &<lock>)) {
/// case 1:
/// ...
/// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
/// ...
/// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
/// break;
/// case 2:
/// ...
/// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
/// ...
/// break;
/// default:;
/// }
/// \endcode
///
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
/// \param Options List of options for reduction codegen:
/// WithNowait true if parent directive has also nowait clause, false
/// otherwise.
/// SimpleReduction Emit reduction operation only. Used for omp simd
/// directive on the host.
/// ReductionKind The kind of reduction to perform.
void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps,
ReductionOptionsTy Options) override;
/// Emit a code for initialization of task reduction clause. Next code
/// should be emitted for reduction:
/// \code
///
/// _taskred_item_t red_data[n];
/// ...
/// red_data[i].shar = &shareds[i];
/// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_taskred_init(gtid, n, red_data);
/// \endcode
/// For reduction clause with task modifier it emits the next call:
/// \code
///
/// _taskred_item_t red_data[n];
/// ...
/// red_data[i].shar = &shareds[i];
/// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n,
/// red_data);
/// \endcode
/// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates, reductions etc.
llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
const OMPTaskDataTy &Data) override;
/// Emits the following code for reduction clause with task modifier:
/// \code
/// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing);
/// \endcode
void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc,
bool IsWorksharingReduction) override;
/// Required to resolve existing problems in the runtime. Emits threadprivate
/// variables to store the size of the VLAs/array sections for
/// initializer/combiner/finalizer functions + emits threadprivate variable to
/// store the pointer to the original reduction item for the custom
/// initializer defined by declare reduction construct.
/// \param RCG Allows to reuse an existing data for the reductions.
/// \param N Reduction item for which fixups must be emitted.
void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
ReductionCodeGen &RCG, unsigned N) override;
/// Get the address of `void *` type of the private copy of the reduction
/// item specified by the \p SharedLVal.
/// \param ReductionsPtr Pointer to the reduction data returned by the
/// emitTaskReductionInit function.
/// \param SharedLVal Address of the original reduction item.
Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *ReductionsPtr,
LValue SharedLVal) override;
/// Emit code for 'taskwait' directive.
void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc) override;
/// Emit code for 'cancellation point' construct.
/// \param CancelRegion Region kind for which the cancellation point must be
/// emitted.
///
void emitCancellationPointCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind CancelRegion) override;
/// Emit code for 'cancel' construct.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
/// \param CancelRegion Region kind for which the cancel must be emitted.
///
void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
const Expr *IfCond,
OpenMPDirectiveKind CancelRegion) override;
/// Emit outlined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Code generation sequence for the \a D directive.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen) override;
/// Emit the target offloading code associated with \a D. The emitted
/// code attempts offloading the execution to the device, in the event of
/// a failure it executes the host version outlined in \a OutlinedFn.
/// \param D Directive to emit.
/// \param OutlinedFn Host version of the code to be offloaded.
/// \param OutlinedFnID ID of host version of the code to be offloaded.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used and device modifier.
void emitTargetCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter) override;
/// Emit the target regions enclosed in \a GD function definition or
/// the function itself in case it is a valid device function. Returns true if
/// \a GD was dealt with successfully.
/// \param GD Function to scan.
bool emitTargetFunctions(GlobalDecl GD) override;
/// Emit the global variable if it is a valid device global variable.
/// Returns true if \a GD was dealt with successfully.
/// \param GD Variable declaration to emit.
bool emitTargetGlobalVariable(GlobalDecl GD) override;
/// Emit the global \a GD if it is meaningful for the target. Returns true
/// if it was emitted successfully.
/// \param GD Global to scan.
bool emitTargetGlobal(GlobalDecl GD) override;
/// Emits code for teams call of the \a OutlinedFn with
/// variables captured in a record which address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run by team masters. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
///
void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D,
SourceLocation Loc, llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars) override;
/// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code
/// for num_teams clause.
/// \param NumTeams An integer expression of teams.
/// \param ThreadLimit An integer expression of threads.
void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
const Expr *ThreadLimit, SourceLocation Loc) override;
/// Emit the target data mapping code associated with \a D.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the
/// target directive, or null if no device clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
/// \param Info A record used to store information that needs to be preserved
/// until the region is closed.
void emitTargetDataCalls(CodeGenFunction &CGF,
const OMPExecutableDirective &D, const Expr *IfCond,
const Expr *Device, const RegionCodeGenTy &CodeGen,
TargetDataInfo &Info) override;
/// Emit the data mapping/movement code associated with the directive
/// \a D that should be of the form 'target [{enter|exit} data | update]'.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
void emitTargetDataStandAloneCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
const Expr *IfCond,
const Expr *Device) override;
/// Emit initialization for doacross loop nesting support.
/// \param D Loop-based construct used in doacross nesting construct.
void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D,
ArrayRef<Expr *> NumIterations) override;
/// Emit code for doacross ordered directive with 'depend' clause.
/// \param C 'depend' clause with 'sink|source' dependency kind.
void emitDoacrossOrdered(CodeGenFunction &CGF,
const OMPDependClause *C) override;
/// Translates the native parameter of outlined function if this is required
/// for target.
/// \param FD Field decl from captured record for the parameter.
/// \param NativeParam Parameter itself.
const VarDecl *translateParameter(const FieldDecl *FD,
const VarDecl *NativeParam) const override;
/// Gets the address of the native argument basing on the address of the
/// target-specific parameter.
/// \param NativeParam Parameter itself.
/// \param TargetParam Corresponding target-specific parameter.
Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam,
const VarDecl *TargetParam) const override;
/// Gets the OpenMP-specific address of the local variable.
/// This override performs no device-specific allocation for \a VD.
/// NOTE(review): returning Address::invalid() appears to tell the caller to
/// fall back to its normal local-variable allocation path -- confirm against
/// the base-class contract in CGOpenMPRuntime.
Address getAddressOfLocalVariable(CodeGenFunction &CGF,
const VarDecl *VD) override {
return Address::invalid();
}
};
} // namespace CodeGen
} // namespace clang
#endif
|
hicoo.c | /*
This file is part of ParTI!.
ParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <ParTI.h>
#include "hicoo.h"
/**
* Create a new sparse tensor in HiCOO format
* @param hitsr a pointer to an uninitialized sparse tensor
* @param nmodes number of modes the tensor will have
* @param ndims the dimension of each mode the tensor will have
* @param nnz number of nonzeros the tensor will have
*/
int sptNewSparseTensorHiCOO(
    sptSparseTensorHiCOO *hitsr,
    const sptIndex nmodes,
    const sptIndex ndims[],
    const sptNnzIndex nnz,
    const sptElementIndex sb_bits,
    const sptElementIndex sk_bits,
    const sptElementIndex sc_bits)
{
    sptIndex i;
    int result;

    hitsr->nmodes = nmodes;
    /* Default sort order is the identity permutation of the modes. */
    hitsr->sortorder = malloc(nmodes * sizeof hitsr->sortorder[0]);
    spt_CheckOSError(!hitsr->sortorder, "HiSpTns New");  /* fix: was unchecked */
    for(i = 0; i < nmodes; ++i) {
        hitsr->sortorder[i] = i;
    }
    hitsr->ndims = malloc(nmodes * sizeof *hitsr->ndims);
    spt_CheckOSError(!hitsr->ndims, "HiSpTns New");
    memcpy(hitsr->ndims, ndims, nmodes * sizeof *hitsr->ndims);
    hitsr->nnz = nnz;

    /* Parameters */
    hitsr->sb_bits = sb_bits; // block size by nnz
    hitsr->sk_bits = sk_bits; // kernel size by nnz
    hitsr->sc_bits = sc_bits; // chunk size by blocks
    sptIndex sk = (sptIndex)pow(2, sk_bits);   /* kernel edge length = 2^sk_bits */

    /* Kernel schedule: per mode, one index vector per kernel slice. */
    hitsr->kschr = (sptIndexVector**)malloc(nmodes * sizeof *hitsr->kschr);
    spt_CheckOSError(!hitsr->kschr, "HiSpTns New");
    for(sptIndex m = 0; m < nmodes; ++m) {
        sptIndex kernel_ndim = (ndims[m] + sk - 1)/sk;   /* ceil(ndims[m] / sk) */
        hitsr->kschr[m] = (sptIndexVector*)malloc(kernel_ndim * sizeof(*(hitsr->kschr[m])));
        spt_CheckOSError(!hitsr->kschr[m], "HiSpTns New");
        for(sptIndex i = 0; i < kernel_ndim; ++i) {
            result = sptNewIndexVector(&(hitsr->kschr[m][i]), 0, 0);
            spt_CheckError(result, "HiSpTns New", NULL);
        }
    }
    hitsr->nkiters = (sptIndex*)malloc(nmodes * sizeof *hitsr->nkiters);
    spt_CheckOSError(!hitsr->nkiters, "HiSpTns New");    /* fix: was unchecked */

    result = sptNewNnzIndexVector(&hitsr->kptr, 0, 0);
    spt_CheckError(result, "HiSpTns New", NULL);
    result = sptNewNnzIndexVector(&hitsr->cptr, 0, 0);
    spt_CheckError(result, "HiSpTns New", NULL);

    /* Balanced structures */
    hitsr->kschr_balanced = (sptIndexVector**)malloc(nmodes * sizeof *hitsr->kschr_balanced);
    spt_CheckOSError(!hitsr->kschr_balanced, "HiSpTns New");
    for(sptIndex m = 0; m < nmodes; ++m) {
        sptIndex kernel_ndim = (ndims[m] + sk - 1)/sk;
        hitsr->kschr_balanced[m] = (sptIndexVector*)malloc(kernel_ndim * sizeof(*(hitsr->kschr_balanced[m])));
        spt_CheckOSError(!hitsr->kschr_balanced[m], "HiSpTns New");
        for(sptIndex i = 0; i < kernel_ndim; ++i) {
            result = sptNewIndexVector(&(hitsr->kschr_balanced[m][i]), 0, 0);
            spt_CheckError(result, "HiSpTns New", NULL);
        }
    }
    hitsr->kschr_balanced_pos = (sptIndexVector**)malloc(nmodes * sizeof *hitsr->kschr_balanced_pos);
    spt_CheckOSError(!hitsr->kschr_balanced_pos, "HiSpTns New");
    for(sptIndex m = 0; m < nmodes; ++m) {
        sptIndex kernel_ndim = (ndims[m] + sk - 1)/sk;
        hitsr->kschr_balanced_pos[m] = (sptIndexVector*)malloc(kernel_ndim * sizeof(*(hitsr->kschr_balanced_pos[m])));
        spt_CheckOSError(!hitsr->kschr_balanced_pos[m], "HiSpTns New");
        for(sptIndex i = 0; i < kernel_ndim; ++i) {
            result = sptNewIndexVector(&(hitsr->kschr_balanced_pos[m][i]), 0, 0);
            spt_CheckError(result, "HiSpTns New", NULL);
        }
    }
    hitsr->nkpars = (sptIndex*)malloc(nmodes * sizeof(sptIndex));
    spt_CheckOSError(!hitsr->nkpars, "HiSpTns New");
    hitsr->kschr_rest = (sptIndexVector*)malloc(nmodes * sizeof *hitsr->kschr_rest);
    spt_CheckOSError(!hitsr->kschr_rest, "HiSpTns New");
    for(sptIndex m = 0; m < nmodes; ++m) {
        result = sptNewIndexVector(&(hitsr->kschr_rest[m]), 0, 0);
        spt_CheckError(result, "HiSpTns New", NULL);
    }
    result = sptNewNnzIndexVector(&hitsr->knnzs, 0, 0);
    spt_CheckError(result, "HiSpTns New", NULL);

    /* Block / element index storage and the values themselves. */
    result = sptNewNnzIndexVector(&hitsr->bptr, 0, 0);
    spt_CheckError(result, "HiSpTns New", NULL);
    hitsr->binds = malloc(nmodes * sizeof *hitsr->binds);
    spt_CheckOSError(!hitsr->binds, "HiSpTns New");
    for(i = 0; i < nmodes; ++i) {
        result = sptNewBlockIndexVector(&hitsr->binds[i], 0, 0);
        spt_CheckError(result, "HiSpTns New", NULL);
    }
    hitsr->einds = malloc(nmodes * sizeof *hitsr->einds);
    spt_CheckOSError(!hitsr->einds, "HiSpTns New");
    for(i = 0; i < nmodes; ++i) {
        result = sptNewElementIndexVector(&hitsr->einds[i], 0, 0);
        spt_CheckError(result, "HiSpTns New", NULL);
    }
    result = sptNewValueVector(&hitsr->values, 0, 0);
    spt_CheckError(result, "HiSpTns New", NULL);

    return 0;
}
/**
* Create a new sparse tensor in HiCOO format
* @param hitsr a pointer to an uninitialized sparse tensor
* @param nmodes number of modes the tensor will have
* @param ndims the dimension of each mode the tensor will have
*/
int sptNewSparseTensorHiCOO_NoNnz(
    sptSparseTensorHiCOO *hitsr,
    const sptIndex nmodes,
    const sptIndex ndims[],
    const sptElementIndex sb_bits,
    const sptElementIndex sk_bits,
    const sptElementIndex sc_bits)
{
    sptIndex i;
    int result;

    hitsr->nmodes = nmodes;
    /* nnz is not known yet for this variant; zero it instead of leaving it
       uninitialized (fix: previously indeterminate). */
    hitsr->nnz = 0;
    /* Default sort order is the identity permutation of the modes. */
    hitsr->sortorder = malloc(nmodes * sizeof hitsr->sortorder[0]);
    spt_CheckOSError(!hitsr->sortorder, "HiSpTns New");  /* fix: was unchecked */
    for(i = 0; i < nmodes; ++i) {
        hitsr->sortorder[i] = i;
    }
    hitsr->ndims = malloc(nmodes * sizeof *hitsr->ndims);
    spt_CheckOSError(!hitsr->ndims, "HiSpTns New");
    memcpy(hitsr->ndims, ndims, nmodes * sizeof *hitsr->ndims);

    /* Parameters */
    hitsr->sb_bits = sb_bits; // block size by nnz
    hitsr->sk_bits = sk_bits; // kernel size by nnz
    hitsr->sc_bits = sc_bits; // chunk size by blocks
    sptIndex sk = (sptIndex)pow(2, sk_bits);   /* kernel edge length = 2^sk_bits */

    /* Kernel schedule: per mode, one index vector per kernel slice. */
    hitsr->kschr = (sptIndexVector**)malloc(nmodes * sizeof *hitsr->kschr);
    spt_CheckOSError(!hitsr->kschr, "HiSpTns New");
    for(sptIndex m = 0; m < nmodes; ++m) {
        sptIndex kernel_ndim = (ndims[m] + sk - 1)/sk;   /* ceil(ndims[m] / sk) */
        hitsr->kschr[m] = (sptIndexVector*)malloc(kernel_ndim * sizeof(*(hitsr->kschr[m])));
        spt_CheckOSError(!hitsr->kschr[m], "HiSpTns New");
        for(sptIndex i = 0; i < kernel_ndim; ++i) {
            result = sptNewIndexVector(&(hitsr->kschr[m][i]), 0, 0);
            spt_CheckError(result, "HiSpTns New", NULL);
        }
    }
    hitsr->nkiters = (sptIndex*)malloc(nmodes * sizeof *hitsr->nkiters);
    spt_CheckOSError(!hitsr->nkiters, "HiSpTns New");    /* fix: was unchecked */

    /* This variant does not build the balanced scheduler structures; NULL the
       pointers so they are at least well-defined (fix: previously left
       uninitialized, which makes them impossible to free safely). */
    hitsr->kschr_balanced = NULL;
    hitsr->kschr_balanced_pos = NULL;
    hitsr->nkpars = NULL;
    hitsr->kschr_rest = NULL;

    result = sptNewNnzIndexVector(&hitsr->kptr, 0, 0);
    spt_CheckError(result, "HiSpTns New", NULL);
    result = sptNewNnzIndexVector(&hitsr->cptr, 0, 0);
    spt_CheckError(result, "HiSpTns New", NULL);
    result = sptNewNnzIndexVector(&hitsr->bptr, 0, 0);
    spt_CheckError(result, "HiSpTns New", NULL);

    /* Block / element index storage and the values themselves. */
    hitsr->binds = malloc(nmodes * sizeof *hitsr->binds);
    spt_CheckOSError(!hitsr->binds, "HiSpTns New");
    for(i = 0; i < nmodes; ++i) {
        result = sptNewBlockIndexVector(&hitsr->binds[i], 0, 0);
        spt_CheckError(result, "HiSpTns New", NULL);
    }
    hitsr->einds = malloc(nmodes * sizeof *hitsr->einds);
    spt_CheckOSError(!hitsr->einds, "HiSpTns New");
    for(i = 0; i < nmodes; ++i) {
        result = sptNewElementIndexVector(&hitsr->einds[i], 0, 0);
        spt_CheckError(result, "HiSpTns New", NULL);
    }
    result = sptNewValueVector(&hitsr->values, 0, 0);
    spt_CheckError(result, "HiSpTns New", NULL);

    return 0;
}
/**
* Release any memory the HiCOO sparse tensor is holding
* @param hitsr the tensor to release
*/
void sptFreeSparseTensorHiCOO(sptSparseTensorHiCOO *hitsr)
{
sptIndex i;
sptIndex nmodes = hitsr->nmodes;
/* Recompute the kernel edge length to know how many kschr vectors exist. */
sptIndex sk = (sptIndex)pow(2, hitsr->sk_bits);
/* Free the per-mode kernel-schedule vectors, then the arrays that held them. */
for(sptIndex m = 0; m < nmodes; ++m) {
sptIndex kernel_ndim = (hitsr->ndims[m] + sk - 1)/sk;
for(i = 0; i < kernel_ndim; ++i) {
sptFreeIndexVector(&(hitsr->kschr[m][i]));
}
free(hitsr->kschr[m]);
}
free(hitsr->kschr);
free(hitsr->nkiters);
/* NOTE(review): the balanced structures allocated by sptNewSparseTensorHiCOO
   (kschr_balanced, kschr_balanced_pos, nkpars, kschr_rest, knnzs) are never
   released here, so tensors built by that constructor leak them.  They cannot
   be freed unconditionally because sptNewSparseTensorHiCOO_NoNnz leaves those
   pointers uninitialized -- confirm ownership before adding the frees. */
sptFreeNnzIndexVector(&hitsr->kptr);
sptFreeNnzIndexVector(&hitsr->cptr);
sptFreeNnzIndexVector(&hitsr->bptr);
/* Per-mode block and element index vectors. */
for(i = 0; i < nmodes; ++i) {
sptFreeBlockIndexVector(&hitsr->binds[i]);
sptFreeElementIndexVector(&hitsr->einds[i]);
}
free(hitsr->binds);
free(hitsr->einds);
sptFreeValueVector(&hitsr->values);
/* Reset scalar fields so accidental reuse is detectable. */
hitsr->nmodes = 0;
hitsr->nnz = 0;
hitsr->sb_bits = 0;
hitsr->sk_bits = 0;
hitsr->sc_bits = 0;
free(hitsr->sortorder);
free(hitsr->ndims);
}
/* Squared Frobenius norm: the sum of squares of every stored value. */
double SparseTensorFrobeniusNormSquaredHiCOO(sptSparseTensorHiCOO const * const hitsr)
{
    sptValue const * const restrict data = hitsr->values.data;
    double accum = 0;
#ifdef PARTI_USE_OPENMP
#pragma omp parallel for reduction(+:accum)
#endif
    for(size_t idx = 0; idx < hitsr->nnz; ++idx) {
        const sptValue v = data[idx];
        accum += v * v;
    }
    return accum;
}
#include <math.h>
#include <stdio.h>
#include <string.h>

#include "timer.h"
#define NN 1024
#define NM 1024
float A[NN][NM];
float Anew[NN][NM];
/* Jacobi relaxation on an NN x NM mesh with a fixed left boundary of 1.0.
 * Iterates until the max update falls below tol or iter_max is reached. */
int main(int argc, char** argv)
{
    const int n = NN;
    const int m = NM;
    const int iter_max = 1000;
    const double tol = 1.0e-6;
    double error = 1.0;

    /* Start from an all-zero mesh. */
    memset(A, 0, n * m * sizeof(float));
    memset(Anew, 0, n * m * sizeof(float));

    /* Dirichlet boundary condition: the left column is held at 1.0. */
    for (int j = 0; j < n; j++)
    {
        A[j][0] = 1.0;
        Anew[j][0] = 1.0;
    }

    printf("Jacobi relaxation Calculation: %d x %d mesh\n", n, m);

    StartTimer();
    int iter = 0;

#pragma acc data copy(A), create(Anew)
    while ( error > tol && iter < iter_max )
    {
#pragma acc kernels
        {
            error = 0.0;

            /* fix: reduction(max:error) is required -- without it every OpenMP
               thread races on the shared 'error' accumulator. */
#pragma omp parallel for shared(m, n, Anew, A) reduction(max:error)
            for( int j = 1; j < n-1; j++)
            {
#pragma acc loop gang(8) vector(32)
                for( int i = 1; i < m-1; i++ )
                {
                    /* 4-point stencil average of the neighbors. */
                    Anew[j][i] = 0.25 * ( A[j][i+1] + A[j][i-1]
                                        + A[j-1][i] + A[j+1][i]);
                    error = fmax( error, fabs(Anew[j][i] - A[j][i]));
                }
            }

            /* Copy the interior of Anew back into A for the next sweep. */
#pragma omp parallel for shared(m, n, Anew, A)
            for( int j = 1; j < n-1; j++)
            {
#pragma acc loop gang(8) vector(32)
                for( int i = 1; i < m-1; i++ )
                {
                    A[j][i] = Anew[j][i];
                }
            }
        }

        if(iter % 100 == 0) printf("%5d, %0.6f\n", iter, error);
        iter++;
    }

    double runtime = GetTimer();
    printf(" total: %f s\n", runtime / 1000);

    return 0;   /* fix: previously fell off the end of main */
}
|
GB_binop__le_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__le_int32
// A.*B function (eWiseMult): GB_AemultB__le_int32
// A*D function (colscale): GB_AxD__le_int32
// D*A function (rowscale): GB_DxB__le_int32
// C+=B function (dense accum): GB_Cdense_accumB__le_int32
// C+=b function (dense accum): GB_Cdense_accumb__le_int32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__le_int32
// C=scalar+B GB_bind1st__le_int32
// C=scalar+B' GB_bind1st_tran__le_int32
// C=A+scalar GB_bind2nd__le_int32
// C=A'+scalar GB_bind2nd_tran__le_int32
// C type: bool
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x <= y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LE || GxB_NO_INT32 || GxB_NO_LE_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; cij = (aij <= bij).
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
GrB_Info GB_Cdense_ewise3_noaccum__le_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the actual loop lives in the shared template, specialized by the macros above
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, accumulating sparse B into dense C.
GrB_Info GB_Cdense_accumB__le_int32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// The accumulation template is compiled out (#if 0) for this operator, so
// this function is a stub that only reports success.  NOTE(review): the
// generator presumably never dispatches here for LE -- confirm upstream.
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, accumulating a scalar into a dense matrix.
GrB_Info GB_Cdense_accumb__le_int32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// The scalar-accumulation template is compiled out (#if 0) for this
// operator; the function is a success-reporting stub.
#if 0
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale each column of A by the diagonal matrix D, with
// cij = (aij <= djj); C has type bool.
GrB_Info GB_AxD__le_int32
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed view of C's values; the template writes the results here
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale each row of B by the diagonal matrix D, with
// cij = (dii <= bij); C has type bool.
GrB_Info GB_DxB__le_int32
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed view of C's values; the template writes the results here
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B with cij = (aij <= bij) on the
// intersection; entries present in only one input are copied through.
GrB_Info GB_AaddB__le_int32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix slice workspaces; allocated inside the template, freed below
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with cij = (aij <= bij), computed only
// on the intersection of the patterns of A and B.
GrB_Info GB_AemultB__le_int32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix slice workspaces; allocated inside the template, freed below
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x <= Bx [p]) for every entry present in the bitmap Bb.
GrB_Info GB_bind1st__le_int32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped scalar, input array, and output array
    bool *result = (bool *) Cx_output ;
    const int32_t scalar = (*((int32_t *) x_input)) ;
    const int32_t *entries = (const int32_t *) Bx_input ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only entries present in the bitmap are computed
        if (GBB (Bb, k))
        {
            result [k] = (scalar <= entries [k]) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] <= y) for every entry present in the bitmap Ab.
GrB_Info GB_bind2nd__le_int32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped input array, scalar, and output array
    bool *result = (bool *) Cx_output ;
    const int32_t *entries = (const int32_t *) Ax_input ;
    const int32_t scalar = (*((int32_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only entries present in the bitmap are computed
        if (GBB (Ab, k))
        {
            result [k] = (entries [k] <= scalar) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = (x <= aij) ; \
}
// C = op (x, A'): transpose A and apply z = (x <= aij) via the GB_CAST_OP
// macro defined just above this function.
GrB_Info GB_bind1st_tran__le_int32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// re-assert GB_ATYPE after the template (generated code; same type here)
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = (aij <= y) ; \
}
// C = op (A', y): transpose A and apply z = (aij <= y) via the GB_CAST_OP
// macro defined just above this function.
GrB_Info GB_bind2nd_tran__le_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__bset_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bset_uint64
// A.*B function (eWiseMult): GB_AemultB__bset_uint64
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__bset_uint64
// C+=b function (dense accum): GB_Cdense_accumb__bset_uint64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bset_uint64
// C=scalar+B GB_bind1st__bset_uint64
// C=scalar+B' GB_bind1st_tran__bset_uint64
// C=A+scalar GB_bind2nd__bset_uint64
// C=A'+scalar GB_bind2nd_tran__bset_uint64
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = GB_BITSET (aij, bij, uint64_t, 64)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = GB_BITSET (x, y, uint64_t, 64) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSET || GxB_NO_UINT64 || GxB_NO_BSET_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; cij = GB_BITSET (aij, bij).
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
GrB_Info GB_Cdense_ewise3_noaccum__bset_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the actual loop lives in the shared template, specialized by the macros above
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix with the
// bitset operator, cij = GB_BITSET (cij, bij).
GrB_Info GB_Cdense_accumB__bset_uint64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a single scalar into every entry of a dense matrix.
GrB_Info GB_Cdense_accumb__bset_uint64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable: the block above always returns (generated code)
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (node)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with cij = GB_BITSET (aij, bij) on the
// intersection; entries present in only one input are copied through.
GrB_Info GB_AaddB__bset_uint64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with cij = GB_BITSET (aij, bij),
// computed only on the intersection of the patterns of A and B.
GrB_Info GB_AemultB__bset_uint64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Apply z = bitset(x, bij) to every entry of B, with the scalar x bound as
// the first operand: Cx [p] = GB_BITSET (x, Bx [p]).  Cx and Bx may alias.
GrB_Info GB_bind1st__bset_uint64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    const uint64_t x = (*((const uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    // one pass over the anz entries, statically scheduled across threads
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        const uint64_t b_entry = Bx [p] ;
        Cx [p] = GB_BITSET (x, b_entry, uint64_t, 64) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply z = bitset(aij, y) to every entry of A, with the scalar y bound as
// the second operand: Cx [p] = GB_BITSET (Ax [p], y).  Cx and Ax may alias.
GrB_Info GB_bind2nd__bset_uint64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    const uint64_t y = (*((const uint64_t *) y_input)) ;
    // one pass over the anz entries, statically scheduled across threads
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        const uint64_t a_entry = Ax [p] ;
        Cx [p] = GB_BITSET (a_entry, y, uint64_t, 64) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c; here it binds the scalar x
// as the first operand of bitset.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = GB_BITSET (x, aij, uint64_t, 64) ; \
}
// C = op (x, A'): transpose A and apply bitset with x bound first.
// The transpose loop itself lives in GB_unop_transpose.c (phase 2 only).
GrB_Info GB_bind1st_tran__bset_uint64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code generated after this function
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c; here it binds the scalar y
// as the second operand of bitset.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = GB_BITSET (aij, y, uint64_t, 64) ; \
}
// C = op (A', y): transpose A and apply bitset with y bound second.
// The transpose loop itself lives in GB_unop_transpose.c (phase 2 only).
GrB_Info GB_bind2nd_tran__bset_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
keychain_fmt_plug.c | /* Mac OS X Keychain cracker patch for JtR. Hacked together during Summer of
* 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*
* * (c) 2004 Matt Johnston <matt @ ucc asn au>
* This code may be freely used and modified for any purpose. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_keychain;
#elif FMT_REGISTERS_H
john_register_one(&fmt_keychain);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <openssl/des.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#endif
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "johnswap.h"
#include "pbkdf2_hmac_sha1.h"
#include "jumbo.h"
#include "memdbg.h"
#define FORMAT_LABEL "keychain"
#define FORMAT_NAME "Mac OS X Keychain"
#define FORMAT_TAG "$keychain$*"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA1 3DES " SHA1_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA1 3DES 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 0
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(*salt_struct)
#define BINARY_ALIGN 1
#define SALT_ALIGN 1
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define SALTLEN 20
#define IVLEN 8
#define CTLEN 48
// Self-test vectors: each entry is "$keychain$*salt*iv*ciphertext" (hex
// fields) paired with the known plaintext password.  NULL terminates.
static struct fmt_tests keychain_tests[] = {
{"$keychain$*10f7445c8510fa40d9ef6b4e0f8c772a9d37e449*f3d19b2a45cdcccb*8c3c3b1c7d48a24dad4ccbd4fd794ca9b0b3f1386a0a4527f3548bfe6e2f1001804b082076641bbedbc9f3a7c33c084b", "password"},
// these were generated with pass_gen.pl. NOTE, they ALL have the data (which gets encrypted) which was decrypted from the above hash.
{"$keychain$*a88cd6fbaaf40bc5437eee015a0f95ab8ab70545*b12372b1b7cb5c1f*1f5c596bcdd015afc126bc86f42dd092cb9d531d14a0aafaa89283f1bebace60562d497332afbd952fd329cc864144ec", "password"},
{"$keychain$*23328e264557b93204dc825c46a25f7fb1e17d4a*19a9efde2ca98d30*6ac89184134758a95c61bd274087ae0cffcf49f433c7f91edea98bd4fd60094e2936d99e4d985dec98284379f23259c0", "hhh"},
{"$keychain$*927717d8509db73aa47c5e820e3a381928b5e048*eef33a4a1483ae45*a52691580f17e295b8c2320947968503c605b2784bfe4851077782139f0de46f71889835190c361870baa56e2f4e9e43", "JtR-Jumbo"},
{"$keychain$*1fab88d0b8ea1a3d303e0aef519796eb29e46299*3358b0e77d60892f*286f975dcd191024227514ed9939d0fa94034294ba1eca6d5c767559e75e944b5a2fcb54fd696be64c64f9d069ce628a", "really long password -----------------------------"},
/* Sample keychain from OS X El Capitan, November of 2015 */
{"$keychain$*3a473dd308b1713ddc76fc976758eb543779a228*570b762ec2b177d0*a1f491231412ff74344244db4d98b1dab6e40a8fc63a11f0d5cdabf97fce5c4fa8ae0a1f95d0398d37e3d45e9fa07aa7", "El Capitan"},
{NULL}
};
#if defined (_OPENMP)
static int omp_t = 1;          // OpenMP thread multiplier, set in init()
#endif
// Per-candidate plaintext buffers and crack flags, sized in init().
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *cracked;           // cracked[i] != 0 => saved_key[i] decrypts the salt's ct
// Parsed hash: PBKDF2 salt, 3DES IV, and the encrypted blob to test against.
static struct custom_salt {
unsigned char salt[SALTLEN];
unsigned char iv[IVLEN];
unsigned char ct[CTLEN];
} *salt_struct;
// Format init: scale the keys-per-crypt limits by the OpenMP thread count
// and allocate the per-candidate key and result arrays.
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;            // oversubscribe for better load balancing
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
cracked = mem_calloc(sizeof(*cracked), self->params.max_keys_per_crypt);
}
// Release the buffers allocated in init() (freed in reverse order).
static void done(void)
{
MEM_FREE(cracked);
MEM_FREE(saved_key);
}
// Validate a candidate hash line: it must carry the "$keychain$*" tag and
// exactly three '*'-separated hex fields of SALTLEN, IVLEN and CTLEN bytes.
// Returns 1 if the line is well-formed, 0 otherwise.
static int valid(char *ciphertext, struct fmt_main *self)
{
char *ctcopy, *keeptr, *p;
int extra;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
return 0;
ctcopy = strdup(ciphertext);   // strtokm mutates, so work on a copy
keeptr = ctcopy;
ctcopy += FORMAT_TAG_LEN;
if ((p = strtokm(ctcopy, "*")) == NULL) /* salt */
goto err;
if (hexlenl(p, &extra) != SALTLEN * 2 || extra)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* iv */
goto err;
if (hexlenl(p, &extra) != IVLEN * 2 || extra)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* ciphertext */
goto err;
if (hexlenl(p, &extra) != CTLEN * 2 || extra)
goto err;
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
// Parse a (pre-validated) hash line into a custom_salt: hex-decode the
// salt, IV and ciphertext fields.  The struct is allocated from the tiny
// allocator and is never freed individually.
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
int i;
char *p;
ctcopy += FORMAT_TAG_LEN; /* skip over "$keychain$*" */
salt_struct = mem_alloc_tiny(sizeof(struct custom_salt), MEM_ALIGN_WORD);
p = strtokm(ctcopy, "*");
for (i = 0; i < SALTLEN; i++)
salt_struct->salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "*");
for (i = 0; i < IVLEN; i++)
salt_struct->iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "*");
for (i = 0; i < CTLEN; i++)
salt_struct->ct[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
MEM_FREE(keeptr);
return (void *)salt_struct;
}
// Select the salt that subsequent crypt_all() calls will test against.
static void set_salt(void *salt)
{
salt_struct = (struct custom_salt *)salt;
}
// Try to decrypt the keychain blob with a derived 24-byte 3DES key.
// Decrypts CTLEN bytes in EDE3-CBC mode and checks the PKCS padding;
// returns 0 when the padding is plausible (key likely correct), -1 otherwise.
static int kcdecrypt(unsigned char *key, unsigned char *iv, unsigned char *data)
{
unsigned char out[CTLEN];
DES_cblock key1, key2, key3;
DES_cblock ivec;
DES_key_schedule ks1, ks2, ks3;
memset(out, 0, sizeof(out));
// split the 24-byte PBKDF2 output into the three single-DES keys
memcpy(key1, key, 8);
memcpy(key2, key + 8, 8);
memcpy(key3, key + 16, 8);
DES_set_key((DES_cblock *) key1, &ks1);
DES_set_key((DES_cblock *) key2, &ks2);
DES_set_key((DES_cblock *) key3, &ks3);
memcpy(ivec, iv, 8);
DES_ede3_cbc_encrypt(data, out, CTLEN, &ks1, &ks2, &ks3, &ivec, DES_DECRYPT);
/* possible bug here, is this assumption (pad of 4) always valid? */
if (out[47] != 4 || check_pkcs_pad(out, CTLEN, 8) < 0)
return -1;
return 0;
}
// Main cracking loop: derive a 24-byte key from each candidate password via
// PBKDF2-HMAC-SHA1 (1000 iterations, keychain salt) and mark cracked[i]
// when the 3DES decryption of the blob yields valid padding.
// Processes MAX_KEYS_PER_CRYPT candidates per iteration (SIMD lane group).
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
{
unsigned char master[MAX_KEYS_PER_CRYPT][32];
int i;
#ifdef SIMD_COEF_32
// vectorized path: derive keys for a whole SIMD group at once
int lens[MAX_KEYS_PER_CRYPT];
unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
lens[i] = strlen(saved_key[index+i]);
pin[i] = (unsigned char*)saved_key[index+i];
pout[i] = master[i];
}
pbkdf2_sha1_sse((const unsigned char**)pin, lens, salt_struct->salt, SALTLEN, 1000, pout, 24, 0);
#else
pbkdf2_sha1((unsigned char *)saved_key[index], strlen(saved_key[index]), salt_struct->salt, SALTLEN, 1000, master[0], 24, 0);
#endif
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
if (kcdecrypt(master[i], salt_struct->iv, salt_struct->ct) == 0)
cracked[index+i] = 1;
else
cracked[index+i] = 0;
}
}
return count;
}
// Report whether any candidate in the current batch cracked this salt.
static int cmp_all(void *binary, int count)
{
    int i = 0;
    while (i < count) {
        if (cracked[i])
            return 1;
        ++i;
    }
    return 0;
}
// Check a single candidate: result was already computed in crypt_all().
static int cmp_one(void *binary, int index)
{
return cracked[index];
}
// No further verification possible beyond the padding check (FMT_NOT_EXACT).
static int cmp_exact(char *source, int index)
{
return 1;
}
// Store a candidate password (truncated to PLAINTEXT_LENGTH) for slot index.
static void keychain_set_key(char *key, int index)
{
strnzcpy(saved_key[index], key, sizeof(*saved_key));
}
// Return the candidate password stored in slot index.
static char *get_key(int index)
{
return saved_key[index];
}
// Format registration table: parameters block followed by the method table
// that wires this plugin into the John the Ripper core.
struct fmt_main fmt_keychain = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT,
{ NULL },
{ FORMAT_TAG },
keychain_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
fmt_default_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
NULL,
set_salt,
keychain_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
9.norace7.c | // RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s
#include <omp.h>
#define M 200
#define N 200
double sum = 0.0;
#pragma omp threadprivate(sum)
// Test fixture for the LLOV race verifier: `sum` is threadprivate and the
// loop runs inside a `single` region, so no data race is expected (the
// FileCheck line below pins that verdict).  Do not "fix" this code.
int main() {
double A[M], B[M][N], C[N];
#pragma omp single copyprivate(sum)
for (int i = 0; i < M; i++) {
for (int j = 0; j < N; j++) {
sum += B[i][j] * C[j];
}
A[i] = sum;
sum = 0.0;
}
}
// CHECK: Region is Data Race Free.
// END
|
QLA_D3_c1_veq_V_dot_V.c | /**************** QLA_D3_c_veq_V_dot_V.c ********************/
#include <stdio.h>
#include <qla_config.h>
#include <qla_types.h>
#include <qla_random.h>
#include <qla_cmath.h>
#include <qla_d3.h>
#include <math.h>
// Empty asm markers delimiting the timed/profiled region; the volatile asm
// also prevents the compiler from moving code across the boundary.
static void start_slice(){
__asm__ __volatile__ ("");
}
static void end_slice(){
__asm__ __volatile__ ("");
}
// r = sum over i of conj(a[i]) . b[i] for n color-3 vectors: each thread
// accumulates a private partial sum over its share of the iteration space,
// then the partials are combined under a critical section.
void QLA_D3_c_veq_V_dot_V ( QLA_D_Complex *restrict r, QLA_D3_ColorVector *restrict a, QLA_D3_ColorVector *restrict b, int n)
{
start_slice();
#ifdef HAVE_XLC
#pragma disjoint(*r,*a,*b)
__alignx(16,r);
__alignx(16,a);
__alignx(16,b);
#endif
QLA_D_Complex sum;
QLA_c_eq_r(sum,0.);
#pragma omp parallel
{
// per-thread partial sum (complex reduction is done by hand)
QLA_D_Complex sum_local;
QLA_c_eq_r(sum_local,0.);
#pragma omp for
for(int i=0; i<n; i++) {
for(int i_c=0; i_c<3; i_c++) {
QLA_c_peq_ca_times_c(sum_local, QLA_D3_elem_V(a[i],i_c), QLA_D3_elem_V(b[i],i_c));
}
}
#pragma omp critical
{
QLA_c_peq_c(sum,sum_local);
}
}
QLA_c_eq_c(*r,sum);
end_slice();
}
|
residual_based_adjoint_bossak_scheme.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors:
//
#if !defined(KRATOS_RESIDUAL_BASED_ADJOINT_BOSSAK_SCHEME_H_INCLUDED)
#define KRATOS_RESIDUAL_BASED_ADJOINT_BOSSAK_SCHEME_H_INCLUDED
// System includes
#include <vector>
#include <string>
#include <unordered_set>
#include <functional>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/checks.h"
#include "includes/kratos_parameters.h"
#include "solving_strategies/schemes/scheme.h"
#include "response_functions/adjoint_response_function.h"
#include "utilities/variable_utils.h"
#include "utilities/indirect_scalar.h"
#include "utilities/adjoint_extensions.h"
namespace Kratos
{
///@name Kratos Classes
///@{
/// A scheme for dynamic adjoint equations, using Bossak time integration.
/**
* It can be used for either first- or second-order time derivatives. Elements
* and conditions must provide a specialization of AdjointExtensions via their
* data value container, which allows the scheme to operate independently of
* the variable arrangements in the element or condition.
*/
template <class TSparseSpace, class TDenseSpace>
class ResidualBasedAdjointBossakScheme : public Scheme<TSparseSpace, TDenseSpace>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedAdjointBossakScheme);
typedef Scheme<TSparseSpace, TDenseSpace> BaseType;
typedef typename BaseType::TSystemMatrixType SystemMatrixType;
typedef typename BaseType::TSystemVectorType SystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::DofsArrayType DofsArrayType;
///@}
///@name Life Cycle
///@{
/// Constructor.
/// Constructor: validates Settings against the defaults and stores the
/// Bossak alpha parameter and the response function to differentiate.
ResidualBasedAdjointBossakScheme(
Parameters Settings,
AdjointResponseFunction::Pointer pResponseFunction
) : mpResponseFunction(pResponseFunction)
{
Parameters default_parameters(R"({
"name" : "adjoint_bossak",
"scheme_type" : "bossak",
"alpha_bossak" : -0.3
})");
Settings.ValidateAndAssignDefaults(default_parameters);
mBossak.Alpha = Settings["alpha_bossak"].GetDouble();
}
/// Destructor.
// Nothing to release explicitly; members clean up via RAII.
~ResidualBasedAdjointBossakScheme() override
{
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/// One-time setup: allocates one scratch matrix/vector per OpenMP thread,
/// zeroes the nodal neighbour counters and publishes BOSSAK_ALPHA to the
/// process info.
void Initialize(ModelPart& rModelPart) override
{
KRATOS_TRY;
BaseType::Initialize(rModelPart);
// Allocate auxiliary memory.
int num_threads = OpenMPUtils::GetNumThreads();
mLeftHandSide.resize(num_threads);
mResponseGradient.resize(num_threads);
mFirstDerivsLHS.resize(num_threads);
mFirstDerivsResponseGradient.resize(num_threads);
mSecondDerivsLHS.resize(num_threads);
mSecondDerivsResponseGradient.resize(num_threads);
mAdjointValuesVector.resize(num_threads);
mAdjointIndirectVector2.resize(num_threads);
mAdjointIndirectVector3.resize(num_threads);
mAuxAdjointIndirectVector1.resize(num_threads);
InitializeNodeNeighbourCount(rModelPart.Nodes());
rModelPart.GetProcessInfo()[BOSSAK_ALPHA] = mBossak.Alpha;
KRATOS_CATCH("");
}
/// Per-step setup: recomputes the Bossak coefficients from the current
/// time step and refreshes the nodal neighbour-element counts.
void InitializeSolutionStep(ModelPart& rModelPart,
SystemMatrixType& rA,
SystemVectorType& rDx,
SystemVectorType& rb) override
{
KRATOS_TRY;
BaseType::InitializeSolutionStep(rModelPart, rA, rDx, rb);
const auto& r_current_process_info = rModelPart.GetProcessInfo();
mBossak = CalculateBossakConstants(mBossak.Alpha, GetTimeStep(r_current_process_info));
this->CalculateNodeNeighbourCount(rModelPart);
KRATOS_CATCH("");
}
/// Per-step teardown: updates the auxiliary adjoint variable used by the
/// next (earlier-in-time) step.
void FinalizeSolutionStep(ModelPart& rModelPart,
SystemMatrixType& rA,
SystemVectorType& rDx,
SystemVectorType& rb) override
{
KRATOS_TRY;
BaseType::FinalizeSolutionStep(rModelPart, rA, rDx, rb);
this->UpdateAuxiliaryVariable(rModelPart);
KRATOS_CATCH("");
}
/// Applies the solved increment rDx to the adjoint dofs, then propagates
/// the time-integration adjoint variables.
void Update(ModelPart& rModelPart,
DofsArrayType& rDofSet,
SystemMatrixType& rA,
SystemVectorType& rDx,
SystemVectorType& rb) override
{
KRATOS_TRY;
// Update degrees of freedom: adjoint variables associated to the
// residual of the physical problem.
this->mpDofUpdater->UpdateDofs(rDofSet, rDx);
// Update adjoint variables associated to time integration.
this->UpdateTimeSchemeAdjoints(rModelPart);
KRATOS_CATCH("");
}
/// Assembles the local adjoint LHS/RHS for one element by accumulating the
/// zeroth-, first- and second-derivative contributions, the previous-step
/// terms and the residual term, then fills the equation id vector.
void CalculateSystemContributions(Element::Pointer pCurrentElement,
LocalSystemMatrixType& rLHS_Contribution,
LocalSystemVectorType& rRHS_Contribution,
Element::EquationIdVectorType& rEquationId,
ProcessInfo& rCurrentProcessInfo) override
{
KRATOS_TRY;
auto& r_current_element = *pCurrentElement;
const auto k = OpenMPUtils::ThisThread();
r_current_element.GetValuesVector(mAdjointValuesVector[k]);
const auto local_size = mAdjointValuesVector[k].size();
// size the outputs to the element's local system before accumulating
if (rRHS_Contribution.size() != local_size)
{
rRHS_Contribution.resize(local_size, false);
}
if (rLHS_Contribution.size1() != local_size || rLHS_Contribution.size2() != local_size)
{
rLHS_Contribution.resize(local_size, local_size, false);
}
this->CheckAndResizeThreadStorage(local_size);
this->CalculateGradientContributions(r_current_element, rLHS_Contribution,
rRHS_Contribution, rCurrentProcessInfo);
this->CalculateFirstDerivativeContributions(
r_current_element, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo);
this->CalculateSecondDerivativeContributions(
r_current_element, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo);
this->CalculatePreviousTimeStepContributions(
r_current_element, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo);
this->CalculateResidualLocalContributions(
r_current_element, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo);
r_current_element.EquationIdVector(rEquationId, rCurrentProcessInfo);
KRATOS_CATCH("");
}
/// LHS-only variant: delegates to CalculateSystemContributions with a
/// throw-away RHS vector.
void Calculate_LHS_Contribution(Element::Pointer pCurrentElement,
LocalSystemMatrixType& rLHS_Contribution,
Element::EquationIdVectorType& rEquationId,
ProcessInfo& rCurrentProcessInfo) override
{
KRATOS_TRY;
LocalSystemVectorType RHS_Contribution;
CalculateSystemContributions(pCurrentElement, rLHS_Contribution, RHS_Contribution,
rEquationId, rCurrentProcessInfo);
KRATOS_CATCH("");
}
/// Assembles the local system for one condition.
/// Fix: the original never filled rEquationId, so the builder assembled the
/// condition's contribution with stale/empty equation ids; mirror the
/// element path and query the condition's EquationIdVector.
void Condition_CalculateSystemContributions(Condition::Pointer pCurrentCondition,
                                            LocalSystemMatrixType& rLHS_Contribution,
                                            LocalSystemVectorType& rRHS_Contribution,
                                            Condition::EquationIdVectorType& rEquationId,
                                            ProcessInfo& rCurrentProcessInfo) override
{
    KRATOS_TRY;
    // NOT TESTED !!!
    pCurrentCondition->CalculateLocalSystem(
        rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo);
    pCurrentCondition->EquationIdVector(rEquationId, rCurrentProcessInfo);
    KRATOS_CATCH("");
}
/// LHS-only variant for conditions: delegates with a throw-away RHS vector.
void Condition_Calculate_LHS_Contribution(Condition::Pointer pCurrentCondition,
LocalSystemMatrixType& rLHS_Contribution,
Condition::EquationIdVectorType& rEquationId,
ProcessInfo& rCurrentProcessInfo) override
{
KRATOS_TRY;
LocalSystemVectorType RHS_Contribution;
Condition_CalculateSystemContributions(pCurrentCondition,
rLHS_Contribution, RHS_Contribution,
rEquationId, rCurrentProcessInfo);
KRATOS_CATCH("");
}
/// Frees the dof-updater's internal storage.
void Clear() override
{
this->mpDofUpdater->Clear();
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
/// Turn back information as a string (standard Kratos diagnostics trio).
std::string Info() const override
{
return "ResidualBasedAdjointBossakScheme";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << Info();
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
rOStream << Info();
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
// Bossak time-integration coefficients; the C0..C7 combinations of Alpha,
// Beta, Gamma and the time step are derived in CalculateBossakConstants().
struct BossakConstants
{
double Alpha;
double Beta;
double Gamma;
double C0;
double C1;
double C2;
double C3;
double C4;
double C5;
double C6;
double C7;
};
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
BossakConstants mBossak;           // current Bossak coefficients (per step)
typename TSparseSpace::DofUpdaterPointerType mpDofUpdater =
TSparseSpace::CreateDofUpdater();
AdjointResponseFunction::Pointer mpResponseFunction;
// One scratch slot per OpenMP thread, sized lazily by
// CheckAndResizeThreadStorage().
std::vector<LocalSystemMatrixType> mLeftHandSide;
std::vector<LocalSystemVectorType> mResponseGradient;
std::vector<LocalSystemMatrixType> mFirstDerivsLHS;
std::vector<LocalSystemVectorType> mFirstDerivsResponseGradient;
std::vector<LocalSystemMatrixType> mSecondDerivsLHS;
std::vector<LocalSystemVectorType> mSecondDerivsResponseGradient;
std::vector<LocalSystemVectorType> mAdjointValuesVector;
// Indirect views onto nodal adjoint variables (per thread).
std::vector<std::vector<IndirectScalar<double>>> mAdjointIndirectVector2;
std::vector<std::vector<IndirectScalar<double>>> mAdjointIndirectVector3;
std::vector<std::vector<IndirectScalar<double>>> mAuxAdjointIndirectVector1;
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/// Initializes the local system with the element LHS and the negative
/// response gradient (overwrites, does not accumulate).
void CalculateGradientContributions(Element& rCurrentElement,
LocalSystemMatrixType& rLHS_Contribution,
LocalSystemVectorType& rRHS_Contribution,
ProcessInfo& rCurrentProcessInfo)
{
int k = OpenMPUtils::ThisThread();
rCurrentElement.CalculateLeftHandSide(mLeftHandSide[k], rCurrentProcessInfo);
this->mpResponseFunction->CalculateGradient(
rCurrentElement, mLeftHandSide[k], mResponseGradient[k], rCurrentProcessInfo);
noalias(rLHS_Contribution) = mLeftHandSide[k];
noalias(rRHS_Contribution) = -1. * mResponseGradient[k];
}
/// Accumulates the first-time-derivative terms, scaled by C6.
void CalculateFirstDerivativeContributions(Element& rCurrentElement,
LocalSystemMatrixType& rLHS_Contribution,
LocalSystemVectorType& rRHS_Contribution,
ProcessInfo& rCurrentProcessInfo)
{
int k = OpenMPUtils::ThisThread();
rCurrentElement.CalculateFirstDerivativesLHS(mFirstDerivsLHS[k], rCurrentProcessInfo);
mpResponseFunction->CalculateFirstDerivativesGradient(
rCurrentElement, mFirstDerivsLHS[k],
mFirstDerivsResponseGradient[k], rCurrentProcessInfo);
noalias(rLHS_Contribution) += mBossak.C6 * mFirstDerivsLHS[k];
noalias(rRHS_Contribution) -=
mBossak.C6 * mFirstDerivsResponseGradient[k];
}
/// Accumulates the second-time-derivative terms, scaled by C7; the mass
/// part carries the Bossak (1 - Alpha) weighting.
void CalculateSecondDerivativeContributions(Element& rCurrentElement,
LocalSystemMatrixType& rLHS_Contribution,
LocalSystemVectorType& rRHS_Contribution,
ProcessInfo& rCurrentProcessInfo)
{
int k = OpenMPUtils::ThisThread();
auto& r_response_function = *(this->mpResponseFunction);
rCurrentElement.CalculateSecondDerivativesLHS(mSecondDerivsLHS[k], rCurrentProcessInfo);
mSecondDerivsLHS[k] *= (1.0 - mBossak.Alpha);
r_response_function.CalculateSecondDerivativesGradient(
rCurrentElement, mSecondDerivsLHS[k],
mSecondDerivsResponseGradient[k], rCurrentProcessInfo);
noalias(rLHS_Contribution) += mBossak.C7 * mSecondDerivsLHS[k];
noalias(rRHS_Contribution) -=
mBossak.C7 * mSecondDerivsResponseGradient[k];
}
/// Adds the old-step adjoint terms to the RHS, nodally weighted by
/// 1/NUMBER_OF_NEIGHBOUR_ELEMENTS so contributions shared among elements
/// are not double counted.
void CalculatePreviousTimeStepContributions(Element& rCurrentElement,
LocalSystemMatrixType& rLHS_Contribution,
LocalSystemVectorType& rRHS_Contribution,
ProcessInfo& rCurrentProcessInfo)
{
const auto& r_geometry = rCurrentElement.GetGeometry();
const auto k = OpenMPUtils::ThisThread();
auto& r_extensions = *rCurrentElement.GetValue(ADJOINT_EXTENSIONS);
unsigned local_index = 0;
for (unsigned i_node = 0; i_node < r_geometry.PointsNumber(); ++i_node)
{
auto& r_node = r_geometry[i_node];
// step-1 (previous adjoint step) values of the nodal adjoints
r_extensions.GetFirstDerivativesVector(i_node, mAdjointIndirectVector2[k], 1);
r_extensions.GetSecondDerivativesVector(i_node, mAdjointIndirectVector3[k], 1);
r_extensions.GetAuxiliaryVector(i_node, mAuxAdjointIndirectVector1[k], 1);
const double weight = 1.0 / r_node.GetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS);
for (unsigned d = 0; d < mAdjointIndirectVector2[k].size(); ++d)
{
rRHS_Contribution[local_index] +=
weight *
(mBossak.C7 * mAuxAdjointIndirectVector1[k][d] +
mBossak.C4 * mAdjointIndirectVector2[k][d] +
mBossak.C5 * mAdjointIndirectVector3[k][d]);
++local_index;
}
}
}
/// Converts the assembled RHS into a residual by subtracting
/// LHS * (current adjoint values).
void CalculateResidualLocalContributions(Element& rCurrentElement,
LocalSystemMatrixType& rLHS_Contribution,
LocalSystemVectorType& rRHS_Contribution,
ProcessInfo& rCurrentProcessInfo)
{
int k = OpenMPUtils::ThisThread();
auto& r_residual_adjoint = mAdjointValuesVector[k];
rCurrentElement.GetValuesVector(r_residual_adjoint);
noalias(rRHS_Contribution) -= prod(rLHS_Contribution, r_residual_adjoint);
}
/// Ensures NUMBER_OF_NEIGHBOUR_ELEMENTS exists on every node before any
/// parallel access touches it.
void InitializeNodeNeighbourCount(ModelPart::NodesContainerType& rNodes)
{
// This loop should not be omp parallel
// The operation is not threadsafe if the value is uninitialized
for (auto& r_node : rNodes)
r_node.SetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS, 0.0);
}
/// Recounts, for each node, how many elements reference it; the count is
/// used as an assembly weight.  Finishes with an MPI assemble so shared
/// nodes see the global total.
void CalculateNodeNeighbourCount(ModelPart& rModelPart)
{
// Calculate number of neighbour elements for each node.
const int num_nodes = rModelPart.NumberOfNodes();
#pragma omp parallel for
for (int i = 0; i < num_nodes; ++i)
{
Node<3>& r_node = *(rModelPart.Nodes().begin() + i);
r_node.SetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS, 0.0);
}
const int num_elements = rModelPart.NumberOfElements();
#pragma omp parallel for
for (int i = 0; i < num_elements; ++i)
{
Element& r_element = *(rModelPart.Elements().begin() + i);
Geometry<Node<3>>& r_geometry = r_element.GetGeometry();
for (unsigned j = 0; j < r_geometry.PointsNumber(); ++j)
{
double& r_num_neighbour =
r_geometry[j].GetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS);
// nodes are shared between elements -> atomic increment
#pragma omp atomic
r_num_neighbour += 1.0;
}
}
rModelPart.GetCommunicator().AssembleNonHistoricalData(NUMBER_OF_NEIGHBOUR_ELEMENTS);
}
/// Recomputes the nodal time-scheme adjoint variables (lambda2/lambda3):
/// zeroes them, then assembles per-element contributions (current-step
/// gradients plus Bossak-weighted old-step values) under per-node locks,
/// and finishes with an MPI assemble.
void UpdateTimeSchemeAdjoints(ModelPart& rModelPart)
{
KRATOS_TRY;
auto lambda2_vars = GatherVariables(
rModelPart.Elements(), [](const AdjointExtensions& rExtensions,
std::vector<const VariableData*>& rVec) {
rExtensions.GetFirstDerivativesVariables(rVec);
});
auto lambda3_vars = GatherVariables(
rModelPart.Elements(), [](const AdjointExtensions& rExtensions,
std::vector<const VariableData*>& rVec) {
return rExtensions.GetSecondDerivativesVariables(rVec);
});
SetToZero_AdjointVars(lambda2_vars, rModelPart.Nodes());
SetToZero_AdjointVars(lambda3_vars, rModelPart.Nodes());
const int number_of_elements = rModelPart.NumberOfElements();
ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
// private per-thread scratch (declared outside so the pragma can list them)
Vector adjoint2_aux, adjoint3_aux;
std::vector<IndirectScalar<double>> adjoint2_old, adjoint3_old;
#pragma omp parallel for private(adjoint2_aux, adjoint3_aux, adjoint2_old, adjoint3_old)
for (int i = 0; i < number_of_elements; ++i)
{
Element& r_element = *(rModelPart.ElementsBegin() + i);
const int k = OpenMPUtils::ThisThread();
r_element.GetValuesVector(mAdjointValuesVector[k]);
this->CheckAndResizeThreadStorage(mAdjointValuesVector[k].size());
r_element.CalculateFirstDerivativesLHS(mFirstDerivsLHS[k], r_process_info);
this->mpResponseFunction->CalculateFirstDerivativesGradient(
r_element, mFirstDerivsLHS[k], mFirstDerivsResponseGradient[k], r_process_info);
r_element.CalculateSecondDerivativesLHS(mSecondDerivsLHS[k], r_process_info);
mSecondDerivsLHS[k] *= (1.0 - mBossak.Alpha);
this->mpResponseFunction->CalculateSecondDerivativesGradient(
r_element, mSecondDerivsLHS[k], mSecondDerivsResponseGradient[k], r_process_info);
if (adjoint2_aux.size() != mFirstDerivsResponseGradient[k].size())
adjoint2_aux.resize(mFirstDerivsResponseGradient[k].size(), false);
noalias(adjoint2_aux) = -mFirstDerivsResponseGradient[k] -
prod(mFirstDerivsLHS[k], mAdjointValuesVector[k]);
if (adjoint3_aux.size() != mSecondDerivsResponseGradient[k].size())
adjoint3_aux.resize(mSecondDerivsResponseGradient[k].size(), false);
noalias(adjoint3_aux) = -mSecondDerivsResponseGradient[k] -
prod(mSecondDerivsLHS[k], mAdjointValuesVector[k]);
auto& r_extensions = *r_element.GetValue(ADJOINT_EXTENSIONS);
// Assemble the contributions to the corresponding nodal unknowns.
unsigned local_index = 0;
Geometry<Node<3>>& r_geometry = r_element.GetGeometry();
for (unsigned i_node = 0; i_node < r_geometry.PointsNumber(); ++i_node)
{
r_extensions.GetFirstDerivativesVector(
i_node, mAdjointIndirectVector2[k], 0);
r_extensions.GetSecondDerivativesVector(
i_node, mAdjointIndirectVector3[k], 0);
r_extensions.GetFirstDerivativesVector(i_node, adjoint2_old, 1);
r_extensions.GetSecondDerivativesVector(i_node, adjoint3_old, 1);
r_extensions.GetAuxiliaryVector(i_node, mAuxAdjointIndirectVector1[k], 1);
Node<3>& r_node = r_geometry[i_node];
const double weight = 1.0 / r_node.GetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS);
// nodes are shared between elements/threads -> lock during accumulation
r_node.SetLock();
for (unsigned d = 0; d < mAdjointIndirectVector2[k].size(); ++d)
{
mAdjointIndirectVector2[k][d] += adjoint2_aux[local_index];
mAdjointIndirectVector2[k][d] += mBossak.C0 * weight * adjoint2_old[d];
mAdjointIndirectVector2[k][d] += mBossak.C1 * weight * adjoint3_old[d];
mAdjointIndirectVector3[k][d] += adjoint3_aux[local_index];
mAdjointIndirectVector3[k][d] += mBossak.C2 * weight * adjoint2_old[d];
mAdjointIndirectVector3[k][d] += mBossak.C3 * weight * adjoint3_old[d];
mAdjointIndirectVector3[k][d] +=
weight * mAuxAdjointIndirectVector1[k][d];
++local_index;
}
r_node.UnSetLock();
}
}
// Finalize global assembly
Assemble_AdjointVars(lambda2_vars, rModelPart.GetCommunicator());
Assemble_AdjointVars(lambda3_vars, rModelPart.GetCommunicator());
KRATOS_CATCH("");
}
/// Recomputes the nodal auxiliary adjoint variable used by the next
/// (earlier-in-time) step: zeroes it, subtracts per-element
/// Alpha-weighted second-derivative terms under per-node locks, and
/// finishes with an MPI assemble.
void UpdateAuxiliaryVariable(ModelPart& rModelPart)
{
KRATOS_TRY;
auto aux_vars = GatherVariables(
rModelPart.Elements(), [](const AdjointExtensions& rExtensions,
std::vector<const VariableData*>& rOut) {
return rExtensions.GetAuxiliaryVariables(rOut);
});
SetToZero_AdjointVars(aux_vars, rModelPart.Nodes());
// Loop over elements to assemble the remaining terms
const int number_of_elements = rModelPart.NumberOfElements();
ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
Vector aux_adjoint_vector;
#pragma omp parallel for private(aux_adjoint_vector)
for (int i = 0; i < number_of_elements; ++i)
{
Element& r_element = *(rModelPart.ElementsBegin() + i);
const int k = OpenMPUtils::ThisThread();
r_element.GetValuesVector(mAdjointValuesVector[k]);
this->CheckAndResizeThreadStorage(mAdjointValuesVector[k].size());
r_element.CalculateSecondDerivativesLHS(mSecondDerivsLHS[k], r_process_info);
mSecondDerivsLHS[k] *= mBossak.Alpha;
this->mpResponseFunction->CalculateSecondDerivativesGradient(
r_element, mSecondDerivsLHS[k], mSecondDerivsResponseGradient[k], r_process_info);
if (aux_adjoint_vector.size() != mSecondDerivsLHS[k].size1())
aux_adjoint_vector.resize(mSecondDerivsLHS[k].size1(), false);
noalias(aux_adjoint_vector) =
prod(mSecondDerivsLHS[k], mAdjointValuesVector[k]) +
mSecondDerivsResponseGradient[k];
auto& r_extensions = *r_element.GetValue(ADJOINT_EXTENSIONS);
// Assemble the contributions to the corresponding nodal unknowns.
unsigned local_index = 0;
Geometry<Node<3>>& r_geometry = r_element.GetGeometry();
for (unsigned i_node = 0; i_node < r_geometry.PointsNumber(); ++i_node)
{
Node<3>& r_node = r_geometry[i_node];
r_extensions.GetAuxiliaryVector(i_node, mAuxAdjointIndirectVector1[k], 0);
// nodes are shared between elements/threads -> lock during accumulation
r_node.SetLock();
for (unsigned d = 0; d < mAuxAdjointIndirectVector1[k].size(); ++d)
{
mAuxAdjointIndirectVector1[k][d] -= aux_adjoint_vector[local_index];
++local_index;
}
r_node.UnSetLock();
}
}
// Finalize global assembly
Assemble_AdjointVars(aux_vars, rModelPart.GetCommunicator());
KRATOS_CATCH("");
}
void CheckAndResizeThreadStorage(unsigned SystemSize)
{
const int k = OpenMPUtils::ThisThread();
if (mLeftHandSide[k].size1() != SystemSize || mLeftHandSide[k].size2() != SystemSize)
{
mLeftHandSide[k].resize(SystemSize, SystemSize, false);
}
if (mFirstDerivsLHS[k].size1() != SystemSize || mFirstDerivsLHS[k].size2() != SystemSize)
{
mFirstDerivsLHS[k].resize(SystemSize, SystemSize, false);
}
if (mSecondDerivsLHS[k].size1() != SystemSize || mSecondDerivsLHS[k].size2() != SystemSize)
{
mSecondDerivsLHS[k].resize(SystemSize, SystemSize, false);
}
if (mResponseGradient[k].size() != SystemSize)
{
mResponseGradient[k].resize(SystemSize, false);
}
if (mFirstDerivsResponseGradient[k].size() != SystemSize)
{
mFirstDerivsResponseGradient[k].resize(SystemSize, false);
}
if (mSecondDerivsResponseGradient[k].size() != SystemSize)
{
mSecondDerivsResponseGradient[k].resize(SystemSize, false);
}
}
// Derives the Newmark-style integration constants from the Bossak alpha
// and the (forward) time step, exactly as in the classic Bossak scheme.
static BossakConstants CalculateBossakConstants(double Alpha, double DeltaTime)
{
    const double alpha = Alpha;
    const double beta = 0.25 * (1.0 - alpha) * (1.0 - alpha);
    const double gamma = 0.5 - alpha;

    BossakConstants bc;
    bc.Alpha = alpha;
    bc.Beta = beta;
    bc.Gamma = gamma;
    // Velocity / acceleration update coefficients.
    bc.C0 = 1.0 - gamma / beta;
    bc.C1 = -1.0 / (beta * DeltaTime);
    bc.C2 = (1.0 - 0.5 * gamma / beta) * DeltaTime;
    bc.C3 = (1.0 - 0.5 / beta);
    bc.C4 = (beta - gamma * (gamma + 0.5)) / (DeltaTime * beta * beta);
    bc.C5 = -1.0 * (gamma + 0.5) / (DeltaTime * DeltaTime * beta * beta);
    bc.C6 = gamma / (beta * DeltaTime);
    bc.C7 = 1.0 / (DeltaTime * DeltaTime * beta);
    return bc;
}
// Returns the (positive) forward time-step size: previous step TIME minus
// current TIME, since the adjoint problem marches backwards in time.
// Throws if the difference is not positive.
static double GetTimeStep(const ProcessInfo& rCurrentProcessInfo)
{
const ProcessInfo& r_last_process_info =
rCurrentProcessInfo.GetPreviousSolutionStepInfo(1);
// Note: solution is backwards in time, but we still want a positive
// time step
// (it is the time step in the "forward" Bossak scheme).
double time_step =
r_last_process_info.GetValue(TIME) - rCurrentProcessInfo.GetValue(TIME);
KRATOS_ERROR_IF(time_step <= 0.0)
<< "Backwards in time solution is not decreasing time from last "
"step."
<< std::endl;
return time_step;
}
// Hash functor for VariableData pointers: hashes the variable's Key(),
// so distinct pointers to the same variable hash identically.
struct Hash
{
std::size_t operator()(const VariableData* const& p) const
{
return p->Key();
}
};
// Equality functor for VariableData pointers: compares the pointed-to
// variables, not the pointer values.
struct Pred
{
bool operator()(const VariableData* const l, const VariableData* const r) const
{
return *l == *r;
}
};
// Gathers variables needed for assembly.
// Runs GetLocalVars on every element's AdjointExtensions and returns the
// union of the reported variables.  Each thread collects into its own set
// (Hash/Pred deduplicate by variable, not pointer); sets merge serially.
static std::vector<const VariableData*> GatherVariables(
const ModelPart::ElementsContainerType& rElements,
std::function<void(const AdjointExtensions&, std::vector<const VariableData*>&)> GetLocalVars)
{
KRATOS_TRY;
const int num_threads = OpenMPUtils::GetNumThreads();
std::vector<const VariableData*> local_vars;
std::vector<std::unordered_set<const VariableData*, Hash, Pred>> thread_vars(num_threads);
// local_vars is private: every thread reuses its own scratch vector.
#pragma omp parallel for private(local_vars)
for (int i = 0; i < static_cast<int>(rElements.size()); ++i)
{
auto& r_element = *(rElements.begin() + i);
GetLocalVars(*r_element.GetValue(ADJOINT_EXTENSIONS), local_vars);
const int k = OpenMPUtils::ThisThread();
thread_vars[k].insert(local_vars.begin(), local_vars.end());
}
// Serial union of the per-thread sets.
std::unordered_set<const VariableData*, Hash, Pred> all_vars;
for (int i = 0; i < num_threads; ++i)
{
all_vars.insert(thread_vars[i].begin(), thread_vars[i].end());
}
return std::vector<const VariableData*>{all_vars.begin(), all_vars.end()};
KRATOS_CATCH("");
}
// Zeroes the historical nodal values of the given variables.
// Each VariableData is resolved by name: array_1d<double,3> is tried
// before double (order matters); unknown names are a hard error.
static void SetToZero_AdjointVars(const std::vector<const VariableData*>& rVariables,
ModelPart::NodesContainerType& rNodes)
{
KRATOS_TRY;
for (auto p_variable_data : rVariables)
{
if (KratosComponents<Variable<array_1d<double, 3>>>::Has(
p_variable_data->Name()))
{
const auto& r_variable =
KratosComponents<Variable<array_1d<double, 3>>>::Get(
p_variable_data->Name());
VariableUtils().SetHistoricalVariableToZero(r_variable, rNodes);
}
else if (KratosComponents<Variable<double>>::Has(p_variable_data->Name()))
{
const auto& r_variable =
KratosComponents<Variable<double>>::Get(p_variable_data->Name());
VariableUtils().SetHistoricalVariableToZero(r_variable, rNodes);
}
else
{
KRATOS_ERROR << "Variable \"" << p_variable_data->Name()
<< "\" not found!\n";
}
}
KRATOS_CATCH("");
}
// Sums the current nodal values of the given variables across partitions
// via Communicator::AssembleCurrentData.  Variable name resolution mirrors
// SetToZero_AdjointVars: array_1d<double,3> first, then double, else error.
static void Assemble_AdjointVars(const std::vector<const VariableData*>& rVariables,
Communicator& rComm)
{
KRATOS_TRY;
for (auto p_variable_data : rVariables)
{
if (KratosComponents<Variable<array_1d<double, 3>>>::Has(
p_variable_data->Name()))
{
const auto& r_variable =
KratosComponents<Variable<array_1d<double, 3>>>::Get(
p_variable_data->Name());
rComm.AssembleCurrentData(r_variable);
}
else if (KratosComponents<Variable<double>>::Has(p_variable_data->Name()))
{
const auto& r_variable =
KratosComponents<Variable<double>>::Get(p_variable_data->Name());
rComm.AssembleCurrentData(r_variable);
}
else
{
KRATOS_ERROR << "Variable \"" << p_variable_data->Name()
<< "\" not found!\n";
}
}
KRATOS_CATCH("");
}
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ResidualBasedAdjointBossakScheme */
///@}
///@name Type Definitions
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_ADJOINT_BOSSAK_SCHEME_H_INCLUDED defined */
|
OMPDenseMatrix.c | #include <omp.h>
#include <stdlib.h>
#include <stdio.h>
/*
 * PageRank by K repeated dense matrix-vector products (OpenMP).
 * Usage: prog <numPage>
 *
 * Fixes over the original:
 *  - argv[1] and the mallocs are now checked (were unchecked).
 *  - The "#pragma omp master" / "#pragma end master" pair around the
 *    copy-back loop was removed: master is only meaningful inside a
 *    parallel region (the loop here is serial code), and "end master"
 *    is not an OpenMP directive at all.
 *  - The three buffers are freed before exit.
 */
int main (int argc, char* argv[]) {
    //Declarations
    float dampingFactor = 0.15;
    int numPage, totalSize;
    float *sArray, *pageRank, *yArray;
    int i, j, K, k;
    double startTime, endTime;

    if (argc < 2 || (numPage = atoi(argv[1])) <= 0) {
        fprintf(stderr, "usage: %s <numPage>\n", argv[0]);
        return 1;
    }
    totalSize = numPage * numPage;
    sArray = (float*)malloc(totalSize*sizeof(float));
    pageRank = (float*)malloc(numPage*sizeof(float));
    yArray = (float*)malloc(numPage*sizeof(float));
    if (sArray == NULL || pageRank == NULL || yArray == NULL) {
        fprintf(stderr, "out of memory\n");
        free(sArray); free(pageRank); free(yArray);
        return 1;
    }
    K = 1000;  /* number of power iterations */

    //fill out sArray with 0 and
    //Initial Pagerank vector with 1/Numpage
    for(i = 0; i < numPage; i++) {
        for (j = 0; j < numPage; j++) {
            sArray[i*numPage + j] = 0.0;
        }
        pageRank[i] = 1/(float)numPage;
    }
    //setup sArray with page navigation probabilities
    sArray[1] = 0.5;
    sArray[numPage - 1] = 0.5;
    for (i = 1; i < numPage - 1; i++) {
        for(j = 0; j < numPage; j++) {
            if (i == 1) {
                sArray[i*numPage] = 1.0;
                sArray[i*numPage+2] = 0.5;
                j = numPage;  /* done with this row */
            }
            else {
                if (j == i) {
                    /* neighbours of page i each get probability 0.5 */
                    sArray[(i*numPage) + (j - 1)] = 0.5;
                    sArray[(i*numPage) + (j + 1)] = 0.5;
                    j = numPage;  /* done with this row */
                }
            }
        }
    }
    sArray[totalSize - 2] = 0.5;
    //Apply damping factor to the sArray
    for (i = 0; i < numPage; i++) {
        for (j = 0; j < numPage; j++) {
            sArray[i*numPage+j] = ((1-dampingFactor)*sArray[i*numPage+j])+(dampingFactor/numPage);
        }
    }
    //start timer and perform MatVec K-times in parallel
    startTime = omp_get_wtime();
    for (k = 0; k < K; k++) {
#pragma omp parallel for private(j)
        for (i = 0; i < numPage; i++) {
            yArray[i] = 0.0;
            for (j = 0; j < numPage; j++) {
                yArray[i] += sArray[i*numPage+j] * pageRank[j];
            }
        }
        /* serial copy-back of the new iterate */
        for (i = 0; i < numPage; i++) {
            pageRank[i] = yArray[i];
        }
    }
    endTime = omp_get_wtime();
    //Print the Pageranks or max and min values
    if (numPage < 20) {
        for (i = 0; i < numPage; i++) {
            printf("%f \n", pageRank[i]);
        }
    }
    else {
        float max, min;
        max = pageRank[0];
        min = pageRank[0];
        for (i = 0; i < numPage; i++) {
            if (max < pageRank[i])
                max = pageRank[i];
            if (min > pageRank[i])
                min = pageRank[i];
        }
        printf("Min Pagerank = %f \n", min);
        printf("Max Pagerank = %f \n", max);
    }
    //print runtime
    printf("RUNTIME = %.16f\n", endTime-startTime);
    free(sArray);
    free(pageRank);
    free(yArray);
    return 0;
}
guided.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <unistd.h>
#define N 1000
int main()
{
    int idx;

    /*
     * Schedule allows you to create the scheme with which the threads
     * distribute the work of an iteration of a cycle.  "guided" is very
     * similar to dynamic scheduling, except that the chunk size shrinks
     * as the remaining iteration count decreases.
     */
#pragma omp parallel for private(idx) schedule(guided) num_threads(4)
    for (idx = 0; idx < N; idx++)
    {
        /* simulate idx seconds of work */
        sleep(idx);
        printf("The thread %d has completed the iteration %d\n",
               omp_get_thread_num(), idx);
    }

    printf("All threads have ended!!\n");
    return 0;
}
|
test.c |
#include <stdlib.h>
#include <stdio.h>
int main(int argc, char *argv[])
{
    int dep = 0;

    /* Two asynchronous target regions, ordered by a depend chain on
     * `dep` (first writes, second reads), then a taskwait barrier. */
#pragma omp target device(0) nowait map(tofrom: dep) depend(out: dep)
    {
        dep++;
    }
#pragma omp target device(1) nowait map(tofrom: dep) depend(in: dep)
    {
        dep++;
    }
#pragma omp taskwait

    if (dep != 2) {
        printf("completed with a error:\n");
        printf("dep should be 2, but is %d\n", dep);
    } else {
        printf("completed with 0 errors\n");
    }
    return EXIT_SUCCESS;
}
|
dirfile.c | /* -*- mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: sw=4 ts=8 et tw=80
*/
#include "pyactpol.h"
#include <actpol/actpol.h>
#include <actpol/dirfile.h>
#include <actpol/getdata.h>
#include "myassert.h"
/* Extraction / conversion routines, for pulling bitfields out and
* rescaling them. */
static
int extract_float32_int32(float *dest, int32_t *src, long n,
long mask, float scale);
static
int extract_float32_uint32(float *dest, uint32_t *src, long n,
long mask, float scale);
/* ptrobj
*
* This is a generic object for encapsulating a C pointer, so it can
* be passed back and forth between C and python layers. If this is
* abused it will certainly segfault and/or leak memory. Maybe not in
* that order. Proper handling should ensure that copies of such
* objects are associated with exactly one python object, and
* automatically destroyed (using an appropriate destructor) when the
* owner dies (i.e. through the owner's __del__ method).
*
* There are better ways to handle this but this works in simple
* cases. SWIG handles this automatically, if you set it up exactly
* right, which is not easy.
*
*/
/* Python type object for ptrobj.  Almost every slot is defaulted; only
 * the flags and the generic constructor are set.
 * NOTE(review): for Python 3 the conventional idiom is
 * PyVarObject_HEAD_INIT(NULL, 0); here ob_size is only explicitly
 * initialized in the Python 2 branch -- confirm this is intended. */
static PyTypeObject
ptrobjType = {
PyObject_HEAD_INIT(NULL)
#if PY_MAJOR_VERSION >= 3
#else
0, /* ob_size */
#endif
"ptrobj", /* tp_name */
sizeof(pyactpol_ptrobj), /* tp_basicsize */
0, /* tp_itemsize */
0, /* tp_dealloc */
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
0, /* tp_compare */
0, /* tp_repr */
0, /* tp_as_number */
0, /* tp_as_sequence */
0, /* tp_as_mapping */
0, /* tp_hash */
0, /* tp_call */
0, /* tp_str */
0, /* tp_getattro */
0, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags*/
"ptrobj object", /* tp_doc */
0, /* tp_traverse */
0, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_weaklistoffset */
0, /* tp_iter */
0, /* tp_iternext */
0, /* tp_methods */
0, /* tp_members */
0, /* tp_getset */
0, /* tp_base */
0, /* tp_dict */
0, /* tp_descr_get */
0, /* tp_descr_set */
0, /* tp_dictoffset */
0, /* tp_init */
0, /* tp_alloc */
PyType_GenericNew, /* tp_new */
0, /* tp_free */
0, /* tp_is_gc */
0, /* tp_bases */
0, /* tp_mro */
0, /* tp_cache */
0, /* tp_subclasses */
0, /* tp_weaklist */
};
/* ptrobj_decode
*
* For use in Py_ParseTuple -- assuming that the passed object is
* really a ptrobj, extract the encapsulated pointer and store it in
* dest.
*/
/* Assumes o really is a pyactpol_ptrobj (no type check is performed) and
 * stores the wrapped pointer in *dest.  Always returns 1 (success), so it
 * can be used as an O& converter with PyArg_ParseTuple. */
int pyactpol_ptrobj_decode(PyObject *o, void **dest)
{
*dest = ((pyactpol_ptrobj*)o)->p;
return 1;
}
/* ptrobj_new
*
* Create a new ptrobj, with data initialized to the "item".
*/
pyactpol_ptrobj *pyactpol_ptrobj_new(void *item)
{
pyactpol_ptrobj* p = PyObject_New(pyactpol_ptrobj, &ptrobjType);
p->p = item;
return p;
}
/******
* dirfile access
*
* Wraps libactpol calls.
*/
/* dirfile_open(filename) -> ptrobj | None
 * Wraps ACTpolDirfile_open; returns None when the dirfile cannot be
 * opened, otherwise a ptrobj owning the ACTpolDirfile handle. */
static PyObject *dirfile_open(PyObject *self, PyObject *args)
{
char *filename;
if (!PyArg_ParseTuple(args, "s",
&filename))
return NULL;
ACTpolDirfile *df = ACTpolDirfile_open(filename);
if (df == NULL)
Py_RETURN_NONE;
return (PyObject*) pyactpol_ptrobj_new(df);
}
/* dirfile_close(dirfile) -> None
 * Wraps ACTpolDirfile_close.  The ptrobj keeps the (now stale) pointer;
 * callers must not pass it to further dirfile_* calls. */
static PyObject *dirfile_close(PyObject *self, PyObject *args)
{
ACTpolDirfile *df;
if (!PyArg_ParseTuple(args, "O&",
pyactpol_ptrobj_decode, &df))
return NULL;
ACTpolDirfile_close(df);
Py_RETURN_NONE;
}
/*
* dirfile_get_frame_count
*
* Count frames and partial frames in a dirfile.
*
* In:
* dirfile (dirfile_object)
* channel_probe (string, possibly null) - channel to use to count partial
* frames
*
* Out:
* n_full_frames (long)
* samples_per_frame (long)
* n_trailing_samples (long)
*/
/*
 * dirfile_get_frame_count(dirfile, channel)
 *   -> (n_full_frames, samples_per_frame, n_trailing_samples)
 *
 * Counts complete frames via GetNFrames, then probes `channel` with an
 * over-long GetData read to find how many samples exist past the last
 * complete frame.  Use a real data channel, not INDEX.
 *
 * Changes: the malloc is now checked (was unchecked), and the dead
 * "if (channel != NULL)" test was dropped -- the "s" format plus the
 * assert guarantee channel is non-NULL.
 */
static PyObject *dirfile_get_frame_count(PyObject *self, PyObject *args)
{
    ACTpolDirfile *dirfile;
    char *channel;
    if (!PyArg_ParseTuple(args, "O&s",
                          &pyactpol_ptrobj_decode, &dirfile,
                          &channel))
        Py_RETURN_NONE;
    po_assert(dirfile != NULL && channel != NULL);

    // Number of complete frames
    int status;
    long n_frames = GetNFrames(dirfile->format, &status, NULL);

    // Number of partial frames, as determined from channel.
    long spf = GetSamplesPerFrame(dirfile->format, channel, &status);
    long n_partial = 0;
    char *data = (char*)malloc((n_frames+1) * spf * sizeof(char));
    if (data != NULL) {
        // Ask for one frame's worth more than exists; the shortfall
        // tells us how many trailing samples are present.
        long nout = GetData(dirfile->format, channel,
                            0,0,
                            n_frames, spf-1,
                            'c', data,
                            &status);
        n_partial = nout - n_frames*spf;
        free(data);
    }
    return Py_BuildValue("lll",
                         n_frames,
                         spf,
                         n_partial);
}
/*
* dirfile_get_channel_info
*
* Check for channel existence and get basic channel information.
*
* In:
* dirfile (dirfile_object)
* channel name (string)
*
* Out:
* exists (boolean)
* samples_per_frame (long)
*
*/
/*
 * dirfile_get_channel_info(dirfile, channel) -> (exists, samples_per_frame)
 *
 * A channel "exists" when getdata reports GD_E_OK and a positive
 * samples-per-frame; otherwise (False, 0) is returned.
 */
static PyObject *dirfile_get_channel_info(PyObject *self, PyObject *args)
{
    ACTpolDirfile *dirfile = NULL;
    char *channel = NULL;

    if (!PyArg_ParseTuple(args, "O&s",
                          &pyactpol_ptrobj_decode, &dirfile,
                          &channel))
        Py_RETURN_NONE;
    if (dirfile == NULL || channel == NULL)
        Py_RETURN_NONE;

    int status;
    long spf = GetSamplesPerFrame(dirfile->format, channel, &status);
    bool exists = (status == GD_E_OK && spf > 0);
    return Py_BuildValue("Nl",
                         PyBool_FromLong((long)exists),
                         exists ? spf : 0);
}
/*
* dirfile_load_channel
*
* Load a dirfile channel. Inputs should be sanity checked *before*
* you get here. E.g., to decode negative start and count parameters,
* determine the best data type, etc. It's easier to do that on the
* python side.
*
* In:
* dirfile (dirfile_object)
* channel name (string)
* data_type (string)
* sample_start (long) - first sample to load
* sample_count (long) - number of samples to load
* dest (ndarray, possibly null) - place for the data. Optional.
*
* Out:
* data (ndarray)
*
*/
/* Map a numpy type number onto the corresponding getdata single-character
 * type code; logs and returns 0 for anything unhandled. */
static char dtype_to_typecode(int dtype) {
    if (dtype == NPY_UINT8)   return 'c';
    if (dtype == NPY_INT16)   return 's';
    if (dtype == NPY_UINT16)  return 'u';
    if (dtype == NPY_INT32)   return 'i';
    if (dtype == NPY_UINT32)  return 'U';
    if (dtype == NPY_FLOAT32) return 'f';
    if (dtype == NPY_FLOAT64) return 'd';
    print_error("request for unhandled numpy type, %i\n", dtype);
    return 0;
}
/* dirfile_load_channel(dirfile, channel, sample0, n_samples, dtype, dest)
 *   -> ndarray
 *
 * Reads n_samples starting at sample0 into either a freshly created 1-D
 * array (dest is None) or a caller-supplied array (dest is an ndarray,
 * whose dtype then overrides the requested one).  Raises on getdata
 * errors or short reads.  Inputs are assumed sanity-checked by the
 * python caller. */
static PyObject *dirfile_load_channel(PyObject *self, PyObject *args)
{
ACTpolDirfile *dirfile;
char *channel;
long sample0, n_samples;
PyArray_Descr* dtype;
PyObject *dest_in;
PyArrayObject *dest_array;
if (!PyArg_ParseTuple(args, "O&sllO&O",
&pyactpol_ptrobj_decode, &dirfile,
&channel,
&sample0,
&n_samples,
PyArray_DescrConverter, &dtype, /* magic */
&dest_in))
Py_RETURN_NONE;
if (dirfile == NULL || channel == NULL) {
Py_RETURN_NONE;
}
if (dest_in == Py_None) {
/* Create a new array to hold the data. */
/* (SimpleNewFromDescr steals the dtype reference.) */
npy_intp dims[1];
dims[0] = n_samples;
dest_array = (PyArrayObject*)PyArray_SimpleNewFromDescr(1, dims, dtype);
} else if (PyArray_Check(dest_in)) {
/* Drop data into existing array */
dest_array = (PyArrayObject*)dest_in;
dtype = PyArray_DESCR(dest_array);
/* Check array dimensions and strides */
ASSERT_CARRAY_TYPE_NDIM(dest_array, PyArray_TYPE(dest_array), 1);
npy_intp *dims = PyArray_DIMS(dest_array);
po_assert(dims[0] >= n_samples);
/* Get a new reference, since we're returning this like new. */
Py_INCREF(dest_array);
} else {
po_raise("unexpected object type");
}
void *data = PyArray_DATA(dest_array);
int status = GD_E_OK;
long nsamples_out = 0;
/* Release Global Interpreter Lock during I/O */
Py_BEGIN_ALLOW_THREADS
nsamples_out = GetData(dirfile->format, channel,
0, sample0,
0, n_samples,
dtype_to_typecode(dtype->type_num),
data, &status);
Py_END_ALLOW_THREADS
if (status != GD_E_OK) {
print_error("status = %i : %s\n", status, GD_ERROR_CODES[status]);
po_raise("GetData error");
}
/* NOTE(review): %i with long arguments is a varargs mismatch on LP64;
 * consider %li here and below. */
if (nsamples_out != n_samples) {
print_error("requested %i samples but only read %i\n",
n_samples, nsamples_out);
po_raise("Dirfile error");
}
return Py_BuildValue("N",
dest_array);
}
/* Bitfield-extraction parameters for raw 32-bit samples:
 *   raw       - nonzero => pass the data through untranslated
 *   mask      - bits of the field within each sample
 *   is_signed - treat the masked value as signed
 *   scale     - multiplier applied after masking (folds in the bit shift;
 *               see decode_converter) */
struct converter {
int raw;
long mask;
int is_signed;
float scale;
};
/* decode_converter expects to encounter something like a BitField
* object, as defined in util.mce.
*/
/* Fills *conv from the python object's start/count/signed/scale
 * attributes; a NULL or None object selects raw (pass-through) mode.
 * Always returns 0.
 *
 * Fixes: the shift now uses an unsigned long literal ("1 << start" is
 * undefined for start >= 31 with a 32-bit int), and the four attribute
 * references returned by PyObject_GetAttrString are released (the
 * original leaked all of them on every call). */
int decode_converter(struct converter *conv, PyObject *convo)
{
    memset(conv, 0, sizeof(*conv));
    if (convo == NULL || convo == Py_None) {
        conv->raw = 1;
        return 0;
    }
    PyObject *start = PyObject_GetAttrString(convo, "start");
    PyObject *count = PyObject_GetAttrString(convo, "count");
    PyObject *is_signed = PyObject_GetAttrString(convo, "signed");
    PyObject *scale = PyObject_GetAttrString(convo, "scale");
    unsigned long bit = 1UL << PyInt_AsLong(start);
    float bit0 = bit;   /* value of the field's lowest bit, as divisor */
    for (int i=0; i < PyInt_AsLong(count); i++) {
        conv->mask |= bit;
        bit = bit << 1;
    }
    /* scale = user scale / 2^start, so (sample & mask) * scale undoes
     * the field's bit offset. */
    conv->scale = PyFloat_AsDouble(scale) / bit0;
    conv->is_signed = (int)PyInt_AsLong(is_signed);
    Py_XDECREF(start);
    Py_XDECREF(count);
    Py_XDECREF(is_signed);
    Py_XDECREF(scale);
    return 0;
}
/* dirfile_load_channels(dirfile, fields, sample0, n_samples, dtype, dest,
 *                       converter) -> 2-D ndarray (n_fields x n_samples)
 *
 * OpenMP bulk loader: each thread reads whole channels with GetData and,
 * when a converter is given, unpacks the bitfield into float32 output.
 * dest may be None (new array) or a preallocated 2-D array. */
static PyObject *dirfile_load_channels(PyObject *self, PyObject *args)
{
ACTpolDirfile *dirfile;
PyObject *field_list;
long sample0, n_samples;
PyArray_Descr* dtype;
PyObject *dest_in;
PyArrayObject *dest_array;
PyObject *convo;
struct converter conv;
if (!PyArg_ParseTuple(args, "O&OllO&OO",
&pyactpol_ptrobj_decode, &dirfile,
&field_list,
&sample0,
&n_samples,
PyArray_DescrConverter, &dtype, /* magic */
&dest_in,
&convo))
po_raise("invalid arguments.");
if (dirfile == NULL) {
po_raise("invalid dirfile object");
}
po_assert(PyList_Check(field_list));
if (dest_in == Py_None) {
/* Create a new array to hold the data. */
npy_intp dims[2];
dims[0] = PyList_Size(field_list);
dims[1] = n_samples;
dest_array = (PyArrayObject*)PyArray_SimpleNewFromDescr(2, dims, dtype);
} else if (PyArray_Check(dest_in)) {
/* Drop data into existing array */
dest_array = (PyArrayObject*)dest_in;
dtype = PyArray_DESCR(dest_array);
/* Check array dimensions and strides */
ASSERT_CARRAY_TYPE_NDIM(dest_array, PyArray_TYPE(dest_array), 2);
npy_intp *dims = PyArray_DIMS(dest_array);
po_assert(dims[0] == PyList_Size(field_list));
po_assert(dims[1] == n_samples);
/* Get a new reference, since we're returning this like new. */
Py_INCREF(dest_array);
} else {
po_raise("unexpected object type");
}
void *data = PyArray_DATA(dest_array);
int any_errors = 0;
long major_stride = PyArray_STRIDES(dest_array)[0]; // Yes, it's bytes.
char gd_type = dtype_to_typecode(dtype->type_num);
decode_converter(&conv, convo);
if (!conv.raw) {
/* Bitfield extraction reads raw uint32 words, output must be f32. */
po_assert(dtype->type_num == NPY_FLOAT32);
gd_type = 'U';
}
/* Release Global Interpreter Lock during I/O */
Py_BEGIN_ALLOW_THREADS
/* NOTE(review): any_errors is written by multiple threads without
 * atomics; all writers store 1, so the flag survives, but it is
 * formally a data race. */
#pragma omp parallel shared(any_errors)
// Per-thread buffer is needed if we're translating the data.
{
void *buf = NULL;
if (!conv.raw)
buf = malloc(n_samples * sizeof(uint32_t));
#pragma omp for
for (long i=0; i<PyList_Size(field_list); i++) {
const char *channel = PyString_AsString(PyList_GET_ITEM(field_list, i));
int status = GD_E_OK;
long nsamples_out = 0;
if (conv.raw)
/* Raw mode: read directly into row i of the output.
 * (void* arithmetic is a GNU C extension.) */
buf = data + i * major_stride;
nsamples_out = GetData(dirfile->format, channel,
0, sample0, 0, n_samples,
gd_type, buf,
&status);
if (!conv.raw) {
/* Unpack the bitfield from the scratch buffer into row i. */
if (conv.is_signed)
extract_float32_int32(data + i*major_stride, buf, n_samples,
conv.mask, conv.scale);
else
extract_float32_uint32(data + i*major_stride, buf, n_samples,
conv.mask, conv.scale);
}
/* NOTE(review): %i with long arguments is a varargs mismatch on
 * LP64; consider %li. */
if (nsamples_out != n_samples) {
print_error("Field %s: requested %i samples but only read %i\n",
channel, n_samples, nsamples_out);
any_errors = 1;
}
if (status != GD_E_OK) {
print_error("Field %s: status = %i : %s\n", channel,
status, GD_ERROR_CODES[status]);
any_errors = 1;
}
}
if (!conv.raw) free(buf);
}
Py_END_ALLOW_THREADS
if (any_errors)
po_raise("Dirfile error");
return Py_BuildValue("N", dest_array);
}
int extract_float32_int32(float *dest, int32_t *src, long n,
long mask, float scale)
{
int32_t _mask = mask;
for (long i=0; i<n; i++)
dest[i] = (float)(src[i] & _mask) * scale;
return n;
}
/* Mask each unsigned 32-bit sample with `mask`, scale it, and store the
 * result as float.  Returns the number of samples processed (n). */
int extract_float32_uint32(float *dest, uint32_t *src, long n,
                           long mask, float scale)
{
    const uint32_t keep = (uint32_t) mask;
    float *out = dest;
    const uint32_t *end = src + n;
    for (const uint32_t *in = src; in != end; ++in, ++out)
        *out = scale * (float)(*in & keep);
    return n;
}
/* Module method table.  The dirfile_get_frame_count docstring previously
 * claimed "returns (n_samples, 0)", which did not match the function's
 * three return values; the empty docstrings are also filled in. */
PyMethodDef pyactpol_dirfile_methods[] = {
    {"dirfile_open", dirfile_open, METH_VARARGS,
     "Given (filename), returns a dirfile object, or None on failure."},
    {"dirfile_close", dirfile_close, METH_VARARGS,
     "Close a dirfile object."},
    {"dirfile_get_channel_info", dirfile_get_channel_info, METH_VARARGS,
     "Given (dirfile, channel), returns (existence, samples_per_frame)."},
    {"dirfile_get_frame_count", dirfile_get_frame_count, METH_VARARGS,
     "Given (dirfile, channel), returns (n_full_frames, samples_per_frame, "
     "n_trailing_samples)."},
    {"dirfile_load_channel", dirfile_load_channel, METH_VARARGS,
     "Load dirfile channel."},
    {"dirfile_load_channels", dirfile_load_channels, METH_VARARGS,
     "Load multiple dirfile channels (openmp)."},
    {NULL, NULL, 0, NULL}  /* Sentinel */
};
/* Module init helper: readies the ptrobj type.  The INCREF pins the
 * static type object for the life of the process.  A PyType_Ready
 * failure is silently ignored here (the exception remains set for the
 * caller to observe). */
void pyactpol_dirfile_init()
{
if (PyType_Ready(&ptrobjType) < 0)
return;
Py_INCREF(&ptrobjType);
}
|
GB_binop__bset_uint32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bset_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__bset_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__bset_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__bset_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bset_uint32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bset_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__bset_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bset_uint32)
// C=scalar+B GB (_bind1st__bset_uint32)
// C=scalar+B' GB (_bind1st_tran__bset_uint32)
// C=A+scalar GB (_bind2nd__bset_uint32)
// C=A'+scalar GB (_bind2nd_tran__bset_uint32)
// C type: uint32_t
// A type: uint32_t
// A pattern? 0
// B type: uint32_t
// B pattern? 0
// BinaryOp: cij = GB_BITSET (aij, bij, uint32_t, 32)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITSET (x, y, uint32_t, 32) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSET || GxB_NO_UINT32 || GxB_NO_BSET_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with C, A, B all dense, for z = GB_BITSET(x,y,uint32_t,32).
// The loop body comes from the included template, configured by the
// GB_* macros defined above (auto-generated file; do not hand-edit).
void GB (_Cdense_ewise3_noaccum__bset_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate sparse B into dense C with the BSET operator.
// Returns GrB_NO_VALUE when this operator/type combination is disabled
// at compile time (GB_DISABLE).
GrB_Info GB (_Cdense_accumB__bset_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into dense C with the BSET operator.
GrB_Info GB (_Cdense_accumb__bset_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable: the block above always returns (generated code)
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd C=A+B (optionally masked) for the BSET/uint32 operator.
// alpha_scalar/beta_scalar are only read when is_eWiseUnion is true.
// The actual kernel is the included GB_add_template.c.
GrB_Info GB (_AaddB__bset_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint32_t alpha_scalar ;
uint32_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult C=A.*B (sparse/hyper C, optionally masked) for BSET/uint32.
// Kernel lives in the included GB_emult_08_meta.c.
GrB_Info GB (_AemultB_08__bset_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// BSET is not commutative and has no flipped variant, so GB_BINOP_FLIP
// selects between fmult(x,y) and fmult(y,x) at runtime via flipxy.
GrB_Info GB (_AemultB_02__bset_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<M> = A.*B with M sparse/hyper and A, B bitmap/full.
// Kernel lives in the included GB_emult_04_template.c.
GrB_Info GB (_AemultB_04__bset_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult C=A.*B (optionally masked) where C is bitmap.
// Kernel lives in the included GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__bset_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = BITSET (x, Bx [p]) for every entry present in B.
// GBB skips entries absent from the bitmap Bb; GBX handles iso-valued B.
GrB_Info GB (_bind1st__bset_uint32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint32_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_BITSET (x, bij, uint32_t, 32) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): bind the scalar y as the second operand of BITSET and
// apply it to every entry present in A.
GrB_Info GB (_bind2nd__bset_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A, or NULL if all entries present
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only entries present in the bitmap are computed
        if (GBB (Ab, p))
        {
            uint32_t aij = GBX (Ax, p, false) ;
            Cx [p] = GB_BITSET (aij, y, uint32_t, 32) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_unop_transpose.c applies GB_CAST_OP at each entry; here it computes
// cij = BITSET (x, aij) with the scalar x bound as the first operand.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITSET (x, aij, uint32_t, 32) ; \
}
GrB_Info GB (_bind1st_tran__bset_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code generated after this function
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_unop_transpose.c applies GB_CAST_OP at each entry; here it computes
// cij = BITSET (aij, y) with the scalar y bound as the second operand.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITSET (aij, y, uint32_t, 32) ; \
}
GrB_Info GB (_bind2nd_tran__bset_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
convolution_1x1_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Repack a 1x1 convolution kernel for the pack4 sgemm path.
// src layout = inch-outch, dst layout = 4b-4a-inch/4a-outch/4b: each 4x4
// tile stores four consecutive input elements of four output channels.
static void conv1x1s1_sgemm_transform_kernel_pack4_sse(const Mat& kernel, Mat& kernel_pack4, int inch, int outch)
{
    kernel_pack4.create(1, inch / 4, outch / 4, (size_t)4u * 16, 16);

    for (int q = 0; q + 3 < outch; q += 4)
    {
        // one read cursor per output channel in this group of four
        const float* k[4];
        for (int j = 0; j < 4; j++)
        {
            k[j] = (const float*)kernel + (q + j) * inch;
        }

        float* g0 = kernel_pack4.channel(q / 4);

        for (int p = 0; p + 3 < inch; p += 4)
        {
            // write a 4x4 tile: outer index walks input elements,
            // inner index walks output channels
            for (int i = 0; i < 4; i++)
            {
                for (int j = 0; j < 4; j++)
                {
                    g0[i * 4 + j] = k[j][i];
                }
            }

            for (int j = 0; j < 4; j++)
            {
                k[j] += 4;
            }
            g0 += 16;
        }
    }
}
// 1x1 stride-1 convolution on pack4 data computed as a small GEMM:
// the input pixels are first interleaved into tiles of 4, 2 and 1 columns,
// then each output channel accumulates over all input channels with SSE.
// bottom_blob/top_blob hold 4 floats per pixel (elempack 4); kernel is the
// output of conv1x1s1_sgemm_transform_kernel_pack4_sse.
static void conv1x1s1_sgemm_pack4_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    int outch = top_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    // number of spatial positions; each holds one pack4 group
    const int size = w * h;

    const float* bias = _bias;

    // interleave
    // tmp holds one channel per tile: size/4 four-pixel tiles, then the
    // two-pixel tiles, then the single-pixel remainders
    Mat tmp(4, inch, size / 4 + (size % 4) / 2 + size % 2, elemsize, elempack, opt.workspace_allocator);
    {
        int nn_size;
        int remain_size_start;

        remain_size_start = 0;
        nn_size = (size - remain_size_start) >> 2;

        // gather tiles of 4 pixels across all input channels
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 4;

            const float* img0 = bottom_blob.channel(0);
            img0 += i * 4;

            float* tmpptr = tmp.channel(i / 4);

            for (int q = 0; q < inch; q++)
            {
                __m128 _r0 = _mm_loadu_ps(img0);
                __m128 _r1 = _mm_loadu_ps(img0 + 4);
                __m128 _r2 = _mm_loadu_ps(img0 + 8);
                __m128 _r3 = _mm_loadu_ps(img0 + 12);
                _mm_storeu_ps(tmpptr, _r0);
                _mm_storeu_ps(tmpptr + 4, _r1);
                _mm_storeu_ps(tmpptr + 8, _r2);
                _mm_storeu_ps(tmpptr + 12, _r3);

                tmpptr += 16;
                // step to the same pixel in the next input channel
                img0 += bottom_blob.cstep * 4;
            }
        }

        remain_size_start += nn_size << 2;
        nn_size = (size - remain_size_start) >> 1;

        // gather tiles of 2 pixels
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 2;

            const float* img0 = bottom_blob.channel(0);
            img0 += i * 4;

            float* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);

            for (int q = 0; q < inch; q++)
            {
                __m128 _r0 = _mm_loadu_ps(img0);
                __m128 _r1 = _mm_loadu_ps(img0 + 4);
                _mm_storeu_ps(tmpptr, _r0);
                _mm_storeu_ps(tmpptr + 4, _r1);

                tmpptr += 8;
                img0 += bottom_blob.cstep * 4;
            }
        }

        remain_size_start += nn_size << 1;

        // gather the remaining single pixels
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_size_start; i < size; i++)
        {
            const float* img0 = bottom_blob.channel(0);
            img0 += i * 4;

            float* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);

            for (int q = 0; q < inch; q++)
            {
                __m128 _r0 = _mm_loadu_ps(img0);
                _mm_storeu_ps(tmpptr, _r0);

                tmpptr += 4;
                img0 += bottom_blob.cstep * 4;
            }
        }
    }

    // GEMM: for each output channel, multiply the interleaved tiles by the
    // repacked 4x4 kernel blocks and add the bias
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr0 = top_blob.channel(p);

        const float zeros[4] = {0.f, 0.f, 0.f, 0.f};
        const float* biasptr = bias ? bias + p * 4 : zeros;

        int i = 0;
        // 4 pixels at a time
        for (; i + 3 < size; i += 4)
        {
            float* tmpptr = tmp.channel(i / 4);
            const float* kptr0 = (const float*)kernel.channel(p);

            __m128 _sum0 = _mm_loadu_ps(biasptr);
            __m128 _sum1 = _mm_loadu_ps(biasptr);
            __m128 _sum2 = _mm_loadu_ps(biasptr);
            __m128 _sum3 = _mm_loadu_ps(biasptr);

            for (int q = 0; q < inch; q++)
            {
                // broadcast each input scalar; _valRC = pixel R, element C
                __m128 _val00 = _mm_load1_ps(tmpptr);
                __m128 _val01 = _mm_load1_ps(tmpptr + 1);
                __m128 _val02 = _mm_load1_ps(tmpptr + 2);
                __m128 _val03 = _mm_load1_ps(tmpptr + 3);
                __m128 _val10 = _mm_load1_ps(tmpptr + 4);
                __m128 _val11 = _mm_load1_ps(tmpptr + 5);
                __m128 _val12 = _mm_load1_ps(tmpptr + 6);
                __m128 _val13 = _mm_load1_ps(tmpptr + 7);
                __m128 _val20 = _mm_load1_ps(tmpptr + 8);
                __m128 _val21 = _mm_load1_ps(tmpptr + 9);
                __m128 _val22 = _mm_load1_ps(tmpptr + 10);
                __m128 _val23 = _mm_load1_ps(tmpptr + 11);
                __m128 _val30 = _mm_load1_ps(tmpptr + 12);
                __m128 _val31 = _mm_load1_ps(tmpptr + 13);
                __m128 _val32 = _mm_load1_ps(tmpptr + 14);
                __m128 _val33 = _mm_load1_ps(tmpptr + 15);

                __m128 _w0 = _mm_load_ps(kptr0);
                __m128 _w1 = _mm_load_ps(kptr0 + 4);
                __m128 _w2 = _mm_load_ps(kptr0 + 8);
                __m128 _w3 = _mm_load_ps(kptr0 + 12);

#if __AVX__
                // fused multiply-add when AVX (FMA) is available
                _sum0 = _mm_fmadd_ps(_w0, _val00, _sum0);
                _sum0 = _mm_fmadd_ps(_w1, _val01, _sum0);
                _sum0 = _mm_fmadd_ps(_w2, _val02, _sum0);
                _sum0 = _mm_fmadd_ps(_w3, _val03, _sum0);
                _sum1 = _mm_fmadd_ps(_w0, _val10, _sum1);
                _sum1 = _mm_fmadd_ps(_w1, _val11, _sum1);
                _sum1 = _mm_fmadd_ps(_w2, _val12, _sum1);
                _sum1 = _mm_fmadd_ps(_w3, _val13, _sum1);
                _sum2 = _mm_fmadd_ps(_w0, _val20, _sum2);
                _sum2 = _mm_fmadd_ps(_w1, _val21, _sum2);
                _sum2 = _mm_fmadd_ps(_w2, _val22, _sum2);
                _sum2 = _mm_fmadd_ps(_w3, _val23, _sum2);
                _sum3 = _mm_fmadd_ps(_w0, _val30, _sum3);
                _sum3 = _mm_fmadd_ps(_w1, _val31, _sum3);
                _sum3 = _mm_fmadd_ps(_w2, _val32, _sum3);
                _sum3 = _mm_fmadd_ps(_w3, _val33, _sum3);
#else
                _sum0 = _mm_add_ps(_mm_mul_ps(_w0, _val00), _sum0);
                _sum0 = _mm_add_ps(_mm_mul_ps(_w1, _val01), _sum0);
                _sum0 = _mm_add_ps(_mm_mul_ps(_w2, _val02), _sum0);
                _sum0 = _mm_add_ps(_mm_mul_ps(_w3, _val03), _sum0);
                _sum1 = _mm_add_ps(_mm_mul_ps(_w0, _val10), _sum1);
                _sum1 = _mm_add_ps(_mm_mul_ps(_w1, _val11), _sum1);
                _sum1 = _mm_add_ps(_mm_mul_ps(_w2, _val12), _sum1);
                _sum1 = _mm_add_ps(_mm_mul_ps(_w3, _val13), _sum1);
                _sum2 = _mm_add_ps(_mm_mul_ps(_w0, _val20), _sum2);
                _sum2 = _mm_add_ps(_mm_mul_ps(_w1, _val21), _sum2);
                _sum2 = _mm_add_ps(_mm_mul_ps(_w2, _val22), _sum2);
                _sum2 = _mm_add_ps(_mm_mul_ps(_w3, _val23), _sum2);
                _sum3 = _mm_add_ps(_mm_mul_ps(_w0, _val30), _sum3);
                _sum3 = _mm_add_ps(_mm_mul_ps(_w1, _val31), _sum3);
                _sum3 = _mm_add_ps(_mm_mul_ps(_w2, _val32), _sum3);
                _sum3 = _mm_add_ps(_mm_mul_ps(_w3, _val33), _sum3);
#endif
                tmpptr += 16;
                kptr0 += 16;
            }

            _mm_store_ps(outptr0, _sum0);
            _mm_store_ps(outptr0 + 4, _sum1);
            _mm_store_ps(outptr0 + 8, _sum2);
            _mm_store_ps(outptr0 + 12, _sum3);

            outptr0 += 16;
        }
        // 2 pixels at a time
        for (; i + 1 < size; i += 2)
        {
            float* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
            const float* kptr0 = (const float*)kernel.channel(p);

            __m128 _sum0 = _mm_loadu_ps(biasptr);
            __m128 _sum1 = _mm_loadu_ps(biasptr);

            for (int q = 0; q < inch; q++)
            {
                __m128 _val00 = _mm_load1_ps(tmpptr);
                __m128 _val01 = _mm_load1_ps(tmpptr + 1);
                __m128 _val02 = _mm_load1_ps(tmpptr + 2);
                __m128 _val03 = _mm_load1_ps(tmpptr + 3);
                __m128 _val10 = _mm_load1_ps(tmpptr + 4);
                __m128 _val11 = _mm_load1_ps(tmpptr + 5);
                __m128 _val12 = _mm_load1_ps(tmpptr + 6);
                __m128 _val13 = _mm_load1_ps(tmpptr + 7);

                __m128 _w0 = _mm_load_ps(kptr0);
                __m128 _w1 = _mm_load_ps(kptr0 + 4);
                __m128 _w2 = _mm_load_ps(kptr0 + 8);
                __m128 _w3 = _mm_load_ps(kptr0 + 12);

#if __AVX__
                _sum0 = _mm_fmadd_ps(_w0, _val00, _sum0);
                _sum0 = _mm_fmadd_ps(_w1, _val01, _sum0);
                _sum0 = _mm_fmadd_ps(_w2, _val02, _sum0);
                _sum0 = _mm_fmadd_ps(_w3, _val03, _sum0);
                _sum1 = _mm_fmadd_ps(_w0, _val10, _sum1);
                _sum1 = _mm_fmadd_ps(_w1, _val11, _sum1);
                _sum1 = _mm_fmadd_ps(_w2, _val12, _sum1);
                _sum1 = _mm_fmadd_ps(_w3, _val13, _sum1);
#else
                _sum0 = _mm_add_ps(_mm_mul_ps(_w0, _val00), _sum0);
                _sum0 = _mm_add_ps(_mm_mul_ps(_w1, _val01), _sum0);
                _sum0 = _mm_add_ps(_mm_mul_ps(_w2, _val02), _sum0);
                _sum0 = _mm_add_ps(_mm_mul_ps(_w3, _val03), _sum0);
                _sum1 = _mm_add_ps(_mm_mul_ps(_w0, _val10), _sum1);
                _sum1 = _mm_add_ps(_mm_mul_ps(_w1, _val11), _sum1);
                _sum1 = _mm_add_ps(_mm_mul_ps(_w2, _val12), _sum1);
                _sum1 = _mm_add_ps(_mm_mul_ps(_w3, _val13), _sum1);
#endif
                tmpptr += 8;
                kptr0 += 16;
            }

            _mm_store_ps(outptr0, _sum0);
            _mm_store_ps(outptr0 + 4, _sum1);

            outptr0 += 8;
        }
        // remaining single pixels
        for (; i < size; i++)
        {
            float* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
            const float* kptr0 = (const float*)kernel.channel(p);

            __m128 _sum = _mm_loadu_ps(biasptr);

            for (int q = 0; q < inch; q++)
            {
                __m128 _val0 = _mm_load1_ps(tmpptr);
                __m128 _val1 = _mm_load1_ps(tmpptr + 1);
                __m128 _val2 = _mm_load1_ps(tmpptr + 2);
                __m128 _val3 = _mm_load1_ps(tmpptr + 3);

                __m128 _w0 = _mm_load_ps(kptr0);
                __m128 _w1 = _mm_load_ps(kptr0 + 4);
                __m128 _w2 = _mm_load_ps(kptr0 + 8);
                __m128 _w3 = _mm_load_ps(kptr0 + 12);

#if __AVX__
                _sum = _mm_fmadd_ps(_w0, _val0, _sum);
                _sum = _mm_fmadd_ps(_w1, _val1, _sum);
                _sum = _mm_fmadd_ps(_w2, _val2, _sum);
                _sum = _mm_fmadd_ps(_w3, _val3, _sum);
#else
                _sum = _mm_add_ps(_mm_mul_ps(_w0, _val0), _sum);
                _sum = _mm_add_ps(_mm_mul_ps(_w1, _val1), _sum);
                _sum = _mm_add_ps(_mm_mul_ps(_w2, _val2), _sum);
                _sum = _mm_add_ps(_mm_mul_ps(_w3, _val3), _sum);
#endif
                tmpptr += 4;
                kptr0 += 16;
            }

            _mm_store_ps(outptr0, _sum);

            outptr0 += 4;
        }
    }

    //     // NOTE sgemm
    //     for (; p<outch; p++)
    //     {
    //         Mat out0 = top_blob.channel(p);
    //
    //         const float bias0 = bias ? bias[p] : 0.f;
    //
    //         float* outptr0 = out0;
    //
    //         for (int i=0; i<size; i++)
    //         {
    //             float sum = bias0;
    //
    //             const float* kptr = _kernel.channel(p);
    //
    //             for (int q=0; q<inch; q++)
    //             {
    //                 const float* img0 = bottom_blob.channel(q);
    //
    //                 sum += img0[i] * kptr[0];
    //                 kptr ++;
    //             }
    //
    //             outptr0[i] = sum;
    //         }
    //     }
}
// 1x1 stride-2 convolution on pack4 data: gather every other pixel into a
// shrunken blob, then reuse the stride-1 sgemm path on it.
static void conv1x1s2_pack4_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;
    int outw = top_blob.w;
    int outh = top_blob.h;

    // floats to skip at the end of each row (stride-2 leftovers + next row)
    const int tailstep = (w - 2 * outw + w) * 4;

    Mat bottom_blob_shrinked;
    bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < channels; p++)
    {
        const float* sptr = bottom_blob.channel(p);
        float* dptr = bottom_blob_shrinked.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // copy one pack4 pixel, then skip the next (stride 2)
                _mm_store_ps(dptr, _mm_load_ps(sptr));

                sptr += 8;
                dptr += 4;
            }

            sptr += tailstep;
        }
    }

    conv1x1s1_sgemm_pack4_sse(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
OpenMP-check.c | #include <stdio.h>
#include <omp.h>
/* Smoke test for compiler OpenMP support: the parallel region prints one
   line per thread in the team. */
int main(void) {
#pragma omp parallel
{
/* executed by every thread in the team */
printf("I'm a parallel region.\n");
}
return 0;
}
|
ast-dump-openmp-cancel.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test() { // NOTE: line/column positions here are pinned by the CHECK lines below; comments stay trailing so FileCheck expectations still match
#pragma omp parallel
{
#pragma omp cancel parallel // standalone directive: cancels the innermost enclosing parallel region
}
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-cancel.c:3:1, line:8:1> line:3:6 test 'void ()'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:13, line:8:1>
// CHECK-NEXT: `-OMPParallelDirective {{.*}} <line:4:1, col:21>
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3, line:7:3>
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: |-CompoundStmt {{.*}} <line:5:3, line:7:3> openmp_structured_block
// CHECK-NEXT: | `-OMPCancelDirective {{.*}} <line:6:1, col:28> openmp_standalone_directive
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-cancel.c:4:1) *const restrict'
|
GB_unop__identity_fc32_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_fc32_int8
// op(A') function: GB_unop_tran__identity_fc32_int8
// C type: GxB_FC32_t
// A type: int8_t
// cast: GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the identity operator, casting each int8 entry
// to a complex float with zero imaginary part.
GrB_Info GB_unop_apply__identity_fc32_int8
(
    GxB_FC32_t *Cx,         // Cx and Ax may be aliased
    const int8_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // cast: cij = GxB_CMPLXF ((float) aij, 0) ; identity op is a no-op
        Cx [p] = GxB_CMPLXF ((float) (Ax [p]), 0) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast int8 -> complex float, and apply
// the identity operator.  The transpose template does all of the work.
GrB_Info GB_unop_tran__identity_fc32_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,    // workspace used by the template
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
compare.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP AAA RRRR EEEEE %
% C O O MM MM P P A A R R E %
% C O O M M M PPPP AAAAA RRRR EEE %
% C O O M M P A A R R E %
% CCCC OOO M M P A A R R EEEEE %
% %
% %
% MagickCore Image Comparison Methods %
% %
% Software Design %
% Cristy %
% December 2003 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/compare.h"
#include "magick/composite-private.h"
#include "magick/constitute.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/statistic.h"
#include "magick/thread-private.h"
#include "magick/transform.h"
#include "magick/utility.h"
#include "magick/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p a r e I m a g e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompareImageChannels() compares one or more image channels of an image
% to a reconstructed image and returns the difference image.
%
% The format of the CompareImageChannels method is:
%
% Image *CompareImageChannels(const Image *image,
% const Image *reconstruct_image,const ChannelType channel,
% const MetricType metric,double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o channel: the channel.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image,
  const MetricType metric,double *distortion,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: compare all composite channels.
  */
  return(CompareImageChannels(image,reconstruct_image,CompositeChannels,
    metric,distortion,exception));
}
static size_t GetNumberChannels(const Image *image,const ChannelType channel)
{
  size_t
    count;

  /*
    Count the channels selected by the mask: opacity counts only for matte
    images, the index channel only in CMYK.  Report at least one channel.
  */
  count=((channel & RedChannel) != 0 ? 1 : 0)+
    ((channel & GreenChannel) != 0 ? 1 : 0)+
    ((channel & BlueChannel) != 0 ? 1 : 0)+
    ((((channel & OpacityChannel) != 0) &&
      (image->matte != MagickFalse)) ? 1 : 0)+
    ((((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace)) ? 1 : 0);
  return(count == 0 ? 1UL : count);
}
static inline MagickBooleanType ValidateImageMorphology(
  const Image *magick_restrict image,
  const Image *magick_restrict reconstruct_image)
{
  /*
    The images match morphologically when they carry the same number of
    default channels.
  */
  return(GetNumberChannels(image,DefaultChannels) ==
    GetNumberChannels(reconstruct_image,DefaultChannels) ? MagickTrue :
    MagickFalse);
}
/*
  CompareImageChannels() computes the distortion between image and
  reconstruct_image, then paints a difference image: pixels that differ
  (per the channel mask and the fuzz factor) take the highlight color,
  matching pixels take the lowlight color.  Returns NULL on failure.

  Fix: the reconstruct pixel's alpha (Da) is now gated on
  reconstruct_image->matte rather than image->matte, matching
  GetFuzzDistortion() and the other distortion metrics.
*/
MagickExport Image *CompareImageChannels(Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  const MetricType metric,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *highlight_view,
    *image_view,
    *reconstruct_view;

  const char
    *artifact;

  double
    fuzz;

  Image
    *clone_image,
    *difference_image,
    *highlight_image;

  MagickBooleanType
    status;

  MagickPixelPacket
    highlight,
    lowlight,
    zero;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (metric != PerceptualHashErrorMetric)
    if (ValidateImageMorphology(image,reconstruct_image) == MagickFalse)
      ThrowImageException(ImageError,"ImageMorphologyDiffers");
  status=GetImageChannelDistortion(image,reconstruct_image,channel,metric,
    distortion,exception);
  if (status == MagickFalse)
    return((Image *) NULL);
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageMask(clone_image,(Image *) NULL);
  difference_image=CloneImage(clone_image,0,0,MagickTrue,exception);
  clone_image=DestroyImage(clone_image);
  if (difference_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageAlphaChannel(difference_image,OpaqueAlphaChannel);
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  highlight_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (highlight_image == (Image *) NULL)
    {
      difference_image=DestroyImage(difference_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(highlight_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&highlight_image->exception);
      difference_image=DestroyImage(difference_image);
      highlight_image=DestroyImage(highlight_image);
      return((Image *) NULL);
    }
  (void) SetImageMask(highlight_image,(Image *) NULL);
  (void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel);
  /*
    Highlight/lowlight colors: defaults, overridable via image artifacts.
  */
  (void) QueryMagickColor("#f1001ecc",&highlight,exception);
  artifact=GetImageArtifact(image,"compare:highlight-color");
  if (artifact != (const char *) NULL)
    (void) QueryMagickColor(artifact,&highlight,exception);
  (void) QueryMagickColor("#ffffffcc",&lowlight,exception);
  artifact=GetImageArtifact(image,"compare:lowlight-color");
  if (artifact != (const char *) NULL)
    (void) QueryMagickColor(artifact,&lowlight,exception);
  if (highlight_image->colorspace == CMYKColorspace)
    {
      ConvertRGBToCMYK(&highlight);
      ConvertRGBToCMYK(&lowlight);
    }
  /*
    Generate difference image.
  */
  status=MagickTrue;
  fuzz=GetFuzzyColorDistance(image,reconstruct_image);
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  highlight_view=AcquireAuthenticCacheView(highlight_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,highlight_image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickBooleanType
      sync;

    MagickPixelPacket
      pixel,
      reconstruct_pixel;

    register const IndexPacket
      *magick_restrict indexes,
      *magick_restrict reconstruct_indexes;

    register const PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    register IndexPacket
      *magick_restrict highlight_indexes;

    register PixelPacket
      *magick_restrict r;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    r=QueueCacheViewAuthenticPixels(highlight_view,0,y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) ||
        (q == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    highlight_indexes=GetCacheViewAuthenticIndexQueue(highlight_view);
    pixel=zero;
    reconstruct_pixel=zero;
    for (x=0; x < (ssize_t) columns; x++)
    {
      MagickStatusType
        difference;

      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      SetMagickPixelPacket(reconstruct_image,q,reconstruct_indexes+x,
        &reconstruct_pixel);
      difference=MagickFalse;
      if (channel == CompositeChannels)
        {
          if (IsMagickColorSimilar(&pixel,&reconstruct_pixel) == MagickFalse)
            difference=MagickTrue;
        }
      else
        {
          double
            Da,
            distance,
            Sa;

          /*
            Alpha-weighted per-channel comparison; Da must honor the
            reconstruct image's matte flag (not the source image's), as in
            GetFuzzDistortion().
          */
          Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
            (QuantumRange-OpaqueOpacity));
          Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
            GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
          if ((channel & RedChannel) != 0)
            {
              distance=Sa*GetPixelRed(p)-Da*GetPixelRed(q);
              if ((distance*distance) > fuzz)
                difference=MagickTrue;
            }
          if ((channel & GreenChannel) != 0)
            {
              distance=Sa*GetPixelGreen(p)-Da*GetPixelGreen(q);
              if ((distance*distance) > fuzz)
                difference=MagickTrue;
            }
          if ((channel & BlueChannel) != 0)
            {
              distance=Sa*GetPixelBlue(p)-Da*GetPixelBlue(q);
              if ((distance*distance) > fuzz)
                difference=MagickTrue;
            }
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            {
              distance=(double) GetPixelOpacity(p)-GetPixelOpacity(q);
              if ((distance*distance) > fuzz)
                difference=MagickTrue;
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              distance=Sa*indexes[x]-Da*reconstruct_indexes[x];
              if ((distance*distance) > fuzz)
                difference=MagickTrue;
            }
        }
      if (difference != MagickFalse)
        SetPixelPacket(highlight_image,&highlight,r,highlight_indexes+x);
      else
        SetPixelPacket(highlight_image,&lowlight,r,highlight_indexes+x);
      p++;
      q++;
      r++;
    }
    sync=SyncCacheViewAuthenticPixels(highlight_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  highlight_view=DestroyCacheView(highlight_view);
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  (void) CompositeImage(difference_image,image->compose,highlight_image,0,0);
  highlight_image=DestroyImage(highlight_image);
  if (status == MagickFalse)
    difference_image=DestroyImage(difference_image);
  return(difference_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l D i s t o r t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelDistortion() compares one or more image channels of an image
% to a reconstructed image and returns the specified distortion metric.
%
% The format of the GetImageChannelDistortion method is:
%
% MagickBooleanType GetImageChannelDistortion(const Image *image,
% const Image *reconstruct_image,const ChannelType channel,
% const MetricType metric,double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o channel: the channel.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageDistortion(Image *image,
  const Image *reconstruct_image,const MetricType metric,double *distortion,
  ExceptionInfo *exception)
{
  /*
    Convenience wrapper: measure distortion over all composite channels.
  */
  return(GetImageChannelDistortion(image,reconstruct_image,CompositeChannels,
    metric,distortion,exception));
}
/*
  GetAbsoluteDistortion() counts, per channel and overall, the pixels whose
  alpha-weighted channel difference exceeds the fuzz threshold.  distortion[]
  is indexed by channel with the total in distortion[CompositeChannels].

  Fix: the reconstruct pixel's alpha (Da) is now gated on
  reconstruct_image->matte rather than image->matte, matching
  GetFuzzDistortion() and the other distortion metrics.
*/
static MagickBooleanType GetAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,double *distortion,
  ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    fuzz;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Compute the absolute difference in pixels between two images.
  */
  status=MagickTrue;
  fuzz=MagickMin(GetNumberChannels(image,channel),
    GetNumberChannels(reconstruct_image,channel))*
    GetFuzzyColorDistance(image,reconstruct_image);
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[CompositeChannels+1];

    register const IndexPacket
      *magick_restrict indexes,
      *magick_restrict reconstruct_indexes;

    register const PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        distance,
        pixel,
        Sa;

      MagickBooleanType
        difference;

      difference=MagickFalse;
      /*
        Da must honor the reconstruct image's matte flag (not the source
        image's), as in GetFuzzDistortion().
      */
      Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
        (QuantumRange-OpaqueOpacity));
      Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
        GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
      distance=0.0;
      if ((channel & RedChannel) != 0)
        {
          pixel=Sa*GetPixelRed(p)-Da*GetPixelRed(q);
          distance+=pixel*pixel;
          if (distance > fuzz)
            {
              channel_distortion[RedChannel]++;
              difference=MagickTrue;
            }
        }
      if ((channel & GreenChannel) != 0)
        {
          pixel=Sa*GetPixelGreen(p)-Da*GetPixelGreen(q);
          distance+=pixel*pixel;
          if (distance > fuzz)
            {
              channel_distortion[GreenChannel]++;
              difference=MagickTrue;
            }
        }
      if ((channel & BlueChannel) != 0)
        {
          pixel=Sa*GetPixelBlue(p)-Da*GetPixelBlue(q);
          distance+=pixel*pixel;
          if (distance > fuzz)
            {
              channel_distortion[BlueChannel]++;
              difference=MagickTrue;
            }
        }
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          pixel=(double) GetPixelOpacity(p)-GetPixelOpacity(q);
          distance+=pixel*pixel;
          if (distance > fuzz)
            {
              channel_distortion[OpacityChannel]++;
              difference=MagickTrue;
            }
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          pixel=Sa*indexes[x]-Da*reconstruct_indexes[x];
          distance+=pixel*pixel;
          if (distance > fuzz)
            {
              channel_distortion[BlackChannel]++;
              difference=MagickTrue;
            }
        }
      if (difference != MagickFalse)
        channel_distortion[CompositeChannels]++;
      p++;
      q++;
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetAbsoluteDistortion)
#endif
    for (i=0; i <= (ssize_t) CompositeChannels; i++)
      distortion[i]+=channel_distortion[i];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
static MagickBooleanType GetFuzzDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Fuzz metric: mean of the squared, quantum-scaled, alpha-weighted channel
    differences; the composite entry is finally reduced to its root mean
    square.  Results are accumulated per channel into distortion[], which the
    caller must supply zero-initialized with CompositeChannels+1 entries.
  */
  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[CompositeChannels+1];

    register const IndexPacket
      *magick_restrict indexes,
      *magick_restrict reconstruct_indexes;

    register const PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    /*
      Virtual pixels tolerate differing image geometries (rows/columns are
      the maximum of the two images).
    */
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      MagickRealType
        distance,
        Da,
        Sa;

      /*
        Sa/Da: normalized alpha of source/reconstruction; fully opaque (1.0)
        when the image carries no matte channel.
      */
      Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
        (QuantumRange-OpaqueOpacity));
      Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
        GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
      if ((channel & RedChannel) != 0)
        {
          distance=QuantumScale*(Sa*GetPixelRed(p)-Da*GetPixelRed(q));
          channel_distortion[RedChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      if ((channel & GreenChannel) != 0)
        {
          distance=QuantumScale*(Sa*GetPixelGreen(p)-Da*GetPixelGreen(q));
          channel_distortion[GreenChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      if ((channel & BlueChannel) != 0)
        {
          distance=QuantumScale*(Sa*GetPixelBlue(p)-Da*GetPixelBlue(q));
          channel_distortion[BlueChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      /*
        Opacity is compared raw (not alpha-weighted) when either image has a
        matte channel; a matte-less side contributes OpaqueOpacity.
      */
      if (((channel & OpacityChannel) != 0) && ((image->matte != MagickFalse) ||
          (reconstruct_image->matte != MagickFalse)))
        {
          distance=QuantumScale*((image->matte != MagickFalse ?
            GetPixelOpacity(p) : OpaqueOpacity)-
            (reconstruct_image->matte != MagickFalse ?
            GetPixelOpacity(q): OpaqueOpacity));
          channel_distortion[OpacityChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      /*
        Black channel participates only when both images are CMYK.
      */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          distance=QuantumScale*(Sa*GetPixelIndex(indexes+x)-
            Da*GetPixelIndex(reconstruct_indexes+x));
          channel_distortion[BlackChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      p++;
      q++;
    }
    /*
      Merge this row's private totals into the shared result.
    */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetFuzzDistortion)
#endif
    for (i=0; i <= (ssize_t) CompositeChannels; i++)
      distortion[i]+=channel_distortion[i];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Normalize to a per-pixel mean; the composite entry is additionally
    averaged over the selected channels and collapsed to its square root.
  */
  for (i=0; i <= (ssize_t) CompositeChannels; i++)
    distortion[i]/=((double) columns*rows);
  distortion[CompositeChannels]/=(double) GetNumberChannels(image,channel);
  distortion[CompositeChannels]=sqrt(distortion[CompositeChannels]);
  return(status);
}
static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Mean absolute error: mean of the quantum-scaled, alpha-weighted absolute
    channel differences.  Per-channel results accumulate into distortion[],
    which the caller supplies zero-initialized.
  */
  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[CompositeChannels+1];

    register const IndexPacket
      *magick_restrict indexes,
      *magick_restrict reconstruct_indexes;

    register const PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      MagickRealType
        distance,
        Da,
        Sa;

      /*
        Normalized alpha; opaque (1.0) when the image has no matte channel.
      */
      Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
        (QuantumRange-OpaqueOpacity));
      Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
        GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
      if ((channel & RedChannel) != 0)
        {
          distance=QuantumScale*fabs(Sa*GetPixelRed(p)-Da*GetPixelRed(q));
          channel_distortion[RedChannel]+=distance;
          channel_distortion[CompositeChannels]+=distance;
        }
      if ((channel & GreenChannel) != 0)
        {
          distance=QuantumScale*fabs(Sa*GetPixelGreen(p)-Da*GetPixelGreen(q));
          channel_distortion[GreenChannel]+=distance;
          channel_distortion[CompositeChannels]+=distance;
        }
      if ((channel & BlueChannel) != 0)
        {
          distance=QuantumScale*fabs(Sa*GetPixelBlue(p)-Da*GetPixelBlue(q));
          channel_distortion[BlueChannel]+=distance;
          channel_distortion[CompositeChannels]+=distance;
        }
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          distance=QuantumScale*fabs(GetPixelOpacity(p)-(double)
            GetPixelOpacity(q));
          channel_distortion[OpacityChannel]+=distance;
          channel_distortion[CompositeChannels]+=distance;
        }
      /*
        NOTE(review): only the source image's colorspace is checked here,
        unlike GetFuzzDistortion() which requires both to be CMYK — confirm
        this asymmetry is intentional.
      */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          distance=QuantumScale*fabs(Sa*GetPixelIndex(indexes+x)-Da*
            GetPixelIndex(reconstruct_indexes+x));
          channel_distortion[BlackChannel]+=distance;
          channel_distortion[CompositeChannels]+=distance;
        }
      p++;
      q++;
    }
    /*
      Merge this row's private totals into the shared result.
    */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanAbsoluteError)
#endif
    for (i=0; i <= (ssize_t) CompositeChannels; i++)
      distortion[i]+=channel_distortion[i];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Normalize to a per-pixel mean; the composite entry is additionally
    averaged over the number of selected channels.
  */
  for (i=0; i <= (ssize_t) CompositeChannels; i++)
    distortion[i]/=((double) columns*rows);
  distortion[CompositeChannels]/=(double) GetNumberChannels(image,channel);
  return(status);
}
static MagickBooleanType GetMeanErrorPerPixel(Image *image,
  const Image *reconstruct_image,const ChannelType channel,double *distortion,
  ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  MagickRealType
    area,
    gamma,
    maximum_error,
    mean_error;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Mean error per pixel.  Unlike the other metrics this mutates the image:
    it stores mean/normalized-mean/normalized-maximum error in image->error.
    It runs single-threaded because distortion[] and the local accumulators
    (area, mean_error, maximum_error) are shared across rows.
  */
  status=MagickTrue;
  area=0.0;
  maximum_error=0.0;
  mean_error=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes,
      *magick_restrict reconstruct_indexes;

    register const PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        break;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    for (x=0; x < (ssize_t) columns; x++)
    {
      MagickRealType
        distance,
        Da,
        Sa;

      /*
        Normalized alpha; opaque (1.0) when the image has no matte channel.
      */
      Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
        (QuantumRange-OpaqueOpacity));
      Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
        GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
      /*
        For each selected channel: accumulate the absolute difference (in
        quantum units), its square for the mean error, track the maximum,
        and count the sample in area.
      */
      if ((channel & RedChannel) != 0)
        {
          distance=fabs(Sa*GetPixelRed(p)-Da*GetPixelRed(q));
          distortion[RedChannel]+=distance;
          distortion[CompositeChannels]+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      if ((channel & GreenChannel) != 0)
        {
          distance=fabs(Sa*GetPixelGreen(p)-Da*GetPixelGreen(q));
          distortion[GreenChannel]+=distance;
          distortion[CompositeChannels]+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      if ((channel & BlueChannel) != 0)
        {
          distance=fabs(Sa*GetPixelBlue(p)-Da*GetPixelBlue(q));
          distortion[BlueChannel]+=distance;
          distortion[CompositeChannels]+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          distance=fabs((double) GetPixelOpacity(p)-
            GetPixelOpacity(q));
          distortion[OpacityChannel]+=distance;
          distortion[CompositeChannels]+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          distance=fabs(Sa*GetPixelIndex(indexes+x)-Da*
            GetPixelIndex(reconstruct_indexes+x));
          distortion[BlackChannel]+=distance;
          distortion[CompositeChannels]+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      p++;
      q++;
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    area counts every channel sample; PerceptibleReciprocal() guards against
    division by zero for empty selections.
  */
  gamma=PerceptibleReciprocal(area);
  image->error.mean_error_per_pixel=gamma*distortion[CompositeChannels];
  image->error.normalized_mean_error=gamma*QuantumScale*QuantumScale*mean_error;
  image->error.normalized_maximum_error=QuantumScale*maximum_error;
  return(status);
}
static MagickBooleanType GetMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Mean squared error: mean of the squared, quantum-scaled, alpha-weighted
    channel differences.  Also the basis for the PSNR and RMSE metrics.
  */
  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[CompositeChannels+1];

    register const IndexPacket
      *magick_restrict indexes,
      *magick_restrict reconstruct_indexes;

    register const PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      MagickRealType
        distance,
        Da,
        Sa;

      /*
        Normalized alpha; opaque (1.0) when the image has no matte channel.
      */
      Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
        (QuantumRange-OpaqueOpacity));
      Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
        GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
      if ((channel & RedChannel) != 0)
        {
          distance=QuantumScale*(Sa*GetPixelRed(p)-Da*GetPixelRed(q));
          channel_distortion[RedChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      if ((channel & GreenChannel) != 0)
        {
          distance=QuantumScale*(Sa*GetPixelGreen(p)-Da*GetPixelGreen(q));
          channel_distortion[GreenChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      if ((channel & BlueChannel) != 0)
        {
          distance=QuantumScale*(Sa*GetPixelBlue(p)-Da*GetPixelBlue(q));
          channel_distortion[BlueChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      /*
        Opacity is compared raw (not alpha-weighted).
      */
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          distance=QuantumScale*(GetPixelOpacity(p)-(MagickRealType)
            GetPixelOpacity(q));
          channel_distortion[OpacityChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      /*
        Black channel participates only when both images are CMYK.
      */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          distance=QuantumScale*(Sa*GetPixelIndex(indexes+x)-Da*
            GetPixelIndex(reconstruct_indexes+x));
          channel_distortion[BlackChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      p++;
      q++;
    }
    /*
      Merge this row's private totals into the shared result.
    */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanSquaredError)
#endif
    for (i=0; i <= (ssize_t) CompositeChannels; i++)
      distortion[i]+=channel_distortion[i];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Normalize to a per-pixel mean; the composite entry is additionally
    averaged over the number of selected channels.
  */
  for (i=0; i <= (ssize_t) CompositeChannels; i++)
    distortion[i]/=((double) columns*rows);
  distortion[CompositeChannels]/=(double) GetNumberChannels(image,channel);
  return(status);
}
static MagickBooleanType GetNormalizedCrossCorrelationDistortion(
  const Image *image,const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
#define SimilarityImageTag  "Similarity/Image"

  CacheView
    *image_view,
    *reconstruct_view;

  ChannelStatistics
    *image_statistics,
    *reconstruct_statistics;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    area;

  register ssize_t
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Normalize to account for variation due to lighting and exposure condition.
    NCC per channel: mean of (pixel - mean) * (reconstruct_pixel - mean),
    later divided by the product of the channel standard deviations.
  */
  image_statistics=GetImageChannelStatistics(image,exception);
  reconstruct_statistics=GetImageChannelStatistics(reconstruct_image,exception);
  if ((image_statistics == (ChannelStatistics *) NULL) ||
      (reconstruct_statistics == (ChannelStatistics *) NULL))
    {
      /*
        One acquisition failed; release whichever succeeded before bailing.
      */
      if (image_statistics != (ChannelStatistics *) NULL)
        image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          image_statistics);
      if (reconstruct_statistics != (ChannelStatistics *) NULL)
        reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          reconstruct_statistics);
      return(MagickFalse);
    }
  status=MagickTrue;
  progress=0;
  for (i=0; i <= (ssize_t) CompositeChannels; i++)
    distortion[i]=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=1.0/((MagickRealType) columns*rows);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes,
      *magick_restrict reconstruct_indexes;

    register const PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    for (x=0; x < (ssize_t) columns; x++)
    {
      MagickRealType
        Da,
        Sa;

      /*
        Normalized alpha; opaque (1.0) when the image has no matte channel.
      */
      Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
        (QuantumRange-OpaqueOpacity));
      Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
        GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
      /*
        Accumulate the covariance term for each selected channel; area folds
        the 1/(columns*rows) normalization into each addend.
      */
      if ((channel & RedChannel) != 0)
        distortion[RedChannel]+=area*QuantumScale*(Sa*GetPixelRed(p)-
          image_statistics[RedChannel].mean)*(Da*GetPixelRed(q)-
          reconstruct_statistics[RedChannel].mean);
      if ((channel & GreenChannel) != 0)
        distortion[GreenChannel]+=area*QuantumScale*(Sa*GetPixelGreen(p)-
          image_statistics[GreenChannel].mean)*(Da*GetPixelGreen(q)-
          reconstruct_statistics[GreenChannel].mean);
      if ((channel & BlueChannel) != 0)
        distortion[BlueChannel]+=area*QuantumScale*(Sa*GetPixelBlue(p)-
          image_statistics[BlueChannel].mean)*(Da*GetPixelBlue(q)-
          reconstruct_statistics[BlueChannel].mean);
      if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
        distortion[OpacityChannel]+=area*QuantumScale*(
          GetPixelOpacity(p)-image_statistics[OpacityChannel].mean)*
          (GetPixelOpacity(q)-reconstruct_statistics[OpacityChannel].mean);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        distortion[BlackChannel]+=area*QuantumScale*(Sa*
          GetPixelIndex(indexes+x)-image_statistics[BlackChannel].mean)*(Da*
          GetPixelIndex(reconstruct_indexes+x)-
          reconstruct_statistics[BlackChannel].mean);
      p++;
      q++;
    }
    /*
      Report row progress; a canceled monitor aborts the comparison.
    */
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,SimilarityImageTag,progress++,rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Divide by the standard deviation.
  */
  for (i=0; i < (ssize_t) CompositeChannels; i++)
  {
    double
      gamma;

    gamma=image_statistics[i].standard_deviation*
      reconstruct_statistics[i].standard_deviation;
    gamma=PerceptibleReciprocal(gamma);
    distortion[i]=QuantumRange*gamma*distortion[i];
  }
  /*
    Composite distortion: RMS of the selected per-channel correlations.
  */
  distortion[CompositeChannels]=0.0;
  if ((channel & RedChannel) != 0)
    distortion[CompositeChannels]+=distortion[RedChannel]*
      distortion[RedChannel];
  if ((channel & GreenChannel) != 0)
    distortion[CompositeChannels]+=distortion[GreenChannel]*
      distortion[GreenChannel];
  if ((channel & BlueChannel) != 0)
    distortion[CompositeChannels]+=distortion[BlueChannel]*
      distortion[BlueChannel];
  if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
    distortion[CompositeChannels]+=distortion[OpacityChannel]*
      distortion[OpacityChannel];
  if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))
    distortion[CompositeChannels]+=distortion[BlackChannel]*
      distortion[BlackChannel];
  distortion[CompositeChannels]=sqrt(distortion[CompositeChannels]/
    GetNumberChannels(image,channel));
  /*
    Free resources.
  */
  reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    reconstruct_statistics);
  image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    image_statistics);
  return(status);
}
static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Peak absolute error: the maximum quantum-scaled, alpha-weighted absolute
    channel difference over all pixels.  Each thread tracks row-local maxima
    and merges them into distortion[] under the critical section.
  */
  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[CompositeChannels+1];

    register const IndexPacket
      *magick_restrict indexes,
      *magick_restrict reconstruct_indexes;

    register const PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      MagickRealType
        distance,
        Da,
        Sa;

      /*
        Normalized alpha; opaque (1.0) when the image has no matte channel.
      */
      Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
        (QuantumRange-OpaqueOpacity));
      Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
        GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
      if ((channel & RedChannel) != 0)
        {
          distance=QuantumScale*fabs(Sa*GetPixelRed(p)-Da*GetPixelRed(q));
          if (distance > channel_distortion[RedChannel])
            channel_distortion[RedChannel]=distance;
          if (distance > channel_distortion[CompositeChannels])
            channel_distortion[CompositeChannels]=distance;
        }
      if ((channel & GreenChannel) != 0)
        {
          distance=QuantumScale*fabs(Sa*GetPixelGreen(p)-Da*GetPixelGreen(q));
          if (distance > channel_distortion[GreenChannel])
            channel_distortion[GreenChannel]=distance;
          if (distance > channel_distortion[CompositeChannels])
            channel_distortion[CompositeChannels]=distance;
        }
      if ((channel & BlueChannel) != 0)
        {
          distance=QuantumScale*fabs(Sa*GetPixelBlue(p)-Da*GetPixelBlue(q));
          if (distance > channel_distortion[BlueChannel])
            channel_distortion[BlueChannel]=distance;
          if (distance > channel_distortion[CompositeChannels])
            channel_distortion[CompositeChannels]=distance;
        }
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          distance=QuantumScale*fabs(GetPixelOpacity(p)-(double)
            GetPixelOpacity(q));
          if (distance > channel_distortion[OpacityChannel])
            channel_distortion[OpacityChannel]=distance;
          if (distance > channel_distortion[CompositeChannels])
            channel_distortion[CompositeChannels]=distance;
        }
      /*
        Black channel participates only when both images are CMYK.
      */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          distance=QuantumScale*fabs(Sa*GetPixelIndex(indexes+x)-Da*
            GetPixelIndex(reconstruct_indexes+x));
          if (distance > channel_distortion[BlackChannel])
            channel_distortion[BlackChannel]=distance;
          if (distance > channel_distortion[CompositeChannels])
            channel_distortion[CompositeChannels]=distance;
        }
      p++;
      q++;
    }
    /*
      Max-reduce this row's peaks into the shared result.
    */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetPeakAbsoluteError)
#endif
    for (i=0; i <= (ssize_t) CompositeChannels; i++)
      if (channel_distortion[i] > distortion[i])
        distortion[i]=channel_distortion[i];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
static MagickBooleanType GetPeakSignalToNoiseRatio(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /*
    PSNR is derived from the mean-squared error: each selected channel entry
    of distortion[] is rewritten in-place as 10*log10(1/MSE), i.e.
    10*log10(1.0)-10*log10(MSE).  A vanishing MSE maps to INFINITY.
  */
  status=GetMeanSquaredDistortion(image,reconstruct_image,channel,distortion,
    exception);
  if ((channel & RedChannel) != 0)
    distortion[RedChannel]=fabs(distortion[RedChannel]) < MagickEpsilon ?
      INFINITY : 10.0*MagickLog10(1.0)-10.0*
      MagickLog10(distortion[RedChannel]);
  if ((channel & GreenChannel) != 0)
    distortion[GreenChannel]=fabs(distortion[GreenChannel]) < MagickEpsilon ?
      INFINITY : 10.0*MagickLog10(1.0)-10.0*
      MagickLog10(distortion[GreenChannel]);
  if ((channel & BlueChannel) != 0)
    distortion[BlueChannel]=fabs(distortion[BlueChannel]) < MagickEpsilon ?
      INFINITY : 10.0*MagickLog10(1.0)-10.0*
      MagickLog10(distortion[BlueChannel]);
  /*
    Opacity applies only when the image has a matte channel; black only when
    the image is CMYK.
  */
  if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
    distortion[OpacityChannel]=fabs(distortion[OpacityChannel]) <
      MagickEpsilon ? INFINITY : 10.0*MagickLog10(1.0)-10.0*
      MagickLog10(distortion[OpacityChannel]);
  if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))
    distortion[BlackChannel]=fabs(distortion[BlackChannel]) < MagickEpsilon ?
      INFINITY : 10.0*MagickLog10(1.0)-10.0*
      MagickLog10(distortion[BlackChannel]);
  /*
    The composite entry is always converted.
  */
  distortion[CompositeChannels]=fabs(distortion[CompositeChannels]) <
    MagickEpsilon ? INFINITY : 10.0*MagickLog10(1.0)-10.0*
    MagickLog10(distortion[CompositeChannels]);
  return(status);
}
static MagickBooleanType GetPerceptualHashDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,double *distortion,
  ExceptionInfo *exception)
{
  ChannelPerceptualHash
    *image_phash,
    *reconstruct_phash;

  double
    difference;

  register ssize_t
    i;

  /*
    Perceptual hash distortion: the sum of squared differences between the
    two images' per-channel image moments, taken over both moment sets
    (P and Q) of the perceptual hash.  Accumulates into distortion[], which
    the caller supplies zero-initialized.
  */
  /*
    Compute perceptual hash in the sRGB colorspace.
  */
  image_phash=GetImageChannelPerceptualHash(image,exception);
  if (image_phash == (ChannelPerceptualHash *) NULL)
    return(MagickFalse);
  reconstruct_phash=GetImageChannelPerceptualHash(reconstruct_image,exception);
  if (reconstruct_phash == (ChannelPerceptualHash *) NULL)
    {
      image_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(image_phash);
      return(MagickFalse);
    }
  for (i=0; i < MaximumNumberOfImageMoments; i++)
  {
    /*
      Compute sum of moment differences squared.
    */
    if ((channel & RedChannel) != 0)
      {
        difference=reconstruct_phash[RedChannel].P[i]-
          image_phash[RedChannel].P[i];
        distortion[RedChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
    if ((channel & GreenChannel) != 0)
      {
        difference=reconstruct_phash[GreenChannel].P[i]-
          image_phash[GreenChannel].P[i];
        distortion[GreenChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
    if ((channel & BlueChannel) != 0)
      {
        difference=reconstruct_phash[BlueChannel].P[i]-
          image_phash[BlueChannel].P[i];
        distortion[BlueChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
    /*
      Opacity requires a matte channel on both images; index requires both
      to be CMYK.
    */
    if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse) &&
        (reconstruct_image->matte != MagickFalse))
      {
        difference=reconstruct_phash[OpacityChannel].P[i]-
          image_phash[OpacityChannel].P[i];
        distortion[OpacityChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace) &&
        (reconstruct_image->colorspace == CMYKColorspace))
      {
        difference=reconstruct_phash[IndexChannel].P[i]-
          image_phash[IndexChannel].P[i];
        distortion[IndexChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
  }
  /*
    Compute perceptual hash in the HCLP colorspace.
  */
  for (i=0; i < MaximumNumberOfImageMoments; i++)
  {
    /*
      Compute sum of moment differences squared.
    */
    if ((channel & RedChannel) != 0)
      {
        difference=reconstruct_phash[RedChannel].Q[i]-
          image_phash[RedChannel].Q[i];
        distortion[RedChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
    if ((channel & GreenChannel) != 0)
      {
        difference=reconstruct_phash[GreenChannel].Q[i]-
          image_phash[GreenChannel].Q[i];
        distortion[GreenChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
    if ((channel & BlueChannel) != 0)
      {
        difference=reconstruct_phash[BlueChannel].Q[i]-
          image_phash[BlueChannel].Q[i];
        distortion[BlueChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
    if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse) &&
        (reconstruct_image->matte != MagickFalse))
      {
        difference=reconstruct_phash[OpacityChannel].Q[i]-
          image_phash[OpacityChannel].Q[i];
        distortion[OpacityChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace) &&
        (reconstruct_image->colorspace == CMYKColorspace))
      {
        difference=reconstruct_phash[IndexChannel].Q[i]-
          image_phash[IndexChannel].Q[i];
        distortion[IndexChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
  }
  /*
    Free resources.
  */
  reconstruct_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
    reconstruct_phash);
  image_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(image_phash);
  return(MagickTrue);
}
static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,double *distortion,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /*
    RMSE is simply the square root of the mean-squared error; compute the
    MSE first, then take the root of each selected channel entry in place.
  */
  status=GetMeanSquaredDistortion(image,reconstruct_image,channel,distortion,
    exception);
  if ((channel & RedChannel) != 0)
    distortion[RedChannel]=sqrt(distortion[RedChannel]);
  if ((channel & GreenChannel) != 0)
    distortion[GreenChannel]=sqrt(distortion[GreenChannel]);
  if ((channel & BlueChannel) != 0)
    distortion[BlueChannel]=sqrt(distortion[BlueChannel]);
  /*
    Opacity applies only with a matte channel; black only for CMYK images.
    The composite entry is always converted.
  */
  if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
    distortion[OpacityChannel]=sqrt(distortion[OpacityChannel]);
  if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))
    distortion[BlackChannel]=sqrt(distortion[BlackChannel]);
  distortion[CompositeChannels]=sqrt(distortion[CompositeChannels]);
  return(status);
}
MagickExport MagickBooleanType GetImageChannelDistortion(Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  const MetricType metric,double *distortion,ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  /*
    Dispatch to the requested distortion metric and return its composite
    value in *distortion.  The per-channel results are computed into a
    scratch array and discarded; only the CompositeChannels entry survives
    (it is also recorded as the "distortion" image property).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  /*
    NOTE(review): this debug trace duplicates the one above — likely
    copy/paste; harmless but redundant.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Perceptual hash tolerates differing morphology; every other metric
    requires the images to match.
  */
  if (metric != PerceptualHashErrorMetric)
    if (ValidateImageMorphology(image,reconstruct_image) == MagickFalse)
      ThrowBinaryException(ImageError,"ImageMorphologyDiffers",image->filename);
  /*
    Get image distortion.
  */
  length=CompositeChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(channel_distortion,0,length*sizeof(*channel_distortion));
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    /*
      Unrecognized metrics fall back to normalized cross-correlation.
    */
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel,channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      status=GetPerceptualHashDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
  }
  *distortion=channel_distortion[CompositeChannels];
  channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
  (void) FormatImageProperty(image,"distortion","%.*g",GetMagickPrecision(),
    *distortion);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l D i s t o r t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelDistortions() compares the image channels of an image to a
% reconstructed image and returns the specified distortion metric for each
% channel.
%
% The format of the GetImageChannelDistortions method is:
%
% double *GetImageChannelDistortions(const Image *image,
% const Image *reconstruct_image,const MetricType metric,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageChannelDistortions() compares the channels of `image' against
  `reconstruct_image' using `metric' and returns an array of
  CompositeChannels+1 doubles (one entry per channel plus the composite
  value).  The caller owns the array and must free it with
  RelinquishMagickMemory().  Returns NULL if the morphology check or the
  metric computation fails.

  Fix: the original emitted the identical TraceEvent log line twice under
  the same `image->debug' condition; the redundant duplicate is removed.
*/
MagickExport double *GetImageChannelDistortions(Image *image,
  const Image *reconstruct_image,const MetricType metric,
  ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    The perceptual hash tolerates differing geometry; every other metric
    requires matching morphology.
  */
  if (metric != PerceptualHashErrorMetric)
    if (ValidateImageMorphology(image,reconstruct_image) == MagickFalse)
      {
        (void) ThrowMagickException(&image->exception,GetMagickModule(),
          ImageError,"ImageMorphologyDiffers","`%s'",image->filename);
        return((double *) NULL);
      }
  /*
    Get image distortion.
  */
  length=CompositeChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(channel_distortion,0,length*
    sizeof(*channel_distortion));
  status=MagickTrue;
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,CompositeChannels,
        channel_distortion,exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,CompositeChannels,
        channel_distortion,exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,CompositeChannels,
        channel_distortion,exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,CompositeChannels,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
  }
  if (status == MagickFalse)
    {
      /* Metric computation failed: release the result array and report NULL. */
      channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
      return((double *) NULL);
    }
  return(channel_distortion);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e s E q u a l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImagesEqual() measures the difference between colors at each pixel
% location of two images. A value other than 0 means the colors match
% exactly. Otherwise an error measure is computed by summing over all
% pixels in an image the distance squared in RGB space between each image
% pixel and its corresponding pixel in the reconstruct image. The error
% measure is assigned to these image members:
%
% o mean_error_per_pixel: The mean error for any single pixel in
% the image.
%
% o normalized_mean_error: The normalized mean quantization error for
% any single pixel in the image. This distance measure is normalized to
% a range between 0 and 1. It is independent of the range of red, green,
% and blue values in the image.
%
% o normalized_maximum_error: The normalized maximum quantization
% error for any single pixel in the image. This distance measure is
% normalized to a range between 0 and 1. It is independent of the range
% of red, green, and blue values in your image.
%
% A small normalized mean square error, accessed as
% image->normalized_mean_error, suggests the images are very similar in
% spatial layout and color.
%
% The format of the IsImagesEqual method is:
%
% MagickBooleanType IsImagesEqual(Image *image,
% const Image *reconstruct_image)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
*/
/*
  IsImagesEqual() accumulates per-channel absolute differences between the
  two images and stores mean/normalized/maximum error statistics into
  image->error; it returns MagickTrue only when the mean error per pixel is
  exactly zero (identical images).
*/
MagickExport MagickBooleanType IsImagesEqual(Image *image,
const Image *reconstruct_image)
{
CacheView
*image_view,
*reconstruct_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickRealType
area,
gamma,
maximum_error,
mean_error,
mean_error_per_pixel;
size_t
columns,
rows;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(reconstruct_image != (const Image *) NULL);
assert(reconstruct_image->signature == MagickCoreSignature);
exception=(&image->exception);
if (ValidateImageMorphology(image,reconstruct_image) == MagickFalse)
ThrowBinaryException(ImageError,"ImageMorphologyDiffers",image->filename);
area=0.0;
maximum_error=0.0;
mean_error_per_pixel=0.0;
mean_error=0.0;
/*
Scan the union of both geometries; the virtual cache views supply pixels
where one image is smaller than the other.
*/
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
for (y=0; y < (ssize_t) rows; y++)
{
register const IndexPacket
*magick_restrict indexes,
*magick_restrict reconstruct_indexes;
register const PixelPacket
*magick_restrict p,
*magick_restrict q;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
break;
indexes=GetCacheViewVirtualIndexQueue(image_view);
reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
for (x=0; x < (ssize_t) columns; x++)
{
MagickRealType
distance;
/*
For each channel: accumulate the absolute difference (for the mean
error per pixel), its square (for the mean squared error) and track
the maximum; `area' counts one sample per channel visited.
*/
distance=fabs(GetPixelRed(p)-(double) GetPixelRed(q));
mean_error_per_pixel+=distance;
mean_error+=distance*distance;
if (distance > maximum_error)
maximum_error=distance;
area++;
distance=fabs(GetPixelGreen(p)-(double) GetPixelGreen(q));
mean_error_per_pixel+=distance;
mean_error+=distance*distance;
if (distance > maximum_error)
maximum_error=distance;
area++;
distance=fabs(GetPixelBlue(p)-(double) GetPixelBlue(q));
mean_error_per_pixel+=distance;
mean_error+=distance*distance;
if (distance > maximum_error)
maximum_error=distance;
area++;
/* Opacity participates only when the image carries a matte channel. */
if (image->matte != MagickFalse)
{
distance=fabs(GetPixelOpacity(p)-(double) GetPixelOpacity(q));
mean_error_per_pixel+=distance;
mean_error+=distance*distance;
if (distance > maximum_error)
maximum_error=distance;
area++;
}
/* The black (index) channel is compared only when both images are CMYK. */
if ((image->colorspace == CMYKColorspace) &&
(reconstruct_image->colorspace == CMYKColorspace))
{
distance=fabs(GetPixelIndex(indexes+x)-(double)
GetPixelIndex(reconstruct_indexes+x));
mean_error_per_pixel+=distance;
mean_error+=distance*distance;
if (distance > maximum_error)
maximum_error=distance;
area++;
}
p++;
q++;
}
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
/*
Normalize: gamma is 1/area (safe for area == 0); the mean squared error
is additionally scaled by QuantumScale^2 to land in [0, 1].
*/
gamma=PerceptibleReciprocal(area);
image->error.mean_error_per_pixel=gamma*mean_error_per_pixel;
image->error.normalized_mean_error=gamma*QuantumScale*QuantumScale*mean_error;
image->error.normalized_maximum_error=QuantumScale*maximum_error;
status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse;
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i m i l a r i t y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SimilarityImage() compares the reference image of the image and returns the
% best match offset. In addition, it returns a similarity image such that an
% exact match location is completely white and if none of the pixels match,
% black, otherwise some gray level in-between.
%
% The format of the SimilarityImage method is:
%
% Image *SimilarityImage(const Image *image,const Image *reference,
% RectangleInfo *offset,double *similarity,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reference: find an area of the image that closely resembles this image.
%
% o offset: the best match offset of the reference image within the image.
%
% o similarity: the computed similarity between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetSimilarityMetric() crops a reference-sized window out of `image' at
  (x_offset, y_offset) and returns the distortion between that window and
  `reference' under `metric'.  A failed crop yields 0.0.
*/
static double GetSimilarityMetric(const Image *image,const Image *reference,
  const MetricType metric,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
  double
    window_distortion = 0.0;

  Image
    *window;

  MagickBooleanType
    window_status;

  RectangleInfo
    window_geometry;

  /* Position a reference-sized crop rectangle at the requested offset. */
  SetGeometry(reference,&window_geometry);
  window_geometry.x=x_offset;
  window_geometry.y=y_offset;
  window=CropImage(image,&window_geometry,exception);
  if (window == (Image *) NULL)
    return(0.0);
  window_status=GetImageDistortion(window,reference,metric,&window_distortion,
    exception);
  (void) window_status;
  window=DestroyImage(window);
  return(window_distortion);
}
/*
  SimilarityImage() is a convenience wrapper around SimilarityMetricImage()
  that fixes the metric to root-mean-squared error.
*/
MagickExport Image *SimilarityImage(Image *image,const Image *reference,
  RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception)
{
  return(SimilarityMetricImage(image,reference,RootMeanSquaredErrorMetric,
    offset,similarity_metric,exception));
}
/*
  SimilarityMetricImage() slides `reference' over every offset of `image',
  evaluates `metric' at each position, records the best-matching offset in
  `offset'/`similarity_metric', and returns a map image whose intensity
  encodes the per-offset similarity (white = perfect match).  Returns NULL
  on failure.
*/
MagickExport Image *SimilarityMetricImage(Image *image,const Image *reference,
const MetricType metric,RectangleInfo *offset,double *similarity_metric,
ExceptionInfo *exception)
{
#define SimilarityImageTag "Similarity/Image"
CacheView
*similarity_view;
const char
*artifact;
double
similarity_threshold;
Image
*similarity_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
assert(offset != (RectangleInfo *) NULL);
SetGeometry(reference,offset);
/* Seed the best-so-far metric with the largest representable value. */
*similarity_metric=MagickMaximumValue;
if (ValidateImageMorphology(image,reference) == MagickFalse)
ThrowImageException(ImageError,"ImageMorphologyDiffers");
/*
The similarity map has one pixel per candidate offset:
(columns-ref.columns+1) x (rows-ref.rows+1).
*/
similarity_image=CloneImage(image,image->columns-reference->columns+1,
image->rows-reference->rows+1,MagickTrue,exception);
if (similarity_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(similarity_image,DirectClass) == MagickFalse)
{
InheritException(exception,&similarity_image->exception);
similarity_image=DestroyImage(similarity_image);
return((Image *) NULL);
}
(void) SetImageAlphaChannel(similarity_image,DeactivateAlphaChannel);
/*
Measure similarity of reference image against image.
*/
/* -1.0 disables early termination unless the artifact overrides it. */
similarity_threshold=(-1.0);
artifact=GetImageArtifact(image,"compare:similarity-threshold");
if (artifact != (const char *) NULL)
similarity_threshold=StringToDouble(artifact,(char **) NULL);
status=MagickTrue;
progress=0;
similarity_view=AcquireVirtualCacheView(similarity_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) \
shared(progress,status,similarity_metric) \
magick_number_threads(image,image,image->rows-reference->rows+1,1)
#endif
for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++)
{
double
similarity;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp flush(similarity_metric)
#endif
if (*similarity_metric <= similarity_threshold)
continue;
q=GetCacheViewAuthenticPixels(similarity_view,0,y,similarity_image->columns,
1,exception);
if (q == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp flush(similarity_metric)
#endif
if (*similarity_metric <= similarity_threshold)
break;
similarity=GetSimilarityMetric(image,reference,metric,x,y,exception);
/*
NOTE(review): the `critical' construct guards only the single `if'
statement that follows it; the read-modify-write of *similarity_metric
a few lines below appears to run outside the critical section --
confirm whether enclosing braces were intended here.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SimilarityImage)
#endif
if ((metric == NormalizedCrossCorrelationErrorMetric) ||
(metric == UndefinedErrorMetric))
similarity=1.0-similarity;
if (similarity < *similarity_metric)
{
*similarity_metric=similarity;
offset->x=x;
offset->y=y;
}
if (metric == PerceptualHashErrorMetric)
similarity=MagickMin(0.01*similarity,1.0);
/* Write a gray level: white for perfect match, black for none. */
SetPixelRed(q,ClampToQuantum(QuantumRange-QuantumRange*similarity));
SetPixelGreen(q,GetPixelRed(q));
SetPixelBlue(q,GetPixelRed(q));
q++;
}
if (SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/*
NOTE(review): progress++ is not atomic under the parallel-for above;
the counter may under-report -- confirm whether an atomic update was
intended.
*/
proceed=SetImageProgress(image,SimilarityImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
similarity_view=DestroyCacheView(similarity_view);
if (status == MagickFalse)
similarity_image=DestroyImage(similarity_image);
return(similarity_image);
}
|
strmm.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/ztrmm.c, normal z -> s, Fri Sep 28 17:38:03 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_trmm
*
* Performs a triangular matrix-matrix multiply of the form
*
* \f[B = \alpha [op(A) \times B] \f], if side = PlasmaLeft or
* \f[B = \alpha [B \times op(A)] \f], if side = PlasmaRight
*
* where op( X ) is one of:
*
* - op(A) = A or
* - op(A) = A^T or
* - op(A) = A^H (identical to A^T for this real-arithmetic version)
*
* alpha is a scalar, B is an m-by-n matrix and A is a unit or non-unit, upper
* or lower triangular matrix.
*
*******************************************************************************
*
* @param[in] side
* Specifies whether op( A ) appears on the left or on the right of B:
* - PlasmaLeft: alpha*op( A )*B
* - PlasmaRight: alpha*B*op( A )
*
* @param[in] uplo
* Specifies whether the matrix A is upper triangular or lower
* triangular:
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] transa
* Specifies whether the matrix A is transposed, not transposed or
* conjugate transposed:
* - PlasmaNoTrans: A is not transposed;
* - PlasmaTrans: A is transposed;
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] diag
* Specifies whether or not A is unit triangular:
* - PlasmaNonUnit: A is non-unit triangular;
* - PlasmaUnit: A is unit triangular.
*
* @param[in] m
* The number of rows of matrix B.
* m >= 0.
*
* @param[in] n
* The number of columns of matrix B.
* n >= 0.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] pA
* The triangular matrix A of dimension lda-by-k, where k is m when
* side='L' or 'l' and k is n when when side='R' or 'r'. If uplo =
* PlasmaUpper, the leading k-by-k upper triangular part of the array
* A contains the upper triangular matrix, and the strictly lower
* triangular part of A is not referenced. If uplo = PlasmaLower, the
* leading k-by-k lower triangular part of the array A contains the
* lower triangular matrix, and the strictly upper triangular part of
* A is not referenced. If diag = PlasmaUnit, the diagonal elements of
* A are also not referenced and are assumed to be 1.
*
* @param[in] lda
* The leading dimension of the array A. When side='L' or 'l',
* lda >= max(1,m), when side='R' or 'r' then lda >= max(1,n).
*
* @param[in,out] pB
* On entry, the matrix B of dimension ldb-by-n.
* On exit, the result of a triangular matrix-matrix multiply
* ( alpha*op(A)*B ) or ( alpha*B*op(A) ).
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,m).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
*
*******************************************************************************
*
* @sa plasma_omp_strmm
* @sa plasma_ctrmm
* @sa plasma_dtrmm
* @sa plasma_strmm
*
******************************************************************************/
int plasma_strmm(plasma_enum_t side, plasma_enum_t uplo,
                 plasma_enum_t transa, plasma_enum_t diag,
                 int m, int n,
                 float alpha, float *pA, int lda,
                 float *pB, int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments. Return codes are the negated positions of the
    // offending argument (LAPACK convention).
    if (side != PlasmaLeft && side != PlasmaRight) {
        plasma_error("illegal value of side");
        return -1;
    }
    if (uplo != PlasmaUpper && uplo != PlasmaLower) {
        plasma_error("illegal value of uplo");
        return -2;
    }
    if (transa != PlasmaConjTrans &&
        transa != PlasmaNoTrans &&
        transa != PlasmaTrans )
    {
        plasma_error("illegal value of transa");
        return -3;
    }
    if (diag != PlasmaUnit && diag != PlasmaNonUnit) {
        plasma_error("illegal value of diag");
        return -4;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -5;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -6;
    }
    // A is k-by-k, where k is the dimension of B on the side A multiplies.
    int k = (side == PlasmaLeft) ? m : n;
    if (lda < imax(1, k)) {
        plasma_error("illegal value of lda");
        return -8;
    }
    if (ldb < imax(1, m)) {
        plasma_error("illegal value of ldb");
        return -10;
    }

    // quick return
    if (imin(m, n) == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_trmm(plasma, PlasmaRealFloat, m, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t B;
    int retval;
    retval = plasma_desc_triangular_create(PlasmaRealFloat, uplo, nb, nb,
                                           k, k, 0, 0, k, k, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_triangular_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaRealFloat, nb, nb,
                                        m, n, 0, 0, m, n, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize sequence. Fix: the original assigned the return value but
    // never checked it; a failure here must release the tile descriptors.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_init() failed");
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }

    // Initialize request (same fix: the status was silently discarded).
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_request_init() failed");
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate matrices to tile layout.
        plasma_omp_str2desc(pA, lda, A, &sequence, &request);
        plasma_omp_sge2desc(pB, ldb, B, &sequence, &request);

        // Call tile async interface.
        plasma_omp_strmm(side, uplo, transa, diag,
                         alpha, A,
                                B,
                         &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_sdesc2ge(B, pB, ldb, &sequence, &request);
    }
    // implicit synchronization

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&B);

    // Return status.
    return sequence.status;
}
/***************************************************************************//**
*
* @ingroup plasma_trmm
*
* Performs triangular matrix multiplication. Non-blocking tile version of
* plasma_strmm(). May return before the computation is finished. Operates on
* matrices stored by tiles. All matrices are passed through descriptors. All
* dimensions are taken from the descriptors. Allows for pipelining of
* operations at runtime.
*
*******************************************************************************
*
* @param[in] side
* Specifies whether op( A ) appears on the left or on the right of B:
* - PlasmaLeft: alpha*op( A )*B
* - PlasmaRight: alpha*B*op( A )
*
* @param[in] uplo
* Specifies whether the matrix A is upper triangular or lower
* triangular:
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] transa
* Specifies whether the matrix A is transposed, not transposed or
* conjugate transposed:
* - PlasmaNoTrans: A is not transposed;
* - PlasmaTrans: A is transposed;
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] diag
* Specifies whether or not A is unit triangular:
* - PlasmaNonUnit: A is non-unit triangular;
* - PlasmaUnit: A is unit triangular.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* Descriptor of the triangular matrix A.
*
* @param[in,out] B
* Descriptor of matrix B.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes).
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_strmm
* @sa plasma_omp_ctrmm
* @sa plasma_omp_dtrmm
* @sa plasma_omp_strmm
*
******************************************************************************/
void plasma_omp_strmm(plasma_enum_t side, plasma_enum_t uplo,
                      plasma_enum_t transa, plasma_enum_t diag,
                      float alpha, plasma_desc_t A,
                      plasma_desc_t B,
                      plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Validate the handles first. Fix: the original checked sequence/request
    // for NULL only after passing them to plasma_request_fail() in earlier
    // failure branches -- and even the NULL-check branches themselves called
    // plasma_request_fail() with the NULL pointer.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }

    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorNotInitialized);
        return;
    }

    // Check input arguments.
    if (side != PlasmaLeft && side != PlasmaRight) {
        plasma_error("illegal value of side");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (uplo != PlasmaUpper && uplo != PlasmaLower) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (transa != PlasmaConjTrans &&
        transa != PlasmaNoTrans &&
        transa != PlasmaTrans) {
        plasma_error("illegal value of transa");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (diag != PlasmaUnit && diag != PlasmaNonUnit) {
        plasma_error("illegal value of diag");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (A.m == 0 || A.n == 0 || B.m == 0 || B.n == 0)
        return;

    // alpha == 0 simply zeroes B; A is never touched.
    if (alpha == 0.0) {
        float zzero = 0.0;
        plasma_pslaset(PlasmaGeneral, zzero, zzero, B, sequence, request);
        return;
    }

    // Call parallel function.
    plasma_pstrmm(side, uplo, transa, diag, alpha,
                  A, B,
                  sequence, request);
}
|
dem_fem_search.h | //
// Project Name: Kratos
// Last Modified by: $Author: msantasusana, croig $
// Date: $Date: 2015-10-26 19:37:47 $
// Revision: $Revision: 1.2 $
//
//
#if !defined(KRATOS_DEM_FEM_SEARCH_H_INCLUDED )
#define KRATOS_DEM_FEM_SEARCH_H_INCLUDED
// System includes
#include <string>
#include <iostream>
// include kratos definitions
#include "includes/define.h"
// Project includes
#include "utilities/openmp_utils.h"
// Configures
#include "rigid_face_geometrical_object_configure.h"
// Search
#include "spatial_containers/bins_dynamic_objects.h"
#include "spatial_containers/bins_dynamic.h"
// External includes
//#define CUSTOMTIMER
/* Timer defines */
#include "utilities/timer.h"
#ifdef CUSTOMTIMER
#define KRATOS_TIMER_START(t) Timer::Start(t);
#define KRATOS_TIMER_STOP(t) Timer::Stop(t);
#else
#define KRATOS_TIMER_START(t)
#define KRATOS_TIMER_STOP(t)
#endif
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/// Short class definition.
/** Detail class definition.
*/
class KRATOS_API(DEM_APPLICATION) DEM_FEM_Search : public SpatialSearch
{
public:
///@name Type Definitions
///@{
/// Pointer definition of OMP_DEMSearch
KRATOS_CLASS_POINTER_DEFINITION(DEM_FEM_Search);
typedef PointType* PtrPointType;
typedef std::vector<PtrPointType>* PointVector;
typedef std::vector<PtrPointType>::iterator PointIterator;
typedef double* DistanceVector;
typedef double* DistanceIterator;
// //Configure Types
// typedef RigidFaceConfigure<3> ElementConfigureType; //Element
// //Bin Types
// typedef BinsObjectDynamic<ElementConfigureType> BinsType;
//
//Configure Types
typedef RigidFaceGeometricalObjectConfigure<3> RigidFaceGeometricalConfigureType;
//Bin Types
typedef BinsObjectDynamic<RigidFaceGeometricalConfigureType> GeometricalBinsType;
//typedef PointerVectorSet<GeometricalObject, IndexedObject> GeometricalObjectType;
typedef typename RigidFaceGeometricalConfigureType::ElementsContainerType GeometricalObjectType;
//typedef PointerVector<GeometricalObject> GeometricalObjectType;
///@}
///@name Life Cycle
///@{
/// Default constructor.
/// Default constructor: starts with no bins structure; the bins are built
/// lazily by SearchRigidFaceForDEMInRadiusExclusiveImplementation().
DEM_FEM_Search(){
  mBins = nullptr;  // nullptr rather than NULL (file already uses C++11)
}
/// Destructor.
/// Destructor. Fix: releases the bins structure allocated with `new` in
/// SearchRigidFaceForDEMInRadiusExclusiveImplementation(); the original empty
/// destructor leaked it. `delete` on a null pointer is a safe no-op.
~DEM_FEM_Search(){
  delete mBins;
}
/// For every DEM element (spherical particle) in rElements, finds the FEM
/// wall conditions from rConditions whose geometry lies within the particle's
/// search radius (exclusive search), writing matches into rResults[p] and
/// the corresponding distances into rResultsDistance[p].
/// Side effects: rebuilds mBins and updates the member bounding boxes
/// (DEM_BB_*, mGlobal_BB_*).
/// NOTE(review): mutates shared members (mBins, bounding boxes), so
/// concurrent calls on the same object are not safe -- confirm callers
/// invoke this from a single thread.
void SearchRigidFaceForDEMInRadiusExclusiveImplementation (
ElementsContainerType const& rElements,
ConditionsContainerType const& rConditions,
VectorResultConditionsContainerType& rResults,
VectorDistanceType& rResultsDistance)
{
KRATOS_TRY
/*
STEPS:
¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨¨
1. INITIALIZE
2. CALCULATE THE DEM BBX
3. GATHER DEM BOUNDING BOX
4. FIND THE FE INSIDE DEM_BB TO BUILD THE BINS AND CONSTRUCT THE GLOBAL BBX
5. GATHER FEM ELEMENTS AND THE GLOBAL BBX
6. BUILD THE BINS
7. PERFORM THE SEARCH FOR THE SPHERES INSIDE THE BBX (amplified by the radius of each particle)
*/
//1. INITIALIZE
int MaxNumberOfElements = rConditions.size();
// Work on the raw containers; const_cast is needed because the search API
// takes const references but the iterators below are non-const.
ElementsContainerType::ContainerType& elements_sear = const_cast<ElementsContainerType::ContainerType&> (rElements.GetContainer());
ConditionsContainerType::ContainerType& conditions_bins = const_cast<ConditionsContainerType::ContainerType&>(rConditions.GetContainer());
//GeometricalObjectType::ContainerType SearElementPointerToGeometricalObjecPointerTemporalVector;
GeometricalObjectType::ContainerType BinsConditionPointerToGeometricalObjecPointerTemporalVector;
RadiusArrayType Radius_out;
int num_of_threads = ParallelUtilities::GetNumThreads();
std::vector<unsigned int> total_dem_partition_index;
std::vector<unsigned int> total_fem_partition_index;
OpenMPUtils::CreatePartition(num_of_threads, elements_sear.size(), total_dem_partition_index);
OpenMPUtils::CreatePartition(num_of_threads, conditions_bins.size(), total_fem_partition_index);
// Per-thread scratch storage, reduced serially after each parallel region.
//std::vector<GeometricalObjectType::ContainerType> Vector_SearElementPointerToGeometricalObjecPointerTemporalVector(num_of_threads);
std::vector<GeometricalObjectType::ContainerType> Vector_BinsConditionPointerToGeometricalObjecPointerTemporalVector(num_of_threads);
std::vector<array_1d<double, 3> > Vector_DEM_BB_LowPoint(num_of_threads); std::vector <array_1d<double, 3 > > Vector_DEM_BB_HighPoint(num_of_threads);
std::vector<array_1d<double, 3> > Vector_GLOBAL_BB_LowPoint(num_of_threads); std::vector <array_1d<double, 3 > > Vector_GLOBAL_BB_HighPoint(num_of_threads);
std::vector<double> Vector_Ref_Radius(num_of_threads);
std::vector<RadiusArrayType> Vector_Radius_out(num_of_threads);
double Global_Ref_Radius = 0.0;
double inf = std::numeric_limits<double>::infinity();
// Start the boxes inverted (+inf/-inf) so any real point shrinks them.
for (std::size_t i = 0; i < 3; i++) {
DEM_BB_LowPoint[i] = inf;
DEM_BB_HighPoint[i] = -inf;
mGlobal_BB_LowPoint[i] = inf;
mGlobal_BB_HighPoint[i] = -inf;
}
typedef ElementsContainerType::ContainerType::iterator Elem_iter;
typedef ConditionsContainerType::ContainerType::iterator Cond_iter;
//2. CALCULATE THE DEM BBX
// Each thread accumulates a local box over particle centers and tracks the
// largest search radius seen.
#pragma omp parallel
{
double radius = 0.0;
int k = OpenMPUtils::ThisThread();
for(std::size_t i = 0; i < 3; i++) {
Vector_DEM_BB_LowPoint[k][i] = inf;
Vector_DEM_BB_HighPoint[k][i] = -inf;
}
#pragma omp for
for (int p = 0; p <(int) elements_sear.size(); p++) {
Elem_iter it = elements_sear.begin() + p;
GeometryType& pGeometry = (*it)->GetGeometry();
const array_1d<double, 3 >& aux_coor = pGeometry[0].Coordinates();
SphericParticle* p_particle = dynamic_cast<SphericParticle*>((*it).get());
radius = p_particle->GetSearchRadius();
Vector_Ref_Radius[k] = (Vector_Ref_Radius[k] < radius) ? radius : Vector_Ref_Radius[k] ;
for(std::size_t i = 0; i < 3; i++) {
Vector_DEM_BB_LowPoint[k][i] = (Vector_DEM_BB_LowPoint[k][i] > aux_coor[i]) ? aux_coor[i] : Vector_DEM_BB_LowPoint[k][i];
Vector_DEM_BB_HighPoint[k][i] = (Vector_DEM_BB_HighPoint[k][i] < aux_coor[i]) ? aux_coor[i] : Vector_DEM_BB_HighPoint[k][i];
}
} //pragma
}//pragma parallel
//3. GATHER DEM BOUNDING BOX
// Serial reduction of the per-thread boxes and of the maximum radius.
for(int k = 0; k < num_of_threads; k++) {
for(std::size_t i = 0; i < 3; i++) {
DEM_BB_LowPoint[i] = (DEM_BB_LowPoint[i] > Vector_DEM_BB_LowPoint[k][i]) ? Vector_DEM_BB_LowPoint[k][i] : DEM_BB_LowPoint[i];
DEM_BB_HighPoint[i] = (DEM_BB_HighPoint[i] < Vector_DEM_BB_HighPoint[k][i]) ? Vector_DEM_BB_HighPoint[k][i] : DEM_BB_HighPoint[i];
}
Global_Ref_Radius = (Global_Ref_Radius < Vector_Ref_Radius[k]) ? Vector_Ref_Radius[k] : Global_Ref_Radius;
}
// Inflate the DEM box by the largest search radius so nearby walls qualify.
for(std::size_t i = 0; i < 3; i++) {
DEM_BB_LowPoint[i] -= 1.00f * Global_Ref_Radius;
DEM_BB_HighPoint[i] += 1.00f * Global_Ref_Radius;
}
//4. FIND THE FE INSIDE DEM_BB TO BUILD THE BINS AND CONSTRUCT THE GLOBAL BBX
#pragma omp parallel
{
int k = OpenMPUtils::ThisThread();
Vector_BinsConditionPointerToGeometricalObjecPointerTemporalVector[k].reserve(total_fem_partition_index[k+1]);
for(std::size_t i = 0; i < 3; i++) {
Vector_GLOBAL_BB_LowPoint[k][i] = inf;
Vector_GLOBAL_BB_HighPoint[k][i] = -inf;
}
array_1d<double, 3> rHighPoint;
array_1d<double, 3> rLowPoint;
#pragma omp for private(rHighPoint,rLowPoint)
for (int c = 0; c < (int)conditions_bins.size(); c++) {
Cond_iter it = conditions_bins.begin() + c;
const GeometryType& pGeometry = (*it)->GetGeometry();
// Axis-aligned bounding box of this wall condition's nodes.
noalias(rLowPoint) = pGeometry[0];
noalias(rHighPoint) = pGeometry[0];
for(unsigned int point = 1; point < pGeometry.size(); point++ ) {
for(unsigned int i = 0; i < 3; i++ ) {
rHighPoint[i] = ( rHighPoint[i] < pGeometry[point][i] ) ? pGeometry[point][i] : rHighPoint[i];
rLowPoint[i] = ( rLowPoint[i] > pGeometry[point][i] ) ? pGeometry[point][i] : rLowPoint[i];
}
}
// Keep the condition only if its box overlaps the inflated DEM box.
bool add = true;
for(unsigned int i = 0; i < 3; i++) {
if(( rHighPoint[i] < DEM_BB_LowPoint[i] ) || ( rLowPoint[i] > DEM_BB_HighPoint[i] )) {
add = false;
break;
}
}
if(add) {
for(unsigned int i = 0; i < 3; i++ ) {
Vector_GLOBAL_BB_LowPoint[k][i] = (Vector_GLOBAL_BB_LowPoint[k][i] > rLowPoint[i]) ? rLowPoint[i] : Vector_GLOBAL_BB_LowPoint[k][i];
Vector_GLOBAL_BB_HighPoint[k][i] = (Vector_GLOBAL_BB_HighPoint[k][i] < rHighPoint[i]) ? rHighPoint[i] : Vector_GLOBAL_BB_HighPoint[k][i];
}
Vector_BinsConditionPointerToGeometricalObjecPointerTemporalVector[k].push_back(*it);
}
}//parallel for
}//parallel omp
//5. GATHER FEM ELEMENTS AND THE GLOBAL BBX
int fem_total_size = 0;
for(int k = 0; k < num_of_threads; k++) {
fem_total_size += Vector_BinsConditionPointerToGeometricalObjecPointerTemporalVector[k].size();
}
BinsConditionPointerToGeometricalObjecPointerTemporalVector.reserve(fem_total_size);
// Concatenate the per-thread condition lists and reduce the global box.
for(int k = 0; k < num_of_threads; k++) {
BinsConditionPointerToGeometricalObjecPointerTemporalVector.insert(
BinsConditionPointerToGeometricalObjecPointerTemporalVector.end(),
Vector_BinsConditionPointerToGeometricalObjecPointerTemporalVector[k].begin(),
Vector_BinsConditionPointerToGeometricalObjecPointerTemporalVector[k].end()
);
for(std::size_t i = 0; i < 3; i++) {
mGlobal_BB_LowPoint[i] = (mGlobal_BB_LowPoint[i] > Vector_GLOBAL_BB_LowPoint[k][i]) ? Vector_GLOBAL_BB_LowPoint[k][i] : mGlobal_BB_LowPoint[i];
mGlobal_BB_HighPoint[i] = (mGlobal_BB_HighPoint[i] < Vector_GLOBAL_BB_HighPoint[k][i]) ? Vector_GLOBAL_BB_HighPoint[k][i] : mGlobal_BB_HighPoint[i];
}
}
if(BinsConditionPointerToGeometricalObjecPointerTemporalVector.size() >0 ) {
//6. CREATE THE BINS
// Rebuild the bins from scratch; the previous instance (if any) is freed.
//if (mBins) free(mBins);
delete mBins;
mBins = new GeometricalBinsType(BinsConditionPointerToGeometricalObjecPointerTemporalVector.begin(), BinsConditionPointerToGeometricalObjecPointerTemporalVector.end());
//7. PERFORM THE SEARCH ON THE SPHERES
#pragma omp parallel
{
// Per-thread result buffers sized for the worst case (all conditions).
GeometricalObjectType::ContainerType localResults(MaxNumberOfElements);
DistanceType localResultsDistances(MaxNumberOfElements);
std::size_t NumberOfResults = 0;
#pragma omp for schedule(dynamic, 100)
for (int p = 0; p < (int)elements_sear.size(); p++) {
Elem_iter it = elements_sear.begin() + p;
GeometricalObject::Pointer go_it(*it);
bool search_particle = true;
array_1d<double, 3 > & aux_coor = go_it->GetGeometry()[0].Coordinates();
SphericParticle* p_particle = dynamic_cast<SphericParticle*>((*it).get());
double Rad = p_particle->GetSearchRadius();
// NOTE(review): due to operator precedence this reads
// `search_particle &= (!(below_low) || (above_high))`, which is TRUE
// when the particle is above the high bound; the intent looks like
// `!((below_low) || (above_high))`. Confirm before changing.
for(unsigned int i = 0; i < 3; i++ ) {
search_particle &= !(aux_coor[i] < (mGlobal_BB_LowPoint[i] - Rad) ) || (aux_coor[i] > (mGlobal_BB_HighPoint[i] + Rad) ); //amplify the BBX with the radius for every particle
}
if(search_particle) {
auto ResultsPointer = localResults.begin();
DistanceType::iterator ResultsDistancesPointer = localResultsDistances.begin();
NumberOfResults = (*mBins).SearchObjectsInRadiusExclusive(go_it,Rad,ResultsPointer,ResultsDistancesPointer,MaxNumberOfElements);
rResults[p].reserve(NumberOfResults);
// Downcast each found geometrical object back to a Condition pointer.
for(auto c_it = localResults.begin(); c_it != localResults.begin() + NumberOfResults; c_it++) {
auto presult = *c_it;
Condition::Pointer condition = dynamic_pointer_cast<Condition>(presult);
//Condition::Pointer condition = Kratos::dynamic_pointer_cast<Condition>(*c_it);
rResults[p].push_back(condition);
}
rResultsDistance[p].insert(rResultsDistance[p].begin(),localResultsDistances.begin(),localResultsDistances.begin()+NumberOfResults);
}
} //loop elements
} //parallel
}//if Bins is not empty--> search
KRATOS_CATCH("")
}
/// Return (by value) the upper corner of the global bounding box.
array_1d<double, 3 > GetBBHighPoint() {
    return mGlobal_BB_HighPoint;
}
/// Return (by value) the lower corner of the global bounding box.
array_1d<double, 3 > GetBBLowPoint() {
    return mGlobal_BB_LowPoint;
}
/// Turn back information as a string.
virtual std::string Info() const override
{
    // Fixed class label; no state is encoded in the info string.
    return std::string("DEM_FEM_Search");
}
/// Print information about this object.
virtual void PrintInfo(std::ostream& rOStream) const override
{
    rOStream << "DEM_FEM_Search";
}
/// Print object's data.
virtual void PrintData(std::ostream& rOStream) const override
{
    // Intentionally empty: this utility prints no internal data.
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
array_1d<double, 3 > mGlobal_BB_HighPoint;
array_1d<double, 3 > mGlobal_BB_LowPoint;
array_1d<double, 3 > DEM_BB_HighPoint;
array_1d<double, 3 > DEM_BB_LowPoint;
GeometricalBinsType* mBins;
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
// NOTE(review): private and intentionally a no-op -- it copies no member
// (bounding boxes and mBins are left untouched). It exists only to make
// the class non-assignable to clients; consider `= delete` if the code
// base has moved to C++11 conventions.
DEM_FEM_Search& operator=(DEM_FEM_Search const& rOther)
{
    return *this;
}
/// Copy constructor.
// NOTE(review): private copy constructor delegating to the no-op private
// assignment operator above, so nothing is actually copied and the raw
// pointer member mBins is left uninitialized. Presumably never invoked --
// it only serves to block copying from client code; confirm before use.
DEM_FEM_Search(DEM_FEM_Search const& rOther)
{
    *this = rOther;
}
}; // Class DEM_FEMSearch
} // namespace Kratos.
#endif // KRATOS_DEM_FEM_SEARCH_H_INCLUDED defined
|
DRB033-truedeplinear-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A linear expression is used as array subscription.
Data race pair: a[2*i+1]@64:5 vs. a[i]@64:14
*/
#include <stdlib.h>
#include <stdio.h>
/*
 * DataRaceBench DRB033 ("truedeplinear-orig-yes"): this file is REQUIRED
 * to contain a data race -- the header comment documents the race pair
 * a[2*i+1] vs a[i]. As previously written, only the race-free
 * initialization loop was parallel and the dependent loop ran serially,
 * so the documented race could not occur. Fix: parallelize the dependent
 * loop as the benchmark intends.
 */
int main(int argc, char* argv[])
{
    int i;
    int a[2000];

    /* Race-free parallel initialization: each iteration writes a
     * distinct a[i]. */
#pragma omp parallel for
    for (i = 0; i < 2000; i++)
        a[i] = i;

    /* Intentional data race (true dependence through the linear
     * subscript 2*i+1): iteration i reads a[i] which another iteration
     * i' = (i-1)/2 may be writing concurrently as a[2*i'+1]. */
#pragma omp parallel for
    for (i = 0; i < 1000; i++)
        a[2*i+1] = a[i] + 1;

    printf("a[1001]=%d\n", a[1001]);
    return 0;
}
|
9802.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "atax.h"
/* Array initialization. */
/* Populate the inputs of the atax kernel: x[i] = i*pi and
 * A[i][j] = i*(j+1)/nx (evaluated in DATA_TYPE). */
static
void init_array (int nx, int ny,
                 DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
                 DATA_TYPE POLYBENCH_1D(x,NY,ny))
{
    int row, col;

    for (row = 0; row < ny; row++)
        x[row] = row * M_PI;

    for (row = 0; row < nx; row++)
        for (col = 0; col < ny; col++)
            A[row][col] = ((DATA_TYPE) row * (col + 1)) / nx;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* Dump y to stderr (dead-code-elimination guard / correctness check).
 * A newline is emitted before every 20th value, starting at index 0,
 * and once more at the end -- identical to the stock Polybench layout. */
static
void print_array(int nx,
                 DATA_TYPE POLYBENCH_1D(y,NX,nx))
{
    int idx;
    for (idx = 0; idx < nx; idx++) {
        fprintf(stderr, DATA_PRINTF_MODIFIER, y[idx]);
        if (idx % 20 == 0)
            fprintf(stderr, "\n");
    }
    fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Computes y = A^T (A x): tmp = A*x row by row, then y += A[i][:]*tmp[i]. */
static
void kernel_atax(int nx, int ny,
                 DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
                 DATA_TYPE POLYBENCH_1D(x,NY,ny),
                 DATA_TYPE POLYBENCH_1D(y,NY,ny),
                 DATA_TYPE POLYBENCH_1D(tmp,NX,nx))
{
    int i, j;
#pragma scop
    /* NOTE(review): num_threads(1) forces serial execution. With more than
     * one thread the second worksharing loop would race on y[j], which is
     * accumulated across i-iterations owned by different threads -- the
     * single-thread clause appears to be deliberate; confirm before
     * changing it. */
#pragma omp parallel num_threads(1)
    {
        /* y = 0 */
#pragma omp for schedule(static, 1)
        for (i = 0; i < _PB_NY; i++)
            y[i] = 0;
        /* tmp[i] = A[i][:].x ; y += A[i][:]*tmp[i] */
#pragma omp for private (j) schedule(static, 1)
        for (i = 0; i < _PB_NX; i++)
        {
            tmp[i] = 0;
            for (j = 0; j < _PB_NY; j++)
                tmp[i] = tmp[i] + A[i][j] * x[j];
            for (j = 0; j < _PB_NY; j++)
                y[j] = y[j] + A[i][j] * tmp[i];
        }
    }
#pragma endscop
}
/* Benchmark driver: allocate, initialize, time the kernel, print y. */
int main(int argc, char** argv)
{
    /* Retrieve problem size. */
    int nx = NX;
    int ny = NY;
    /* Variable declaration/allocation. */
    POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny);
    POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, NY, ny);
    /* NOTE(review): y is allocated with extent NY but print_array below
     * iterates it with extent nx (NX). Harmless only if NX == NY, which
     * the stock atax.h presumably guarantees -- confirm. */
    POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, NY, ny);
    POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, NX, nx);
    /* Initialize array(s). */
    init_array (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x));
    /* Start timer. */
    polybench_start_instruments;
    /* Run kernel. */
    kernel_atax (nx, ny,
                 POLYBENCH_ARRAY(A),
                 POLYBENCH_ARRAY(x),
                 POLYBENCH_ARRAY(y),
                 POLYBENCH_ARRAY(tmp));
    /* Stop and print timer. */
    polybench_stop_instruments;
    polybench_print_instruments;
    /* Prevent dead-code elimination. All live-out data must be printed
       by the function call in argument. */
    polybench_prevent_dce(print_array(nx, POLYBENCH_ARRAY(y)));
    /* Be clean. */
    POLYBENCH_FREE_ARRAY(A);
    POLYBENCH_FREE_ARRAY(x);
    POLYBENCH_FREE_ARRAY(y);
    POLYBENCH_FREE_ARRAY(tmp);
    return 0;
}
|
fill_ints_sr.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Hong-Zhou Ye <hzyechem@gmail.com>
*/
#include <stdlib.h>
#include <complex.h>
#include <assert.h>
#include <string.h>
#include <math.h>
#include "config.h"
#include "cint.h"
#include "vhf/fblas.h"
#include "np_helper/np_helper.h"
#define INTBUFMAX 1000
#define INTBUFMAX10 8000
#define IMGBLK 80
#define OF_CMPLX 2
int GTOmax_shell_dim(int *ao_loc, int *shls_slice, int ncenter);
int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter,
int *atm, int natm, int *bas, int nbas, double *env);
/* Squared Euclidean distance between the 3D points ri and rj. */
static double get_dsqure(double *ri, double *rj)
{
    double d2 = 0.;
    int k;
    for (k = 0; k < 3; k++) {
        double d = ri[k] - rj[k];
        d2 += d * d;
    }
    return d2;
}
/* Exponent-weighted center rc = (ei*ri + ej*rj) / (ei + ej), per axis. */
static void get_rc(double *rc, double *ri, double *rj, double ei, double ej) {
    const double eij = ei + ej;
    int k;
    for (k = 0; k < 3; k++) {
        rc[k] = (ri[k]*ei + rj[k]*ej) / eij;
    }
}
/* Partition shells [ksh0, ksh1) into contiguous segments whose total AO
 * dimension does not exceed dkmax. kshloc[0..nseg] receives the segment
 * boundaries (kshloc[nseg] == ksh1); the number of segments is returned. */
static int shloc_partition(int *kshloc, int *ao_loc, int ksh0, int ksh1, int dkmax)
{
    int sh;
    int nseg = 0;
    int seg_start = ao_loc[ksh0];

    kshloc[0] = ksh0;
    for (sh = ksh0 + 1; sh < ksh1; sh++) {
        /* every single shell must fit inside one segment */
        assert(ao_loc[sh+1] - ao_loc[sh] < dkmax);
        if (ao_loc[sh+1] - seg_start > dkmax) {
            nseg += 1;
            kshloc[nseg] = sh;
            seg_start = ao_loc[sh];
        }
    }
    nseg += 1;
    kshloc[nseg] = ksh1;
    return nseg;
}
/* Copy the coordinates env[ptr..ptr+2] into env_loc, translated by the
 * lattice vector Ls[iL]; env itself is never modified. */
static void shift_bas(double *env_loc, double *env, double *Ls, int ptr, int iL)
{
    const double *L = Ls + iL * 3;
    int k;
    for (k = 0; k < 3; k++) {
        env_loc[ptr+k] = env[ptr+k] + L[k];
    }
}
/* The following functions should be used solely for pyscf.pbc.df.rsdf.RSDF
*/
// non-split basis implementation of j2c
/* Assemble the complex per-k-point 2-center matrices from the separate
 * real (bufr) and imaginary (bufi) halves produced by the dgemm phase of
 * _nr2c_k_fill.  For each k-point the buffer holds a [comp][dj][sum di]
 * tile (j-major within each ish sub-tile) covering shells ish in
 * [msh0, msh1) against the fixed shell jsh; it is transposed into the
 * row-major (naoi x naoj) output, one full nij*comp matrix per k-point. */
static void sort2c_ks1(double complex *out, double *bufr, double *bufi,
                       int *shls_slice, int *ao_loc, int nkpts, int comp,
                       int jsh, int msh0, int msh1)
{
    const int ish0 = shls_slice[0];
    const int ish1 = shls_slice[1];
    const int jsh0 = shls_slice[2];
    const int jsh1 = shls_slice[3];
    const size_t naoi = ao_loc[ish1] - ao_loc[ish0];  /* output rows */
    const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];  /* output cols */
    const size_t nij = naoi * naoj;                   /* one component */
    const int dj = ao_loc[jsh+1] - ao_loc[jsh];       /* width of jsh */
    const int jp = ao_loc[jsh] - ao_loc[jsh0];        /* column offset */
    const int dimax = ao_loc[msh1] - ao_loc[msh0];    /* rows in strip */
    const size_t dmjc = dimax * dj * comp;            /* buffer stride per k */
    out += jp;
    int i, j, kk, ish, ic, di, dij;
    size_t off;
    double *pbr, *pbi;
    double complex *pout;
    for (kk = 0; kk < nkpts; kk++) {
        off = kk * dmjc;
        for (ish = msh0; ish < msh1; ish++) {
            di = ao_loc[ish+1] - ao_loc[ish];
            dij = di * dj;
            for (ic = 0; ic < comp; ic++) {
                pout = out + nij*ic + naoj*(ao_loc[ish]-ao_loc[ish0]);
                pbr = bufr + off + dij*ic;
                pbi = bufi + off + dij*ic;
                /* buffer tile is (j, i); output is (i, j) */
                for (j = 0; j < dj; j++) {
                    for (i = 0; i < di; i++) {
                        pout[i*naoj+j] = pbr[j*di+i] + pbi[j*di+i]*_Complex_I;
                    }
                }
            }
            off += dij * comp;
        }
        out += nij * comp;
    }
}
/* Fill the short-range (SR) 2-center integrals of one jsh against the
 * ish slice, summed over lattice images with Bloch phases.
 *
 * For every image jL each (ish, jsh) pair is evaluated twice: once with
 * omega temporarily set to 0 (full-range Coulomb) and once with the
 * original omega (long-range); the difference, held in bufL, is the SR
 * part.  The image axis of bufL is then contracted with cos/sin phase
 * tables expkL_r/expkL_i via dgemm, and sort2c_ks1 scatters the result
 * into `out`.
 *
 * ish0 is an offset into the ish slice: the ks1 wrapper passes 0 (full
 * rectangle) and the ks2 wrapper passes jsh (triangular fill).
 *
 * Buffer layout in `buf`:
 *   bufk_r | bufk_i (dmjc*nkpts each) | bufL (dmjc*nimgs) | pbuf2 | cache
 */
static void _nr2c_k_fill(int (*intor)(), double complex *out,
                         int nkpts, int comp, int nimgs, int jsh, int ish0,
                         double *buf, double *env_loc, double *Ls,
                         double *expkL_r, double *expkL_i,
                         int *shls_slice, int *ao_loc,
                         CINTOpt *cintopt,
                         const int *refuniqshl_map, const double *uniq_Rcut2s,
                         int *atm, int natm, int *bas, int nbas, double *env)
{
    const int ish1 = shls_slice[1];
    const int jsh0 = shls_slice[2];
    const char TRANS_N = 'N';
    const double D1 = 1;
    const double D0 = 0;
    ish0 += shls_slice[0];
    jsh += jsh0;
    int jptrxyz = atm[PTR_COORD+bas[ATOM_OF+jsh*BAS_SLOTS]*ATM_SLOTS];
    const int dj = ao_loc[jsh+1] - ao_loc[jsh];
    int dimax = INTBUFMAX10 / dj;
    /* split the ish range into strips that fit the integral buffer */
    int ishloc[ish1-ish0+1];
    int nishloc = shloc_partition(ishloc, ao_loc, ish0, ish1, dimax);
    int m, msh0, msh1, dmjc, ish, di, empty;
    int jL;
    int shls[2];
    double *bufk_r = buf;
    double *bufk_i, *bufL, *pbuf, *pbuf2, *cache;
    int iptrxyz, dijc, ISH, JSH, IJSH, i;
    JSH = refuniqshl_map[jsh-jsh0];
    double *ri, *rj, Rij2, Rij2_cut;
    /* omega is restored after every full-range evaluation below */
    const double omega = fabs(env_loc[PTR_RANGE_OMEGA]);
    shls[1] = jsh;
    for (m = 0; m < nishloc; m++) {
        msh0 = ishloc[m];
        msh1 = ishloc[m+1];
        dimax = ao_loc[msh1] - ao_loc[msh0];
        dmjc = dj * dimax * comp;
        bufk_i = bufk_r + dmjc * nkpts;
        bufL = bufk_i + dmjc * nkpts;
        pbuf2 = bufL + dmjc * nimgs;
        cache = pbuf2 + dmjc;
        pbuf = bufL;
        for (jL = 0; jL < nimgs; jL++) {
            /* translate the jsh atom by lattice vector jL */
            shift_bas(env_loc, env, Ls, jptrxyz, jL);
            rj = env_loc + jptrxyz;
            for (ish = msh0; ish < msh1; ish++) {
                shls[0] = ish;
                di = ao_loc[ish+1] - ao_loc[ish];
                dijc = di * dj * comp;
                iptrxyz = atm[PTR_COORD+bas[ATOM_OF+ish*BAS_SLOTS]*ATM_SLOTS];
                ri = env_loc + iptrxyz;
                Rij2 = get_dsqure(ri,rj);
                /* distance-based screening: per unique shell pair cutoff */
                ISH = refuniqshl_map[ish];
                IJSH = (ISH>=JSH)?(ISH*(ISH+1)/2+JSH):(JSH*(JSH+1)/2+ISH);
                Rij2_cut = uniq_Rcut2s[IJSH];
                if (Rij2 < Rij2_cut) {
                    /* SR = full-range (omega=0) minus long-range (omega) */
                    env_loc[PTR_RANGE_OMEGA] = 0.;
                    /* NOTE(review): if this intor screens to zero (returns
                     * 0) without writing pbuf, stale data from the previous
                     * pair would survive here -- presumably the integral
                     * backend zeroes the buffer in that case; confirm. */
                    if ((*intor)(pbuf, NULL, shls, atm, natm, bas, nbas,
                                 env_loc, cintopt, cache)) {
                        empty = 0;
                    }
                    env_loc[PTR_RANGE_OMEGA] = omega;
                    if ((*intor)(pbuf2, NULL, shls, atm, natm, bas, nbas,
                                 env_loc, cintopt, cache)) {
                        for (i = 0; i < dijc; ++i) {
                            pbuf[i] -= pbuf2[i];
                        }
                    }
                }
                else {
                    /* screened out: contribute exact zeros */
                    for (i = 0; i < dijc; ++i) {
                        pbuf[i] = 0.;
                    }
                } // if Rij2
                pbuf += dijc;
            } // ish
        } // jL
        /* contract the image axis with the Bloch phases (real & imag) */
        dgemm_(&TRANS_N, &TRANS_N, &dmjc, &nkpts, &nimgs,
               &D1, bufL, &dmjc, expkL_r, &nimgs, &D0, bufk_r, &dmjc);
        dgemm_(&TRANS_N, &TRANS_N, &dmjc, &nkpts, &nimgs,
               &D1, bufL, &dmjc, expkL_i, &nimgs, &D0, bufk_i, &dmjc);
        sort2c_ks1(out, bufk_r, bufk_i, shls_slice, ao_loc,
                   nkpts, comp, jsh, msh0, msh1);
    }
}
/* s1 entry point (no permutation symmetry): scan the full ish slice for
 * this jsh by passing ish offset 0 to _nr2c_k_fill. */
void PBCsr2c_fill_ks1(int (*intor)(), double complex *out,
                      int nkpts, int comp, int nimgs, int jsh,
                      double *buf, double *env_loc, double *Ls,
                      double *expkL_r, double *expkL_i,
                      int *shls_slice, int *ao_loc,
                      CINTOpt *cintopt,
                      const int *refuniqshl_map, const double *uniq_Rcut2s,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
    _nr2c_k_fill(intor, out, nkpts, comp, nimgs, jsh, 0,
                 buf, env_loc, Ls, expkL_r, expkL_i, shls_slice, ao_loc,
                 cintopt,
                 refuniqshl_map, uniq_Rcut2s,
                 atm, natm, bas, nbas, env);
}
/* s2 entry point: start the ish scan at offset jsh, so only shells from
 * jsh onward are filled (triangular fill; presumably assumes the ish and
 * jsh slices coincide -- confirm against the Python-side caller). */
void PBCsr2c_fill_ks2(int (*intor)(), double complex *out,
                      int nkpts, int comp, int nimgs, int jsh,
                      double *buf, double *env_loc, double *Ls,
                      double *expkL_r, double *expkL_i,
                      int *shls_slice, int *ao_loc,
                      CINTOpt *cintopt,
                      const int *refuniqshl_map, const double *uniq_Rcut2s,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
    _nr2c_k_fill(intor, out, nkpts, comp, nimgs, jsh, jsh,
                 buf, env_loc, Ls, expkL_r, expkL_i, shls_slice, ao_loc,
                 cintopt,
                 refuniqshl_map, uniq_Rcut2s,
                 atm, natm, bas, nbas, env);
}
/* Driver for the 2-center short-range integrals over all k-points.
 * The complex Bloch phases expkL are split once into separate real and
 * imaginary tables (consumed by the dgemm calls inside the fill), then
 * one jsh strip is processed per OpenMP task. */
void PBCsr2c_k_drv(int (*intor)(), void (*fill)(), double complex *out,
                   int nkpts, int comp, int nimgs,
                   double *Ls, double complex *expkL,
                   int *shls_slice, int *ao_loc,
                   CINTOpt *cintopt,
                   const int *refuniqshl_map, const double *uniq_Rcut2s,
                   int *atm, int natm, int *bas, int nbas, double *env,
                   int nenv)
{
    const int njsh = shls_slice[3] - shls_slice[2];
    const int nLk = nimgs * nkpts;
    double *expkL_r = (double *)malloc(sizeof(double) * nLk * OF_CMPLX);
    double *expkL_i = expkL_r + nLk;
    int i;
    for (i = 0; i < nLk; i++) {
        expkL_r[i] = creal(expkL[i]);
        expkL_i[i] = cimag(expkL[i]);
    }
    const int cache_size = GTOmax_cache_size(intor, shls_slice, 2,
                                             atm, natm, bas, nbas, env);
#pragma omp parallel
{
    int jsh;
    /* thread-private copy of env: shift_bas mutates coordinates in place */
    double *env_loc = (double *)malloc(sizeof(double)*nenv);
    NPdcopy(env_loc, env, nenv);
    /* k-space (re+im) + image buffer + one integral tile, see _nr2c_k_fill */
    size_t count = nkpts * OF_CMPLX + nimgs + 1;
    double *buf = (double *)malloc(sizeof(double)*(count*INTBUFMAX10*comp+cache_size));
#pragma omp for schedule(dynamic)
    for (jsh = 0; jsh < njsh; jsh++) {
        (*fill)(intor, out, nkpts, comp, nimgs, jsh,
                buf, env_loc, Ls, expkL_r, expkL_i,
                shls_slice, ao_loc, cintopt, refuniqshl_map, uniq_Rcut2s,
                atm, natm, bas, nbas, env);
    }
    free(buf);
    free(env_loc);
}
    free(expkL_r);
}
// non-split basis implementation of j3c
// Gamma point
/* Repack one k-major integral buffer `in` ([comp][dk][dj][di] per ksh)
 * for a shell pair with ish > jsh into the Gamma-point output `out`,
 * which packs the (i, j) AO pair index as a lower triangle (i >= j)
 * with the auxiliary AO index k fastest.
 *
 * Fix: the triangular offsets ao_loc*(ao_loc+1)/2 are now evaluated in
 * size_t (matching sort3c_ks2_igtj below); previously they were computed
 * in int and could overflow for large AO counts before being widened. */
static void sort3c_gs2_igtj(double *out, double *in, int *shls_slice,
                            int *ao_loc, int comp, int ish, int jsh,
                            int msh0, int msh1)
{
    const int ish0 = shls_slice[0];
    const int ish1 = shls_slice[1];
    const int jsh0 = shls_slice[2];
    const int ksh0 = shls_slice[4];
    const int ksh1 = shls_slice[5];
    const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
    const size_t off0 = ((size_t)ao_loc[ish0]) * (ao_loc[ish0] + 1) / 2;
    const size_t nij = ((size_t)ao_loc[ish1]) * (ao_loc[ish1] + 1) / 2 - off0;
    const size_t nijk = nij * naok;
    const int di = ao_loc[ish+1] - ao_loc[ish];
    const int dj = ao_loc[jsh+1] - ao_loc[jsh];
    const int dij = di * dj;
    const int jp = ao_loc[jsh] - ao_loc[jsh0];
    /* start of the (ish, jsh) tile inside the packed triangle */
    out += (((size_t)ao_loc[ish])*(ao_loc[ish]+1)/2 - off0 + jp) * naok;
    int i, j, k, ij, ksh, ic, dk, dijk;
    double *pin, *pout;
    for (ksh = msh0; ksh < msh1; ksh++) {
        dk = ao_loc[ksh+1] - ao_loc[ksh];
        dijk = dij * dk;
        for (ic = 0; ic < comp; ic++) {
            pout = out + nijk * ic + ao_loc[ksh]-ao_loc[ksh0];
            pin = in + dijk * ic;
            /* buffer is k-major (k, j, i); output is (packed ij, k) */
            for (i = 0; i < di; i++) {
                for (j = 0; j < dj; j++) {
                    ij = j * di + i;
                    for (k = 0; k < dk; k++) {
                        pout[j*naok+k] = pin[k*dij+ij];
                    }
                }
                /* row (i + ao_loc[ish]) of the triangle holds
                 * i + ao_loc[ish] + 1 column entries */
                pout += (i+ao_loc[ish]+1) * naok;
            }
        }
        in += dijk * comp;
    }
}
/* Same repacking as sort3c_gs2_igtj but for the diagonal shell pair
 * ish == jsh: only the j <= i half of each tile is written, so diagonal
 * blocks are not double-counted in the packed triangle.
 *
 * Fix: triangular offsets evaluated in size_t (consistent with
 * sort3c_ks2_ieqj); the previous int arithmetic could overflow for
 * large AO counts before being widened. */
static void sort3c_gs2_ieqj(double *out, double *in, int *shls_slice,
                            int *ao_loc, int comp, int ish, int jsh,
                            int msh0, int msh1)
{
    const int ish0 = shls_slice[0];
    const int ish1 = shls_slice[1];
    const int jsh0 = shls_slice[2];
    const int ksh0 = shls_slice[4];
    const int ksh1 = shls_slice[5];
    const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
    const size_t off0 = ((size_t)ao_loc[ish0]) * (ao_loc[ish0] + 1) / 2;
    const size_t nij = ((size_t)ao_loc[ish1]) * (ao_loc[ish1] + 1) / 2 - off0;
    const size_t nijk = nij * naok;
    const int di = ao_loc[ish+1] - ao_loc[ish];
    const int dij = di * di;
    const int jp = ao_loc[jsh] - ao_loc[jsh0];
    /* start of the diagonal (ish, ish) tile inside the packed triangle */
    out += (((size_t)ao_loc[ish])*(ao_loc[ish]+1)/2 - off0 + jp) * naok;
    int i, j, k, ij, ksh, ic, dk, dijk;
    double *pin, *pout;
    for (ksh = msh0; ksh < msh1; ksh++) {
        dk = ao_loc[ksh+1] - ao_loc[ksh];
        dijk = dij * dk;
        for (ic = 0; ic < comp; ic++) {
            pout = out + nijk * ic + ao_loc[ksh]-ao_loc[ksh0];
            pin = in + dijk * ic;
            for (i = 0; i < di; i++) {
                /* lower triangle only: j <= i */
                for (j = 0; j <= i; j++) {
                    ij = j * di + i;
                    for (k = 0; k < dk; k++) {
                        pout[j*naok+k] = pin[k*dij+ij];
                    }
                }
                pout += (i+ao_loc[ish]+1) * naok;
            }
        }
        in += dijk * comp;
    }
}
/* Gamma-point 3-center short-range fill for one (ish, jsh) shell pair.
 *
 * For every pair of lattice images (iL, jL) that survives the shell-pair
 * distance screening, and every auxiliary shell ksh within the weighted
 * pair center's cutoff, the short-range integral is accumulated into
 * bufL as full-range (omega=0) minus long-range (original omega).  The
 * accumulated strip is then scattered into `out` by fsort (one of the
 * sort3c_gs2_* routines).
 *
 * Buffer layout in `buf`: intor tile (dij*dkaomax) | bufL (dijmc) | cache.
 * NOTE(review): the intor tile region is sized dij*dkaomax while a single
 * call writes dij*dk*comp -- for comp > 1 this appears to rely on the
 * driver's extra slack in `count`; confirm against PBCsr3c_g_drv sizing. */
static void _nr3c_g(int (*intor)(), void (*fsort)(), double *out,
                    int comp, int nimgs,
                    int ish, int jsh,
                    double *buf, double *env_loc, double *Ls,
                    int *shls_slice, int *ao_loc,
                    CINTOpt *cintopt,
                    int *refuniqshl_map, int *auxuniqshl_map,
                    int nbasauxuniq, double *uniqexp,
                    double *uniq_dcut2s, double dcut_binsize,
                    double *uniq_Rcut2s, int *uniqshlpr_dij_loc,
                    int *atm, int natm, int *bas, int nbas, double *env)
{
    const int ish0 = shls_slice[0];
    const int jsh0 = shls_slice[2];
    const int ksh0 = shls_slice[4];
    const int ksh1 = shls_slice[5];
    jsh += jsh0;
    ish += ish0;
    int iptrxyz = atm[PTR_COORD+bas[ATOM_OF+ish*BAS_SLOTS]*ATM_SLOTS];
    int jptrxyz = atm[PTR_COORD+bas[ATOM_OF+jsh*BAS_SLOTS]*ATM_SLOTS];
    int kptrxyz;
    const int di = ao_loc[ish+1] - ao_loc[ish];
    const int dj = ao_loc[jsh+1] - ao_loc[jsh];
    const int dij = di * dj;
    const int dkaomax = GTOmax_shell_dim(ao_loc, shls_slice+4, 1);
    int dkmax = MAX(INTBUFMAX10 / dij, dkaomax); // buf can hold at least 1 ksh
    /* split the auxiliary shells into strips that fit the buffer */
    int kshloc[ksh1-ksh0+1];
    int nkshloc = shloc_partition(kshloc, ao_loc, ksh0, ksh1, dkmax);
    int i, m, msh0, msh1, dijm;
    int ksh, dk, dijkc;
    int iL, jL;
    int shls[3];
    int dijmc = dij * dkmax * comp;
    double *bufL = buf + dij*dkaomax;
    double *cache = bufL + dijmc;
    double *pbuf;
    /* omega is restored after every full-range evaluation below */
    const double omega = fabs(env_loc[PTR_RANGE_OMEGA]);
    shls[0] = ish;
    shls[1] = jsh;
    /* per-unique-pair screening data: cutoffs are tabulated per binned
     * |ri - rj| distance and per unique auxiliary shell */
    int Ish, Jsh, IJsh, Ksh, idij;
    Ish = refuniqshl_map[ish];
    Jsh = refuniqshl_map[jsh-nbas];
    IJsh = (Ish>=Jsh)?(Ish*(Ish+1)/2+Jsh):(Jsh*(Jsh+1)/2+Ish);
    const double *uniq_Rcut2s_IJ, *uniq_Rcut2s_K;
    uniq_Rcut2s_IJ = uniq_Rcut2s + uniqshlpr_dij_loc[IJsh] * nbasauxuniq;
    double *ri, *rj, *rk, rc[3];
    double dij2, dij2_cut, inv_d0, Rijk2, Rcut2, ei, ej;
    inv_d0 = 1./dcut_binsize;
    dij2_cut = uniq_dcut2s[IJsh];
    ei = uniqexp[Ish];
    ej = uniqexp[Jsh];
    for (m = 0; m < nkshloc; m++) {
        msh0 = kshloc[m];
        msh1 = kshloc[m+1];
        dkmax = ao_loc[msh1] - ao_loc[msh0];
        dijm = dij * dkmax;
        dijmc = dijm * comp;
        /* zero the image-summed accumulator for this strip */
        for (i = 0; i < dijmc; i++) {
            bufL[i] = 0;
        }
        for (iL = 0; iL < nimgs; iL++) {
            shift_bas(env_loc, env, Ls, iptrxyz, iL);
            ri = env_loc + iptrxyz;
            for (jL = 0; jL < nimgs; jL++) {
                shift_bas(env_loc, env, Ls, jptrxyz, jL);
                rj = env_loc + jptrxyz;
                /* shell-pair distance screening */
                dij2 = get_dsqure(ri, rj);
                if(dij2 > dij2_cut) {
                    continue;
                }
                idij = (int)(sqrt(dij2)*inv_d0);
                uniq_Rcut2s_K = uniq_Rcut2s_IJ + idij * nbasauxuniq;
                /* exponent-weighted pair center for aux screening */
                get_rc(rc, ri, rj, ei, ej);
                pbuf = bufL;
                for (ksh = msh0; ksh < msh1; ksh++) {
                    shls[2] = ksh;
                    dk = ao_loc[ksh+1] - ao_loc[ksh];
                    dijkc = dij*dk * comp;
                    Ksh = auxuniqshl_map[ksh-2*nbas];
                    Rcut2 = uniq_Rcut2s_K[Ksh];
                    kptrxyz = atm[PTR_COORD+bas[ATOM_OF+ksh*BAS_SLOTS]
                                  *ATM_SLOTS];
                    rk = env_loc + kptrxyz;
                    Rijk2 = get_dsqure(rc, rk);
                    if(Rijk2 < Rcut2) {
                        /* SR = full-range (omega=0) minus long-range */
                        env_loc[PTR_RANGE_OMEGA] = 0.;
                        if ((*intor)(buf, NULL, shls, atm, natm, bas, nbas,
                                     env_loc, cintopt, cache)) {
                            for (i = 0; i < dijkc; i++) {
                                pbuf[i] += buf[i];
                            }
                        }
                        env_loc[PTR_RANGE_OMEGA] = omega;
                        if ((*intor)(buf, NULL, shls, atm, natm, bas, nbas,
                                     env_loc, cintopt, cache)) {
                            for (i = 0; i < dijkc; i++) {
                                pbuf[i] -= buf[i];
                            }
                        }
                    } // if Rcut
                    pbuf += dijkc;
                }
            } // jL
        } // iL
        /* scatter the accumulated strip into the packed output */
        (*fsort)(out, bufL, shls_slice, ao_loc, comp, ish, jsh, msh0, msh1);
    }
}
/* Dispatch the Gamma-point 3c fill for one (ish, jsh) pair under s2
 * symmetry.  Pairs with ip < jp are skipped entirely: they are covered
 * by the transposed (jsh, ish) pair; the diagonal uses the ieqj packer
 * so diagonal tiles are not double-written. */
void PBCsr3c_gs2(int (*intor)(), double *out,
                 int comp, int nimgs,
                 int ish, int jsh,
                 double *buf, double *env_loc, double *Ls,
                 int *shls_slice, int *ao_loc,
                 CINTOpt *cintopt,
                 int *refuniqshl_map, int *auxuniqshl_map,
                 int nbasauxuniq, double *uniqexp,
                 double *uniq_dcut2s, double dcut_binsize,
                 double *uniq_Rcut2s, int *uniqshlpr_dij_loc,
                 int *atm, int natm, int *bas, int nbas, double *env)
{
    const int ip = ish + shls_slice[0];
    const int jp = jsh + shls_slice[2] - nbas;
    if (ip < jp) {
        return;  /* handled by the transposed pair */
    }
    if (ip == jp) {
        _nr3c_g(intor, &sort3c_gs2_ieqj, out,
                comp, nimgs, ish, jsh,
                buf, env_loc, Ls,
                shls_slice, ao_loc, cintopt,
                refuniqshl_map, auxuniqshl_map,
                nbasauxuniq, uniqexp,
                uniq_dcut2s, dcut_binsize,
                uniq_Rcut2s, uniqshlpr_dij_loc,
                atm, natm, bas, nbas, env);
    } else {
        _nr3c_g(intor, &sort3c_gs2_igtj, out,
                comp, nimgs, ish, jsh,
                buf, env_loc, Ls,
                shls_slice, ao_loc, cintopt,
                refuniqshl_map, auxuniqshl_map,
                nbasauxuniq, uniqexp,
                uniq_dcut2s, dcut_binsize,
                uniq_Rcut2s, uniqshlpr_dij_loc,
                atm, natm, bas, nbas, env);
    }
}
/* Gamma-point driver: one OpenMP task per (ish, jsh) shell pair.
 * Pairs screened out by shlpr_mask are skipped before any work is done;
 * each thread owns a private env copy (shift_bas mutates coordinates)
 * and a private integral/accumulation buffer. */
void PBCsr3c_g_drv(int (*intor)(), void (*fill)(), double *out,
                   int comp, int nimgs,
                   double *Ls,
                   int *shls_slice, int *ao_loc,
                   CINTOpt *cintopt, int8_t *shlpr_mask,
                   int *refuniqshl_map, int *auxuniqshl_map,
                   int nbasauxuniq, double *uniqexp,
                   double *uniq_dcut2s, double dcut_binsize,
                   double *uniq_Rcut2s, int *uniqshlpr_dij_loc,
                   int *atm, int natm, int *bas, int nbas, double *env, int nenv)
{
    const int nish = shls_slice[1] - shls_slice[0];
    const int njsh = shls_slice[3] - shls_slice[2];
    const int dimax = GTOmax_shell_dim(ao_loc, shls_slice+0, 1);
    const int djmax = GTOmax_shell_dim(ao_loc, shls_slice+2, 1);
    const int dkaomax = GTOmax_shell_dim(ao_loc, shls_slice+4, 1);
    const int dijk = dimax * djmax * dkaomax;
    /* accumulation strip + one intor tile, comp components each */
    const size_t count = (MAX(INTBUFMAX10, dijk) + dijk) * comp;
    const int cache_size = GTOmax_cache_size(intor, shls_slice, 3,
                                             atm, natm, bas, nbas, env);
#pragma omp parallel
{
    int ish, jsh, ij;
    double *env_loc = (double *)malloc(sizeof(double)*nenv);
    memcpy(env_loc, env, sizeof(double)*nenv);
    double *buf = (double *)malloc(sizeof(double)*(count+cache_size));
#pragma omp for schedule(dynamic)
    for (ij = 0; ij < nish*njsh; ij++) {
        if (!shlpr_mask[ij]) {
            continue;
        }
        ish = ij / njsh;
        jsh = ij % njsh;
        (*fill)(intor, out, comp, nimgs,
                ish, jsh,
                buf, env_loc, Ls,
                shls_slice, ao_loc, cintopt,
                refuniqshl_map, auxuniqshl_map,
                nbasauxuniq, uniqexp,
                uniq_dcut2s, dcut_binsize,
                uniq_Rcut2s, uniqshlpr_dij_loc,
                atm, natm, bas, nbas, env);
    }
    free(buf);
    free(env_loc);
}
}
// single k-point, bvk
/* Assemble the complex 3-center tensor (s1, no symmetry) for all
 * k-points from real (bufr) and imaginary (bufi) halves.  Each k-point's
 * buffer holds [comp][dk][dj][di] tiles for shells ksh in [msh0, msh1);
 * the output is laid out (i, j, k) row-major with extents
 * (naoi, naoj, naok), one nijk*comp tensor per k-point. */
static void sort3c_ks1(double complex *out, double *bufr, double *bufi,
                       int *shls_slice, int *ao_loc, int nkpts, int comp,
                       int ish, int jsh, int msh0, int msh1)
{
    const int ish0 = shls_slice[0];
    const int ish1 = shls_slice[1];
    const int jsh0 = shls_slice[2];
    const int jsh1 = shls_slice[3];
    const int ksh0 = shls_slice[4];
    const int ksh1 = shls_slice[5];
    const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
    const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
    const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
    const size_t njk = naoj * naok;    /* stride of one i row */
    const size_t nijk = njk * naoi;    /* one component of one k-point */
    const int di = ao_loc[ish+1] - ao_loc[ish];
    const int dj = ao_loc[jsh+1] - ao_loc[jsh];
    const int ip = ao_loc[ish] - ao_loc[ish0];
    const int jp = ao_loc[jsh] - ao_loc[jsh0];
    const int dij = di * dj;
    const int dkmax = ao_loc[msh1] - ao_loc[msh0];
    const size_t dijmc = dij * dkmax * comp;   /* buffer stride per k */
    out += (ip * naoj + jp) * naok;    /* start of the (ish, jsh) tile */
    int i, j, k, kk, ksh, ic, dk, dijk;
    size_t off;
    double *pbr, *pbi;
    double complex *pout;
    for (kk = 0; kk < nkpts; kk++) {
        off = kk * dijmc;
        for (ksh = msh0; ksh < msh1; ksh++) {
            dk = ao_loc[ksh+1] - ao_loc[ksh];
            dijk = dij * dk;
            for (ic = 0; ic < comp; ic++) {
                pout = out + nijk*ic + ao_loc[ksh]-ao_loc[ksh0];
                pbr = bufr + off + dijk*ic;
                pbi = bufi + off + dijk*ic;
                /* buffer tile is (k, j, i); output is (i, j, k) */
                for (j = 0; j < dj; j++) {
                    for (k = 0; k < dk; k++) {
                        for (i = 0; i < di; i++) {
                            pout[i*njk+k] = pbr[k*dij+i] + pbi[k*dij+i]*_Complex_I;
                        }
                    }
                    pout += naok;
                    pbr += di;
                    pbi += di;
                }
            }
            off += dijk * comp;
        }
        out += nijk * comp;
    }
}
/* Assemble the complex 3-center tensor under s2 symmetry for a shell
 * pair with ish > jsh: the (i, j) AO pair index is packed as a lower
 * triangle (i >= j), with the auxiliary index k fastest; one packed
 * nijk*comp tensor per k-point.  Triangular offsets are evaluated in
 * size_t to avoid int overflow for large AO counts. */
static void sort3c_ks2_igtj(double complex *out, double *bufr, double *bufi,
                            int *shls_slice, int *ao_loc, int nkpts, int comp,
                            int ish, int jsh, int msh0, int msh1)
{
    const int ish0 = shls_slice[0];
    const int ish1 = shls_slice[1];
    const int jsh0 = shls_slice[2];
    const int ksh0 = shls_slice[4];
    const int ksh1 = shls_slice[5];
    const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
    const size_t off0 = ((size_t)ao_loc[ish0]) * (ao_loc[ish0] + 1) / 2;
    const size_t nij = ((size_t)ao_loc[ish1]) * (ao_loc[ish1] + 1) / 2 - off0;
    const size_t nijk = nij * naok;
    const int di = ao_loc[ish+1] - ao_loc[ish];
    const int dj = ao_loc[jsh+1] - ao_loc[jsh];
    const int dij = di * dj;
    const int dkmax = ao_loc[msh1] - ao_loc[msh0];
    const size_t dijmc = dij * dkmax * comp;   /* buffer stride per k */
    const int jp = ao_loc[jsh] - ao_loc[jsh0];
    /* start of the (ish, jsh) tile inside the packed triangle */
    out += (((size_t)ao_loc[ish])*(ao_loc[ish]+1)/2-off0 + jp) * naok;
    int i, j, k, ij, kk, ksh, ic, dk, dijk;
    size_t off;
    double *pbr, *pbi;
    double complex *pout;
    for (kk = 0; kk < nkpts; kk++) {
        off = kk * dijmc;
        for (ksh = msh0; ksh < msh1; ksh++) {
            dk = ao_loc[ksh+1] - ao_loc[ksh];
            dijk = dij * dk;
            for (ic = 0; ic < comp; ic++) {
                pout = out + nijk*ic + ao_loc[ksh]-ao_loc[ksh0];
                pbr = bufr + off + dijk*ic;
                pbi = bufi + off + dijk*ic;
                /* buffer tile is (k, j, i); output is (packed ij, k) */
                for (i = 0; i < di; i++) {
                    for (j = 0; j < dj; j++) {
                        ij = j * di + i;
                        for (k = 0; k < dk; k++) {
                            pout[j*naok+k] = pbr[k*dij+ij] + pbi[k*dij+ij]*_Complex_I;
                        }
                    }
                    /* row (i + ao_loc[ish]) holds i + ao_loc[ish] + 1 cols */
                    pout += (i+ao_loc[ish]+1) * naok;
                }
            }
            off += dijk * comp;
        }
        out += nijk * comp;
    }
}
/* Same packing as sort3c_ks2_igtj but for the diagonal shell pair
 * ish == jsh: only the j <= i half of each tile is written so diagonal
 * blocks are not double-counted in the packed triangle. */
static void sort3c_ks2_ieqj(double complex *out, double *bufr, double *bufi,
                            int *shls_slice, int *ao_loc, int nkpts, int comp,
                            int ish, int jsh, int msh0, int msh1)
{
    const int ish0 = shls_slice[0];
    const int ish1 = shls_slice[1];
    const int jsh0 = shls_slice[2];
    const int ksh0 = shls_slice[4];
    const int ksh1 = shls_slice[5];
    const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
    const size_t off0 = ((size_t)ao_loc[ish0]) * (ao_loc[ish0] + 1) / 2;
    const size_t nij = ((size_t)ao_loc[ish1]) * (ao_loc[ish1] + 1) / 2 - off0;
    const size_t nijk = nij * naok;
    const int di = ao_loc[ish+1] - ao_loc[ish];
    const int dj = ao_loc[jsh+1] - ao_loc[jsh];
    const int dij = di * dj;
    const int dkmax = ao_loc[msh1] - ao_loc[msh0];
    const size_t dijmc = dij * dkmax * comp;   /* buffer stride per k */
    const int jp = ao_loc[jsh] - ao_loc[jsh0];
    /* start of the diagonal (ish, ish) tile inside the packed triangle */
    out += (((size_t)ao_loc[ish])*(ao_loc[ish]+1)/2-off0 + jp) * naok;
    int i, j, k, ij, kk, ksh, ic, dk, dijk;
    size_t off;
    double *pbr, *pbi;
    double complex *pout;
    for (kk = 0; kk < nkpts; kk++) {
        off = kk * dijmc;
        for (ksh = msh0; ksh < msh1; ksh++) {
            dk = ao_loc[ksh+1] - ao_loc[ksh];
            dijk = dij * dk;
            for (ic = 0; ic < comp; ic++) {
                pout = out + nijk*ic + ao_loc[ksh]-ao_loc[ksh0];
                pbr = bufr + off + dijk*ic;
                pbi = bufi + off + dijk*ic;
                for (i = 0; i < di; i++) {
                    /* lower triangle only: j <= i */
                    for (j = 0; j <= i; j++) {
                        ij = j * di + i;
                        for (k = 0; k < dk; k++) {
                            pout[j*naok+k] = pbr[k*dij+ij] + pbi[k*dij+ij]*_Complex_I;
                        }
                    }
                    pout += (i+ao_loc[ish]+1) * naok;
                }
            }
            off += dijk * comp;
        }
        out += nijk * comp;
    }
}
/* Born-von-Karman (bvk) k-point 3-center short-range fill for one
 * (ish, jsh) shell pair.
 *
 * Lattice images are grouped into bvk cells via cell_loc_bvk.  For every
 * bvk pair (iL_bvk, jL_bvk) the surviving raw image pairs are summed
 * into one dijmc slab of bufL (one slab per jL_bvk); the phase product
 * conj(expkL[iL]) * expkL[jL] is tabulated per k-point in bufexp_r/_i
 * and the jL_bvk axis is contracted into bufk_r/bufk_i by dgemm (beta=1,
 * accumulating over iL_bvk).  fsort (a sort3c_ks* routine) finally
 * scatters the complex result into `out`.
 *
 * Buffer layout in `buf`:
 *   bufexp_r | bufexp_i (bvk_nimgs*nkpts each) | bufk_r | bufk_i
 *   (dijmk each) | bufL (bvk_nimgs*dijmc) | pbuf | pbuf2 | cache */
static void _nr3c_bvk_k(int (*intor)(), void (*fsort)(),
                        double complex *out, int nkpts_ij,
                        int nkpts, int comp, int nimgs, int bvk_nimgs,
                        int ish, int jsh, int *cell_loc_bvk,
                        double *buf, double *env_loc, double *Ls,
                        double *expkL_r, double *expkL_i, int *kptij_idx,
                        int *shls_slice, int *ao_loc,
                        CINTOpt *cintopt,
                        int *refuniqshl_map, int *auxuniqshl_map,
                        int nbasauxuniq, double *uniqexp,
                        double *uniq_dcut2s, double dcut_binsize,
                        double *uniq_Rcut2s, int *uniqshlpr_dij_loc,
                        int *atm, int natm, int *bas, int nbas, double *env)
{
    const int ish0 = shls_slice[0];
    const int jsh0 = shls_slice[2];
    const int ksh0 = shls_slice[4];
    const int ksh1 = shls_slice[5];
    const char TRANS_N = 'N';
    const double D1 = 1;
    jsh += jsh0;
    ish += ish0;
    int iptrxyz = atm[PTR_COORD+bas[ATOM_OF+ish*BAS_SLOTS]*ATM_SLOTS];
    int jptrxyz = atm[PTR_COORD+bas[ATOM_OF+jsh*BAS_SLOTS]*ATM_SLOTS];
    int kptrxyz;
    const int di = ao_loc[ish+1] - ao_loc[ish];
    const int dj = ao_loc[jsh+1] - ao_loc[jsh];
    const int dij = di * dj;
    const int dkaomax = GTOmax_shell_dim(ao_loc, shls_slice+4, 1);
    int dkmax = MAX(INTBUFMAX10 / dij, dkaomax); // buf can hold at least 1 ksh
    /* split the auxiliary shells into strips that fit the buffer */
    int kshloc[ksh1-ksh0+1];
    int nkshloc = shloc_partition(kshloc, ao_loc, ksh0, ksh1, dkmax);
    int i, m, msh0, msh1, dijmc, empty;
    size_t dijmk, dijkc;
    int ksh, dk;
    int iL_bvk, iL0, iL1, iL, jL_bvk, jL0, jL1, jL;
    int shls[3];
    double *bufexp_r = buf;
    double *bufexp_i = bufexp_r + bvk_nimgs * nkpts;
    double *bufk_r = bufexp_i + bvk_nimgs * nkpts;
    double *bufk_i, *bufL, *pbufL, *pbuf, *pbuf1, *pbuf2, *cache;
    shls[0] = ish;
    shls[1] = jsh;
    /* per-unique-pair screening data (binned |ri - rj| distances) */
    const double omega = fabs(env_loc[PTR_RANGE_OMEGA]);
    int Ish, Jsh, IJsh, Ksh, idij, kiLj, kiLi;
    Ish = refuniqshl_map[ish];
    Jsh = refuniqshl_map[jsh-nbas];
    IJsh = (Ish>=Jsh)?(Ish*(Ish+1)/2+Jsh):(Jsh*(Jsh+1)/2+Ish);
    const double *uniq_Rcut2s_IJ, *uniq_Rcut2s_K;
    uniq_Rcut2s_IJ = uniq_Rcut2s + uniqshlpr_dij_loc[IJsh] * nbasauxuniq;
    double *ri, *rj, *rk, rc[3];
    double dij2, dij2_cut, inv_d0, Rijk2, Rcut2, ei, ej;
    inv_d0 = 1./dcut_binsize;
    dij2_cut = uniq_dcut2s[IJsh];
    ei = uniqexp[Ish];
    ej = uniqexp[Jsh];
    for (m = 0; m < nkshloc; m++) {
        msh0 = kshloc[m];
        msh1 = kshloc[m+1];
        dkmax = ao_loc[msh1] - ao_loc[msh0];
        dijmc = dij * dkmax * comp;
        dijmk = dijmc * nkpts;
        bufk_i = bufk_r + dijmk;
        bufL = bufk_i + dijmk;
        pbuf = bufL + ((size_t)bvk_nimgs) * dijmc;
        pbuf2 = pbuf + dijmc;
        cache = pbuf2 + dijmc;
        /* zero both halves of the k-space accumulator */
        for (i = 0; i < dijmk*OF_CMPLX; i++) {
            bufk_r[i] = 0;
        }
        for (iL_bvk = 0; iL_bvk < bvk_nimgs; iL_bvk++) {
            for (i = 0; i < dijmc*bvk_nimgs; ++i) {
                bufL[i] = 0;
            }
            iL0 = cell_loc_bvk[iL_bvk];
            iL1 = cell_loc_bvk[iL_bvk+1];
            for (jL_bvk = 0; jL_bvk < bvk_nimgs; jL_bvk++) {
                pbufL = bufL + jL_bvk * dijmc;
                jL0 = cell_loc_bvk[jL_bvk];
                jL1 = cell_loc_bvk[jL_bvk+1];
                for (iL = iL0; iL < iL1; iL++) {
                    shift_bas(env_loc, env, Ls, iptrxyz, iL);
                    ri = env_loc + iptrxyz;
                    for (jL = jL0; jL < jL1; jL++) {
                        shift_bas(env_loc, env, Ls, jptrxyz, jL);
                        rj = env_loc + jptrxyz;
                        /* shell-pair distance screening */
                        dij2 = get_dsqure(ri, rj);
                        if(dij2 > dij2_cut) {
                            continue;
                        }
                        idij = (int)(sqrt(dij2)*inv_d0);
                        uniq_Rcut2s_K = uniq_Rcut2s_IJ + idij * nbasauxuniq;
                        get_rc(rc, ri, rj, ei, ej);
                        pbuf1 = pbuf;
                        for (ksh = msh0; ksh < msh1; ksh++) {
                            shls[2] = ksh;
                            dk = ao_loc[ksh+1] - ao_loc[ksh];
                            dijkc = dij * dk * comp;
                            Ksh = auxuniqshl_map[ksh-2*nbas];
                            Rcut2 = uniq_Rcut2s_K[Ksh];
                            kptrxyz = atm[PTR_COORD+bas[ATOM_OF+
                                          ksh*BAS_SLOTS]*ATM_SLOTS];
                            rk = env_loc + kptrxyz;
                            Rijk2 = get_dsqure(rc, rk);
                            if (Rijk2 < Rcut2) {
                                /* SR = full-range (omega=0) - long-range */
                                env_loc[PTR_RANGE_OMEGA] = 0.;
                                /* NOTE(review): if this intor screens to
                                 * zero without writing pbuf1, stale data
                                 * from the previous image pair would be
                                 * accumulated below -- presumably the
                                 * backend zeroes the tile; confirm. */
                                if ((*intor)(pbuf1, NULL, shls, atm, natm,
                                             bas, nbas, env_loc, cintopt,
                                             cache)) {
                                    empty = 0;
                                }
                                env_loc[PTR_RANGE_OMEGA] = omega;
                                if ((*intor)(pbuf2, NULL, shls, atm, natm,
                                             bas, nbas, env_loc, cintopt,
                                             cache)) {
                                    for (i = 0; i < dijkc; i++) {
                                        pbuf1[i] -= pbuf2[i];
                                    }
                                }
                            } else {
                                for (i = 0; i < dijkc; i++) {
                                    pbuf1[i] = 0;
                                }
                            } // if Rijk2
                            pbuf1 += dijkc;
                        } // ksh
                        /* accumulate this raw image pair into its bvk slab */
                        for (i = 0; i < dijmc; i++) {
                            pbufL[i] += pbuf[i];
                        }
                    } // jL
                } // iL
                // ('k,kL->kL', conj(expkL[iL]), expkL)
                for (i = 0; i < nkpts; i++) {
                    kiLj = i*bvk_nimgs+jL_bvk;
                    kiLi = i*bvk_nimgs+iL_bvk;
                    bufexp_r[kiLj] = expkL_r[kiLj] * expkL_r[kiLi];
                    bufexp_r[kiLj]+= expkL_i[kiLj] * expkL_i[kiLi];
                    bufexp_i[kiLj] = expkL_i[kiLj] * expkL_r[kiLi];
                    bufexp_i[kiLj]-= expkL_r[kiLj] * expkL_i[kiLi];
                }
            } // jL_bvk
            /* contract the jL_bvk axis with the phase products; beta=1
             * accumulates over iL_bvk */
            dgemm_(&TRANS_N, &TRANS_N, &dijmc, &nkpts, &bvk_nimgs,
                   &D1, bufL, &dijmc, bufexp_r, &bvk_nimgs, &D1, bufk_r, &dijmc);
            dgemm_(&TRANS_N, &TRANS_N, &dijmc, &nkpts, &bvk_nimgs,
                   &D1, bufL, &dijmc, bufexp_i, &bvk_nimgs, &D1, bufk_i, &dijmc);
        } // iL_bvk
        (*fsort)(out, bufk_r, bufk_i, shls_slice, ao_loc, nkpts, comp,
                 ish, jsh, msh0, msh1);
    }
}
/* Entry point for the short-range 3c2e fill with a single k-point list and
 * BvK supercell contraction, s1 (no permutational symmetry) output ordering.
 * Thin forwarder: binds the s1 sorting routine and hands every argument to
 * the generic BvK kernel `_nr3c_bvk_k`. */
void PBCsr3c_bvk_ks1(int (*intor)(), double complex *out, int nkpts_ij,
                     int nkpts, int comp, int nimgs, int bvk_nimgs,
                     int ish, int jsh, int *cell_loc_bvk,
                     double *buf, double *env_loc, double *Ls,
                     double *expkL_r, double *expkL_i, int *kptij_idx,
                     int *shls_slice, int *ao_loc,
                     CINTOpt *cintopt,
                     int *refuniqshl_map, int *auxuniqshl_map,
                     int nbasauxuniq, double *uniqexp,
                     double *uniq_dcut2s, double dcut_binsize,
                     double *uniq_Rcut2s, int *uniqshlpr_dij_loc,
                     int *atm, int natm, int *bas, int nbas, double *env)
{
    _nr3c_bvk_k(intor, &sort3c_ks1, out, nkpts_ij, nkpts, comp, nimgs,
                bvk_nimgs, ish, jsh, cell_loc_bvk,
                buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx,
                shls_slice, ao_loc, cintopt,
                refuniqshl_map, auxuniqshl_map, nbasauxuniq, uniqexp,
                uniq_dcut2s, dcut_binsize, uniq_Rcut2s, uniqshlpr_dij_loc,
                atm, natm, bas, nbas, env);
}
/* Entry point for the short-range 3c2e fill with a single k-point list and
 * BvK supercell contraction, s2 (i >= j) output ordering.
 *
 * Dispatches on the global shell-pair ordering: the i > j sorter for strictly
 * lower blocks, the i == j sorter for diagonal blocks.  Pairs with ip < jp
 * produce no work here — presumably they are recovered from the transposed
 * (jp, ip) block by the s2 symmetry (verify against the sorter). */
void PBCsr3c_bvk_ks2(int (*intor)(), double complex *out, int nkpts_ij,
                     int nkpts, int comp, int nimgs, int bvk_nimgs,
                     int ish, int jsh, int *cell_loc_bvk,
                     double *buf, double *env_loc, double *Ls,
                     double *expkL_r, double *expkL_i, int *kptij_idx,
                     int *shls_slice, int *ao_loc,
                     CINTOpt *cintopt,
                     int *refuniqshl_map, int *auxuniqshl_map,
                     int nbasauxuniq, double *uniqexp,
                     double *uniq_dcut2s, double dcut_binsize,
                     double *uniq_Rcut2s, int *uniqshlpr_dij_loc,
                     int *atm, int natm, int *bas, int nbas, double *env)
{
    const int ip = ish + shls_slice[0];
    const int jp = jsh + shls_slice[2] - nbas;
    void (*fsort)() = NULL;
    if (ip > jp) {
        fsort = &sort3c_ks2_igtj;
    } else if (ip == jp) {
        fsort = &sort3c_ks2_ieqj;
    }
    if (fsort != NULL) {
        _nr3c_bvk_k(intor, fsort, out,
                    nkpts_ij, nkpts, comp, nimgs,
                    bvk_nimgs, ish, jsh, cell_loc_bvk,
                    buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx,
                    shls_slice, ao_loc, cintopt,
                    refuniqshl_map, auxuniqshl_map, nbasauxuniq, uniqexp,
                    uniq_dcut2s, dcut_binsize, uniq_Rcut2s, uniqshlpr_dij_loc,
                    atm, natm, bas, nbas, env);
    }
}
/* OpenMP driver for the single-k-point-list, BvK-contracted short-range 3c2e
 * fill.  Splits the complex phase factors expkL into separate real/imaginary
 * arrays (so the kernels can contract them with dgemm), allocates per-thread
 * scratch, and loops over all (ish, jsh) shell pairs, skipping pairs that are
 * screened out by shlpr_mask.  `fill` is one of the PBCsr3c_bvk_ks* entries.
 * `nenv` is the length of `env`; each thread works on its own copy because
 * the kernels shift atomic coordinates in env_loc in place. */
void PBCsr3c_bvk_k_drv(int (*intor)(), void (*fill)(), double *out,
                       int nkpts_ij, int nkpts,
                       int comp, int nimgs, int bvk_nimgs,
                       double *Ls,
                       double complex *expkL,
                       int *kptij_idx,
                       int *shls_slice, int *ao_loc,
                       CINTOpt *cintopt,
                       int *cell_loc_bvk, int8_t *shlpr_mask,
                       int *refuniqshl_map, int *auxuniqshl_map,
                       int nbasauxuniq, double *uniqexp,
                       double *uniq_dcut2s, double dcut_binsize,
                       double *uniq_Rcut2s, int *uniqshlpr_dij_loc,
                       int *atm, int natm, int *bas, int nbas, double *env,
                       int nenv)
{
    const int nish = shls_slice[1] - shls_slice[0];
    const int njsh = shls_slice[3] - shls_slice[2];
    // unpack exp(i k.L) into separate real and imaginary planes
    double *expkL_r = malloc(sizeof(double) * bvk_nimgs*nkpts * OF_CMPLX);
    double *expkL_i = expkL_r + bvk_nimgs*nkpts;
    int n;
    for (n = 0; n < bvk_nimgs*nkpts; n++) {
        expkL_r[n] = creal(expkL[n]);
        expkL_i[n] = cimag(expkL[n]);
    }
    // per-thread scratch sized for the fill kernel's buffer layout
    const int dijk = GTOmax_shell_dim(ao_loc, shls_slice+0, 1)
                   * GTOmax_shell_dim(ao_loc, shls_slice+2, 1)
                   * GTOmax_shell_dim(ao_loc, shls_slice+4, 1);
    const int dijmk = MAX(INTBUFMAX10, dijk);
    const size_t count = (nkpts*OF_CMPLX + bvk_nimgs + 2) * dijmk * comp +
                         nkpts*bvk_nimgs*OF_CMPLX;
    const int cache_size = GTOmax_cache_size(intor, shls_slice, 3,
                                             atm, natm, bas, nbas, env);
#pragma omp parallel
{
    int ij, ish, jsh;
    // private copy of env: kernels write shifted coordinates into it
    double *env_loc = malloc(sizeof(double)*nenv);
    memcpy(env_loc, env, sizeof(double)*nenv);
    double *buf = malloc(sizeof(double)*(count+cache_size));
#pragma omp for schedule(dynamic)
    for (ij = 0; ij < nish*njsh; ij++) {
        if (shlpr_mask[ij]) {
            ish = ij / njsh;
            jsh = ij % njsh;
            (*fill)(intor, out, nkpts_ij, nkpts, comp, nimgs, bvk_nimgs,
                    ish, jsh, cell_loc_bvk,
                    buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx,
                    shls_slice, ao_loc, cintopt,
                    refuniqshl_map, auxuniqshl_map,
                    nbasauxuniq, uniqexp,
                    uniq_dcut2s, dcut_binsize,
                    uniq_Rcut2s, uniqshlpr_dij_loc,
                    atm, natm, bas, nbas, env);
        }
    }
    free(env_loc);
    free(buf);
}
    free(expkL_r);
}
// single k-point, no bvk
/* Short-range (SR) 3-center integral kernel for one (ish, jsh) shell pair,
 * single k-point list, no BvK contraction.
 *
 * For every lattice-image pair (iL, jL) surviving the shell-pair distance
 * screen, the SR integral over each auxiliary shell ksh is formed as
 *     intor(omega = 0) - intor(omega)     (full range minus long range),
 * with omega restored from env_loc[PTR_RANGE_OMEGA] after each call.
 * Auxiliary shells whose charge-center distance Rijk2 exceeds the tabulated
 * cutoff Rcut2 are zero-filled instead.  Screened-out jL images are
 * compacted away (jLcount), and the phase product conj(expkL[iL])*expkL[jL]
 * is accumulated into bufk_r/bufk_i with dgemm over the compacted jL index.
 * `fsort` finally scatters bufk_r/bufk_i into `out`. */
static void _nr3c_k(int (*intor)(), void (*fsort)(),
                    double complex *out, int nkpts_ij,
                    int nkpts, int comp, int nimgs, int ish, int jsh,
                    double *buf, double *env_loc, double *Ls,
                    double *expkL_r, double *expkL_i, int *kptij_idx,
                    int *shls_slice, int *ao_loc,
                    CINTOpt *cintopt,
                    int *refuniqshl_map, int *auxuniqshl_map,
                    int nbasauxuniq, double *uniqexp,
                    double *uniq_dcut2s, double dcut_binsize,
                    double *uniq_Rcut2s, int *uniqshlpr_dij_loc,
                    int *atm, int natm, int *bas, int nbas, double *env)
{
    const int ish0 = shls_slice[0];
    const int jsh0 = shls_slice[2];
    const int ksh0 = shls_slice[4];
    const int ksh1 = shls_slice[5];
    const char TRANS_N = 'N';
    const double D1 = 1;
    jsh += jsh0;
    ish += ish0;
    // offsets of the i/j shell coordinates inside env_loc
    int iptrxyz = atm[PTR_COORD+bas[ATOM_OF+ish*BAS_SLOTS]*ATM_SLOTS];
    int jptrxyz = atm[PTR_COORD+bas[ATOM_OF+jsh*BAS_SLOTS]*ATM_SLOTS];
    int kptrxyz;
    const int di = ao_loc[ish+1] - ao_loc[ish];
    const int dj = ao_loc[jsh+1] - ao_loc[jsh];
    const int dij = di * dj;
    const int dkaomax = GTOmax_shell_dim(ao_loc, shls_slice+4, 1);
    int dkmax = MAX(INTBUFMAX10 / dij, dkaomax); // buf can hold at least 1 ksh
    // partition the auxiliary shell range into buffer-sized windows
    int kshloc[ksh1-ksh0+1];
    int nkshloc = shloc_partition(kshloc, ao_loc, ksh0, ksh1, dkmax);
    int i, m, msh0, msh1, dijmc, empty;
    size_t dijmk, dijkc;
    int ksh, dk;
    int iL, jL, jLcount;
    int shls[3];
    // carve the work areas out of the caller-provided buffer:
    //   bufexp_r/bufexp_i : compacted phase factors, [nkpts][nimgs]
    //   bufk_r/bufk_i     : per-k-point accumulators
    //   bufL              : compacted per-jL integral columns
    //   pbuf2             : long-range scratch;  cache : intor workspace
    double *bufexp_r = buf;
    double *bufexp_i = bufexp_r + nimgs * nkpts;
    double *bufk_r = bufexp_i + nimgs * nkpts;
    double *bufk_i, *bufL, *pbuf, *pbuf2, *cache;
    shls[0] = ish;
    shls[1] = jsh;
    // >>>>>>>>>>
    // screening tables indexed by the unique-shell pair (Ish, Jsh)
    const double omega = fabs(env_loc[PTR_RANGE_OMEGA]);
    int Ish, Jsh, IJsh, Ksh, idij, kiLj, kiLjc, kiLi;
    Ish = refuniqshl_map[ish];
    Jsh = refuniqshl_map[jsh-nbas];
    IJsh = (Ish>=Jsh)?(Ish*(Ish+1)/2+Jsh):(Jsh*(Jsh+1)/2+Ish);
    const double *uniq_Rcut2s_IJ, *uniq_Rcut2s_K;
    uniq_Rcut2s_IJ = uniq_Rcut2s + uniqshlpr_dij_loc[IJsh] * nbasauxuniq;
    double *ri, *rj, *rk, rc[3];
    double dij2, dij2_cut, inv_d0, Rijk2, Rcut2, ei, ej;
    inv_d0 = 1./dcut_binsize;          // reciprocal of the |ri-rj| bin width
    dij2_cut = uniq_dcut2s[IJsh];      // squared distance cutoff for (i, j)
    ei = uniqexp[Ish];
    ej = uniqexp[Jsh];
    // <<<<<<<<<<
    for (m = 0; m < nkshloc; m++) {
        msh0 = kshloc[m];
        msh1 = kshloc[m+1];
        dkmax = ao_loc[msh1] - ao_loc[msh0];
        dijmc = dij * dkmax * comp;
        dijmk = dijmc * nkpts;
        bufk_i = bufk_r + dijmk;
        bufL = bufk_i + dijmk;
        pbuf = bufL + ((size_t)nimgs) * dijmc;
        pbuf2 = pbuf + dijmc;
        cache = pbuf2 + dijmc;
        for (i = 0; i < dijmk*OF_CMPLX; i++) {
            bufk_r[i] = 0;
        }
        for (iL = 0; iL < nimgs; iL++) {
            shift_bas(env_loc, env, Ls, iptrxyz, iL);
            ri = env_loc + iptrxyz;
            pbuf = bufL;
            jLcount = 0;    // number of jL images kept after screening
            for (jL = 0; jL < nimgs; jL++) {
                shift_bas(env_loc, env, Ls, jptrxyz, jL);
                rj = env_loc + jptrxyz;
                // >>>>>>>>
                // skip this image pair entirely; pbuf/bufexp stay compacted
                dij2 = get_dsqure(ri, rj);
                if(dij2 > dij2_cut) {
                    continue;
                }
                idij = (int)(sqrt(dij2)*inv_d0);
                uniq_Rcut2s_K = uniq_Rcut2s_IJ + idij * nbasauxuniq;
                // <<<<<<<<
                get_rc(rc, ri, rj, ei, ej);
                for (ksh = msh0; ksh < msh1; ksh++) {
                    shls[2] = ksh;
                    dk = ao_loc[ksh+1] - ao_loc[ksh];
                    dijkc = dij * dk * comp;
                    Ksh = auxuniqshl_map[ksh-2*nbas];
                    Rcut2 = uniq_Rcut2s_K[Ksh];
                    kptrxyz = atm[PTR_COORD+bas[ATOM_OF+
                              ksh*BAS_SLOTS]*ATM_SLOTS];
                    rk = env_loc + kptrxyz;
                    Rijk2 = get_dsqure(rc, rk);
                    if (Rijk2 < Rcut2) {
                        // SR = full-range (omega=0) minus long-range (omega)
                        env_loc[PTR_RANGE_OMEGA] = 0.;
                        if ((*intor)(pbuf, NULL, shls, atm, natm,
                                     bas, nbas, env_loc, cintopt,
                                     cache)) {
                            empty = 0;   // flag only; not consulted here
                        }
                        env_loc[PTR_RANGE_OMEGA] = omega;
                        if ((*intor)(pbuf2, NULL, shls, atm, natm,
                                     bas, nbas, env_loc, cintopt,
                                     cache)) {
                            for (i = 0; i < dijkc; i++) {
                                pbuf[i] -= pbuf2[i];
                            }
                        }
                    } else {
                        // beyond cutoff: contribute exact zeros
                        for (i = 0; i < dijkc; i++) {
                            pbuf[i] = 0;
                        }
                    } // if Rijk2
                    pbuf += dijkc;
                } // ksh
                // ('k,kL->kL', conj(expkL[iL]), expkL)
                // store the phase for this kept jL at compacted row jLcount
                for (i = 0; i < nkpts; i++) {
                    kiLjc = i*nimgs+jLcount;
                    kiLj = i*nimgs+jL;
                    kiLi = i*nimgs+iL;
                    bufexp_r[kiLjc] = expkL_r[kiLj] * expkL_r[kiLi];
                    bufexp_r[kiLjc]+= expkL_i[kiLj] * expkL_i[kiLi];
                    bufexp_i[kiLjc] = expkL_i[kiLj] * expkL_r[kiLi];
                    bufexp_i[kiLjc]-= expkL_r[kiLj] * expkL_i[kiLi];
                }
                ++jLcount;
            } // jL
            // contract the jLcount kept columns with the compacted phases;
            // B is stored [nkpts][nimgs], hence ldb = nimgs with K = jLcount
            dgemm_(&TRANS_N, &TRANS_N, &dijmc, &nkpts, &jLcount,
                   &D1, bufL, &dijmc, bufexp_r, &nimgs, &D1, bufk_r, &dijmc);
            dgemm_(&TRANS_N, &TRANS_N, &dijmc, &nkpts, &jLcount,
                   &D1, bufL, &dijmc, bufexp_i, &nimgs, &D1, bufk_i, &dijmc);
        } // iL
        (*fsort)(out, bufk_r, bufk_i, shls_slice, ao_loc, nkpts, comp,
                 ish, jsh, msh0, msh1);
    }
}
/* Entry point for the short-range 3c2e fill with a single k-point list,
 * no BvK contraction, s1 (no permutational symmetry) output ordering.
 * Thin forwarder: binds the s1 sorting routine and delegates to `_nr3c_k`. */
void PBCsr3c_ks1(int (*intor)(), double complex *out, int nkpts_ij,
                 int nkpts, int comp, int nimgs, int ish, int jsh,
                 double *buf, double *env_loc, double *Ls,
                 double *expkL_r, double *expkL_i, int *kptij_idx,
                 int *shls_slice, int *ao_loc,
                 CINTOpt *cintopt,
                 int *refuniqshl_map, int *auxuniqshl_map,
                 int nbasauxuniq, double *uniqexp,
                 double *uniq_dcut2s, double dcut_binsize,
                 double *uniq_Rcut2s, int *uniqshlpr_dij_loc,
                 int *atm, int natm, int *bas, int nbas, double *env)
{
    _nr3c_k(intor, &sort3c_ks1, out,
            nkpts_ij, nkpts, comp, nimgs, ish, jsh,
            buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx,
            shls_slice, ao_loc, cintopt,
            refuniqshl_map, auxuniqshl_map, nbasauxuniq, uniqexp,
            uniq_dcut2s, dcut_binsize, uniq_Rcut2s, uniqshlpr_dij_loc,
            atm, natm, bas, nbas, env);
}
/* Entry point for the short-range 3c2e fill with a single k-point list,
 * no BvK contraction, s2 (i >= j) output ordering.
 *
 * Dispatches on the global shell-pair ordering: i > j and i == j blocks get
 * their dedicated sorters; ip < jp blocks produce no work here — presumably
 * recovered from the (jp, ip) block by the s2 symmetry (verify in sorter). */
void PBCsr3c_ks2(int (*intor)(), double complex *out, int nkpts_ij,
                 int nkpts, int comp, int nimgs, int ish, int jsh,
                 double *buf, double *env_loc, double *Ls,
                 double *expkL_r, double *expkL_i, int *kptij_idx,
                 int *shls_slice, int *ao_loc,
                 CINTOpt *cintopt,
                 int *refuniqshl_map, int *auxuniqshl_map,
                 int nbasauxuniq, double *uniqexp,
                 double *uniq_dcut2s, double dcut_binsize,
                 double *uniq_Rcut2s, int *uniqshlpr_dij_loc,
                 int *atm, int natm, int *bas, int nbas, double *env)
{
    const int ip = ish + shls_slice[0];
    const int jp = jsh + shls_slice[2] - nbas;
    void (*fsort)() = NULL;
    if (ip > jp) {
        fsort = &sort3c_ks2_igtj;
    } else if (ip == jp) {
        fsort = &sort3c_ks2_ieqj;
    }
    if (fsort != NULL) {
        _nr3c_k(intor, fsort, out,
                nkpts_ij, nkpts, comp, nimgs, ish, jsh,
                buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx,
                shls_slice, ao_loc, cintopt,
                refuniqshl_map, auxuniqshl_map, nbasauxuniq, uniqexp,
                uniq_dcut2s, dcut_binsize, uniq_Rcut2s, uniqshlpr_dij_loc,
                atm, natm, bas, nbas, env);
    }
}
/* OpenMP driver for the single-k-point-list short-range 3c2e fill without
 * BvK contraction.  Splits expkL into real/imag arrays for dgemm, allocates
 * per-thread scratch, and dynamically schedules all (ish, jsh) shell pairs,
 * skipping pairs screened out by shlpr_mask.  `fill` is PBCsr3c_ks1/ks2.
 * Each thread owns a private env copy since the kernels shift coordinates
 * in env_loc in place. */
void PBCsr3c_k_drv(int (*intor)(), void (*fill)(), double *out,
                   int nkpts_ij, int nkpts,
                   int comp, int nimgs,
                   double *Ls,
                   double complex *expkL,
                   int *kptij_idx,
                   int *shls_slice, int *ao_loc,
                   CINTOpt *cintopt,
                   int8_t *shlpr_mask,
                   int *refuniqshl_map, int *auxuniqshl_map,
                   int nbasauxuniq, double *uniqexp,
                   double *uniq_dcut2s, double dcut_binsize,
                   double *uniq_Rcut2s, int *uniqshlpr_dij_loc,
                   int *atm, int natm, int *bas, int nbas, double *env,
                   int nenv)
{
    const int nish = shls_slice[1] - shls_slice[0];
    const int njsh = shls_slice[3] - shls_slice[2];
    // unpack exp(i k.L) into separate real and imaginary planes
    double *expkL_r = malloc(sizeof(double) * nimgs*nkpts * OF_CMPLX);
    double *expkL_i = expkL_r + nimgs*nkpts;
    int n;
    for (n = 0; n < nimgs*nkpts; n++) {
        expkL_r[n] = creal(expkL[n]);
        expkL_i[n] = cimag(expkL[n]);
    }
    // per-thread scratch sized for the fill kernel's buffer layout
    const int dijk = GTOmax_shell_dim(ao_loc, shls_slice+0, 1)
                   * GTOmax_shell_dim(ao_loc, shls_slice+2, 1)
                   * GTOmax_shell_dim(ao_loc, shls_slice+4, 1);
    const int dijmk = MAX(INTBUFMAX10, dijk);
    const size_t count = (nkpts*OF_CMPLX + nimgs + 2) * dijmk * comp +
                         nkpts*nimgs*OF_CMPLX;
    const int cache_size = GTOmax_cache_size(intor, shls_slice, 3,
                                             atm, natm, bas, nbas, env);
#pragma omp parallel
{
    int ij, ish, jsh;
    // private copy of env: kernels write shifted coordinates into it
    double *env_loc = malloc(sizeof(double)*nenv);
    memcpy(env_loc, env, sizeof(double)*nenv);
    double *buf = malloc(sizeof(double)*(count+cache_size));
#pragma omp for schedule(dynamic)
    for (ij = 0; ij < nish*njsh; ij++) {
        if (shlpr_mask[ij]) {
            ish = ij / njsh;
            jsh = ij % njsh;
            (*fill)(intor, out, nkpts_ij, nkpts, comp, nimgs, ish, jsh,
                    buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx,
                    shls_slice, ao_loc, cintopt,
                    refuniqshl_map, auxuniqshl_map,
                    nbasauxuniq, uniqexp,
                    uniq_dcut2s, dcut_binsize,
                    uniq_Rcut2s, uniqshlpr_dij_loc,
                    atm, natm, bas, nbas, env);
        }
    }
    free(env_loc);
    free(buf);
}
    free(expkL_r);
}
// k-point pairs, bvk
/* Scatter one (ish, jsh) block of real/imag integral buffers into the
 * complex output array, s1 (no symmetry) ordering, for k-point pairs.
 *
 * bufr/bufi are laid out as [ik*nkpts+jk][ksh][comp][k in dk][j][i] for the
 * auxiliary-shell window [msh0, msh1); out is addressed as
 * [kk in nkpts_ij][comp][i in naoi][j in naoj][k in naok].
 * kptij_idx[kk] encodes the k-point pair as ik*nkpts + jk. */
static void sort3c_kks1(double complex *out, double *bufr, double *bufi,
                        int *kptij_idx, int *shls_slice, int *ao_loc,
                        int nkpts, int nkpts_ij, int comp, int ish, int jsh,
                        int msh0, int msh1)
{
    const int ish0 = shls_slice[0];
    const int ish1 = shls_slice[1];
    const int jsh0 = shls_slice[2];
    const int jsh1 = shls_slice[3];
    const int ksh0 = shls_slice[4];
    const int ksh1 = shls_slice[5];
    const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
    const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
    const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
    const size_t njk = naoj * naok;
    const size_t nijk = njk * naoi;
    const int di = ao_loc[ish+1] - ao_loc[ish];
    const int dj = ao_loc[jsh+1] - ao_loc[jsh];
    const int ip = ao_loc[ish] - ao_loc[ish0];   // AO offset of shell i
    const int jp = ao_loc[jsh] - ao_loc[jsh0];   // AO offset of shell j
    const int dij = di * dj;
    const int dkmax = ao_loc[msh1] - ao_loc[msh0];
    const size_t dijmc = dij * dkmax * comp;     // buffer stride per k-pair
    out += (ip * naoj + jp) * naok;              // top-left of the (i,j) block
    int i, j, k, kk, ik, jk, ksh, ic, dk, dijk;
    size_t off;
    double *pbr, *pbi;
    double complex *pout;
    for (kk = 0; kk < nkpts_ij; kk++) {
        ik = kptij_idx[kk] / nkpts;
        jk = kptij_idx[kk] % nkpts;
        off = (ik*nkpts+jk) * dijmc;             // start of this pair's buffer
        for (ksh = msh0; ksh < msh1; ksh++) {
            dk = ao_loc[ksh+1] - ao_loc[ksh];
            dijk = dij * dk;
            for (ic = 0; ic < comp; ic++) {
                pout = out + nijk*ic + ao_loc[ksh]-ao_loc[ksh0];
                pbr = bufr + off + dijk*ic;
                pbi = bufi + off + dijk*ic;
                // buffer is i-fastest (Fortran order); output is k-fastest
                for (j = 0; j < dj; j++) {
                    for (k = 0; k < dk; k++) {
                        for (i = 0; i < di; i++) {
                            pout[i*njk+k] = pbr[k*dij+i] +
                                            pbi[k*dij+i]*_Complex_I;
                        }
                    }
                    pout += naok;
                    pbr += di;
                    pbi += di;
                }
            }
            off += dijk * comp;
        }
        out += nijk * comp;
    }
}
/* Scatter one (ish, jsh) block with ish > jsh into the complex output,
 * filling BOTH triangles of the square (i, j) AO matrix for k-point pairs:
 * the (i, j) block is taken from the (ik, jk) buffer as-is, and the (j, i)
 * block is taken from the transposed-pair (jk, ik) buffer with its imaginary
 * part negated (complex conjugate).  Requires a square i/j AO range
 * (asserted below).  Buffer/output layouts are as in sort3c_kks1. */
static void sort3c_kks2_igtj(double complex *out, double *bufr, double *bufi,
                             int *kptij_idx, int *shls_slice, int *ao_loc,
                             int nkpts, int nkpts_ij, int comp, int ish, int jsh,
                             int msh0, int msh1)
{
    const int ish0 = shls_slice[0];
    const int ish1 = shls_slice[1];
    const int jsh0 = shls_slice[2];
    const int jsh1 = shls_slice[3];
    const int ksh0 = shls_slice[4];
    const int ksh1 = shls_slice[5];
    const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
    const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
    const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
    assert(naoi == naoj);
    const size_t njk = naoj * naok;
    const size_t nijk = njk * naoi;
    const int di = ao_loc[ish+1] - ao_loc[ish];
    const int dj = ao_loc[jsh+1] - ao_loc[jsh];
    const int ip = ao_loc[ish] - ao_loc[ish0];
    const int jp = ao_loc[jsh] - ao_loc[jsh0];
    const int dij = di * dj;
    const int dkmax = ao_loc[msh1] - ao_loc[msh0];
    const size_t dijmc = dij * dkmax * comp;
    // destinations of the direct (i, j) and transposed (j, i) blocks
    double complex *outij = out + (ip * naoj + jp) * naok;
    double complex *outji = out + (jp * naoj + ip) * naok;
    int i, j, k, kk, ik, jk, ksh, ic, dk, dijk;
    size_t offij, offji;
    double *pbij_r, *pbij_i, *pbji_r, *pbji_i;
    double complex *poutij, *poutji;
    for (kk = 0; kk < nkpts_ij; kk++) {
        ik = kptij_idx[kk] / nkpts;
        jk = kptij_idx[kk] % nkpts;
        offij = (ik*nkpts+jk) * dijmc;   // buffer of the (ik, jk) pair
        offji = (jk*nkpts+ik) * dijmc;   // buffer of the swapped (jk, ik) pair
        for (ksh = msh0; ksh < msh1; ksh++) {
            dk = ao_loc[ksh+1] - ao_loc[ksh];
            dijk = dij * dk;
            for (ic = 0; ic < comp; ic++) {
                poutij = outij + nijk*ic + ao_loc[ksh]-ao_loc[ksh0];
                poutji = outji + nijk*ic + ao_loc[ksh]-ao_loc[ksh0];
                pbij_r = bufr + offij + dijk*ic;
                pbij_i = bufi + offij + dijk*ic;
                pbji_r = bufr + offji + dijk*ic;
                pbji_i = bufi + offji + dijk*ic;
                for (j = 0; j < dj; j++) {
                    for (k = 0; k < dk; k++) {
                        for (i = 0; i < di; i++) {
                            // direct block; transposed block is conjugated
                            poutij[i*njk +k] = pbij_r[k*dij+i] + pbij_i[k*dij+i]*_Complex_I;
                            poutji[i*naok+k] = pbji_r[k*dij+i] - pbji_i[k*dij+i]*_Complex_I;
                        }
                    }
                    poutij += naok;  // advance one j row in the (i, j) block
                    poutji += njk;   // advance one i row in the (j, i) block
                    pbij_r += di;
                    pbij_i += di;
                    pbji_r += di;
                    pbji_i += di;
                }
            }
            offij += dijk * comp;
            offji += dijk * comp;
        }
        outij += nijk * comp;
        outji += nijk * comp;
    }
}
/* Short-range 3-center integral kernel for one (ish, jsh) shell pair,
 * k-point pairs with BvK supercell contraction.
 *
 * Lattice images are grouped into BvK cells via cell_loc_bvk; integrals are
 * accumulated per (iL_bvk, jL_bvk) cell pair into bufL, then contracted with
 * expkL in two dgemm stages: first over jL_bvk (forming bufkL for a block of
 * iL_bvk cells), then over iL_bvk with conjugated phases (forming bufkk,
 * the per-(ik, jk) accumulators; the conjugation is realized by the D1/ND1
 * sign pattern of the last four dgemm calls).  The SR integral of each
 * auxiliary shell is intor(omega=0) - intor(omega), with distance screening
 * on both the (i, j) image pair and the charge-center-to-k distance.
 * `fsort` scatters bufkk into `out`. */
static void _nr3c_bvk_kk(int (*intor)(), void (*fsort)(),
                         double complex *out, int nkpts_ij,
                         int nkpts, int comp, int nimgs, int bvk_nimgs,
                         int ish, int jsh, int *cell_loc_bvk,
                         double *buf, double *env_loc, double *Ls,
                         double *expkL_r, double *expkL_i, int *kptij_idx,
                         int *shls_slice, int *ao_loc,
                         CINTOpt *cintopt,
                         int *refuniqshl_map, int *auxuniqshl_map,
                         int nbasauxuniq, double *uniqexp,
                         double *uniq_dcut2s, double dcut_binsize,
                         double *uniq_Rcut2s, int *uniqshlpr_dij_loc,
                         int *atm, int natm, int *bas, int nbas, double *env)
{
    const int ish0 = shls_slice[0];
    const int jsh0 = shls_slice[2];
    const int ksh0 = shls_slice[4];
    const int ksh1 = shls_slice[5];
    const char TRANS_N = 'N';
    const double D0 = 0;
    const double D1 = 1;
    const double ND1 = -1;
    jsh += jsh0;
    ish += ish0;
    // offsets of the i/j shell coordinates inside env_loc
    int iptrxyz = atm[PTR_COORD+bas[ATOM_OF+ish*BAS_SLOTS]*ATM_SLOTS];
    int jptrxyz = atm[PTR_COORD+bas[ATOM_OF+jsh*BAS_SLOTS]*ATM_SLOTS];
    int kptrxyz;
    const int di = ao_loc[ish+1] - ao_loc[ish];
    const int dj = ao_loc[jsh+1] - ao_loc[jsh];
    const int dij = di * dj;
    const int dkaomax = GTOmax_shell_dim(ao_loc, shls_slice+4, 1);
    int dkmax = MAX(INTBUFMAX / dij, dkaomax);
    // partition the auxiliary shell range into buffer-sized windows
    int kshloc[ksh1-ksh0+1];
    int nkshloc = shloc_partition(kshloc, ao_loc, ksh0, ksh1, dkmax);
    int i, m, msh0, msh1, dijm, dijmc, dijmk, dijkc, empty;
    int ksh, dk;
    int iL_bvk, iL0_bvk, iLcount_bvk, iL0, iL1, iL, jL_bvk, jL0, jL1, jL;
    int shls[3];
    double *bufkk_r, *bufkk_i, *bufkL_r, *bufkL_i, *bufL, *pbuf, *cache;
    double *buf_rs, *buf_rs0, *pbuf_rs;
    const double omega = fabs(env_loc[PTR_RANGE_OMEGA]);
    shls[0] = ish;
    shls[1] = jsh;
    // >>>>>>>>
    // screening tables indexed by the unique-shell pair (Ish, Jsh)
    int Ish, Jsh, IJsh, Ksh, idij;
    Ish = refuniqshl_map[ish];
    Jsh = refuniqshl_map[jsh-nbas];
    IJsh = (Ish>=Jsh)?(Ish*(Ish+1)/2+Jsh):(Jsh*(Jsh+1)/2+Ish);
    const double *uniq_Rcut2s_IJ, *uniq_Rcut2s_K;
    uniq_Rcut2s_IJ = uniq_Rcut2s + uniqshlpr_dij_loc[IJsh] * nbasauxuniq;
    double *ri, *rj, *rk, rc[3];
    double dij2, dij2_cut, inv_d0, Rijk2, Rcut2, ei, ej;
    inv_d0 = 1./dcut_binsize;          // reciprocal of the |ri-rj| bin width
    dij2_cut = uniq_dcut2s[IJsh];      // squared distance cutoff for (i, j)
    ei = uniqexp[Ish];
    ej = uniqexp[Jsh];
    // <<<<<<<<
    for (m = 0; m < nkshloc; m++) {
        msh0 = kshloc[m];
        msh1 = kshloc[m+1];
        dkmax = ao_loc[msh1] - ao_loc[msh0];
        dijm = dij * dkmax;
        dijmc = dijm * comp;
        dijmk = dijmc * nkpts;
        // carve work areas out of the caller-provided buffer:
        //   bufkk  : (ik, jk) accumulators;  bufkL : per-iL_bvk k-sums
        //   bufL   : per-jL_bvk cell sums;   buf_rs0/pbuf_rs : SR scratch
        bufkk_r = buf;
        bufkk_i = bufkk_r + (size_t)nkpts * dijmk;
        bufkL_r = bufkk_i + (size_t)nkpts * dijmk;
        bufkL_i = bufkL_r + (size_t)MIN(bvk_nimgs,IMGBLK) * dijmk;
        bufL = bufkL_i + (size_t)MIN(bvk_nimgs,IMGBLK) * dijmk;
        buf_rs0 = bufL + (size_t)bvk_nimgs * dijmc;
        pbuf_rs = buf_rs0 + (size_t)dijmc;
        cache = pbuf_rs + (size_t)dijmc;
        for (i = 0; i < nkpts*dijmk*OF_CMPLX; i++) {
            bufkk_r[i] = 0;
        }
        // iL_bvk cells are processed in blocks of IMGBLK
        for (iL0_bvk = 0; iL0_bvk < bvk_nimgs; iL0_bvk+=IMGBLK) {
            iLcount_bvk = MIN(IMGBLK, bvk_nimgs - iL0_bvk);
            for (iL_bvk = iL0_bvk; iL_bvk < iL0_bvk+iLcount_bvk; iL_bvk++) {
                for (i = 0; i < dijmc*bvk_nimgs; i++) {
                    bufL[i] = 0;
                }
                iL0 = cell_loc_bvk[iL_bvk];
                iL1 = cell_loc_bvk[iL_bvk+1];
                for (jL_bvk = 0; jL_bvk < bvk_nimgs; jL_bvk++) {
                    pbuf = bufL + dijmc * jL_bvk;
                    jL0 = cell_loc_bvk[jL_bvk];
                    jL1 = cell_loc_bvk[jL_bvk+1];
                    for (iL = iL0; iL < iL1; iL++) {
                        shift_bas(env_loc, env, Ls, iptrxyz, iL);
                        ri = env_loc + iptrxyz;
                        for (jL = jL0; jL < jL1; jL++) {
                            shift_bas(env_loc, env, Ls, jptrxyz, jL);
                            rj = env_loc + jptrxyz;
                            // >>>>>>>>
                            // screened pairs add nothing; bufL was zeroed
                            dij2 = get_dsqure(ri, rj);
                            if(dij2 > dij2_cut) {
                                continue;
                            }
                            idij = (int)(sqrt(dij2)*inv_d0);
                            uniq_Rcut2s_K = uniq_Rcut2s_IJ + idij * nbasauxuniq;
                            // <<<<<<<<
                            get_rc(rc, ri, rj, ei, ej);
                            buf_rs = buf_rs0;
                            for (ksh = msh0; ksh < msh1; ksh++) {
                                shls[2] = ksh;
                                dk = ao_loc[ksh+1] - ao_loc[ksh];
                                dijkc = dij * dk * comp;
                                Ksh = auxuniqshl_map[ksh-2*nbas];
                                Rcut2 = uniq_Rcut2s_K[Ksh];
                                kptrxyz = atm[PTR_COORD+bas[ATOM_OF+
                                          ksh*BAS_SLOTS]*ATM_SLOTS];
                                rk = env_loc + kptrxyz;
                                Rijk2 = get_dsqure(rc, rk);
                                if (Rijk2 < Rcut2) {
                                    // SR = full-range minus long-range
                                    env_loc[PTR_RANGE_OMEGA] = 0.;
                                    if ((*intor)(buf_rs, NULL, shls, atm, natm,
                                                 bas, nbas, env_loc, cintopt,
                                                 cache)) {
                                        empty = 0;   // flag; not read here
                                    }
                                    env_loc[PTR_RANGE_OMEGA] = omega;
                                    if ((*intor)(pbuf_rs, NULL, shls, atm, natm,
                                                 bas, nbas, env_loc, cintopt,
                                                 cache)) {
                                        for (i = 0; i < dijkc; i++) {
                                            buf_rs[i] -= pbuf_rs[i];
                                        }
                                    }
                                } else {
                                    for (i = 0; i < dijkc; i++) {
                                        buf_rs[i] = 0;
                                    }
                                } // if Rijk2
                                buf_rs += dijkc;
                            } // ksh
                            // accumulate this image pair into its jL_bvk cell
                            for (i = 0; i < dijmc; i++) {
                                pbuf[i] += buf_rs0[i];
                            }
                        } // jL
                    } // iL
                } // jL_bvk
                // contract the jL_bvk axis with expkL -> bufkL[iL_bvk]
                dgemm_(&TRANS_N, &TRANS_N, &dijmc, &nkpts, &bvk_nimgs,
                       &D1, bufL, &dijmc, expkL_r, &bvk_nimgs,
                       &D0, bufkL_r+(iL_bvk-iL0_bvk)*(size_t)dijmk, &dijmc);
                dgemm_(&TRANS_N, &TRANS_N, &dijmc, &nkpts, &bvk_nimgs,
                       &D1, bufL, &dijmc, expkL_i, &bvk_nimgs,
                       &D0, bufkL_i+(iL_bvk-iL0_bvk)*(size_t)dijmk, &dijmc);
            } // iL_bvk
            // conj(exp(1j*dot(h,k)))
            // multiply by conj(expkL[iL_bvk]) and accumulate into bufkk:
            //   Re += Rr*Er + Ri*Ei ;  Im += Ri*Er - Rr*Ei
            dgemm_(&TRANS_N, &TRANS_N, &dijmk, &nkpts, &iLcount_bvk,
                   &D1, bufkL_r, &dijmk, expkL_r+(size_t)iL0_bvk, &bvk_nimgs,
                   &D1, bufkk_r, &dijmk);
            dgemm_(&TRANS_N, &TRANS_N, &dijmk, &nkpts, &iLcount_bvk,
                   &D1, bufkL_i, &dijmk, expkL_i+(size_t)iL0_bvk, &bvk_nimgs,
                   &D1, bufkk_r, &dijmk);
            dgemm_(&TRANS_N, &TRANS_N, &dijmk, &nkpts, &iLcount_bvk,
                   &D1, bufkL_i, &dijmk, expkL_r+(size_t)iL0_bvk, &bvk_nimgs,
                   &D1, bufkk_i, &dijmk);
            dgemm_(&TRANS_N, &TRANS_N, &dijmk, &nkpts, &iLcount_bvk,
                   &ND1, bufkL_r, &dijmk, expkL_i+(size_t)iL0_bvk, &bvk_nimgs,
                   &D1, bufkk_i, &dijmk);
        } // iL0_bvk
        (*fsort)(out, bufkk_r, bufkk_i, kptij_idx, shls_slice, ao_loc,
                 nkpts, nkpts_ij, comp, ish, jsh, msh0, msh1);
    } // m
}
/* Entry point for the short-range 3c2e fill over k-point pairs with BvK
 * contraction, s2 (i >= j) output ordering.
 *
 * Dispatches on the global shell-pair ordering: the i > j sorter writes both
 * triangles, the diagonal uses the plain s1 sorter.  ip < jp blocks produce
 * no work here — presumably recovered from the (jp, ip) block by the s2
 * symmetry (verify against the sorter). */
void PBCsr3c_bvk_kks2(int (*intor)(), double complex *out, int nkpts_ij,
                      int nkpts, int comp, int nimgs, int bvk_nimgs,
                      int ish, int jsh, int *cell_loc_bvk,
                      double *buf, double *env_loc, double *Ls,
                      double *expkL_r, double *expkL_i, int *kptij_idx,
                      int *shls_slice, int *ao_loc,
                      CINTOpt *cintopt,
                      int *refuniqshl_map, int *auxuniqshl_map,
                      int nbasauxuniq, double *uniqexp,
                      double *uniq_dcut2s, double dcut_binsize,
                      double *uniq_Rcut2s, int *uniqshlpr_dij_loc,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
    const int ip = ish + shls_slice[0];
    const int jp = jsh + shls_slice[2] - nbas;
    void (*fsort)() = NULL;
    if (ip > jp) {
        fsort = &sort3c_kks2_igtj;
    } else if (ip == jp) {
        fsort = &sort3c_kks1;
    }
    if (fsort != NULL) {
        _nr3c_bvk_kk(intor, fsort, out,
                     nkpts_ij, nkpts, comp, nimgs, bvk_nimgs,
                     ish, jsh, cell_loc_bvk,
                     buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx,
                     shls_slice, ao_loc, cintopt,
                     refuniqshl_map, auxuniqshl_map,
                     nbasauxuniq, uniqexp,
                     uniq_dcut2s, dcut_binsize,
                     uniq_Rcut2s, uniqshlpr_dij_loc,
                     atm, natm, bas, nbas, env);
    }
}
/* OpenMP driver for the k-point-pair, BvK-contracted short-range 3c2e fill.
 * Splits expkL into real/imag arrays for dgemm, allocates per-thread scratch
 * sized for the _nr3c_bvk_kk buffer layout, and dynamically schedules all
 * (ish, jsh) shell pairs, skipping pairs screened out by shlpr_mask.
 * Each thread owns a private env copy since the kernels shift coordinates
 * in env_loc in place. */
void PBCsr3c_bvk_kk_drv(int (*intor)(), void (*fill)(), double *out,
                        int nkpts_ij, int nkpts,
                        int comp, int nimgs, int bvk_nimgs,
                        double *Ls,
                        double complex *expkL,
                        int *kptij_idx,
                        int *shls_slice, int *ao_loc,
                        CINTOpt *cintopt,
                        int *cell_loc_bvk, int8_t *shlpr_mask,
                        int *refuniqshl_map, int *auxuniqshl_map,
                        int nbasauxuniq, double *uniqexp,
                        double *uniq_dcut2s, double dcut_binsize,
                        double *uniq_Rcut2s, int *uniqshlpr_dij_loc,
                        int *atm, int natm, int *bas, int nbas, double *env,
                        int nenv)
{
    const int nish = shls_slice[1] - shls_slice[0];
    const int njsh = shls_slice[3] - shls_slice[2];
    // unpack exp(i k.L) into separate real and imaginary planes
    double *expkL_r = malloc(sizeof(double) * bvk_nimgs*nkpts * OF_CMPLX);
    double *expkL_i = expkL_r + bvk_nimgs*nkpts;
    int n;
    for (n = 0; n < bvk_nimgs*nkpts; n++) {
        expkL_r[n] = creal(expkL[n]);
        expkL_i[n] = cimag(expkL[n]);
    }
    // per-thread scratch sized for the fill kernel's buffer layout
    const int dijk = GTOmax_shell_dim(ao_loc, shls_slice+0, 1)
                   * GTOmax_shell_dim(ao_loc, shls_slice+2, 1)
                   * GTOmax_shell_dim(ao_loc, shls_slice+4, 1);
    const int dijmk = MAX(INTBUFMAX, dijk);
    const size_t count = ((nkpts + MIN(bvk_nimgs,IMGBLK))*nkpts * OF_CMPLX +
                          bvk_nimgs + 2) * dijmk * comp;
    const int cache_size = GTOmax_cache_size(intor, shls_slice, 3,
                                             atm, natm, bas, nbas, env);
#pragma omp parallel
{
    int ij, ish, jsh;
    // private copy of env: kernels write shifted coordinates into it
    double *env_loc = malloc(sizeof(double)*nenv);
    memcpy(env_loc, env, sizeof(double)*nenv);
    double *buf = malloc(sizeof(double)*(count+cache_size));
#pragma omp for schedule(dynamic)
    for (ij = 0; ij < nish*njsh; ij++) {
        if (shlpr_mask[ij]) {
            ish = ij / njsh;
            jsh = ij % njsh;
            (*fill)(intor, out, nkpts_ij, nkpts, comp, nimgs, bvk_nimgs,
                    ish, jsh, cell_loc_bvk,
                    buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx,
                    shls_slice, ao_loc, cintopt,
                    refuniqshl_map, auxuniqshl_map,
                    nbasauxuniq, uniqexp,
                    uniq_dcut2s, dcut_binsize,
                    uniq_Rcut2s, uniqshlpr_dij_loc,
                    atm, natm, bas, nbas, env);
        }
    }
    free(env_loc);
    free(buf);
}
    free(expkL_r);
}
// k-point pairs, no bvk
/* Short-range 3-center integral kernel for one (ish, jsh) shell pair,
 * k-point pairs, no BvK contraction.
 *
 * For each lattice image iL (blocked by IMGBLK) the integrals over all jL
 * images are packed densely into bufL (screened slots are explicitly
 * zero-filled so every one of the nimgs columns is valid), then contracted
 * with expkL over jL (-> bufkL) and with conjugated phases over iL
 * (-> bufkk; the conjugation is realized by the D1/ND1 sign pattern of the
 * last four dgemm calls).  The SR integral of each auxiliary shell is
 * intor(omega=0) - intor(omega), with distance screening on both the (i, j)
 * image pair and the charge-center-to-k distance.  `fsort` scatters bufkk
 * into `out`. */
static void _nr3c_kk(int (*intor)(), void (*fsort)(),
                     double complex *out, int nkpts_ij,
                     int nkpts, int comp, int nimgs, int ish, int jsh,
                     double *buf, double *env_loc, double *Ls,
                     double *expkL_r, double *expkL_i, int *kptij_idx,
                     int *shls_slice, int *ao_loc,
                     CINTOpt *cintopt,
                     int *refuniqshl_map, int *auxuniqshl_map,
                     int nbasauxuniq, double *uniqexp,
                     double *uniq_dcut2s, double dcut_binsize,
                     double *uniq_Rcut2s, int *uniqshlpr_dij_loc,
                     int *atm, int natm, int *bas, int nbas, double *env)
{
    const int ish0 = shls_slice[0];
    const int jsh0 = shls_slice[2];
    const int ksh0 = shls_slice[4];
    const int ksh1 = shls_slice[5];
    const char TRANS_N = 'N';
    const double D0 = 0;
    const double D1 = 1;
    const double ND1 = -1;
    jsh += jsh0;
    ish += ish0;
    // offsets of the i/j shell coordinates inside env_loc
    int iptrxyz = atm[PTR_COORD+bas[ATOM_OF+ish*BAS_SLOTS]*ATM_SLOTS];
    int jptrxyz = atm[PTR_COORD+bas[ATOM_OF+jsh*BAS_SLOTS]*ATM_SLOTS];
    int kptrxyz;
    const int di = ao_loc[ish+1] - ao_loc[ish];
    const int dj = ao_loc[jsh+1] - ao_loc[jsh];
    const int dij = di * dj;
    const int dkaomax = GTOmax_shell_dim(ao_loc, shls_slice+4, 1);
    int dkmax = MAX(INTBUFMAX / dij, dkaomax); // buf can hold at least 1 ksh
    // partition the auxiliary shell range into buffer-sized windows
    int kshloc[ksh1-ksh0+1];
    int nkshloc = shloc_partition(kshloc, ao_loc, ksh0, ksh1, dkmax);
    int i, m, msh0, msh1, dijm, dijmc, dijmk, dijkc, empty;
    int ksh, dk, iL0, iL, jL, iLcount;
    int shls[3];
    double *bufkk_r, *bufkk_i, *bufkL_r, *bufkL_i, *bufL, *pbuf, *pbuf2, *cache;
    shls[0] = ish;
    shls[1] = jsh;
    // >>>>>>>>>>
    // screening tables indexed by the unique-shell pair (Ish, Jsh)
    const double omega = fabs(env_loc[PTR_RANGE_OMEGA]);
    int Ish, Jsh, IJsh, Ksh, idij;
    Ish = refuniqshl_map[ish];
    Jsh = refuniqshl_map[jsh-nbas];
    IJsh = (Ish>=Jsh)?(Ish*(Ish+1)/2+Jsh):(Jsh*(Jsh+1)/2+Ish);
    const double *uniq_Rcut2s_IJ, *uniq_Rcut2s_K;
    uniq_Rcut2s_IJ = uniq_Rcut2s + uniqshlpr_dij_loc[IJsh] * nbasauxuniq;
    double *ri, *rj, *rk, rc[3];
    double dij2, dij2_cut, inv_d0, Rijk2, Rcut2, ei, ej;
    inv_d0 = 1./dcut_binsize;          // reciprocal of the |ri-rj| bin width
    dij2_cut = uniq_dcut2s[IJsh];      // squared distance cutoff for (i, j)
    ei = uniqexp[Ish];
    ej = uniqexp[Jsh];
    // <<<<<<<<<<
    for (m = 0; m < nkshloc; m++) {
        msh0 = kshloc[m];
        msh1 = kshloc[m+1];
        dkmax = ao_loc[msh1] - ao_loc[msh0];
        dijm = dij * dkmax;
        dijmc = dijm * comp;
        dijmk = dijmc * nkpts;
        // carve work areas out of the caller-provided buffer:
        //   bufkk : (ik, jk) accumulators;  bufkL : per-iL k-sums
        //   bufL  : per-jL integral columns;  pbuf2 : long-range scratch
        bufkk_r = buf;
        bufkk_i = bufkk_r + (size_t)nkpts * dijmk;
        bufkL_r = bufkk_i + (size_t)nkpts * dijmk;
        bufkL_i = bufkL_r + (size_t)MIN(nimgs,IMGBLK) * dijmk;
        bufL = bufkL_i + (size_t)MIN(nimgs,IMGBLK) * dijmk;
        pbuf2 = bufL + (size_t)nimgs * dijmc;
        cache = pbuf2 + dijmc;
        for (i = 0; i < nkpts*dijmk*OF_CMPLX; i++) {
            bufkk_r[i] = 0;
        }
        // iL images are processed in blocks of IMGBLK
        for (iL0 = 0; iL0 < nimgs; iL0+=IMGBLK) {
            iLcount = MIN(IMGBLK, nimgs - iL0);
            for (iL = iL0; iL < iL0+iLcount; iL++) {
                shift_bas(env_loc, env, Ls, iptrxyz, iL);
                ri = env_loc + iptrxyz;
                pbuf = bufL;
                for (jL = 0; jL < nimgs; jL++) {
                    shift_bas(env_loc, env, Ls, jptrxyz, jL);
                    rj = env_loc + jptrxyz;
                    // >>>>>>>>
                    // screened pair: zero-fill its whole column, because the
                    // dgemm below runs over all nimgs columns of bufL
                    dij2 = get_dsqure(ri, rj);
                    if(dij2 > dij2_cut) {
                        for (i = 0; i < dijmc; ++i) {
                            pbuf[i] = 0;
                        }
                        pbuf += dijmc;
                        continue;
                    }
                    idij = (int)(sqrt(dij2)*inv_d0);
                    uniq_Rcut2s_K = uniq_Rcut2s_IJ + idij * nbasauxuniq;
                    // <<<<<<<<
                    get_rc(rc, ri, rj, ei, ej);
                    for (ksh = msh0; ksh < msh1; ksh++) {
                        shls[2] = ksh;
                        dk = ao_loc[ksh+1] - ao_loc[ksh];
                        dijkc = dij * dk * comp;
                        Ksh = auxuniqshl_map[ksh-2*nbas];
                        Rcut2 = uniq_Rcut2s_K[Ksh];
                        kptrxyz = atm[PTR_COORD+bas[ATOM_OF+
                                  ksh*BAS_SLOTS]*ATM_SLOTS];
                        rk = env_loc + kptrxyz;
                        Rijk2 = get_dsqure(rc, rk);
                        if (Rijk2 < Rcut2) {
                            // SR = full-range (omega=0) minus long-range
                            env_loc[PTR_RANGE_OMEGA] = 0.;
                            if ((*intor)(pbuf, NULL, shls, atm, natm,
                                         bas, nbas, env_loc, cintopt,
                                         cache)) {
                                empty = 0;   // flag only; not consulted here
                            }
                            env_loc[PTR_RANGE_OMEGA] = omega;
                            if ((*intor)(pbuf2, NULL, shls, atm, natm,
                                         bas, nbas, env_loc, cintopt,
                                         cache)) {
                                for (i = 0; i < dijkc; i++) {
                                    pbuf[i] -= pbuf2[i];
                                }
                            }
                        } else {
                            for (i = 0; i < dijkc; i++) {
                                pbuf[i] = 0;
                            }
                        } // if Rijk2
                        pbuf += dijkc;
                    } // ksh
                }
                // contract the jL axis with expkL -> bufkL[iL-iL0]
                dgemm_(&TRANS_N, &TRANS_N, &dijmc, &nkpts, &nimgs,
                       &D1, bufL, &dijmc, expkL_r, &nimgs,
                       &D0, bufkL_r+(iL-iL0)*(size_t)dijmk, &dijmc);
                dgemm_(&TRANS_N, &TRANS_N, &dijmc, &nkpts, &nimgs,
                       &D1, bufL, &dijmc, expkL_i, &nimgs,
                       &D0, bufkL_i+(iL-iL0)*(size_t)dijmk, &dijmc);
            } // iL in range(0, nimgs)
            // conj(exp(1j*dot(h,k)))
            // multiply by conj(expkL[iL]) and accumulate into bufkk:
            //   Re += Rr*Er + Ri*Ei ;  Im += Ri*Er - Rr*Ei
            dgemm_(&TRANS_N, &TRANS_N, &dijmk, &nkpts, &iLcount,
                   &D1, bufkL_r, &dijmk, expkL_r+iL0, &nimgs,
                   &D1, bufkk_r, &dijmk);
            dgemm_(&TRANS_N, &TRANS_N, &dijmk, &nkpts, &iLcount,
                   &D1, bufkL_i, &dijmk, expkL_i+iL0, &nimgs,
                   &D1, bufkk_r, &dijmk);
            dgemm_(&TRANS_N, &TRANS_N, &dijmk, &nkpts, &iLcount,
                   &D1, bufkL_i, &dijmk, expkL_r+iL0, &nimgs,
                   &D1, bufkk_i, &dijmk);
            dgemm_(&TRANS_N, &TRANS_N, &dijmk, &nkpts, &iLcount,
                   &ND1, bufkL_r, &dijmk, expkL_i+iL0, &nimgs,
                   &D1, bufkk_i, &dijmk);
        } // iL0
        (*fsort)(out, bufkk_r, bufkk_i, kptij_idx, shls_slice,
                 ao_loc, nkpts, nkpts_ij, comp, ish, jsh,
                 msh0, msh1);
    } // m
}
/* Entry point for the short-range 3c2e fill over k-point pairs, no BvK
 * contraction, s1 (no permutational symmetry) output ordering.  Thin
 * forwarder: binds the kks1 sorting routine and delegates to `_nr3c_kk`. */
void PBCsr3c_kks1(int (*intor)(), double complex *out, int nkpts_ij,
                  int nkpts, int comp, int nimgs, int ish, int jsh,
                  double *buf, double *env_loc, double *Ls,
                  double *expkL_r, double *expkL_i, int *kptij_idx,
                  int *shls_slice, int *ao_loc,
                  CINTOpt *cintopt,
                  int *refuniqshl_map, int *auxuniqshl_map,
                  int nbasauxuniq, double *uniqexp,
                  double *uniq_dcut2s, double dcut_binsize,
                  double *uniq_Rcut2s, int *uniqshlpr_dij_loc,
                  int *atm, int natm, int *bas, int nbas, double *env)
{
    _nr3c_kk(intor, &sort3c_kks1, out,
             nkpts_ij, nkpts, comp, nimgs, ish, jsh,
             buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx,
             shls_slice, ao_loc, cintopt,
             refuniqshl_map, auxuniqshl_map, nbasauxuniq, uniqexp,
             uniq_dcut2s, dcut_binsize, uniq_Rcut2s, uniqshlpr_dij_loc,
             atm, natm, bas, nbas, env);
}
/* Entry point for the short-range 3c2e fill over k-point pairs, no BvK
 * contraction, s2 (i >= j) output ordering.
 *
 * Dispatches on the global shell-pair ordering: the i > j sorter writes both
 * triangles, the diagonal uses the plain kks1 sorter.  ip < jp blocks produce
 * no work here — presumably recovered from the (jp, ip) block by the s2
 * symmetry (verify against the sorter). */
void PBCsr3c_kks2(int (*intor)(), double complex *out, int nkpts_ij,
                  int nkpts, int comp, int nimgs, int ish, int jsh,
                  double *buf, double *env_loc, double *Ls,
                  double *expkL_r, double *expkL_i, int *kptij_idx,
                  int *shls_slice, int *ao_loc,
                  CINTOpt *cintopt,
                  int *refuniqshl_map, int *auxuniqshl_map,
                  int nbasauxuniq, double *uniqexp,
                  double *uniq_dcut2s, double dcut_binsize,
                  double *uniq_Rcut2s, int *uniqshlpr_dij_loc,
                  int *atm, int natm, int *bas, int nbas, double *env)
{
    const int ip = ish + shls_slice[0];
    const int jp = jsh + shls_slice[2] - nbas;
    void (*fsort)() = NULL;
    if (ip > jp) {
        fsort = &sort3c_kks2_igtj;
    } else if (ip == jp) {
        fsort = &sort3c_kks1;
    }
    if (fsort != NULL) {
        _nr3c_kk(intor, fsort, out,
                 nkpts_ij, nkpts, comp, nimgs, ish, jsh,
                 buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx,
                 shls_slice, ao_loc, cintopt,
                 refuniqshl_map, auxuniqshl_map, nbasauxuniq, uniqexp,
                 uniq_dcut2s, dcut_binsize, uniq_Rcut2s, uniqshlpr_dij_loc,
                 atm, natm, bas, nbas, env);
    }
}
/*
 * Driver: loop over all (ish, jsh) shell pairs in the slice, in parallel,
 * and call *fill (one of PBCsr3c_kks1/kks2) for each unmasked pair.
 * The complex phase factors expkL are split once into separate real and
 * imaginary arrays so the fill kernels can use real dgemm.
 */
void PBCsr3c_kk_drv(int (*intor)(), void (*fill)(), double *out,
                    int nkpts_ij, int nkpts,
                    int comp, int nimgs,
                    double *Ls,
                    double complex *expkL,
                    int *kptij_idx,
                    int *shls_slice, int *ao_loc,
                    CINTOpt *cintopt,
                    int8_t *shlpr_mask,
                    int *refuniqshl_map, int *auxuniqshl_map,
                    int nbasauxuniq, double *uniqexp,
                    double *uniq_dcut2s, double dcut_binsize,
                    double *uniq_Rcut2s, int *uniqshlpr_dij_loc,
                    int *atm, int natm, int *bas, int nbas, double *env,
                    int nenv)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int nish = ish1 - ish0;
        const int njsh = jsh1 - jsh0;

        // Single allocation for both parts; expkL_i aliases the second half
        // of expkL_r, so only expkL_r is freed at the end.
        double *expkL_r = malloc(sizeof(double) * nimgs*nkpts * OF_CMPLX);
        double *expkL_i = expkL_r + nimgs*nkpts;
        int i;
        for (i = 0; i < nimgs*nkpts; i++) {
                expkL_r[i] = creal(expkL[i]);
                expkL_i[i] = cimag(expkL[i]);
        }

        // Largest shell dimensions over the i, j and auxiliary slices,
        // used to size the per-thread integral scratch buffer.
        int di = GTOmax_shell_dim(ao_loc, shls_slice+0, 1);
        int dj = GTOmax_shell_dim(ao_loc, shls_slice+2, 1);
        int dk = GTOmax_shell_dim(ao_loc, shls_slice+4, 1);
        int dijk = di*dj*dk;
        int dijmk = MAX(INTBUFMAX, dijk);
        // NOTE(review): scratch accounts for the k-point accumulators
        // (complex), the per-image buffers and two extra dijmk panels --
        // mirrors the buffer layout used inside _nr3c_kk; confirm against it.
        size_t count = ((nkpts + MIN(nimgs,IMGBLK))*nkpts * OF_CMPLX +
                        nimgs + 2) * dijmk * comp;
        const int cache_size = GTOmax_cache_size(intor, shls_slice, 3,
                                                 atm, natm, bas, nbas, env);

#pragma omp parallel
{
        int ish, jsh, ij;
        // Each thread gets a private copy of env, since the kernels may
        // modify coordinates in place (e.g. lattice translations).
        double *env_loc = malloc(sizeof(double)*nenv);
        memcpy(env_loc, env, sizeof(double)*nenv);
        double *buf = malloc(sizeof(double)*(count+cache_size));
#pragma omp for schedule(dynamic)
        for (ij = 0; ij < nish*njsh; ij++) {
                ish = ij / njsh;
                jsh = ij % njsh;
                if (!shlpr_mask[ij]) {
                        continue;   // pair screened out by caller
                }
                (*fill)(intor, out, nkpts_ij, nkpts, comp, nimgs,
                        ish, jsh,
                        buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx,
                        shls_slice, ao_loc, cintopt,
                        refuniqshl_map, auxuniqshl_map,
                        nbasauxuniq, uniqexp,
                        uniq_dcut2s, dcut_binsize,
                        uniq_Rcut2s, uniqshlpr_dij_loc,
                        atm, natm, bas, nbas, env);
        }
        free(buf);
        free(env_loc);
}
        free(expkL_r);
}
|
SparseDenseProduct.h | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SPARSEDENSEPRODUCT_H
#define EIGEN_SPARSEDENSEPRODUCT_H
namespace Eigen {
namespace internal {
// An outer product between a sparse operand and a dense one produces a
// sparse result, so promote the product's storage kind to Sparse.
template <> struct product_promote_storage_type<Sparse,Dense, OuterProduct> { typedef Sparse ret; };
template <> struct product_promote_storage_type<Dense,Sparse, OuterProduct> { typedef Sparse ret; };
// Kernel selection for sparse * dense: dispatch on the sparse lhs storage
// order, and on whether the dense rhs can be processed one column at a time
// (ColPerCol: column-major rhs, or a compile-time single-column rhs).
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,
         typename AlphaType,
         int LhsStorageOrder = ((SparseLhsType::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor,
         bool ColPerCol = ((DenseRhsType::Flags&RowMajorBit)==0) || DenseRhsType::ColsAtCompileTime==1>
struct sparse_time_dense_product_impl;
// Row-major sparse lhs, rhs processed column-by-column.
// Each destination row is an independent sparse dot product, which makes the
// row loop trivially parallelizable with OpenMP.
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, true>
{
  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
  typedef typename internal::remove_all<DenseResType>::type Res;
  typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
  typedef evaluator<Lhs> LhsEval;
  // res += alpha * lhs * rhs
  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
  {
    LhsEval lhsEval(lhs);

    Index n = lhs.outerSize();
#ifdef EIGEN_HAS_OPENMP
    Eigen::initParallel();
    Index threads = Eigen::nbThreads();
#endif

    for(Index c=0; c<rhs.cols(); ++c)
    {
#ifdef EIGEN_HAS_OPENMP
      // This 20000 threshold has been found experimentally on 2D and 3D Poisson problems.
      // It basically represents the minimal amount of work to be done to be worth it.
      if(threads>1 && lhsEval.nonZerosEstimate() > 20000)
      {
        // NOTE(review): the chunk size was hoisted out of the schedule()
        // clause into a named variable (presumably for compilers that reject
        // a full expression there); the value matches the commented pragma.
        // #pragma omp parallel for schedule(dynamic,(n+threads*4-1)/(threads*4)) num_threads(threads)
        Index hh_omp = (n+threads*4-1)/(threads*4);
        #pragma omp parallel for schedule(dynamic,hh_omp) num_threads(threads)
        for(Index i=0; i<n; ++i)
          processRow(lhsEval,rhs,res,alpha,i,c);
      }
      else
#endif
      {
        for(Index i=0; i<n; ++i)
          processRow(lhsEval,rhs,res,alpha,i,c);
      }
    }
  }

  // res(i,col) += alpha * dot(lhs.row(i), rhs.col(col))
  static void processRow(const LhsEval& lhsEval, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha, Index i, Index col)
  {
    typename Res::Scalar tmp(0);
    for(LhsInnerIterator it(lhsEval,i); it ;++it)
      tmp += it.value() * rhs.coeff(it.index(),col);
    res.coeffRef(i,col) += alpha * tmp;
  }
};
// FIXME: what is the purpose of the following specialization? Is it for the BlockedSparse format?
// -> let's disable it for now as it is conflicting with generic scalar*matrix and matrix*scalar operators
// template<typename T1, typename T2/*, int _Options, typename _StrideType*/>
// struct ScalarBinaryOpTraits<T1, Ref<T2/*, _Options, _StrideType*/> >
// {
// enum {
// Defined = 1
// };
// typedef typename CwiseUnaryOp<scalar_multiple2_op<T1, typename T2::Scalar>, T2>::PlainObject ReturnType;
// };
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType, ColMajor, true>
{
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
evaluator<Lhs> lhsEval(lhs);
for(Index c=0; c<rhs.cols(); ++c)
{
for(Index j=0; j<lhs.outerSize(); ++j)
{
// typename Res::Scalar rhs_j = alpha * rhs.coeff(j,c);
typename ScalarBinaryOpTraits<AlphaType, typename Rhs::Scalar>::ReturnType rhs_j(alpha * rhs.coeff(j,c));
for(LhsInnerIterator it(lhsEval,j); it ;++it)
res.coeffRef(it.index(),c) += it.value() * rhs_j;
}
}
}
};
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, false>
{
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
{
evaluator<Lhs> lhsEval(lhs);
for(Index j=0; j<lhs.outerSize(); ++j)
{
typename Res::RowXpr res_j(res.row(j));
for(LhsInnerIterator it(lhsEval,j); it ;++it)
res_j += (alpha*it.value()) * rhs.row(it.index());
}
}
};
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, ColMajor, false>
{
typedef typename internal::remove_all<SparseLhsType>::type Lhs;
typedef typename internal::remove_all<DenseRhsType>::type Rhs;
typedef typename internal::remove_all<DenseResType>::type Res;
typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
{
evaluator<Lhs> lhsEval(lhs);
for(Index j=0; j<lhs.outerSize(); ++j)
{
typename Rhs::ConstRowXpr rhs_j(rhs.row(j));
for(LhsInnerIterator it(lhsEval,j); it ;++it)
res.row(it.index()) += (alpha*it.value()) * rhs_j;
}
}
};
// Entry point: forwards to the impl specialization selected by the lhs
// storage order and the ColPerCol criterion (see primary template above).
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,typename AlphaType>
inline void sparse_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
  sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType>::run(lhs, rhs, res, alpha);
}
} // end namespace internal
namespace internal {
// Evaluation of a sparse * dense product expression (non-outer products).
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, SparseShape, DenseShape, ProductType>
 : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,SparseShape,DenseShape,ProductType> >
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;

  // dst += alpha * lhs * rhs
  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
  {
    // Materialize nested sub-expressions when the kernel would otherwise
    // re-evaluate them; then run the sparse*dense kernel.
    typedef typename nested_eval<Lhs,((Rhs::Flags&RowMajorBit)==0) ? 1 : Rhs::ColsAtCompileTime>::type LhsNested;
    typedef typename nested_eval<Rhs,((Lhs::Flags&RowMajorBit)==0) ? 1 : Dynamic>::type RhsNested;
    LhsNested lhsNested(lhs);
    RhsNested rhsNested(rhs);
    internal::sparse_time_dense_product(lhsNested, rhsNested, dst, alpha);
  }
};
// A sparse-triangular lhs is handled exactly like a plain sparse lhs.
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, SparseTriangularShape, DenseShape, ProductType>
  : generic_product_impl<Lhs, Rhs, SparseShape, DenseShape, ProductType>
{};
// Evaluation of a dense * sparse product expression: reduced to the
// sparse * dense case by transposing all three operands,
// (dense * sparse)^T == sparse^T * dense^T.
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, DenseShape, SparseShape, ProductType>
  : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,SparseShape,ProductType> >
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;

  // dst += alpha * lhs * rhs
  template<typename Dst>
  static void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
  {
    typedef typename nested_eval<Lhs,((Rhs::Flags&RowMajorBit)==0) ? Dynamic : 1>::type LhsNested;
    typedef typename nested_eval<Rhs,((Lhs::Flags&RowMajorBit)==RowMajorBit) ? 1 : Lhs::RowsAtCompileTime>::type RhsNested;
    LhsNested lhsNested(lhs);
    RhsNested rhsNested(rhs);

    // transpose everything
    Transpose<Dst> dstT(dst);
    internal::sparse_time_dense_product(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);
  }
};
// A sparse-triangular rhs is handled exactly like a plain sparse rhs.
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, DenseShape, SparseTriangularShape, ProductType>
  : generic_product_impl<Lhs, Rhs, DenseShape, SparseShape, ProductType>
{};
// Lazy evaluator for the outer product of a sparse vector with a dense one
// (either order). The result is iterated like a sparse matrix: each outer
// index selects one coefficient of the "rhs" vector, which scales the sparse
// "lhs" vector along the inner dimension. NeedToTranspose swaps the roles
// of the two operands.
template<typename LhsT, typename RhsT, bool NeedToTranspose>
struct sparse_dense_outer_product_evaluator
{
protected:
  typedef typename conditional<NeedToTranspose,RhsT,LhsT>::type Lhs1;
  typedef typename conditional<NeedToTranspose,LhsT,RhsT>::type ActualRhs;
  typedef Product<LhsT,RhsT,DefaultProduct> ProdXprType;

  // if the actual left-hand side is a dense vector,
  // then build a sparse-view so that we can seamlessly iterate over it.
  typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,
            Lhs1, SparseView<Lhs1> >::type ActualLhs;
  typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,
            Lhs1 const&, SparseView<Lhs1> >::type LhsArg;

  typedef evaluator<ActualLhs> LhsEval;
  typedef evaluator<ActualRhs> RhsEval;
  typedef typename evaluator<ActualLhs>::InnerIterator LhsIterator;
  typedef typename ProdXprType::Scalar Scalar;

public:
  enum {
    Flags = NeedToTranspose ? RowMajorBit : 0,
    CoeffReadCost = HugeCost
  };

  // Iterates the nonzeros of the sparse factor, scaled by one coefficient
  // (m_factor) of the other factor selected by 'outer'.
  class InnerIterator : public LhsIterator
  {
  public:
    InnerIterator(const sparse_dense_outer_product_evaluator &xprEval, Index outer)
      : LhsIterator(xprEval.m_lhsXprImpl, 0),
        m_outer(outer),
        m_empty(false),
        m_factor(get(xprEval.m_rhsXprImpl, outer, typename internal::traits<ActualRhs>::StorageKind() ))
    {}

    EIGEN_STRONG_INLINE Index outer() const { return m_outer; }
    EIGEN_STRONG_INLINE Index row() const { return NeedToTranspose ? m_outer : LhsIterator::index(); }
    EIGEN_STRONG_INLINE Index col() const { return NeedToTranspose ? LhsIterator::index() : m_outer; }

    EIGEN_STRONG_INLINE Scalar value() const { return LhsIterator::value() * m_factor; }
    EIGEN_STRONG_INLINE operator bool() const { return LhsIterator::operator bool() && (!m_empty); }

  protected:
    // dense rhs: simply read the coefficient at 'outer'
    Scalar get(const RhsEval &rhs, Index outer, Dense = Dense()) const
    {
      return rhs.coeff(outer);
    }

    // sparse rhs: the factor is the (single) entry of the outer vector, or
    // zero -- in which case the whole column is empty (m_empty short-circuits
    // the iteration).
    Scalar get(const RhsEval &rhs, Index outer, Sparse = Sparse())
    {
      typename RhsEval::InnerIterator it(rhs, outer);
      if (it && it.index()==0 && it.value()!=Scalar(0))
        return it.value();
      m_empty = true;
      return Scalar(0);
    }

    Index m_outer;
    bool m_empty;
    Scalar m_factor;
  };

  sparse_dense_outer_product_evaluator(const Lhs1 &lhs, const ActualRhs &rhs)
     : m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs)
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  // transpose case
  sparse_dense_outer_product_evaluator(const ActualRhs &rhs, const Lhs1 &lhs)
     : m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs)
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

protected:
  const LhsArg m_lhs;
  evaluator<ActualLhs> m_lhsXprImpl;
  evaluator<ActualRhs> m_rhsXprImpl;
};
// sparse * dense outer product
template<typename Lhs, typename Rhs>
struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, OuterProduct, SparseShape, DenseShape>
  : sparse_dense_outer_product_evaluator<Lhs,Rhs, Lhs::IsRowMajor>
{
  typedef sparse_dense_outer_product_evaluator<Lhs,Rhs, Lhs::IsRowMajor> Base;

  typedef Product<Lhs, Rhs> XprType;
  typedef typename XprType::PlainObject PlainObject;

  explicit product_evaluator(const XprType& xpr)
    : Base(xpr.lhs(), xpr.rhs())
  {}
};
// dense * sparse outer product (transposed role of the operands)
template<typename Lhs, typename Rhs>
struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, OuterProduct, DenseShape, SparseShape>
  : sparse_dense_outer_product_evaluator<Lhs,Rhs, Rhs::IsRowMajor>
{
  typedef sparse_dense_outer_product_evaluator<Lhs,Rhs, Rhs::IsRowMajor> Base;

  typedef Product<Lhs, Rhs> XprType;
  typedef typename XprType::PlainObject PlainObject;

  explicit product_evaluator(const XprType& xpr)
    : Base(xpr.lhs(), xpr.rhs())
  {}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_SPARSEDENSEPRODUCT_H
|
lrthresh.c | /* Copyright 2015. The Regents of the University of California.
* Copyright 2015. Tao Zhang and Joseph Cheng.
* Copyright 2016-2018. Martin Uecker.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2014-2015 Frank Ong <frankong@berkeley.edu>
* 2014 Tao Zhang
* 2014 Joseph Cheng
* 2014 Jon Tamir
* 2014-2018 Martin Uecker
*/
#include <stdlib.h>
#include <complex.h>
#include <math.h>
#include "misc/misc.h"
#include "misc/mri.h"
#include "misc/debug.h"
#include "num/multind.h"
#include "num/flpmath.h"
#include "num/linalg.h"
#include "num/ops.h"
#include "num/blockproc.h"
#include "num/casorati.h"
#include "iter/thresh.h"
#include "lowrank/batchsvd.h"
#include "lowrank/svthresh.h"
#include "lrthresh.h"
/* Internal state of the (multi-scale) low-rank thresholding operator. */
struct lrthresh_data_s {

	INTERFACE(operator_data_t);

	float lambda;			// base regularization strength (scaled by mu on apply)
	bool randshift;			// apply random block shifts (reduces blocking artifacts)
	bool noise;			// treat the last level as one full-size block (see lrthresh_apply)
	int remove_mean;		// NOTE(review): stored but not read in this file -- confirm semantics

	long strs_lev[DIMS];		// strides of the level-decomposed array
	long strs[DIMS];		// strides of a single-level image
	long dims_decom[DIMS];		// dimensions including LEVEL_DIM
	long dims[DIMS];		// image dimensions (LEVEL_DIM collapsed)

	unsigned long mflags;		// dims reshaped into the matrix's first dimension
	unsigned long flags;

	long levels;			// number of decomposition levels
	long blkdims[MAX_LEV][DIMS];	// per-level block dimensions
};

static DEF_TYPEID(lrthresh_data_s);

static struct lrthresh_data_s* lrthresh_create_data(const long dims_decom[DIMS], bool randshift, unsigned long mflags, const long blkdims[MAX_LEV][DIMS], float lambda, bool noise, int remove_mean);
static void lrthresh_free_data(const operator_data_t* data);
static void lrthresh_apply(const operator_data_t* _data, float lambda, complex float* dst, const complex float* src);
/**
 * Initialize lrthresh operator
 *
 * @param dims_lev - decomposition dimensions (levels at LEVEL_DIM)
 * @param randshift - randshift boolean
 * @param mflags - selects which dimensions get reshaped as the first dimension in matrix
 * @param blkdims - contains block dimensions for all levels
 * @param lambda - base regularization parameter
 * @param noise - treat the last level as a full-size noise block
 * @param remove_mean - mean-removal mode (stored in the operator data)
 */
const struct operator_p_s* lrthresh_create(const long dims_lev[DIMS], bool randshift, unsigned long mflags, const long blkdims[MAX_LEV][DIMS], float lambda, bool noise, int remove_mean)
{
	struct lrthresh_data_s* data = lrthresh_create_data(dims_lev, randshift, mflags, blkdims, lambda, noise, remove_mean);

	return operator_p_create(DIMS, dims_lev, DIMS, dims_lev, CAST_UP(data), lrthresh_apply, lrthresh_free_data);
}
/**
 * Initialize lrthresh data
 *
 * @param dims_decom - dimensions with levels at LEVEL_DIM
 * @param randshift - randshift boolean
 * @param mflags - selects which dimensions get reshaped as the first dimension in matrix
 * @param blkdims - contains block dimensions for all levels
 * @param lambda - base regularization parameter
 * @param noise - treat the last level as a full-size noise block
 * @param remove_mean - mean-removal mode (stored verbatim)
 */
static struct lrthresh_data_s* lrthresh_create_data(const long dims_decom[DIMS], bool randshift, unsigned long mflags, const long blkdims[MAX_LEV][DIMS], float lambda, bool noise, int remove_mean)
{
	PTR_ALLOC(struct lrthresh_data_s, data);
	SET_TYPEID(lrthresh_data_s, data);

	data->randshift = randshift;
	data->mflags = mflags;
	data->lambda = lambda;
	data->noise = noise;
	data->remove_mean = remove_mean;

	// level dimensions
	md_copy_dims(DIMS, data->dims_decom, dims_decom);
	md_calc_strides(DIMS, data->strs_lev, dims_decom, CFL_SIZE);

	// image dimensions (level dimension collapsed)
	data->levels = dims_decom[LEVEL_DIM];
	md_select_dims(DIMS, ~LEVEL_FLAG, data->dims, dims_decom);
	md_calc_strides(DIMS, data->strs, data->dims, CFL_SIZE);

	// blkdims: deep-copy the per-level block sizes
	for(long l = 0; l < data->levels; l++) {

		for (long i = 0; i < DIMS; i++)
			data->blkdims[l][i] = blkdims[l][i];
	}

	return PTR_PASS(data);
}
/**
 * Free lrthresh operator data (callback registered with operator_p_create)
 */
static void lrthresh_free_data(const operator_data_t* _data)
{
	xfree(CAST_DOWN(lrthresh_data_s, _data));
}
/*
 * Return a random number between 0 and limit inclusive,
 * using rejection sampling to avoid modulo bias.
 */
static int rand_lim(int limit)
{
	// limit == -1 would divide by zero below, and any negative limit
	// makes the range meaningless; return the only sane value.
	if (limit < 0)
		return 0;

	// For limit >= RAND_MAX every rand() value is already in range
	// (and RAND_MAX / (limit + 1) would be 0, another division hazard).
	if (limit >= RAND_MAX)
		return rand();

	// Split [0, RAND_MAX] into (limit + 1) equal buckets and reject
	// draws from the incomplete tail bucket so all values are equally likely.
	int divisor = RAND_MAX / (limit + 1);
	int retval;

	do {
		retval = rand() / divisor;

	} while (retval > limit);

	return retval;
}
/*
 * Low rank thresholding for arbitrary block sizes:
 * for each level, reshape (shifted, zero-padded) blocks into a matrix and
 * apply singular-value thresholding with threshold mu * lambda * GWIDTH.
 */
static void lrthresh_apply(const operator_data_t* _data, float mu, complex float* dst, const complex float* src)
{
	auto data = CAST_DOWN(lrthresh_data_s, _data);

	float lambda = mu * data->lambda;

	long strs1[DIMS];
	md_calc_strides(DIMS, strs1, data->dims_decom, 1);

//#pragma omp parallel for
	for (int l = 0; l < data->levels; l++) {

		// offsets (in elements) of this level inside the decomposed arrays
		complex float* dstl = dst + l * strs1[LEVEL_DIM];
		const complex float* srcl = src + l * strs1[LEVEL_DIM];

		long blkdims[DIMS];
		long shifts[DIMS];
		long unshifts[DIMS];
		long zpad_dims[DIMS];
		long M = 1;

		for (unsigned int i = 0; i < DIMS; i++) {

			blkdims[i] = data->blkdims[l][i];
			// round each dimension up to a multiple of the block size
			zpad_dims[i] = (data->dims[i] + blkdims[i] - 1) / blkdims[i];
			zpad_dims[i] *= blkdims[i];

			// M collects the dims forming the matrix row dimension
			if (MD_IS_SET(data->mflags, i))
				M *= blkdims[i];

			// random cyclic shift to decorrelate block boundaries
			if (data->randshift)
				shifts[i] = rand_lim(MIN(blkdims[i] - 1, zpad_dims[i] - blkdims[i]));
			else
				shifts[i] = 0;

			unshifts[i] = -shifts[i];
		}

		long zpad_strs[DIMS];
		md_calc_strides(DIMS, zpad_strs, zpad_dims, CFL_SIZE);

		long blk_size = md_calc_size(DIMS, blkdims);
		long img_size = md_calc_size(DIMS, zpad_dims);
		long N = blk_size / M;	// matrix column dimension
		long B = img_size / blk_size;	// number of blocks

		// noise level: one full-image block, thresholded as a single matrix
		if (data->noise && (l == data->levels - 1)) {

			M = img_size;
			N = 1;
			B = 1;
		}

		// periodically extend the image to the zero-padded size, then shift
		complex float* tmp = md_alloc_sameplace(DIMS, zpad_dims, CFL_SIZE, dst);

		md_circ_ext(DIMS, zpad_dims, tmp, data->dims, srcl, CFL_SIZE);

		md_circ_shift(DIMS, zpad_dims, shifts, tmp, tmp, CFL_SIZE);

		long mat_dims[2];
		basorati_dims(DIMS, mat_dims, blkdims, zpad_dims);

		complex float* tmp_mat = md_alloc_sameplace(2, mat_dims, CFL_SIZE, dst);

		// Reshape image into a blk_size x number of blocks matrix
		basorati_matrix(DIMS, blkdims, mat_dims, tmp_mat, zpad_dims, zpad_strs, tmp);

		// Block SVD threshold (GWIDTH scales lambda with the matrix geometry)
		batch_svthresh(M, N, mat_dims[1], lambda * GWIDTH(M, N, B), *(complex float (*)[mat_dims[1]][M][N])tmp_mat);

		//	for ( int b = 0; b < mat_dims[1]; b++ )
		//	svthresh(M, N, lambda * GWIDTH(M, N, B), tmp_mat, tmp_mat);

		// undo the reshaping, shifting and padding
		basorati_matrixH(DIMS, blkdims, zpad_dims, zpad_strs, tmp, mat_dims, tmp_mat);

		md_circ_shift(DIMS, zpad_dims, unshifts, tmp, tmp, CFL_SIZE);

		md_resize(DIMS, data->dims, dstl, zpad_dims, tmp, CFL_SIZE);

		md_free(tmp);
		md_free(tmp_mat);
	}
}
/*
 * Nuclear norm calculation for arbitrary block sizes:
 * sum over all levels of the per-block nuclear norms.
 */
float lrnucnorm(const struct operator_p_s* op, const complex float* src)
{
	struct lrthresh_data_s* data = (struct lrthresh_data_s*)operator_p_get_data(op);

	long strs1[DIMS];
	md_calc_strides(DIMS, strs1, data->dims_decom, 1);

	float nnorm = 0.;

	for (int l = 0; l < data->levels; l++) {

		const complex float* srcl = src + l * strs1[LEVEL_DIM];

		long blkdims[DIMS];
		long blksize = 1;

		for (unsigned int i = 0; i < DIMS; i++) {

			blkdims[i] = data->blkdims[l][i];
			blksize *= blkdims[i];
		}

		// 1x1 blocks: the nuclear norm degenerates to a scaled l1-norm
		if (1 == blksize) {

			for (long j = 0; j < md_calc_size(DIMS, data->dims); j++)
				nnorm += 2 * cabsf(srcl[j]);

			continue;
		}

		struct svthresh_blockproc_data* svdata = svthresh_blockproc_create(data->mflags, 0., 0);

		complex float* tmp = md_alloc_sameplace(DIMS, data->dims, CFL_SIZE, src);

		//debug_print_dims(DP_DEBUG1, DIMS, data->dims);

		md_copy(DIMS, data->dims, tmp, srcl, CFL_SIZE);

		// Block SVD nuclear norm.
		// BUG FIX: accumulate with '+=' -- plain '=' discarded the
		// contributions of all previous levels (the 1x1-block branch
		// above accumulates, and nnorm is initialized to 0 once).
		nnorm += blockproc(DIMS, data->dims, blkdims, (void*)svdata, nucnorm_blockproc, tmp, tmp);

		xfree(svdata);
		md_free(tmp);
	}

	return nnorm;
}
/*************
* Block dimensions functions
*************/
/**
 * Generates multiscale low rank block sizes
 *
 * @param blkdims - block sizes to be written
 * @param flags - specifies which dimensions to do the blocks. The other dimensions will be the same as input
 * @param idims - input dimensions
 * @param blkskip - scale each level by blkskip to generate the next level
 * @param initblk - block size of the first (finest) level
 *
 * returns number of levels
 */
long multilr_blkdims(long blkdims[MAX_LEV][DIMS], unsigned long flags, const long idims[DIMS], int blkskip, long initblk)
{
	// Multiscale low rank block sizes
	long tmp_block[DIMS];

	// finest level: initblk (clipped to the image size) along selected dims
	for (unsigned int i = 0; i < DIMS; i++) {

		if (MD_IS_SET(flags, i))
			tmp_block[i] = MIN(initblk, idims[i]);
		else
			tmp_block[i] = idims[i];
	}

	bool done;

	// Loop block_sizes: record the current sizes, then grow each selected
	// dimension by blkskip until every one covers the full image.
	long levels = 0;

	do {
		levels++;

		debug_printf(DP_INFO, "[\t");

		for (unsigned int i = 0; i < DIMS; i++) {

			blkdims[levels - 1][i] = tmp_block[i];
			debug_printf(DP_INFO, "%ld\t", blkdims[levels-1][i]);
		}

		debug_printf(DP_INFO, "]\n");

		done = true;

		for (unsigned int i = 0; i < DIMS; i++) {

			if (MD_IS_SET(flags, i) && (idims[i] != 1)) {

				tmp_block[i] = MIN(tmp_block[i] * blkskip, idims[i]);
				// finished once the level just written already spans the image
				done = done && (blkdims[levels - 1][i] == idims[i]);
			}
		}

	} while(!done);

	return levels;
}
/* Append one extra level whose single block spans the whole image
 * (used as a full-size noise/residual level). */
void add_lrnoiseblk(long* levels, long blkdims[MAX_LEV][DIMS], const long idims[DIMS])
{
	levels[0]++;

	long last = levels[0] - 1;

	debug_printf(DP_DEBUG1, "[\t");

	for (unsigned int d = 0; d < DIMS; d++) {

		blkdims[last][d] = idims[d];
		debug_printf(DP_DEBUG1, "%ld\t", blkdims[last][d]);
	}

	debug_printf(DP_DEBUG1, "]\n");
}
/**
 * Generate locally low rank block sizes (a single level)
 *
 * @param blkdims - block sizes to be written
 * @param flags - specifies which dimensions to do the blocks. The other dimensions will be the same as input
 * @param idims - input dimensions
 * @param llrblk - the block size
 *
 * returns number of levels = 1
 */
long llr_blkdims(long blkdims[MAX_LEV][DIMS], unsigned long flags, const long idims[DIMS], long llrblk)
{
	for (unsigned int d = 0; d < DIMS; d++)
		blkdims[0][d] = MD_IS_SET(flags, d) ? MIN(llrblk, idims[d]) : idims[d];

	return 1;
}
/**
 * Generate low rank + sparse block sizes
 *
 * @param blkdims - block sizes to be written
 * @param idims - input dimensions
 *
 * returns number of levels = 2
 */
long ls_blkdims(long blkdims[MAX_LEV][DIMS], const long idims[DIMS])
{
	for (unsigned int d = 0; d < DIMS; d++) {

		blkdims[0][d] = 1;		// level 0: 1x1 blocks (sparse part)
		blkdims[1][d] = idims[d];	// level 1: one full-size block (low-rank part)
	}

	return 2;
}
/* Return the base regularization parameter lambda stored in the operator. */
float get_lrthresh_lambda(const struct operator_p_s* o)
{
	auto data = CAST_DOWN(lrthresh_data_s, operator_p_get_data(o));

	return data->lambda;
}
|
3d25pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y, storing the result in
 * RESULT (*y is normalized in place as a side effect).
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into *y so that x->tv_usec >= y->tv_usec. */
  if (x->tv_usec < y->tv_usec)
  {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Carry excess microseconds into seconds when the gap exceeds one second. */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }

  /* The remaining microsecond difference is now guaranteed non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Negative overall difference iff the (normalized) seconds went negative. */
  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*13);
for(m=0; m<13;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 4;
tile_size[3] = 512;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<13; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=floord(Nt-1,3);t1++) {
lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6));
ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(max(1,ceild(24*t2-Nz+9,4)),3*t1+1),6*t1-6*t2+2);t3<=min(min(min(floord(4*Nt+Ny-9,4),floord(12*t1+Ny+15,4)),floord(24*t2+Ny+11,4)),floord(24*t1-24*t2+Nz+Ny+13,4));t3++) {
for (t4=max(max(max(max(0,ceild(3*t1-3*t2-62,64)),ceild(3*t1-126,128)),ceild(24*t2-Nz-499,512)),ceild(4*t3-Ny-499,512));t4<=min(min(min(min(floord(4*Nt+Nx-9,512),floord(12*t1+Nx+15,512)),floord(24*t2+Nx+11,512)),floord(4*t3+Nx-9,512)),floord(24*t1-24*t2+Nz+Nx+13,512));t4++) {
for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(4*t3-Ny+5,4)),ceild(512*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),Nt-1),3*t1+5),6*t2+4),t3-1),128*t4+126);t5++) {
for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=4*t3;t7<=min(4*t3+3,4*t5+Ny-5);t7++) {
lbv=max(512*t4,4*t5+4);
ubv=min(512*t4+511,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) 
+ (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<13;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
miniflux.gold.h | void miniflux_gold (
    // ---- Reference ("gold") MiniFlux kernel --------------------------------
    // For each of the 5 state components and each direction d in {x, y, z}:
    //   1. g<d>_c = factor1 * (u[d-2] + 7*(u[d-1] + u[d]) + u[d+1]),
    //      a 1/12-weighted 4-point average of old_box along direction d;
    //   2. every g<d>_c is scaled by factor2 times one designated component
    //      (gx_2 in x, gy_3 in y, gz_4 in z); the designated component is
    //      updated LAST so every product reads its unscaled value;
    //   3. new_box_c += g<d>_c[next] - g<d>_c[cur] (forward difference).
    // All buffers are flat 320*320*320 double grids; only the leading N^3
    // sub-box (with per-loop halo offsets) is touched.
    // NOTE(review): only the first x-direction loop carries an OpenMP pragma;
    // presumably the remaining loops were meant to be parallel too — confirm.
    // NOTE(review): the difference loops read g values at indices the flux
    // loops never write (e.g. ix < 2); this assumes the caller pre-initializes
    // (e.g. zeroes) the g buffers — confirm against the caller.
    double *new_box_0_in, double *new_box_1_in, double *new_box_2_in, double *new_box_3_in, double *new_box_4_in,
    double *old_box_0_in, double *old_box_1_in, double *old_box_2_in, double *old_box_3_in, double *old_box_4_in,
    double *gx_0_in, double *gx_1_in, double *gx_2_in, double *gx_3_in, double *gx_4_in,
    double *gy_0_in, double *gy_1_in, double *gy_2_in, double *gy_3_in, double *gy_4_in,
    double *gz_0_in, double *gz_1_in, double *gz_2_in, double *gz_3_in, double *gz_4_in, int N) {
  double factor1 = (1.0/12.0);  // 4-point average weight
  double factor2 = 2.0;         // flux scaling factor
  // Reinterpret the flat buffers as fixed-size [320][320][320] grids.
  double (*new_box_0)[320][320] = (double (*)[320][320]) new_box_0_in;
  double (*new_box_1)[320][320] = (double (*)[320][320]) new_box_1_in;
  double (*new_box_2)[320][320] = (double (*)[320][320]) new_box_2_in;
  double (*new_box_3)[320][320] = (double (*)[320][320]) new_box_3_in;
  double (*new_box_4)[320][320] = (double (*)[320][320]) new_box_4_in;
  double (*old_box_0)[320][320] = (double (*)[320][320]) old_box_0_in;
  double (*old_box_1)[320][320] = (double (*)[320][320]) old_box_1_in;
  double (*old_box_2)[320][320] = (double (*)[320][320]) old_box_2_in;
  double (*old_box_3)[320][320] = (double (*)[320][320]) old_box_3_in;
  double (*old_box_4)[320][320] = (double (*)[320][320]) old_box_4_in;
  double (*gx_0)[320][320] = (double (*)[320][320]) gx_0_in;
  double (*gx_1)[320][320] = (double (*)[320][320]) gx_1_in;
  double (*gx_2)[320][320] = (double (*)[320][320]) gx_2_in;
  double (*gx_3)[320][320] = (double (*)[320][320]) gx_3_in;
  double (*gx_4)[320][320] = (double (*)[320][320]) gx_4_in;
  double (*gy_0)[320][320] = (double (*)[320][320]) gy_0_in;
  double (*gy_1)[320][320] = (double (*)[320][320]) gy_1_in;
  double (*gy_2)[320][320] = (double (*)[320][320]) gy_2_in;
  double (*gy_3)[320][320] = (double (*)[320][320]) gy_3_in;
  double (*gy_4)[320][320] = (double (*)[320][320]) gy_4_in;
  double (*gz_0)[320][320] = (double (*)[320][320]) gz_0_in;
  double (*gz_1)[320][320] = (double (*)[320][320]) gz_1_in;
  double (*gz_2)[320][320] = (double (*)[320][320]) gz_2_in;
  double (*gz_3)[320][320] = (double (*)[320][320]) gz_3_in;
  double (*gz_4)[320][320] = (double (*)[320][320]) gz_4_in;
  //---------------------- x-direction: flux reconstruction along ix
  #pragma omp parallel for
  for(int iz=0;iz<N;iz++){
    for(int iy=0;iy<N;iy++){
      for(int ix=2;ix<N-1;ix++){
        gx_0[iz][iy][ix] = factor1*
          (old_box_0[iz][iy][ix-2]+
          7*(old_box_0[iz][iy][ix-1]+old_box_0[iz][iy][ix]) +
          old_box_0[iz][iy][ix+1]);
        gx_1[iz][iy][ix] = factor1*
          (old_box_1[iz][iy][ix-2]+
          7*(old_box_1[iz][iy][ix-1]+old_box_1[iz][iy][ix]) +
          old_box_1[iz][iy][ix+1]);
        gx_2[iz][iy][ix] = factor1*
          (old_box_2[iz][iy][ix-2]+
          7*(old_box_2[iz][iy][ix-1]+old_box_2[iz][iy][ix]) +
          old_box_2[iz][iy][ix+1]);
        gx_3[iz][iy][ix] = factor1*
          (old_box_3[iz][iy][ix-2]+
          7*(old_box_3[iz][iy][ix-1]+old_box_3[iz][iy][ix]) +
          old_box_3[iz][iy][ix+1]);
        gx_4[iz][iy][ix] = factor1*
          (old_box_4[iz][iy][ix-2]+
          7*(old_box_4[iz][iy][ix-1]+old_box_4[iz][iy][ix]) +
          old_box_4[iz][iy][ix+1]);
      }
    }
  }
  // Scale all x-fluxes by factor2*gx_2; gx_2 is updated last so every
  // statement (including its own) reads the unscaled gx_2 value.
  // The `*=` form here is equivalent to the explicit form used for y/z.
  for(int iz=0;iz<N;iz++){
    for(int iy=0;iy<N;iy++){
      for(int ix=0;ix<N;ix++){
        gx_0[iz][iy][ix] *= factor2*gx_2[iz][iy][ix];
        gx_1[iz][iy][ix] *= factor2*gx_2[iz][iy][ix];
        gx_3[iz][iy][ix] *= factor2*gx_2[iz][iy][ix];
        gx_4[iz][iy][ix] *= factor2*gx_2[iz][iy][ix];
        gx_2[iz][iy][ix] *= factor2*gx_2[iz][iy][ix];
      }
    }
  }
  // Accumulate forward flux differences along ix into new_box.
  for(int iz=0;iz<N;iz++){
    for(int iy=0;iy<N;iy++){
      for(int ix=0;ix<N-1;ix++){
        new_box_0[iz][iy][ix]+= gx_0[iz][iy][ix+1]-gx_0[iz][iy][ix];
        new_box_1[iz][iy][ix]+= gx_1[iz][iy][ix+1]-gx_1[iz][iy][ix];
        new_box_2[iz][iy][ix]+= gx_2[iz][iy][ix+1]-gx_2[iz][iy][ix];
        new_box_3[iz][iy][ix]+= gx_3[iz][iy][ix+1]-gx_3[iz][iy][ix];
        new_box_4[iz][iy][ix]+= gx_4[iz][iy][ix+1]-gx_4[iz][iy][ix];
      }
    }
  }
  //---------------------- y-direction
  for(int iz=0;iz<N;iz++){
    for(int iy=2;iy<N-1;iy++){
      for(int ix=0;ix<N;ix++){
        gy_0[iz][iy][ix] = factor1*
          (old_box_0[iz][iy-2][ix]+
          7*(old_box_0[iz][iy-1][ix]+old_box_0[iz][iy][ix]) +
          old_box_0[iz][iy+1][ix]);
        gy_1[iz][iy][ix] = factor1*
          (old_box_1[iz][iy-2][ix]+
          7*(old_box_1[iz][iy-1][ix]+old_box_1[iz][iy][ix]) +
          old_box_1[iz][iy+1][ix]);
        gy_2[iz][iy][ix] = factor1*
          (old_box_2[iz][iy-2][ix]+
          7*(old_box_2[iz][iy-1][ix]+old_box_2[iz][iy][ix]) +
          old_box_2[iz][iy+1][ix]);
        gy_3[iz][iy][ix] = factor1*
          (old_box_3[iz][iy-2][ix]+
          7*(old_box_3[iz][iy-1][ix]+old_box_3[iz][iy][ix]) +
          old_box_3[iz][iy+1][ix]);
        gy_4[iz][iy][ix] = factor1*
          (old_box_4[iz][iy-2][ix]+
          7*(old_box_4[iz][iy-1][ix]+old_box_4[iz][iy][ix]) +
          old_box_4[iz][iy+1][ix]);
      }
    }
  }
  // Scale all y-fluxes by factor2*gy_3; gy_3 is updated last (see x above).
  for(int iz=0;iz<N;iz++){
    for(int iy=0;iy<N;iy++){
      for(int ix=0;ix<N;ix++){
        gy_0[iz][iy][ix] = factor2*gy_0[iz][iy][ix]*gy_3[iz][iy][ix];
        gy_1[iz][iy][ix] = factor2*gy_1[iz][iy][ix]*gy_3[iz][iy][ix];
        gy_2[iz][iy][ix] = factor2*gy_2[iz][iy][ix]*gy_3[iz][iy][ix];
        gy_4[iz][iy][ix] = factor2*gy_4[iz][iy][ix]*gy_3[iz][iy][ix];
        gy_3[iz][iy][ix] = factor2*gy_3[iz][iy][ix]*gy_3[iz][iy][ix];
      }
    }
  }
  // Accumulate forward flux differences along iy.
  for(int iz=0;iz<N;iz++){
    for(int iy=0;iy<N-1;iy++){
      for(int ix=0;ix<N;ix++){
        new_box_0[iz][iy][ix]+= gy_0[iz][iy+1][ix]-gy_0[iz][iy][ix];
        new_box_1[iz][iy][ix]+= gy_1[iz][iy+1][ix]-gy_1[iz][iy][ix];
        new_box_2[iz][iy][ix]+= gy_2[iz][iy+1][ix]-gy_2[iz][iy][ix];
        new_box_3[iz][iy][ix]+= gy_3[iz][iy+1][ix]-gy_3[iz][iy][ix];
        new_box_4[iz][iy][ix]+= gy_4[iz][iy+1][ix]-gy_4[iz][iy][ix];
      }
    }
  }
  //---------------------- z-direction
  for(int iz=2;iz<N-1;iz++){
    for(int iy=0;iy<N;iy++){
      for(int ix=0;ix<N;ix++){
        gz_0[iz][iy][ix] = factor1*
          (old_box_0[iz-2][iy][ix]+
          7*(old_box_0[iz-1][iy][ix]+old_box_0[iz][iy][ix]) +
          old_box_0[iz+1][iy][ix]);
        gz_1[iz][iy][ix] = factor1*
          (old_box_1[iz-2][iy][ix]+
          7*(old_box_1[iz-1][iy][ix]+old_box_1[iz][iy][ix]) +
          old_box_1[iz+1][iy][ix]);
        gz_2[iz][iy][ix] = factor1*
          (old_box_2[iz-2][iy][ix]+
          7*(old_box_2[iz-1][iy][ix]+old_box_2[iz][iy][ix]) +
          old_box_2[iz+1][iy][ix]);
        gz_3[iz][iy][ix] = factor1*
          (old_box_3[iz-2][iy][ix]+
          7*(old_box_3[iz-1][iy][ix]+old_box_3[iz][iy][ix]) +
          old_box_3[iz+1][iy][ix]);
        gz_4[iz][iy][ix] = factor1*
          (old_box_4[iz-2][iy][ix]+
          7*(old_box_4[iz-1][iy][ix]+old_box_4[iz][iy][ix]) +
          old_box_4[iz+1][iy][ix]);
      }
    }
  }
  // Scale all z-fluxes by factor2*gz_4; gz_4 is updated last (see x above).
  for(int iz=0;iz<N;iz++){
    for(int iy=0;iy<N;iy++){
      for(int ix=0;ix<N;ix++){
        gz_0[iz][iy][ix] = factor2*gz_0[iz][iy][ix]*gz_4[iz][iy][ix];
        gz_1[iz][iy][ix] = factor2*gz_1[iz][iy][ix]*gz_4[iz][iy][ix];
        gz_2[iz][iy][ix] = factor2*gz_2[iz][iy][ix]*gz_4[iz][iy][ix];
        gz_3[iz][iy][ix] = factor2*gz_3[iz][iy][ix]*gz_4[iz][iy][ix];
        gz_4[iz][iy][ix] = factor2*gz_4[iz][iy][ix]*gz_4[iz][iy][ix];
      }
    }
  }
  // Accumulate forward flux differences along iz.
  for(int iz=0;iz<N-1;iz++){
    for(int iy=0;iy<N;iy++){
      for(int ix=0;ix<N;ix++){
        new_box_0[iz][iy][ix]+= gz_0[iz+1][iy][ix]-gz_0[iz][iy][ix];
        new_box_1[iz][iy][ix]+= gz_1[iz+1][iy][ix]-gz_1[iz][iy][ix];
        new_box_2[iz][iy][ix]+= gz_2[iz+1][iy][ix]-gz_2[iz][iy][ix];
        new_box_3[iz][iy][ix]+= gz_3[iz+1][iy][ix]-gz_3[iz][iy][ix];
        new_box_4[iz][iy][ix]+= gz_4[iz+1][iy][ix]-gz_4[iz][iy][ix];
      }
    }
  }
}
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Compute *result = *x - *y.  NOTE: *y is normalized in place as a side
   * effect (classic glibc example behavior).  Returns 1 when the
   * difference is negative, 0 otherwise. */

  /* Borrow whole seconds into y's microsecond field so that afterwards
   * x->tv_usec >= y->tv_usec. */
  if (x->tv_usec < y->tv_usec)
    {
      int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
      y->tv_usec -= 1000000 * borrow;
      y->tv_sec += borrow;
    }
  /* Carry any excess microseconds (beyond one second) into y's seconds. */
  if (x->tv_usec - y->tv_usec > 1000000)
    {
      int carry = (x->tv_usec - y->tv_usec) / 1000000;
      y->tv_usec += 1000000 * carry;
      y->tv_sec -= carry;
    }
  /* With y normalized, the component-wise difference is exact and
   * result->tv_usec is guaranteed non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
/* Benchmark driver: allocates the double-buffered grid A and the 7
 * per-point coefficient arrays, fills them with pseudo-random data, runs
 * the order-1 3D 7-point variable-coefficient stencil TESTS times, and
 * reports the best wall-clock time.
 *
 * Usage: ./3d7pt_var Nx Ny Nz [Nt]   (grid sizes are padded by 2 for the
 * boundary halo; defaults are used when arguments are missing). */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  /* Fix: these were previously read uninitialized whenever fewer than
   * 3 (or 4) command-line arguments were supplied. */
  int Nx = 34, Ny = 34, Nz = 34, Nt = 4;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  // allocate the two time buffers A[0..1][Nz][Ny][Nx]
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // one variable-coefficient array per stencil point (center + 6 neighbors)
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 32;
  tile_size[1] = 32;
  tile_size[2] = 24;
  tile_size[3] = 512;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  // Initialize the FULL grids including the boundary planes.  Fix: the
  // loops previously started at index 1, leaving plane 0 uninitialized
  // even though the stencil reads A[..][i-1]/[j-1]/[k-1] at i=j=k=1.
  // Both time buffers get identical data so the halo of A[1] (never
  // written by the stencil) is also well-defined.
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = A[0][i][j][k];
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        for (k=0; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                  coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                  coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                  coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                  coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                  coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                  coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    // use the locally-defined MIN macro instead of relying on an external
    // lowercase `min` coming from another header
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays.  Fix: also free the top-level pointers A, coef
  // and the tile_size list, which previously leaked.
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
GB_binop__band_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__band_int64
// A.*B function (eWiseMult): GB_AemultB__band_int64
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__band_int64
// C+=b function (dense accum): GB_Cdense_accumb__band_int64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__band_int64
// C=scalar+B GB_bind1st__band_int64
// C=scalar+B' GB_bind1st_tran__band_int64
// C=A+scalar GB_bind2nd__band_int64
// C=A'+scalar GB_bind2nd_tran__band_int64
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij) & (bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x) & (y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BAND || GxB_NO_INT64 || GxB_NO_BAND_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A and B are all dense; no accumulator.  The "+" here is
// the BAND operator defined by GB_BINOP above: cij = aij & bij (int64).
GrB_Info GB_Cdense_ewise3_noaccum__band_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    // the template expands to the dense ewise3 loop using the GB_* macros
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C using the
// BAND operator (cij = cij & bij, int64).
GrB_Info GB_Cdense_accumB__band_int64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,  // per-task slice boundaries
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    {
        // expands to the C += B loop of the dense subassign (22/23 family)
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into the dense matrix C using the BAND
// operator (cij = cij & b, int64).
GrB_Info GB_Cdense_accumb__band_int64
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // untyped pointer to the scalar b
    const int nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    // fix: the original returned GrB_SUCCESS both inside the block above
    // and again, unreachably, after it; a single return after the block
    // matches the sibling GB_Cdense_accumB variant.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B, where "+" is the BAND operator
// (cij = aij & bij, int64).
GrB_Info GB_AaddB__band_int64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,             // optional mask (see banner above)
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    // per-matrix ek_slice workspaces; released by GB_FREE_ALL (defined above)
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, where ".*" is the BAND operator
// (cij = aij & bij, int64).
GrB_Info GB_AemultB__band_int64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,             // optional mask (see banner above)
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    // per-matrix ek_slice workspaces; released by GB_FREE_ALL (defined above)
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x & Bx [p] for every entry present in the bitmap Bb.
GrB_Info GB_bind1st__band_int64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped scalar and arrays
    int64_t x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // apply the operator only to entries present in the bitmap
        if (GBB (Bb, k))
        {
            int64_t bij = Bx [k] ;
            Cx [k] = (x) & (bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] & y for every entry present in the bitmap Ab.
GrB_Info GB_bind2nd__band_int64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped arrays and scalar
    int64_t y = (*((int64_t *) y_input)) ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // apply the operator only to entries present in the bitmap
        if (GBB (Ab, k))
        {
            int64_t aij = Ax [k] ;
            Cx [k] = (aij) & (y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (x) & (aij) ; \
}
// C = op (x, A'): transpose A and apply cij = x & aij (via GB_CAST_OP above).
GrB_Info GB_bind1st_tran__band_int64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    // the template applies GB_CAST_OP to every entry of the transposed A
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (aij) & (y) ; \
}
// C = op (A', y): transpose A and apply cij = aij & y (via GB_CAST_OP above).
GrB_Info GB_bind2nd_tran__band_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    // the template applies GB_CAST_OP to every entry of the transposed A
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
cache.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC AAA CCCC H H EEEEE %
% C A A C H H E %
% C AAAAA C HHHHH EEE %
% C A A C H H E %
% CCCC A A CCCC H H EEEEE %
% %
% %
% MagickCore Pixel Cache Methods %
% %
% Software Design %
% Cristy %
% July 1999 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite-private.h"
#include "magick/distribute-cache-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/nt-base-private.h"
#include "magick/option.h"
#include "magick/pixel.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/policy.h"
#include "magick/quantum.h"
#include "magick/random_.h"
#include "magick/registry.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/timer-private.h"
#include "magick/utility.h"
#include "magick/utility-private.h"
#if defined(MAGICKCORE_ZLIB_DELEGATE)
#include "zlib.h"
#endif
/*
Define declarations.
*/
#define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent)
#define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \
GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse)
/*
Typedef declarations.
*/
typedef struct _MagickModulo
{
ssize_t
quotient,
remainder;
} MagickModulo;
/*
Forward declarations.
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static Cache
GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *)
magick_hot_spot;
static const IndexPacket
*GetVirtualIndexesFromCache(const Image *);
static const PixelPacket
*GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t,
const ssize_t,const size_t,const size_t,ExceptionInfo *),
*GetVirtualPixelsCache(const Image *);
static MagickBooleanType
GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,
PixelPacket *,ExceptionInfo *),
GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod,
const ssize_t,const ssize_t,PixelPacket *,ExceptionInfo *),
OpenPixelCache(Image *,const MapMode,ExceptionInfo *),
OpenPixelCacheOnDisk(CacheInfo *,const MapMode),
ReadPixelCacheIndexes(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
SyncAuthenticPixelsCache(Image *,ExceptionInfo *),
WritePixelCacheIndexes(CacheInfo *,NexusInfo *magick_restrict,
ExceptionInfo *),
WritePixelCachePixels(CacheInfo *,NexusInfo *magick_restrict,
ExceptionInfo *);
static PixelPacket
*GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*SetPixelCacheNexusPixels(const CacheInfo *magick_restrict,const MapMode,
const ssize_t,const ssize_t,const size_t,const size_t,
const MagickBooleanType,NexusInfo *magick_restrict,ExceptionInfo *)
magick_hot_spot;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
static void
CopyOpenCLBuffer(CacheInfo *magick_restrict);
#endif
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
Global declarations.
*/
static SemaphoreInfo
*cache_semaphore = (SemaphoreInfo *) NULL;
static ssize_t
cache_anonymous_memory = (-1);
static time_t
cache_epoch = 0;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
  Release every OpenCL resource held by an OpenCLCacheInfo structure: all
  recorded events, the events array, the semaphore protecting it, and the
  cl_mem buffer (if any).  Returns NULL (the freed pointer), following
  the Relinquish* convention so callers can write `info=Relinquish...`.
*/
static inline OpenCLCacheInfo *RelinquishOpenCLCacheInfo(MagickCLEnv clEnv,
  OpenCLCacheInfo *info)
{
  ssize_t
    i;

  for (i=0; i < (ssize_t) info->event_count; i++)
    clEnv->library->clReleaseEvent(info->events[i]);
  info->events=(cl_event *) RelinquishMagickMemory(info->events);
  DestroySemaphoreInfo(&info->events_semaphore);
  if (info->buffer != (cl_mem) NULL)
    {
      clEnv->library->clReleaseMemObject(info->buffer);
      info->buffer=(cl_mem) NULL;
    }
  return((OpenCLCacheInfo *) RelinquishMagickMemory(info));
}
/*
  OpenCL event callback: frees the pixel buffer once every outstanding
  event has completed.  If any event is still pending, the callback is
  re-registered on that event and cleanup is retried when it fires.
  Also invoked directly (with a NULL event) by RelinquishOpenCLBuffer.
*/
static void CL_API_CALL RelinquishPixelCachePixelsDelayed(
  cl_event magick_unused(event),cl_int magick_unused(event_command_exec_status),
  void *user_data)
{
  MagickCLEnv
    clEnv;

  OpenCLCacheInfo
    *info;

  PixelPacket
    *pixels;

  ssize_t
    i;

  magick_unreferenced(event);
  magick_unreferenced(event_command_exec_status);
  info=(OpenCLCacheInfo *) user_data;
  clEnv=GetDefaultOpenCLEnv();
  for (i=(ssize_t)info->event_count-1; i >= 0; i--)
  {
    cl_int
      event_status;

    cl_uint
      status;

    status=clEnv->library->clGetEventInfo(info->events[i],
      CL_EVENT_COMMAND_EXECUTION_STATUS,sizeof(cl_int),&event_status,NULL);
    /* event_status > CL_COMPLETE means the command is still queued,
       submitted or running: defer cleanup until this event completes. */
    if ((status == CL_SUCCESS) && (event_status > CL_COMPLETE))
    {
      clEnv->library->clSetEventCallback(info->events[i],CL_COMPLETE,
        &RelinquishPixelCachePixelsDelayed,info);
      return;
    }
  }
  /* All events are complete (or their status could not be queried):
     release the resource accounting, the OpenCL state, and the pixels. */
  pixels=info->pixels;
  RelinquishMagickResource(MemoryResource,info->length);
  (void) RelinquishOpenCLCacheInfo(clEnv,info);
  (void) RelinquishAlignedMemory(pixels);
}
/*
  Schedule release of a pixel cache's OpenCL state (buffer, events,
  pixels) via the delayed-release callback.  Returns MagickFalse when the
  cache has no OpenCL state attached, MagickTrue otherwise.
*/
static MagickBooleanType RelinquishOpenCLBuffer(
  CacheInfo *magick_restrict cache_info)
{
  /* fix: removed an unused local `MagickCLEnv clEnv` that only triggered
     unused-variable warnings */
  assert(cache_info != (CacheInfo *) NULL);
  if (cache_info->opencl == (OpenCLCacheInfo *) NULL)
    return(MagickFalse);
  /* reuse the delayed-release callback path with a NULL event */
  RelinquishPixelCachePixelsDelayed((cl_event) NULL,0,cache_info->opencl);
  return(MagickTrue);
}
/*
  Snapshot the event list of an OpenCLCacheInfo under its semaphore.
  On return, *event_count holds the number of events and the result is a
  freshly allocated copy of them.  Returns NULL when there are no events
  or when allocation fails (in which case *event_count is reset to 0).
  The caller owns the returned array.
*/
static cl_event *CopyOpenCLEvents(OpenCLCacheInfo *opencl_info,
  cl_uint *event_count)
{
  cl_event
    *events;

  register size_t
    i;

  assert(opencl_info != (OpenCLCacheInfo *) NULL);
  events=(cl_event *) NULL;
  LockSemaphoreInfo(opencl_info->events_semaphore);
  *event_count=opencl_info->event_count;
  if (*event_count > 0)
    {
      events=AcquireQuantumMemory(*event_count,sizeof(*events));
      if (events == (cl_event *) NULL)
        *event_count=0;
      else
        {
          for (i=0; i < opencl_info->event_count; i++)
            events[i]=opencl_info->events[i];
        }
    }
  UnlockSemaphoreInfo(opencl_info->events_semaphore);
  return(events);
}
#endif
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A d d O p e n C L E v e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AddOpenCLEvent() adds an event to the list of operations the next operation
% should wait for.
%
% The format of the AddOpenCLEvent() method is:
%
% void AddOpenCLEvent(const Image *image,cl_event event)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o event: the event that should be added.
%
*/
/*
  AddOpenCLEvent() appends `event' to the image cache's wait list so the
  next OpenCL operation on these pixels can synchronize against it.
*/
extern MagickPrivate void AddOpenCLEvent(const Image *image,cl_event event)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickCLEnv
    clEnv;

  assert(image != (const Image *) NULL);
  assert(event != (cl_event) NULL);
  cache_info=(CacheInfo *)image->cache;
  assert(cache_info->opencl != (OpenCLCacheInfo *) NULL);
  clEnv=GetDefaultOpenCLEnv();
  /*
    Take a reference on the event; if that fails we cannot track it, so
    block until it completes and drop it instead of queuing it.
  */
  if (clEnv->library->clRetainEvent(event) != CL_SUCCESS)
    {
      clEnv->library->clWaitForEvents(1,&event);
      return;
    }
  /*
    Grow the event list by one slot under the events semaphore; first
    insertion allocates the array.
  */
  LockSemaphoreInfo(cache_info->opencl->events_semaphore);
  if (cache_info->opencl->events == (cl_event *) NULL)
    {
      cache_info->opencl->events=AcquireMagickMemory(sizeof(
        *cache_info->opencl->events));
      cache_info->opencl->event_count=1;
    }
  else
    cache_info->opencl->events=ResizeQuantumMemory(cache_info->opencl->events,
      ++cache_info->opencl->event_count,sizeof(*cache_info->opencl->events));
  if (cache_info->opencl->events == (cl_event *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  cache_info->opencl->events[cache_info->opencl->event_count-1]=event;
  UnlockSemaphoreInfo(cache_info->opencl->events_semaphore);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCache() acquires a pixel cache.
%
% The format of the AcquirePixelCache() method is:
%
% Cache AcquirePixelCache(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickExport Cache AcquirePixelCache(const size_t number_threads)
{
  CacheInfo
    *magick_restrict cache_info;

  char
    *value;

  cache_info=(CacheInfo *) AcquireAlignedMemory(1,sizeof(*cache_info));
  if (cache_info == (CacheInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(cache_info,0,sizeof(*cache_info));
  cache_info->type=UndefinedCache;
  cache_info->mode=IOMode;
  cache_info->disk_mode=IOMode;
  cache_info->colorspace=sRGBColorspace;
  cache_info->channels=4;
  cache_info->file=(-1);  /* no backing disk file yet */
  cache_info->id=GetMagickThreadId();
  /*
    Size the per-thread nexus pool: start from `number_threads', raise it to
    the OpenMP maximum and the thread resource limit, and never allow zero.
  */
  cache_info->number_threads=number_threads;
  if (GetOpenMPMaximumThreads() > cache_info->number_threads)
    cache_info->number_threads=GetOpenMPMaximumThreads();
  if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads)
    cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  if (cache_info->number_threads == 0)
    cache_info->number_threads=1;
  cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads);
  /*
    The policy setting, checked second, overrides the environment variable.
  */
  value=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  value=GetPolicyValue("cache:synchronize");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  cache_info->width_limit=GetMagickResourceLimit(WidthResource);
  cache_info->height_limit=GetMagickResourceLimit(HeightResource);
  cache_info->semaphore=AllocateSemaphoreInfo();
  cache_info->reference_count=1;
  cache_info->file_semaphore=AllocateSemaphoreInfo();
  cache_info->debug=IsEventLogging();
  cache_info->signature=MagickCoreSignature;
  return((Cache ) cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCacheNexus() allocates the NexusInfo structure.
%
% The format of the AcquirePixelCacheNexus method is:
%
% NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickExport NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
{
  NexusInfo
    **magick_restrict nexus_info;

  register ssize_t
    i;

  /*
    Allocate 2*number_threads entries: the first half are the per-thread
    working nexuses, the second half back their `virtual_nexus' members.
  */
  nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(2*
    number_threads,sizeof(*nexus_info)));
  if (nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    One contiguous slab holds every NexusInfo struct; nexus_info[0] doubles
    as the slab pointer (see DestroyPixelCacheNexus), and each nexus_info[i]
    points into it.
  */
  *nexus_info=(NexusInfo *) AcquireQuantumMemory(2*number_threads,
    sizeof(**nexus_info));
  if (*nexus_info == (NexusInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(*nexus_info,0,2*number_threads*sizeof(**nexus_info));
  for (i=0; i < (ssize_t) (2*number_threads); i++)
  {
    nexus_info[i]=(*nexus_info+i);
    if (i < (ssize_t) number_threads)
      nexus_info[i]->virtual_nexus=(*nexus_info+number_threads+i);
    nexus_info[i]->signature=MagickCoreSignature;
  }
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCachePixels() returns the pixels associated with the specified
% image.
%
% The format of the AcquirePixelCachePixels() method is:
%
% const void *AcquirePixelCachePixels(const Image *image,
% MagickSizeType *length,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  AcquirePixelCachePixels() returns a read-only pointer to the in-core pixel
  buffer and its length; only memory- and map-backed caches have one, all
  other cache types yield NULL with *length set to 0.
*/
MagickExport const void *AcquirePixelCachePixels(const Image *image,
  MagickSizeType *length,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  (void) exception;
  *length=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      *length=cache_info->length;
      return((const void *) cache_info->pixels);
    }
    default:
      return((const void *) NULL);
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t G e n e s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentGenesis() instantiates the cache component.
%
% The format of the CacheComponentGenesis method is:
%
% MagickBooleanType CacheComponentGenesis(void)
%
*/
/*
  CacheComponentGenesis() lazily allocates the cache component's global
  semaphore; always succeeds.
*/
MagickExport MagickBooleanType CacheComponentGenesis(void)
{
  if (cache_semaphore != (SemaphoreInfo *) NULL)
    return(MagickTrue);
  cache_semaphore=AllocateSemaphoreInfo();
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t T e r m i n u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentTerminus() destroys the cache component.
%
% The format of the CacheComponentTerminus() method is:
%
% CacheComponentTerminus(void)
%
*/
MagickExport void CacheComponentTerminus(void)
{
  /*
    If the semaphore was never allocated, activate it first so the
    DestroySemaphoreInfo() call below always has a valid semaphore.
  */
  if (cache_semaphore == (SemaphoreInfo *) NULL)
    ActivateSemaphoreInfo(&cache_semaphore);
  DestroySemaphoreInfo(&cache_semaphore);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l i p P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipPixelCacheNexus() clips the cache nexus as defined by the image clip
% mask. The method returns MagickTrue if the pixel region is clipped,
% otherwise MagickFalse.
%
% The format of the ClipPixelCacheNexus() method is:
%
% MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ClipPixelCacheNexus() composites the nexus pixels against the image clip
  mask: where the mask is non-zero the authentic source pixel is blended
  over the nexus pixel, elsewhere the nexus pixel is left untouched.
  Returns MagickTrue on success (or when no clipping applies), MagickFalse
  on failure.

  Fix: the early error return when p, q or r is NULL leaked `clip_nexus';
  it is now destroyed before returning (matches the upstream fix).
*/
static MagickBooleanType ClipPixelCacheNexus(Image *image,
  NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    n;

  NexusInfo
    **magick_restrict clip_nexus;

  register const PixelPacket
    *magick_restrict r;

  register IndexPacket
    *magick_restrict nexus_indexes,
    *magick_restrict indexes;

  register PixelPacket
    *magick_restrict p,
    *magick_restrict q;

  ssize_t
    y;

  /*
    Apply clip mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->clip_mask == (Image *) NULL) ||
      (image->storage_class == PseudoClass))
    return(MagickTrue);
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  clip_nexus=AcquirePixelCacheNexus(1);
  /*
    p: authentic source pixels, q: nexus destination pixels, r: mask pixels.
  */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  indexes=nexus_info->virtual_nexus->indexes;
  q=nexus_info->pixels;
  nexus_indexes=nexus_info->indexes;
  r=GetVirtualPixelCacheNexus(image->clip_mask,MaskVirtualPixelMethod,
    nexus_info->region.x,nexus_info->region.y,nexus_info->region.width,
    nexus_info->region.height,clip_nexus[0],exception);
  if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL) ||
      (r == (const PixelPacket *) NULL))
    {
      /*
        Release the clip nexus before bailing out; the original early
        return leaked it.
      */
      clip_nexus=DestroyPixelCacheNexus(clip_nexus,1);
      return(MagickFalse);
    }
  n=0;
  for (y=0; y < (ssize_t) nexus_info->region.height; y++)
  {
    register ssize_t
      x;

    for (x=0; x < (ssize_t) nexus_info->region.width; x++)
    {
      double
        mask_alpha;

      mask_alpha=QuantumScale*GetPixelIntensity(image,r);
      /*
        Only blend where the mask intensity is non-negligible.
      */
      if (fabs(mask_alpha) >= MagickEpsilon)
        {
          SetPixelRed(q,mask_alpha*MagickOver_((MagickRealType) p->red,
            (MagickRealType) GetPixelOpacity(p),(MagickRealType) q->red,
            (MagickRealType) GetPixelOpacity(q)));
          SetPixelGreen(q,mask_alpha*MagickOver_((MagickRealType) p->green,
            (MagickRealType) GetPixelOpacity(p),(MagickRealType) q->green,
            (MagickRealType) GetPixelOpacity(q)));
          SetPixelBlue(q,mask_alpha*MagickOver_((MagickRealType) p->blue,
            (MagickRealType) GetPixelOpacity(p),(MagickRealType) q->blue,
            (MagickRealType) GetPixelOpacity(q)));
          SetPixelOpacity(q,GetPixelOpacity(p));
          if (cache_info->active_index_channel != MagickFalse)
            SetPixelIndex(nexus_indexes+n,GetPixelIndex(indexes+n));
        }
      p++;
      q++;
      r++;
      n++;
    }
  }
  clip_nexus=DestroyPixelCacheNexus(clip_nexus,1);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCache() clones a pixel cache.
%
% The format of the ClonePixelCache() method is:
%
% Cache ClonePixelCache(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
/*
  ClonePixelCache() creates a fresh, empty cache that copies only the
  thread count and virtual pixel method of the source; no pixel data is
  cloned here.
*/
MagickExport Cache ClonePixelCache(const Cache cache)
{
  const CacheInfo
    *magick_restrict source;

  CacheInfo
    *magick_restrict clone;

  assert(cache != NULL);
  source=(const CacheInfo *) cache;
  assert(source->signature == MagickCoreSignature);
  if (source->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      source->filename);
  clone=(CacheInfo *) AcquirePixelCache(source->number_threads);
  clone->virtual_pixel_method=source->virtual_pixel_method;
  return((Cache) clone);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheMethods() clones the pixel cache methods from one cache to
% another.
%
% The format of the ClonePixelCacheMethods() method is:
%
% void ClonePixelCacheMethods(Cache clone,const Cache cache)
%
% A description of each parameter follows:
%
% o clone: Specifies a pointer to a Cache structure.
%
% o cache: the pixel cache.
%
*/
MagickExport void ClonePixelCacheMethods(Cache clone,const Cache cache)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict source_info;

  /*
    NOTE: the local names are historically inverted -- `source_info' holds
    the DESTINATION (`clone') and `cache_info' holds the SOURCE (`cache').
    The net effect is that `clone' receives `cache''s method table.
  */
  assert(clone != (Cache) NULL);
  source_info=(CacheInfo *) clone;
  assert(source_info->signature == MagickCoreSignature);
  if (source_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      source_info->filename);
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  source_info->methods=cache_info->methods;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e R e p o s i t o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheRepository() clones the source pixel cache to the destination
% cache.
%
% The format of the ClonePixelCacheRepository() method is:
%
% MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info,
% CacheInfo *source_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o source_info: the source pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ClonePixelCacheOnDisk() byte-copies a disk-backed pixel cache into another
  disk cache with identical morphology.  Returns MagickTrue only when every
  byte of cache_info->length was transferred.
*/
static MagickBooleanType ClonePixelCacheOnDisk(
  CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info)
{
  MagickSizeType
    extent;

  size_t
    quantum;

  ssize_t
    count;

  struct stat
    file_stats;

  unsigned char
    *buffer;

  /*
    Clone pixel cache on disk with identical morphology.
  */
  if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) ||
      (OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse))
    return(MagickFalse);
  if ((lseek(cache_info->file,0,SEEK_SET) < 0) ||
      (lseek(clone_info->file,0,SEEK_SET) < 0))
    return(MagickFalse);
  /*
    Choose the copy-buffer size: the file size when known (capped at
    MagickMaxBufferExtent), otherwise MagickMaxBufferExtent.
  */
  quantum=(size_t) MagickMaxBufferExtent;
  if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0))
    quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent);
  buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    Copy loop: stop early on read EOF/error or a short write; `extent'
    accumulates the bytes actually written.
  */
  extent=0;
  while ((count=read(cache_info->file,buffer,quantum)) > 0)
  {
    ssize_t
      number_bytes;

    number_bytes=write(clone_info->file,buffer,(size_t) count);
    if (number_bytes != count)
      break;
    extent+=number_bytes;
  }
  buffer=(unsigned char *) RelinquishMagickMemory(buffer);
  if (extent != cache_info->length)
    return(MagickFalse);
  return(MagickTrue);
}
/*
  ClonePixelCacheRepository() copies the pixels (and indexes, when both
  caches have an active index channel) from cache_info into clone_info.
  Identical morphology takes a fast path (memcpy or disk-to-disk copy);
  otherwise pixels are transferred row by row through per-thread nexuses.
*/
static MagickBooleanType ClonePixelCacheRepository(
  CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info,
  ExceptionInfo *exception)
{
#define MaxCacheThreads ((size_t) GetMagickResourceLimit(ThreadResource))
#define cache_number_threads(source,destination,chunk,multithreaded) \
  num_threads((multithreaded) == 0 ? 1 : \
    (((source)->type != MemoryCache) && ((source)->type != MapCache)) || \
    (((destination)->type != MemoryCache) && ((destination)->type != MapCache)) ? \
    MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1) : \
    MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource),(ssize_t) (chunk)/256),1))

  MagickBooleanType
    status;

  NexusInfo
    **magick_restrict cache_nexus,
    **magick_restrict clone_nexus;

  size_t
    length;

  ssize_t
    y;

  assert(cache_info != (CacheInfo *) NULL);
  assert(clone_info != (CacheInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  if (cache_info->type == PingCache)
    return(MagickTrue);
  if ((cache_info->storage_class == clone_info->storage_class) &&
      (cache_info->colorspace == clone_info->colorspace) &&
      (cache_info->channels == clone_info->channels) &&
      (cache_info->columns == clone_info->columns) &&
      (cache_info->rows == clone_info->rows) &&
      (cache_info->active_index_channel == clone_info->active_index_channel))
    {
      /*
        Identical pixel cache morphology.
      */
      if (((cache_info->type == MemoryCache) ||
           (cache_info->type == MapCache)) &&
          ((clone_info->type == MemoryCache) ||
           (clone_info->type == MapCache)))
        {
          /*
            Both in-core: a straight memcpy of pixels (and indexes) suffices.
          */
          (void) memcpy(clone_info->pixels,cache_info->pixels,
            cache_info->columns*cache_info->rows*sizeof(*cache_info->pixels));
          if ((cache_info->active_index_channel != MagickFalse) &&
              (clone_info->active_index_channel != MagickFalse))
            (void) memcpy(clone_info->indexes,cache_info->indexes,
              cache_info->columns*cache_info->rows*
              sizeof(*cache_info->indexes));
          return(MagickTrue);
        }
      if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache))
        return(ClonePixelCacheOnDisk(cache_info,clone_info));
    }
  /*
    Mismatched pixel cache morphology.
  */
  cache_nexus=AcquirePixelCacheNexus(cache_info->number_threads);
  clone_nexus=AcquirePixelCacheNexus(clone_info->number_threads);
  /*
    Each row copies min(columns) pixels; extra destination columns are
    zeroed by the memset below, extra source rows are skipped.
  */
  length=(size_t) MagickMin(cache_info->columns,clone_info->columns)*
    sizeof(*cache_info->pixels);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
  for (y=0; y < (ssize_t) cache_info->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    PixelPacket
      *pixels;

    if (status == MagickFalse)
      continue;
    if (y >= (ssize_t) clone_info->rows)
      continue;
    pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
      cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
    if (pixels == (PixelPacket *) NULL)
      continue;
    status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
    if (status == MagickFalse)
      continue;
    pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
      clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
    if (pixels == (PixelPacket *) NULL)
      continue;
    (void) memset(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length);
    (void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length);
    status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
  }
  if ((cache_info->active_index_channel != MagickFalse) &&
      (clone_info->active_index_channel != MagickFalse))
    {
      /*
        Clone indexes.
      */
      length=(size_t) MagickMin(cache_info->columns,clone_info->columns)*
        sizeof(*cache_info->indexes);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
      for (y=0; y < (ssize_t) cache_info->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        PixelPacket
          *pixels;

        if (status == MagickFalse)
          continue;
        if (y >= (ssize_t) clone_info->rows)
          continue;
        pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
          cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
        if (pixels == (PixelPacket *) NULL)
          continue;
        status=ReadPixelCacheIndexes(cache_info,cache_nexus[id],exception);
        if (status == MagickFalse)
          continue;
        pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
          clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
        if (pixels == (PixelPacket *) NULL)
          continue;
        (void) memcpy(clone_nexus[id]->indexes,cache_nexus[id]->indexes,length);
        status=WritePixelCacheIndexes(clone_info,clone_nexus[id],exception);
      }
    }
  clone_nexus=DestroyPixelCacheNexus(clone_nexus,clone_info->number_threads);
  cache_nexus=DestroyPixelCacheNexus(cache_nexus,cache_info->number_threads);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MaxTextExtent];

      (void) FormatLocaleString(message,MaxTextExtent,"%s => %s",
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixelCache() method is:
%
% void DestroyImagePixelCache(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  DestroyImagePixelCache() drops the image's reference to its pixel cache,
  if one exists.
*/
static void DestroyImagePixelCache(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->cache == (void *) NULL)
    return;
  image->cache=DestroyPixelCache(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixels() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixels() method is:
%
% void DestroyImagePixels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImagePixels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Delegate to the cache's registered destroy handler when one is set;
    otherwise fall back to destroying the pixel cache directly.
  */
  if (cache_info->methods.destroy_pixel_handler != (DestroyPixelHandler) NULL)
    {
      cache_info->methods.destroy_pixel_handler(image);
      return;
    }
  image->cache=DestroyPixelCache(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyPixelCache() method is:
%
% Cache DestroyPixelCache(Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
/*
  ClosePixelCacheOnDisk() closes the disk-cache file descriptor and returns
  the file resource slot; it fails (MagickFalse) when no descriptor is open
  or close(2) reports an error.
*/
static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info)
{
  int
    result;

  if (cache_info->file == -1)
    return(MagickFalse);
  result=close(cache_info->file);
  cache_info->file=(-1);
  RelinquishMagickResource(FileResource,1);
  return(result == -1 ? MagickFalse : MagickTrue);
}
/*
  RelinquishPixelCachePixels() releases the storage backing the pixel cache
  (heap memory, memory map, disk file, or distributed server) and resets the
  cache to UndefinedCache.
*/
static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
  switch (cache_info->type)
  {
    case MemoryCache:
    {
#if defined(MAGICKCORE_OPENCL_SUPPORT)
      /*
        When an OpenCL buffer wraps these pixels, its delayed-release path
        owns them; just drop our pointer here.
      */
      if (RelinquishOpenCLBuffer(cache_info) != MagickFalse)
        {
          cache_info->pixels=(PixelPacket *) NULL;
          break;
        }
#endif
      if (cache_info->mapped == MagickFalse)
        cache_info->pixels=(PixelPacket *) RelinquishAlignedMemory(
          cache_info->pixels);
      else
        (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      RelinquishMagickResource(MemoryResource,cache_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      cache_info->pixels=(PixelPacket *) NULL;
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(MapResource,cache_info->length);
      /*
        No break: a map cache also has a backing disk file, so fall
        through to the DiskCache cleanup to close and remove it.
      */
    }
    case DiskCache:
    {
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(DiskResource,cache_info->length);
      break;
    }
    case DistributedCache:
    {
      *cache_info->cache_filename='\0';
      (void) RelinquishDistributePixelCache((DistributeCacheInfo *)
        cache_info->server_info);
      break;
    }
    default:
      break;
  }
  cache_info->type=UndefinedCache;
  cache_info->mapped=MagickFalse;
  cache_info->indexes=(IndexPacket *) NULL;
}
MagickExport Cache DestroyPixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  /*
    Drop one reference under the lock; only the last owner tears the
    cache down, all others return NULL immediately.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count--;
  if (cache_info->reference_count != 0)
    {
      UnlockSemaphoreInfo(cache_info->semaphore);
      return((Cache) NULL);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MaxTextExtent];

      (void) FormatLocaleString(message,MaxTextExtent,"destroy %s",
        cache_info->filename);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /*
    Teardown order: pixel storage, distributed-cache server info, nexus
    pool, random state, semaphores, then the CacheInfo struct itself.
  */
  RelinquishPixelCachePixels(cache_info);
  if (cache_info->server_info != (DistributeCacheInfo *) NULL)
    cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
      cache_info->server_info);
  if (cache_info->nexus_info != (NexusInfo **) NULL)
    cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
      cache_info->number_threads);
  if (cache_info->random_info != (RandomInfo *) NULL)
    cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
  if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
    DestroySemaphoreInfo(&cache_info->file_semaphore);
  if (cache_info->semaphore != (SemaphoreInfo *) NULL)
    DestroySemaphoreInfo(&cache_info->semaphore);
  cache_info->signature=(~MagickCoreSignature);
  cache_info=(CacheInfo *) RelinquishAlignedMemory(cache_info);
  cache=(Cache) NULL;
  return(cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCacheNexus() destroys a pixel cache nexus.
%
% The format of the DestroyPixelCacheNexus() method is:
%
% NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info,
% const size_t number_threads)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus to destroy.
%
% o number_threads: the number of nexus threads.
%
*/
/*
  RelinquishCacheNexusPixels() frees the nexus staging buffer (aligned heap
  allocation or anonymous map) and resets all of its bookkeeping fields.
*/
static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info)
{
  if (nexus_info->mapped != MagickFalse)
    (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length);
  else
    (void) RelinquishAlignedMemory(nexus_info->cache);
  nexus_info->cache=(PixelPacket *) NULL;
  nexus_info->pixels=(PixelPacket *) NULL;
  nexus_info->indexes=(IndexPacket *) NULL;
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
}
MagickExport NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
  const size_t number_threads)
{
  register ssize_t
    i;

  assert(nexus_info != (NexusInfo **) NULL);
  /*
    2*number_threads entries were allocated (see AcquirePixelCacheNexus):
    working nexuses plus their virtual-nexus backing entries.
  */
  for (i=0; i < (ssize_t) (2*number_threads); i++)
  {
    if (nexus_info[i]->cache != (PixelPacket *) NULL)
      RelinquishCacheNexusPixels(nexus_info[i]);
    nexus_info[i]->signature=(~MagickCoreSignature);
  }
  /*
    nexus_info[0] is the single slab holding every NexusInfo struct; free
    it first, then the pointer table itself.
  */
  *nexus_info=(NexusInfo *) RelinquishMagickMemory(*nexus_info);
  nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info);
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c I n d e x e s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticIndexesFromCache() returns the indexes associated with the last
% call to QueueAuthenticPixelsCache() or GetAuthenticPixelsCache().
%
% The format of the GetAuthenticIndexesFromCache() method is:
%
% IndexPacket *GetAuthenticIndexesFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  GetAuthenticIndexesFromCache() is the default handler: it returns the
  index staging area of the calling thread's cache nexus.
*/
static IndexPacket *GetAuthenticIndexesFromCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->indexes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c I n d e x Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticIndexQueue() returns the authentic black channel or the colormap
% indexes associated with the last call to QueueAuthenticPixels() or
% GetVirtualPixels(). NULL is returned if the black channel or colormap
% indexes are not available.
%
% The format of the GetAuthenticIndexQueue() method is:
%
% IndexPacket *GetAuthenticIndexQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport IndexPacket *GetAuthenticIndexQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Prefer the cache's registered handler; otherwise return this thread's
    nexus indexes directly (same as GetAuthenticIndexesFromCache).
  */
  if (cache_info->methods.get_authentic_indexes_from_handler !=
      (GetAuthenticIndexesFromHandler) NULL)
    return(cache_info->methods.get_authentic_indexes_from_handler(image));
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->indexes);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL
% operations.
%
% The format of the GetAuthenticOpenCLBuffer() method is:
%
% cl_mem GetAuthenticOpenCLBuffer(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  cl_context
    context;

  cl_int
    status;

  MagickCLEnv
    clEnv;

  assert(image != (const Image *) NULL);
  cache_info=(CacheInfo *)image->cache;
  /*
    An undefined or shared cache must be synchronized/unshared before we
    may wrap its pixels in an OpenCL buffer.
  */
  if ((cache_info->type == UndefinedCache) || (cache_info->reference_count > 1))
    {
      SyncImagePixelCache((Image *) image,exception);
      cache_info=(CacheInfo *)image->cache;
    }
  /*
    Only heap-backed memory caches qualify; mapped or non-memory caches
    cannot be handed to CL_MEM_USE_HOST_PTR.
  */
  if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse))
    return((cl_mem) NULL);
  LockSemaphoreInfo(cache_info->semaphore);
  clEnv=GetDefaultOpenCLEnv();
  if (cache_info->opencl == (OpenCLCacheInfo *) NULL)
    {
      /*
        Lazily create the OpenCL bookkeeping and a buffer that aliases the
        existing host pixel memory.
      */
      assert(cache_info->pixels != NULL);
      context=GetOpenCLContext(clEnv);
      cache_info->opencl=(OpenCLCacheInfo *) AcquireCriticalMemory(
        sizeof(*cache_info->opencl));
      (void) memset(cache_info->opencl,0,sizeof(*cache_info->opencl));
      cache_info->opencl->events_semaphore=AllocateSemaphoreInfo();
      cache_info->opencl->length=cache_info->length;
      cache_info->opencl->pixels=cache_info->pixels;
      cache_info->opencl->buffer=clEnv->library->clCreateBuffer(context,
        CL_MEM_USE_HOST_PTR,cache_info->length,cache_info->pixels,&status);
      if (status != CL_SUCCESS)
        cache_info->opencl=RelinquishOpenCLCacheInfo(clEnv,cache_info->opencl);
    }
  /*
    Hand out an extra reference so the caller may release independently.
  */
  if (cache_info->opencl != (OpenCLCacheInfo *) NULL)
    clEnv->library->clRetainMemObject(cache_info->opencl->buffer);
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->opencl == (OpenCLCacheInfo *) NULL)
    return((cl_mem) NULL);
  return(cache_info->opencl->buffer);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or
% disk pixel cache as defined by the geometry parameters. A pointer to the
% pixels is returned if the pixels are transferred, otherwise a NULL is
% returned.
%
% The format of the GetAuthenticPixelCacheNexus() method is:
%
% PixelPacket *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket *GetAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  PixelPacket
    *magick_restrict pixels;

  /*
    Map the requested region into the nexus, then transfer pixel data (and
    indexes, when the index channel is active) from the cache backing store
    unless the nexus already aliases the cache memory directly.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue,
    nexus_info,exception);
  if (pixels == (PixelPacket *) NULL)
    return((PixelPacket *) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (nexus_info->authentic_pixel_cache == MagickFalse)
    {
      if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse)
        return((PixelPacket *) NULL);
      if ((cache_info->active_index_channel != MagickFalse) &&
          (ReadPixelCacheIndexes(cache_info,nexus_info,exception) == MagickFalse))
        return((PixelPacket *) NULL);
    }
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsFromCache() returns the pixels associated with the last
% call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods.
%
% The format of the GetAuthenticPixelsFromCache() method is:
%
%      PixelPacket *GetAuthenticPixelsFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static PixelPacket *GetAuthenticPixelsFromCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Return the authentic pixels staged in this thread's cache nexus by the
    most recent queue/get request.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelQueue() returns the authentic pixels associated with the
% last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetAuthenticPixelQueue() method is:
%
%      PixelPacket *GetAuthenticPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport PixelPacket *GetAuthenticPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Return the authentic pixels from the most recent queue/get request,
    delegating to an installed handler when one is registered.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    return(cache_info->methods.get_authentic_pixels_from_handler(image));
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixels() obtains a pixel region for read/write access. If the
% region is successfully accessed, a pointer to a PixelPacket array
% representing the region is returned, otherwise NULL is returned.
%
% The returned pointer may point to a temporary working copy of the pixels
% or it may point to the original pixels in memory. Performance is maximized
% if the selected region is part of one row, or one or more full rows, since
% then there is opportunity to access the pixels in-place (without a copy)
% if the image is in memory, or in a memory-mapped file. The returned pointer
% must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or if the storage class is
% PseudoClass, call GetAuthenticIndexQueue() after invoking
% GetAuthenticPixels() to obtain the black color component or colormap indexes
% (of type IndexPacket) corresponding to the region. Once the PixelPacket
% (and/or IndexPacket) array has been updated, the changes must be saved back
% to the underlying image using SyncAuthenticPixels() or they may be lost.
%
% The format of the GetAuthenticPixels() method is:
%
% PixelPacket *GetAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket *GetAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Obtain a read/write pixel region, delegating to an installed handler
    when one is registered; otherwise resolve through this thread's nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    return(cache_info->methods.get_authentic_pixels_handler(image,x,y,columns,
      rows,exception));
  assert(id < (int) cache_info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache
% as defined by the geometry parameters. A pointer to the pixels is returned
% if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetAuthenticPixelsCache() method is:
%
% PixelPacket *GetAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static PixelPacket *GetAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Default authentic-pixels handler: resolve the requested region through
    this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return((PixelPacket *) NULL);  /* defensive: no cache attached */
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtent() returns the extent of the pixels associated with the
% last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetImageExtent() method is:
%
% MagickSizeType GetImageExtent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickSizeType GetImageExtent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Report the extent (pixel count) of this thread's active cache nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetPixelCacheNexusExtent(cache_info,cache_info->nexus_info[id]));
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O p e n C L E v e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOpenCLEvents() returns the events that the next operation should wait
% for. The argument event_count is set to the number of events.
%
% The format of the GetOpenCLEvents() method is:
%
%      cl_event *GetOpenCLEvents(const Image *image,
%        cl_uint *event_count)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o event_count: will be set to the number of events.
%
*/
extern MagickPrivate cl_event *GetOpenCLEvents(const Image *image,
  cl_uint *event_count)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Copy the outstanding OpenCL events attached to this image's cache, if
    any; *event_count receives the number of events returned.
  */
  assert(image != (const Image *) NULL);
  assert(event_count != (cl_uint *) NULL);
  *event_count=0;
  cache_info=(CacheInfo *) image->cache;
  if (cache_info->opencl == (OpenCLCacheInfo *) NULL)
    return((cl_event *) NULL);
  return(CopyOpenCLEvents(cache_info->opencl,event_count));
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCache() ensures that there is only a single reference to the
% pixel cache to be modified, updating the provided cache pointer to point to
% a clone of the original pixel cache if necessary.
%
% The format of the GetImagePixelCache method is:
%
% Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone: any value other than MagickFalse clones the cache pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType ValidatePixelCacheMorphology(
  const Image *magick_restrict image)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Report MagickTrue only when the image and its pixel cache agree on
    storage class, colorspace, channel count, and geometry, and the cache
    nexus array has been allocated.
  */
  cache_info=(CacheInfo *) image->cache;
  if ((image->storage_class == cache_info->storage_class) &&
      (image->colorspace == cache_info->colorspace) &&
      (image->channels == cache_info->channels) &&
      (image->columns == cache_info->columns) &&
      (image->rows == cache_info->rows) &&
      (cache_info->nexus_info != (NexusInfo **) NULL))
    return(MagickTrue);
  return(MagickFalse);
}
static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  MagickBooleanType
    destroy,
    status;
  /*
    Process-wide limits, initialized lazily on first call.
    NOTE(review): these statics are written without synchronization; the
    initialization appears to rely on benign races — confirm against the
    project's threading assumptions.
  */
  static MagickSizeType
    cache_timelimit = MagickResourceInfinity,
    cpu_throttle = MagickResourceInfinity,
    cycles = 0;
  status=MagickTrue;
  /*
    Optionally throttle the CPU: sleep every 32nd call when a throttle
    resource limit is configured.
  */
  if (cpu_throttle == MagickResourceInfinity)
    cpu_throttle=GetMagickResourceLimit(ThrottleResource);
  if ((cpu_throttle != 0) && ((cycles++ % 32) == 0))
    MagickDelay(cpu_throttle);
  if (cache_epoch == 0)
    {
      /*
        Set the expire time in seconds.
      */
      cache_epoch=GetMagickTime();
      cache_timelimit=GetMagickResourceLimit(TimeResource);
    }
  /*
    Enforce the time resource limit: close any disk cache file and abort
    with a fatal exception once the deadline has passed.
  */
  if ((cache_timelimit != MagickResourceInfinity) &&
      ((MagickSizeType) (GetMagickTime()-cache_epoch) >= cache_timelimit))
    {
#if defined(ECANCELED)
      errno=ECANCELED;
#endif
      cache_info=(CacheInfo *) image->cache;
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
    }
  LockSemaphoreInfo(image->semaphore);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  destroy=MagickFalse;
  /*
    If the cache is shared or read-only, break the sharing by cloning it
    into a fresh cache this image owns exclusively.  The condition is
    re-checked under the cache semaphore (double-checked locking).
  */
  if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
        {
          CacheInfo
            *clone_info;
          Image
            clone_image;
          /*
            Clone pixel cache.
          */
          clone_image=(*image);
          clone_image.semaphore=AllocateSemaphoreInfo();
          clone_image.reference_count=1;
          clone_image.cache=ClonePixelCache(cache_info);
          clone_info=(CacheInfo *) clone_image.cache;
          status=OpenPixelCache(&clone_image,IOMode,exception);
          if (status == MagickFalse)
            clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
          else
            {
              /* Copy pixel data only when the caller asked for a clone. */
              if (clone != MagickFalse)
                status=ClonePixelCacheRepository(clone_info,cache_info,
                  exception);
              if (status == MagickFalse)
                clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
              else
                {
                  /* Success: swap the clone in; release the original below. */
                  destroy=MagickTrue;
                  image->cache=clone_info;
                }
            }
          DestroySemaphoreInfo(&clone_image.semaphore);
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  /* Drop our reference to the old (shared) cache after unlocking it. */
  if (destroy != MagickFalse)
    cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
  if (status != MagickFalse)
    {
      /*
        Ensure the image matches the pixel cache morphology.
      */
      if (image->type != UndefinedType)
        image->type=UndefinedType;
      if (ValidatePixelCacheMorphology(image) == MagickFalse)
        {
          status=OpenPixelCache(image,IOMode,exception);
          cache_info=(CacheInfo *) image->cache;
          if (cache_info->file != -1)
            (void) ClosePixelCacheOnDisk(cache_info);
        }
    }
  UnlockSemaphoreInfo(image->semaphore);
  if (status == MagickFalse)
    return((Cache) NULL);
  return(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
% DiskCache, MapCache, MemoryCache, or PingCache.
%
% The format of the GetImagePixelCacheType() method is:
%
% CacheType GetImagePixelCacheType(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/* Backward-compatible synonym for GetImagePixelCacheType(). */
MagickExport CacheType GetPixelCacheType(const Image *image)
{
  return(GetImagePixelCacheType(image));
}
MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Return the backing-store type of the image's pixel cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e A u t h e n t i c P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixel() method is:
%
%      MagickBooleanType GetOneAuthenticPixel(Image *image,const ssize_t x,
% const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image,
  const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  PixelPacket
    *magick_restrict pixels;

  /*
    Fetch a single authentic pixel, delegating to an installed handler when
    one is registered.  On failure *pixel holds the background color.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *pixel=image->background_color;
  if (cache_info->methods.get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    return(cache_info->methods.get_one_authentic_pixel_from_handler(image,x,y,
      pixel,exception));
  pixels=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception);
  if (pixels == (PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*pixels);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e A u t h e n t i c P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixelFromCache() method is:
%
%      MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
% const ssize_t x,const ssize_t y,PixelPacket *pixel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
  const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  PixelPacket
    *magick_restrict pixels;

  /*
    Default one-authentic-pixel handler: read a 1x1 region through this
    thread's cache nexus; *pixel defaults to the background color.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  *pixel=image->background_color;
  pixels=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  if (pixels == (PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*pixels);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l M a g i c k P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualMagickPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs. If
% you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualMagickPixel() method is:
%
%      MagickBooleanType GetOneVirtualMagickPixel(const Image *image,
%        const ssize_t x,const ssize_t y,MagickPixelPacket *pixel,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: these values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualMagickPixel(const Image *image,
  const ssize_t x,const ssize_t y,MagickPixelPacket *pixel,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const IndexPacket
    *magick_restrict indexes;

  const PixelPacket
    *magick_restrict pixels;

  /*
    Read a single virtual pixel through this thread's cache nexus and
    convert it (plus its index, when present) to a MagickPixelPacket.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  pixels=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    1UL,1UL,cache_info->nexus_info[id],exception);
  /* *pixel is initialized even when the read fails. */
  GetMagickPixelPacket(image,pixel);
  if (pixels == (const PixelPacket *) NULL)
    return(MagickFalse);
  indexes=GetVirtualIndexesFromNexus(cache_info,cache_info->nexus_info[id]);
  SetMagickPixelPacket(image,pixels,indexes,pixel);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l M e t h o d P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualMethodPixel() returns a single pixel at the specified (x,y)
% location as defined by specified pixel method. The image background color
% is returned if an error occurs. If you plan to modify the pixel, use
% GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualMethodPixel() method is:
%
%      MagickBooleanType GetOneVirtualMethodPixel(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualMethodPixel(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const PixelPacket
    *magick_restrict pixels;

  /*
    Read one virtual pixel using the caller-specified virtual pixel method,
    delegating to an installed handler when one is registered.  On failure
    *pixel retains the image background color.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *pixel=image->background_color;
  if (cache_info->methods.get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    return(cache_info->methods.get_one_virtual_pixel_from_handler(image,
      virtual_pixel_method,x,y,pixel,exception));
  assert(id < (int) cache_info->number_threads);
  pixels=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  if (pixels == (const PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*pixels);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixel() returns a single virtual pixel at the specified
% (x,y) location. The image background color is returned if an error occurs.
% If you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixel() method is:
%
%      MagickBooleanType GetOneVirtualPixel(const Image *image,const ssize_t x,
%        const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
  const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const PixelPacket
    *magick_restrict pixels;

  /*
    Read one virtual pixel using the image's current virtual pixel method,
    delegating to an installed handler when one is registered.  On failure
    *pixel retains the image background color.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *pixel=image->background_color;
  if (cache_info->methods.get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    return(cache_info->methods.get_one_virtual_pixel_from_handler(image,
      GetPixelCacheVirtualMethod(image),x,y,pixel,exception));
  assert(id < (int) cache_info->number_threads);
  pixels=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    1UL,1UL,cache_info->nexus_info[id],exception);
  if (pixels == (const PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*pixels);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e V i r t u a l P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelFromCache() returns a single virtual pixel at the
% specified (x,y) location. The image background color is returned if an
% error occurs.
%
% The format of the GetOneVirtualPixelFromCache() method is:
%
%      MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const PixelPacket
    *magick_restrict pixels;

  /*
    Default one-virtual-pixel handler: read a 1x1 region through this
    thread's cache nexus; *pixel defaults to the background color.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  *pixel=image->background_color;
  pixels=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  if (pixels == (const PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*pixels);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheChannels() returns the number of pixel channels associated
% with this instance of the pixel cache.
%
% The format of the GetPixelCacheChannels() method is:
%
% size_t GetPixelCacheChannels(Cache cache)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
*/
MagickExport size_t GetPixelCacheChannels(const Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Return the number of channels per pixel in this cache.
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  return(cache_info->channels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheColorspace() returns the colorspace of the pixel cache.
%
% The format of the GetPixelCacheColorspace() method is:
%
% Colorspace GetPixelCacheColorspace(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickExport ColorspaceType GetPixelCacheColorspace(const Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Return the colorspace the cache's pixels are stored in.
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  return(cache_info->colorspace);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheFilename() returns the filename associated with the pixel
% cache.
%
% The format of the GetPixelCacheFilename() method is:
%
% const char *GetPixelCacheFilename(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const char *GetPixelCacheFilename(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Return the filename of the disk file backing the pixel cache.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->cache_filename);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheMethods() initializes the CacheMethods structure.
%
% The format of the GetPixelCacheMethods() method is:
%
% void GetPixelCacheMethods(CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickExport void GetPixelCacheMethods(CacheMethods *cache_methods)
{
  /*
    Populate cache_methods with the default pixel cache handlers.
  */
  assert(cache_methods != (CacheMethods *) NULL);
  (void) memset(cache_methods,0,sizeof(*cache_methods));
  /*
    Virtual (read-only) pixel accessors.
  */
  cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache;
  cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
  cache_methods->get_virtual_indexes_from_handler=GetVirtualIndexesFromCache;
  cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache;
  /*
    Authentic (read/write) pixel accessors.
  */
  cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
  cache_methods->get_authentic_indexes_from_handler=
    GetAuthenticIndexesFromCache;
  cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
  cache_methods->get_one_authentic_pixel_from_handler=
    GetOneAuthenticPixelFromCache;
  /*
    Queue, synchronize, and destroy handlers.
  */
  cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
  cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
  cache_methods->destroy_pixel_handler=DestroyImagePixelCache;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e N e x u s E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheNexusExtent() returns the extent of the pixels associated with
% the last call to SetPixelCacheNexusPixels() or GetPixelCacheNexusPixels().
%
% The format of the GetPixelCacheNexusExtent() method is:
%
% MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus info.
%
*/
MagickExport MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
  NexusInfo *nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickSizeType
    extent;

  /*
    Compute the pixel count of the nexus region; an empty region means the
    nexus spans the entire cache, so fall back to columns*rows.
  */
  assert(cache != NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
  if (extent != 0)
    return(extent);
  return((MagickSizeType) cache_info->columns*cache_info->rows);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCachePixels() returns the pixels associated with the specified image.
%
% The format of the GetPixelCachePixels() method is:
%
% void *GetPixelCachePixels(Image *image,MagickSizeType *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Return the in-core pixel array (and its byte length) when the cache is
    memory- or map-backed; for any other backing store report NULL.  Note
    *length is set even when NULL is returned, matching historic behavior.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  assert(length != (MagickSizeType *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  (void) exception;  /* reserved for future error reporting */
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *length=cache_info->length;
  if ((cache_info->type == MemoryCache) || (cache_info->type == MapCache))
    return((void *) cache_info->pixels);
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheStorageClass() returns the class type of the pixel cache.
%
% The format of the GetPixelCacheStorageClass() method is:
%
% ClassType GetPixelCacheStorageClass(Cache cache)
%
% A description of each parameter follows:
%
% o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass.
%
% o cache: the pixel cache.
%
*/
MagickExport ClassType GetPixelCacheStorageClass(const Cache cache)
{
  CacheInfo
    *magick_restrict info;

  /*
    Report whether the cache stores DirectClass or PseudoClass pixels.
  */
  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  return(info->storage_class);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e T i l e S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheTileSize() returns the pixel cache tile size.
%
% The format of the GetPixelCacheTileSize() method is:
%
% void GetPixelCacheTileSize(const Image *image,size_t *width,
% size_t *height)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the optimize cache tile width in pixels.
%
% o height: the optimize cache tile height in pixels.
%
*/
MagickExport void GetPixelCacheTileSize(const Image *image,size_t *width,
  size_t *height)
{
  size_t
    tile;

  /*
    Recommend a square tile size: a larger tile for disk-backed caches (to
    amortize I/O per access) than for in-memory caches.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  tile=2048UL/sizeof(PixelPacket);
  if (GetImagePixelCacheType(image) == DiskCache)
    tile=8192UL/sizeof(PixelPacket);
  *width=tile;
  *height=tile;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
% pixel cache. A virtual pixel is any pixel access that is outside the
% boundaries of the image cache.
%
% The format of the GetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
{
  CacheInfo
    *magick_restrict info;

  /*
    Report the virtual pixel method used for accesses outside the image
    cache boundaries.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  return(info->virtual_pixel_method);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l I n d e x e s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualIndexesFromCache() returns the indexes associated with the last
% call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualIndexesFromCache() method is:
%
% const IndexPacket *GetVirtualIndexesFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const IndexPacket *GetVirtualIndexesFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(GetVirtualIndexesFromNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l I n d e x e s F r o m N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualIndexesFromNexus() returns the indexes associated with the
% specified cache nexus.
%
% The format of the GetVirtualIndexesFromNexus() method is:
%
% const IndexPacket *GetVirtualIndexesFromNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the colormap indexes.
%
*/
MagickExport const IndexPacket *GetVirtualIndexesFromNexus(const Cache cache,
  NexusInfo *nexus_info)
{
  CacheInfo
    *magick_restrict info;

  /*
    Return the indexes attached to the nexus, or NULL when the cache has
    not been opened yet (storage class still undefined).
  */
  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->storage_class != UndefinedClass)
    return(nexus_info->indexes);
  return((IndexPacket *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l I n d e x Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualIndexQueue() returns the virtual black channel or the
% colormap indexes associated with the last call to QueueAuthenticPixels() or
% GetVirtualPixels(). NULL is returned if the black channel or colormap
% indexes are not available.
%
% The format of the GetVirtualIndexQueue() method is:
%
% const IndexPacket *GetVirtualIndexQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const IndexPacket *GetVirtualIndexQueue(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.get_virtual_indexes_from_handler !=
(GetVirtualIndexesFromHandler) NULL)
return(cache_info->methods.get_virtual_indexes_from_handler(image));
assert(id < (int) cache_info->number_threads);
return(GetVirtualIndexesFromNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCacheNexus() gets virtual pixels from the in-memory or disk
% pixel cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCacheNexus() method is:
%
% PixelPacket *GetVirtualPixelCacheNexus(const Image *image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% const size_t columns,const size_t rows,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to acquire.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  8x8 ordered-dither (Bayer-style) offset table; DitherX()/DitherY() read
  only the first 8 entries (index is masked with 0x07).
*/
static ssize_t
  DitherMatrix[64] =
  {
     0, 48, 12, 60,  3, 51, 15, 63,
    32, 16, 44, 28, 35, 19, 47, 31,
     8, 56,  4, 52, 11, 59,  7, 55,
    40, 24, 36, 20, 43, 27, 39, 23,
     2, 50, 14, 62,  1, 49, 13, 61,
    34, 18, 46, 30, 33, 17, 45, 29,
    10, 58,  6, 54,  9, 57,  5, 53,
    42, 26, 38, 22, 41, 25, 37, 21
  };

/*
  Perturb x by the dither table (bias -32) and clamp to [0, columns-1].
*/
static inline ssize_t DitherX(const ssize_t x,const size_t columns)
{
  ssize_t
    offset;

  offset=x+DitherMatrix[x & 0x07]-32L;
  if (offset < 0L)
    offset=0L;
  else
    if (offset >= (ssize_t) columns)
      offset=(ssize_t) columns-1L;
  return(offset);
}

/*
  Perturb y by the dither table (bias -32) and clamp to [0, rows-1].
*/
static inline ssize_t DitherY(const ssize_t y,const size_t rows)
{
  ssize_t
    offset;

  offset=y+DitherMatrix[y & 0x07]-32L;
  if (offset < 0L)
    offset=0L;
  else
    if (offset >= (ssize_t) rows)
      offset=(ssize_t) rows-1L;
  return(offset);
}
/*
  Clamp x into [0, columns-1]: the "replicate edge" virtual pixel policy.
*/
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  if (x >= (ssize_t) columns)
    return((ssize_t) (columns-1));
  return(x < 0L ? 0L : x);
}
/*
  Clamp y into [0, rows-1]: the "replicate edge" virtual pixel policy.
*/
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  if (y >= (ssize_t) rows)
    return((ssize_t) (rows-1));
  return(y < 0L ? 0L : y);
}
/* Select a random x offset for RandomVirtualPixelMethod.  Assumes
   GetPseudoRandomValue() returns a value in [0,1) so the result lies in
   [0, columns-1] -- TODO confirm against the random-number source. */
static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns)
{
return((ssize_t) (columns*GetPseudoRandomValue(random_info)));
}
/* Select a random y offset for RandomVirtualPixelMethod.  Assumes
   GetPseudoRandomValue() returns a value in [0,1) so the result lies in
   [0, rows-1] -- TODO confirm against the random-number source. */
static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows)
{
return((ssize_t) (rows*GetPseudoRandomValue(random_info)));
}
static inline MagickModulo VirtualPixelModulo(const ssize_t offset,
  const size_t extent)
{
  MagickModulo
    result;

  ssize_t
    modulus;

  /*
    Floored division: unlike C's truncating '/' and '%', the remainder is
    always in [0, extent-1], which is what the tiling virtual pixel
    methods require for negative offsets.
  */
  modulus=(ssize_t) extent;
  result.quotient=offset/modulus;
  result.remainder=offset % modulus;
  if ((result.remainder != 0) && ((offset ^ modulus) < 0))
    {
      result.quotient--;
      result.remainder+=modulus;
    }
  return(result);
}
MagickExport const PixelPacket *GetVirtualPixelCacheNexus(const Image *image,
const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
const size_t columns,const size_t rows,NexusInfo *nexus_info,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
IndexPacket
virtual_index;
MagickOffsetType
offset;
MagickSizeType
length,
number_pixels;
NexusInfo
*magick_restrict virtual_nexus;
PixelPacket
*magick_restrict pixels,
virtual_pixel;
register const IndexPacket
*magick_restrict virtual_indexes;
register const PixelPacket
*magick_restrict p;
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
u,
v;
/*
Acquire pixels.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->type == UndefinedCache)
return((const PixelPacket *) NULL);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
CopyOpenCLBuffer(cache_info);
#endif
/* Stage a pixel buffer for the requested region; a clip or composite mask
forces a buffered (non in-place) nexus. */
pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,x,y,columns,rows,
(image->clip_mask != (Image *) NULL) || (image->mask != (Image *) NULL) ?
MagickTrue : MagickFalse,nexus_info,exception);
if (pixels == (PixelPacket *) NULL)
return((const PixelPacket *) NULL);
/* Fast path: when the request lies entirely inside the cache extent, read
the pixels (and indexes for PseudoClass/CMYK) directly. */
offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
nexus_info->region.x;
length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+
nexus_info->region.width-1L;
number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels))
if ((x >= 0) && ((ssize_t) (x+columns) <= (ssize_t) cache_info->columns) &&
(y >= 0) && ((ssize_t) (y+rows) <= (ssize_t) cache_info->rows))
{
MagickBooleanType
status;
/*
Pixel request is inside cache extents.
*/
if (nexus_info->authentic_pixel_cache != MagickFalse)
return(pixels);
status=ReadPixelCachePixels(cache_info,nexus_info,exception);
if (status == MagickFalse)
return((const PixelPacket *) NULL);
if ((cache_info->storage_class == PseudoClass) ||
(cache_info->colorspace == CMYKColorspace))
{
status=ReadPixelCacheIndexes(cache_info,nexus_info,exception);
if (status == MagickFalse)
return((const PixelPacket *) NULL);
}
return(pixels);
}
/*
Pixel request is outside cache extents.
*/
virtual_nexus=nexus_info->virtual_nexus;
q=pixels;
indexes=nexus_info->indexes;
/* Choose the constant fill color used by the constant-style virtual pixel
methods; the default case uses the image background color. */
switch (virtual_pixel_method)
{
case BlackVirtualPixelMethod:
{
SetPixelRed(&virtual_pixel,0);
SetPixelGreen(&virtual_pixel,0);
SetPixelBlue(&virtual_pixel,0);
SetPixelOpacity(&virtual_pixel,OpaqueOpacity);
break;
}
case GrayVirtualPixelMethod:
{
SetPixelRed(&virtual_pixel,QuantumRange/2);
SetPixelGreen(&virtual_pixel,QuantumRange/2);
SetPixelBlue(&virtual_pixel,QuantumRange/2);
SetPixelOpacity(&virtual_pixel,OpaqueOpacity);
break;
}
case TransparentVirtualPixelMethod:
{
SetPixelRed(&virtual_pixel,0);
SetPixelGreen(&virtual_pixel,0);
SetPixelBlue(&virtual_pixel,0);
SetPixelOpacity(&virtual_pixel,TransparentOpacity);
break;
}
case MaskVirtualPixelMethod:
case WhiteVirtualPixelMethod:
{
SetPixelRed(&virtual_pixel,QuantumRange);
SetPixelGreen(&virtual_pixel,QuantumRange);
SetPixelBlue(&virtual_pixel,QuantumRange);
SetPixelOpacity(&virtual_pixel,OpaqueOpacity);
break;
}
default:
{
virtual_pixel=image->background_color;
break;
}
}
virtual_index=(IndexPacket) 0;
/* Walk the requested region row by row; each row is copied as runs of
in-bounds pixels plus single out-of-bounds (virtual) pixels. */
for (v=0; v < (ssize_t) rows; v++)
{
ssize_t
y_offset;
y_offset=y+v;
if ((virtual_pixel_method == EdgeVirtualPixelMethod) ||
(virtual_pixel_method == UndefinedVirtualPixelMethod))
y_offset=EdgeY(y_offset,cache_info->rows);
/* NOTE: "length" is reused below as the run length of in-bounds pixels
remaining on this row. */
for (u=0; u < (ssize_t) columns; u+=length)
{
ssize_t
x_offset;
x_offset=x+u;
length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u);
if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) ||
((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) ||
(length == 0))
{
MagickModulo
x_modulo,
y_modulo;
/*
Transfer a single pixel.
*/
length=(MagickSizeType) 1;
/* Each case resolves one virtual pixel (p, virtual_indexes), either
from the constant fill above or by a recursive 1x1 fetch at a
remapped in-bounds coordinate. */
switch (virtual_pixel_method)
{
case BackgroundVirtualPixelMethod:
case ConstantVirtualPixelMethod:
case BlackVirtualPixelMethod:
case GrayVirtualPixelMethod:
case TransparentVirtualPixelMethod:
case MaskVirtualPixelMethod:
case WhiteVirtualPixelMethod:
{
p=(&virtual_pixel);
virtual_indexes=(&virtual_index);
break;
}
case EdgeVirtualPixelMethod:
default:
{
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
EdgeX(x_offset,cache_info->columns),
EdgeY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
exception);
virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
virtual_nexus);
break;
}
case RandomVirtualPixelMethod:
{
/* Random source is created lazily on first use. */
if (cache_info->random_info == (RandomInfo *) NULL)
cache_info->random_info=AcquireRandomInfo();
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
RandomX(cache_info->random_info,cache_info->columns),
RandomY(cache_info->random_info,cache_info->rows),1UL,1UL,
virtual_nexus,exception);
virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
virtual_nexus);
break;
}
case DitherVirtualPixelMethod:
{
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
DitherX(x_offset,cache_info->columns),
DitherY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
exception);
virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
virtual_nexus);
break;
}
case TileVirtualPixelMethod:
{
x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
exception);
virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
virtual_nexus);
break;
}
case MirrorVirtualPixelMethod:
{
/* Odd tile quotients are reflected to mirror the image. */
x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
if ((x_modulo.quotient & 0x01) == 1L)
x_modulo.remainder=(ssize_t) cache_info->columns-
x_modulo.remainder-1L;
y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
if ((y_modulo.quotient & 0x01) == 1L)
y_modulo.remainder=(ssize_t) cache_info->rows-
y_modulo.remainder-1L;
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
exception);
virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
virtual_nexus);
break;
}
case CheckerTileVirtualPixelMethod:
{
x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L)
{
p=(&virtual_pixel);
virtual_indexes=(&virtual_index);
break;
}
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
exception);
virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
virtual_nexus);
break;
}
case HorizontalTileVirtualPixelMethod:
{
if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows))
{
p=(&virtual_pixel);
virtual_indexes=(&virtual_index);
break;
}
x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
exception);
virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
virtual_nexus);
break;
}
case VerticalTileVirtualPixelMethod:
{
if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns))
{
p=(&virtual_pixel);
virtual_indexes=(&virtual_index);
break;
}
x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
exception);
virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
virtual_nexus);
break;
}
case HorizontalTileEdgeVirtualPixelMethod:
{
x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL,
virtual_nexus,exception);
virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
virtual_nexus);
break;
}
case VerticalTileEdgeVirtualPixelMethod:
{
y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL,
virtual_nexus,exception);
virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
virtual_nexus);
break;
}
}
if (p == (const PixelPacket *) NULL)
break;
*q++=(*p);
if ((indexes != (IndexPacket *) NULL) &&
(virtual_indexes != (const IndexPacket *) NULL))
*indexes++=(*virtual_indexes);
continue;
}
/*
Transfer a run of pixels.
*/
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x_offset,y_offset,
(size_t) length,1UL,virtual_nexus,exception);
if (p == (const PixelPacket *) NULL)
break;
virtual_indexes=GetVirtualIndexesFromNexus(cache_info,virtual_nexus);
(void) memcpy(q,p,(size_t) length*sizeof(*p));
q+=length;
if ((indexes != (IndexPacket *) NULL) &&
(virtual_indexes != (const IndexPacket *) NULL))
{
(void) memcpy(indexes,virtual_indexes,(size_t) length*
sizeof(*virtual_indexes));
indexes+=length;
}
}
if (u < (ssize_t) columns)
break;
}
/*
Free resources.
*/
/* An early break above (failed recursive fetch) leaves v/u short of the
requested extent; report failure with NULL. */
if (v < (ssize_t) rows)
return((const PixelPacket *) NULL);
return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCache() get virtual pixels from the in-memory or disk pixel
% cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCache() method is:
%
% const PixelPacket *GetVirtualPixelCache(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const PixelPacket *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;

  const int
    id = GetOpenMPThreadId();

  /*
    Fetch virtual pixels through this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(id < (int) info->number_threads);
  return(GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,columns,
    rows,info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelQueue() returns the virtual pixels associated with the
% last call to QueueAuthenticPixels() or GetVirtualPixels().
%
% The format of the GetVirtualPixelQueue() method is:
%
% const PixelPacket *GetVirtualPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const PixelPacket *GetVirtualPixelQueue(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.get_virtual_pixels_handler !=
(GetVirtualPixelsHandler) NULL)
return(cache_info->methods.get_virtual_pixels_handler(image));
assert(id < (int) cache_info->number_threads);
return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixels() returns an immutable pixel region. If the
% region is successfully accessed, a pointer to it is returned, otherwise
% NULL is returned. The returned pointer may point to a temporary working
% copy of the pixels or it may point to the original pixels in memory.
% Performance is maximized if the selected region is part of one row, or one
% or more full rows, since there is opportunity to access the pixels in-place
% (without a copy) if the image is in memory, or in a memory-mapped file. The
% returned pointer must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticIndexQueue() after invoking GetAuthenticPixels() to access
% the black color component or to obtain the colormap indexes (of type
% IndexPacket) corresponding to the region.
%
% If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
% Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread-
% safe. In a threaded environment, use GetCacheViewVirtualPixels() or
% GetCacheViewAuthenticPixels() instead.
%
% The format of the GetVirtualPixels() method is:
%
% const PixelPacket *GetVirtualPixels(const Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport const PixelPacket *GetVirtualPixels(const Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;

  const int
    id = GetOpenMPThreadId();

  VirtualPixelMethod
    method;

  /*
    Return an immutable pixel region, delegating to an installed virtual
    pixel handler when one is present.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  method=GetPixelCacheVirtualMethod(image);
  if (info->methods.get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
    return(info->methods.get_virtual_pixel_handler(image,method,x,y,columns,
      rows,exception));
  assert(id < (int) info->number_threads);
  return(GetVirtualPixelCacheNexus(image,method,x,y,columns,rows,
    info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsCache() returns the pixels associated with the last call
% to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualPixelsCache() method is:
%
% const PixelPacket *GetVirtualPixelsCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const PixelPacket *GetVirtualPixelsCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(GetVirtualPixelsNexus(image->cache,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsNexus() returns the pixels associated with the specified
% cache nexus.
%
% The format of the GetVirtualPixelsNexus() method is:
%
% const PixelPacket *GetVirtualPixelsNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the colormap pixels.
%
*/
MagickExport const PixelPacket *GetVirtualPixelsNexus(const Cache cache,
  NexusInfo *nexus_info)
{
  CacheInfo
    *magick_restrict info;

  /*
    Return the pixels attached to the nexus, or NULL when the cache has
    not been opened yet (storage class still undefined).
  */
  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->storage_class != UndefinedClass)
    return((const PixelPacket *) nexus_info->pixels);
  return((PixelPacket *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a s k P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MaskPixelCacheNexus() masks the cache nexus as defined by the image mask.
% The method returns MagickTrue if the pixel region is masked, otherwise
% MagickFalse.
%
% The format of the MaskPixelCacheNexus() method is:
%
% MagickBooleanType MaskPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Blend pixel p (weight alpha) over pixel q (weight beta) into *composite
   using the "over" operator via MagickOver_().  alpha/beta are opacity
   values; gamma renormalizes the result. */
static inline void ApplyPixelCompositeMask(const MagickPixelPacket *p,
const MagickRealType alpha,const MagickPixelPacket *q,
const MagickRealType beta,MagickPixelPacket *composite)
{
double
gamma;
/* Fully transparent source: the destination passes through unchanged. */
if (fabs(alpha-TransparentOpacity) < MagickEpsilon)
{
*composite=(*q);
return;
}
gamma=1.0-QuantumScale*QuantumScale*alpha*beta;
/* PerceptibleReciprocal() presumably guards against division by a
   near-zero gamma -- confirm in the project's private headers. */
gamma=PerceptibleReciprocal(gamma);
composite->red=gamma*MagickOver_(p->red,alpha,q->red,beta);
composite->green=gamma*MagickOver_(p->green,alpha,q->green,beta);
composite->blue=gamma*MagickOver_(p->blue,alpha,q->blue,beta);
/* The index channel carries black (K) only when both pixels are CMYK. */
if ((p->colorspace == CMYKColorspace) && (q->colorspace == CMYKColorspace))
composite->index=gamma*MagickOver_(p->index,alpha,q->index,beta);
}
static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    n;

  MagickPixelPacket
    alpha,
    beta;

  NexusInfo
    **magick_restrict mask_nexus;

  register const PixelPacket
    *magick_restrict r;

  register IndexPacket
    *magick_restrict nexus_indexes,
    *magick_restrict indexes;

  register PixelPacket
    *magick_restrict p,
    *magick_restrict q;

  ssize_t
    y;

  /*
    Apply the image's composite mask to the nexus: blend the just-written
    pixels (q) with the previously stored pixels (p), weighted by the mask
    pixels (r).  Returns MagickTrue on success, MagickFalse on failure.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->mask == (Image *) NULL) || (image->storage_class == PseudoClass))
    return(MagickTrue);  /* nothing to do: no mask, or colormapped image */
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);  /* empty region */
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  mask_nexus=AcquirePixelCacheNexus(1);
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,
    nexus_info->region.y,nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  indexes=nexus_info->virtual_nexus->indexes;
  q=nexus_info->pixels;
  nexus_indexes=nexus_info->indexes;
  r=GetVirtualPixelCacheNexus(image->mask,MaskVirtualPixelMethod,
    nexus_info->region.x,nexus_info->region.y,nexus_info->region.width,
    nexus_info->region.height,mask_nexus[0],&image->exception);
  if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL) ||
      (r == (const PixelPacket *) NULL))
    {
      /*
        Bug fix: the original returned here without destroying mask_nexus,
        leaking the nexus on every failed mask fetch.
      */
      mask_nexus=DestroyPixelCacheNexus(mask_nexus,1);
      return(MagickFalse);
    }
  n=0;
  GetMagickPixelPacket(image,&alpha);
  GetMagickPixelPacket(image,&beta);
  for (y=0; y < (ssize_t) nexus_info->region.height; y++)
  {
    register ssize_t
      x;

    for (x=0; x < (ssize_t) nexus_info->region.width; x++)
    {
      SetMagickPixelPacket(image,p,indexes+n,&alpha);
      SetMagickPixelPacket(image,q,nexus_indexes+n,&beta);
      /*
        Composite the new pixel over the old one, weighted by the mask
        intensity, then clamp back to quantum range.
      */
      ApplyPixelCompositeMask(&beta,GetPixelIntensity(image,r),&alpha,
        alpha.opacity,&beta);
      SetPixelRed(q,ClampToQuantum(beta.red));
      SetPixelGreen(q,ClampToQuantum(beta.green));
      SetPixelBlue(q,ClampToQuantum(beta.blue));
      SetPixelOpacity(q,ClampToQuantum(beta.opacity));
      if (cache_info->active_index_channel != MagickFalse)
        SetPixelIndex(nexus_indexes+n,GetPixelIndex(indexes+n));
      p++;
      q++;
      r++;
      n++;
    }
  }
  mask_nexus=DestroyPixelCacheNexus(mask_nexus,1);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p e n P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpenPixelCache() allocates the pixel cache. This includes defining the cache
% dimensions, allocating space for the image pixels and optionally the
% colormap indexes, and memory mapping the cache if it is disk based. The
% cache nexus array is initialized as well.
%
% The format of the OpenPixelCache() method is:
%
% MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Open (or reuse) the disk file backing the pixel cache in the requested
   mode.  Returns MagickTrue on success; on failure the cache_info file
   state is left unchanged. */
static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
const MapMode mode)
{
int
file;
/*
Open pixel cache on disk.
*/
if ((cache_info->file != -1) && (cache_info->disk_mode == mode))
return(MagickTrue); /* cache already open and in the proper mode */
/* No filename yet: create a fresh unique temporary cache file. */
if (*cache_info->cache_filename == '\0')
file=AcquireUniqueFileResource(cache_info->cache_filename);
else
switch (mode)
{
case ReadMode:
{
file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
break;
}
case WriteMode:
{
/* Try exclusive create first; fall back to opening the existing file. */
file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT |
O_BINARY | O_EXCL,S_MODE);
if (file == -1)
file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE);
break;
}
case IOMode:
default:
{
/* Read/write: same exclusive-create-then-open fallback as WriteMode. */
file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY |
O_EXCL,S_MODE);
if (file == -1)
file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE);
break;
}
}
if (file == -1)
return(MagickFalse);
(void) AcquireMagickResource(FileResource,1);
/* Release any previously open descriptor before recording the new one. */
if (cache_info->file != -1)
(void) ClosePixelCacheOnDisk(cache_info);
cache_info->file=file;
cache_info->disk_mode=mode;
return(MagickTrue);
}
/*
  WritePixelCacheRegion() writes `length' bytes from `buffer' to the pixel
  cache file starting at byte `offset'.  It returns the number of bytes
  actually written, which is less than `length' on failure (or -1 if the
  initial seek fails).  Short writes and EINTR interruptions are retried.
*/
static inline MagickOffsetType WritePixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PWRITE)
  /* without pwrite(), position the shared file offset explicitly */
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
    /* each write is clamped to SSIZE_MAX, the largest portable write size */
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX));
#else
    count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        /* reset so the loop index does not move; retry only on EINTR */
        count=0;
        if (errno != EINTR)
          break;
      }
  }
  return(i);
}
/*
  SetPixelCacheExtent() grows the disk cache file to at least `length' bytes.
  The file is extended sparsely by writing a single byte at length-1; when
  cache synchronization is requested, posix_fallocate() reserves the blocks
  for real.  Returns MagickTrue on success.
*/
static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    count,
    extent,
    offset;

  cache_info=(CacheInfo *) image->cache;
  if (image->debug != MagickFalse)
    {
      char
        format[MaxTextExtent],
        message[MaxTextExtent];

      (void) FormatMagickSize(length,MagickFalse,format);
      (void) FormatLocaleString(message,MaxTextExtent,
        "extend %s (%s[%d], disk, %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* current file size; also validates the descriptor is seekable */
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    count=(MagickOffsetType) 1;  /* already large enough; nothing to do */
  else
    {
      /* write one byte at the last requested position to extend the file */
      extent=(MagickOffsetType) length-1;
      count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *)
        "");
      if (count != 1)
        return(MagickFalse);
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
      /* optionally back the sparse region with allocated blocks */
      if (cache_info->synchronize != MagickFalse)
        if (posix_fallocate(cache_info->file,offset+1,extent-offset) != 0)
          return(MagickFalse);
#endif
    }
  /* rewind so subsequent reads/writes start at the beginning */
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET);
  if (offset < 0)
    return(MagickFalse);
  return(MagickTrue);
}
static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    source_info;

  char
    format[MaxTextExtent],
    message[MaxTextExtent];

  const char
    *hosts,
    *type;

  MagickSizeType
    length,
    number_pixels;

  MagickStatusType
    status;

  size_t
    columns,
    packet_size;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (cache_anonymous_memory < 0)
    {
      char
        *value;

      /*
        Does the security policy require anonymous mapping for pixel cache?
        (Evaluated once; cache_anonymous_memory < 0 means "not yet checked".)
      */
      cache_anonymous_memory=0;
      value=GetPolicyValue("pixel-cache-memory");
      if (value == (char *) NULL)
        value=GetPolicyValue("cache:memory-map");
      if (LocaleCompare(value,"anonymous") == 0)
        {
#if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS)
          cache_anonymous_memory=1;
#else
          (void) ThrowMagickException(exception,GetMagickModule(),
            MissingDelegateError,"DelegateLibrarySupportNotBuiltIn",
            "'%s' (policy requires anonymous memory mapping)",image->filename);
#endif
        }
      value=DestroyString(value);
    }
  if ((image->columns == 0) || (image->rows == 0))
    ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (((MagickSizeType) image->columns > cache_info->width_limit) ||
      ((MagickSizeType) image->rows > cache_info->height_limit))
    ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit",
      image->filename);
  length=GetImageListLength(image);
  if (AcquireMagickResource(ListLengthResource,length) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"ListLengthExceedsLimit",
      image->filename);
  /*
    Snapshot the current cache so its contents can be cloned into the new
    storage (and its pixels released) once the new cache is open.
  */
  source_info=(*cache_info);
  source_info.file=(-1);
  (void) FormatLocaleString(cache_info->filename,MaxTextExtent,"%s[%.20g]",
    image->filename,(double) image->scene);
  cache_info->mode=mode;
  cache_info->rows=image->rows;
  cache_info->columns=image->columns;
  cache_info->channels=image->channels;
  cache_info->active_index_channel=((image->storage_class == PseudoClass) ||
    (image->colorspace == CMYKColorspace)) ? MagickTrue : MagickFalse;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  packet_size=sizeof(PixelPacket);
  if (cache_info->active_index_channel != MagickFalse)
    packet_size+=sizeof(IndexPacket);
  length=number_pixels*packet_size;
  /*
    Recompute the column count from the product; a mismatch means the
    multiplication above overflowed.
  */
  columns=(size_t) (length/cache_info->rows/packet_size);
  if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) ||
      ((ssize_t) cache_info->rows < 0))
    ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed",
      image->filename);
  cache_info->length=length;
  if (image->ping != MagickFalse)
    {
      /*
        Ping mode records geometry only; no pixels are ever materialized.
      */
      cache_info->storage_class=image->storage_class;
      cache_info->colorspace=image->colorspace;
      cache_info->type=PingCache;
      return(MagickTrue);
    }
  status=AcquireMagickResource(AreaResource,(MagickSizeType)
    cache_info->columns*cache_info->rows);
  if (cache_info->mode == PersistMode)
    status=MagickFalse;  /* persistent caches bypass the memory cache */
  length=number_pixels*(sizeof(PixelPacket)+sizeof(IndexPacket));
  /*
    Strategy 1: heap or anonymous-mapped memory cache, if the area resource
    was granted and the length fits in a size_t.
  */
  if ((status != MagickFalse) &&
      (length == (MagickSizeType) ((size_t) length)) &&
      ((cache_info->type == UndefinedCache) ||
       (cache_info->type == MemoryCache)))
    {
      status=AcquireMagickResource(MemoryResource,cache_info->length);
      if (status != MagickFalse)
        {
          status=MagickTrue;
          if (cache_anonymous_memory <= 0)
            {
              cache_info->mapped=MagickFalse;
              cache_info->pixels=(PixelPacket *) MagickAssumeAligned(
                AcquireAlignedMemory(1,(size_t) cache_info->length));
            }
          else
            {
              cache_info->mapped=MagickTrue;
              cache_info->pixels=(PixelPacket *) MapBlob(-1,IOMode,0,(size_t)
                cache_info->length);
            }
          if (cache_info->pixels == (PixelPacket *) NULL)
            {
              /*
                Allocation failed: restore the previous pixels and fall
                through to the disk-based strategies below.
              */
              cache_info->mapped=source_info.mapped;
              cache_info->pixels=source_info.pixels;
            }
          else
            {
              /*
                Create memory pixel cache.
              */
              cache_info->colorspace=image->colorspace;
              cache_info->type=MemoryCache;
              cache_info->indexes=(IndexPacket *) NULL;
              /* indexes live immediately after the pixel array */
              if (cache_info->active_index_channel != MagickFalse)
                cache_info->indexes=(IndexPacket *) (cache_info->pixels+
                  number_pixels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status&=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MaxTextExtent,
                    "open %s (%s %s, %.20gx%.20g %s)",cache_info->filename,
                    cache_info->mapped != MagickFalse ? "Anonymous" : "Heap",
                    type,(double) cache_info->columns,(double) cache_info->rows,
                    format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              cache_info->storage_class=image->storage_class;
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
    }
  status=AcquireMagickResource(DiskResource,cache_info->length);
  hosts=(const char *) GetImageRegistry(StringRegistryType,"cache:hosts",
    exception);
  /*
    Strategy 2: distributed cache on a remote server, only when disk space
    was denied and cache hosts are registered.
  */
  if ((status == MagickFalse) && (hosts != (const char *) NULL))
    {
      DistributeCacheInfo
        *server_info;

      /*
        Distribute the pixel cache to a remote server.
      */
      server_info=AcquireDistributeCacheInfo(exception);
      if (server_info != (DistributeCacheInfo *) NULL)
        {
          status=OpenDistributePixelCache(server_info,image);
          if (status == MagickFalse)
            {
              ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
                GetDistributeCacheHostname(server_info));
              server_info=DestroyDistributeCacheInfo(server_info);
            }
          else
            {
              /*
                Create a distributed pixel cache.
              */
              status=MagickTrue;
              cache_info->type=DistributedCache;
              cache_info->storage_class=image->storage_class;
              cache_info->colorspace=image->colorspace;
              cache_info->server_info=server_info;
              (void) FormatLocaleString(cache_info->cache_filename,
                MaxTextExtent,"%s:%d",GetDistributeCacheHostname(
                (DistributeCacheInfo *) cache_info->server_info),
                GetDistributeCachePort((DistributeCacheInfo *)
                cache_info->server_info));
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickFalse,
                    format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MaxTextExtent,
                    "open %s (%s[%d], %s, %.20gx%.20g %s)",cache_info->filename,
                    cache_info->cache_filename,GetDistributeCacheFile(
                    (DistributeCacheInfo *) cache_info->server_info),type,
                    (double) cache_info->columns,(double) cache_info->rows,
                    format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
      /* no server available: neither disk nor distributed cache possible */
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Create pixel cache on disk.
  */
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) &&
      (cache_info->mode != PersistMode))
    {
      /*
        Discard the previous cache file; a fresh one is created below.
      */
      (void) ClosePixelCacheOnDisk(cache_info);
      *cache_info->cache_filename='\0';
    }
  if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
        image->filename);
      return(MagickFalse);
    }
  status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+
    cache_info->length);
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      ThrowFileException(exception,CacheError,"UnableToExtendCache",
        image->filename);
      return(MagickFalse);
    }
  cache_info->storage_class=image->storage_class;
  cache_info->colorspace=image->colorspace;
  /*
    Strategy 3: memory-map the disk file (MapCache) if the size fits in a
    size_t and the map resource is granted; otherwise fall back to a plain
    DiskCache that is read/written with explicit I/O.
  */
  length=number_pixels*(sizeof(PixelPacket)+sizeof(IndexPacket));
  if (length != (MagickSizeType) ((size_t) length))
    cache_info->type=DiskCache;
  else
    {
      status=AcquireMagickResource(MapResource,cache_info->length);
      if (status == MagickFalse)
        cache_info->type=DiskCache;
      else
        if ((cache_info->type != MapCache) && (cache_info->type != MemoryCache))
          {
            cache_info->type=DiskCache;
            RelinquishMagickResource(MapResource,cache_info->length);
          }
        else
          {
            cache_info->pixels=(PixelPacket *) MapBlob(cache_info->file,mode,
              cache_info->offset,(size_t) cache_info->length);
            if (cache_info->pixels == (PixelPacket *) NULL)
              {
                /*
                  Mapping failed: fall back to an unmapped disk cache.
                */
                cache_info->type=DiskCache;
                cache_info->mapped=source_info.mapped;
                cache_info->pixels=source_info.pixels;
                RelinquishMagickResource(MapResource,cache_info->length);
              }
            else
              {
                /*
                  Create file-backed memory-mapped pixel cache.
                */
                (void) ClosePixelCacheOnDisk(cache_info);
                cache_info->type=MapCache;
                cache_info->mapped=MagickTrue;
                cache_info->indexes=(IndexPacket *) NULL;
                if (cache_info->active_index_channel != MagickFalse)
                  cache_info->indexes=(IndexPacket *) (cache_info->pixels+
                    number_pixels);
                if ((source_info.storage_class != UndefinedClass) &&
                    (mode != ReadMode))
                  {
                    status=ClonePixelCacheRepository(cache_info,&source_info,
                      exception);
                    RelinquishPixelCachePixels(&source_info);
                  }
                if (image->debug != MagickFalse)
                  {
                    (void) FormatMagickSize(cache_info->length,MagickTrue,
                      format);
                    type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                      cache_info->type);
                    (void) FormatLocaleString(message,MaxTextExtent,
                      "open %s (%s[%d], %s, %.20gx%.20g %s)",
                      cache_info->filename,cache_info->cache_filename,
                      cache_info->file,type,(double) cache_info->columns,
                      (double) cache_info->rows,format);
                    (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                      message);
                  }
                if (status == 0)
                  {
                    cache_info->type=UndefinedCache;
                    return(MagickFalse);
                  }
                return(MagickTrue);
              }
          }
    }
  /* plain DiskCache path */
  status=MagickTrue;
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
    {
      status=ClonePixelCacheRepository(cache_info,&source_info,exception);
      RelinquishPixelCachePixels(&source_info);
    }
  if (image->debug != MagickFalse)
    {
      (void) FormatMagickSize(cache_info->length,MagickFalse,format);
      type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
        cache_info->type);
      (void) FormatLocaleString(message,MaxTextExtent,
        "open %s (%s[%d], %s, %.20gx%.20g %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,type,(double)
        cache_info->columns,(double) cache_info->rows,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  if (status == 0)
    {
      cache_info->type=UndefinedCache;
      return(MagickFalse);
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r s i s t P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PersistPixelCache() attaches to or initializes a persistent pixel cache. A
% persistent pixel cache is one that resides on disk and is not destroyed
% when the program exits.
%
% The format of the PersistPixelCache() method is:
%
% MagickBooleanType PersistPixelCache(Image *image,const char *filename,
% const MagickBooleanType attach,MagickOffsetType *offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filename: the persistent pixel cache filename.
%
% o attach: A value other than zero attaches to (rather than initializes)
% an existing persistent pixel cache; zero clones the current pixel cache
% to the persistent cache file.
%
% o offset: the offset in the persistent cache to store pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
  const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict clone_info;

  MagickBooleanType
    status;

  ssize_t
    page_size;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (void *) NULL);
  assert(filename != (const char *) NULL);
  assert(offset != (MagickOffsetType *) NULL);
  page_size=GetMagickPageSize();
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* flush any device-side pixels before touching the host cache */
  CopyOpenCLBuffer(cache_info);
#endif
  if (attach != MagickFalse)
    {
      /*
        Attach existing persistent pixel cache.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CacheEvent,GetMagickModule(),
          "attach persistent cache");
      (void) CopyMagickString(cache_info->cache_filename,filename,
        MaxTextExtent);
      cache_info->type=MapCache;
      cache_info->offset=(*offset);
      if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
        return(MagickFalse);
      /* advance the caller's offset past this image, rounded up to a page */
      *offset+=cache_info->length+page_size-(cache_info->length % page_size);
      return(MagickTrue);
    }
  /*
    Clone persistent pixel cache.
  */
  status=AcquireMagickResource(DiskResource,cache_info->length);
  if (status == MagickFalse)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /* build a disk-backed clone at the given offset in the persistent file */
  clone_info=(CacheInfo *) ClonePixelCache(cache_info);
  clone_info->type=DiskCache;
  (void) CopyMagickString(clone_info->cache_filename,filename,MaxTextExtent);
  clone_info->file=(-1);
  clone_info->storage_class=cache_info->storage_class;
  clone_info->colorspace=cache_info->colorspace;
  clone_info->columns=cache_info->columns;
  clone_info->rows=cache_info->rows;
  clone_info->active_index_channel=cache_info->active_index_channel;
  clone_info->mode=PersistMode;
  clone_info->length=cache_info->length;
  clone_info->channels=cache_info->channels;
  clone_info->offset=(*offset);
  status=ClonePixelCacheRepository(clone_info,cache_info,exception);
  /* advance the caller's offset past this image, rounded up to a page */
  *offset+=cache_info->length+page_size-(cache_info->length % page_size);
  clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelCacheNexus() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelCacheNexus() method is:
%
% PixelPacket *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% const MagickBooleanType clone,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to set.
%
% o clone: clone the pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket *QueueAuthenticPixel(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  PixelPacket
    *pixels;

  /*
    Compatibility wrapper: delegate to the nexus-based queue method.
  */
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,clone,nexus_info,
    exception);
  return(pixels);
}
MagickExport PixelPacket *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    number_pixels;

  PixelPacket
    *magick_restrict pixels;

  /*
    Validate pixel cache geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((PixelPacket *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  /* the region's upper-left corner must lie inside the image */
  if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
      (y < 0) || (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((PixelPacket *) NULL);
    }
  offset=(MagickOffsetType) y*cache_info->columns+x;
  if (offset < 0)
    return((PixelPacket *) NULL);  /* signed overflow guard */
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  /* offset of the region's lower-right pixel must also be in bounds */
  offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  if ((MagickSizeType) offset >= number_pixels)
    return((PixelPacket *) NULL);
  /*
    Return pixel cache.  A clip or composite mask forces a private staging
    buffer so masked pixels can be blended on sync.
  */
  pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,x,y,columns,rows,
    (image->clip_mask != (Image *) NULL) || (image->mask != (Image *) NULL) ?
    MagickTrue : MagickFalse,nexus_info,exception);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelsCache() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelsCache() method is:
%
% PixelPacket *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static PixelPacket *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  NexusInfo
    *magick_restrict nexus;

  const int
    id = GetOpenMPThreadId();

  /*
    Queue authentic pixels through this thread's private cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  nexus=cache_info->nexus_info[id];
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    nexus,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u e u e A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixels() queues a mutable pixel region. If the region is
% successfully initialized a pointer to a PixelPacket array representing the
% region is returned, otherwise NULL is returned. The returned pointer may
% point to a temporary working buffer for the pixels or it may point to the
% final location of the pixels in memory.
%
% Write-only access means that any existing pixel values corresponding to
% the region are ignored. This is useful if the initial image is being
% created from scratch, or if the existing pixel values are to be
% completely replaced without need to refer to their pre-existing values.
% The application is free to read and write the pixel buffer returned by
% QueueAuthenticPixels() any way it pleases. QueueAuthenticPixels() does not
% initialize the pixel array values. Initializing pixel array values is the
% application's responsibility.
%
% Performance is maximized if the selected region is part of one row, or
% one or more full rows, since then there is opportunity to access the
% pixels in-place (without a copy) if the image is in memory, or in a
% memory-mapped file. The returned pointer must *never* be deallocated
% by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticIndexQueue() after invoking GetAuthenticPixels() to obtain
% the black color component or the colormap indexes (of type IndexPacket)
% corresponding to the region. Once the PixelPacket (and/or IndexPacket)
% array has been updated, the changes must be saved back to the underlying
% image using SyncAuthenticPixels() or they may be lost.
%
% The format of the QueueAuthenticPixels() method is:
%
% PixelPacket *QueueAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket *QueueAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  QueueAuthenticPixelsHandler
    queue_handler;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Prefer an installed handler (e.g. a cache-method override); otherwise
    queue through this thread's cache nexus.
  */
  queue_handler=cache_info->methods.queue_authentic_pixels_handler;
  if (queue_handler != (QueueAuthenticPixelsHandler) NULL)
    return(queue_handler(image,x,y,columns,rows,exception));
  assert(id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCacheIndexes() reads colormap indexes from the specified region of
% the pixel cache.
%
% The format of the ReadPixelCacheIndexes() method is:
%
% MagickBooleanType ReadPixelCacheIndexes(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the colormap indexes.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ReadPixelCacheRegion() reads `length' bytes into `buffer' from the pixel
  cache file starting at byte `offset'.  It returns the number of bytes
  actually read, which is less than `length' on failure (or -1 if the
  initial seek fails).  Short reads and EINTR interruptions are retried.
*/
static inline MagickOffsetType ReadPixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PREAD)
  /* without pread(), position the shared file offset explicitly */
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
    /* each read is clamped to SSIZE_MAX, the largest portable read size */
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX));
#else
    count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        /* reset so the loop index does not move; retry only on EINTR */
        count=0;
        if (errno != EINTR)
          break;
      }
  }
  return(i);
}
/*
  ReadPixelCacheIndexes() copies the colormap indexes covered by the nexus
  region out of the pixel cache into nexus_info->indexes.  Returns MagickTrue
  on success; MagickFalse if there is no index channel or the read fails.

  Fix: the second argument to ReadDistributePixelCacheIndexes() had been
  corrupted to the mojibake token `®ion' (an HTML-entity mangling of
  `&region'), which does not compile; the address-of expression is restored.
*/
static MagickBooleanType ReadPixelCacheIndexes(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register IndexPacket
    *magick_restrict q;

  register ssize_t
    y;

  size_t
    rows;

  if (cache_info->active_index_channel == MagickFalse)
    return(MagickFalse);  /* no index channel: nothing to read */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus points directly at the cache; no copy */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*sizeof(IndexPacket);
  rows=nexus_info->region.height;
  extent=length*rows;
  q=nexus_info->indexes;
  y=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register IndexPacket
        *magick_restrict p;

      /*
        Read indexes from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* full-width region: collapse to one contiguous copy */
          length=extent;
          rows=1UL;
        }
      p=cache_info->indexes+offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->columns;
        q+=nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read indexes from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          /* full-width region: collapse to one contiguous read */
          length=extent;
          rows=1UL;
        }
      /* on disk, indexes are stored after all pixels of the image */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent*
          sizeof(PixelPacket)+offset*sizeof(*q),length,(unsigned char *) q);
        if (count < (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read indexes from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* fetch one row per request */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCacheIndexes((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* a premature loop exit above means a row failed to transfer */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCachePixels() reads pixels from the specified region of the pixel
% cache.
%
% The format of the ReadPixelCachePixels() method is:
%
% MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ReadPixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register PixelPacket
    *magick_restrict q;

  register ssize_t
    y;

  size_t
    rows;

  /*
    Nothing to read when the nexus aliases the cache pixels directly.
  */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  /*
    Compute the pixel offset and per-row byte length of the nexus region,
    failing on arithmetic overflow of either product.
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
  if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
    return(MagickFalse);
  offset+=nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*sizeof(PixelPacket);
  if ((length/sizeof(PixelPacket)) != nexus_info->region.width)
    return(MagickFalse);
  rows=nexus_info->region.height;
  extent=length*rows;
  if ((extent == 0) || ((extent/length) != rows))
    return(MagickFalse);
  q=nexus_info->pixels;
  y=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register PixelPacket
        *magick_restrict p;

      /*
        Read pixels from memory.  A nexus spanning full cache rows is
        copied with a single memcpy.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      p=cache_info->pixels+offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->columns;
        q+=nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read pixels from disk, one region per row (or one region total
        when the nexus spans full rows and fits the buffer limit).
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
          sizeof(*q),length,(unsigned char *) q);
        if (count < (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read pixels from distributed cache, one request per row unless the
        whole region is contiguous and within the buffer limit.
        (Fixed: "&region" below had been mangled to a registered-sign
        mojibake by an HTML-entity round trip.)
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e f e r e n c e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferencePixelCache() increments the reference count associated with the
% pixel cache returning a pointer to the cache.
%
% The format of the ReferencePixelCache method is:
%
% Cache ReferencePixelCache(Cache cache_info)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
*/
MagickExport Cache ReferencePixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict info;

  /*
    Bump the cache reference count under the cache semaphore and hand the
    same cache back to the caller.
  */
  assert(cache != (Cache *) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  LockSemaphoreInfo(info->semaphore);
  info->reference_count++;
  UnlockSemaphoreInfo(info->semaphore);
  return((Cache) info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t P i x e l C a c h e E p o c h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheEpoch() resets the pixel cache epoch.
%
% The format of the ResetPixelCacheEpoch method is:
%
% void ResetPixelCacheEpoch(void)
%
*/
MagickPrivate void ResetPixelCacheEpoch(void)
{
  /*
    Reset the file-global cache epoch counter to zero.
  */
  cache_epoch=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheMethods() sets the image pixel methods to the specified ones.
%
% The format of the SetPixelCacheMethods() method is:
%
% SetPixelCacheMethods(Cache *,CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickExport void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
  CacheInfo
    *magick_restrict cache_info;

  GetOneAuthenticPixelFromHandler
    get_one_authentic_pixel_from_handler;

  GetOneVirtualPixelFromHandler
    get_one_virtual_pixel_from_handler;

  /*
    Set cache pixel methods: each non-NULL handler in cache_methods
    replaces the corresponding handler on the cache; NULL entries leave
    the current handler untouched.
  */
  assert(cache != (Cache) NULL);
  assert(cache_methods != (CacheMethods *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
    cache_info->methods.get_virtual_pixel_handler=
      cache_methods->get_virtual_pixel_handler;
  if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
    cache_info->methods.destroy_pixel_handler=
      cache_methods->destroy_pixel_handler;
  if (cache_methods->get_virtual_indexes_from_handler !=
      (GetVirtualIndexesFromHandler) NULL)
    cache_info->methods.get_virtual_indexes_from_handler=
      cache_methods->get_virtual_indexes_from_handler;
  if (cache_methods->get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    cache_info->methods.get_authentic_pixels_handler=
      cache_methods->get_authentic_pixels_handler;
  if (cache_methods->queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    cache_info->methods.queue_authentic_pixels_handler=
      cache_methods->queue_authentic_pixels_handler;
  if (cache_methods->sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    cache_info->methods.sync_authentic_pixels_handler=
      cache_methods->sync_authentic_pixels_handler;
  if (cache_methods->get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    cache_info->methods.get_authentic_pixels_from_handler=
      cache_methods->get_authentic_pixels_from_handler;
  if (cache_methods->get_authentic_indexes_from_handler !=
      (GetAuthenticIndexesFromHandler) NULL)
    cache_info->methods.get_authentic_indexes_from_handler=
      cache_methods->get_authentic_indexes_from_handler;
  /*
    FIX: read the candidate handler from cache_methods (the original read
    it from cache_info->methods, so a NULL entry in cache_methods could
    clobber an installed handler).  This now mirrors the authentic-pixel
    case below.
  */
  get_one_virtual_pixel_from_handler=
    cache_methods->get_one_virtual_pixel_from_handler;
  if (get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    cache_info->methods.get_one_virtual_pixel_from_handler=
      cache_methods->get_one_virtual_pixel_from_handler;
  get_one_authentic_pixel_from_handler=
    cache_methods->get_one_authentic_pixel_from_handler;
  if (get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    cache_info->methods.get_one_authentic_pixel_from_handler=
      cache_methods->get_one_authentic_pixel_from_handler;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e N e x u s P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheNexusPixels() defines the region of the cache for the
% specified cache nexus.
%
% The format of the SetPixelCacheNexusPixels() method is:
%
% PixelPacket *SetPixelCacheNexusPixels(
% const CacheInfo *magick_restrict cache_info,const MapMode mode,
% const ssize_t x,const ssize_t y,const size_t width,const size_t height,
% const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o x,y,width,height: define the region of this particular cache nexus.
%
% o buffered: pixels are buffered.
%
% o nexus_info: the cache nexus to set.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MagickSizeType length,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  /*
    Acquire staging storage of `length' bytes for the nexus, either as an
    anonymous mapping or as aligned heap memory; raise an exception and
    return MagickFalse on failure.
  */
  if (length != (MagickSizeType) ((size_t) length))
    {
      /*
        Request does not fit in a size_t on this platform.
      */
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
  if (cache_anonymous_memory > 0)
    {
      /*
        Anonymous memory mapping was requested.
      */
      nexus_info->cache=(PixelPacket *) MapBlob(-1,IOMode,0,(size_t) length);
      if (nexus_info->cache != (PixelPacket *) NULL)
        nexus_info->mapped=MagickTrue;
    }
  else
    {
      nexus_info->cache=(PixelPacket *) MagickAssumeAligned(
        AcquireAlignedMemory(1,(size_t) length));
      if (nexus_info->cache != (PixelPacket *) NULL)
        (void) memset(nexus_info->cache,0,(size_t) length);
    }
  if (nexus_info->cache == (PixelPacket *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=length;
  return(MagickTrue);
}
static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
  const MapMode mode)
{
  /*
    Hint the CPU to prefetch the cache line following the nexus pixels;
    the second argument of MagickCachePrefetch selects a read (0) or
    write (1) prefetch.
  */
  if (nexus_info->length < CACHE_LINE_SIZE)
    return;
  MagickCachePrefetch((unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE,
    mode == ReadMode ? 0 : 1,1);
}
static PixelPacket *SetPixelCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MapMode mode,
  const ssize_t x,const ssize_t y,const size_t width,const size_t height,
  const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  /*
    Bind the nexus to the (x,y,width,height) region: either alias the
    cache pixels directly (memory/map caches, in-bounds, row-aligned) or
    allocate a staging buffer that is later synced to the cache.  Returns
    the nexus pixels, or NULL on error.
  */
  assert(cache_info != (const CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((PixelPacket *) NULL);
  assert(nexus_info->signature == MagickCoreSignature);
  (void) memset(&nexus_info->region,0,sizeof(nexus_info->region));
  if ((width == 0) || (height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "NoPixelsDefinedInCache","`%s'",cache_info->filename);
      return((PixelPacket *) NULL);
    }
  if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
      (buffered == MagickFalse))
    {
      /*
        Direct access is only safe when the region is fully inside the
        cache AND is contiguous: either full-width rows, or a single row
        entirely within one cache row.
      */
      if (((x >= 0) && (y >= 0) &&
          (((ssize_t) height+y-1) < (ssize_t) cache_info->rows)) &&
          (((x == 0) && (width == cache_info->columns)) || ((height == 1) &&
          (((ssize_t) width+x-1) < (ssize_t) cache_info->columns))))
        {
          MagickOffsetType
            offset;

          /*
            Pixels are accessed directly from memory.
          */
          offset=(MagickOffsetType) y*cache_info->columns+x;
          nexus_info->pixels=cache_info->pixels+offset;
          nexus_info->indexes=(IndexPacket *) NULL;
          if (cache_info->active_index_channel != MagickFalse)
            nexus_info->indexes=cache_info->indexes+offset;
          nexus_info->region.width=width;
          nexus_info->region.height=height;
          nexus_info->region.x=x;
          nexus_info->region.y=y;
          nexus_info->authentic_pixel_cache=MagickTrue;
          PrefetchPixelCacheNexusPixels(nexus_info,mode);
          return(nexus_info->pixels);
        }
    }
  /*
    Pixels are stored in a staging region until they are synced to the cache.
  */
  if (((MagickSizeType) width > cache_info->width_limit) ||
      ((MagickSizeType) height > cache_info->height_limit))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "WidthOrHeightExceedsLimit","`%s'",cache_info->filename);
      return((PixelPacket *) NULL);
    }
  /*
    Size the staging buffer at least one full cache row/column so it can
    be reused across differently-shaped requests.
  */
  number_pixels=(MagickSizeType) width*height;
  length=MagickMax(number_pixels,MagickMax(cache_info->columns,
    cache_info->rows))*sizeof(PixelPacket);
  if (cache_info->active_index_channel != MagickFalse)
    length+=number_pixels*sizeof(IndexPacket);
  status=MagickTrue;
  if (nexus_info->cache == (PixelPacket *) NULL)
    status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
  else
    if (nexus_info->length < length)
      {
        /* Existing staging buffer is too small; grow it. */
        RelinquishCacheNexusPixels(nexus_info);
        status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
      }
  if (status == MagickFalse)
    {
      (void) memset(&nexus_info->region,0,sizeof(nexus_info->region));
      return((PixelPacket *) NULL);
    }
  /* Indexes, when active, follow the pixels in the same allocation. */
  nexus_info->pixels=nexus_info->cache;
  nexus_info->indexes=(IndexPacket *) NULL;
  if (cache_info->active_index_channel != MagickFalse)
    nexus_info->indexes=(IndexPacket *) (nexus_info->pixels+number_pixels);
  nexus_info->region.width=width;
  nexus_info->region.height=height;
  nexus_info->region.x=x;
  nexus_info->region.y=y;
  nexus_info->authentic_pixel_cache=cache_info->type == PingCache ?
    MagickTrue : MagickFalse;
  PrefetchPixelCacheNexusPixels(nexus_info,mode);
  return(nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
% pixel cache and returns the previous setting. A virtual pixel is any pixel
% access that is outside the boundaries of the image cache.
%
% The format of the SetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod SetPixelCacheVirtualMethod(const Image *image,
% const VirtualPixelMethod virtual_pixel_method)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
*/
static MagickBooleanType SetCacheAlphaChannel(Image *image,
  const Quantum opacity)
{
  CacheInfo
    *magick_restrict cache_info;

  CacheView
    *magick_restrict image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Enable the image matte channel and set every pixel's opacity to the
    given value; rows are processed in parallel under OpenMP.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  image->matte=MagickTrue;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,&image->exception); /* must be virtual */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    /* Once any row fails, remaining rows are skipped (no early exit
       allowed inside an OpenMP loop). */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
      &image->exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      q->opacity=opacity;
      q++;
    }
    status=SyncCacheViewAuthenticPixels(image_view,&image->exception);
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
MagickExport VirtualPixelMethod SetPixelCacheVirtualMethod(const Image *image,
  const VirtualPixelMethod virtual_pixel_method)
{
  CacheInfo
    *magick_restrict cache_info;

  VirtualPixelMethod
    method;

  /*
    Record the new virtual-pixel method and return the previous one.
    Background/transparent methods may first require activating the
    image's alpha channel (and, for background, a colorspace fix-up).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  method=cache_info->virtual_pixel_method;
  cache_info->virtual_pixel_method=virtual_pixel_method;
  if ((image->columns != 0) && (image->rows != 0))
    {
      if (virtual_pixel_method == BackgroundVirtualPixelMethod)
        {
          if ((image->background_color.opacity != OpaqueOpacity) &&
              (image->matte == MagickFalse))
            (void) SetCacheAlphaChannel((Image *) image,OpaqueOpacity);
          if ((IsPixelGray(&image->background_color) == MagickFalse) &&
              (IsGrayColorspace(image->colorspace) != MagickFalse))
            (void) SetImageColorspace((Image *) image,sRGBColorspace);
        }
      else
        if (virtual_pixel_method == TransparentVirtualPixelMethod)
          {
            if (image->matte == MagickFalse)
              (void) SetCacheAlphaChannel((Image *) image,OpaqueOpacity);
          }
    }
  return(method);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticOpenCLBuffer() ensures all the OpenCL operations have been
% completed and updates the host memory.
%
% The format of the SyncAuthenticOpenCLBuffer() method is:
%
% void SyncAuthenticOpenCLBuffer(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info)
{
  MagickCLEnv
    clEnv;

  /*
    Synchronize a memory cache that is backed by an OpenCL buffer: wait
    for outstanding OpenCL events, map the buffer back to host memory,
    then release the OpenCL cache state.
  */
  assert(cache_info != (CacheInfo *)NULL);
  if ((cache_info->type != MemoryCache) ||
      (cache_info->opencl == (OpenCLCacheInfo *)NULL))
    return;
  /*
    Ensure single threaded access to OpenCL environment.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->opencl != (OpenCLCacheInfo *)NULL)
    {
      cl_event
        *events;

      cl_uint
        event_count;

      clEnv=GetDefaultOpenCLEnv();
      events=CopyOpenCLEvents(cache_info->opencl,&event_count);
      if (events != (cl_event *) NULL)
        {
          cl_command_queue
            queue;

          cl_context
            context;

          cl_int
            status;

          PixelPacket
            *pixels;

          /*
            Blocking map (CL_TRUE) that waits on all copied events, so
            pending kernels complete before host memory is updated.  The
            mapping is expected to land on the existing host pixels.
          */
          context=GetOpenCLContext(clEnv);
          queue=AcquireOpenCLCommandQueue(clEnv);
          pixels=(PixelPacket *) clEnv->library->clEnqueueMapBuffer(queue,
            cache_info->opencl->buffer,CL_TRUE, CL_MAP_READ | CL_MAP_WRITE,0,
            cache_info->length,event_count,events,NULL,&status);
          assert(pixels == cache_info->pixels);
          events=(cl_event *) RelinquishMagickMemory(events);
          RelinquishOpenCLCommandQueue(clEnv,queue);
        }
      cache_info->opencl=RelinquishOpenCLCacheInfo(clEnv,cache_info->opencl);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
}
MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image)
{
  /*
    Flush any pending OpenCL work on the image's pixel cache back to host
    memory.
  */
  assert(image != (Image *) NULL);
  CopyOpenCLBuffer((CacheInfo *) image->cache);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
% in-memory or disk cache. The method returns MagickTrue if the pixel region
% is synced, otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelCacheNexus() method is:
%
% MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to sync.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    status;

  /*
    Transfer pixels to the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->cache == (Cache) NULL)
    ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return(MagickFalse);
  /*
    Apply any clip or mask image to the nexus before the write-back.
  */
  if ((image->storage_class == DirectClass) &&
      (image->clip_mask != (Image *) NULL) &&
      (ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if ((image->storage_class == DirectClass) &&
      (image->mask != (Image *) NULL) &&
      (MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    {
      /*
        The nexus points straight into the cache: no copy is needed, just
        mark the image tainted.
      */
      if (image->taint == MagickFalse)
        image->taint=MagickTrue;
      return(MagickTrue);
    }
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Write pixels first, then colormap indexes when active.
  */
  status=WritePixelCachePixels(cache_info,nexus_info,exception);
  if ((cache_info->active_index_channel != MagickFalse) &&
      (WritePixelCacheIndexes(cache_info,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if ((status != MagickFalse) && (image->taint == MagickFalse))
    image->taint=MagickTrue;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory
% or disk cache. The method returns MagickTrue if the pixel region is synced,
% otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelsCache() method is:
%
% MagickBooleanType SyncAuthenticPixelsCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Sync the calling thread's cache nexus back to the pixel cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncAuthenticPixels() method is:
%
% MagickBooleanType SyncAuthenticPixels(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Delegate to an installed sync handler when one is present; otherwise
    sync the calling thread's cache nexus directly.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    return(cache_info->methods.sync_authentic_pixels_handler(image,exception));
  assert(id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImagePixelCache() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncImagePixelCache() method is:
%
% MagickBooleanType SyncImagePixelCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Synchronize the image pixel cache; success is a non-NULL cache.
  */
  assert(image != (Image *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception);
  if (cache_info == (CacheInfo *) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e P i x e l C a c h e I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCacheIndexes() writes the colormap indexes to the specified
% region of the pixel cache.
%
% The format of the WritePixelCacheIndexes() method is:
%
% MagickBooleanType WritePixelCacheIndexes(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the colormap indexes.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCacheIndexes(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const IndexPacket
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  /*
    Write the nexus colormap indexes back to the cache.  No-op unless the
    index channel is active; nothing to copy when the nexus aliases the
    cache directly.
  */
  if (cache_info->active_index_channel == MagickFalse)
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*sizeof(IndexPacket);
  rows=nexus_info->region.height;
  extent=(MagickSizeType) length*rows;
  p=nexus_info->indexes;
  y=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register IndexPacket
        *magick_restrict q;

      /*
        Write indexes to memory; a full-width nexus collapses to one
        memcpy.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=cache_info->indexes+offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width;
        q+=cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write indexes to disk.  The index plane follows the pixel plane
        in the on-disk cache, hence the extent*sizeof(PixelPacket) term
        in the file offset below.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
          sizeof(PixelPacket)+offset*sizeof(*p),length,(const unsigned char *)
          p);
        if (count < (MagickOffsetType) length)
          break;
        p+=nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write indexes to distributed cache, one request per row unless
        the whole region is contiguous and within the buffer limit.
        (Fixed: "&region" below had been mangled to a registered-sign
        mojibake by an HTML-entity round trip.)
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCacheIndexes((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCachePixels() writes image pixels to the specified region of the
% pixel cache.
%
% The format of the WritePixelCachePixels() method is:
%
% MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const PixelPacket
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  /*
    Write the nexus pixels back to the cache.  Nothing to copy when the
    nexus aliases the cache pixels directly.
  */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*sizeof(PixelPacket);
  rows=nexus_info->region.height;
  extent=length*rows;
  p=nexus_info->pixels;
  y=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register PixelPacket
        *magick_restrict q;

      /*
        Write pixels to memory; a full-width nexus collapses to one
        memcpy.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=cache_info->pixels+offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width;
        q+=cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write pixels to disk, one region per row (or one region total
        when the nexus spans full rows and fits the buffer limit).
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
          sizeof(*p),length,(const unsigned char *) p);
        if (count < (MagickOffsetType) length)
          break;
        p+=nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write pixels to distributed cache, one request per row unless the
        whole region is contiguous and within the buffer limit.
        (Fixed: "&region" below had been mangled to a registered-sign
        mojibake by an HTML-entity round trip.)
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
|
draw.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD RRRR AAA W W %
% D D R R A A W W %
% D D RRRR AAAAA W W W %
% D D R R A A WW WW %
% DDDD R R A A W W %
% %
% %
% MagickCore Image Drawing Methods %
% %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
% rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
% Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent
% (www.appligent.com) contributed the dash pattern, linecap stroking
% algorithm, and minor rendering improvements.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
Define declarations.
*/
#define BezierQuantum 200
#define PrimitiveExtentPad 4296.0
#define MaxBezierCoordinates 67108864
#define ThrowPointExpectedException(token,exception) \
{ \
(void) ThrowMagickException(exception,GetMagickModule(),DrawError, \
"NonconformingDrawingPrimitiveDefinition","`%s'",token); \
status=MagickFalse; \
break; \
}
/*
Typedef declarations.
*/
/*
  A single y-monotonic run of points carved out of a path; the polygon
  rasterizer scans a sorted array of these (see ConvertPathToPolygon()).
*/
typedef struct _EdgeInfo
{
  SegmentInfo
    bounds;             /* x/y extent of the points in this edge */

  double
    scanline;           /* scan bookkeeping; set to -1.0 when an edge is
                           completed in ConvertPathToPolygon() */

  PointInfo
    *points;            /* vertices, stored in increasing-y order */

  size_t
    number_points;      /* count of entries in points */

  ssize_t
    direction;          /* 1 if the original path ran downward, 0 if upward */

  MagickBooleanType
    ghostline;          /* synthetic closing edge of an open subpath */

  size_t
    highwater;          /* resume index for incremental traversal */
} EdgeInfo;

/*
  Center, axes, and rotation of an ellipse element.  NOTE(review): field
  semantics inferred from names; confirm against ElementReference usage.
*/
typedef struct _ElementInfo
{
  double
    cx,
    cy,
    major,
    minor,
    angle;
} ElementInfo;

/*
  Mutable state threaded through the MVG/path tracing helpers: a growable
  PrimitiveInfo buffer (primitive_info + its current capacity in extent),
  the next write position (offset), the current point, and where to report
  errors.
*/
typedef struct _MVGInfo
{
  PrimitiveInfo
    **primitive_info;

  size_t
    *extent;

  ssize_t
    offset;

  PointInfo
    point;

  ExceptionInfo
    *exception;
} MVGInfo;

/*
  The sorted-edge rendering form produced by ConvertPathToPolygon().
*/
typedef struct _PolygonInfo
{
  EdgeInfo
    *edges;

  size_t
    number_edges;
} PolygonInfo;

/*
  Vector-path opcodes: MoveToCode starts a closed subpath, OpenCode starts
  an open one, GhostlineCode introduces the synthetic segment that closes an
  open subpath for filling (without stroking it), LineToCode extends the
  current subpath, and EndCode terminates the array.
*/
typedef enum
{
  MoveToCode,
  OpenCode,
  GhostlineCode,
  LineToCode,
  EndCode
} PathInfoCode;

/*
  One element of a vector path: a point tagged with its opcode.
*/
typedef struct _PathInfo
{
  PointInfo
    point;

  PathInfoCode
    code;
} PathInfo;
/*
Forward declarations.
*/
static Image
*DrawClippingMask(Image *,const DrawInfo *,const char *,const char *,
ExceptionInfo *);
static MagickBooleanType
DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *,
ExceptionInfo *),
RenderMVGContent(Image *,const DrawInfo *,const size_t,ExceptionInfo *),
TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo,
const double,const MagickBooleanType,const MagickBooleanType),
TraceBezier(MVGInfo *,const size_t),
TraceCircle(MVGInfo *,const PointInfo,const PointInfo),
TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo),
TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);
static PrimitiveInfo
*TraceStrokePolygon(const DrawInfo *,const PrimitiveInfo *,ExceptionInfo *);
static ssize_t
TracePath(MVGInfo *,const char *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
% The format of the AcquireDrawInfo method is:
%
% DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
  DrawInfo
    *info;

  /*
    Allocate a DrawInfo (fatal on failure) and initialize every member to
    its default value.
  */
  info=(DrawInfo *) AcquireCriticalMemory(sizeof(*info));
  GetDrawInfo((ImageInfo *) NULL,info);
  return(info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneDrawInfo() makes a copy of the given draw_info structure. If NULL
% is specified, a new DrawInfo structure is created initialized to default
% values.
%
% The format of the CloneDrawInfo method is:
%
% DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
  const DrawInfo *draw_info)
{
  DrawInfo
    *clone_info;

  ExceptionInfo
    *exception;

  /*
    Start from a default-initialized DrawInfo; a NULL draw_info simply
    returns the defaults.
  */
  clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetDrawInfo(image_info,clone_info);
  if (draw_info == (DrawInfo *) NULL)
    return(clone_info);
  exception=AcquireExceptionInfo();
  /*
    Deep-copy dynamically allocated string members.
  */
  if (draw_info->id != (char *) NULL)
    (void) CloneString(&clone_info->id,draw_info->id);
  if (draw_info->primitive != (char *) NULL)
    (void) CloneString(&clone_info->primitive,draw_info->primitive);
  if (draw_info->geometry != (char *) NULL)
    (void) CloneString(&clone_info->geometry,draw_info->geometry);
  /*
    Scalar and embedded-struct members copy by assignment.
  */
  clone_info->compliance=draw_info->compliance;
  clone_info->viewbox=draw_info->viewbox;
  clone_info->affine=draw_info->affine;
  clone_info->gravity=draw_info->gravity;
  clone_info->fill=draw_info->fill;
  clone_info->stroke=draw_info->stroke;
  clone_info->stroke_width=draw_info->stroke_width;
  /*
    Pattern images are deep-cloned so each DrawInfo owns its own copy.
  */
  if (draw_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
      exception);
  if (draw_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke_antialias=draw_info->stroke_antialias;
  clone_info->text_antialias=draw_info->text_antialias;
  clone_info->fill_rule=draw_info->fill_rule;
  clone_info->linecap=draw_info->linecap;
  clone_info->linejoin=draw_info->linejoin;
  clone_info->miterlimit=draw_info->miterlimit;
  clone_info->dash_offset=draw_info->dash_offset;
  clone_info->decorate=draw_info->decorate;
  clone_info->compose=draw_info->compose;
  if (draw_info->text != (char *) NULL)
    (void) CloneString(&clone_info->text,draw_info->text);
  if (draw_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    (void) CloneString(&clone_info->metrics,draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    (void) CloneString(&clone_info->family,draw_info->family);
  clone_info->style=draw_info->style;
  clone_info->stretch=draw_info->stretch;
  clone_info->weight=draw_info->weight;
  if (draw_info->encoding != (char *) NULL)
    (void) CloneString(&clone_info->encoding,draw_info->encoding);
  clone_info->pointsize=draw_info->pointsize;
  clone_info->kerning=draw_info->kerning;
  clone_info->interline_spacing=draw_info->interline_spacing;
  clone_info->interword_spacing=draw_info->interword_spacing;
  clone_info->direction=draw_info->direction;
  if (draw_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,draw_info->density);
  clone_info->align=draw_info->align;
  clone_info->undercolor=draw_info->undercolor;
  clone_info->border_color=draw_info->border_color;
  if (draw_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    {
      ssize_t
        x;

      /*
        The dash pattern is terminated by a ~0.0 entry; count the x live
        entries, then copy x+1 doubles (pattern + terminator) into a zeroed
        buffer of 2*x+2 doubles.  The extra slack is presumably headroom for
        later pattern manipulation -- confirm against DrawDashPolygon()
        before shrinking it.
      */
      for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
      clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2),
        sizeof(*clone_info->dash_pattern));
      if (clone_info->dash_pattern == (double *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memset(clone_info->dash_pattern,0,(size_t) (2*x+2)*
        sizeof(*clone_info->dash_pattern));
      (void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
        (x+1)*sizeof(*clone_info->dash_pattern));
    }
  clone_info->gradient=draw_info->gradient;
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    {
      size_t
        number_stops;

      /*
        The struct assignment above copied the stops pointer; replace it
        with an owned copy.  NOTE(review): the fatal-error tag reuses
        "UnableToAllocateDashPattern" -- looks like a copy/paste from the
        dash-pattern branch above; confirm the intended message tag.
      */
      number_stops=clone_info->gradient.number_stops;
      clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
        number_stops,sizeof(*clone_info->gradient.stops));
      if (clone_info->gradient.stops == (StopInfo *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
        (size_t) number_stops*sizeof(*clone_info->gradient.stops));
    }
  clone_info->bounds=draw_info->bounds;
  clone_info->fill_alpha=draw_info->fill_alpha;
  clone_info->stroke_alpha=draw_info->stroke_alpha;
  clone_info->element_reference=draw_info->element_reference;
  clone_info->clip_path=draw_info->clip_path;
  clone_info->clip_units=draw_info->clip_units;
  if (draw_info->clip_mask != (char *) NULL)
    (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
      MagickTrue,exception);
  if (draw_info->composite_mask != (Image *) NULL)
    clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
      MagickTrue,exception);
  clone_info->render=draw_info->render;
  /*
    debug reflects the current logging configuration, not the source's flag.
  */
  clone_info->debug=IsEventLogging();
  exception=DestroyExceptionInfo(exception);
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P a t h T o P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPathToPolygon() converts a path to the more efficient sorted
% rendering form.
%
% The format of the ConvertPathToPolygon method is:
%
% PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o ConvertPathToPolygon() returns the path in a more efficient sorted
% rendering form of type PolygonInfo.
%
% o draw_info: Specifies a pointer to an DrawInfo structure.
%
% o path_info: Specifies a pointer to an PathInfo structure.
%
%
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
  /*
    Release each edge's point list, then the edge array, then the
    PolygonInfo itself.  Always returns NULL for caller reassignment.
  */
  if (polygon_info->edges != (EdgeInfo *) NULL)
    {
      ssize_t
        edge;

      for (edge=0; edge < (ssize_t) polygon_info->number_edges; edge++)
      {
        EdgeInfo
          *edge_info;

        edge_info=polygon_info->edges+edge;
        if (edge_info->points != (PointInfo *) NULL)
          edge_info->points=(PointInfo *) RelinquishMagickMemory(
            edge_info->points);
      }
      polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(
        polygon_info->edges);
    }
  return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/*
  qsort() comparator ordering edges for a right-handed coordinate system:
  first by starting y, then starting x, then by the sign of the cross
  product of the two initial segment directions, then by ending y and x.
  Each comparison returns only on a strict inequality, so equal (or NaN)
  keys fall through to the next criterion, exactly as the original
  macro-based version did.
*/
static int DrawCompareEdges(const void *p_edge,const void *q_edge)
{
  const PointInfo
    *p,
    *q;

  double
    delta;

  p=((const EdgeInfo *) p_edge)->points;
  q=((const EdgeInfo *) q_edge)->points;
  delta=p[0].y-q[0].y;
  if (delta < 0.0)
    return(-1);
  if (delta > 0.0)
    return(1);
  delta=p[0].x-q[0].x;
  if (delta < 0.0)
    return(-1);
  if (delta > 0.0)
    return(1);
  delta=(p[1].x-p[0].x)*(q[1].y-q[0].y)-(p[1].y-p[0].y)*(q[1].x-q[0].x);
  if (delta < 0.0)
    return(-1);
  if (delta > 0.0)
    return(1);
  delta=p[1].y-q[1].y;
  if (delta < 0.0)
    return(-1);
  if (delta > 0.0)
    return(1);
  delta=p[1].x-q[1].x;
  if (delta < 0.0)
    return(-1);
  if (delta > 0.0)
    return(1);
  return(0);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
EdgeInfo
*p;
ssize_t
i,
j;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
p=polygon_info->edges;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
(double) i);
(void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
p->direction != MagickFalse ? "down" : "up");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
p->ghostline != MagickFalse ? "transparent" : "opaque");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
p->bounds.x2,p->bounds.y2);
for (j=0; j < (ssize_t) p->number_points; j++)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g",
p->points[j].x,p->points[j].y);
p++;
}
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}
static void ReversePoints(PointInfo *points,const size_t number_points)
{
  size_t
    head,
    tail;

  PointInfo
    swap;

  /*
    Reverse the point array in place; arrays of fewer than two points are
    left untouched.
  */
  if (number_points < 2)
    return;
  head=0;
  tail=number_points-1;
  while (head < tail)
  {
    swap=points[head];
    points[head]=points[tail];
    points[tail]=swap;
    head++;
    tail--;
  }
}
static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info,
  ExceptionInfo *exception)
{
  long
    direction,        /* vertical direction of current run: -1, 0 (unset), 1 */
    next_direction;

  PointInfo
    point,            /* most recently consumed path point */
    *points;          /* points accumulated for the edge being built */

  PolygonInfo
    *polygon_info;

  SegmentInfo
    bounds;           /* x-extent of the points accumulated so far */

  ssize_t
    i,
    n;                /* number of points accumulated in points */

  MagickBooleanType
    ghostline;

  size_t
    edge,             /* index of the next edge slot to fill */
    number_edges,     /* current capacity of the edges array */
    number_points;    /* current capacity of the points array */

  /*
    Convert a path to the more efficient sorted rendering form: split the
    path into y-monotonic runs (flushing a finished edge whenever the
    vertical direction reverses or a new subpath begins), then sort the
    edges for the scanline rasterizer.
  */
  polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return((PolygonInfo *) NULL);
    }
  number_edges=16;
  polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
    sizeof(*polygon_info->edges));
  if (polygon_info->edges == (EdgeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(DestroyPolygonInfo(polygon_info));
    }
  (void) memset(polygon_info->edges,0,number_edges*
    sizeof(*polygon_info->edges));
  direction=0;
  edge=0;
  ghostline=MagickFalse;
  n=0;
  number_points=0;
  points=(PointInfo *) NULL;
  (void) memset(&point,0,sizeof(point));
  (void) memset(&bounds,0,sizeof(bounds));
  /*
    Seed edge slot 0 with empty values so DestroyPolygonInfo() is safe even
    if we bail out before any edge is completed.
  */
  polygon_info->edges[edge].number_points=(size_t) n;
  polygon_info->edges[edge].scanline=0.0;
  polygon_info->edges[edge].highwater=0;
  polygon_info->edges[edge].ghostline=ghostline;
  polygon_info->edges[edge].direction=(ssize_t) direction;
  polygon_info->edges[edge].points=points;
  polygon_info->edges[edge].bounds=bounds;
  polygon_info->number_edges=0;
  for (i=0; path_info[i].code != EndCode; i++)
  {
    if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
        (path_info[i].code == GhostlineCode))
      {
        /*
          Move to.  First flush the accumulated run (if any) as a finished
          edge, growing the edge array geometrically when full.
        */
        if ((points != (PointInfo *) NULL) && (n >= 2))
          {
            if (edge == number_edges)
              {
                number_edges<<=1;
                polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                  polygon_info->edges,(size_t) number_edges,
                  sizeof(*polygon_info->edges));
                if (polygon_info->edges == (EdgeInfo *) NULL)
                  {
                    (void) ThrowMagickException(exception,GetMagickModule(),
                      ResourceLimitError,"MemoryAllocationFailed","`%s'","");
                    points=(PointInfo *) RelinquishMagickMemory(points);
                    return(DestroyPolygonInfo(polygon_info));
                  }
              }
            polygon_info->edges[edge].number_points=(size_t) n;
            polygon_info->edges[edge].scanline=(-1.0);
            polygon_info->edges[edge].highwater=0;
            polygon_info->edges[edge].ghostline=ghostline;
            polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
            /* store the run in increasing-y order */
            if (direction < 0)
              ReversePoints(points,(size_t) n);
            polygon_info->edges[edge].points=points;
            polygon_info->edges[edge].bounds=bounds;
            polygon_info->edges[edge].bounds.y1=points[0].y;
            polygon_info->edges[edge].bounds.y2=points[n-1].y;
            points=(PointInfo *) NULL;
            ghostline=MagickFalse;
            edge++;
            polygon_info->number_edges=edge;
          }
        if (points == (PointInfo *) NULL)
          {
            number_points=16;
            points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
              sizeof(*points));
            if (points == (PointInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'","");
                return(DestroyPolygonInfo(polygon_info));
              }
          }
        ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
        point=path_info[i].point;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        direction=0;
        n=1;
        continue;
      }
    /*
      Line to.  Determine this segment's vertical direction; ties in y are
      broken by x so horizontal segments still get a direction.
    */
    next_direction=((path_info[i].point.y > point.y) ||
      ((fabs(path_info[i].point.y-point.y) < MagickEpsilon) &&
      (path_info[i].point.x > point.x))) ? 1 : -1;
    if ((points != (PointInfo *) NULL) && (direction != 0) &&
        (direction != next_direction))
      {
        /*
          New edge: the direction reversed, so close the current monotonic
          run and start a new run at its last point.
        */
        point=points[n-1];
        if (edge == number_edges)
          {
            number_edges<<=1;
            polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
              polygon_info->edges,(size_t) number_edges,
              sizeof(*polygon_info->edges));
            if (polygon_info->edges == (EdgeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'","");
                points=(PointInfo *) RelinquishMagickMemory(points);
                return(DestroyPolygonInfo(polygon_info));
              }
          }
        polygon_info->edges[edge].number_points=(size_t) n;
        polygon_info->edges[edge].scanline=(-1.0);
        polygon_info->edges[edge].highwater=0;
        polygon_info->edges[edge].ghostline=ghostline;
        polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
        if (direction < 0)
          ReversePoints(points,(size_t) n);
        polygon_info->edges[edge].points=points;
        polygon_info->edges[edge].bounds=bounds;
        polygon_info->edges[edge].bounds.y1=points[0].y;
        polygon_info->edges[edge].bounds.y2=points[n-1].y;
        polygon_info->number_edges=edge+1;
        points=(PointInfo *) NULL;
        number_points=16;
        points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          {
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'","");
            return(DestroyPolygonInfo(polygon_info));
          }
        n=1;
        ghostline=MagickFalse;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        edge++;
      }
    direction=next_direction;
    if (points == (PointInfo *) NULL)
      continue;
    if (n == (ssize_t) number_points)
      {
        /*
          Grow the point buffer geometrically.
        */
        number_points<<=1;
        points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          {
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'","");
            return(DestroyPolygonInfo(polygon_info));
          }
      }
    point=path_info[i].point;
    points[n]=point;
    if (point.x < bounds.x1)
      bounds.x1=point.x;
    if (point.x > bounds.x2)
      bounds.x2=point.x;
    n++;
  }
  if (points != (PointInfo *) NULL)
    {
      /*
        Flush the trailing run; fewer than 2 points cannot form an edge and
        the buffer is simply discarded.
      */
      if (n < 2)
        points=(PointInfo *) RelinquishMagickMemory(points);
      else
        {
          if (edge == number_edges)
            {
              number_edges<<=1;
              polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                polygon_info->edges,(size_t) number_edges,
                sizeof(*polygon_info->edges));
              if (polygon_info->edges == (EdgeInfo *) NULL)
                {
                  (void) ThrowMagickException(exception,GetMagickModule(),
                    ResourceLimitError,"MemoryAllocationFailed","`%s'","");
                  return(DestroyPolygonInfo(polygon_info));
                }
            }
          polygon_info->edges[edge].number_points=(size_t) n;
          polygon_info->edges[edge].scanline=(-1.0);
          polygon_info->edges[edge].highwater=0;
          polygon_info->edges[edge].ghostline=ghostline;
          polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
          if (direction < 0)
            ReversePoints(points,(size_t) n);
          polygon_info->edges[edge].points=points;
          polygon_info->edges[edge].bounds=bounds;
          polygon_info->edges[edge].bounds.y1=points[0].y;
          polygon_info->edges[edge].bounds.y2=points[n-1].y;
          points=(PointInfo *) NULL;
          ghostline=MagickFalse;
          edge++;
          polygon_info->number_edges=edge;
        }
    }
  /*
    Trim the edge array and each edge's point list to final size, then sort
    the edges for the rasterizer.
  */
  polygon_info->number_edges=edge;
  polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(polygon_info->edges,
    polygon_info->number_edges,sizeof(*polygon_info->edges));
  if (polygon_info->edges == (EdgeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(DestroyPolygonInfo(polygon_info));
    }
  for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
  {
    EdgeInfo
      *edge_info;

    edge_info=polygon_info->edges+i;
    edge_info->points=(PointInfo *) ResizeQuantumMemory(edge_info->points,
      edge_info->number_points,sizeof(*edge_info->points));
    if (edge_info->points == (PointInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'","");
        return(DestroyPolygonInfo(polygon_info));
      }
  }
  qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
    sizeof(*polygon_info->edges),DrawCompareEdges);
  if (IsEventLogging() != MagickFalse)
    LogPolygonInfo(polygon_info);
  return(polygon_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P r i m i t i v e T o P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
% path structure.
%
% The format of the ConvertPrimitiveToPath method is:
%
% PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o ConvertPrimitiveToPath() returns a vector path structure of type
% PathInfo.
%
% o draw_info: a structure of type DrawInfo.
%
% o primitive_info: Specifies a pointer to an PrimitiveInfo structure.
%
*/
static void LogPathInfo(const PathInfo *path_info)
{
  const PathInfo
    *p;

  /*
    Dump each path element (coordinates plus an opcode mnemonic) to the draw
    event log.
  */
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
  for (p=path_info; p->code != EndCode; p++)
  {
    const char
      *mnemonic;

    switch (p->code)
    {
      case GhostlineCode:
      {
        mnemonic="moveto ghostline";
        break;
      }
      case OpenCode:
      {
        mnemonic="moveto open";
        break;
      }
      case MoveToCode:
      {
        mnemonic="moveto";
        break;
      }
      case LineToCode:
      {
        mnemonic="lineto";
        break;
      }
      default:
      {
        mnemonic="?";
        break;
      }
    }
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " %g,%g %s",p->point.x,p->point.y,mnemonic);
  }
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}
static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    closed_subpath;

  PathInfo
    *path_info;

  PathInfoCode
    code;

  PointInfo
    p,                /* first point of the current subpath */
    q;                /* most recently emitted point (for deduplication) */

  ssize_t
    i,
    n;                /* number of path elements emitted so far */

  ssize_t
    coordinates,      /* points remaining in the current subpath */
    start;            /* index of the current subpath's first element */

  /*
    Converts a PrimitiveInfo structure into a vector path structure.
  */
  switch (primitive_info->primitive)
  {
    /*
      Non-vector primitives have no path representation.
    */
    case AlphaPrimitive:
    case ColorPrimitive:
    case ImagePrimitive:
    case PointPrimitive:
    case TextPrimitive:
      return((PathInfo *) NULL);
    default:
      break;
  }
  /*
    Worst case each input point emits its own element plus a ghostline pair;
    3*i+1 entries (the +1 is the EndCode sentinel) is a safe upper bound.
  */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
    sizeof(*path_info));
  if (path_info == (PathInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return((PathInfo *) NULL);
    }
  coordinates=0;
  closed_subpath=MagickFalse;
  n=0;
  p.x=(-1.0);
  p.y=(-1.0);
  q.x=(-1.0);
  q.y=(-1.0);
  start=0;
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    code=LineToCode;
    if (coordinates <= 0)
      {
        /*
          New subpath.
        */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        p=primitive_info[i].point;
        start=n;
        code=MoveToCode;
        closed_subpath=primitive_info[i].closed_subpath;
      }
    coordinates--;
    if ((code == MoveToCode) || (coordinates <= 0) ||
        (fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
        (fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
      {
        /*
          Eliminate duplicate points (within MagickEpsilon of the previous
          emitted point), but always keep subpath boundaries.
        */
        path_info[n].code=code;
        path_info[n].point=primitive_info[i].point;
        q=primitive_info[i].point;
        n++;
      }
    if (coordinates > 0)
      continue; /* next point in current subpath */
    if (closed_subpath != MagickFalse)
      {
        closed_subpath=MagickFalse;
        continue;
      }
    /*
      Mark the p point as open if the subpath is not closed: rewrite the
      subpath's first element as OpenCode and append a ghostline back to the
      starting point so the fill rule still sees a closed contour without
      stroking the synthetic segment.
    */
    path_info[start].code=OpenCode;
    path_info[n].code=GhostlineCode;
    path_info[n].point=primitive_info[i].point;
    n++;
    path_info[n].code=LineToCode;
    path_info[n].point=p;
    n++;
  }
  path_info[n].code=EndCode;
  path_info[n].point.x=0.0;
  path_info[n].point.y=0.0;
  if (IsEventLogging() != MagickFalse)
    LogPathInfo(path_info);
  /*
    Trim the buffer to the n+1 entries actually used.
  */
  path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1),
    sizeof(*path_info));
  return(path_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyDrawInfo() deallocates memory associated with an DrawInfo structure.
%
% The format of the DestroyDrawInfo method is:
%
% DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
  /*
    Free every owned member of draw_info (strings, pattern/mask images,
    dash pattern, gradient stops), poison the signature so stale pointers
    trip the asserts, and release the structure.  Always returns NULL so
    callers can reassign: info=DestroyDrawInfo(info);
  */
  assert(draw_info != (DrawInfo *) NULL);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info->signature == MagickCoreSignature);
  if (draw_info->id != (char *) NULL)
    draw_info->id=DestroyString(draw_info->id);
  if (draw_info->primitive != (char *) NULL)
    draw_info->primitive=DestroyString(draw_info->primitive);
  if (draw_info->text != (char *) NULL)
    draw_info->text=DestroyString(draw_info->text);
  if (draw_info->geometry != (char *) NULL)
    draw_info->geometry=DestroyString(draw_info->geometry);
  if (draw_info->fill_pattern != (Image *) NULL)
    draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
  if (draw_info->stroke_pattern != (Image *) NULL)
    draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
  if (draw_info->font != (char *) NULL)
    draw_info->font=DestroyString(draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    draw_info->metrics=DestroyString(draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    draw_info->family=DestroyString(draw_info->family);
  if (draw_info->encoding != (char *) NULL)
    draw_info->encoding=DestroyString(draw_info->encoding);
  if (draw_info->density != (char *) NULL)
    draw_info->density=DestroyString(draw_info->density);
  if (draw_info->server_name != (char *) NULL)
    draw_info->server_name=(char *)
      RelinquishMagickMemory(draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    draw_info->dash_pattern=(double *) RelinquishMagickMemory(
      draw_info->dash_pattern);
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
      draw_info->gradient.stops);
  if (draw_info->clip_mask != (char *) NULL)
    draw_info->clip_mask=DestroyString(draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
  if (draw_info->composite_mask != (Image *) NULL)
    draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
  /* invalidate the signature before freeing to catch use-after-free */
  draw_info->signature=(~MagickCoreSignature);
  draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
  return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w A f f i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawAffineImage() composites the source over the destination image as
% dictated by the affine transform.
%
% The format of the DrawAffineImage method is:
%
% MagickBooleanType DrawAffineImage(Image *image,const Image *source,
% const AffineMatrix *affine,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o source: the source image.
%
% o affine: the affine transform.
%
% o exception: return any errors or warnings in this structure.
%
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
  const double y,const SegmentInfo *edge)
{
  double
    intercept,
    z;

  double
    x;

  SegmentInfo
    inverse_edge;

  /*
    Determine left and right edges: for destination scanline y, clip the
    span [x1,x2] to the x-interval that maps back inside the source image
    under the (already inverted) affine transform.  The first pass applies
    the column constraint, the second pass the row constraint.
  */
  inverse_edge.x1=edge->x1;
  inverse_edge.y1=edge->y1;
  inverse_edge.x2=edge->x2;
  inverse_edge.y2=edge->y2;
  z=affine->ry*y+affine->tx;
  if (affine->sx >= MagickEpsilon)
    {
      intercept=(-z/affine->sx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->columns)/affine->sx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->sx < -MagickEpsilon)
      {
        /*
          Negative x-scale: the two intercepts swap roles.
        */
        intercept=(-z+(double) image->columns)/affine->sx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->sx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
        {
          /*
            Degenerate x-scale: every x maps to column z; if that column is
            outside the source, collapse the span (caller treats x2 < x1 as
            empty).
          */
          inverse_edge.x2=edge->x1;
          return(inverse_edge);
        }
  /*
    Determine top and bottom edges.
  */
  z=affine->sy*y+affine->ty;
  if (affine->rx >= MagickEpsilon)
    {
      intercept=(-z/affine->rx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->rows)/affine->rx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->rx < -MagickEpsilon)
      {
        intercept=(-z+(double) image->rows)/affine->rx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->rx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
        {
          /*
            NOTE(review): this degenerate branch returns x2=edge->x2 where
            the analogous column branch above returns x2=edge->x1 -- confirm
            the asymmetry is intentional before relying on the empty-span
            (x2 < x1) convention here.
          */
          inverse_edge.x2=edge->x2;
          return(inverse_edge);
        }
  return(inverse_edge);
}
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
  AffineMatrix
    inverse_affine;

  double
    scale;

  /*
    Invert the 2x3 affine transform: the 2x2 linear part is inverted via its
    adjugate scaled by the reciprocal determinant, and the translation is
    mapped through the inverted linear part.  PerceptibleReciprocal() guards
    against a (near-)singular matrix.
  */
  scale=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*
    affine->ry);
  inverse_affine.sx=scale*affine->sy;
  inverse_affine.rx=scale*(-affine->rx);
  inverse_affine.ry=scale*(-affine->ry);
  inverse_affine.sy=scale*affine->sx;
  inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
    inverse_affine.ry;
  inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
    inverse_affine.sy;
  return(inverse_affine);
}
MagickExport MagickBooleanType DrawAffineImage(Image *image,
  const Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
  AffineMatrix
    inverse_affine;

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    extent[4],
    min,
    max;

  SegmentInfo
    edge;

  ssize_t
    i,
    start,
    stop,
    y;

  /*
    Determine bounding box: map the four source corners forward through the
    affine transform and take their min/max.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(source != (const Image *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(affine != (AffineMatrix *) NULL);
  extent[0].x=0.0;
  extent[0].y=0.0;
  extent[1].x=(double) source->columns-1.0;
  extent[1].y=0.0;
  extent[2].x=(double) source->columns-1.0;
  extent[2].y=(double) source->rows-1.0;
  extent[3].x=0.0;
  extent[3].y=(double) source->rows-1.0;
  for (i=0; i < 4; i++)
  {
    PointInfo
      point;

    point=extent[i];
    extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
    extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
  }
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  /*
    Affine transform image: for each destination scanline inside the clipped
    bounding box, map pixels back through the inverse transform, sample the
    source, and composite over the destination.
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  edge.x1=MagickMax(min.x,0.0);
  edge.y1=MagickMax(min.y,0.0);
  edge.x2=MagickMin(max.x,(double) image->columns-1.0);
  edge.y2=MagickMin(max.y,(double) image->rows-1.0);
  inverse_affine=InverseAffineMatrix(affine);
  GetPixelInfo(image,&zero);
  start=CastDoubleToLong(ceil(edge.y1-0.5));
  stop=CastDoubleToLong(floor(edge.y2+0.5));
  source_view=AcquireVirtualCacheView(source,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,image,stop-start,1)
#endif
  for (y=start; y <= stop; y++)
  {
    PixelInfo
      composite,
      pixel;

    PointInfo
      point;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    SegmentInfo
      inverse_edge;

    if (status == MagickFalse)
      continue;
    /*
      Clip this scanline to the x-range that maps inside the source.
    */
    inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
    if (inverse_edge.x2 < inverse_edge.x1)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,CastDoubleToLong(
      ceil(inverse_edge.x1-0.5)),y,(size_t) CastDoubleToLong(floor(
      inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),1,exception);
    if (q == (Quantum *) NULL)
      continue;
    pixel=zero;
    composite=zero;
    /* note: the x_offset counter the original carried here was never read;
       it has been removed as dead code */
    for (x=CastDoubleToLong(ceil(inverse_edge.x1-0.5));
         x <= CastDoubleToLong(floor(inverse_edge.x2+0.5)); x++)
    {
      point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
        inverse_affine.tx;
      point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
        inverse_affine.ty;
      status=InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel,
        point.x,point.y,&pixel,exception);
      if (status == MagickFalse)
        break;
      GetPixelInfoPixel(image,q,&composite);
      CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
        &composite);
      SetPixelViaPixelInfo(image,&composite,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w B o u n d i n g R e c t a n g l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawBoundingRectangles() draws the bounding rectangles on the image. This
% is only useful for developers debugging the rendering algorithm.
%
% The format of the DrawBoundingRectangles method is:
%
% MagickBooleanType DrawBoundingRectangles(Image *image,
% const DrawInfo *draw_info,PolygonInfo *polygon_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawBoundingRectangles(Image *image,
  const DrawInfo *draw_info,const PolygonInfo *polygon_info,
  ExceptionInfo *exception)
{
  double
    mid;
  DrawInfo
    *clone_info;
  MagickStatusType
    status;
  PointInfo
    end,
    resolution,
    start;
  PrimitiveInfo
    primitive_info[6];
  ssize_t
    i;
  SegmentInfo
    bounds;
  ssize_t
    coordinates;
  /*
    Developer debug aid: draw each polygon edge's bounding rectangle
    (red/green by edge direction) plus the overall bounds in blue.
  */
  (void) memset(primitive_info,0,sizeof(primitive_info));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  status=QueryColorCompliance("#000F",AllCompliance,&clone_info->fill,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  /*
    Default to 96 DPI unless the draw info carries a density string.
  */
  resolution.x=96.0;
  resolution.y=96.0;
  if (clone_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;
      MagickStatusType
        flags;
      flags=ParseGeometry(clone_info->density,&geometry_info);
      if ((flags & RhoValue) != 0)
        resolution.x=geometry_info.rho;
      resolution.y=resolution.x;
      if ((flags & SigmaValue) != 0)
        resolution.y=geometry_info.sigma;
    }
  /*
    Half the scaled stroke width; pads every rectangle drawn below.
  */
  mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
    clone_info->stroke_width/2.0;
  bounds.x1=0.0;
  bounds.y1=0.0;
  bounds.x2=0.0;
  bounds.y2=0.0;
  if (polygon_info != (PolygonInfo *) NULL)
    {
      /*
        Union of all edge bounds, padded by mid and clamped to the image.
      */
      bounds=polygon_info->edges[0].bounds;
      for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
          bounds.x1=polygon_info->edges[i].bounds.x1;
        if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
          bounds.y1=polygon_info->edges[i].bounds.y1;
        if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
          bounds.x2=polygon_info->edges[i].bounds.x2;
        if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
          bounds.y2=polygon_info->edges[i].bounds.y2;
      }
      bounds.x1-=mid;
      bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x1;
      bounds.y1-=mid;
      bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y1;
      bounds.x2+=mid;
      bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x2;
      bounds.y2+=mid;
      bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y2;
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        /*
          Stroke color encodes the edge direction.
        */
        if (polygon_info->edges[i].direction != 0)
          status=QueryColorCompliance("#f00",AllCompliance,&clone_info->stroke,
            exception);
        else
          status=QueryColorCompliance("#0f0",AllCompliance,&clone_info->stroke,
            exception);
        if (status == MagickFalse)
          break;
        start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
        start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
        end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
        end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
        primitive_info[0].primitive=RectanglePrimitive;
        status&=TraceRectangle(primitive_info,start,end);
        primitive_info[0].method=ReplaceMethod;
        coordinates=(ssize_t) primitive_info[0].coordinates;
        primitive_info[coordinates].primitive=UndefinedPrimitive;
        status=DrawPrimitive(image,clone_info,primitive_info,exception);
        if (status == MagickFalse)
          break;
      }
      if (i < (ssize_t) polygon_info->number_edges)
        {
          /*
            The edge loop exited early on failure.
          */
          clone_info=DestroyDrawInfo(clone_info);
          return(status == 0 ? MagickFalse : MagickTrue);
        }
    }
  /*
    Draw the overall bounding rectangle in blue.
  */
  status=QueryColorCompliance("#00f",AllCompliance,&clone_info->stroke,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  start.x=(double) (bounds.x1-mid);
  start.y=(double) (bounds.y1-mid);
  end.x=(double) (bounds.x2+mid);
  end.y=(double) (bounds.y2+mid);
  primitive_info[0].primitive=RectanglePrimitive;
  status&=TraceRectangle(primitive_info,start,end);
  primitive_info[0].method=ReplaceMethod;
  coordinates=(ssize_t) primitive_info[0].coordinates;
  primitive_info[coordinates].primitive=UndefinedPrimitive;
  status=DrawPrimitive(image,clone_info,primitive_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
  return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClipPath() draws the clip path on the image mask.
%
% The format of the DrawClipPath method is:
%
% MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
% const char *id,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
  const DrawInfo *draw_info,const char *id,ExceptionInfo *exception)
{
  const char
    *path;
  Image
    *mask;
  MagickBooleanType
    status;
  /*
    Render the clip path registered under the given artifact id and
    install it as the image's write mask.
  */
  path=GetImageArtifact(image,id);
  if (path == (const char *) NULL)
    return(MagickFalse);
  mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,path,exception);
  if (mask == (Image *) NULL)
    return(MagickFalse);
  status=SetImageMask(image,WritePixelMask,mask,exception);
  mask=DestroyImage(mask);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p p i n g M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClippingMask() draws the clip path and returns it as an image clipping
% mask.
%
% The format of the DrawClippingMask method is:
%
% Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *clip_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o clip_path: the clip path.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *clip_path,ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;
  Image
    *clip_mask,
    *separate_mask;
  MagickStatusType
    status;
  /*
    Draw a clip path: render the MVG clip_path into a transient canvas,
    then return its (negated) alpha channel as a grayscale clipping
    mask.  Returns NULL on failure; the caller owns the returned image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  clip_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(clip_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(clip_mask));
  status=SetImageMask(clip_mask,WritePixelMask,(Image *) NULL,exception);
  status=QueryColorCompliance("#0000",AllCompliance,
    &clip_mask->background_color,exception);
  clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  clip_mask->background_color.alpha_trait=BlendPixelTrait;
  status=SetImageBackgroundColor(clip_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
      id);
  /*
    Render the clip path with an opaque white fill and no stroke.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,clip_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  if (clone_info->clip_mask != (char *) NULL)
    clone_info->clip_mask=DestroyString(clone_info->clip_mask);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  clone_info->clip_path=MagickTrue;
  status=RenderMVGContent(clip_mask,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  /*
    Separate the alpha channel as the mask proper and negate it.  The
    failure path destroys the mask exactly once: previously a failed
    NegateImage() destroyed clip_mask both here and in the trailing
    status check, invoking DestroyImage() on a NULL image.
  */
  separate_mask=SeparateImage(clip_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      clip_mask=DestroyImage(clip_mask);
      clip_mask=separate_mask;
      status=NegateImage(clip_mask,MagickFalse,exception);
    }
  if (status == MagickFalse)
    clip_mask=DestroyImage(clip_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
  return(clip_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C o m p o s i t e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawCompositeMask() draws the mask path and returns it as an image mask.
%
% The format of the DrawCompositeMask method is:
%
% Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *mask_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the mask path id.
%
% o mask_path: the mask path.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *mask_path,ExceptionInfo *exception)
{
  Image
    *composite_mask,
    *separate_mask;
  DrawInfo
    *clone_info;
  MagickStatusType
    status;
  /*
    Draw a mask path: render the MVG mask_path into a transient canvas,
    then return its (negated) alpha channel as a grayscale composite
    mask.  Returns NULL on failure; the caller owns the returned image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  composite_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(composite_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(composite_mask));
  status=SetImageMask(composite_mask,CompositePixelMask,(Image *) NULL,
    exception);
  status=QueryColorCompliance("#0000",AllCompliance,
    &composite_mask->background_color,exception);
  composite_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  composite_mask->background_color.alpha_trait=BlendPixelTrait;
  (void) SetImageBackgroundColor(composite_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
      id);
  /*
    Render the mask path with an opaque white fill and no stroke.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,mask_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  status=RenderMVGContent(composite_mask,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  /*
    Separate the alpha channel as the mask proper and negate it.  The
    failure path destroys the mask exactly once: previously a failed
    NegateImage() destroyed composite_mask both here and in the
    trailing status check, invoking DestroyImage() on a NULL image.
  */
  separate_mask=SeparateImage(composite_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      composite_mask=DestroyImage(composite_mask);
      composite_mask=separate_mask;
      status=NegateImage(composite_mask,MagickFalse,exception);
    }
  if (status == MagickFalse)
    composite_mask=DestroyImage(composite_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
  return(composite_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w D a s h P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
% image while respecting the dash offset and dash pattern attributes.
%
% The format of the DrawDashPolygon method is:
%
% MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
  double
    length,
    maximum_length,
    offset,
    scale,
    total_length;
  DrawInfo
    *clone_info;
  MagickStatusType
    status;
  PrimitiveInfo
    *dash_polygon;
  double
    dx,
    dy;
  ssize_t
    i;
  size_t
    number_vertices;
  ssize_t
    j,
    n;
  /*
    Render the polygon as a series of short stroked sub-polygons, one
    per dash, honoring dash_pattern (alternating dash/gap lengths,
    0.0-terminated) and dash_offset.
  */
  assert(draw_info != (const DrawInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    begin draw-dash");
  /*
    Count vertices up to the UndefinedPrimitive terminator.
  */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  number_vertices=(size_t) i;
  dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (2UL*number_vertices+32UL),sizeof(*dash_polygon));
  if (dash_polygon == (PrimitiveInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  (void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
    sizeof(*dash_polygon));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->miterlimit=0;
  dash_polygon[0]=primitive_info[0];
  scale=ExpandAffine(&draw_info->affine);
  length=scale*draw_info->dash_pattern[0];
  offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
    scale*draw_info->dash_offset : 0.0;
  j=1;
  /*
    Consume the dash offset by walking the dash pattern; n ends on the
    pattern entry in effect at the start of the path, with length being
    the remaining portion of that entry.
  */
  for (n=0; offset > 0.0; j=0)
  {
    if (draw_info->dash_pattern[n] <= 0.0)
      break;
    length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    if (offset > length)
      {
        offset-=length;
        n++;
        length=scale*draw_info->dash_pattern[n];
        continue;
      }
    if (offset < length)
      {
        length-=offset;
        offset=0.0;
        break;
      }
    offset=0.0;
    n++;
  }
  status=MagickTrue;
  maximum_length=0.0;
  total_length=0.0;
  /*
    Walk each polygon segment; even n means we are inside a dash, odd n
    inside a gap.
  */
  for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
  {
    dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
    dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
    maximum_length=hypot(dx,dy);
    if (maximum_length > (double) (MaxBezierCoordinates >> 2))
      continue;
    if (fabs(length) < MagickEpsilon)
      {
        /*
          Pattern entry exhausted exactly at a vertex; advance to the
          next entry, wrapping at the 0.0 terminator.
        */
        if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
          n++;
        if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
          n=0;
        length=scale*draw_info->dash_pattern[n];
      }
    for (total_length=0.0; (length >= 0.0) && (maximum_length >= (total_length+length)); )
    {
      total_length+=length;
      if ((n & 0x01) != 0)
        {
          /*
            End of a gap: the next dash starts at this interpolated
            point along the segment.
          */
          dash_polygon[0]=primitive_info[0];
          dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          j=1;
        }
      else
        {
          /*
            End of a dash: terminate the sub-polygon and stroke it.
          */
          if ((j+1) > (ssize_t) number_vertices)
            break;
          dash_polygon[j]=primitive_info[i-1];
          dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].coordinates=1;
          j++;
          dash_polygon[0].coordinates=(size_t) j;
          dash_polygon[j].primitive=UndefinedPrimitive;
          status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
          if (status == MagickFalse)
            break;
        }
      if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
        n++;
      if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
        n=0;
      length=scale*draw_info->dash_pattern[n];
    }
    /*
      Carry the unconsumed remainder of this segment into the next one.
    */
    length-=(maximum_length-total_length);
    if ((n & 0x01) != 0)
      continue;
    dash_polygon[j]=primitive_info[i];
    dash_polygon[j].coordinates=1;
    j++;
  }
  /*
    Flush a trailing, partially-drawn dash.
  */
  if ((status != MagickFalse) && (total_length < maximum_length) &&
      ((n & 0x01) == 0) && (j > 1))
    {
      dash_polygon[j]=primitive_info[i-1];
      dash_polygon[j].point.x+=MagickEpsilon;
      dash_polygon[j].point.y+=MagickEpsilon;
      dash_polygon[j].coordinates=1;
      j++;
      dash_polygon[0].coordinates=(size_t) j;
      dash_polygon[j].primitive=UndefinedPrimitive;
      status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
    }
  dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end draw-dash");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGradientImage() draws a linear gradient on the image.
%
% The format of the DrawGradientImage method is:
%
% MagickBooleanType DrawGradientImage(Image *image,
% const DrawInfo *draw_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
  const ssize_t x,const ssize_t y)
{
  /*
    Return the gradient offset of pixel (x,y): for linear gradients the
    unnormalized projection of the pixel onto the gradient vector; for
    radial gradients the distance from the gradient center (normalized
    by the radii except for repeat spread).
  */
  switch (gradient->type)
  {
    case UndefinedGradient:
    case LinearGradient:
    {
      double
        gamma,
        length,
        offset,
        scale;
      PointInfo
        p,
        q;
      const SegmentInfo
        *gradient_vector;
      /*
        p is the gradient vector, q the vector from its origin to the
        pixel; offset = (p.q)/|p| = |q|*cos(angle between p and q).
      */
      gradient_vector=(&gradient->gradient_vector);
      p.x=gradient_vector->x2-gradient_vector->x1;
      p.y=gradient_vector->y2-gradient_vector->y1;
      q.x=(double) x-gradient_vector->x1;
      q.y=(double) y-gradient_vector->y1;
      length=sqrt(q.x*q.x+q.y*q.y);
      gamma=sqrt(p.x*p.x+p.y*p.y)*length;
      gamma=PerceptibleReciprocal(gamma);
      scale=p.x*q.x+p.y*q.y;
      offset=gamma*scale*length;
      return(offset);
    }
    case RadialGradient:
    {
      PointInfo
        v;
      if (gradient->spread == RepeatSpread)
        {
          /*
            Repeat spread uses the raw Euclidean distance from center.
          */
          v.x=(double) x-gradient->center.x;
          v.y=(double) y-gradient->center.y;
          return(sqrt(v.x*v.x+v.y*v.y));
        }
      /*
        Rotate into the ellipse frame and normalize by the radii.
      */
      v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
        gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
      v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
        gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
      return(sqrt(v.x*v.x+v.y*v.y));
    }
  }
  return(0.0);
}
static int StopInfoCompare(const void *x,const void *y)
{
  const StopInfo
    *stop_1,
    *stop_2;
  /*
    qsort() comparator ordering gradient stops by ascending offset;
    offsets within MagickEpsilon of each other (and not strictly
    greater) compare equal.
  */
  stop_1=(const StopInfo *) x;
  stop_2=(const StopInfo *) y;
  if (stop_1->offset > stop_2->offset)
    return(1);
  if (fabs(stop_1->offset-stop_2->offset) <= MagickEpsilon)
    return(0);
  return(-1);
}
MagickExport MagickBooleanType DrawGradientImage(Image *image,
  const DrawInfo *draw_info,ExceptionInfo *exception)
{
  CacheView
    *image_view;
  const GradientInfo
    *gradient;
  const SegmentInfo
    *gradient_vector;
  double
    length;
  MagickBooleanType
    status;
  PixelInfo
    zero;
  PointInfo
    point;
  RectangleInfo
    bounding_box;
  ssize_t
    y;
  /*
    Draw linear or radial gradient on image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  gradient=(&draw_info->gradient);
  /*
    Stops must be ordered by ascending offset for the interpolation
    below.
  */
  qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo),
    StopInfoCompare);
  gradient_vector=(&gradient->gradient_vector);
  point.x=gradient_vector->x2-gradient_vector->x1;
  point.y=gradient_vector->y2-gradient_vector->y1;
  length=sqrt(point.x*point.x+point.y*point.y);
  /*
    NOTE(review): bounding_box.width/height are used below as exclusive
    end coordinates, not as extents -- confirm against the caller that
    fills gradient->bounding_box.
  */
  bounding_box=gradient->bounding_box;
  status=MagickTrue;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
  for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
  {
    double
      alpha,
      offset;
    PixelInfo
      composite,
      pixel;
    Quantum
      *magick_restrict q;
    ssize_t
      i,
      x;
    ssize_t
      j;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    composite=zero;
    /*
      Offset at the first column; recomputed per pixel below except
      when the pixel coincides with the gradient origin.
    */
    offset=GetStopColorOffset(gradient,0,y);
    if (gradient->type != RadialGradient)
      offset*=PerceptibleReciprocal(length);
    for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      switch (gradient->spread)
      {
        case UndefinedSpread:
        case PadSpread:
        {
          /*
            Pad spread: offsets outside [0,1] clamp to the end stops.
          */
          if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) ||
              (y != CastDoubleToLong(ceil(gradient_vector->y1-0.5))))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if ((offset < 0.0) || (i == 0))
            composite=gradient->stops[0].color;
          else
            if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                /*
                  Linear blend between the two stops bracketing offset.
                */
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case ReflectSpread:
        {
          /*
            Reflect spread: mirror the gradient on every other
            repetition.
          */
          if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) ||
              (y != CastDoubleToLong(ceil(gradient_vector->y1-0.5))))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          if (offset < 0.0)
            offset=(-offset);
          if ((ssize_t) fmod(offset,2.0) == 0)
            offset=fmod(offset,1.0);
          else
            offset=1.0-fmod(offset,1.0);
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case RepeatSpread:
        {
          double
            repeat;
          MagickBooleanType
            antialias;
          /*
            Repeat spread: tile the gradient; antialias the seam where
            one repetition meets the next.
          */
          antialias=MagickFalse;
          repeat=0.0;
          if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) ||
              (y != CastDoubleToLong(ceil(gradient_vector->y1-0.5))))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type == LinearGradient)
                {
                  repeat=fmod(offset,length);
                  if (repeat < 0.0)
                    repeat=length-fmod(-repeat,length);
                  else
                    repeat=fmod(offset,length);
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=PerceptibleReciprocal(length)*repeat;
                }
              else
                {
                  repeat=fmod(offset,gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,gradient->radius);
                  else
                    repeat=fmod(offset,gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ? MagickTrue :
                    MagickFalse;
                  offset=repeat*PerceptibleReciprocal(gradient->radius);
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    /*
                      At the seam, blend the last stop back into the
                      first across one pixel.
                    */
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
        &pixel);
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawImage() draws a graphic primitive on your image. The primitive
% may be represented as a string or filename. Precede the filename with an
% "at" sign (@) and the contents of the file are drawn on the image. You
% can affect how text is drawn by setting one or more members of the draw
% info structure.
%
% The format of the DrawImage method is:
%
% MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
  const double pad)
{
  double
    extent;
  size_t
    quantum;
  /*
    Check if there is enough storage for drawing primitives.
  */
  quantum=sizeof(**mvg_info->primitive_info);
  extent=(double) mvg_info->offset+pad+(PrimitiveExtentPad+1)*quantum;
  if (extent <= (double) *mvg_info->extent)
    return(MagickTrue);
  /*
    Grow the primitive array.  The double/long round-trip comparison
    rejects sizes too large to represent exactly (overflow guard).
  */
  if (extent == (double) CastDoubleToLong(extent))
    {
      *mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(
        *mvg_info->primitive_info,(size_t) (extent+1),quantum);
      if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
        {
          ssize_t
            i;
          *mvg_info->extent=(size_t) extent;
          /*
            Mark the newly allocated tail as undefined.
          */
          for (i=mvg_info->offset+1; i <= (ssize_t) extent; i++)
          {
            (*mvg_info->primitive_info)[i].primitive=UndefinedPrimitive;
            (*mvg_info->primitive_info)[i].text=(char *) NULL;
          }
          return(MagickTrue);
        }
    }
  /*
    Reallocation failed, allocate a primitive to facilitate unwinding.
  */
  (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
    ResourceLimitError,"MemoryAllocationFailed","`%s'","");
  if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
    *mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(
      *mvg_info->primitive_info);
  *mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory((size_t) (
    (PrimitiveExtentPad+1)*quantum));
  (void) memset(*mvg_info->primitive_info,0,(size_t) ((PrimitiveExtentPad+1)*
    quantum));
  *mvg_info->extent=1;
  mvg_info->offset=0;
  return(MagickFalse);
}
static inline double GetDrawValue(const char *magick_restrict string,
  char **magick_restrict sentinal)
{
  /*
    Parse a double from string via locale-independent
    InterpretLocaleValue(); on return *sentinal points past the parsed
    characters (strtod() semantics).  The previous implementation
    copied sentinal into a local, passed the local, and assigned it
    back to the parameter -- three no-op steps removed here.
  */
  return(InterpretLocaleValue(string,sentinal));
}
static int MVGMacroCompare(const void *target,const void *source)
{
  /*
    Lexicographic key comparison for the MVG macro splay-tree; both
    arguments are NUL-terminated strings.
  */
  return(strcmp((const char *) target,(const char *) source));
}
static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
  char
    *macro,
    *token;
  const char
    *q;
  size_t
    extent;
  SplayTreeInfo
    *macros;
  /*
    Scan graphic primitives for definitions and classes.  Returns a
    splay-tree mapping macro names to their MVG bodies (or NULL when
    primitive is NULL); the caller owns the tree.
  */
  if (primitive == (const char *) NULL)
    return((SplayTreeInfo *) NULL);
  macros=NewSplayTree(MVGMacroCompare,RelinquishMagickMemory,
    RelinquishMagickMemory);
  macro=AcquireString(primitive);
  token=AcquireString(primitive);
  extent=strlen(token)+MagickPathExtent;
  for (q=primitive; *q != '\0'; )
  {
    if (GetNextToken(q,&q,extent,token) < 1)
      break;
    if (*token == '\0')
      break;
    if (LocaleCompare("push",token) == 0)
      {
        const char
          *end,
          *start;
        (void) GetNextToken(q,&q,extent,token);
        if (*q == '"')
          {
            char
              name[MagickPathExtent];
            const char
              *p;
            ssize_t
              n;
            /*
              Named macro (e.g. push graphic-context "wheel").
            */
            (void) GetNextToken(q,&q,extent,token);
            start=q;
            end=q;
            (void) CopyMagickString(name,token,MagickPathExtent);
            n=1;
            for (p=q; *p != '\0'; )
            {
              /*
                Track push/pop nesting; the macro body ends at the pop
                that balances the opening push.
              */
              if (GetNextToken(p,&p,extent,token) < 1)
                break;
              if (*token == '\0')
                break;
              if (LocaleCompare(token,"pop") == 0)
                {
                  end=p-strlen(token)-1;
                  n--;
                }
              if (LocaleCompare(token,"push") == 0)
                n++;
              if ((n == 0) && (end > start))
                {
                  /*
                    Extract macro.
                  */
                  (void) GetNextToken(p,&p,extent,token);
                  (void) CopyMagickString(macro,start,(size_t) (end-start));
                  (void) AddValueToSplayTree(macros,ConstantString(name),
                    ConstantString(macro));
                  break;
                }
            }
          }
      }
  }
  token=DestroyString(token);
  macro=DestroyString(macro);
  return(macros);
}
static inline MagickBooleanType IsPoint(const char *point)
{
  char
    *end;
  double
    value;
  /*
    Return MagickTrue when the string parses as a coordinate: either
    some characters were consumed, or the parsed value is non-zero.
  */
  value=GetDrawValue(point,&end);
  if ((end == point) && (fabs(value) < MagickEpsilon))
    return(MagickFalse);
  return(MagickTrue);
}
static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info,
  const PointInfo point)
{
  /*
    Record a single-coordinate primitive at the given point; always
    succeeds.
  */
  primitive_info->point=point;
  primitive_info->coordinates=1;
  primitive_info->closed_subpath=MagickFalse;
  return(MagickTrue);
}
static MagickBooleanType RenderMVGContent(Image *image,
const DrawInfo *draw_info,const size_t depth,ExceptionInfo *exception)
{
#define RenderImageTag "Render/Image"
AffineMatrix
affine,
current;
char
keyword[MagickPathExtent],
geometry[MagickPathExtent],
*next_token,
pattern[MagickPathExtent],
*primitive,
*token;
const char
*q;
double
angle,
coordinates,
cursor,
factor,
primitive_extent;
DrawInfo
*clone_info,
**graphic_context;
MagickBooleanType
proceed;
MagickStatusType
status;
MVGInfo
mvg_info;
PointInfo
point;
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
const char
*p;
ssize_t
i,
x;
SegmentInfo
bounds;
size_t
extent,
number_points,
number_stops;
SplayTreeInfo
*macros;
ssize_t
defsDepth,
j,
k,
n,
symbolDepth;
StopInfo
*stops;
TypeMetric
metrics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if (depth > MagickMaxRecursionDepth)
ThrowBinaryException(DrawError,"VectorGraphicsNestedTooDeeply",
image->filename);
if ((draw_info->primitive == (char *) NULL) ||
(*draw_info->primitive == '\0'))
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image");
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (image->alpha_trait == UndefinedPixelTrait)
{
status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
if (status == MagickFalse)
return(MagickFalse);
}
if ((*draw_info->primitive == '@') && (strlen(draw_info->primitive) > 1) &&
(*(draw_info->primitive+1) != '-') && (depth == 0))
primitive=FileToString(draw_info->primitive+1,~0UL,exception);
else
primitive=AcquireString(draw_info->primitive);
if (primitive == (char *) NULL)
return(MagickFalse);
primitive_extent=(double) strlen(primitive);
(void) SetImageArtifact(image,"mvg:vector-graphics",primitive);
n=0;
number_stops=0;
stops=(StopInfo *) NULL;
/*
Allocate primitive info memory.
*/
graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
primitive=DestroyString(primitive);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
number_points=(size_t) PrimitiveExtentPad;
primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
(number_points+1),sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(primitive_info,0,(size_t) (number_points+1)*
sizeof(*primitive_info));
(void) memset(&mvg_info,0,sizeof(mvg_info));
mvg_info.primitive_info=(&primitive_info);
mvg_info.extent=(&number_points);
mvg_info.exception=exception;
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info);
graphic_context[n]->viewbox=image->page;
if ((image->page.width == 0) || (image->page.height == 0))
{
graphic_context[n]->viewbox.width=image->columns;
graphic_context[n]->viewbox.height=image->rows;
}
token=AcquireString(primitive);
extent=strlen(token)+MagickPathExtent;
defsDepth=0;
symbolDepth=0;
cursor=0.0;
macros=GetMVGMacros(primitive);
status=MagickTrue;
for (q=primitive; *q != '\0'; )
{
/*
Interpret graphic primitive.
*/
if (GetNextToken(q,&q,MagickPathExtent,keyword) < 1)
break;
if (*keyword == '\0')
break;
if (*keyword == '#')
{
/*
Comment.
*/
while ((*q != '\n') && (*q != '\0'))
q++;
continue;
}
p=q-strlen(keyword)-1;
primitive_type=UndefinedPrimitive;
current=graphic_context[n]->affine;
GetAffineMatrix(&affine);
*token='\0';
switch (*keyword)
{
case ';':
break;
case 'a':
case 'A':
{
if (LocaleCompare("affine",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.rx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ry=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.tx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("alpha",keyword) == 0)
{
primitive_type=AlphaPrimitive;
break;
}
if (LocaleCompare("arc",keyword) == 0)
{
primitive_type=ArcPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'b':
case 'B':
{
if (LocaleCompare("bezier",keyword) == 0)
{
primitive_type=BezierPrimitive;
break;
}
if (LocaleCompare("border-color",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->border_color,exception);
break;
}
status=MagickFalse;
break;
}
case 'c':
case 'C':
{
if (LocaleCompare("class",keyword) == 0)
{
const char
*mvg_class;
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
if (LocaleCompare(token,graphic_context[n]->id) == 0)
break;
mvg_class=(const char *) GetValueFromSplayTree(macros,token);
if ((graphic_context[n]->render != MagickFalse) &&
(mvg_class != (const char *) NULL) && (p > primitive))
{
char
*elements;
ssize_t
offset;
/*
Inject class elements in stream.
*/
offset=(ssize_t) (p-primitive);
elements=AcquireString(primitive);
elements[offset]='\0';
(void) ConcatenateString(&elements,mvg_class);
(void) ConcatenateString(&elements,"\n");
(void) ConcatenateString(&elements,q);
primitive=DestroyString(primitive);
primitive=elements;
q=primitive+offset;
}
break;
}
if (LocaleCompare("clip-path",keyword) == 0)
{
const char
*clip_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
(void) CloneString(&graphic_context[n]->clip_mask,token);
clip_path=(const char *) GetValueFromSplayTree(macros,token);
if (clip_path != (const char *) NULL)
{
if (graphic_context[n]->clipping_mask != (Image *) NULL)
graphic_context[n]->clipping_mask=
DestroyImage(graphic_context[n]->clipping_mask);
graphic_context[n]->clipping_mask=DrawClippingMask(image,
graphic_context[n],token,clip_path,exception);
if (graphic_context[n]->compliance != SVGCompliance)
{
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,
graphic_context[n]->clip_mask,clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
}
break;
}
if (LocaleCompare("clip-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("clip-units",keyword) == 0)
{
ssize_t
clip_units;
(void) GetNextToken(q,&q,extent,token);
clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse,
token);
if (clip_units == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->clip_units=(ClipPathUnits) clip_units;
if (clip_units == ObjectBoundingBox)
{
GetAffineMatrix(¤t);
affine.sx=draw_info->bounds.x2;
affine.sy=draw_info->bounds.y2;
affine.tx=draw_info->bounds.x1;
affine.ty=draw_info->bounds.y1;
break;
}
break;
}
if (LocaleCompare("circle",keyword) == 0)
{
primitive_type=CirclePrimitive;
break;
}
if (LocaleCompare("color",keyword) == 0)
{
primitive_type=ColorPrimitive;
break;
}
if (LocaleCompare("compliance",keyword) == 0)
{
/*
MVG compliance associates a clipping mask with an image; SVG
compliance associates a clipping mask with a graphics context.
*/
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->compliance=(ComplianceType) ParseCommandOption(
MagickComplianceOptions,MagickFalse,token);
break;
}
if (LocaleCompare("currentColor",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
status=MagickFalse;
break;
}
case 'd':
case 'D':
{
if (LocaleCompare("decorate",keyword) == 0)
{
ssize_t
decorate;
(void) GetNextToken(q,&q,extent,token);
decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse,
token);
if (decorate == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->decorate=(DecorationType) decorate;
break;
}
if (LocaleCompare("density",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->density,token);
break;
}
if (LocaleCompare("direction",keyword) == 0)
{
ssize_t
direction;
(void) GetNextToken(q,&q,extent,token);
direction=ParseCommandOption(MagickDirectionOptions,MagickFalse,
token);
if (direction == -1)
status=MagickFalse;
else
graphic_context[n]->direction=(DirectionType) direction;
break;
}
status=MagickFalse;
break;
}
case 'e':
case 'E':
{
if (LocaleCompare("ellipse",keyword) == 0)
{
primitive_type=EllipsePrimitive;
break;
}
if (LocaleCompare("encoding",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->encoding,token);
break;
}
status=MagickFalse;
break;
}
case 'f':
case 'F':
{
if (LocaleCompare("fill",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->fill_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->fill,exception);
if (graphic_context[n]->fill_alpha != OpaqueAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
}
break;
}
if (LocaleCompare("fill-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
GetDrawValue(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->compliance == SVGCompliance)
graphic_context[n]->fill_alpha*=opacity;
else
graphic_context[n]->fill_alpha=QuantumRange*opacity;
if (graphic_context[n]->fill.alpha != TransparentAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
else
graphic_context[n]->fill.alpha=(MagickRealType)
ClampToQuantum(QuantumRange*(1.0-opacity));
break;
}
if (LocaleCompare("fill-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("font",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->font,token);
if (LocaleCompare("none",token) == 0)
graphic_context[n]->font=(char *) RelinquishMagickMemory(
graphic_context[n]->font);
break;
}
if (LocaleCompare("font-family",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->family,token);
break;
}
if (LocaleCompare("font-size",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->pointsize=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("font-stretch",keyword) == 0)
{
ssize_t
stretch;
(void) GetNextToken(q,&q,extent,token);
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token);
if (stretch == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->stretch=(StretchType) stretch;
break;
}
if (LocaleCompare("font-style",keyword) == 0)
{
ssize_t
style;
(void) GetNextToken(q,&q,extent,token);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,token);
if (style == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->style=(StyleType) style;
break;
}
if (LocaleCompare("font-weight",keyword) == 0)
{
ssize_t
weight;
(void) GetNextToken(q,&q,extent,token);
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(token);
graphic_context[n]->weight=(size_t) weight;
break;
}
status=MagickFalse;
break;
}
case 'g':
case 'G':
{
if (LocaleCompare("gradient-units",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("gravity",keyword) == 0)
{
ssize_t
gravity;
(void) GetNextToken(q,&q,extent,token);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token);
if (gravity == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->gravity=(GravityType) gravity;
break;
}
status=MagickFalse;
break;
}
case 'i':
case 'I':
{
if (LocaleCompare("image",keyword) == 0)
{
ssize_t
compose;
primitive_type=ImagePrimitive;
(void) GetNextToken(q,&q,extent,token);
compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token);
if (compose == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->compose=(CompositeOperator) compose;
break;
}
if (LocaleCompare("interline-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interline_spacing=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("interword-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'k':
case 'K':
{
if (LocaleCompare("kerning",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->kerning=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'l':
case 'L':
{
if (LocaleCompare("letter-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (IsPoint(token) == MagickFalse)
break;
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
clone_info->text=AcquireString(" ");
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
graphic_context[n]->kerning=metrics.width*
GetDrawValue(token,&next_token);
clone_info=DestroyDrawInfo(clone_info);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("line",keyword) == 0)
{
primitive_type=LinePrimitive;
break;
}
status=MagickFalse;
break;
}
case 'm':
case 'M':
{
if (LocaleCompare("mask",keyword) == 0)
{
const char
*mask_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
mask_path=(const char *) GetValueFromSplayTree(macros,token);
if (mask_path != (const char *) NULL)
{
if (graphic_context[n]->composite_mask != (Image *) NULL)
graphic_context[n]->composite_mask=
DestroyImage(graphic_context[n]->composite_mask);
graphic_context[n]->composite_mask=DrawCompositeMask(image,
graphic_context[n],token,mask_path,exception);
if (graphic_context[n]->compliance != SVGCompliance)
status=SetImageMask(image,CompositePixelMask,
graphic_context[n]->composite_mask,exception);
}
break;
}
status=MagickFalse;
break;
}
case 'o':
case 'O':
{
if (LocaleCompare("offset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
GetDrawValue(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->compliance == SVGCompliance)
{
graphic_context[n]->fill_alpha*=opacity;
graphic_context[n]->stroke_alpha*=opacity;
}
else
{
graphic_context[n]->fill_alpha=QuantumRange*opacity;
graphic_context[n]->stroke_alpha=QuantumRange*opacity;
}
break;
}
status=MagickFalse;
break;
}
case 'p':
case 'P':
{
if (LocaleCompare("path",keyword) == 0)
{
primitive_type=PathPrimitive;
break;
}
if (LocaleCompare("point",keyword) == 0)
{
primitive_type=PointPrimitive;
break;
}
if (LocaleCompare("polyline",keyword) == 0)
{
primitive_type=PolylinePrimitive;
break;
}
if (LocaleCompare("polygon",keyword) == 0)
{
primitive_type=PolygonPrimitive;
break;
}
if (LocaleCompare("pop",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
break;
if (LocaleCompare("clip-path",token) == 0)
break;
if (LocaleCompare("defs",token) == 0)
{
defsDepth--;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
break;
if (LocaleCompare("graphic-context",token) == 0)
{
if (n <= 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
DrawError,"UnbalancedGraphicContextPushPop","`%s'",token);
status=MagickFalse;
n=0;
break;
}
if ((graphic_context[n]->clip_mask != (char *) NULL) &&
(graphic_context[n]->compliance != SVGCompliance))
if (LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0)
status=SetImageMask(image,WritePixelMask,(Image *) NULL,
exception);
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
n--;
break;
}
if (LocaleCompare("mask",token) == 0)
break;
if (LocaleCompare("pattern",token) == 0)
break;
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth--;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
if (LocaleCompare("push",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
{
/*
Class context.
*/
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"class") != 0)
continue;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("clip-path",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"clip-path") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("defs",token) == 0)
{
defsDepth++;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent],
type[MagickPathExtent];
SegmentInfo
segment;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(type,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
segment.x1=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y1=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.x2=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y2=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (LocaleCompare(type,"radial") == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
}
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"gradient") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
bounds.x1=graphic_context[n]->affine.sx*segment.x1+
graphic_context[n]->affine.ry*segment.y1+
graphic_context[n]->affine.tx;
bounds.y1=graphic_context[n]->affine.rx*segment.x1+
graphic_context[n]->affine.sy*segment.y1+
graphic_context[n]->affine.ty;
bounds.x2=graphic_context[n]->affine.sx*segment.x2+
graphic_context[n]->affine.ry*segment.y2+
graphic_context[n]->affine.tx;
bounds.y2=graphic_context[n]->affine.rx*segment.x2+
graphic_context[n]->affine.sy*segment.y2+
graphic_context[n]->affine.ty;
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-type",name);
(void) SetImageArtifact(image,key,type);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%gx%g%+.15g%+.15g",
MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0),
MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0),
bounds.x1,bounds.y1);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("graphic-context",token) == 0)
{
n++;
graphic_context=(DrawInfo **) ResizeQuantumMemory(
graphic_context,(size_t) (n+1),sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,
graphic_context[n-1]);
if (*q == '"')
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->id,token);
}
break;
}
if (LocaleCompare("mask",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("pattern",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent];
RectangleInfo
region;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
(void) GetNextToken(q,&q,extent,token);
region.x=CastDoubleToLong(ceil(GetDrawValue(token,
&next_token)-0.5));
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
region.y=CastDoubleToLong(ceil(GetDrawValue(token,
&next_token)-0.5));
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
region.width=(size_t) CastDoubleToLong(floor(GetDrawValue(
token,&next_token)+0.5));
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
region.height=(size_t) floor(GetDrawValue(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"pattern") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double) region.width,(double)
region.height,(double) region.x,(double) region.y);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth++;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 'r':
case 'R':
{
if (LocaleCompare("rectangle",keyword) == 0)
{
primitive_type=RectanglePrimitive;
break;
}
if (LocaleCompare("rotate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0)));
affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0)));
affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0))));
affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0)));
break;
}
if (LocaleCompare("roundRectangle",keyword) == 0)
{
primitive_type=RoundRectanglePrimitive;
break;
}
status=MagickFalse;
break;
}
case 's':
case 'S':
{
if (LocaleCompare("scale",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("skewX",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.ry=sin(DegreesToRadians(angle));
break;
}
if (LocaleCompare("skewY",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.rx=(-tan(DegreesToRadians(angle)/2.0));
break;
}
if (LocaleCompare("stop-color",keyword) == 0)
{
PixelInfo
stop_color;
number_stops++;
if (number_stops == 1)
stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops));
else
if (number_stops > 2)
stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops,
sizeof(*stops));
if (stops == (StopInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,&stop_color,
exception);
stops[number_stops-1].color=stop_color;
(void) GetNextToken(q,&q,extent,token);
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
stops[number_stops-1].offset=factor*GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->stroke_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->stroke,exception);
if (graphic_context[n]->stroke_alpha != OpaqueAlpha)
graphic_context[n]->stroke.alpha=
graphic_context[n]->stroke_alpha;
}
break;
}
if (LocaleCompare("stroke-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("stroke-dasharray",keyword) == 0)
{
if (graphic_context[n]->dash_pattern != (double *) NULL)
graphic_context[n]->dash_pattern=(double *)
RelinquishMagickMemory(graphic_context[n]->dash_pattern);
if (IsPoint(q) != MagickFalse)
{
const char
*r;
r=q;
(void) GetNextToken(r,&r,extent,token);
if (*token == ',')
(void) GetNextToken(r,&r,extent,token);
for (x=0; IsPoint(token) != MagickFalse; x++)
{
(void) GetNextToken(r,&r,extent,token);
if (*token == ',')
(void) GetNextToken(r,&r,extent,token);
}
graphic_context[n]->dash_pattern=(double *)
AcquireQuantumMemory((size_t) (2*x+2),
sizeof(*graphic_context[n]->dash_pattern));
if (graphic_context[n]->dash_pattern == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
status=MagickFalse;
break;
}
(void) memset(graphic_context[n]->dash_pattern,0,(size_t)
(2*x+2)*sizeof(*graphic_context[n]->dash_pattern));
for (j=0; j < x; j++)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_pattern[j]=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->dash_pattern[j] < 0.0)
status=MagickFalse;
}
if ((x & 0x01) != 0)
for ( ; j < (2*x); j++)
graphic_context[n]->dash_pattern[j]=
graphic_context[n]->dash_pattern[j-x];
graphic_context[n]->dash_pattern[j]=0.0;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("stroke-dashoffset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_offset=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke-linecap",keyword) == 0)
{
ssize_t
linecap;
(void) GetNextToken(q,&q,extent,token);
linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token);
if (linecap == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linecap=(LineCap) linecap;
break;
}
if (LocaleCompare("stroke-linejoin",keyword) == 0)
{
ssize_t
linejoin;
(void) GetNextToken(q,&q,extent,token);
linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse,
token);
if (linejoin == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linejoin=(LineJoin) linejoin;
break;
}
if (LocaleCompare("stroke-miterlimit",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->miterlimit=StringToUnsignedLong(token);
break;
}
if (LocaleCompare("stroke-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
GetDrawValue(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->compliance == SVGCompliance)
graphic_context[n]->stroke_alpha*=opacity;
else
graphic_context[n]->stroke_alpha=QuantumRange*opacity;
if (graphic_context[n]->stroke.alpha != TransparentAlpha)
graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha;
else
graphic_context[n]->stroke.alpha=(MagickRealType)
ClampToQuantum(QuantumRange*(1.0-opacity));
break;
}
if (LocaleCompare("stroke-width",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
graphic_context[n]->stroke_width=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 't':
case 'T':
{
if (LocaleCompare("text",keyword) == 0)
{
primitive_type=TextPrimitive;
cursor=0.0;
break;
}
if (LocaleCompare("text-align",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-anchor",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->text_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("text-undercolor",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->undercolor,exception);
break;
}
if (LocaleCompare("translate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.tx=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
cursor=0.0;
break;
}
status=MagickFalse;
break;
}
case 'u':
case 'U':
{
if (LocaleCompare("use",keyword) == 0)
{
const char
*use;
/*
Get a macro from the MVG document, and "use" it here.
*/
(void) GetNextToken(q,&q,extent,token);
use=(const char *) GetValueFromSplayTree(macros,token);
if (use != (const char *) NULL)
{
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
(void) CloneString(&clone_info->primitive,use);
status=RenderMVGContent(image,clone_info,depth+1,exception);
clone_info=DestroyDrawInfo(clone_info);
}
break;
}
status=MagickFalse;
break;
}
case 'v':
case 'V':
{
if (LocaleCompare("viewbox",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.x=CastDoubleToLong(ceil(
GetDrawValue(token,&next_token)-0.5));
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.y=CastDoubleToLong(ceil(
GetDrawValue(token,&next_token)-0.5));
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.width=(size_t) CastDoubleToLong(
floor(GetDrawValue(token,&next_token)+0.5));
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.height=(size_t) CastDoubleToLong(
floor(GetDrawValue(token,&next_token)+0.5));
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'w':
case 'W':
{
if (LocaleCompare("word-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=GetDrawValue(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
default:
{
status=MagickFalse;
break;
}
}
if (status == MagickFalse)
break;
if ((fabs(affine.sx-1.0) >= MagickEpsilon) ||
(fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) ||
(fabs(affine.sy-1.0) >= MagickEpsilon) ||
(fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon))
{
graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx;
graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx;
graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy;
graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy;
graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+
current.tx;
graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+
current.ty;
}
if (primitive_type == UndefinedPrimitive)
{
if (*q == '\0')
{
if (number_stops > 1)
{
GradientType
type;
type=LinearGradient;
if (draw_info->gradient.type == RadialGradient)
type=RadialGradient;
(void) GradientImage(image,type,PadSpread,stops,number_stops,
exception);
}
if (number_stops > 0)
stops=(StopInfo *) RelinquishMagickMemory(stops);
}
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int)
(q-p-1),p);
continue;
}
/*
Parse the primitive attributes.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
i=0;
mvg_info.offset=i;
j=0;
primitive_info[0].point.x=0.0;
primitive_info[0].point.y=0.0;
primitive_info[0].coordinates=0;
primitive_info[0].method=FloodfillMethod;
primitive_info[0].closed_subpath=MagickFalse;
for (x=0; *q != '\0'; x++)
{
/*
Define points.
*/
if (IsPoint(q) == MagickFalse)
break;
(void) GetNextToken(q,&q,extent,token);
point.x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
point.y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
primitive_info[i].primitive=primitive_type;
primitive_info[i].point=point;
primitive_info[i].coordinates=0;
primitive_info[i].method=FloodfillMethod;
primitive_info[i].closed_subpath=MagickFalse;
i++;
mvg_info.offset=i;
if (i < (ssize_t) number_points)
continue;
status&=CheckPrimitiveExtent(&mvg_info,(double) number_points);
}
if (status == MagickFalse)
break;
if ((primitive_info[j].primitive == TextPrimitive) ||
(primitive_info[j].primitive == ImagePrimitive))
if (primitive_info[j].text != (char *) NULL)
primitive_info[j].text=DestroyString(primitive_info[j].text);
primitive_info[j].primitive=primitive_type;
primitive_info[j].coordinates=(size_t) x;
primitive_info[j].method=FloodfillMethod;
primitive_info[j].closed_subpath=MagickFalse;
/*
Circumscribe primitive within a circle.
*/
bounds.x1=primitive_info[j].point.x;
bounds.y1=primitive_info[j].point.y;
bounds.x2=primitive_info[j].point.x;
bounds.y2=primitive_info[j].point.y;
for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++)
{
point=primitive_info[j+k].point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.y < bounds.y1)
bounds.y1=point.y;
if (point.x > bounds.x2)
bounds.x2=point.x;
if (point.y > bounds.y2)
bounds.y2=point.y;
}
/*
Speculate how many points our primitive might consume.
*/
coordinates=(double) primitive_info[j].coordinates;
switch (primitive_type)
{
case RectanglePrimitive:
{
coordinates*=5.0;
break;
}
case RoundRectanglePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot(alpha,beta);
coordinates*=5.0;
coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0*
BezierQuantum+360.0;
break;
}
case BezierPrimitive:
{
coordinates=(BezierQuantum*(double) primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
char
*s,
*t;
(void) GetNextToken(q,&q,extent,token);
coordinates=1.0;
t=token;
for (s=token; *s != '\0'; s=t)
{
double
value;
value=GetDrawValue(s,&t);
(void) value;
if (s == t)
{
t++;
continue;
}
coordinates++;
}
for (s=token; *s != '\0'; s++)
if (strspn(s,"AaCcQqSsTt") != 0)
coordinates+=(20.0*BezierQuantum)+360.0;
break;
}
default:
break;
}
if (status == MagickFalse)
break;
if (((size_t) (i+coordinates)) >= number_points)
{
/*
Resize based on speculative points required by primitive.
*/
number_points+=coordinates+1;
if (number_points < (size_t) coordinates)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
mvg_info.offset=i;
status&=CheckPrimitiveExtent(&mvg_info,(double) number_points);
}
status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad);
if (status == MagickFalse)
break;
mvg_info.offset=j;
switch (primitive_type)
{
case PointPrimitive:
default:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
status&=TracePoint(primitive_info+j,primitive_info[j].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case LinePrimitive:
{
double
dx,
dy,
maximum_length;
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
maximum_length=hypot(dx,dy);
if (maximum_length > (MaxBezierCoordinates/100.0))
ThrowPointExpectedException(keyword,exception);
status&=TraceLine(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RectanglePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RoundRectanglePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+2].point.x < 0.0) ||
(primitive_info[j+2].point.y < 0.0))
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0)
{
status=MagickFalse;
break;
}
status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case ArcPrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
status&=TraceArc(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case EllipsePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x < 0.0) ||
(primitive_info[j+1].point.y < 0.0))
{
status=MagickFalse;
break;
}
status&=TraceEllipse(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case CirclePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceCircle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PolylinePrimitive:
{
if (primitive_info[j].coordinates < 1)
{
status=MagickFalse;
break;
}
break;
}
case PolygonPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
primitive_info[i]=primitive_info[j];
primitive_info[i].coordinates=0;
primitive_info[j].coordinates++;
primitive_info[j].closed_subpath=MagickTrue;
i++;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
status&=TraceBezier(&mvg_info,primitive_info[j].coordinates);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
coordinates=(double) TracePath(&mvg_info,token,exception);
if (coordinates < 0.0)
{
status=MagickFalse;
break;
}
i=(ssize_t) (j+coordinates);
break;
}
case AlphaPrimitive:
case ColorPrimitive:
{
ssize_t
method;
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
method=ParseCommandOption(MagickMethodOptions,MagickFalse,token);
if (method == -1)
{
status=MagickFalse;
break;
}
primitive_info[j].method=(PaintMethod) method;
break;
}
case TextPrimitive:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
if (*token != ',')
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
/*
Compute text cursor offset.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) &&
(fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon))
{
mvg_info.point=primitive_info->point;
primitive_info->point.x+=cursor;
}
else
{
mvg_info.point=primitive_info->point;
cursor=0.0;
}
clone_info->render=MagickFalse;
clone_info->text=AcquireString(token);
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
clone_info=DestroyDrawInfo(clone_info);
cursor+=metrics.width;
if (graphic_context[n]->compliance != SVGCompliance)
cursor=0.0;
break;
}
case ImagePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
break;
}
}
mvg_info.offset=i;
if (status == 0)
break;
primitive_info[i].primitive=UndefinedPrimitive;
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),
p);
/*
Sanity check.
*/
status&=CheckPrimitiveExtent(&mvg_info,ExpandAffine(
&graphic_context[n]->affine));
if (status == 0)
break;
status&=CheckPrimitiveExtent(&mvg_info,(double)
graphic_context[n]->stroke_width);
if (status == 0)
break;
if (i == 0)
continue;
/*
Transform points.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+
graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx;
primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+
graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty;
point=primitive_info[i].point;
if (point.x < graphic_context[n]->bounds.x1)
graphic_context[n]->bounds.x1=point.x;
if (point.y < graphic_context[n]->bounds.y1)
graphic_context[n]->bounds.y1=point.y;
if (point.x > graphic_context[n]->bounds.x2)
graphic_context[n]->bounds.x2=point.x;
if (point.y > graphic_context[n]->bounds.y2)
graphic_context[n]->bounds.y2=point.y;
if (primitive_info[i].primitive == ImagePrimitive)
break;
if (i >= (ssize_t) number_points)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
}
if (graphic_context[n]->render != MagickFalse)
{
if ((n != 0) && (graphic_context[n]->compliance != SVGCompliance) &&
(graphic_context[n]->clip_mask != (char *) NULL) &&
(LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0))
{
const char
*clip_path;
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,graphic_context[n]->clip_mask,
clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
status&=DrawPrimitive(image,graphic_context[n],primitive_info,
exception);
}
proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType)
primitive_extent);
if (proceed == MagickFalse)
break;
if (status == 0)
break;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image");
/*
Relinquish resources.
*/
macros=DestroySplayTree(macros);
token=DestroyString(token);
if (primitive_info != (PrimitiveInfo *) NULL)
{
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
}
primitive=DestroyString(primitive);
if (stops != (StopInfo *) NULL)
stops=(StopInfo *) RelinquishMagickMemory(stops);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
if (status == MagickFalse)
ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition",
keyword);
return(status != 0 ? MagickTrue : MagickFalse);
}
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
  ExceptionInfo *exception)
{
  /*
    Render the MVG primitive string in draw_info onto image.  A recursion
    depth of 0 marks this call as the top-level rendering context.
  */
  MagickBooleanType
    status;

  status=RenderMVGContent(image,draw_info,0,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P a t t e r n P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPatternPath() draws a pattern.
%
% The format of the DrawPatternPath method is:
%
% MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
% const char *name,Image **pattern,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the pattern name.
%
%    o pattern: the rendered pattern is returned in this image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
  const DrawInfo *draw_info,const char *name,Image **pattern,
  ExceptionInfo *exception)
{
  char
    key[MagickPathExtent];

  const char
    *gradient_type,
    *pattern_geometry,
    *pattern_path;

  DrawInfo
    *pattern_draw_info;

  ImageInfo
    *pattern_image_info;

  MagickBooleanType
    status;

  /*
    Look up the MVG path and geometry registered for this pattern name; both
    must be present as image artifacts for the pattern to be drawable.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  assert(name != (const char *) NULL);
  (void) FormatLocaleString(key,MagickPathExtent,"%s",name);
  pattern_path=GetImageArtifact(image,key);
  if (pattern_path == (const char *) NULL)
    return(MagickFalse);
  (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",name);
  pattern_geometry=GetImageArtifact(image,key);
  if (pattern_geometry == (const char *) NULL)
    return(MagickFalse);
  /*
    Replace any previous pattern image with a fresh, fully transparent canvas
    of the registered geometry.
  */
  if ((*pattern) != (Image *) NULL)
    *pattern=DestroyImage(*pattern);
  pattern_image_info=AcquireImageInfo();
  pattern_image_info->size=AcquireString(pattern_geometry);
  *pattern=AcquireImage(pattern_image_info,exception);
  pattern_image_info=DestroyImageInfo(pattern_image_info);
  (void) QueryColorCompliance("#00000000",AllCompliance,
    &(*pattern)->background_color,exception);
  (void) SetImageBackgroundColor(*pattern,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "begin pattern-path %s %s",name,pattern_geometry);
  /*
    Render the pattern path with a private clone of the draw info; nested
    fill/stroke patterns are dropped so the pattern renders standalone.
  */
  pattern_draw_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  if (pattern_draw_info->fill_pattern != (Image *) NULL)
    pattern_draw_info->fill_pattern=DestroyImage(
      pattern_draw_info->fill_pattern);
  if (pattern_draw_info->stroke_pattern != (Image *) NULL)
    pattern_draw_info->stroke_pattern=DestroyImage(
      pattern_draw_info->stroke_pattern);
  (void) FormatLocaleString(key,MagickPathExtent,"%s-type",name);
  gradient_type=GetImageArtifact(image,key);
  if (gradient_type != (const char *) NULL)
    pattern_draw_info->gradient.type=(GradientType) ParseCommandOption(
      MagickGradientOptions,MagickFalse,gradient_type);
  (void) CloneString(&pattern_draw_info->primitive,pattern_path);
  status=RenderMVGContent(*pattern,pattern_draw_info,0,exception);
  pattern_draw_info=DestroyDrawInfo(pattern_draw_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w P o l y g o n P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPolygonPrimitive() draws a polygon on the image.
%
% The format of the DrawPolygonPrimitive method is:
%
% MagickBooleanType DrawPolygonPrimitive(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
  ssize_t
    n;

  /*
    Release every per-thread polygon structure (slots may legitimately be
    NULL when acquisition failed part-way) and then the pointer table itself.
  */
  assert(polygon_info != (PolygonInfo **) NULL);
  for (n=0; n < (ssize_t) GetMagickResourceLimit(ThreadResource); n++)
    if (polygon_info[n] != (PolygonInfo *) NULL)
      polygon_info[n]=DestroyPolygonInfo(polygon_info[n]);
  polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info);
  return(polygon_info);
}
static PolygonInfo **AcquirePolygonThreadSet(
  const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
{
  PathInfo
    *magick_restrict path_info;

  PolygonInfo
    **polygon_info;

  ssize_t
    i;

  size_t
    number_threads;

  /*
    Build one PolygonInfo per worker thread: slot 0 owns the canonical
    primitive-to-polygon conversion; every other slot receives a deep copy so
    scanline rendering can mutate per-thread state (scanline/highwater, edge
    pruning) without locking.  Returns NULL on allocation failure.
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return((PolygonInfo **) NULL);
    }
  (void) memset(polygon_info,0,number_threads*sizeof(*polygon_info));
  path_info=ConvertPrimitiveToPath(primitive_info,exception);
  if (path_info == (PathInfo *) NULL)
    return(DestroyPolygonThreadSet(polygon_info));
  polygon_info[0]=ConvertPathToPolygon(path_info,exception);
  if (polygon_info[0] == (PolygonInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(DestroyPolygonThreadSet(polygon_info));
    }
  for (i=1; i < (ssize_t) number_threads; i++)
  {
    EdgeInfo
      *edge_info;

    ssize_t
      j;

    polygon_info[i]=(PolygonInfo *) AcquireMagickMemory(
      sizeof(*polygon_info[i]));
    if (polygon_info[i] == (PolygonInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'","");
        return(DestroyPolygonThreadSet(polygon_info));
      }
    polygon_info[i]->number_edges=0;
    edge_info=polygon_info[0]->edges;
    polygon_info[i]->edges=(EdgeInfo *) AcquireQuantumMemory(
      polygon_info[0]->number_edges,sizeof(*edge_info));
    if (polygon_info[i]->edges == (EdgeInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'","");
        return(DestroyPolygonThreadSet(polygon_info));
      }
    (void) memcpy(polygon_info[i]->edges,edge_info,
      polygon_info[0]->number_edges*sizeof(*edge_info));
    /*
      The struct copy above aliased slot 0's point arrays; null them before
      publishing number_edges so a failure below cannot double free.  (The
      original loop bounded this by polygon_info[i]->number_edges, which is
      still 0 here, making the nulling a no-op.)
    */
    for (j=0; j < (ssize_t) polygon_info[0]->number_edges; j++)
      polygon_info[i]->edges[j].points=(PointInfo *) NULL;
    polygon_info[i]->number_edges=polygon_info[0]->number_edges;
    for (j=0; j < (ssize_t) polygon_info[i]->number_edges; j++)
    {
      edge_info=polygon_info[0]->edges+j;
      /*
        Size the allocation by the element actually stored (PointInfo); the
        original used sizeof(*edge_info), i.e. sizeof(EdgeInfo), which
        over-allocated.
      */
      polygon_info[i]->edges[j].points=(PointInfo *) AcquireQuantumMemory(
        edge_info->number_points,sizeof(*edge_info->points));
      if (polygon_info[i]->edges[j].points == (PointInfo *) NULL)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(DestroyPolygonThreadSet(polygon_info));
        }
      (void) memcpy(polygon_info[i]->edges[j].points,edge_info->points,
        edge_info->number_points*sizeof(*edge_info->points));
    }
  }
  path_info=(PathInfo *) RelinquishMagickMemory(path_info);
  return(polygon_info);
}
static size_t DestroyEdge(PolygonInfo *polygon_info,const ssize_t edge)
{
  /*
    Retire one edge from the active edge list: free its point array, then
    close the gap by sliding any trailing edges down one slot.  Returns the
    new edge count.
  */
  assert(edge < (ssize_t) polygon_info->number_edges);
  polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory(
    polygon_info->edges[edge].points);
  polygon_info->number_edges--;
  if (edge < (ssize_t) polygon_info->number_edges)
    {
      size_t
        trailing;

      trailing=(size_t) (polygon_info->number_edges-edge);
      (void) memmove(polygon_info->edges+edge,polygon_info->edges+edge+1,
        trailing*sizeof(*polygon_info->edges));
    }
  return(polygon_info->number_edges);
}
static double GetFillAlpha(PolygonInfo *polygon_info,const double mid,
  const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
  const ssize_t y,double *stroke_alpha)
{
  double
    alpha,
    beta,
    distance,
    subpath_alpha;

  PointInfo
    delta;

  const PointInfo
    *q;

  EdgeInfo
    *p;

  ssize_t
    i;

  ssize_t
    j,
    winding_number;

  /*
    Compute fill & stroke opacity for this (x,y) point.  The stroke alpha is
    returned through *stroke_alpha; the fill alpha is the return value.
    'mid' is half the (affine-scaled) stroke width.
  */
  *stroke_alpha=0.0;
  subpath_alpha=0.0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    /* edges are ordered by y1: once one starts below the scanline, all do */
    if ((double) y <= (p->bounds.y1-mid-0.5))
      break;
    if ((double) y > (p->bounds.y2+mid+0.5))
      {
        /* the scanline has passed this edge entirely; retire it */
        (void) DestroyEdge(polygon_info,j);
        continue;
      }
    if (((double) x <= (p->bounds.x1-mid-0.5)) ||
        ((double) x > (p->bounds.x2+mid+0.5)))
      continue;
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) p->number_points; i++)
    {
      if ((double) y <= (p->points[i-1].y-mid-0.5))
        break;
      if ((double) y > (p->points[i].y+mid+0.5))
        continue;
      if (p->scanline != (double) y)
        {
          /* remember where this scanline entered the edge for the next x */
          p->scanline=(double) y;
          p->highwater=(size_t) i;
        }
      /*
        Compute distance between a point and an edge.
      */
      q=p->points+i-1;
      delta.x=(q+1)->x-q->x;
      delta.y=(q+1)->y-q->y;
      beta=delta.x*(x-q->x)+delta.y*(y-q->y);
      if (beta <= 0.0)
        {
          /* projection falls before the segment: distance to endpoint q */
          delta.x=(double) x-q->x;
          delta.y=(double) y-q->y;
          distance=delta.x*delta.x+delta.y*delta.y;
        }
      else
        {
          alpha=delta.x*delta.x+delta.y*delta.y;
          if (beta >= alpha)
            {
              /* projection falls past the segment: distance to endpoint q+1 */
              delta.x=(double) x-(q+1)->x;
              delta.y=(double) y-(q+1)->y;
              distance=delta.x*delta.x+delta.y*delta.y;
            }
          else
            {
              /* perpendicular (squared) distance to the segment interior */
              alpha=PerceptibleReciprocal(alpha);
              beta=delta.x*(y-q->y)-delta.y*(x-q->x)+MagickEpsilon;
              distance=alpha*beta*beta;
            }
        }
      /*
        Compute stroke & subpath opacity.
      */
      beta=0.0;
      if (p->ghostline == MagickFalse)
        {
          alpha=mid+0.5;
          if ((*stroke_alpha < 1.0) &&
              (distance <= ((alpha+0.25)*(alpha+0.25))))
            {
              alpha=mid-0.5;
              if (distance <= ((alpha+0.25)*(alpha+0.25)))
                *stroke_alpha=1.0;
              else
                {
                  beta=1.0;
                  if (fabs(distance-1.0) >= MagickEpsilon)
                    beta=sqrt((double) distance);
                  alpha=beta-mid-0.5;
                  if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25)))
                    *stroke_alpha=(alpha-0.25)*(alpha-0.25);
                }
            }
        }
      if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0))
        continue;
      if (distance <= 0.0)
        {
          subpath_alpha=1.0;
          continue;
        }
      /*
        (A second 'distance > 1.0' test appeared here in the original; it was
        unreachable because the identical condition above already continues.)
      */
      if (fabs(beta) < MagickEpsilon)
        {
          beta=1.0;
          if (fabs(distance-1.0) >= MagickEpsilon)
            beta=sqrt(distance);
        }
      alpha=beta-1.0;
      if (subpath_alpha < (alpha*alpha))
        subpath_alpha=alpha*alpha;
    }
  }
  /*
    Compute fill opacity.
  */
  if (fill == MagickFalse)
    return(0.0);
  if (subpath_alpha >= 1.0)
    return(1.0);
  /*
    Determine winding number.
  */
  winding_number=0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    if ((double) y <= p->bounds.y1)
      break;
    if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
      continue;
    if ((double) x > p->bounds.x2)
      {
        /* point is entirely right of this edge: the whole edge crosses it */
        winding_number+=p->direction ? 1 : -1;
        continue;
      }
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) (p->number_points-1); i++)
      if ((double) y <= p->points[i].y)
        break;
    q=p->points+i-1;
    /* cross product sign decides which side of the segment the point lies */
    if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
      winding_number+=p->direction ? 1 : -1;
  }
  if (fill_rule != NonZeroRule)
    {
      /* even-odd rule */
      if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
        return(1.0);
    }
  else
    if (MagickAbsoluteValue(winding_number) != 0)
      return(1.0);
  return(subpath_alpha);
}
static MagickBooleanType DrawPolygonPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  /*
    Rasterize a traced primitive (polygon, line, or point) onto image using a
    per-thread polygon set and scanline fill/stroke compositing.
  */
  CacheView
    *image_view;
  const char
    *artifact;
  MagickBooleanType
    fill,
    status;
  double
    mid;
  PolygonInfo
    **magick_restrict polygon_info;
  EdgeInfo
    *p;
  ssize_t
    i;
  SegmentInfo
    bounds;
  ssize_t
    start_y,
    stop_y,
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  assert(primitive_info != (PrimitiveInfo *) NULL);
  if (primitive_info->coordinates <= 1)
    return(MagickTrue);
  /*
    Compute bounding box.
  */
  polygon_info=AcquirePolygonThreadSet(primitive_info,exception);
  if (polygon_info == (PolygonInfo **) NULL)
    return(MagickFalse);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    begin draw-polygon");
  fill=(primitive_info->method == FillToBorderMethod) ||
    (primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse;
  /* mid: half the stroke width after the affine scale is applied */
  mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
  /*
    NOTE(review): edges[0].bounds is read here before the number_edges == 0
    check below — confirm ConvertPathToPolygon always yields at least one
    edge for coordinates > 1.
  */
  bounds=polygon_info[0]->edges[0].bounds;
  artifact=GetImageArtifact(image,"draw:render-bounding-rectangles");
  if (IsStringTrue(artifact) != MagickFalse)
    (void) DrawBoundingRectangles(image,draw_info,polygon_info[0],exception);
  for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
  {
    /* grow bounds to the union of all edge bounding boxes */
    p=polygon_info[0]->edges+i;
    if (p->bounds.x1 < bounds.x1)
      bounds.x1=p->bounds.x1;
    if (p->bounds.y1 < bounds.y1)
      bounds.y1=p->bounds.y1;
    if (p->bounds.x2 > bounds.x2)
      bounds.x2=p->bounds.x2;
    if (p->bounds.y2 > bounds.y2)
      bounds.y2=p->bounds.y2;
  }
  /* pad by the stroke half-width plus one pixel of antialiasing slack */
  bounds.x1-=(mid+1.0);
  bounds.y1-=(mid+1.0);
  bounds.x2+=(mid+1.0);
  bounds.y2+=(mid+1.0);
  if ((bounds.x1 >= (double) image->columns) ||
      (bounds.y1 >= (double) image->rows) ||
      (bounds.x2 <= 0.0) || (bounds.y2 <= 0.0))
    {
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      return(MagickTrue);  /* virtual polygon */
    }
  /* clamp the render rectangle to the image raster */
  bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x1;
  bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y1;
  bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x2;
  bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y2;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  /*
    With the early 'coordinates <= 1' return above, this branch is in
    practice taken only when the polygon produced no edges.
  */
  if ((primitive_info->coordinates == 1) ||
      (polygon_info[0]->number_edges == 0))
    {
      /*
        Draw point.
      */
      start_y=CastDoubleToLong(ceil(bounds.y1-0.5));
      stop_y=CastDoubleToLong(floor(bounds.y2+0.5));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
      for (y=start_y; y <= stop_y; y++)
      {
        MagickBooleanType
          sync;
        PixelInfo
          pixel;
        ssize_t
          x;
        Quantum
          *magick_restrict q;
        ssize_t
          start_x,
          stop_x;
        if (status == MagickFalse)
          continue;
        start_x=CastDoubleToLong(ceil(bounds.x1-0.5));
        stop_x=CastDoubleToLong(floor(bounds.x2+0.5));
        x=start_x;
        q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        GetPixelInfo(image,&pixel);
        for ( ; x <= stop_x; x++)
        {
          /* set only the single pixel nearest the primitive's point */
          if ((x == CastDoubleToLong(ceil(primitive_info->point.x-0.5))) &&
              (y == CastDoubleToLong(ceil(primitive_info->point.y-0.5))))
            {
              GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
            }
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          "    end draw-polygon");
      return(status);
    }
  /*
    Draw polygon or line.
  */
  start_y=CastDoubleToLong(ceil(bounds.y1-0.5));
  stop_y=CastDoubleToLong(floor(bounds.y2+0.5));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
  for (y=start_y; y <= stop_y; y++)
  {
    /* each thread scans with its own PolygonInfo copy (mutable highwater) */
    const int
      id = GetOpenMPThreadId();
    Quantum
      *magick_restrict q;
    ssize_t
      x;
    ssize_t
      start_x,
      stop_x;
    if (status == MagickFalse)
      continue;
    start_x=CastDoubleToLong(ceil(bounds.x1-0.5));
    stop_x=CastDoubleToLong(floor(bounds.x2+0.5));
    q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+
      1),1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=start_x; x <= stop_x; x++)
    {
      double
        fill_alpha,
        stroke_alpha;
      PixelInfo
        fill_color,
        stroke_color;
      /*
        Fill and/or stroke.
      */
      fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule,
        x,y,&stroke_alpha);
      if (draw_info->stroke_antialias == MagickFalse)
        {
          /* no antialiasing: threshold the coverage to on/off */
          fill_alpha=fill_alpha > 0.5 ? 1.0 : 0.0;
          stroke_alpha=stroke_alpha > 0.5 ? 1.0 : 0.0;
        }
      GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,fill_alpha*fill_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception);
      CompositePixelOver(image,&stroke_color,stroke_alpha*stroke_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  polygon_info=DestroyPolygonThreadSet(polygon_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end draw-polygon");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
% The format of the DrawPrimitive method is:
%
% MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
% PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
  const char
    *methods[] =
    {
      "point",
      "replace",
      "floodfill",
      "filltoborder",
      "reset",
      "?"
    };

  PointInfo
    p,
    point,
    q;

  ssize_t
    i,
    x;

  ssize_t
    coordinates,
    y;

  /*
    Emit a debug trace for a primitive.  Point-like primitives are logged
    with their paint method name; vertex lists are walked subpath by
    subpath, flagging consecutive duplicate points and whether each subpath
    ends open or closed (endpoint coincides with start point).
  */
  x=CastDoubleToLong(ceil(primitive_info->point.x-0.5));
  y=CastDoubleToLong(ceil(primitive_info->point.y-0.5));
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ColorPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ImagePrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ImagePrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    case PointPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case TextPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "TextPrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    default:
      break;
  }
  coordinates=0;
  p=primitive_info[0].point;
  q.x=(-1.0);
  q.y=(-1.0);
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    /*
      Fetch the current vertex once per iteration (the original code
      redundantly assigned it twice); start a new subpath when the
      previous one is exhausted.
    */
    point=primitive_info[i].point;
    if (coordinates <= 0)
      {
        coordinates=(ssize_t) primitive_info[i].coordinates;
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " begin open (%.20g)",(double) coordinates);
        p=point;
      }
    if ((fabs(q.x-point.x) >= MagickEpsilon) ||
        (fabs(q.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
    q=point;
    coordinates--;
    if (coordinates > 0)
      continue;
    /*
      Subpath exhausted: report whether it closed back on its start point.
    */
    if ((fabs(p.x-point.x) >= MagickEpsilon) ||
        (fabs(p.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)",
        (double) coordinates);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)",
        (double) coordinates);
  }
}
MagickExport MagickBooleanType DrawPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickStatusType
    status;

  ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Rasterize one primitive onto the image.  Point-like primitives (alpha,
    color, point) are painted directly through a cache view; image and text
    primitives delegate to composite/annotate; everything else falls through
    to the polygon rasterizer, honoring dash patterns and wide strokes.
    Note: x and y hold the first point's pixel coordinates, but are reused
    as loop indices inside the Replace/Reset method scan loops.
  */
  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " begin draw-primitive");
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx,
        draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
        draw_info->affine.tx,draw_info->affine.ty);
    }
  status=MagickTrue;
  /*
    Painting a non-gray fill/stroke on a grayscale image requires promoting
    the image to sRGB first.
  */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsPixelInfoGray(&draw_info->fill) == MagickFalse) ||
       (IsPixelInfoGray(&draw_info->stroke) == MagickFalse)))
    status&=SetImageColorspace(image,sRGBColorspace,exception);
  /*
    SVG rendering applies clip/composite masks around the primitive; they
    are removed again at the bottom of this function.
  */
  if (draw_info->compliance == SVGCompliance)
    {
      status&=SetImageMask(image,WritePixelMask,draw_info->clipping_mask,
        exception);
      status&=SetImageMask(image,CompositePixelMask,draw_info->composite_mask,
        exception);
    }
  x=CastDoubleToLong(ceil(primitive_info->point.x-0.5));
  y=CastDoubleToLong(ceil(primitive_info->point.y-0.5));
  image_view=AcquireAuthenticCacheView(image,exception);
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      /*
        Paint the alpha channel only, leaving color channels untouched.
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        status&=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          /*
            Set the alpha of the single pixel at (x,y).
          */
          PixelInfo
            pixel;

          Quantum
            *q;

          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
          status&=SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          /*
            Set the alpha of every pixel fuzzy-matching the color at (x,y).
          */
          PixelInfo
            pixel,
            target;

          status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          /*
            Flood-fill alpha from (x,y); FillToBorder fills until the border
            color is reached instead of matching the seed color.  The
            channel mask restricts the flood-fill to the alpha channel.
          */
          ChannelType
            channel_mask;

          PixelInfo
            target;

          status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          channel_mask=SetImageChannelMask(image,AlphaChannel);
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          (void) SetImageChannelMask(image,channel_mask);
          break;
        }
        case ResetMethod:
        {
          /*
            Set the alpha of every pixel in the image.
          */
          PixelInfo
            pixel;

          for (y=0; y < (ssize_t) image->rows; y++)
          {
            Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ColorPrimitive:
    {
      /*
        Same method dispatch as AlphaPrimitive, but paints full pixel color.
      */
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          /*
            Recolor the single pixel at (x,y).
          */
          PixelInfo
            pixel;

          Quantum
            *q;

          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetPixelInfo(image,&pixel);
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelViaPixelInfo(image,&pixel,q);
          status&=SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          /*
            Recolor every pixel fuzzy-matching the color at (x,y).
          */
          PixelInfo
            pixel,
            target;

          status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          /*
            Flood-fill color from (x,y); no channel mask here, all channels
            are painted.
          */
          PixelInfo
            target;

          status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          break;
        }
        case ResetMethod:
        {
          /*
            Recolor every pixel in the image with the fill color.
          */
          PixelInfo
            pixel;

          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ImagePrimitive:
    {
      AffineMatrix
        affine;

      char
        composite_geometry[MagickPathExtent];

      Image
        *composite_image,
        *composite_images;

      ImageInfo
        *clone_info;

      RectangleInfo
        geometry;

      ssize_t
        x1,
        y1;

      if (primitive_info->text == (char *) NULL)
        break;
      clone_info=AcquireImageInfo();
      composite_images=(Image *) NULL;
      /*
        Load the source image: inline "data:" payload, or a filename that is
        only read after vetting (not a character device, and restricted to a
        safe set of delegates or an accessible path).
      */
      if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
        composite_images=ReadInlineImage(clone_info,primitive_info->text,
          exception);
      else
        if (*primitive_info->text != '\0')
          {
            MagickBooleanType
              path_status;

            struct stat
              attributes;

            (void) CopyMagickString(clone_info->filename,primitive_info->text,
              MagickPathExtent);
            path_status=GetPathAttributes(clone_info->filename,&attributes);
            if ((path_status != MagickFalse) &&
                (S_ISCHR(attributes.st_mode) == 0))
              {
                (void) SetImageInfo(clone_info,1,exception);
                (void) CopyMagickString(clone_info->filename,
                  primitive_info->text,MagickPathExtent);
                if (clone_info->size != (char *) NULL)
                  clone_info->size=DestroyString(clone_info->size);
                if (clone_info->extract != (char *) NULL)
                  clone_info->extract=DestroyString(clone_info->extract);
                if ((LocaleCompare(clone_info->magick,"file") == 0) ||
                    (LocaleCompare(clone_info->magick,"https") == 0) ||
                    (LocaleCompare(clone_info->magick,"http") == 0) ||
                    (LocaleCompare(clone_info->magick,"mpri") == 0) ||
                    (IsPathAccessible(clone_info->filename) != MagickFalse))
                  composite_images=ReadImage(clone_info,exception);
              }
          }
      clone_info=DestroyImageInfo(clone_info);
      if (composite_images == (Image *) NULL)
        {
          status=MagickFalse;
          break;
        }
      /*
        Only the first frame is composited; the rest are released.
      */
      composite_image=RemoveFirstImageFromList(&composite_images);
      composite_images=DestroyImageList(composite_images);
      (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
        NULL,(void *) NULL);
      x1=CastDoubleToLong(ceil(primitive_info[1].point.x-0.5));
      y1=CastDoubleToLong(ceil(primitive_info[1].point.y-0.5));
      if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
          ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
        {
          /*
            Resize image.
          */
          (void) FormatLocaleString(composite_geometry,MagickPathExtent,
            "%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y);
          composite_image->filter=image->filter;
          status&=TransformImage(&composite_image,(char *) NULL,
            composite_geometry,exception);
        }
      if (composite_image->alpha_trait == UndefinedPixelTrait)
        status&=SetImageAlphaChannel(composite_image,OpaqueAlphaChannel,
          exception);
      if (draw_info->alpha != OpaqueAlpha)
        status&=SetImageAlpha(composite_image,draw_info->alpha,exception);
      SetGeometry(image,&geometry);
      image->gravity=draw_info->gravity;
      geometry.x=x;
      geometry.y=y;
      (void) FormatLocaleString(composite_geometry,MagickPathExtent,
        "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
        composite_image->rows,(double) geometry.x,(double) geometry.y);
      (void) ParseGravityGeometry(image,composite_geometry,&geometry,exception);
      affine=draw_info->affine;
      affine.tx=(double) geometry.x;
      affine.ty=(double) geometry.y;
      composite_image->interpolate=image->interpolate;
      /*
        Simple over-composites use the affine-aware fast path; any other
        compose operator goes through the general compositor.
      */
      if ((draw_info->compose == OverCompositeOp) ||
          (draw_info->compose == SrcOverCompositeOp))
        status&=DrawAffineImage(image,composite_image,&affine,exception);
      else
        status&=CompositeImage(image,composite_image,draw_info->compose,
          MagickTrue,geometry.x,geometry.y,exception);
      composite_image=DestroyImage(composite_image);
      break;
    }
    case PointPrimitive:
    {
      PixelInfo
        fill_color;

      Quantum
        *q;

      /*
        Composite the fill color over a single in-bounds pixel.
      */
      if ((y < 0) || (y >= (ssize_t) image->rows))
        break;
      if ((x < 0) || (x >= (ssize_t) image->columns))
        break;
      q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
      if (q == (Quantum *) NULL)
        break;
      GetFillColor(draw_info,x,y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,(double)
        GetPixelAlpha(image,q),q);
      status&=SyncCacheViewAuthenticPixels(image_view,exception);
      break;
    }
    case TextPrimitive:
    {
      char
        geometry[MagickPathExtent];

      DrawInfo
        *clone_info;

      /*
        Delegate text rendering to AnnotateImage with a position geometry.
      */
      if (primitive_info->text == (char *) NULL)
        break;
      clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
      (void) CloneString(&clone_info->text,primitive_info->text);
      (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
        primitive_info->point.x,primitive_info->point.y);
      (void) CloneString(&clone_info->geometry,geometry);
      status&=AnnotateImage(image,clone_info,exception);
      clone_info=DestroyDrawInfo(clone_info);
      break;
    }
    default:
    {
      double
        mid,
        scale;

      DrawInfo
        *clone_info;

      if (IsEventLogging() != MagickFalse)
        LogPrimitiveInfo(primitive_info);
      scale=ExpandAffine(&draw_info->affine);
      if ((draw_info->dash_pattern != (double *) NULL) &&
          (fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) &&
          (fabs(scale*draw_info->stroke_width) >= MagickEpsilon) &&
          (draw_info->stroke.alpha != (Quantum) TransparentAlpha))
        {
          /*
            Draw dash polygon.
          */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          if (status != MagickFalse)
            status&=DrawDashPolygon(draw_info,primitive_info,image,exception);
          break;
        }
      mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
      if ((mid > 1.0) &&
          ((draw_info->stroke.alpha != (Quantum) TransparentAlpha) ||
           (draw_info->stroke_pattern != (Image *) NULL)))
        {
          double
            point_x,
            point_y;

          MagickBooleanType
            closed_path;

          /*
            Draw strokes while respecting line cap/join attributes.
          */
          closed_path=primitive_info[0].closed_subpath;
          i=(ssize_t) primitive_info[0].coordinates;
          point_x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x);
          point_y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y);
          if ((point_x < MagickEpsilon) && (point_y < MagickEpsilon))
            closed_path=MagickTrue;
          if ((((draw_info->linecap == RoundCap) ||
                (closed_path != MagickFalse)) &&
               (draw_info->linejoin == RoundJoin)) ||
              (primitive_info[i].primitive != UndefinedPrimitive))
            {
              /*
                Fast path: the plain polygon rasterizer already produces the
                right caps/joins for this combination.
              */
              status&=DrawPolygonPrimitive(image,draw_info,primitive_info,
                exception);
              break;
            }
          /*
            Fill first with stroke disabled, then draw the stroke outline.
          */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          if (status != MagickFalse)
            status&=DrawStrokePolygon(image,draw_info,primitive_info,exception);
          break;
        }
      status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception);
      break;
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Undo the SVG clip/composite masks installed above.
  */
  if (draw_info->compliance == SVGCompliance)
    {
      status&=SetImageMask(image,WritePixelMask,(Image *) NULL,exception);
      status&=SetImageMask(image,CompositePixelMask,(Image *) NULL,exception);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w S t r o k e P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
% the image while respecting the line cap and join attributes.
%
% The format of the DrawStrokePolygon method is:
%
%      MagickBooleanType DrawStrokePolygon(Image *image,
%        const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%
*/
static MagickBooleanType DrawRoundLinecap(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  PrimitiveInfo
    linecap[5];

  ssize_t
    cap;

  /*
    Emulate a round line cap by rendering a tiny 4-vertex polygon (offset
    by a few epsilons) centered on the endpoint.
  */
  cap=0;
  while (cap < 4)
    linecap[cap++]=(*primitive_info);
  linecap[0].coordinates=4;
  linecap[1].point.x+=2.0*MagickEpsilon;
  linecap[2].point.x+=2.0*MagickEpsilon;
  linecap[2].point.y+=2.0*MagickEpsilon;
  linecap[3].point.y+=2.0*MagickEpsilon;
  linecap[4].primitive=UndefinedPrimitive;  /* sentinel ends the vertex list */
  return(DrawPolygonPrimitive(image,draw_info,linecap,exception));
}
static MagickBooleanType DrawStrokePolygon(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  MagickBooleanType
    closed_path;

  MagickStatusType
    status;

  PrimitiveInfo
    *stroke_polygon;

  const PrimitiveInfo
    *p,
    *q;

  /*
    Draw stroked polygon.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " begin draw-stroke-polygon");
  /*
    Render the stroke by filling its traced outline: the clone fills with
    the stroke color/pattern and has its own stroke disabled.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill=draw_info->stroke;
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
  clone_info->stroke_width=0.0;
  clone_info->fill_rule=NonZeroRule;
  status=MagickTrue;
  /*
    Stroke each subpath independently; single-point subpaths have no
    stroke outline and are skipped.
  */
  for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates)
  {
    if (p->coordinates == 1)
      continue;
    stroke_polygon=TraceStrokePolygon(draw_info,p,exception);
    if (stroke_polygon == (PrimitiveInfo *) NULL)
      {
        status=0;
        break;
      }
    status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception);
    stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
    if (status == 0)
      break;
    /*
      Open subpaths with round caps get a cap drawn on both endpoints
      (q points at the subpath's last vertex).
    */
    q=p+p->coordinates-1;
    closed_path=p->closed_subpath;
    if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
      {
        status&=DrawRoundLinecap(image,draw_info,p,exception);
        status&=DrawRoundLinecap(image,draw_info,q,exception);
      }
  }
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " end draw-stroke-polygon");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A f f i n e M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAffineMatrix() returns an AffineMatrix initialized to the identity
% matrix.
%
% The format of the GetAffineMatrix method is:
%
% void GetAffineMatrix(AffineMatrix *affine_matrix)
%
% A description of each parameter follows:
%
% o affine_matrix: the affine matrix.
%
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
  /*
    Reset the affine transform to the identity matrix: unit scale, no
    rotation/shear, no translation.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(affine_matrix != (AffineMatrix *) NULL);
  affine_matrix->sx=1.0;
  affine_matrix->rx=0.0;
  affine_matrix->ry=0.0;
  affine_matrix->sy=1.0;
  affine_matrix->tx=0.0;
  affine_matrix->ty=0.0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetDrawInfo() initializes draw_info to default values from image_info.
%
% The format of the GetDrawInfo method is:
%
% void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
% A description of each parameter follows:
%
%    o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
  char
    *next_token;

  const char
    *option;

  ExceptionInfo
    *exception;

  ImageInfo
    *clone_info;

  /*
    Initialize draw attributes.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info != (DrawInfo *) NULL);
  (void) memset(draw_info,0,sizeof(*draw_info));
  clone_info=CloneImageInfo(image_info);
  GetAffineMatrix(&draw_info->affine);
  exception=AcquireExceptionInfo();
  /*
    Baseline defaults: opaque black fill ("#000F"), transparent white
    stroke ("#FFF0"), 1px butt-capped miter-joined stroke, 12pt text.
  */
  (void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#FFF0",AllCompliance,&draw_info->stroke,
    exception);
  draw_info->stroke_antialias=clone_info->antialias;
  draw_info->stroke_width=1.0;
  draw_info->fill_rule=EvenOddRule;
  draw_info->alpha=OpaqueAlpha;
  draw_info->fill_alpha=OpaqueAlpha;
  draw_info->stroke_alpha=OpaqueAlpha;
  draw_info->linecap=ButtCap;
  draw_info->linejoin=MiterJoin;
  draw_info->miterlimit=10;
  draw_info->decorate=NoDecoration;
  draw_info->pointsize=12.0;
  draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha;
  draw_info->compose=OverCompositeOp;
  draw_info->render=MagickTrue;
  draw_info->clip_path=MagickFalse;
  draw_info->debug=IsEventLogging();
  /*
    Inherit font/density/text attributes from the (cloned) image info.
  */
  if (clone_info->font != (char *) NULL)
    draw_info->font=AcquireString(clone_info->font);
  if (clone_info->density != (char *) NULL)
    draw_info->density=AcquireString(clone_info->density);
  draw_info->text_antialias=clone_info->antialias;
  if (fabs(clone_info->pointsize) >= MagickEpsilon)
    draw_info->pointsize=clone_info->pointsize;
  draw_info->border_color=clone_info->border_color;
  if (clone_info->server_name != (char *) NULL)
    draw_info->server_name=AcquireString(clone_info->server_name);
  /*
    Apply any overrides supplied as image options (e.g. -define/-set).
  */
  option=GetImageOption(clone_info,"direction");
  if (option != (const char *) NULL)
    draw_info->direction=(DirectionType) ParseCommandOption(
      MagickDirectionOptions,MagickFalse,option);
  else
    draw_info->direction=UndefinedDirection;
  option=GetImageOption(clone_info,"encoding");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->encoding,option);
  option=GetImageOption(clone_info,"family");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->family,option);
  option=GetImageOption(clone_info,"fill");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->fill,
      exception);
  option=GetImageOption(clone_info,"gravity");
  if (option != (const char *) NULL)
    draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"interline-spacing");
  if (option != (const char *) NULL)
    draw_info->interline_spacing=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"interword-spacing");
  if (option != (const char *) NULL)
    draw_info->interword_spacing=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"kerning");
  if (option != (const char *) NULL)
    draw_info->kerning=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"stroke");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke,
      exception);
  option=GetImageOption(clone_info,"strokewidth");
  if (option != (const char *) NULL)
    draw_info->stroke_width=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"style");
  if (option != (const char *) NULL)
    draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"undercolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
      exception);
  option=GetImageOption(clone_info,"weight");
  if (option != (const char *) NULL)
    {
      ssize_t
        weight;

      /*
        Accept either a symbolic weight (e.g. "bold") or a numeric value.
      */
      weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
      if (weight == -1)
        weight=(ssize_t) StringToUnsignedLong(option);
      draw_info->weight=(size_t) weight;
    }
  exception=DestroyExceptionInfo(exception);
  draw_info->signature=MagickCoreSignature;
  clone_info=DestroyImageInfo(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r m u t a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Permutate() returns the permutation of (n,k).
%
% The format of the Permutate method is:
%
% void Permutate(ssize_t n,ssize_t k)
%
% A description of each parameter follows:
%
% o n:
%
% o k:
%
%
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
  double
    result;

  ssize_t
    j;

  /*
    Compute the binomial coefficient C(n,k) = n!/(k!*(n-k)!) in floating
    point: multiply the falling factors n..k+1, then divide by (n-k)!.
  */
  result=1.0;
  for (j=n; j > k; j--)
    result*=(double) j;
  for (j=n-k; j >= 1; j--)
    result/=(double) j;
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a c e P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TracePrimitive is a collection of methods for generating graphic
% primitives such as arcs, ellipses, paths, etc.
%
*/
static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo degrees)
{
  PointInfo
    midpoint,
    semi_axes;

  /*
    An arc is traced as an ellipse centered on the midpoint of the segment
    from start to end, with semi-axes equal to half of each extent.
  */
  midpoint.x=0.5*(start.x+end.x);
  midpoint.y=0.5*(start.y+end.y);
  semi_axes.x=fabs(midpoint.x-start.x);
  semi_axes.y=fabs(midpoint.y-start.y);
  return(TraceEllipse(mvg_info,midpoint,semi_axes,degrees));
}
static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo arc,const double angle,
  const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
  double
    alpha,
    beta,
    delta,
    factor,
    gamma,
    theta;

  MagickStatusType
    status;

  PointInfo
    center,
    points[3],
    radii;

  double
    cosine,
    sine;

  PrimitiveInfo
    *primitive_info;

  PrimitiveInfo
    *p;

  ssize_t
    i;

  size_t
    arc_segments;

  ssize_t
    offset;

  /*
    Trace an elliptical-arc path segment given in endpoint parameterization
    (endpoints, radii, x-axis rotation, large-arc and sweep flags, as in an
    SVG path 'A' command) by converting to center parameterization and
    approximating each slice of at most a quarter turn with a cubic Bezier.
  */
  offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  /*
    Degenerate cases: coincident endpoints collapse to a point; a zero
    radius collapses to a straight line.
  */
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    return(TracePoint(primitive_info,end));
  radii.x=fabs(arc.x);
  radii.y=fabs(arc.y);
  if ((radii.x < MagickEpsilon) || (radii.y < MagickEpsilon))
    return(TraceLine(primitive_info,start,end));
  cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
  sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
  /*
    Work in the ellipse's rotated, radius-normalized frame; scale radii up
    when no ellipse of the requested size passes through both endpoints.
  */
  center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
  center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
  delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
    (radii.y*radii.y);
  if (delta < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  if (delta > 1.0)
    {
      radii.x*=sqrt((double) delta);
      radii.y*=sqrt((double) delta);
    }
  points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
  points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
  points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
  points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
  alpha=points[1].x-points[0].x;
  beta=points[1].y-points[0].y;
  if (fabs(alpha*alpha+beta*beta) < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  /*
    Pick the arc center consistent with the large-arc/sweep flags, then the
    start angle (alpha) and signed sweep extent (theta).
  */
  factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
  if (factor <= 0.0)
    factor=0.0;
  else
    {
      factor=sqrt((double) factor);
      if (sweep == large_arc)
        factor=(-factor);
    }
  center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
  center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
  alpha=atan2(points[0].y-center.y,points[0].x-center.x);
  theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
  if ((theta < 0.0) && (sweep != MagickFalse))
    theta+=2.0*MagickPI;
  else
    if ((theta > 0.0) && (sweep == MagickFalse))
      theta-=2.0*MagickPI;
  /*
    Split the sweep into quarter-turn (or smaller) segments; each is
    emitted as a 4-point Bezier whose control points are mapped back into
    the original (rotated, scaled) coordinate frame.
  */
  arc_segments=(size_t) CastDoubleToLong(ceil(fabs((double) (theta/(0.5*
    MagickPI+MagickEpsilon)))));
  status=MagickTrue;
  p=primitive_info;
  for (i=0; i < (ssize_t) arc_segments; i++)
  {
    beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
    gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
      sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
      sin(fmod((double) beta,DegreesToRadians(360.0)));
    points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
    p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
    (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
      points[0].y);
    (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
      points[0].y);
    (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
      points[1].y);
    (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
      points[1].y);
    (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
      points[2].y);
    (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
      points[2].y);
    if (i == (ssize_t) (arc_segments-1))
      (p+3)->point=end;
    status&=TraceBezier(mvg_info,4);
    if (status == 0)
      break;
    /*
      TraceBezier may reallocate the primitive buffer; refresh the cursor
      from mvg_info before advancing past the generated coordinates.
    */
    p=(*mvg_info->primitive_info)+mvg_info->offset;
    mvg_info->offset+=p->coordinates;
    p+=p->coordinates;
  }
  if (status == 0)
    return(MagickFalse);
  /*
    Rewind to the start of the arc and record the total coordinate count,
    then stamp the primitive type on every generated coordinate (walking
    backward from the cursor).
  */
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}
static MagickBooleanType TraceBezier(MVGInfo *mvg_info,
  const size_t number_coordinates)
{
  double
    alpha,
    *coefficients,
    weight;

  PointInfo
    end,
    point,
    *points;

  PrimitiveInfo
    *primitive_info;

  PrimitiveInfo
    *p;

  ssize_t
    i,
    j;

  size_t
    control_points,
    quantum;

  /*
    Allocate coefficients.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=number_coordinates;
  /*
    Size the subdivision quantum from the largest coordinate spread so big
    curves get more sample points; bail out if a spread exceeds what a
    ssize_t can represent.
  */
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    for (j=i+1; j < (ssize_t) number_coordinates; j++)
    {
      alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
      if (alpha > (double) MAGICK_SSIZE_MAX)
        {
          (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(MagickFalse);
        }
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
      alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
      if (alpha > (double) MAGICK_SSIZE_MAX)
        {
          (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(MagickFalse);
        }
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
    }
  }
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=MagickMin(quantum/number_coordinates,BezierQuantum);
  coefficients=(double *) AcquireQuantumMemory(number_coordinates,
    sizeof(*coefficients));
  points=(PointInfo *) AcquireQuantumMemory(quantum,number_coordinates*
    sizeof(*points));
  if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
    {
      if (points != (PointInfo *) NULL)
        points=(PointInfo *) RelinquishMagickMemory(points);
      if (coefficients != (double *) NULL)
        coefficients=(double *) RelinquishMagickMemory(coefficients);
      (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  control_points=quantum*number_coordinates;
  if (CheckPrimitiveExtent(mvg_info,(double) control_points+1) == MagickFalse)
    {
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  /*
    CheckPrimitiveExtent may reallocate the primitive buffer; refresh the
    pointer before reading the control points.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  /*
    Compute bezier points.
  */
  end=primitive_info[number_coordinates-1].point;
  /*
    coefficients[j] holds the binomial coefficient C(n-1,j) of the
    Bernstein basis for an (n-1)-degree Bezier with n control points.
  */
  for (i=0; i < (ssize_t) number_coordinates; i++)
    coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
  weight=0.0;
  /*
    Sample the curve at evenly spaced parameter values: each sample is
    sum_j C(n-1,j)*w^j*(1-w)^(n-1-j)*control[j], with the power of w
    accumulated incrementally in alpha.
  */
  for (i=0; i < (ssize_t) control_points; i++)
  {
    p=primitive_info;
    point.x=0.0;
    point.y=0.0;
    alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
    for (j=0; j < (ssize_t) number_coordinates; j++)
    {
      point.x+=alpha*coefficients[j]*p->point.x;
      point.y+=alpha*coefficients[j]*p->point.y;
      alpha*=weight/(1.0-weight);
      p++;
    }
    points[i]=point;
    weight+=1.0/control_points;
  }
  /*
    Bezier curves are just short segmented polys.
  */
  p=primitive_info;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    if (TracePoint(p,points[i]) == MagickFalse)
      {
        points=(PointInfo *) RelinquishMagickMemory(points);
        coefficients=(double *) RelinquishMagickMemory(coefficients);
        return(MagickFalse);
      }
    p+=p->coordinates;
  }
  /*
    Close the poly exactly on the curve's final control point.
  */
  if (TracePoint(p,end) == MagickFalse)
    {
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /*
    Stamp the primitive type on every generated coordinate (walk backward
    from the cursor).
  */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  points=(PointInfo *) RelinquishMagickMemory(points);
  coefficients=(double *) RelinquishMagickMemory(coefficients);
  return(MagickTrue);
}
static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end)
{
  /*
    Trace a circle centered at 'start' whose radius is the distance from
    'start' to 'end'.  Implemented by delegating to TraceEllipse with equal
    x/y radii and a full 0..360 degree sweep.
  */
  double
    dx,
    dy;
  PointInfo
    radii,
    sweep;
  dx=end.x-start.x;
  dy=end.y-start.y;
  radii.x=hypot((double) dx,(double) dy);
  radii.y=radii.x;
  sweep.x=0.0;
  sweep.y=360.0;
  return(TraceEllipse(mvg_info,start,radii,sweep));
}
/*
  Trace an elliptical arc (center, per-axis radii, arc.x..arc.y in degrees)
  as a short segmented polyline appended at mvg_info->offset.  Returns
  MagickFalse on allocation/trace failure.
*/
static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center,
const PointInfo radii,const PointInfo arc)
{
double
coordinates,
delta,
step,
x,
y;
PointInfo
angle,
point;
PrimitiveInfo
*primitive_info;
PrimitiveInfo
*p;
ssize_t
i;
/*
Ellipses are just short segmented polys.
*/
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
primitive_info->coordinates=0;
/* degenerate radius: emit nothing, report success */
if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
return(MagickTrue);
/* pick an angular step: default PI/8, refined for large radii so the
   segment length stays roughly constant */
delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y));
step=MagickPI/8.0;
if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0);
angle.x=DegreesToRadians(arc.x);
/* normalize the end angle so angle.y >= angle.x (add whole turns) */
y=arc.y;
while (y < arc.x)
y+=360.0;
angle.y=DegreesToRadians(y);
coordinates=ceil((angle.y-angle.x)/step+1.0);
if (CheckPrimitiveExtent(mvg_info,coordinates) == MagickFalse)
return(MagickFalse);
/* CheckPrimitiveExtent may reallocate the primitive array, so re-fetch */
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
for (p=primitive_info; angle.x < angle.y; angle.x+=step)
{
point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x;
point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
}
/* always emit the exact endpoint of the arc */
point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x;
point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickFalse;
/* mark the subpath closed when first and last points coincide */
x=fabs(primitive_info[0].point.x-
primitive_info[primitive_info->coordinates-1].point.x);
y=fabs(primitive_info[0].point.y-
primitive_info[primitive_info->coordinates-1].point.y);
if ((x < MagickEpsilon) && (y < MagickEpsilon))
primitive_info->closed_subpath=MagickTrue;
/* back-propagate the primitive type over every emitted entry (p starts
   one past the last traced point and walks down) */
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  /*
    Trace a line segment as two point primitives.  A degenerate segment
    (start == end within epsilon) collapses to a single PointPrimitive.
  */
  MagickBooleanType
    degenerate;
  if (TracePoint(primitive_info,start) == MagickFalse)
    return(MagickFalse);
  degenerate=((fabs(start.x-end.x) < MagickEpsilon) &&
    (fabs(start.y-end.y) < MagickEpsilon)) ? MagickTrue : MagickFalse;
  if (degenerate != MagickFalse)
    {
      primitive_info->primitive=PointPrimitive;
      primitive_info->coordinates=1;
      return(MagickTrue);
    }
  if (TracePoint(primitive_info+1,end) == MagickFalse)
    return(MagickFalse);
  primitive_info[1].primitive=primitive_info->primitive;
  primitive_info->coordinates=2;
  primitive_info->closed_subpath=MagickFalse;
  return(MagickTrue);
}
/*
  Parse an SVG-style path string ("M 10,20 L 30,40 ... Z") and append the
  traced point primitives at mvg_info->offset.  Returns the total number of
  coordinates emitted, or -1 on parse/allocation failure.  Lowercase
  commands use coordinates relative to the current point; uppercase are
  absolute.  NOTE: several helpers (CheckPrimitiveExtent, TraceBezier,
  TraceArcPath) may reallocate the primitive array, so 'q' is re-fetched
  from mvg_info after every such call.
*/
static ssize_t TracePath(MVGInfo *mvg_info,const char *path,
ExceptionInfo *exception)
{
char
*next_token,
token[MagickPathExtent];
const char
*p;
double
x,
y;
int
attribute,
last_attribute;
MagickBooleanType
status;
PointInfo
end = {0.0, 0.0},
points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} },
point = {0.0, 0.0},
start = {0.0, 0.0};
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
PrimitiveInfo
*q;
ssize_t
i;
size_t
number_coordinates,
z_count;
ssize_t
subpath_offset;
subpath_offset=mvg_info->offset;
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
status=MagickTrue;
attribute=0;
number_coordinates=0;
z_count=0;
primitive_type=primitive_info->primitive;
q=primitive_info;
for (p=path; *p != '\0'; )
{
if (status == MagickFalse)
break;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == '\0')
break;
last_attribute=attribute;
attribute=(int) (*p++);
switch (attribute)
{
case 'a':
case 'A':
{
double
angle = 0.0;
MagickBooleanType
large_arc = MagickFalse,
sweep = MagickFalse;
PointInfo
arc = {0.0, 0.0};
/*
Elliptical arc.
*/
do
{
/* parse: rx ry x-axis-rotation large-arc-flag sweep-flag x y */
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
arc.x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
arc.y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
angle=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
if (TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep) == MagickFalse)
return(-1);
/* TraceArcPath may reallocate; re-fetch q and advance the offset */
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'c':
case 'C':
{
/*
Cubic Bézier curve.
*/
do
{
/* control polygon: current point plus three parsed points */
points[0]=point;
for (i=1; i < 4; i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
points[i]=end;
}
for (i=0; i < 4; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,4) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'H':
case 'h':
{
/* horizontal line-to: only x is parsed, y keeps the current value */
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(-1);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'l':
case 'L':
{
/*
Line to.
*/
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(-1);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'M':
case 'm':
{
/*
Move to.
*/
/* starting a new subpath: close the bookkeeping of the previous one */
if (mvg_info->offset != subpath_offset)
{
primitive_info=(*mvg_info->primitive_info)+subpath_offset;
primitive_info->coordinates=(size_t) (q-primitive_info);
number_coordinates+=primitive_info->coordinates;
primitive_info=q;
subpath_offset=mvg_info->offset;
}
i=0;
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
/* the first pair is the subpath origin used by 'z'/'Z' */
if (i == 0)
start=point;
i++;
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(-1);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'q':
case 'Q':
{
/*
Quadratic Bézier curve.
*/
do
{
points[0]=point;
for (i=1; i < 3; i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (*p == ',')
p++;
end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
points[i]=end;
}
for (i=0; i < 3; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,3) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 's':
case 'S':
{
/*
Cubic Bézier curve.
*/
do
{
/* smooth variant: first control point reflects the previous one */
points[0]=points[3];
points[1].x=2.0*points[3].x-points[2].x;
points[1].y=2.0*points[3].y-points[2].y;
for (i=2; i < 4; i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (*p == ',')
p++;
end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
points[i]=end;
}
/* no preceding curve command: the reflection degenerates to 'point' */
if (strchr("CcSs",last_attribute) == (char *) NULL)
{
points[0]=point;
points[1]=point;
}
for (i=0; i < 4; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,4) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
last_attribute=attribute;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 't':
case 'T':
{
/*
Quadratic Bézier curve.
*/
do
{
/* smooth variant: control point reflects the previous one */
points[0]=points[2];
points[1].x=2.0*points[2].x-points[1].x;
points[1].y=2.0*points[2].y-points[1].y;
for (i=2; i < 3; i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
x=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
points[i]=end;
}
if (status == MagickFalse)
break;
if (strchr("QqTt",last_attribute) == (char *) NULL)
{
points[0]=point;
points[1]=point;
}
for (i=0; i < 3; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,3) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
last_attribute=attribute;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'v':
case 'V':
{
/*
Line to.
*/
/* vertical line-to: only y is parsed, x keeps the current value */
do
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
y=GetDrawValue(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(-1);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'z':
case 'Z':
{
/*
Close path.
*/
/* emit the subpath origin again and finalize the subpath record */
point=start;
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(-1);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(-1);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
primitive_info=(*mvg_info->primitive_info)+subpath_offset;
primitive_info->coordinates=(size_t) (q-primitive_info);
primitive_info->closed_subpath=MagickTrue;
number_coordinates+=primitive_info->coordinates;
primitive_info=q;
subpath_offset=mvg_info->offset;
z_count++;
break;
}
default:
{
/* unrecognized path command */
ThrowPointExpectedException(token,exception);
break;
}
}
}
if (status == MagickFalse)
return(-1);
/* finalize the last (possibly unclosed) subpath */
primitive_info=(*mvg_info->primitive_info)+subpath_offset;
primitive_info->coordinates=(size_t) (q-primitive_info);
number_coordinates+=primitive_info->coordinates;
/* back-propagate the primitive type; multiple closed subpaths switch the
   fill method so holes render correctly */
for (i=0; i < (ssize_t) number_coordinates; i++)
{
q--;
q->primitive=primitive_type;
if (z_count > 1)
q->method=FillToBorderMethod;
}
q=primitive_info;
return((ssize_t) number_coordinates);
}
static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  /*
    Trace an axis-aligned rectangle as a closed five-point polyline:
    start, (start.x,end.y), end, (end.x,start.y), and back to start.
  */
  PointInfo
    corners[5];
  PrimitiveInfo
    *q;
  ssize_t
    k;
  corners[0]=start;
  corners[1].x=start.x;
  corners[1].y=end.y;
  corners[2]=end;
  corners[3].x=end.x;
  corners[3].y=start.y;
  corners[4]=start;
  q=primitive_info;
  for (k=0; k < 5; k++)
  {
    if (TracePoint(q,corners[k]) == MagickFalse)
      return(MagickFalse);
    q+=q->coordinates;
  }
  primitive_info->coordinates=(size_t) (q-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  /* back-propagate the primitive type over every emitted entry */
  for (k=0; k < (ssize_t) primitive_info->coordinates; k++)
  {
    q->primitive=primitive_info->primitive;
    q--;
  }
  return(MagickTrue);
}
/*
  Trace a rounded rectangle as four quarter-ellipse corner arcs plus a
  closing point.  'arc' holds the corner radii and is clamped so the
  corners cannot overlap.  The arcs are traced sequentially (each advances
  mvg_info->offset), then the offset is rewound so the whole shape becomes
  a single primitive record at the original offset.
*/
static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info,
const PointInfo start,const PointInfo end,PointInfo arc)
{
PointInfo
degrees,
point,
segment;
PrimitiveInfo
*primitive_info;
PrimitiveInfo
*p;
ssize_t
i;
ssize_t
offset;
offset=mvg_info->offset;
segment.x=fabs(end.x-start.x);
segment.y=fabs(end.y-start.y);
/* degenerate rectangle: emit nothing, report success */
if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon))
{
(*mvg_info->primitive_info+mvg_info->offset)->coordinates=0;
return(MagickTrue);
}
/* clamp corner radii to half the rectangle extent */
if (arc.x > (0.5*segment.x))
arc.x=0.5*segment.x;
if (arc.y > (0.5*segment.y))
arc.y=0.5*segment.y;
/* top-right corner: 270..360 degrees */
point.x=start.x+segment.x-arc.x;
point.y=start.y+arc.y;
degrees.x=270.0;
degrees.y=360.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
/* bottom-right corner: 0..90 degrees */
point.x=start.x+segment.x-arc.x;
point.y=start.y+segment.y-arc.y;
degrees.x=0.0;
degrees.y=90.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
/* bottom-left corner: 90..180 degrees */
point.x=start.x+arc.x;
point.y=start.y+segment.y-arc.y;
degrees.x=90.0;
degrees.y=180.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
/* top-left corner: 180..270 degrees */
point.x=start.x+arc.x;
point.y=start.y+arc.y;
degrees.x=180.0;
degrees.y=270.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(MagickFalse);
/* close the outline back onto the first traced point */
p=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
/* rewind: the four arcs plus closing point become one primitive */
mvg_info->offset=offset;
primitive_info=(*mvg_info->primitive_info)+offset;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickTrue;
/* back-propagate the primitive type over every emitted entry */
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
/*
  Extend the first and last vertices of a polyline outward by 'offset'
  along the direction of their adjacent segments, producing square line
  caps.  Mutates primitive_info[0] and primitive_info[number_vertices-1]
  in place.
  NOTE(review): if every vertex coincides, dx/dy stay 0 and 'distance' is
  0, making the extension expressions divide by zero — presumably callers
  never pass such a polyline here; confirm against call sites.
*/
static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info,
const size_t number_vertices,const double offset)
{
double
distance;
double
dx,
dy;
ssize_t
i;
ssize_t
j;
dx=0.0;
dy=0.0;
/* find the first vertex measurably distinct from vertex 0 */
for (i=1; i < (ssize_t) number_vertices; i++)
{
dx=primitive_info[0].point.x-primitive_info[i].point.x;
dy=primitive_info[0].point.y-primitive_info[i].point.y;
if ((fabs((double) dx) >= MagickEpsilon) ||
(fabs((double) dy) >= MagickEpsilon))
break;
}
if (i == (ssize_t) number_vertices)
i=(ssize_t) number_vertices-1L;
distance=hypot((double) dx,(double) dy);
/* push the first vertex outward by 'offset' along the segment */
primitive_info[0].point.x=(double) (primitive_info[i].point.x+
dx*(distance+offset)/distance);
primitive_info[0].point.y=(double) (primitive_info[i].point.y+
dy*(distance+offset)/distance);
/* find the last vertex measurably distinct from the final vertex */
for (j=(ssize_t) number_vertices-2; j >= 0; j--)
{
dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
if ((fabs((double) dx) >= MagickEpsilon) ||
(fabs((double) dy) >= MagickEpsilon))
break;
}
distance=hypot((double) dx,(double) dy);
/* push the last vertex outward by 'offset' along the segment */
primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+
dx*(distance+offset)/distance);
primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+
dy*(distance+offset)/distance);
return(MagickTrue);
}
/*
  Build and return the outline polygon of a stroked path: two offset
  polylines (stroke_p one side, stroke_q the other) at half the stroke
  width from the input vertices, joined per draw_info->linejoin
  (bevel/miter/round) and capped per draw_info->linecap.  Returns a newly
  allocated PrimitiveInfo array (caller frees), or NULL on allocation
  failure.
*/
static PrimitiveInfo *TraceStrokePolygon(const DrawInfo *draw_info,
const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
{
#define MaxStrokePad (6*BezierQuantum+360)
/*
  Grow stroke_p/stroke_q so at least pad_p/pad_q more entries fit past the
  current counts p/q; on overflow or allocation failure, free everything,
  record a resource-limit exception, and return NULL from the enclosing
  function.
*/
#define CheckPathExtent(pad_p,pad_q) \
{ \
if ((pad_p) > MaxBezierCoordinates) \
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
else \
if ((ssize_t) (p+(pad_p)) >= (ssize_t) extent_p) \
{ \
if (~extent_p < (pad_p)) \
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
else \
{ \
extent_p+=(pad_p); \
stroke_p=(PointInfo *) ResizeQuantumMemory(stroke_p,extent_p+ \
MaxStrokePad,sizeof(*stroke_p)); \
} \
} \
if ((pad_q) > MaxBezierCoordinates) \
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
else \
if ((ssize_t) (q+(pad_q)) >= (ssize_t) extent_q) \
{ \
if (~extent_q < (pad_q)) \
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
else \
{ \
extent_q+=(pad_q); \
stroke_q=(PointInfo *) ResizeQuantumMemory(stroke_q,extent_q+ \
MaxStrokePad,sizeof(*stroke_q)); \
} \
} \
if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) \
{ \
if (stroke_p != (PointInfo *) NULL) \
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
if (stroke_q != (PointInfo *) NULL) \
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
polygon_primitive=(PrimitiveInfo *) \
RelinquishMagickMemory(polygon_primitive); \
(void) ThrowMagickException(exception,GetMagickModule(), \
ResourceLimitError,"MemoryAllocationFailed","`%s'",""); \
return((PrimitiveInfo *) NULL); \
} \
}
/* a quantity (slope, delta, angle) for segment p and its successor q */
typedef struct _StrokeSegment
{
double
p,
q;
} StrokeSegment;
double
delta_theta,
dot_product,
mid,
miterlimit;
MagickBooleanType
closed_path;
PointInfo
box_p[5],
box_q[5],
center,
offset,
*stroke_p,
*stroke_q;
PrimitiveInfo
*polygon_primitive,
*stroke_polygon;
ssize_t
i;
size_t
arc_segments,
extent_p,
extent_q,
number_vertices;
ssize_t
j,
n,
p,
q;
StrokeSegment
dx = {0.0, 0.0},
dy = {0.0, 0.0},
inverse_slope = {0.0, 0.0},
slope = {0.0, 0.0},
theta = {0.0, 0.0};
/*
Allocate paths.
*/
number_vertices=primitive_info->coordinates;
polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
number_vertices+2UL,sizeof(*polygon_primitive));
if (polygon_primitive == (PrimitiveInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return((PrimitiveInfo *) NULL);
}
(void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices*
sizeof(*polygon_primitive));
/* a path is closed when its first and last vertices coincide */
offset.x=primitive_info[number_vertices-1].point.x-primitive_info[0].point.x;
offset.y=primitive_info[number_vertices-1].point.y-primitive_info[0].point.y;
closed_path=(fabs(offset.x) < MagickEpsilon) &&
(fabs(offset.y) < MagickEpsilon) ? MagickTrue : MagickFalse;
/* for round/miter joins on a closed path, wrap one vertex so the join at
   the seam is computed too */
if (((draw_info->linejoin == RoundJoin) ||
(draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse))
{
polygon_primitive[number_vertices]=primitive_info[1];
number_vertices++;
}
polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
/*
Compute the slope for the first line segment, p.
*/
dx.p=0.0;
dy.p=0.0;
/* find the first vertex measurably distinct from vertex 0 */
for (n=1; n < (ssize_t) number_vertices; n++)
{
dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
break;
}
if (n == (ssize_t) number_vertices)
{
if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse))
{
/*
Zero length subpath.
*/
stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory(
sizeof(*stroke_polygon));
stroke_polygon[0]=polygon_primitive[0];
stroke_polygon[0].coordinates=0;
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
polygon_primitive);
return(stroke_polygon);
}
n=(ssize_t) number_vertices-1L;
}
extent_p=2*number_vertices;
extent_q=2*number_vertices;
stroke_p=(PointInfo *) AcquireQuantumMemory((size_t) extent_p+MaxStrokePad,
sizeof(*stroke_p));
stroke_q=(PointInfo *) AcquireQuantumMemory((size_t) extent_q+MaxStrokePad,
sizeof(*stroke_q));
if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL))
{
if (stroke_p != (PointInfo *) NULL)
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
if (stroke_q != (PointInfo *) NULL)
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
polygon_primitive=(PrimitiveInfo *)
RelinquishMagickMemory(polygon_primitive);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return((PrimitiveInfo *) NULL);
}
/* near-vertical/near-horizontal segments use +-1/MagickEpsilon as a
   finite sentinel slope, signed by direction */
slope.p=0.0;
inverse_slope.p=0.0;
if (fabs(dx.p) < MagickEpsilon)
{
if (dx.p >= 0.0)
slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
if (fabs(dy.p) < MagickEpsilon)
{
if (dy.p >= 0.0)
inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
inverse_slope.p=dx.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
{
slope.p=dy.p/dx.p;
inverse_slope.p=(-1.0/slope.p);
}
/* mid = half the stroke width in device space; miterlimit is compared
   against squared distances below */
mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid);
if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
(void) TraceSquareLinecap(polygon_primitive,number_vertices,mid);
/* initial offset boxes perpendicular to the first segment */
offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
offset.y=(double) (offset.x*inverse_slope.p);
if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
{
box_p[0].x=polygon_primitive[0].point.x-offset.x;
box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
box_p[1].x=polygon_primitive[n].point.x-offset.x;
box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
box_q[0].x=polygon_primitive[0].point.x+offset.x;
box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
box_q[1].x=polygon_primitive[n].point.x+offset.x;
box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
}
else
{
box_p[0].x=polygon_primitive[0].point.x+offset.x;
box_p[0].y=polygon_primitive[0].point.y+offset.y;
box_p[1].x=polygon_primitive[n].point.x+offset.x;
box_p[1].y=polygon_primitive[n].point.y+offset.y;
box_q[0].x=polygon_primitive[0].point.x-offset.x;
box_q[0].y=polygon_primitive[0].point.y-offset.y;
box_q[1].x=polygon_primitive[n].point.x-offset.x;
box_q[1].y=polygon_primitive[n].point.y-offset.y;
}
/*
Create strokes for the line join attribute: bevel, miter, round.
*/
p=0;
q=0;
stroke_q[p++]=box_q[0];
stroke_p[q++]=box_p[0];
for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
{
/*
Compute the slope for this line segment, q.
*/
dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
dot_product=dx.q*dx.q+dy.q*dy.q;
/* skip segments shorter than half a pixel */
if (dot_product < 0.25)
continue;
slope.q=0.0;
inverse_slope.q=0.0;
if (fabs(dx.q) < MagickEpsilon)
{
if (dx.q >= 0.0)
slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
if (fabs(dy.q) < MagickEpsilon)
{
if (dy.q >= 0.0)
inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
{
slope.q=dy.q/dx.q;
inverse_slope.q=(-1.0/slope.q);
}
offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
offset.y=(double) (offset.x*inverse_slope.q);
dot_product=dy.q*offset.x-dx.q*offset.y;
if (dot_product > 0.0)
{
box_p[2].x=polygon_primitive[n].point.x-offset.x;
box_p[2].y=polygon_primitive[n].point.y-offset.y;
box_p[3].x=polygon_primitive[i].point.x-offset.x;
box_p[3].y=polygon_primitive[i].point.y-offset.y;
box_q[2].x=polygon_primitive[n].point.x+offset.x;
box_q[2].y=polygon_primitive[n].point.y+offset.y;
box_q[3].x=polygon_primitive[i].point.x+offset.x;
box_q[3].y=polygon_primitive[i].point.y+offset.y;
}
else
{
box_p[2].x=polygon_primitive[n].point.x+offset.x;
box_p[2].y=polygon_primitive[n].point.y+offset.y;
box_p[3].x=polygon_primitive[i].point.x+offset.x;
box_p[3].y=polygon_primitive[i].point.y+offset.y;
box_q[2].x=polygon_primitive[n].point.x-offset.x;
box_q[2].y=polygon_primitive[n].point.y-offset.y;
box_q[3].x=polygon_primitive[i].point.x-offset.x;
box_q[3].y=polygon_primitive[i].point.y-offset.y;
}
/* box_[pq][4] is the intersection of the two offset lines (the miter
   point); parallel segments reuse the shared endpoint */
if (fabs((double) (slope.p-slope.q)) < MagickEpsilon)
{
box_p[4]=box_p[1];
box_q[4]=box_q[1];
}
else
{
box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
box_p[3].y)/(slope.p-slope.q));
box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
box_q[3].y)/(slope.p-slope.q));
box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
}
DisableMSCWarning(4127)
CheckPathExtent(MaxStrokePad,MaxStrokePad);
RestoreMSCWarning
/* sign of the cross product picks which side is the outer join */
dot_product=dx.q*dy.p-dx.p*dy.q;
if (dot_product <= 0.0)
switch (draw_info->linejoin)
{
case BevelJoin:
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_p[p++]=box_p[4];
else
{
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
break;
}
case MiterJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
{
stroke_q[q++]=box_q[4];
stroke_p[p++]=box_p[4];
}
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
break;
}
case RoundJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_p[p++]=box_p[4];
else
{
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
/* approximate the round join with an arc of short chords */
center=polygon_primitive[n].point;
theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
if (theta.q < theta.p)
theta.q+=2.0*MagickPI;
arc_segments=(size_t) CastDoubleToLong(ceil((double) ((theta.
q-theta.p)/(2.0*sqrt(PerceptibleReciprocal(mid))))));
DisableMSCWarning(4127)
CheckPathExtent(MaxStrokePad,arc_segments+MaxStrokePad);
RestoreMSCWarning
stroke_q[q].x=box_q[1].x;
stroke_q[q].y=box_q[1].y;
q++;
for (j=1; j < (ssize_t) arc_segments; j++)
{
delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
stroke_q[q].x=(double) (center.x+mid*cos(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
stroke_q[q].y=(double) (center.y+mid*sin(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
q++;
}
stroke_q[q++]=box_q[2];
break;
}
default:
break;
}
else
/* mirror image of the cases above with p and q sides swapped */
switch (draw_info->linejoin)
{
case BevelJoin:
{
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_q[q++]=box_q[4];
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
}
break;
}
case MiterJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
{
stroke_q[q++]=box_q[4];
stroke_p[p++]=box_p[4];
}
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
stroke_p[p++]=box_p[1];
stroke_p[p++]=box_p[2];
}
break;
}
case RoundJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
stroke_q[q++]=box_q[4];
else
{
stroke_q[q++]=box_q[1];
stroke_q[q++]=box_q[2];
}
center=polygon_primitive[n].point;
theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
if (theta.p < theta.q)
theta.p+=2.0*MagickPI;
arc_segments=(size_t) CastDoubleToLong(ceil((double) ((theta.p-
theta.q)/(2.0*sqrt((double) (PerceptibleReciprocal(mid)))))));
DisableMSCWarning(4127)
CheckPathExtent(arc_segments+MaxStrokePad,MaxStrokePad);
RestoreMSCWarning
stroke_p[p++]=box_p[1];
for (j=1; j < (ssize_t) arc_segments; j++)
{
delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
stroke_p[p].x=(double) (center.x+mid*cos(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
stroke_p[p].y=(double) (center.y+mid*sin(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
p++;
}
stroke_p[p++]=box_p[2];
break;
}
default:
break;
}
/* slide the window: segment q becomes the new segment p */
slope.p=slope.q;
inverse_slope.p=inverse_slope.q;
box_p[0]=box_p[2];
box_p[1]=box_p[3];
box_q[0]=box_q[2];
box_q[1]=box_q[3];
dx.p=dx.q;
dy.p=dy.q;
n=i;
}
stroke_p[p++]=box_p[1];
stroke_q[q++]=box_q[1];
/*
Trace stroked polygon.
*/
/* assemble: side p forward, then side q reversed, with extra points to
   close each half when the input path was closed */
stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
(p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
if (stroke_polygon == (PrimitiveInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
polygon_primitive);
return(stroke_polygon);
}
for (i=0; i < (ssize_t) p; i++)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_p[i];
}
if (closed_path != MagickFalse)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[0].point;
i++;
}
for ( ; i < (ssize_t) (p+q+closed_path); i++)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_q[p+q+closed_path-(i+1)];
}
if (closed_path != MagickFalse)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
i++;
}
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[0].point;
i++;
stroke_polygon[i].primitive=UndefinedPrimitive;
stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive);
return(stroke_polygon);
}
|
5500.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization.
   Fills A with a deterministic ramp so runs are reproducible:
   A[row][col] = (row + col) / nj. */
static
void init_array (int ni, int nj,
		 DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  int row, col;

  for (row = 0; row < ni; row++) {
    for (col = 0; col < nj; col++)
      A[row][col] = ((DATA_TYPE) (row + col) / nj);
  }
}
/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nj,
		 DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int r, c;

  for (r = 0; r < ni; r++) {
    for (c = 0; c < nj; c++) {
      fprintf(stderr, DATA_PRINTF_MODIFIER, B[r][c]);
      /* line break every 20 flat-indexed elements keeps the dump readable */
      if ((r * NJ + c) % 20 == 0)
        fprintf(stderr, "\n");
    }
  }
  fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
   including the call and return.
   Applies a fixed-coefficient 3x3 convolution of A into the interior
   of B (borders of B are not written). */
static
void kernel_conv2d(int ni,
		   int nj,
		   DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
		   DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;
#pragma scop
  /* BUG FIX: the original "#pragma omp" named no directive and is
     ignored by every compiler, so the kernel always ran serially.
     Parallelize the outer loop; j is declared at function scope and
     must be private or all threads would race on the single shared
     copy.  Rows of B are independent (A is only read), so no further
     synchronization is needed. */
#pragma omp parallel for private(j)
  for (i = 1; i < _PB_NI - 1; ++i)
  {
    for (j = 1; j < _PB_NJ - 1; ++j)
    {
      B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
              + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
              +  0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
    }
  }
#pragma endscop
}
/* Driver: allocate the NI x NJ arrays, initialize A, time the kernel,
   and print B to defeat dead-code elimination. */
int main(int argc, char** argv)
{
  /* Retrieve problem size (compile-time constants NI, NJ). */
  int ni = NI;
  int nj = NJ;
  /* Variable declaration/allocation (polybench heap-backed 2D arrays). */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);
  /* Initialize array(s).
     NOTE(review): only A is initialized; kernel_conv2d writes just the
     interior of B, so print_array below reads B's border entries
     uninitialized — consider zero-filling B as well. */
  init_array (ni, nj, POLYBENCH_ARRAY(A));
  /* Start timer. */
  //polybench_start_instruments;
  polybench_timer_start();
  /* Run kernel. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;
  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));
  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);
  return 0;
}
|
text_parser.h | /*!
* Copyright (c) 2015 by Contributors
* \file text_parser.h
* \brief iterator parser to parse text format
* \author Tianqi Chen
*/
#ifndef DMLC_DATA_TEXT_PARSER_H_
#define DMLC_DATA_TEXT_PARSER_H_
#include <dmlc/data.h>
#include <dmlc/omp.h>
#include <algorithm>
#include <cstring>
#include <exception>
#include <mutex>
#include <thread>
#include <vector>
#include "./row_block.h"
#include "./parser.h"
namespace dmlc {
namespace data {
/*!
 * \brief Text parser that parses the input lines
 * and returns rows in input data
 */
template <typename IndexType, typename DType = real_t>
class TextParserBase : public ParserImpl<IndexType, DType> {
 public:
  /*!
   * \brief constructor
   * \param source input split providing raw text chunks; ownership is
   *        taken and the split is deleted in the destructor
   * \param nthread requested number of parser threads; the effective
   *        count is capped at max(num_procs / 2 - 4, 1)
   */
  explicit TextParserBase(InputSplit *source,
                          int nthread)
      : bytes_read_(0), source_(source) {
    // leave some cores free for the rest of the pipeline
    int maxthread = std::max(omp_get_num_procs() / 2 - 4, 1);
    nthread_ = std::min(maxthread, nthread);
  }
  virtual ~TextParserBase() {
    delete source_;
  }
  /*! \brief reset the underlying split to the beginning of the input */
  virtual void BeforeFirst(void) {
    source_->BeforeFirst();
  }
  /*! \brief total number of raw bytes consumed so far */
  virtual size_t BytesRead(void) const {
    return bytes_read_;
  }
  /*!
   * \brief parse the next chunk of input
   * \param data output vector of row blocks, one per worker thread
   * \return true if a chunk was parsed, false at end of input
   */
  virtual bool ParseNext(std::vector<RowBlockContainer<IndexType, DType> > *data) {
    return FillData(data);
  }

 protected:
  /*!
   * \brief parse data into out
   * \param begin beginning of buffer
   * \param end end of buffer
   * \param out container receiving the parsed rows
   */
  virtual void ParseBlock(const char *begin, const char *end,
                          RowBlockContainer<IndexType, DType> *out) = 0;
  /*!
   * \brief read in next several blocks of data
   * \param data vector of data to be returned
   * \return true if the data is loaded, false if reach end
   */
  inline bool FillData(std::vector<RowBlockContainer<IndexType, DType>> *data);
  /*!
   * \brief start from bptr, go backward and find first endof line
   * \param bptr end position to go backward
   * \param begin the beginning position of buffer
   * \return position of first endof line going backward, returns begin if not found
   */
  static inline const char *BackFindEndLine(const char *bptr, const char *begin) {
    for (; bptr != begin; --bptr) {
      // either LF or CR terminates a line
      if (*bptr == '\n' || *bptr == '\r')
        return bptr;
    }
    return begin;
  }
  /*!
   * \brief Ignore UTF-8 BOM if present
   * Advances *begin past a complete EF BB BF sequence; on a partial
   * match (or early break) the pointer is rewound by the number of
   * bytes consumed so they are parsed as regular data.
   * \param begin pointer to the begin pointer (may be advanced)
   * \param end pointer to the end pointer (not modified)
   */
  static inline void IgnoreUTF8BOM(const char **begin, const char **end) {
    int count = 0;
    // walk at most 3 bytes forward while they match EF BB BF in order;
    // a break skips the for-increment, so *begin is only advanced for
    // bytes that matched
    for (count = 0; *begin != *end && count < 3; count++, ++*begin) {
      // NOTE(review): the loop condition already dereferences begin and
      // *begin, so this null guard appears redundant — confirm before
      // removing
      if (!begin || !*begin)
        break;
      if (**begin != '\xEF' && count == 0)
        break;
      if (**begin != '\xBB' && count == 1)
        break;
      if (**begin != '\xBF' && count == 2)
        break;
    }
    // incomplete BOM: rewind the bytes consumed so far
    if (count < 3)
      *begin -= count;
  }

 private:
  // effective number of parser threads (capped in the constructor)
  int nthread_;
  // number of bytes read so far
  size_t bytes_read_;
  // source split that provides the data; owned by this object
  InputSplit *source_;
  // exception_ptr holding the first exception thrown in OMP threads
  std::exception_ptr parser_exception_;
  // mutex guarding parser_exception_
  std::mutex mutex_exception_;
};
// implementation
/*!
 * \brief read the next chunk from the split and parse it in parallel
 * \param data output vector, resized to one row block per worker thread
 * \return true if a chunk was parsed, false at end of input
 * \throws rethrows (on the calling thread) the first exception raised
 *         inside the parallel region
 */
template <typename IndexType, typename DType>
inline bool TextParserBase<IndexType, DType>::FillData(
    std::vector<RowBlockContainer<IndexType, DType> > *data) {
  InputSplit::Blob chunk;
  if (!source_->NextChunk(&chunk)) return false;
  // BUG FIX: the constructor computes nthread_ from the user-requested
  // thread count, but this function previously ignored it and always
  // used omp_get_max_threads(). Honor the configured limit (still
  // capped by what OpenMP can provide).
  const int nthread = std::min(nthread_, omp_get_max_threads());
  // reserve space for data: one output block per worker
  data->resize(nthread);
  bytes_read_ += chunk.size;
  CHECK_NE(chunk.size, 0U);
  const char *head = reinterpret_cast<char *>(chunk.dptr);
#pragma omp parallel num_threads(nthread)
  {
    try {
      // threadid
      int tid = omp_get_thread_num();
      // split the chunk into nthread byte ranges of ceil(size/nthread)
      size_t nstep = (chunk.size + nthread - 1) / nthread;
      size_t sbegin = std::min(tid * nstep, chunk.size);
      size_t send = std::min((tid + 1) * nstep, chunk.size);
      // snap both boundaries back to the previous line break so no line
      // is split across two workers; the last worker keeps the raw end
      const char *pbegin = BackFindEndLine(head + sbegin,
                                           head);
      const char *pend;
      if (tid + 1 == nthread) {
        pend = head + send;
      } else {
        pend = BackFindEndLine(head + send,
                               head);
      }
      ParseBlock(pbegin, pend, &(*data)[tid]);
    } catch (dmlc::Error &ex) {
      // record only the first error; rethrown below on the calling
      // thread, because an exception escaping an OpenMP region calls
      // std::terminate
      std::lock_guard<std::mutex> lock(mutex_exception_);
      if (!parser_exception_) {
        parser_exception_ = std::current_exception();
      }
    } catch (std::exception &ex) {
      // BUG FIX: previously only dmlc::Error was caught; any other
      // std::exception escaping the parallel region would terminate
      // the whole process
      std::lock_guard<std::mutex> lock(mutex_exception_);
      if (!parser_exception_) {
        parser_exception_ = std::current_exception();
      }
    }
  }
  if (parser_exception_) {
    std::rethrow_exception(parser_exception_);
  }
  this->data_ptr_ = 0;
  return true;
}
} // namespace data
} // namespace dmlc
#endif // DMLC_DATA_TEXT_PARSER_H_
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.