source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
denseOverlappingJacobi.h | //
// Created by mbarb on 05/02/2018.
//
#ifndef PARALLELITERATIVE_DENSEOVERLAPPINGJACOBI_H
#define PARALLELITERATIVE_DENSEOVERLAPPINGJACOBI_H
#include "Eigen"
#include "utils.h"
#include "denseParallelJacobi.h"
namespace Iterative {
/**
 * Overlapping block-Jacobi solver: A is partitioned into square diagonal
 * blocks that overlap by `overlap` columns. Blocks with even and odd indices
 * write into two separate solution buffers; the overlapped regions are then
 * averaged between the two families at the end of every sweep.
 */
template <typename Scalar, long long SIZE>
class denseOverlappingJacobi : public denseParallelJacobi<Scalar, SIZE> {
public:
    /**
     * @param A          system matrix
     * @param b          right-hand side
     * @param iterations maximum number of sweeps
     * @param tolerance  per-block convergence threshold (mean L1 change)
     * @param workers    worker count hint (0 = default)
     * @param blockSize  columns per diagonal block (0 = A.cols()/workers)
     * @param overlap    columns shared by adjacent blocks (0 = blockSize/2)
     */
    explicit denseOverlappingJacobi(
            const Eigen::Matrix<Scalar, SIZE, SIZE>& A,
            const Eigen::ColumnVector<Scalar, SIZE>& b,
            const ulonglong iterations,
            const Scalar tolerance,
            const ulong workers = 0L,
            const ulonglong blockSize = 0L,
            const ulonglong overlap = 0L) :
            denseParallelJacobi<Scalar, SIZE>(A, b, iterations, tolerance, workers) {
        this->blockSize = blockSize;
        if (blockSize == 0) {
            // BUG FIX: guard against the default workers == 0, which divided by zero.
            const ulong effectiveWorkers = std::max(workers, (ulong) 1L);
            this->blockSize = std::max(ulong(this->A.cols() / effectiveWorkers), (ulong) 1L);
        }
        // BUG FIX: the member was left uninitialized whenever a non-zero
        // overlap was passed, and the fallback used the (possibly zero)
        // constructor argument instead of the computed block size.
        this->overlap = overlap;
        if (overlap == 0)
            this->overlap = this->blockSize / 2;
        splitter();
    }

    /**
     * Run sweeps until every block converges or the iteration budget is
     * exhausted; returns the final solution vector.
     */
    const Eigen::ColumnVector<Scalar, SIZE> solve() {
        Eigen::ColumnVector<Scalar, SIZE> oldSolution(this->solution);
        // one inverse per diagonal block, tagged with the block's original index
        // (the tag survives erasures and drives the even/odd buffer choice)
        std::vector<std::pair<ulonglong, Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>>> inverses(blocks.size());
        Eigen::ColumnVector<Scalar, SIZE> even_solution(this->solution);
        Eigen::ColumnVector<Scalar, SIZE> odd_solution(this->solution);
        // Compute the inverses in parallel
        #pragma omp parallel for
        for (long i = 0; i < blocks.size(); ++i) {
            inverses[i] = std::pair<ulonglong, Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>>(i,
                this->A.block(blocks[i].startCol, blocks[i].startRow, blocks[i].cols, blocks[i].rows).inverse());
        }
        auto nInverses = blocks.size();
        std::vector<int> index;  // positions of blocks that converged in the current sweep
        for (this->iteration = 0L; this->iteration < this->iterations; ++this->iteration) {
            // Calculate the solution in parallel
            #pragma omp parallel for firstprivate(oldSolution) schedule(dynamic)
            for (int i = 0; i < inverses.size(); ++i) {
                // previous value of this block, read from the parity buffer it owns
                Eigen::ColumnVector<Scalar, Eigen::Dynamic> oldBlock = inverses[i].first % 2 ?
                    odd_solution.segment(blocks[i].startCol, blocks[i].cols) :
                    even_solution.segment(blocks[i].startCol, blocks[i].cols);
                // zero the block's slice of the thread-private copy of the old
                // solution so the residual excludes the block's own contribution
                auto zeroBlock = oldSolution.segment(blocks[i].startCol, blocks[i].cols);
                zeroBlock.setZero();
                auto block = inverses[i].first % 2 ? odd_solution.segment(blocks[i].startCol, blocks[i].cols) :
                    even_solution.segment(blocks[i].startCol, blocks[i].cols);
                block = inverses[i].second * (this->b - (this->A * oldSolution)).segment(blocks[i].startCol,
                    blocks[i].cols);
                if ((oldBlock - block).template lpNorm<1>() / block.size() <= this->tolerance) {
                    #pragma omp critical
                    index.emplace_back(i);
                }
                zeroBlock = block;
            }
            // average of the two buffers on the overlapped regions
            this->solution = (even_solution + odd_solution) / (Scalar) 2.;
            // leading non-overlapping portion belongs to the even family only
            this->solution.head(overlap) = even_solution.head(overlap);
            // trailing non-overlapping portion belongs to whichever family owns the last block
            this->solution.tail(overlap) = nInverses % 2 ?
                even_solution.tail(overlap) : odd_solution.tail(overlap);
            if (!index.empty()) {
                // erase converged blocks from the back so earlier positions stay valid
                std::sort(index.rbegin(), index.rend());
                for (auto i : index) {
                    blocks.erase(blocks.begin() + i);
                    inverses.erase(inverses.begin() + i);
                }
                index.clear();
                if (inverses.empty()) break;
            }
            std::swap(this->solution, oldSolution);
        }
        std::cout << this->iteration << std::endl;
        return this->solution;
    }

protected:
    ulonglong blockSize;        // columns per diagonal block
    std::vector<Index> blocks;  // block descriptors produced by splitter()
    ulonglong overlap;          // columns shared by adjacent blocks

    // Partition A into overlapping square diagonal blocks; the last block is
    // clamped to the matrix bounds.
    void splitter() {
        for (ulonglong i = 0; i < this->A.cols() - overlap; i += (blockSize - overlap))
            blocks.emplace_back(Index(i, std::min(blockSize, (ulonglong) this->A.cols() - i),
                                      i, std::min(blockSize, (ulonglong) this->A.rows() - i)));
    }

private:
};
}
#endif //PARALLELITERATIVE_DENSEOVERLAPPINGJACOBI_H
|
symv_c_coo_u_hi_conj.c | #include "alphasparse/kernel.h"
#include "alphasparse/kernel_plain.h"
#include "alphasparse/opt.h"
#include "alphasparse/util.h"
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/*
 * y := beta * y + alpha * A * x for a sparse COO matrix of which only the
 * strictly upper triangle is used ("hi"); the diagonal is treated as unit
 * ("u", hence the unconditional alpha * x[i] term), and every stored entry
 * contributes symmetrically to its row and its column ("symv").
 * NOTE(review): alpha_mul_3c presumably applies the conjugation required by
 * the "conj" variant of this kernel -- confirm against the macro definition.
 */
static alphasparse_status_t
symv_coo_u_hi_omp(const ALPHA_Number alpha,
                  const ALPHA_SPMAT_COO *A,
                  const ALPHA_Number *x,
                  const ALPHA_Number beta,
                  ALPHA_Number *y)
{
    const ALPHA_INT m = A->rows;
    const ALPHA_INT nnz = A->nnz;
    const ALPHA_INT thread_num = alpha_get_thread_num();
    /* one length-m accumulator per thread so rows can be updated without
       atomics; NOTE(review): malloc results are not checked for NULL */
    ALPHA_Number **tmp = (ALPHA_Number **)malloc(sizeof(ALPHA_Number *) * thread_num);
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (int i = 0; i < thread_num; ++i)
    {
        tmp[i] = malloc(sizeof(ALPHA_Number) * m);
        memset(tmp[i], 0, sizeof(ALPHA_Number) * m);
    }
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < nnz; i++)
    {
        const ALPHA_INT threadId = alpha_get_thread_id();
        const ALPHA_INT r = A->row_indx[i];
        const ALPHA_INT c = A->col_indx[i];
        /* skip the diagonal and the lower triangle: only r < c is used */
        if (r >= c)
        {
            continue;
        }
        ALPHA_Number v;
        alpha_mul_3c(v, alpha, A->values[i]);
        /* scatter the entry to both mirrored positions in this thread's buffer */
        alpha_madde(tmp[threadId][r], v, x[c]);
        alpha_madde(tmp[threadId][c], v, x[r]);
    }
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < m; ++i)
    {
        /* y[i] = beta*y[i] + alpha*x[i] (unit diagonal) + per-thread partial sums */
        alpha_mul(y[i], beta, y[i]);
        alpha_madde(y[i], alpha, x[i]);
        for (ALPHA_INT j = 0; j < thread_num; ++j)
        {
            alpha_add(y[i], y[i], tmp[j][i]);
        }
    }
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (int i = 0; i < thread_num; ++i)
    {
        alpha_free(tmp[i]);
    }
    alpha_free(tmp);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
/*
 * Public entry point for the unit-diagonal / upper-triangle / conjugate COO
 * symmetric matrix-vector product: delegates to the OpenMP kernel.
 * (The previous unused thread-count lookup was removed; the kernel resolves
 * its own thread count.)
 */
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
      const ALPHA_SPMAT_COO *A,
      const ALPHA_Number *x,
      const ALPHA_Number beta,
      ALPHA_Number *y)
{
    return symv_coo_u_hi_omp(alpha, A, x, beta, y);
}
|
helloMP2.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
/* OpenMP hello-world: every thread announces itself, then thread 0 reports
 * the team size after the barrier guarantees all greetings are done. */
int main (int argc, char *argv[]) {
    int my_rank;
    #pragma omp parallel private(my_rank)
    {
        my_rank = omp_get_thread_num();
        printf("Hello World from thread %d\n", my_rank);
        /* wait for every greeting before printing the team size */
        #pragma omp barrier
        if (my_rank == 0) {
            printf("There are %d threads\n", omp_get_num_threads());
        }
    }
    return EXIT_SUCCESS;
}
|
convolution_1x1.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
// 1x1 convolution, stride 1: out[p] = bias[p] + sum_q in[q] * kernel[p*inch + q].
// Output channels run in parallel; input channels are accumulated four at a
// time (then a one-at-a-time tail), 8 pixels per vector iteration via NEON
// intrinsics on aarch64 or inline assembly on 32-bit ARM.
static void conv1x1s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    const float* kernel = _kernel;
    const float* bias = _bias;
    // one output channel per OpenMP task
    #pragma omp parallel for
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);
        // seed the whole plane with the bias so every pass can just accumulate
        const float bias0 = bias ? bias[p] : 0.f;
        out.fill(bias0);
        int q = 0;
        // main loop: accumulate four input channels per pass over the plane
        for (; q+3<inch; q+=4)
        {
            float* outptr = out;
            const float* img0 = bottom_blob.channel(q);
            const float* img1 = bottom_blob.channel(q+1);
            const float* img2 = bottom_blob.channel(q+2);
            const float* img3 = bottom_blob.channel(q+3);
            const float* kernel0 = kernel + p*inch + q;
            const float k0 = kernel0[0];
            const float k1 = kernel0[1];
            const float k2 = kernel0[2];
            const float k3 = kernel0[3];
            const float* r0 = img0;
            const float* r1 = img1;
            const float* r2 = img2;
            const float* r3 = img3;
            int size = outw * outh;
#if __ARM_NEON
            int nn = size >> 3;     // vector iterations, 8 pixels each
            int remain = size & 7;  // scalar leftovers
#else
            int remain = size;
#endif // __ARM_NEON
#if __ARM_NEON
            float32x4_t _k0 = vdupq_n_f32(k0);
            float32x4_t _k1 = vdupq_n_f32(k1);
            float32x4_t _k2 = vdupq_n_f32(k2);
            float32x4_t _k3 = vdupq_n_f32(k3);
#if __aarch64__
            // intrinsic path: two fused multiply-accumulates per channel per iteration
            for (; nn>0; nn--)
            {
                float32x4_t _p = vld1q_f32(r0);
                float32x4_t _pn = vld1q_f32(r0+4);
                float32x4_t _outp = vld1q_f32(outptr);
                float32x4_t _outpn = vld1q_f32(outptr+4);
                _outp = vfmaq_f32(_outp, _p, _k0);
                _outpn = vfmaq_f32(_outpn, _pn, _k0);
                float32x4_t _p1 = vld1q_f32(r1);
                float32x4_t _p1n = vld1q_f32(r1+4);
                _outp = vfmaq_f32(_outp, _p1, _k1);
                _outpn = vfmaq_f32(_outpn, _p1n, _k1);
                float32x4_t _p2 = vld1q_f32(r2);
                float32x4_t _p2n = vld1q_f32(r2+4);
                _outp = vfmaq_f32(_outp, _p2, _k2);
                _outpn = vfmaq_f32(_outpn, _p2n, _k2);
                float32x4_t _p3 = vld1q_f32(r3);
                float32x4_t _p3n = vld1q_f32(r3+4);
                _outp = vfmaq_f32(_outp, _p3, _k3);
                _outpn = vfmaq_f32(_outpn, _p3n, _k3);
                vst1q_f32(outptr, _outp);
                vst1q_f32(outptr+4, _outpn);
                r0 += 8;
                r1 += 8;
                r2 += 8;
                r3 += 8;
                outptr += 8;
            }
#else
            // armv7 path: hand-scheduled vmla loop; r0 is pre-loaded one
            // iteration ahead, hence the final "sub %2, #32" rewind
            if (nn > 0)
            {
                asm volatile(
                    "pld [%2, #256] \n"
                    "vld1.f32 {d4-d7}, [%2 :128]! \n"
                    "0: \n"
                    "pld [%1, #256] \n"
                    "vld1.f32 {d0-d3}, [%1 :128] \n"
                    "vmla.f32 q0, q2, %q12 \n"
                    "vmla.f32 q1, q3, %q12 \n"
                    "pld [%3, #256] \n"
                    "vld1.f32 {d4-d7}, [%3 :128]! \n"
                    "vmla.f32 q0, q2, %q13 \n"
                    "vmla.f32 q1, q3, %q13 \n"
                    "pld [%4, #256] \n"
                    "vld1.f32 {d4-d7}, [%4 :128]! \n"
                    "vmla.f32 q0, q2, %q14 \n"
                    "vmla.f32 q1, q3, %q14 \n"
                    "pld [%5, #256] \n"
                    "vld1.f32 {d4-d7}, [%5 :128]! \n"
                    "vmla.f32 q0, q2, %q15 \n"
                    "vmla.f32 q1, q3, %q15 \n"
                    "pld [%2, #256] \n"
                    "vld1.f32 {d4-d7}, [%2 :128]! \n"
                    "subs %0, #1 \n"
                    "vst1.f32 {d0-d3}, [%1 :128]! \n"
                    "bne 0b \n"
                    "sub %2, #32 \n"
                    : "=r"(nn), // %0
                    "=r"(outptr), // %1
                    "=r"(r0), // %2
                    "=r"(r1), // %3
                    "=r"(r2), // %4
                    "=r"(r3) // %5
                    : "0"(nn),
                    "1"(outptr),
                    "2"(r0),
                    "3"(r1),
                    "4"(r2),
                    "5"(r3),
                    "w"(_k0), // %12
                    "w"(_k1), // %13
                    "w"(_k2), // %14
                    "w"(_k3) // %15
                    : "cc", "memory", "q0", "q1", "q2", "q3"
                );
            }
#endif // __aarch64__
#endif // __ARM_NEON
            // scalar tail for the last (size & 7) pixels
            for (; remain>0; remain--)
            {
                float sum = *r0 * k0;
                float sum1 = *r1 * k1;
                float sum2 = *r2 * k2;
                float sum3 = *r3 * k3;
                *outptr += sum + sum1 + sum2 + sum3;
                r0++;
                r1++;
                r2++;
                r3++;
                outptr++;
            }
        }
        // tail loop: remaining input channels one at a time
        for (; q<inch; q++)
        {
            float* outptr = out;
            const float* img0 = bottom_blob.channel(q);
            const float* kernel0 = kernel + p*inch + q;
            const float k0 = kernel0[0];
            const float* r0 = img0;
            int size = outw * outh;
#if __ARM_NEON
            int nn = size >> 3;
            int remain = size & 7;
#else
            int remain = size;
#endif // __ARM_NEON
#if __ARM_NEON
            float32x4_t _k0 = vdupq_n_f32(k0);
#if __aarch64__
            for (; nn>0; nn--)
            {
                float32x4_t _p = vld1q_f32(r0);
                float32x4_t _outp = vld1q_f32(outptr);
                float32x4_t _pn = vld1q_f32(r0+4);
                float32x4_t _outpn = vld1q_f32(outptr+4);
                _outp = vfmaq_f32(_outp, _p, _k0);
                _outpn = vfmaq_f32(_outpn, _pn, _k0);
                vst1q_f32(outptr, _outp);
                vst1q_f32(outptr+4, _outpn);
                r0 += 8;
                outptr += 8;
            }
#else
            // single-channel version of the asm loop above
            if (nn > 0)
            {
                asm volatile(
                    "pld [%2, #256] \n"
                    "vld1.f32 {d4-d7}, [%2 :128]! \n"
                    "0: \n"
                    "pld [%1, #256] \n"
                    "vld1.f32 {d0-d3}, [%1 :128] \n"
                    "vmla.f32 q0, q2, %q6 \n"
                    "vmla.f32 q1, q3, %q6 \n"
                    "pld [%2, #256] \n"
                    "vld1.f32 {d4-d7}, [%2 :128]! \n"
                    "subs %0, #1 \n"
                    "vst1.f32 {d0-d3}, [%1 :128]! \n"
                    "bne 0b \n"
                    "sub %2, #32 \n"
                    : "=r"(nn), // %0
                    "=r"(outptr), // %1
                    "=r"(r0) // %2
                    : "0"(nn),
                    "1"(outptr),
                    "2"(r0),
                    "w"(_k0) // %6
                    : "cc", "memory", "q0", "q1", "q2", "q3"
                );
            }
#endif // __aarch64__
#endif // __ARM_NEON
            for (; remain>0; remain--)
            {
                float sum = *r0 * k0;
                *outptr += sum;
                r0++;
                outptr++;
            }
        }
    }
}
// 1x1 convolution, stride 2: same accumulation scheme as conv1x1s1_neon, but
// only every second input column/row contributes. The vector paths use vld2
// to de-interleave pixel pairs and keep the even-indexed lane.
static void conv1x1s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    // after consuming a row (2*outw inputs), skip the row remainder plus the
    // entire next input row (vertical stride 2)
    const int tailstep = w - 2*outw + w;
    const float* kernel = _kernel;
    const float* bias = _bias;
    // one output channel per OpenMP task
    #pragma omp parallel for
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);
        const float bias0 = bias ? bias[p] : 0.f;
        out.fill(bias0);
        int q = 0;
        // main loop: accumulate four input channels per pass
        for (; q+3<inch; q+=4)
        {
            float* outptr = out;
            const float* img0 = bottom_blob.channel(q);
            const float* img1 = bottom_blob.channel(q+1);
            const float* img2 = bottom_blob.channel(q+2);
            const float* img3 = bottom_blob.channel(q+3);
            const float* kernel0 = kernel + p*inch + q;
            const float k0 = kernel0[0];
            const float k1 = kernel0[1];
            const float k2 = kernel0[2];
            const float k3 = kernel0[3];
            const float* r0 = img0;
            const float* r1 = img1;
            const float* r2 = img2;
            const float* r3 = img3;
            // row by row because of the vertical stride
            for (int i = 0; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 3;     // vector iterations, 8 outputs each
                int remain = outw & 7;  // scalar leftovers
#else
                int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
                float32x4_t _k0 = vdupq_n_f32(k0);
                float32x4_t _k1 = vdupq_n_f32(k1);
                float32x4_t _k2 = vdupq_n_f32(k2);
                float32x4_t _k3 = vdupq_n_f32(k3);
#if __aarch64__
                // vld2q de-interleaves {even,odd} pixels; val[0] keeps the strided lane
                for (; nn>0; nn--)
                {
                    float32x4x2_t _px2 = vld2q_f32(r0);
                    float32x4_t _p = _px2.val[0];
                    float32x4_t _outp = vld1q_f32(outptr);
                    float32x4x2_t _pnx2 = vld2q_f32(r0+8);
                    float32x4_t _pn = _pnx2.val[0];
                    float32x4_t _outpn = vld1q_f32(outptr+4);
                    _outp = vmlaq_f32(_outp, _p, _k0);
                    _outpn = vmlaq_f32(_outpn, _pn, _k0);
                    float32x4x2_t _p1x2 = vld2q_f32(r1);
                    float32x4_t _p1 = _p1x2.val[0];
                    float32x4x2_t _p1nx2 = vld2q_f32(r1+8);
                    float32x4_t _p1n = _p1nx2.val[0];
                    _outp = vmlaq_f32(_outp, _p1, _k1);
                    _outpn = vmlaq_f32(_outpn, _p1n, _k1);
                    float32x4x2_t _p2x2 = vld2q_f32(r2);
                    float32x4_t _p2 = _p2x2.val[0];
                    float32x4x2_t _p2nx2 = vld2q_f32(r2+8);
                    float32x4_t _p2n = _p2nx2.val[0];
                    _outp = vmlaq_f32(_outp, _p2, _k2);
                    _outpn = vmlaq_f32(_outpn, _p2n, _k2);
                    float32x4x2_t _p3x2 = vld2q_f32(r3);
                    float32x4_t _p3 = _p3x2.val[0];
                    float32x4x2_t _p3nx2 = vld2q_f32(r3+8);
                    float32x4_t _p3n = _p3nx2.val[0];
                    _outp = vmlaq_f32(_outp, _p3, _k3);
                    _outpn = vmlaq_f32(_outpn, _p3n, _k3);
                    vst1q_f32(outptr, _outp);
                    vst1q_f32(outptr+4, _outpn);
                    // 16 input floats consumed per 8 outputs (stride 2)
                    r0 += 16;
                    r1 += 16;
                    r2 += 16;
                    r3 += 16;
                    outptr += 8;
                }
#else
                // armv7 path: vld2 keeps even lanes in q2/q8; r0 is pre-loaded
                // one iteration ahead, hence the final "sub %2, #64" rewind
                if (nn > 0)
                {
                    asm volatile(
                        "pld [%2, #512] \n"
                        "vld2.f32 {d4-d7}, [%2]! \n"
                        "vld2.f32 {d16-d19}, [%2]! \n"
                        "0: \n"
                        "pld [%1, #256] \n"
                        "vld1.f32 {d0-d3}, [%1] \n"
                        "vmla.f32 q0, q2, %q12 \n"
                        "vmla.f32 q1, q8, %q12 \n"
                        "pld [%3, #512] \n"
                        "vld2.f32 {d4-d7}, [%3]! \n"
                        "vld2.f32 {d16-d19}, [%3]! \n"
                        "vmla.f32 q0, q2, %q13 \n"
                        "vmla.f32 q1, q8, %q13 \n"
                        "pld [%4, #512] \n"
                        "vld2.f32 {d4-d7}, [%4]! \n"
                        "vld2.f32 {d16-d19}, [%4]! \n"
                        "vmla.f32 q0, q2, %q14 \n"
                        "vmla.f32 q1, q8, %q14 \n"
                        "pld [%5, #512] \n"
                        "vld2.f32 {d4-d7}, [%5]! \n"
                        "vld2.f32 {d16-d19}, [%5]! \n"
                        "vmla.f32 q0, q2, %q15 \n"
                        "vmla.f32 q1, q8, %q15 \n"
                        "pld [%2, #512] \n"
                        "vld2.f32 {d4-d7}, [%2]! \n"
                        "vld2.f32 {d16-d19}, [%2]! \n"
                        "subs %0, #1 \n"
                        "vst1.f32 {d0-d3}, [%1]! \n"
                        "bne 0b \n"
                        "sub %2, #64 \n"
                        : "=r"(nn), // %0
                        "=r"(outptr), // %1
                        "=r"(r0), // %2
                        "=r"(r1), // %3
                        "=r"(r2), // %4
                        "=r"(r3) // %5
                        : "0"(nn),
                        "1"(outptr),
                        "2"(r0),
                        "3"(r1),
                        "4"(r2),
                        "5"(r3),
                        "w"(_k0), // %12
                        "w"(_k1), // %13
                        "w"(_k2), // %14
                        "w"(_k3) // %15
                        : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9"
                    );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                // scalar tail: advance inputs by 2 per output (horizontal stride)
                for (; remain>0; remain--)
                {
                    float sum = *r0 * k0;
                    float sum1 = *r1 * k1;
                    float sum2 = *r2 * k2;
                    float sum3 = *r3 * k3;
                    *outptr += sum + sum1 + sum2 + sum3;
                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    r3 += 2;
                    outptr++;
                }
                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
                r3 += tailstep;
            }
        }
        // tail loop: remaining input channels one at a time
        for (; q<inch; q++)
        {
            float* outptr = out;
            const float* img0 = bottom_blob.channel(q);
            const float* kernel0 = kernel + p*inch + q;
            const float k0 = kernel0[0];
            const float* r0 = img0;
            for (int i = 0; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 3;
                int remain = outw & 7;
#else
                int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
                float32x4_t _k0 = vdupq_n_f32(k0);
#if __aarch64__
                for (; nn>0; nn--)
                {
                    float32x4x2_t _px2 = vld2q_f32(r0);
                    float32x4_t _p = _px2.val[0];
                    float32x4_t _outp = vld1q_f32(outptr);
                    float32x4x2_t _pnx2 = vld2q_f32(r0+8);
                    float32x4_t _pn = _pnx2.val[0];
                    float32x4_t _outpn = vld1q_f32(outptr+4);
                    _outp = vmlaq_f32(_outp, _p, _k0);
                    _outpn = vmlaq_f32(_outpn, _pn, _k0);
                    vst1q_f32(outptr, _outp);
                    vst1q_f32(outptr+4, _outpn);
                    r0 += 16;
                    outptr += 8;
                }
#else
                // single-channel version of the strided asm loop above
                if (nn > 0)
                {
                    asm volatile(
                        "pld [%2, #512] \n"
                        "vld2.f32 {d4-d7}, [%2]! \n"
                        "vld2.f32 {d16-d19}, [%2]! \n"
                        "0: \n"
                        "pld [%1, #256] \n"
                        "vld1.f32 {d0-d3}, [%1] \n"
                        "vmla.f32 q0, q2, %q6 \n"
                        "vmla.f32 q1, q8, %q6 \n"
                        "pld [%2, #512] \n"
                        "vld2.f32 {d4-d7}, [%2]! \n"
                        "vld2.f32 {d16-d19}, [%2]! \n"
                        "subs %0, #1 \n"
                        "vst1.f32 {d0-d3}, [%1]! \n"
                        "bne 0b \n"
                        "sub %2, #64 \n"
                        : "=r"(nn), // %0
                        "=r"(outptr), // %1
                        "=r"(r0) // %2
                        : "0"(nn),
                        "1"(outptr),
                        "2"(r0),
                        "w"(_k0) // %6
                        : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9"
                    );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                for (; remain>0; remain--)
                {
                    float sum = *r0 * k0;
                    *outptr += sum;
                    r0 += 2;
                    outptr++;
                }
                r0 += tailstep;
            }
        }
    }
}
|
fixed_version.c | #include<stdio.h>
int main(){
    /* Start from 1 and add one unit per iteration (99 iterations -> 100);
       the reduction clause merges the per-thread partial sums. */
    int total = 1;
    int first = 1;
    // increase total by one each iteration using OpenMP
    #pragma omp parallel for firstprivate(first) reduction( + : total )
    for (int iter = first; iter < 100; iter++) {
        total += 1;
    }
    printf("sum is %d\n", total);
    return 0;
}
|
Example_target_reduction.1.c | /*
* @@name: target_reduction.1.c
* @@type: C
* @@compilable: yes
* @@linkable: yes
* @@expect: success
* @@version: omp_5.0
*/
#include <stdio.h>
int f(int);
int g(int);
/*
 * Demonstrates scalar reductions across `target teams distribute` regions.
 * The first loop reduces sum1 over f(i); the second reuses the completed
 * sum1 while reducing sum2 over g(i) * sum1.
 * (The unused local `int i;` from the original was removed.)
 */
int main()
{
   int sum1=0, sum2=0;
   const int n = 100;

#pragma omp target teams distribute reduction(+:sum1)
   for (int i = 0; i < n; i++) {
      sum1 += f(i);
   }

#pragma omp target teams distribute reduction(+:sum2)
   for (int i = 0; i < n; i++) {
      sum2 += g(i) * sum1;
   }

   printf( "sum1 = %d, sum2 = %d\n", sum1, sum2);
   //OUTPUT: sum1 = 9900, sum2 = 147015000
   return 0;
}
int f(int res){ return res*2; }
int g(int res){ return res*3; }
|
bktree.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifndef VECTORSEARCH_THIRD_PARTY_SPTAG_BKTREE_H_
#define VECTORSEARCH_THIRD_PARTY_SPTAG_BKTREE_H_
#include <iostream>
#include <stack>
#include <string>
#include <vector>
#include "vector_index.h"
#include "common_utils.h"
#include "query_result_set.h"
#include "workspace.h"
//#include "absl/container/flat_hash_map.h"
#pragma warning(disable : 4996) // 'fopen': This function or variable may be
// unsafe. Consider using fopen_s instead. To
// disable deprecation, use
// _CRT_SECURE_NO_WARNINGS. See online help for
// details.
namespace vsearch {
namespace COMMON {
// node type for storing BKT
// Node of the BK tree: the id of the cluster-center sample plus the
// [childStart, childEnd) index range of its children in the flat node array
// (both -1 until assigned).
struct BKTNode {
  int centerid = -1;
  int childStart = -1;
  int childEnd = -1;
  BKTNode(int cid = -1) : centerid(cid) {}
};
// Scratch buffers for the k-means passes used while building a BKT.  Most
// buffers hold one slice per thread (_T) so each thread accumulates without
// synchronization; the per-thread slices are reduced afterwards.
template <typename T>
struct KmeansArgs {
  int _K;              // number of clusters
  int _D;              // feature dimension
  int _T;              // number of threads (one buffer slice each)
  T* centers;          // current cluster centers, _K * _D
  int* counts;         // samples per cluster, _K
  float* newCenters;   // per-thread partial center sums, _T * _K * _D
  int* newCounts;      // per-thread partial counts, _T * _K
  char* label;         // cluster label per sample, datasize entries
  int* clusterIdx;     // per-thread representative sample per cluster, _T * _K
  float* clusterDist;  // per-thread representative distance per cluster, _T * _K
  T* newTCenters;      // recomputed centers, _K * _D
  KmeansArgs(int k, int dim, int datasize, int threadnum)
      : _K(k), _D(dim), _T(threadnum) {
    centers = new T[k * dim];
    counts = new int[k];
    newCenters = new float[threadnum * k * dim];
    newCounts = new int[threadnum * k];
    label = new char[datasize];
    clusterIdx = new int[threadnum * k];
    clusterDist = new float[threadnum * k];
    newTCenters = new T[k * dim];
  }
  // The struct owns eight raw arrays; the implicit copy operations would lead
  // to a double delete in the destructor, so copying is disabled (rule of three).
  KmeansArgs(const KmeansArgs&) = delete;
  KmeansArgs& operator=(const KmeansArgs&) = delete;
  ~KmeansArgs() {
    delete[] centers;
    delete[] counts;
    delete[] newCenters;
    delete[] newCounts;
    delete[] label;
    delete[] clusterIdx;
    delete[] clusterDist;
    delete[] newTCenters;
  }
  // Zero every thread's count slice.
  inline void ClearCounts() { memset(newCounts, 0, sizeof(int) * _T * _K); }
  // Zero every thread's partial center sums.
  inline void ClearCenters() {
    memset(newCenters, 0, sizeof(float) * _T * _K * _D);
  }
  // Reset every thread's representative slot to (no sample, dist).
  inline void ClearDists(float dist) {
    for (int i = 0; i < _T * _K; i++) {
      clusterIdx[i] = -1;
      clusterDist[i] = dist;
    }
  }
  // Reorder indices[first, last) in place so that the members of each cluster
  // are contiguous (segment k starts at pos[k]) and each cluster's
  // representative sample (clusterIdx[k]) ends up last in its segment.
  void Shuffle(std::vector<int>& indices, int first, int last) {
    int* pos = new int[_K];
    pos[0] = first;
    for (int k = 1; k < _K; k++) pos[k] = pos[k - 1] + newCounts[k - 1];
    for (int k = 0; k < _K; k++) {
      if (newCounts[k] == 0) continue;
      int i = pos[k];
      while (newCounts[k] > 0) {
        // move the sample at i into its cluster's segment, from the back
        int swapid = pos[(int)(label[i])] + newCounts[(int)(label[i])] - 1;
        newCounts[(int)(label[i])]--;
        std::swap(indices[i], indices[swapid]);
        std::swap(label[i], label[swapid]);
      }
      while (indices[i] != clusterIdx[k]) i++;
      std::swap(indices[i], indices[pos[k] + counts[k] - 1]);
    }
    delete[] pos;
  }
};
// Balanced k-means tree (BKT): samples are recursively partitioned with
// k-means; internal nodes carry a cluster-center sample id, leaves carry a
// single sample id. Nodes live in one flat array (m_pTreeRoots) and each of
// the m_iTreeNumber trees starts at m_pTreeStart[i].
class BKTree {
 public:
  BKTree()
      : m_iTreeNumber(1),
        m_iBKTKmeansK(32),
        m_iBKTLeafSize(8),
        m_iSamples(1000) {}
  // Copies only the build parameters, not any built tree data.
  BKTree(BKTree& other)
      : m_iTreeNumber(other.m_iTreeNumber),
        m_iBKTKmeansK(other.m_iBKTKmeansK),
        m_iBKTLeafSize(other.m_iBKTLeafSize),
        m_iSamples(other.m_iSamples) {}
  ~BKTree() {}
  inline const BKTNode& operator[](int index) const {
    return m_pTreeRoots[index];
  }
  inline BKTNode& operator[](int index) { return m_pTreeRoots[index]; }
  inline int size() const { return (int)m_pTreeRoots.size(); }
  // Map filled during BuildTrees for degenerate (single-cluster) subtrees.
  inline const std::unordered_map<int, int> *const GetSampleMap() const {
    return &m_pSampleCenterMap;
  }
  // Build m_iTreeNumber trees over all samples of `index` (or the subset in
  // `indices`), appending nodes to m_pTreeRoots. Uses an explicit stack
  // instead of recursion.
  template <typename T>
  void BuildTrees(VectorIndex* index, std::vector<int>* indices = nullptr) {
    struct BKTStackItem {
      int index, first, last;  // node position + [first, last) sample range
      BKTStackItem(int index_, int first_, int last_)
          : index(index_), first(first_), last(last_) {}
    };
    std::stack<BKTStackItem> ss;
    std::vector<int> localindices;
    if (indices == nullptr) {
      localindices.resize(index->GetNumSamples());
      for (int i = 0; i < index->GetNumSamples(); i++) localindices[i] = i;
    } else {
      localindices.assign(indices->begin(), indices->end());
    }
    // NOTE(review): omp_get_num_threads() outside a parallel region typically
    // returns 1 -- presumably omp_get_max_threads() was intended; confirm.
    KmeansArgs<T> args(m_iBKTKmeansK, index->GetFeatureDim(),
                       (int)localindices.size(), omp_get_num_threads());
    m_pSampleCenterMap.clear();
    for (char i = 0; i < m_iTreeNumber; i++) {
      // NOTE(review): std::random_shuffle was removed in C++17; consider
      // std::shuffle if/when the toolchain is upgraded.
      std::random_shuffle(localindices.begin(), localindices.end());
      m_pTreeStart.push_back((int)m_pTreeRoots.size());
      // the root stores the sample count in centerid
      m_pTreeRoots.push_back(BKTNode((int)localindices.size()));
      std::cout << "Start to build BKTree " << i + 1 << std::endl;
      ss.push(BKTStackItem(m_pTreeStart[i], 0, (int)localindices.size()));
      while (!ss.empty()) {
        BKTStackItem item = ss.top();
        ss.pop();
        int newBKTid = (int)m_pTreeRoots.size();
        m_pTreeRoots[item.index].childStart = newBKTid;
        if (item.last - item.first <= m_iBKTLeafSize) {
          // small enough: emit one leaf per sample
          for (int j = item.first; j < item.last; j++) {
            m_pTreeRoots.push_back(BKTNode(localindices[j]));
          }
        } else { // clustering the data into BKTKmeansK clusters
          int numClusters = KmeansClustering(index, localindices, item.first,
                                             item.last, args);
          if (numClusters <= 1) {
            // degenerate split: record the samples in the center map instead
            // of growing the tree (childStart is negated as a marker)
            int end = min(item.last + 1, (int)localindices.size());
            std::sort(localindices.begin() + item.first,
                      localindices.begin() + end);
            m_pTreeRoots[item.index].centerid = localindices[item.first];
            m_pTreeRoots[item.index].childStart =
                -m_pTreeRoots[item.index].childStart;
            for (int j = item.first + 1; j < end; j++) {
              m_pTreeRoots.push_back(BKTNode(localindices[j]));
              m_pSampleCenterMap[localindices[j]] =
                  m_pTreeRoots[item.index].centerid;
            }
            m_pSampleCenterMap[-1 - m_pTreeRoots[item.index].centerid] =
                item.index;
          } else {
            // one child node per non-empty cluster; recurse on clusters > 1
            for (int k = 0; k < m_iBKTKmeansK; k++) {
              if (args.counts[k] == 0) continue;
              m_pTreeRoots.push_back(
                  BKTNode(localindices[item.first + args.counts[k] - 1]));
              if (args.counts[k] > 1)
                ss.push(BKTStackItem(newBKTid++, item.first,
                                     item.first + args.counts[k] - 1));
              item.first += args.counts[k];
            }
          }
        }
        m_pTreeRoots[item.index].childEnd = (int)m_pTreeRoots.size();
      }
      std::cout << i + 1 << " BKTree built, "
                << m_pTreeRoots.size() - m_pTreeStart[i] << " "
                << localindices.size() << std::endl;
    }
  }
  // Serialize tree count, tree-start offsets and the flat node array.
  bool SaveTrees(std::string sTreeFileName) const {
    std::cout << "Save BKT to " << sTreeFileName << std::endl;
    FILE* fp = fopen(sTreeFileName.c_str(), "wb");
    if (fp == NULL) return false;
    fwrite(&m_iTreeNumber, sizeof(int), 1, fp);
    fwrite(m_pTreeStart.data(), sizeof(int), m_iTreeNumber, fp);
    int treeNodeSize = (int)m_pTreeRoots.size();
    fwrite(&treeNodeSize, sizeof(int), 1, fp);
    fwrite(m_pTreeRoots.data(), sizeof(BKTNode), treeNodeSize, fp);
    fclose(fp);
    std::cout << "Save BKT (" << m_iTreeNumber << "," << treeNodeSize
              << ") Finish!" << std::endl;
    return true;
  }
  // Deserialize from an in-memory image laid out exactly as SaveTrees writes.
  bool LoadTrees(char* pBKTMemFile) {
    m_iTreeNumber = *((int*)pBKTMemFile);
    pBKTMemFile += sizeof(int);
    m_pTreeStart.resize(m_iTreeNumber);
    memcpy(m_pTreeStart.data(), pBKTMemFile, sizeof(int) * m_iTreeNumber);
    pBKTMemFile += sizeof(int) * m_iTreeNumber;
    int treeNodeSize = *((int*)pBKTMemFile);
    pBKTMemFile += sizeof(int);
    m_pTreeRoots.resize(treeNodeSize);
    memcpy(m_pTreeRoots.data(), pBKTMemFile, sizeof(BKTNode) * treeNodeSize);
    return true;
  }
  // Deserialize from file.
  // NOTE(review): fread return values are ignored -- truncated files go undetected.
  bool LoadTrees(std::string sTreeFileName) {
    std::cout << "Load BKT From " << sTreeFileName << std::endl;
    FILE* fp = fopen(sTreeFileName.c_str(), "rb");
    if (fp == NULL) return false;
    fread(&m_iTreeNumber, sizeof(int), 1, fp);
    m_pTreeStart.resize(m_iTreeNumber);
    fread(m_pTreeStart.data(), sizeof(int), m_iTreeNumber, fp);
    int treeNodeSize;
    fread(&treeNodeSize, sizeof(int), 1, fp);
    m_pTreeRoots.resize(treeNodeSize);
    fread(m_pTreeRoots.data(), sizeof(BKTNode), treeNodeSize, fp);
    fclose(fp);
    std::cout << "Load BKT (" << m_iTreeNumber << "," << treeNodeSize
              << ") Finish!" << std::endl;
    return true;
  }
  // Seed the search priority queue with each tree's root (leaf-like roots) or
  // the root's children, keyed by distance to the query.
  template <typename T>
  void InitSearchTrees(const VectorIndex* p_index,
                       const COMMON::QueryResultSet<T>& p_query,
                       COMMON::WorkSpace& p_space) const {
    for (char i = 0; i < m_iTreeNumber; i++) {
      const BKTNode& node = m_pTreeRoots[m_pTreeStart[i]];
      if (node.childStart < 0) {
        p_space.m_SPTQueue.insert(COMMON::HeapCell(
            m_pTreeStart[i],
            p_index->ComputeDistance((const void*)p_query.GetTarget(),
                                     p_index->GetSample(node.centerid))));
      } else {
        for (int begin = node.childStart; begin < node.childEnd; begin++) {
          int index = m_pTreeRoots[begin].centerid;
          p_space.m_SPTQueue.insert(COMMON::HeapCell(
              begin, p_index->ComputeDistance((const void*)p_query.GetTarget(),
                                              p_index->GetSample(index))));
        }
      }
    }
  }
  // Best-first descent: pop the closest node, emit leaves into the result
  // queue, and push children until p_limits leaves have been checked.
  template <typename T>
  void SearchTrees(const VectorIndex* p_index,
                   const COMMON::QueryResultSet<T>& p_query,
                   COMMON::WorkSpace& p_space, const int p_limits) const {
    do {
      COMMON::HeapCell bcell = p_space.m_SPTQueue.pop();
      const BKTNode& tnode = m_pTreeRoots[bcell.node];
      if (tnode.childStart < 0) {
        // leaf (or degenerate-marker) node: count it toward the leaf budget
        if (!p_space.CheckAndSet(tnode.centerid)) {
          p_space.m_iNumberOfCheckedLeaves++;
          p_space.m_NGQueue.insert(
              COMMON::HeapCell(tnode.centerid, bcell.distance));
        }
        if (p_space.m_iNumberOfCheckedLeaves >= p_limits) break;
      } else {
        if (!p_space.CheckAndSet(tnode.centerid)) {
          p_space.m_NGQueue.insert(
              COMMON::HeapCell(tnode.centerid, bcell.distance));
        }
        for (int begin = tnode.childStart; begin < tnode.childEnd; begin++) {
          int index = m_pTreeRoots[begin].centerid;
          p_space.m_SPTQueue.insert(COMMON::HeapCell(
              begin, p_index->ComputeDistance((const void*)p_query.GetTarget(),
                                              p_index->GetSample(index))));
        }
      }
    } while (!p_space.m_SPTQueue.empty());
  }
 private:
  // One k-means assignment pass over indices[first, last): label each sample
  // with its nearest center (count-penalized by lambda to balance clusters)
  // and, if updateCenters, accumulate per-thread center sums. Returns the
  // total assignment distance.
  template <typename T>
  float KmeansAssign(VectorIndex* p_index, std::vector<int>& indices,
                     const int first, const int last, KmeansArgs<T>& args,
                     const bool updateCenters) const {
    float currDist = 0;
    // NOTE(review): called outside a parallel region this typically yields 1,
    // so the loop below would run single-threaded; confirm intent.
    int threads = omp_get_num_threads();
    float lambda = (updateCenters)
                       ? Utils::GetBase<T>() *
                             Utils::GetBase<T>() /
                             (100.0f * (last - first))
                       : 0.0f;
    int subsize = (last - first - 1) / threads + 1;
#pragma omp parallel for
    for (int tid = 0; tid < threads; tid++) {
      int istart = first + tid * subsize;
      int iend = min(first + (tid + 1) * subsize, last);
      // this thread's private slices of the shared scratch buffers
      int* inewCounts = args.newCounts + tid * m_iBKTKmeansK;
      float* inewCenters =
          args.newCenters + tid * m_iBKTKmeansK * p_index->GetFeatureDim();
      int* iclusterIdx = args.clusterIdx + tid * m_iBKTKmeansK;
      float* iclusterDist = args.clusterDist + tid * m_iBKTKmeansK;
      float idist = 0;
      for (int i = istart; i < iend; i++) {
        int clusterid = 0;
        float smallestDist = MaxDist;
        for (int k = 0; k < m_iBKTKmeansK; k++) {
          float dist =
              p_index->ComputeDistance(
                  p_index->GetSample(indices[i]),
                  (const void*)(args.centers + k * p_index->GetFeatureDim())) +
              lambda * args.counts[k];
          if (dist > -MaxDist && dist < smallestDist) {
            clusterid = k;
            smallestDist = dist;
          }
        }
        args.label[i] = clusterid;
        inewCounts[clusterid]++;
        idist += smallestDist;
        if (updateCenters) {
          const T* v = (const T*)p_index->GetSample(indices[i]);
          float* center = inewCenters + clusterid * p_index->GetFeatureDim();
          for (int j = 0; j < p_index->GetFeatureDim(); j++) center[j] += v[j];
          // track the farthest member as the cluster's representative
          if (smallestDist > iclusterDist[clusterid]) {
            iclusterDist[clusterid] = smallestDist;
            iclusterIdx[clusterid] = indices[i];
          }
        } else {
          // track the closest member instead
          if (smallestDist <= iclusterDist[clusterid]) {
            iclusterDist[clusterid] = smallestDist;
            iclusterIdx[clusterid] = indices[i];
          }
        }
      }
      Utils::atomic_float_add(&currDist, idist);
    }
    // reduce per-thread counts into thread 0's slice
    for (int i = 1; i < threads; i++) {
      for (int k = 0; k < m_iBKTKmeansK; k++)
        args.newCounts[k] += args.newCounts[i * m_iBKTKmeansK + k];
    }
    if (updateCenters) {
      // reduce per-thread center sums, then turn sums into means
      for (int i = 1; i < threads; i++) {
        float* currCenter =
            args.newCenters + i * m_iBKTKmeansK * p_index->GetFeatureDim();
        for (int j = 0; j < m_iBKTKmeansK * p_index->GetFeatureDim(); j++)
          args.newCenters[j] += currCenter[j];
      }
      // pick a replacement sample (from the largest cluster) for empty clusters
      int maxcluster = 0;
      for (int k = 1; k < m_iBKTKmeansK; k++)
        if (args.newCounts[maxcluster] < args.newCounts[k]) maxcluster = k;
      int maxid = maxcluster;
      for (int tid = 1; tid < threads; tid++) {
        if (args.clusterDist[maxid] <
            args.clusterDist[tid * m_iBKTKmeansK + maxcluster])
          maxid = tid * m_iBKTKmeansK + maxcluster;
      }
      if (args.clusterIdx[maxid] < 0 ||
          args.clusterIdx[maxid] >= p_index->GetNumSamples())
        std::cout << "first:" << first << " last:" << last
                  << " maxcluster:" << maxcluster << "("
                  << args.newCounts[maxcluster] << ") Error maxid:" << maxid
                  << " dist:" << args.clusterDist[maxid] << std::endl;
      maxid = args.clusterIdx[maxid];
      for (int k = 0; k < m_iBKTKmeansK; k++) {
        T* TCenter = args.newTCenters + k * p_index->GetFeatureDim();
        if (args.newCounts[k] == 0) {
          // int nextid = Utils::rand_int(last, first);
          // while (args.label[nextid] != maxcluster) nextid =
          // Utils::rand_int(last, first);
          int nextid = maxid;
          std::memcpy(TCenter, p_index->GetSample(nextid),
                      sizeof(T) * p_index->GetFeatureDim());
        } else {
          float* currCenters = args.newCenters + k * p_index->GetFeatureDim();
          for (int j = 0; j < p_index->GetFeatureDim(); j++)
            currCenters[j] /= args.newCounts[k];
          if (p_index->GetDistCalcMethod() == DistCalcMethod::Cosine) {
            Utils::Normalize(currCenters, p_index->GetFeatureDim(),
                             Utils::GetBase<T>());
          }
          for (int j = 0; j < p_index->GetFeatureDim(); j++)
            TCenter[j] = (T)(currCenters[j]);
        }
      }
    } else {
      // keep, per cluster, the best representative found by any thread
      for (int i = 1; i < threads; i++) {
        for (int k = 0; k < m_iBKTKmeansK; k++) {
          if (args.clusterIdx[i * m_iBKTKmeansK + k] != -1 &&
              args.clusterDist[i * m_iBKTKmeansK + k] <= args.clusterDist[k]) {
            args.clusterDist[k] = args.clusterDist[i * m_iBKTKmeansK + k];
            args.clusterIdx[k] = args.clusterIdx[i * m_iBKTKmeansK + k];
          }
        }
      }
    }
    return currDist;
  }
  // Full k-means over indices[first, last): 3 random restarts on a sample
  // batch to pick a seeding, Lloyd iterations until convergence or no
  // improvement, then a final full assignment. Returns the number of
  // non-empty clusters and leaves indices grouped by cluster (via Shuffle).
  template <typename T>
  int KmeansClustering(VectorIndex* p_index, std::vector<int>& indices,
                       const int first, const int last,
                       KmeansArgs<T>& args) const {
    int iterLimit = 100;
    int batchEnd = min(first + m_iSamples, last);
    float currDiff, currDist, minClusterDist = MaxDist;
    for (int numKmeans = 0; numKmeans < 3; numKmeans++) {
      for (int k = 0; k < m_iBKTKmeansK; k++) {
        int randid = Utils::rand_int(last, first);
        std::memcpy(args.centers + k * p_index->GetFeatureDim(),
                    p_index->GetSample(indices[randid]),
                    sizeof(T) * p_index->GetFeatureDim());
      }
      args.ClearCounts();
      currDist = KmeansAssign(p_index, indices, first, batchEnd, args, false);
      if (currDist < minClusterDist) {
        minClusterDist = currDist;
        memcpy(args.newTCenters, args.centers,
               sizeof(T) * m_iBKTKmeansK * p_index->GetFeatureDim());
        memcpy(args.counts, args.newCounts, sizeof(int) * m_iBKTKmeansK);
      }
    }
    minClusterDist = MaxDist;
    int noImprovement = 0;
    for (int iter = 0; iter < iterLimit; iter++) {
      std::memcpy(args.centers, args.newTCenters,
                  sizeof(T) * m_iBKTKmeansK * p_index->GetFeatureDim());
      std::random_shuffle(indices.begin() + first, indices.begin() + last);
      args.ClearCenters();
      args.ClearCounts();
      args.ClearDists(-MaxDist);
      currDist = KmeansAssign(p_index, indices, first, batchEnd, args, true);
      memcpy(args.counts, args.newCounts, sizeof(int) * m_iBKTKmeansK);
      // movement of the centers between iterations
      currDiff = 0;
      for (int k = 0; k < m_iBKTKmeansK; k++) {
        currDiff += p_index->ComputeDistance(
            (const void*)(args.centers + k * p_index->GetFeatureDim()),
            (const void*)(args.newTCenters + k * p_index->GetFeatureDim()));
      }
      if (currDist < minClusterDist) {
        noImprovement = 0;
        minClusterDist = currDist;
      } else {
        noImprovement++;
      }
      if (currDiff < 1e-3 || noImprovement >= 5) break;
    }
    // final assignment over the full range with the settled centers
    args.ClearCounts();
    args.ClearDists(MaxDist);
    currDist = KmeansAssign(p_index, indices, first, last, args, false);
    memcpy(args.counts, args.newCounts, sizeof(int) * m_iBKTKmeansK);
    int numClusters = 0;
    for (int i = 0; i < m_iBKTKmeansK; i++)
      if (args.counts[i] > 0) numClusters++;
    if (numClusters <= 1) {
      // if (last - first > 1) std::cout << "large cluster:" << last - first <<
      // " dist:" << currDist << std::endl;
      return numClusters;
    }
    args.Shuffle(indices, first, last);
    return numClusters;
  }
 private:
  std::vector<int> m_pTreeStart;             // root offset of each tree in m_pTreeRoots
  std::vector<BKTNode> m_pTreeRoots;         // all nodes of all trees, flat
  std::unordered_map<int, int> m_pSampleCenterMap;  // degenerate-cluster bookkeeping
  //absl::flat_hash_map<int, int> m_pSampleCenterMap;
 public:
  int m_iTreeNumber, m_iBKTKmeansK, m_iBKTLeafSize, m_iSamples;
};
} // COMMON
} // vsearch
#endif //VECTORSEARCH_THIRD_PARTY_SPTAG_BKTREE_H_
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% EEEEE N N H H AAA N N CCCC EEEEE %
% E NN N H H A A NN N C E %
% EEE N N N HHHHH AAAAA N N N C EEE %
% E N NN H H A A N NN C E %
% EEEEE N N H H A A N N CCCC EEEEE %
% %
% %
% MagickCore Image Enhancement Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/xml-tree.h"
#include "MagickCore/xml-tree-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o G a m m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoGammaImage() extracts the 'mean' from the image and adjusts the image
% to try to set its gamma appropriately.
%
% The format of the AutoGammaImage method is:
%
% MagickBooleanType AutoGammaImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image to auto-level
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  AutoGammaImage(): estimate a gamma from the image mean so that, after a
  LevelImage() gamma adjustment, the mean intensity lands near mid-gray
  (0.5).  With the default channel mask one gamma is applied to all
  channels; otherwise each updatable channel is corrected independently.
  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType AutoGammaImage(Image *image,
ExceptionInfo *exception)
{
double
gamma,
log_mean,
mean,
sans;
MagickStatusType
status;
register ssize_t
i;
/* Target: a corrected mean of 0.5 in normalized intensity. */
log_mean=log(0.5);
if (image->channel_mask == DefaultChannels)
{
/*
Apply gamma correction equally across all given channels.
*/
(void) GetImageMean(image,&mean,&sans,exception);
/* Solve (mean*QuantumScale)^(1/gamma) = 0.5 for gamma. */
gamma=log(mean*QuantumScale)/log_mean;
return(LevelImage(image,0.0,(double) QuantumRange,gamma,exception));
}
/*
Auto-gamma each channel separately.
*/
status=MagickTrue;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
ChannelType
channel_mask;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
/* Temporarily restrict the mask to this channel, level it, and then
restore the caller's mask. */
channel_mask=SetImageChannelMask(image,(ChannelType) (1UL << i));
status=GetImageMean(image,&mean,&sans,exception);
gamma=log(mean*QuantumScale)/log_mean;
status&=LevelImage(image,0.0,(double) QuantumRange,gamma,exception);
(void) SetImageChannelMask(image,channel_mask);
if (status == MagickFalse)
break;
}
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoLevelImage() adjusts the levels of a particular image channel by
% scaling the minimum and maximum values to the full quantum range.
%
% The format of the LevelImage method is:
%
% MagickBooleanType AutoLevelImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image to auto-level
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  AutoLevelImage(): stretch each channel's minimum and maximum to the
  full quantum range.  A thin wrapper around MinMaxStretchImage() with
  no black/white point clipping and a gamma of 1.0.
*/
MagickExport MagickBooleanType AutoLevelImage(Image *image,
ExceptionInfo *exception)
{
MagickBooleanType
status;

status=MinMaxStretchImage(image,0.0,0.0,1.0,exception);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B r i g h t n e s s C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BrightnessContrastImage() changes the brightness and/or contrast of an
% image. It converts the brightness and contrast parameters into slope and
% intercept and calls a polynomial function to apply to the image.
%
% The format of the BrightnessContrastImage method is:
%
% MagickBooleanType BrightnessContrastImage(Image *image,
% const double brightness,const double contrast,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o brightness: the brightness percent (-100 .. 100).
%
% o contrast: the contrast percent (-100 .. 100).
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  BrightnessContrastImage(): convert the brightness and contrast
  percentages (-100 .. 100) into the slope and intercept of a linear
  transfer function and apply it to the image as a first-degree
  polynomial.  Returns the status of the underlying FunctionImage()
  call.
*/
MagickExport MagickBooleanType BrightnessContrastImage(Image *image,
const double brightness,const double contrast,ExceptionInfo *exception)
{
#define BrightnessContastImageTag "BrightnessContast/Image"

double
coefficients[2],
slope;

MagickBooleanType
status;

assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/*
Map contrast percent onto a slope: 0% gives slope 1.0; negative slopes
are clamped to 0.0.
*/
slope=tan((double) (MagickPI*(contrast/100.0+1.0)/4.0));
if (slope < 0.0)
slope=0.0;
coefficients[0]=slope;
coefficients[1]=brightness/100.0+((100-brightness)/200.0)*(1.0-slope);
status=FunctionImage(image,PolynomialFunction,2,coefficients,exception);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C L A H E I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CLAHEImage() is a variant of adaptive histogram equalization in which the
% contrast amplification is limited, so as to reduce this problem of noise
% amplification.
%
% Adapted from implementation by Karel Zuiderveld, karel@cv.ruu.nl in
% "Graphics Gems IV", Academic Press, 1994.
%
% The format of the CLAHEImage method is:
%
% MagickBooleanType CLAHEImage(Image *image,const size_t width,
% const size_t height,const size_t number_bins,const double clip_limit,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the width of the tile divisions to use in horizontal direction.
%
% o height: the height of the tile divisions to use in vertical direction.
%
% o number_bins: number of bins for histogram ("dynamic range").
%
% o clip_limit: contrast limit for localised changes in contrast. A limit
% less than 1 results in standard non-contrast limited AHE.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Intensity range of the working gray buffer, in 16-bit gray levels;
  used to scale pixels into histogram bins and back.
*/
typedef struct _RangeInfo
{
unsigned short
min,
max;
} RangeInfo;
/*
  ClipCLAHEHistogram(): clip every histogram bin at clip_limit and
  redistribute the clipped (excess) counts back across the bins -- first
  an even share per bin, then any remainder one count at a time -- so the
  total histogram mass is preserved.  The clip limit is what distinguishes
  CLAHE from plain adaptive histogram equalization.
*/
static void ClipCLAHEHistogram(const double clip_limit,const size_t number_bins,
size_t *histogram)
{
#define NumberCLAHEGrays (65536)
register ssize_t
i;
size_t
cumulative_excess,
previous_excess,
step;
ssize_t
excess;
/*
Compute total number of excess pixels.
*/
cumulative_excess=0;
for (i=0; i < (ssize_t) number_bins; i++)
{
excess=(ssize_t) histogram[i]-(ssize_t) clip_limit;
if (excess > 0)
cumulative_excess+=excess;
}
/*
Clip histogram and redistribute excess pixels across all bins.
*/
step=cumulative_excess/number_bins;
/* Bins at or below this watermark can absorb a full 'step' without
exceeding the clip limit. */
excess=(ssize_t) (clip_limit-step);
for (i=0; i < (ssize_t) number_bins; i++)
{
if ((double) histogram[i] > clip_limit)
histogram[i]=(size_t) clip_limit;
else
if ((ssize_t) histogram[i] > excess)
{
/* Bin can only absorb part of a step: top it up to the limit. */
cumulative_excess-=histogram[i]-excess;
histogram[i]=(size_t) clip_limit;
}
else
{
cumulative_excess-=step;
histogram[i]+=step;
}
}
/*
Redistribute remaining excess.
*/
do
{
register size_t
*p;
size_t
*q;
previous_excess=cumulative_excess;
p=histogram;
q=histogram+number_bins;
while ((cumulative_excess != 0) && (p < q))
{
/* Spread the remainder thinly by visiting every 'step'-th bin. */
step=number_bins/cumulative_excess;
if (step < 1)
step=1;
for (p=histogram; (p < q) && (cumulative_excess != 0); p+=step)
if ((double) *p < clip_limit)
{
(*p)++;
cumulative_excess--;
}
p++;
}
/* Stop if a whole pass made no progress (all bins at the limit). */
} while ((cumulative_excess != 0) && (cumulative_excess < previous_excess));
}
/*
  GenerateCLAHEHistogram(): accumulate the gray-level histogram of one
  tile.  'pixels' points at the tile's top-left corner inside the padded
  CLAHE buffer, whose rows are clahe_info->width pixels wide; the first
  tile_info->width pixels of each of tile_info->height rows are
  classified through the lookup table into number_bins bins.
*/
static void GenerateCLAHEHistogram(const RectangleInfo *clahe_info,
const RectangleInfo *tile_info,const size_t number_bins,
const unsigned short *lut,const unsigned short *pixels,size_t *histogram)
{
ssize_t
x,
y;

/*
Reset the bins, then classify this tile's pixels into the histogram.
*/
for (y=0; y < (ssize_t) number_bins; y++)
histogram[y]=0L;
for (y=0; y < (ssize_t) tile_info->height; y++)
{
const unsigned short
*row;

/* Rows of the tile are clahe_info->width apart in the padded buffer. */
row=pixels+y*clahe_info->width;
for (x=0; x < (ssize_t) tile_info->width; x++)
histogram[lut[row[x]]]++;
}
}
/*
  InterpolateCLAHE(): remap one region of 'pixels' by bilinearly
  blending the four surrounding tile mappings (Q11, Q12, Q21, Q22),
  eliminating visible seams between independently equalized tiles.
  Rows of 'pixels' live inside a padded buffer clahe_info->width wide.
*/
static void InterpolateCLAHE(const RectangleInfo *clahe_info,const size_t *Q12,
const size_t *Q22,const size_t *Q11,const size_t *Q21,
const RectangleInfo *tile,const unsigned short *lut,unsigned short *pixels)
{
double
scale;

ssize_t
x,
y;

/*
The normalization by the tile area is loop-invariant, so hoist it.
*/
scale=PerceptibleReciprocal((double) tile->width*tile->height);
for (y=(ssize_t) tile->height; y > 0; y--)
{
for (x=(ssize_t) tile->width; x > 0; x--)
{
unsigned short
intensity;

intensity=lut[*pixels];
*pixels++=(unsigned short) (scale*(y*(x*Q12[intensity]+(tile->width-x)*
Q22[intensity])+(tile->height-y)*(x*Q11[intensity]+(tile->width-x)*
Q21[intensity])));
}
/*
Advance past the buffer padding to the start of the next row.
*/
pixels+=(clahe_info->width-tile->width);
}
}
/*
  GenerateCLAHELut(): build the lookup table that maps each input
  intensity in [range_info->min, range_info->max] onto a bin index in
  [0, number_bins-1] with a uniform bin width.
*/
static void GenerateCLAHELut(const RangeInfo *range_info,
const size_t number_bins,unsigned short *lut)
{
ssize_t
intensity;

unsigned short
bin_width;

/*
The +1 guarantees a non-zero divisor and keeps the top intensity
inside the last bin.
*/
bin_width=(unsigned short) ((range_info->max-range_info->min)/number_bins+1);
for (intensity=(ssize_t) range_info->min; intensity <= (ssize_t) range_info->max; intensity++)
lut[intensity]=(unsigned short) ((intensity-range_info->min)/bin_width);
}
/*
  MapCLAHEHistogram(): turn the (clipped) histogram into a cumulative
  distribution function rescaled to [range_info->min, range_info->max],
  clamped at the maximum intensity.  The result is the tile's gray-level
  remapping table.
*/
static void MapCLAHEHistogram(const RangeInfo *range_info,
const size_t number_bins,const size_t number_pixels,size_t *histogram)
{
double
cumulative,
scale;

ssize_t
i;

scale=(double) (range_info->max-range_info->min)/number_pixels;
cumulative=0.0;
for (i=0; i < (ssize_t) number_bins; i++)
{
cumulative+=histogram[i];
histogram[i]=(size_t) (range_info->min+scale*cumulative);
if (histogram[i] > range_info->max)
histogram[i]=range_info->max;
}
}
/*
  CLAHE(): contrast limited adaptive histogram equalization over a
  pre-padded 16-bit gray buffer.  The buffer is clahe_info->width x
  clahe_info->height pixels and divides evenly into clahe_info->x by
  clahe_info->y tiles of tile_info->width x tile_info->height.  A
  clipped, equalized mapping is built per tile, then every pixel is
  remapped by bilinear interpolation between the four nearest tile
  mappings.  Returns MagickFalse only on allocation failure.
*/
static MagickBooleanType CLAHE(const RectangleInfo *clahe_info,
const RectangleInfo *tile_info,const RangeInfo *range_info,
const size_t number_bins,const double clip_limit,unsigned short *pixels)
{
MemoryInfo
*tile_cache;
register unsigned short
*p;
size_t
limit,
*tiles;
ssize_t
y;
unsigned short
lut[NumberCLAHEGrays];
/*
Contrast limited adaptive histogram equalization.
*/
if (clip_limit == 1.0)
return(MagickTrue);
/* One histogram of number_bins counters per tile. */
tile_cache=AcquireVirtualMemory((size_t) clahe_info->x*clahe_info->y,
number_bins*sizeof(*tiles));
if (tile_cache == (MemoryInfo *) NULL)
return(MagickFalse);
tiles=(size_t *) GetVirtualMemoryBlob(tile_cache);
/* Translate the relative clip limit into an absolute per-bin count. */
limit=(size_t) (clip_limit*(tile_info->width*tile_info->height)/number_bins);
if (limit < 1UL)
limit=1UL;
/*
Generate greylevel mappings for each tile.
*/
GenerateCLAHELut(range_info,number_bins,lut);
p=pixels;
for (y=0; y < (ssize_t) clahe_info->y; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) clahe_info->x; x++)
{
size_t
*histogram;
histogram=tiles+(number_bins*(y*clahe_info->x+x));
GenerateCLAHEHistogram(clahe_info,tile_info,number_bins,lut,p,histogram);
ClipCLAHEHistogram((double) limit,number_bins,histogram);
/* The clipped histogram becomes this tile's mapping (its CDF). */
MapCLAHEHistogram(range_info,number_bins,tile_info->width*
tile_info->height,histogram);
p+=tile_info->width;
}
/* Advance from the last tile of this row to the first of the next. */
p+=clahe_info->width*(tile_info->height-1);
}
/*
Interpolate greylevel mappings to get CLAHE image.
*/
p=pixels;
for (y=0; y <= (ssize_t) clahe_info->y; y++)
{
OffsetInfo
offset;
RectangleInfo
tile;
register ssize_t
x;
/* Interior regions blend the mappings of tile rows y-1 and y; the
first and last rows use half-height regions clamped to the edge. */
tile.height=tile_info->height;
tile.y=y-1;
offset.y=tile.y+1;
if (y == 0)
{
/*
Top row.
*/
tile.height=tile_info->height >> 1;
tile.y=0;
offset.y=0;
}
else
if (y == (ssize_t) clahe_info->y)
{
/*
Bottom row.
*/
tile.height=(tile_info->height+1) >> 1;
tile.y=clahe_info->y-1;
offset.y=tile.y;
}
for (x=0; x <= (ssize_t) clahe_info->x; x++)
{
tile.width=tile_info->width;
tile.x=x-1;
offset.x=tile.x+1;
if (x == 0)
{
/*
Left column.
*/
tile.width=tile_info->width >> 1;
tile.x=0;
offset.x=0;
}
else
if (x == (ssize_t) clahe_info->x)
{
/*
Right column.
*/
tile.width=(tile_info->width+1) >> 1;
tile.x=clahe_info->x-1;
offset.x=tile.x;
}
InterpolateCLAHE(clahe_info,
tiles+(number_bins*(tile.y*clahe_info->x+tile.x)),  /* Q12 */
tiles+(number_bins*(tile.y*clahe_info->x+offset.x)),  /* Q22 */
tiles+(number_bins*(offset.y*clahe_info->x+tile.x)),  /* Q11 */
tiles+(number_bins*(offset.y*clahe_info->x+offset.x)),  /* Q21 */
&tile,lut,p);
p+=tile.width;
}
p+=clahe_info->width*(tile.height-1);
}
tile_cache=RelinquishVirtualMemory(tile_cache);
return(MagickTrue);
}
/*
  CLAHEImage(): apply contrast limited adaptive histogram equalization.
  The image is converted to Lab, its first channel is copied into a
  padded 16-bit working buffer, equalized tile-by-tile by CLAHE(),
  written back, and the original colorspace is restored.
*/
MagickExport MagickBooleanType CLAHEImage(Image *image,const size_t width,
const size_t height,const size_t number_bins,const double clip_limit,
ExceptionInfo *exception)
{
#define CLAHEImageTag "CLAHE/Image"
CacheView
*image_view;
ColorspaceType
colorspace;
MagickBooleanType
status;
MagickOffsetType
progress;
MemoryInfo
*pixel_cache;
RangeInfo
range_info;
RectangleInfo
clahe_info,
tile_info;
size_t
n;
ssize_t
y;
unsigned short
*pixels;
/*
Configure CLAHE parameters.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
range_info.min=0;
range_info.max=NumberCLAHEGrays-1;
/* Default tile size is 1/8th of the image in each dimension. */
tile_info.width=width;
if (tile_info.width == 0)
tile_info.width=image->columns >> 3;
tile_info.height=height;
if (tile_info.height == 0)
tile_info.height=image->rows >> 3;
/* tile_info.x/y hold the padding needed so the working buffer divides
evenly into tiles; half the padding goes on each side (see the
virtual-pixel reads below). */
tile_info.x=0;
if ((image->columns % tile_info.width) != 0)
tile_info.x=(ssize_t) tile_info.width-(image->columns % tile_info.width);
tile_info.y=0;
if ((image->rows % tile_info.height) != 0)
tile_info.y=(ssize_t) tile_info.height-(image->rows % tile_info.height);
/* clahe_info.x/y are the tile counts per dimension. */
clahe_info.width=image->columns+tile_info.x;
clahe_info.height=image->rows+tile_info.y;
clahe_info.x=(ssize_t) clahe_info.width/tile_info.width;
clahe_info.y=(ssize_t) clahe_info.height/tile_info.height;
pixel_cache=AcquireVirtualMemory(clahe_info.width,clahe_info.height*
sizeof(*pixels));
if (pixel_cache == (MemoryInfo *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
pixels=(unsigned short *) GetVirtualMemoryBlob(pixel_cache);
/* Equalize the first channel in Lab; the original colorspace is
restored before returning. */
colorspace=image->colorspace;
if (TransformImageColorspace(image,LabColorspace,exception) == MagickFalse)
{
pixel_cache=RelinquishVirtualMemory(pixel_cache);
return(MagickFalse);
}
/*
Initialize CLAHE pixels.
*/
image_view=AcquireVirtualCacheView(image,exception);
progress=0;
status=MagickTrue;
n=0;
for (y=0; y < (ssize_t) clahe_info.height; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
/* Read shifted by half the padding; the virtual cache view supplies
pixels outside the image bounds. */
p=GetCacheViewVirtualPixels(image_view,-(tile_info.x >> 1),y-
(tile_info.y >> 1),clahe_info.width,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) clahe_info.width; x++)
{
pixels[n++]=ScaleQuantumToShort(p[0]);
p+=GetPixelChannels(image);
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* NOTE(review): this loop is not an OpenMP parallel region, and the
progress total of 2*GetPixelChannels() does not match the row
counts driving these loops -- confirm intended. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,CLAHEImageTag,progress,2*
GetPixelChannels(image));
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
/* Clamp number_bins to [1,256]; 0 selects the default of 128. */
status=CLAHE(&clahe_info,&tile_info,&range_info,number_bins == 0 ?
(size_t) 128 : MagickMin(number_bins,256),clip_limit,pixels);
if (status == MagickFalse)
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
/*
Push CLAHE pixels to CLAHE image.
*/
image_view=AcquireAuthenticCacheView(image,exception);
/* Skip the top padding rows, then the left padding within each row. */
n=clahe_info.width*(tile_info.y >> 1);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
n+=tile_info.x >> 1;
for (x=0; x < (ssize_t) image->columns; x++)
{
q[0]=ScaleShortToQuantum(pixels[n++]);
q+=GetPixelChannels(image);
}
/* Skip the right padding plus the next row's left offset base. */
n+=(clahe_info.width-image->columns-(tile_info.x >> 1));
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,CLAHEImageTag,progress,2*
GetPixelChannels(image));
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
pixel_cache=RelinquishVirtualMemory(pixel_cache);
if (TransformImageColorspace(image,colorspace,exception) == MagickFalse)
status=MagickFalse;
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l u t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClutImage() replaces each color value in the given image, by using it as an
% index to lookup a replacement color value in a Color Look UP Table in the
% form of an image. The values are extracted along a diagonal of the CLUT
% image so either a horizontal or vertical gradient image can be used.
%
% Typically this is used to either re-color a gray-scale image according to a
% color gradient in the CLUT image, or to perform a freeform histogram
% (level) adjustment according to the (typically gray-scale) gradient in the
% CLUT image.
%
% When the 'channel' mask includes the matte/alpha transparency channel but
% one image has no such channel it is assumed that that image is a simple
% gray-scale image that will effect the alpha channel values, either for
% gray-scale coloring (with transparent or semi-transparent colors), or
% a histogram adjustment of existing alpha channel values. If both images
% have matte channels, direct and normal indexing is applied, which is rarely
% used.
%
% The format of the ClutImage method is:
%
% MagickBooleanType ClutImage(Image *image,Image *clut_image,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image, which is replaced by indexed CLUT values
%
% o clut_image: the color lookup table image for replacement color values.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ClutImage(): remap each updatable channel of every pixel through a
  lookup table built by sampling clut_image along its diagonal with the
  requested interpolation method.  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image,
const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ClutImageTag "Clut/Image"
CacheView
*clut_view,
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
*clut_map;
register ssize_t
i;
ssize_t adjust,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(clut_image != (Image *) NULL);
assert(clut_image->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
/* A color CLUT applied to a gray image promotes the image to sRGB. */
if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
(IsGrayColorspace(clut_image->colorspace) == MagickFalse))
(void) SetImageColorspace(image,sRGBColorspace,exception);
clut_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*clut_map));
if (clut_map == (PixelInfo *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Clut image.
*/
status=MagickTrue;
progress=0;
/* Non-integer interpolation samples up to the last row/column, hence
the -1 adjustment -- presumably to keep sampling inside the CLUT. */
adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 0 : 1);
clut_view=AcquireVirtualCacheView(clut_image,exception);
/* Precompute MaxMap+1 replacement colors along the CLUT diagonal. */
for (i=0; i <= (ssize_t) MaxMap; i++)
{
GetPixelInfo(clut_image,clut_map+i);
status=InterpolatePixelInfo(clut_image,clut_view,method,
(double) i*(clut_image->columns-adjust)/MaxMap,(double) i*
(clut_image->rows-adjust)/MaxMap,clut_map+i,exception);
if (status == MagickFalse)
break;
}
clut_view=DestroyCacheView(clut_view);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
PixelInfo
pixel;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
GetPixelInfo(image,&pixel);
for (x=0; x < (ssize_t) image->columns; x++)
{
PixelTrait
traits;
/* Remap every updatable channel through the precomputed table. */
GetPixelInfoPixel(image,q,&pixel);
traits=GetPixelChannelTraits(image,RedPixelChannel);
if ((traits & UpdatePixelTrait) != 0)
pixel.red=clut_map[ScaleQuantumToMap(ClampToQuantum(
pixel.red))].red;
traits=GetPixelChannelTraits(image,GreenPixelChannel);
if ((traits & UpdatePixelTrait) != 0)
pixel.green=clut_map[ScaleQuantumToMap(ClampToQuantum(
pixel.green))].green;
traits=GetPixelChannelTraits(image,BluePixelChannel);
if ((traits & UpdatePixelTrait) != 0)
pixel.blue=clut_map[ScaleQuantumToMap(ClampToQuantum(
pixel.blue))].blue;
traits=GetPixelChannelTraits(image,BlackPixelChannel);
if ((traits & UpdatePixelTrait) != 0)
pixel.black=clut_map[ScaleQuantumToMap(ClampToQuantum(
pixel.black))].black;
traits=GetPixelChannelTraits(image,AlphaPixelChannel);
if ((traits & UpdatePixelTrait) != 0)
pixel.alpha=clut_map[ScaleQuantumToMap(ClampToQuantum(
pixel.alpha))].alpha;
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ClutImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
clut_map=(PixelInfo *) RelinquishMagickMemory(clut_map);
/* If the CLUT carries alpha and alpha is updatable, enable it. */
if ((clut_image->alpha_trait != UndefinedPixelTrait) &&
((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0))
(void) SetImageAlphaChannel(image,ActivateAlphaChannel,exception);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r D e c i s i o n L i s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorDecisionListImage() accepts a lightweight Color Correction Collection
% (CCC) file which solely contains one or more color corrections and applies
% the correction to the image. Here is a sample CCC file:
%
% <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2">
% <ColorCorrection id="cc03345">
% <SOPNode>
% <Slope> 0.9 1.2 0.5 </Slope>
% <Offset> 0.4 -0.5 0.6 </Offset>
% <Power> 1.0 0.8 1.5 </Power>
% </SOPNode>
% <SATNode>
% <Saturation> 0.85 </Saturation>
% </SATNode>
% </ColorCorrection>
% </ColorCorrectionCollection>
%
% which includes the slope, offset, and power for each of the RGB channels
% as well as the saturation.
%
% The format of the ColorDecisionListImage method is:
%
% MagickBooleanType ColorDecisionListImage(Image *image,
% const char *color_correction_collection,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_correction_collection: the color correction collection in XML.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ColorDecisionListImage(): apply an ASC CDL color correction, parsed
  from a Color Correction Collection (CCC) XML document, to the image.
  Per channel: out = (in*slope + offset)^power; saturation then blends
  each channel toward a fixed-weight luma estimate:
  out = luma + saturation*(channel - luma).

  Fix: the PseudoClass (colormap) path now uses the same saturation
  formula as the DirectClass pixel path; it previously computed
  luma+saturation*value-luma, in which the luma terms cancel.

  Returns MagickTrue on success; MagickFalse on XML or allocation
  failure.
*/
MagickExport MagickBooleanType ColorDecisionListImage(Image *image,
const char *color_correction_collection,ExceptionInfo *exception)
{
#define ColorDecisionListCorrectImageTag "ColorDecisionList/Image"
typedef struct _Correction
{
double
slope,
offset,
power;
} Correction;
typedef struct _ColorCorrection
{
Correction
red,
green,
blue;
double
saturation;
} ColorCorrection;
CacheView
*image_view;
char
token[MagickPathExtent];
ColorCorrection
color_correction;
const char
*content,
*p;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
*cdl_map;
register ssize_t
i;
ssize_t
y;
XMLTreeInfo
*cc,
*ccc,
*sat,
*sop;
/*
Allocate and initialize cdl maps.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (color_correction_collection == (const char *) NULL)
return(MagickFalse);
ccc=NewXMLTree((const char *) color_correction_collection,exception);
if (ccc == (XMLTreeInfo *) NULL)
return(MagickFalse);
cc=GetXMLTreeChild(ccc,"ColorCorrection");
if (cc == (XMLTreeInfo *) NULL)
{
ccc=DestroyXMLTree(ccc);
return(MagickFalse);
}
/* Defaults: identity slope/power, zero offset and saturation. */
color_correction.red.slope=1.0;
color_correction.red.offset=0.0;
color_correction.red.power=1.0;
color_correction.green.slope=1.0;
color_correction.green.offset=0.0;
color_correction.green.power=1.0;
color_correction.blue.slope=1.0;
color_correction.blue.offset=0.0;
color_correction.blue.power=1.0;
color_correction.saturation=0.0;
sop=GetXMLTreeChild(cc,"SOPNode");
if (sop != (XMLTreeInfo *) NULL)
{
XMLTreeInfo
*offset,
*power,
*slope;
/* Each SOP element holds up to three comma/space separated values,
in red, green, blue order. */
slope=GetXMLTreeChild(sop,"Slope");
if (slope != (XMLTreeInfo *) NULL)
{
content=GetXMLTreeContent(slope);
p=(const char *) content;
for (i=0; (*p != '\0') && (i < 3); i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
switch (i)
{
case 0:
{
color_correction.red.slope=StringToDouble(token,(char **) NULL);
break;
}
case 1:
{
color_correction.green.slope=StringToDouble(token,
(char **) NULL);
break;
}
case 2:
{
color_correction.blue.slope=StringToDouble(token,
(char **) NULL);
break;
}
}
}
}
offset=GetXMLTreeChild(sop,"Offset");
if (offset != (XMLTreeInfo *) NULL)
{
content=GetXMLTreeContent(offset);
p=(const char *) content;
for (i=0; (*p != '\0') && (i < 3); i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
switch (i)
{
case 0:
{
color_correction.red.offset=StringToDouble(token,
(char **) NULL);
break;
}
case 1:
{
color_correction.green.offset=StringToDouble(token,
(char **) NULL);
break;
}
case 2:
{
color_correction.blue.offset=StringToDouble(token,
(char **) NULL);
break;
}
}
}
}
power=GetXMLTreeChild(sop,"Power");
if (power != (XMLTreeInfo *) NULL)
{
content=GetXMLTreeContent(power);
p=(const char *) content;
for (i=0; (*p != '\0') && (i < 3); i++)
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MagickPathExtent,token);
switch (i)
{
case 0:
{
color_correction.red.power=StringToDouble(token,(char **) NULL);
break;
}
case 1:
{
color_correction.green.power=StringToDouble(token,
(char **) NULL);
break;
}
case 2:
{
color_correction.blue.power=StringToDouble(token,
(char **) NULL);
break;
}
}
}
}
}
sat=GetXMLTreeChild(cc,"SATNode");
if (sat != (XMLTreeInfo *) NULL)
{
XMLTreeInfo
*saturation;
saturation=GetXMLTreeChild(sat,"Saturation");
if (saturation != (XMLTreeInfo *) NULL)
{
content=GetXMLTreeContent(saturation);
p=(const char *) content;
(void) GetNextToken(p,&p,MagickPathExtent,token);
color_correction.saturation=StringToDouble(token,(char **) NULL);
}
}
ccc=DestroyXMLTree(ccc);
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
"  Color Correction Collection:");
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
"  color_correction.red.slope: %g",color_correction.red.slope);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
"  color_correction.red.offset: %g",color_correction.red.offset);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
"  color_correction.red.power: %g",color_correction.red.power);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
"  color_correction.green.slope: %g",color_correction.green.slope);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
"  color_correction.green.offset: %g",color_correction.green.offset);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
"  color_correction.green.power: %g",color_correction.green.power);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
"  color_correction.blue.slope: %g",color_correction.blue.slope);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
"  color_correction.blue.offset: %g",color_correction.blue.offset);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
"  color_correction.blue.power: %g",color_correction.blue.power);
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
"  color_correction.saturation: %g",color_correction.saturation);
}
/* Precompute the per-channel (in*slope+offset)^power transfer maps. */
cdl_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map));
if (cdl_map == (PixelInfo *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
for (i=0; i <= (ssize_t) MaxMap; i++)
{
cdl_map[i].red=(double) ScaleMapToQuantum((double)
(MaxMap*(pow(color_correction.red.slope*i/MaxMap+
color_correction.red.offset,color_correction.red.power))));
cdl_map[i].green=(double) ScaleMapToQuantum((double)
(MaxMap*(pow(color_correction.green.slope*i/MaxMap+
color_correction.green.offset,color_correction.green.power))));
cdl_map[i].blue=(double) ScaleMapToQuantum((double)
(MaxMap*(pow(color_correction.blue.slope*i/MaxMap+
color_correction.blue.offset,color_correction.blue.power))));
}
if (image->storage_class == PseudoClass)
for (i=0; i < (ssize_t) image->colors; i++)
{
/*
Apply transfer function to colormap.
*/
double
luma;
/* Fixed-weight luma estimate of the colormap entry. */
luma=0.21267f*image->colormap[i].red+0.71526*image->colormap[i].green+
0.07217f*image->colormap[i].blue;
/* Same saturation blend as the DirectClass path below. */
image->colormap[i].red=luma+color_correction.saturation*(cdl_map[
ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red))].red-luma);
image->colormap[i].green=luma+color_correction.saturation*(cdl_map[
ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green))].green-luma);
image->colormap[i].blue=luma+color_correction.saturation*(cdl_map[
ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue))].blue-luma);
}
/*
Apply transfer function to image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
luma;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
luma=0.21267f*GetPixelRed(image,q)+0.71526*GetPixelGreen(image,q)+
0.07217f*GetPixelBlue(image,q);
SetPixelRed(image,ClampToQuantum(luma+color_correction.saturation*
(cdl_map[ScaleQuantumToMap(GetPixelRed(image,q))].red-luma)),q);
SetPixelGreen(image,ClampToQuantum(luma+color_correction.saturation*
(cdl_map[ScaleQuantumToMap(GetPixelGreen(image,q))].green-luma)),q);
SetPixelBlue(image,ClampToQuantum(luma+color_correction.saturation*
(cdl_map[ScaleQuantumToMap(GetPixelBlue(image,q))].blue-luma)),q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag,
progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
cdl_map=(PixelInfo *) RelinquishMagickMemory(cdl_map);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastImage() enhances the intensity differences between the lighter and
% darker elements of the image. Set sharpen to a MagickTrue to increase the
% image contrast otherwise the contrast is reduced.
%
% The format of the ContrastImage method is:
%
% MagickBooleanType ContrastImage(Image *image,
% const MagickBooleanType sharpen,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o sharpen: Increase or decrease image contrast.
%
% o exception: return any errors or warnings in this structure.
%
*/
static void Contrast(const int sign,double *red,double *green,double *blue)
{
  /*
    Push the pixel's brightness toward the extremes (sign > 0) or toward
    the middle (sign < 0) along a sinusoidal curve.  The adjustment is done
    in HSB space so hue and saturation are left untouched; the result is
    clamped to the valid [0.0,1.0] brightness range before converting back.
  */
  double
    brightness,
    hue,
    saturation;

  assert(red != (double *) NULL);
  assert(green != (double *) NULL);
  assert(blue != (double *) NULL);
  hue=0.0;
  saturation=0.0;
  brightness=0.0;
  ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
  /* Keep this expression exactly as written: its evaluation order defines
     the sigmoidal contrast curve. */
  brightness+=0.5*sign*(0.5*(sin((double) (MagickPI*(brightness-0.5)))+1.0)-
    brightness);
  if (brightness > 1.0)
    brightness=1.0;
  if (brightness < 0.0)
    brightness=0.0;
  ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}
MagickExport MagickBooleanType ContrastImage(Image *image,
  const MagickBooleanType sharpen,ExceptionInfo *exception)
{
#define ContrastImageTag  "Contrast/Image"

  CacheView
    *image_view;

  int
    sign;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Prefer the OpenCL-accelerated path; fall through to the CPU
    implementation below only if it declines or fails.
  */
  if (AccelerateContrastImage(image,sharpen,exception) != MagickFalse)
    return(MagickTrue);
#endif
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    sign selects the direction of the brightness adjustment performed by
    Contrast(): +1 increases contrast, -1 reduces it.
  */
  sign=sharpen != MagickFalse ? 1 : -1;
  if (image->storage_class == PseudoClass)
    {
      /*
        Contrast enhance colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        double
          blue,
          green,
          red;

        red=(double) image->colormap[i].red;
        green=(double) image->colormap[i].green;
        blue=(double) image->colormap[i].blue;
        Contrast(sign,&red,&green,&blue);
        image->colormap[i].red=(MagickRealType) red;
        image->colormap[i].green=(MagickRealType) green;
        image->colormap[i].blue=(MagickRealType) blue;
      }
    }
  /*
    Contrast enhance image: each row is processed independently, so rows
    are distributed across OpenMP threads when support is compiled in.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      blue,
      green,
      red;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* A failure in any thread makes the remaining rows no-ops. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      red=(double) GetPixelRed(image,q);
      green=(double) GetPixelGreen(image,q);
      blue=(double) GetPixelBlue(image,q);
      Contrast(sign,&red,&green,&blue);
      SetPixelRed(image,ClampToQuantum(red),q);
      SetPixelGreen(image,ClampToQuantum(green),q);
      SetPixelBlue(image,ClampToQuantum(blue),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ContrastImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastStretchImage() is a simple image enhancement technique that attempts
% to improve the contrast in an image by 'stretching' the range of intensity
% values it contains to span a desired range of values. It differs from the
% more sophisticated histogram equalization in that it can only apply a
% linear scaling function to the image pixel values. As a result the
% 'enhancement' is less harsh.
%
% The format of the ContrastStretchImage method is:
%
%      MagickBooleanType ContrastStretchImage(Image *image,
%        const double black_point,const double white_point,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: the black point.
%
% o white_point: the white point.
%
%      Note: the black and white points are expressed as pixel counts in the
%      range of 0 to number-of-pixels (e.g. 1%, 10x90%, etc.).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ContrastStretchImage(Image *image,
  const double black_point,const double white_point,ExceptionInfo *exception)
{
#define MaxRange(color)  ((double) ScaleQuantumToMap((Quantum) (color)))
#define ContrastStretchImageTag  "ContrastStretch/Image"

  CacheView
    *image_view;

  double
    *black,
    *histogram,
    *stretch_map,
    *white;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate histogram and stretch map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* If the image content is gray, switch to a gray colorspace first. */
  if (SetImageGray(image,exception) != MagickFalse)
    (void) SetImageColorspace(image,GRAYColorspace,exception);
  black=(double *) AcquireQuantumMemory(MaxPixelChannels,sizeof(*black));
  white=(double *) AcquireQuantumMemory(MaxPixelChannels,sizeof(*white));
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*histogram));
  stretch_map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*stretch_map));
  if ((black == (double *) NULL) || (white == (double *) NULL) ||
      (histogram == (double *) NULL) || (stretch_map == (double *) NULL))
    {
      /*
        Release whatever was successfully allocated before reporting the
        failure.
      */
      if (stretch_map != (double *) NULL)
        stretch_map=(double *) RelinquishMagickMemory(stretch_map);
      if (histogram != (double *) NULL)
        histogram=(double *) RelinquishMagickMemory(histogram);
      if (white != (double *) NULL)
        white=(double *) RelinquishMagickMemory(white);
      if (black != (double *) NULL)
        black=(double *) RelinquishMagickMemory(black);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram.
  */
  status=MagickTrue;
  (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      /*
        With the default channel mask every channel is binned by the pixel
        intensity; otherwise each channel is binned by its own value.
      */
      pixel=GetPixelIntensity(image,p);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        if (image->channel_mask != DefaultChannels)
          pixel=(double) p[i];
        histogram[GetPixelChannels(image)*ScaleQuantumToMap(
          ClampToQuantum(pixel))+i]++;
      }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Find the histogram boundaries by locating the black/white levels.
  */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      intensity;

    register ssize_t
      j;

    black[i]=0.0;
    white[i]=MaxRange(QuantumRange);
    /* Walk up from the dark end until black_point pixels are covered. */
    intensity=0.0;
    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      if (intensity > black_point)
        break;
    }
    black[i]=(double) j;
    /* Walk down from the bright end until white_point pixels are covered. */
    intensity=0.0;
    for (j=(ssize_t) MaxMap; j != 0; j--)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      if (intensity > ((double) image->columns*image->rows-white_point))
        break;
    }
    white[i]=(double) j;
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  /*
    Stretch the histogram to create the stretched image mapping: values at
    or below the black level map to 0, values at or above the white level
    map to QuantumRange, and values between are scaled linearly.
  */
  (void) memset(stretch_map,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*stretch_map));
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    register ssize_t
      j;

    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      double
        gamma;

      /*
        PerceptibleReciprocal() guards against a division by zero when the
        black and white levels coincide.
      */
      gamma=PerceptibleReciprocal(white[i]-black[i]);
      if (j < (ssize_t) black[i])
        stretch_map[GetPixelChannels(image)*j+i]=0.0;
      else
        if (j > (ssize_t) white[i])
          stretch_map[GetPixelChannels(image)*j+i]=(double) QuantumRange;
        else
          if (black[i] != white[i])
            stretch_map[GetPixelChannels(image)*j+i]=(double) ScaleMapToQuantum(
              (double) (MaxMap*gamma*(j-black[i])));
    }
  }
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        j;

      /*
        Stretch-contrast colormap.
      */
      for (j=0; j < (ssize_t) image->colors; j++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,RedPixelChannel);
            image->colormap[j].red=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+i];
          }
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,GreenPixelChannel);
            image->colormap[j].green=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+i];
          }
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,BluePixelChannel);
            image->colormap[j].blue=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+i];
          }
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,AlphaPixelChannel);
            image->colormap[j].alpha=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+i];
          }
      }
    }
  /*
    Stretch-contrast image: apply the per-channel lookup table row by row,
    in parallel when OpenMP support is compiled in.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* A degenerate (flat) channel is left unchanged. */
        if (black[j] == white[j])
          continue;
        q[j]=ClampToQuantum(stretch_map[GetPixelChannels(image)*
          ScaleQuantumToMap(q[j])+j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ContrastStretchImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  stretch_map=(double *) RelinquishMagickMemory(stretch_map);
  white=(double *) RelinquishMagickMemory(white);
  black=(double *) RelinquishMagickMemory(black);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E n h a n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EnhanceImage() applies a digital filter that improves the quality of a
% noisy image.
%
% The format of the EnhanceImage method is:
%
% Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
{
#define EnhanceImageTag  "Enhance/Image"
/*
  Accumulate neighbor pixel r into the weighted aggregate, but only when its
  color distance (a per-channel weighted squared difference against the
  center pixel) is below a fixed threshold.  Always advances r to the next
  pixel, whether or not it was accumulated.
*/
#define EnhancePixel(weight) \
  mean=QuantumScale*((double) GetPixelRed(image,r)+pixel.red)/2.0; \
  distance=QuantumScale*((double) GetPixelRed(image,r)-pixel.red); \
  distance_squared=(4.0+mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelGreen(image,r)+pixel.green)/2.0; \
  distance=QuantumScale*((double) GetPixelGreen(image,r)-pixel.green); \
  distance_squared+=(7.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelBlue(image,r)+pixel.blue)/2.0; \
  distance=QuantumScale*((double) GetPixelBlue(image,r)-pixel.blue); \
  distance_squared+=(5.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelBlack(image,r)+pixel.black)/2.0; \
  distance=QuantumScale*((double) GetPixelBlack(image,r)-pixel.black); \
  distance_squared+=(5.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelAlpha(image,r)+pixel.alpha)/2.0; \
  distance=QuantumScale*((double) GetPixelAlpha(image,r)-pixel.alpha); \
  distance_squared+=(5.0-mean)*distance*distance; \
  if (distance_squared < 0.069) \
    { \
      aggregate.red+=(weight)*GetPixelRed(image,r); \
      aggregate.green+=(weight)*GetPixelGreen(image,r); \
      aggregate.blue+=(weight)*GetPixelBlue(image,r); \
      aggregate.black+=(weight)*GetPixelBlack(image,r); \
      aggregate.alpha+=(weight)*GetPixelAlpha(image,r); \
      total_weight+=(weight); \
    } \
  r+=GetPixelChannels(image);

  CacheView
    *enhance_view,
    *image_view;

  Image
    *enhance_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize enhanced image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  enhance_image=CloneImage(image,0,0,MagickTrue,
    exception);
  if (enhance_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(enhance_image,DirectClass,exception) == MagickFalse)
    {
      enhance_image=DestroyImage(enhance_image);
      return((Image *) NULL);
    }
  /*
    Enhance image: each output pixel is a weighted mean of a 5x5
    neighborhood, restricted to neighbors whose color is close to the
    center pixel (see EnhancePixel above).
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  enhance_view=AcquireAuthenticCacheView(enhance_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,enhance_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      center;

    if (status == MagickFalse)
      continue;
    /*
      Read a 5-row window, two pixels/rows of border on each side; the
      virtual cache view supplies the out-of-bounds pixels at the edges.
    */
    p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception);
    q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* Offset of the window's center pixel: 2 rows down, 2 columns in. */
    center=(ssize_t) GetPixelChannels(image)*(2*(image->columns+4)+2);
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        distance,
        distance_squared,
        mean,
        total_weight;

      PixelInfo
        aggregate;

      register const Quantum
        *magick_restrict r;

      GetPixelInfo(image,&aggregate);
      total_weight=0.0;
      GetPixelInfoPixel(image,p+center,&pixel);
      /*
        Walk the 5x5 kernel row by row; the weights are symmetric around
        the center weight of 80.
      */
      r=p;
      EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
        EnhancePixel(8.0); EnhancePixel(5.0);
      r=p+GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
        EnhancePixel(20.0); EnhancePixel(8.0);
      r=p+2*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(10.0); EnhancePixel(40.0); EnhancePixel(80.0);
        EnhancePixel(40.0); EnhancePixel(10.0);
      r=p+3*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
        EnhancePixel(20.0); EnhancePixel(8.0);
      r=p+4*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
        EnhancePixel(8.0); EnhancePixel(5.0);
      /*
        Normalize by the accumulated weight (with rounding bias); if no
        neighbor qualified, the center pixel is copied through unchanged.
      */
      if (total_weight > MagickEpsilon)
        {
          pixel.red=((aggregate.red+total_weight/2.0)/total_weight);
          pixel.green=((aggregate.green+total_weight/2.0)/total_weight);
          pixel.blue=((aggregate.blue+total_weight/2.0)/total_weight);
          pixel.black=((aggregate.black+total_weight/2.0)/total_weight);
          pixel.alpha=((aggregate.alpha+total_weight/2.0)/total_weight);
        }
      SetPixelViaPixelInfo(enhance_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(enhance_image);
    }
    if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,EnhanceImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  enhance_view=DestroyCacheView(enhance_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    enhance_image=DestroyImage(enhance_image);
  return(enhance_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E q u a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EqualizeImage() applies a histogram equalization to the image.
%
% The format of the EqualizeImage method is:
%
% MagickBooleanType EqualizeImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType EqualizeImage(Image *image,
  ExceptionInfo *exception)
{
#define EqualizeImageTag  "Equalize/Image"

  CacheView
    *image_view;

  double
    black[CompositePixelChannel+1],
    *equalize_map,
    *histogram,
    *map,
    white[CompositePixelChannel+1];

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize histogram arrays.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated path when it is available and succeeds. */
  if (AccelerateEqualizeImage(image,exception) != MagickFalse)
    return(MagickTrue);
#endif
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  equalize_map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*equalize_map));
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*histogram));
  map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*sizeof(*map));
  if ((equalize_map == (double *) NULL) || (histogram == (double *) NULL) ||
      (map == (double *) NULL))
    {
      /* Release whatever was allocated before reporting the failure. */
      if (map != (double *) NULL)
        map=(double *) RelinquishMagickMemory(map);
      if (histogram != (double *) NULL)
        histogram=(double *) RelinquishMagickMemory(histogram);
      if (equalize_map != (double *) NULL)
        equalize_map=(double *) RelinquishMagickMemory(equalize_map);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram.
  */
  status=MagickTrue;
  (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          intensity;

        /*
          When channels are synced, every channel is binned by the pixel
          intensity; otherwise each channel is binned by its own value.
        */
        intensity=(double) p[i];
        if ((image->channel_mask & SyncChannels) != 0)
          intensity=GetPixelIntensity(image,p);
        histogram[GetPixelChannels(image)*ScaleQuantumToMap(
          ClampToQuantum(intensity))+i]++;
      }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Integrate the histogram to get the equalization map (the per-channel
    cumulative distribution function).
  */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      intensity;

    register ssize_t
      j;

    intensity=0.0;
    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      map[GetPixelChannels(image)*j+i]=intensity;
    }
  }
  (void) memset(equalize_map,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*equalize_map));
  /*
    Fix: zero the whole black/white arrays.  The previous
    memset(black,0,sizeof(*black)) zeroed only the first element (one
    double), leaving the remaining entries uninitialized.
  */
  (void) memset(black,0,sizeof(black));
  (void) memset(white,0,sizeof(white));
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    register ssize_t
      j;

    /*
      black/white are the CDF values at the darkest and brightest bins;
      normalize the CDF between them into the quantum range.
    */
    black[i]=map[i];
    white[i]=map[GetPixelChannels(image)*MaxMap+i];
    if (black[i] != white[i])
      for (j=0; j <= (ssize_t) MaxMap; j++)
        equalize_map[GetPixelChannels(image)*j+i]=(double)
          ScaleMapToQuantum((double) ((MaxMap*(map[
          GetPixelChannels(image)*j+i]-black[i]))/(white[i]-black[i])));
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  map=(double *) RelinquishMagickMemory(map);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        j;

      /*
        Equalize colormap.
      */
      for (j=0; j < (ssize_t) image->colors; j++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel = GetPixelChannelChannel(image,
              RedPixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].red=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+
                channel];
          }
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel = GetPixelChannelChannel(image,
              GreenPixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].green=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+
                channel];
          }
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel = GetPixelChannelChannel(image,
              BluePixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].blue=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+
                channel];
          }
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel = GetPixelChannelChannel(image,
              AlphaPixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].alpha=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+
                channel];
          }
      }
    }
  /*
    Equalize image: apply the per-channel lookup table row by row, in
    parallel when OpenMP support is compiled in.
  */
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        /* Skip non-updated channels and degenerate (flat) channels. */
        if (((traits & UpdatePixelTrait) == 0) || (black[j] == white[j]))
          continue;
        q[j]=ClampToQuantum(equalize_map[GetPixelChannels(image)*
          ScaleQuantumToMap(q[j])+j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,EqualizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  equalize_map=(double *) RelinquishMagickMemory(equalize_map);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a m m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GammaImage() gamma-corrects a particular image channel. The same
% image viewed on different devices will have perceptual differences in the
% way the image's intensities are represented on the screen. Specify
% individual gamma levels for the red, green, and blue channels, or adjust
% all three with the gamma parameter. Values typically range from 0.8 to 2.3.
%
% You can also reduce the influence of a particular channel with a gamma
% value of 0.
%
% The format of the GammaImage method is:
%
% MagickBooleanType GammaImage(Image *image,const double gamma,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o gamma: the image gamma; applied to every channel that has the update
%      trait set.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static inline double gamma_pow(const double value,const double gamma)
{
return(value < 0.0 ? value : pow(value,gamma));
}
MagickExport MagickBooleanType GammaImage(Image *image,const double gamma,
  ExceptionInfo *exception)
{
#define GammaImageTag  "Gamma/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  Quantum
    *gamma_map;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize gamma maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* gamma of 1.0 is the identity transform; nothing to do. */
  if (gamma == 1.0)
    return(MagickTrue);
  gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map));
  if (gamma_map == (Quantum *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) memset(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map));
  /*
    Build the lookup table: map[i] = MaxMap*(i/MaxMap)^(1/gamma).  For
    gamma == 0.0 the table is left all-zero (from the memset above), which
    maps every updated channel to 0.
  */
  if (gamma != 0.0)
    for (i=0; i <= (ssize_t) MaxMap; i++)
      gamma_map[i]=ScaleMapToQuantum((double) (MaxMap*pow((double) i/
        MaxMap,1.0/gamma)));
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Gamma-correct colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].red))];
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].green))];
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].blue))];
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].alpha))];
    }
  /*
    Gamma-correct image: apply the lookup table row by row, in parallel
    when OpenMP support is compiled in.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=gamma_map[ScaleQuantumToMap(ClampToQuantum((MagickRealType)
          q[j]))];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,GammaImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map);
  /* Record the cumulative gamma in the image metadata. */
  if (image->gamma != 0.0)
    image->gamma*=gamma;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GrayscaleImage() converts the image to grayscale.
%
% The format of the GrayscaleImage method is:
%
% MagickBooleanType GrayscaleImage(Image *image,
% const PixelIntensityMethod method ,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the pixel intensity method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GrayscaleImage(Image *image,
  const PixelIntensityMethod method,ExceptionInfo *exception)
{
#define GrayscaleImageTag "Grayscale/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /*
        Flush colormap-based pixels into the cache and promote the image to
        DirectClass so each pixel can be rewritten independently below.
      */
      if (SyncImage(image,exception) == MagickFalse)
        return(MagickFalse);
      if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
        return(MagickFalse);
    }
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Prefer the OpenCL-accelerated path when available; on success only the
    intensity/type/colorspace bookkeeping remains to be done here.
  */
  if (AccelerateGrayscaleImage(image,method,exception) != MagickFalse)
    {
      image->intensity=method;
      image->type=GrayscaleType;
      if ((method == Rec601LuminancePixelIntensityMethod) ||
          (method == Rec709LuminancePixelIntensityMethod))
        return(SetImageColorspace(image,LinearGRAYColorspace,exception));
      return(SetImageColorspace(image,GRAYColorspace,exception));
    }
#endif
  /*
    Grayscale image: replace each pixel's gray channel with an intensity
    derived from its red/green/blue channels per the requested method.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* a prior row failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        blue,
        green,
        red,
        intensity;

      red=(MagickRealType) GetPixelRed(image,q);
      green=(MagickRealType) GetPixelGreen(image,q);
      blue=(MagickRealType) GetPixelBlue(image,q);
      intensity=0.0;
      switch (method)
      {
        case AveragePixelIntensityMethod:
        {
          /* arithmetic mean of the three channels */
          intensity=(red+green+blue)/3.0;
          break;
        }
        case BrightnessPixelIntensityMethod:
        {
          /* maximum channel value (HSB brightness) */
          intensity=MagickMax(MagickMax(red,green),blue);
          break;
        }
        case LightnessPixelIntensityMethod:
        {
          /* midpoint of the min and max channels (HSL lightness) */
          intensity=(MagickMin(MagickMin(red,green),blue)+
            MagickMax(MagickMax(red,green),blue))/2.0;
          break;
        }
        case MSPixelIntensityMethod:
        {
          /* mean of the squared channel values */
          intensity=(MagickRealType) (((double) red*red+green*green+
            blue*blue)/3.0);
          break;
        }
        case Rec601LumaPixelIntensityMethod:
        {
          /*
            Rec. 601 luma weights apply to gamma-encoded values, so encode
            first when the image is linear RGB.
          */
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec601LuminancePixelIntensityMethod:
        {
          /*
            Rec. 601 luminance weights apply to linear values, so decode
            first when the image is sRGB.
          */
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec709LumaPixelIntensityMethod:
        default:
        {
          /* Rec. 709 luma also serves as the default intensity method */
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case Rec709LuminancePixelIntensityMethod:
        {
          /* Rec. 709 luminance: linear-domain weighting */
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case RMSPixelIntensityMethod:
        {
          /* root mean square of the channel values */
          intensity=(MagickRealType) (sqrt((double) red*red+green*green+
            blue*blue)/sqrt(3.0));
          break;
        }
      }
      SetPixelGray(image,ClampToQuantum(intensity),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        /*
          NOTE(review): progress is incremented atomically but read outside
          the atomic region, so the value passed to the monitor may lag
          slightly under threading.
        */
        proceed=SetImageProgress(image,GrayscaleImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  image->intensity=method;
  image->type=GrayscaleType;
  /*
    Luminance methods yield linear gray; luma methods yield encoded gray.
  */
  if ((method == Rec601LuminancePixelIntensityMethod) ||
      (method == Rec709LuminancePixelIntensityMethod))
    return(SetImageColorspace(image,LinearGRAYColorspace,exception));
  return(SetImageColorspace(image,GRAYColorspace,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% H a l d C l u t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% HaldClutImage() applies a Hald color lookup table to the image. A Hald
% color lookup table is a 3-dimensional color cube mapped to 2 dimensions.
% Create it with the HALD coder. You can apply any color transformation to
% the Hald image and then use this method to apply the transform to the
% image.
%
% The format of the HaldClutImage method is:
%
% MagickBooleanType HaldClutImage(Image *image,Image *hald_image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image, which is replaced by indexed CLUT values
%
% o hald_image: the color lookup table image for replacement color values.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType HaldClutImage(Image *image,
  const Image *hald_image,ExceptionInfo *exception)
{
#define HaldClutImageTag "Clut/Image"

  /* Continuous coordinates of a pixel inside the Hald color cube. */
  typedef struct _HaldInfo
  {
    double
      x,
      y,
      z;
  } HaldInfo;

  CacheView
    *hald_view,
    *image_view;

  double
    width;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    zero;

  size_t
    cube_size,
    length,
    level;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(hald_image != (Image *) NULL);
  assert(hald_image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  /*
    Hald clut image.
  */
  status=MagickTrue;
  progress=0;
  /*
    Recover the cube geometry from the CLUT image: find the smallest level
    whose cube reaches the CLUT's smaller dimension, square it to get the
    cube side, and use cube_size (side^2) as the z-slice stride.
  */
  length=(size_t) MagickMin((MagickRealType) hald_image->columns,
    (MagickRealType) hald_image->rows);
  for (level=2; (level*level*level) < length; level++) ;
  level*=level;
  cube_size=level*level;
  width=(double) hald_image->columns;
  GetPixelInfo(hald_image,&zero);
  hald_view=AcquireVirtualCacheView(hald_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        offset;

      HaldInfo
        point;

      PixelInfo
        pixel,
        pixel1,
        pixel2,
        pixel3,
        pixel4;

      /*
        Map RGB into cube coordinates; offset is the linear index of the
        cell below the point, and the fractional parts drive interpolation.
      */
      point.x=QuantumScale*(level-1.0)*GetPixelRed(image,q);
      point.y=QuantumScale*(level-1.0)*GetPixelGreen(image,q);
      point.z=QuantumScale*(level-1.0)*GetPixelBlue(image,q);
      offset=point.x+level*floor(point.y)+cube_size*floor(point.z);
      point.x-=floor(point.x);
      point.y-=floor(point.y);
      point.z-=floor(point.z);
      /*
        Trilinear lookup: blend two samples along y in the lower z-slice...
      */
      pixel1=zero;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset,width),floor(offset/width),&pixel1,exception);
      if (status == MagickFalse)
        break;
      pixel2=zero;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception);
      if (status == MagickFalse)
        break;
      pixel3=zero;
      CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha,
        point.y,&pixel3);
      /*
        ...then two samples along y in the upper z-slice...
      */
      offset+=cube_size;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset,width),floor(offset/width),&pixel1,exception);
      if (status == MagickFalse)
        break;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception);
      if (status == MagickFalse)
        break;
      pixel4=zero;
      CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha,
        point.y,&pixel4);
      /*
        ...and finally blend the two slice results along z.
      */
      pixel=zero;
      CompositePixelInfoAreaBlend(&pixel3,pixel3.alpha,&pixel4,pixel4.alpha,
        point.z,&pixel);
      /*
        Store only channels flagged for update; black and alpha also
        require the matching colorspace/trait.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        SetPixelRed(image,ClampToQuantum(pixel.red),q);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        SetPixelGreen(image,ClampToQuantum(pixel.green),q);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        SetPixelBlue(image,ClampToQuantum(pixel.blue),q);
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelBlack(image,ClampToQuantum(pixel.black),q);
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait != UndefinedPixelTrait))
        SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,HaldClutImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  hald_view=DestroyCacheView(hald_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImage() adjusts the levels of a particular image channel by
% scaling the colors falling between specified white and black points to
% the full available quantum range.
%
% The parameters provided represent the black and white points.  The black
% point specifies the darkest color in the image. Colors darker than the
% black point are set to zero. White point specifies the lightest color in
% the image. Colors brighter than the white point are set to the maximum
% quantum value.
%
% If a '!' flag is given, map black and white colors to the given levels
% rather than mapping those levels to black and white. See
% LevelizeImage() below.
%
% Gamma specifies a gamma correction to apply to the image.
%
% The format of the LevelImage method is:
%
% MagickBooleanType LevelImage(Image *image,const double black_point,
% const double white_point,const double gamma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: The level to map zero (black) to.
%
% o white_point: The level to map QuantumRange (white) to.
%
% o gamma: the gamma correction to apply to the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double LevelPixel(const double black_point,
  const double white_point,const double gamma,const double pixel)
{
  /*
    Map [black_point,white_point] onto the full quantum range, applying the
    inverse-gamma correction; out-of-range inputs extrapolate and are
    clamped by the caller.
  */
  const double
    range_scale = PerceptibleReciprocal(white_point-black_point);

  return(QuantumRange*gamma_pow(range_scale*((double) pixel-black_point),
    1.0/gamma));
}
MagickExport MagickBooleanType LevelImage(Image *image,const double black_point,
  const double white_point,const double gamma,ExceptionInfo *exception)
{
#define LevelImageTag "Level/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap: transform each enabled channel of every colormap
        entry through LevelPixel.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].red));
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].green));
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].blue));
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].alpha));
    }
  /*
    Level image: transform every update-enabled channel of every pixel.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;  /* channel not selected for update */
        q[j]=ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (double) q[j]));
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,LevelImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /* clamp any remaining out-of-range values (e.g. under HDRI builds) */
  (void) ClampImage(image,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelizeImage() applies the reversed LevelImage() operation to just
% the specific channels specified. It compresses the full range of color
% values, so that they lie between the given black and white points. Gamma is
% applied before the values are mapped.
%
% LevelizeImage() can be called by using a +level command line
% API option, or using a '!' on a -level or LevelImage() geometry string.
%
% It can be used to de-contrast a greyscale image to the exact levels
% specified. Or by using specific levels for each channel of an image you
% can convert a gray-scale image to any linear color gradient, according to
% those levels.
%
% The format of the LevelizeImage method is:
%
% MagickBooleanType LevelizeImage(Image *image,const double black_point,
% const double white_point,const double gamma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: The level to map zero (black) to.
%
% o white_point: The level to map QuantumRange (white) to.
%
% o gamma: adjust gamma by this factor before mapping values.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LevelizeImage(Image *image,
  const double black_point,const double white_point,const double gamma,
  ExceptionInfo *exception)
{
#define LevelizeImageTag "Levelize/Image"
/*
  Inverse of LevelPixel: gamma-correct the normalized value, then compress
  the full quantum range into [black_point,white_point].
*/
#define LevelizeValue(x) ClampToQuantum(((MagickRealType) gamma_pow((double) \
  (QuantumScale*(x)),gamma))*(white_point-black_point)+black_point)

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap: apply LevelizeValue to each enabled channel of
        every colormap entry.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) LevelizeValue(image->colormap[i].red);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) LevelizeValue(
          image->colormap[i].green);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) LevelizeValue(image->colormap[i].blue);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) LevelizeValue(
          image->colormap[i].alpha);
    }
  /*
    Level image: apply LevelizeValue to every update-enabled channel.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;  /* channel not selected for update */
        q[j]=LevelizeValue(q[j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,LevelizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImageColors() maps the given color to "black" and "white" values,
% linearly spreading out the colors, and level values on a channel by channel
% bases, as per LevelImage(). The given colors allows you to specify
% different level ranges for each of the color channels separately.
%
% If the boolean 'invert' is set true, the image values will be modified in
% the reverse direction.  That is, any existing "black" and "white" colors in
% the image will become the color values given, with all other values
% compressed appropriately.  This effectively maps a greyscale gradient into
% the given color gradient.
%
% The format of the LevelImageColors method is:
%
% MagickBooleanType LevelImageColors(Image *image,
% const PixelInfo *black_color,const PixelInfo *white_color,
% const MagickBooleanType invert,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_color: The color to map black to/from
%
% o white_color: The color to map white to/from
%
% o invert: if true map the colors (levelize), rather than from (level)
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LevelImageColors(Image *image,
const PixelInfo *black_color,const PixelInfo *white_color,
const MagickBooleanType invert,ExceptionInfo *exception)
{
ChannelType
channel_mask;
MagickStatusType
status;
/*
Allocate and initialize levels map.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
((IsGrayColorspace(black_color->colorspace) == MagickFalse) ||
(IsGrayColorspace(white_color->colorspace) == MagickFalse)))
(void) SetImageColorspace(image,sRGBColorspace,exception);
status=MagickTrue;
if (invert == MagickFalse)
{
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,RedChannel);
status&=LevelImage(image,black_color->red,white_color->red,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,GreenChannel);
status&=LevelImage(image,black_color->green,white_color->green,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,BlueChannel);
status&=LevelImage(image,black_color->blue,white_color->blue,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
{
channel_mask=SetImageChannelMask(image,BlackChannel);
status&=LevelImage(image,black_color->black,white_color->black,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
{
channel_mask=SetImageChannelMask(image,AlphaChannel);
status&=LevelImage(image,black_color->alpha,white_color->alpha,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
}
else
{
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,RedChannel);
status&=LevelizeImage(image,black_color->red,white_color->red,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,GreenChannel);
status&=LevelizeImage(image,black_color->green,white_color->green,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,BlueChannel);
status&=LevelizeImage(image,black_color->blue,white_color->blue,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
{
channel_mask=SetImageChannelMask(image,BlackChannel);
status&=LevelizeImage(image,black_color->black,white_color->black,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
{
channel_mask=SetImageChannelMask(image,AlphaChannel);
status&=LevelizeImage(image,black_color->alpha,white_color->alpha,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
}
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i n e a r S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LinearStretchImage() discards any pixels below the black point and above
% the white point and levels the remaining pixels.
%
% The format of the LinearStretchImage method is:
%
% MagickBooleanType LinearStretchImage(Image *image,
% const double black_point,const double white_point,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: the black point.
%
% o white_point: the white point.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LinearStretchImage(Image *image,
  const double black_point,const double white_point,ExceptionInfo *exception)
{
#define LinearStretchImageTag "LinearStretch/Image"

  CacheView
    *image_view;

  double
    *histogram,
    intensity;

  MagickBooleanType
    status;

  ssize_t
    black,
    white,
    y;

  /*
    Allocate histogram and linear map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*histogram));
  if (histogram == (double *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Form histogram: bucket every pixel by its clamped intensity.
  */
  (void) memset(histogram,0,(MaxMap+1)*sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      intensity=GetPixelIntensity(image,p);
      histogram[ScaleQuantumToMap(ClampToQuantum(intensity))]++;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Find the histogram boundaries by locating the black and white point levels.
    NOTE(review): black_point/white_point are compared against accumulated
    histogram counts, so they act as pixel-count thresholds, not intensity
    levels — confirm against callers.
  */
  intensity=0.0;
  for (black=0; black < (ssize_t) MaxMap; black++)
  {
    intensity+=histogram[black];
    if (intensity >= black_point)
      break;
  }
  intensity=0.0;
  for (white=(ssize_t) MaxMap; white != 0; white--)
  {
    intensity+=histogram[white];
    if (intensity >= white_point)
      break;
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  /*
    Stretch the surviving [black,white] interval to the full quantum range.
  */
  status=LevelImage(image,(double) ScaleMapToQuantum((MagickRealType) black),
    (double) ScaleMapToQuantum((MagickRealType) white),1.0,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d u l a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModulateImage() lets you control the brightness, saturation, and hue
% of an image. Modulate represents the brightness, saturation, and hue
% as one parameter (e.g. 90,150,100). If the image colorspace is HSL, the
% modulation is lightness, saturation, and hue. For HWB, use blackness,
% whiteness, and hue. And for HCL, use chrome, luma, and hue.
%
% The format of the ModulateImage method is:
%
% MagickBooleanType ModulateImage(Image *image,const char *modulate,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o modulate: Define the percent change in brightness, saturation, and hue.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void ModulateHCL(const double percent_hue,
  const double percent_chroma,const double percent_luma,double *red,
  double *green,double *blue)
{
  double
    c,
    h,
    l;

  /*
    Modulate in HCL space: scale chroma and luma by their percentages and
    offset the hue, then map the result back to RGB.
  */
  ConvertRGBToHCL(*red,*green,*blue,&h,&c,&l);
  c*=0.01*percent_chroma;
  l*=0.01*percent_luma;
  h+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertHCLToRGB(h,c,l,red,green,blue);
}
static inline void ModulateHCLp(const double percent_hue,
  const double percent_chroma,const double percent_luma,double *red,
  double *green,double *blue)
{
  double
    c,
    h,
    l;

  /*
    Modulate in HCLp space: scale chroma and luma by their percentages and
    offset the hue, then map the result back to RGB.
  */
  ConvertRGBToHCLp(*red,*green,*blue,&h,&c,&l);
  c*=0.01*percent_chroma;
  l*=0.01*percent_luma;
  h+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertHCLpToRGB(h,c,l,red,green,blue);
}
static inline void ModulateHSB(const double percent_hue,
  const double percent_saturation,const double percent_brightness,double *red,
  double *green,double *blue)
{
  double
    b,
    h,
    s;

  /*
    Modulate in HSB space: scale saturation and brightness by their
    percentages and offset the hue, then map the result back to RGB.
  */
  ConvertRGBToHSB(*red,*green,*blue,&h,&s,&b);
  s*=0.01*percent_saturation;
  b*=0.01*percent_brightness;
  h+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertHSBToRGB(h,s,b,red,green,blue);
}
static inline void ModulateHSI(const double percent_hue,
  const double percent_saturation,const double percent_intensity,double *red,
  double *green,double *blue)
{
  double
    h,
    i,
    s;

  /*
    Modulate in HSI space: scale saturation and intensity by their
    percentages and offset the hue, then map the result back to RGB.
  */
  ConvertRGBToHSI(*red,*green,*blue,&h,&s,&i);
  s*=0.01*percent_saturation;
  i*=0.01*percent_intensity;
  h+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertHSIToRGB(h,s,i,red,green,blue);
}
static inline void ModulateHSL(const double percent_hue,
  const double percent_saturation,const double percent_lightness,double *red,
  double *green,double *blue)
{
  double
    h,
    l,
    s;

  /*
    Modulate in HSL space: scale saturation and lightness by their
    percentages and offset the hue, then map the result back to RGB.
  */
  ConvertRGBToHSL(*red,*green,*blue,&h,&s,&l);
  s*=0.01*percent_saturation;
  l*=0.01*percent_lightness;
  h+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertHSLToRGB(h,s,l,red,green,blue);
}
static inline void ModulateHSV(const double percent_hue,
  const double percent_saturation,const double percent_value,double *red,
  double *green,double *blue)
{
  double
    h,
    s,
    v;

  /*
    Modulate in HSV space: scale saturation and value by their percentages
    and offset the hue, then map the result back to RGB.
  */
  ConvertRGBToHSV(*red,*green,*blue,&h,&s,&v);
  s*=0.01*percent_saturation;
  v*=0.01*percent_value;
  h+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertHSVToRGB(h,s,v,red,green,blue);
}
static inline void ModulateHWB(const double percent_hue,
  const double percent_whiteness,const double percent_blackness,double *red,
  double *green,double *blue)
{
  double
    b,
    h,
    w;

  /*
    Modulate in HWB space: scale whiteness and blackness by their
    percentages and offset the hue, then map the result back to RGB.
  */
  ConvertRGBToHWB(*red,*green,*blue,&h,&w,&b);
  w*=0.01*percent_whiteness;
  b*=0.01*percent_blackness;
  h+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertHWBToRGB(h,w,b,red,green,blue);
}
static inline void ModulateLCHab(const double percent_luma,
  const double percent_chroma,const double percent_hue,double *red,
  double *green,double *blue)
{
  double
    c,
    h,
    l;

  /*
    Modulate in LCHab space: scale luma and chroma by their percentages and
    offset the hue, then map the result back to RGB.
  */
  ConvertRGBToLCHab(*red,*green,*blue,&l,&c,&h);
  l*=0.01*percent_luma;
  c*=0.01*percent_chroma;
  h+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertLCHabToRGB(l,c,h,red,green,blue);
}
static inline void ModulateLCHuv(const double percent_luma,
  const double percent_chroma,const double percent_hue,double *red,
  double *green,double *blue)
{
  double
    c,
    h,
    l;

  /*
    Modulate in LCHuv space: scale luma and chroma by their percentages and
    offset the hue, then map the result back to RGB.
  */
  ConvertRGBToLCHuv(*red,*green,*blue,&l,&c,&h);
  l*=0.01*percent_luma;
  c*=0.01*percent_chroma;
  h+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertLCHuvToRGB(l,c,h,red,green,blue);
}
MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate,
ExceptionInfo *exception)
{
#define ModulateImageTag "Modulate/Image"
CacheView
*image_view;
ColorspaceType
colorspace;
const char
*artifact;
double
percent_brightness,
percent_hue,
percent_saturation;
GeometryInfo
geometry_info;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickStatusType
flags;
register ssize_t
i;
ssize_t
y;
/*
Initialize modulate table.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (modulate == (char *) NULL)
return(MagickFalse);
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
(void) SetImageColorspace(image,sRGBColorspace,exception);
flags=ParseGeometry(modulate,&geometry_info);
percent_brightness=geometry_info.rho;
percent_saturation=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
percent_saturation=100.0;
percent_hue=geometry_info.xi;
if ((flags & XiValue) == 0)
percent_hue=100.0;
colorspace=UndefinedColorspace;
artifact=GetImageArtifact(image,"modulate:colorspace");
if (artifact != (const char *) NULL)
colorspace=(ColorspaceType) ParseCommandOption(MagickColorspaceOptions,
MagickFalse,artifact);
if (image->storage_class == PseudoClass)
for (i=0; i < (ssize_t) image->colors; i++)
{
double
blue,
green,
red;
/*
Modulate image colormap.
*/
red=(double) image->colormap[i].red;
green=(double) image->colormap[i].green;
blue=(double) image->colormap[i].blue;
switch (colorspace)
{
case HCLColorspace:
{
ModulateHCL(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HCLpColorspace:
{
ModulateHCLp(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HSBColorspace:
{
ModulateHSB(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HSIColorspace:
{
ModulateHSI(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HSLColorspace:
default:
{
ModulateHSL(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HSVColorspace:
{
ModulateHSV(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HWBColorspace:
{
ModulateHWB(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case LCHColorspace:
case LCHabColorspace:
{
ModulateLCHab(percent_brightness,percent_saturation,percent_hue,
&red,&green,&blue);
break;
}
case LCHuvColorspace:
{
ModulateLCHuv(percent_brightness,percent_saturation,percent_hue,
&red,&green,&blue);
break;
}
}
image->colormap[i].red=red;
image->colormap[i].green=green;
image->colormap[i].blue=blue;
}
/*
Modulate image.
*/
#if defined(MAGICKCORE_OPENCL_SUPPORT)
if (AccelerateModulateImage(image,percent_brightness,percent_hue,
percent_saturation,colorspace,exception) != MagickFalse)
return(MagickTrue);
#endif
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
blue,
green,
red;
red=(double) GetPixelRed(image,q);
green=(double) GetPixelGreen(image,q);
blue=(double) GetPixelBlue(image,q);
switch (colorspace)
{
case HCLColorspace:
{
ModulateHCL(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HCLpColorspace:
{
ModulateHCLp(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HSBColorspace:
{
ModulateHSB(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HSLColorspace:
default:
{
ModulateHSL(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HSVColorspace:
{
ModulateHSV(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case HWBColorspace:
{
ModulateHWB(percent_hue,percent_saturation,percent_brightness,
&red,&green,&blue);
break;
}
case LCHabColorspace:
{
ModulateLCHab(percent_brightness,percent_saturation,percent_hue,
&red,&green,&blue);
break;
}
case LCHColorspace:
case LCHuvColorspace:
{
ModulateLCHuv(percent_brightness,percent_saturation,percent_hue,
&red,&green,&blue);
break;
}
}
SetPixelRed(image,ClampToQuantum(red),q);
SetPixelGreen(image,ClampToQuantum(green),q);
SetPixelBlue(image,ClampToQuantum(blue),q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ModulateImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e g a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NegateImage() negates the colors in the reference image. The grayscale
% option means that only grayscale values within the image are negated.
%
% The format of the NegateImage method is:
%
% MagickBooleanType NegateImage(Image *image,
% const MagickBooleanType grayscale,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o grayscale: If MagickTrue, only negate grayscale pixels within the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType NegateImage(Image *image,
  const MagickBooleanType grayscale,ExceptionInfo *exception)
{
#define NegateImageTag  "Negate/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Negate colormap.  In grayscale mode, skip any entry whose channels
        differ (i.e. any non-gray entry).
      */
      if( grayscale != MagickFalse )
        if ((image->colormap[i].red != image->colormap[i].green) ||
            (image->colormap[i].green != image->colormap[i].blue))
          continue;
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=QuantumRange-image->colormap[i].red;
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=QuantumRange-image->colormap[i].green;
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
    }
  /*
    Negate image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
  if( grayscale != MagickFalse )
    {
      /*
        Grayscale mode: negate only pixels that are gray, mirroring the
        colormap branch above.  NOTE(review): this path is serial in this
        implementation (no OpenMP pragma) — confirm that is intended.
      */
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            j;

          /*
            Bug fix: previously this skipped gray pixels (test was
            `!= MagickFalse`), negating exactly the pixels the grayscale
            option is documented to leave alone.  Skip NON-gray pixels.
          */
          if (IsPixelGray(image,q) == MagickFalse)
            {
              q+=GetPixelChannels(image);
              continue;
            }
          for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,j);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[j]=QuantumRange-q[j];
          }
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(image,NegateImageTag,progress,image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
      /*
        Bug fix: return the accumulated status instead of an unconditional
        MagickTrue so pixel-cache or progress-monitor failures propagate.
      */
      return(status);
    }
  /*
    Negate image (all pixels, every updatable channel).
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=QuantumRange-q[j];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,NegateImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N o r m a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The NormalizeImage() method enhances the contrast of a color image by
% mapping the darkest 0.15 percent of all pixels to black and the brightest
% 0.05 percent to white.
%
% The format of the NormalizeImage method is:
%
% MagickBooleanType NormalizeImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType NormalizeImage(Image *image,
  ExceptionInfo *exception)
{
  /*
    Histogram-stretch the image: clip a small fraction of the darkest and
    brightest pixels and map the remainder onto the full quantum range.
    The clip points are expressed as cumulative pixel counts.
  */
  const double
    pixel_count=(double) image->columns*image->rows;

  return(ContrastStretchImage(image,pixel_count*0.0015,pixel_count*0.9995,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i g m o i d a l C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SigmoidalContrastImage() adjusts the contrast of an image with a non-linear
% sigmoidal contrast algorithm. Increase the contrast of the image using a
% sigmoidal transfer function without saturating highlights or shadows.
% Contrast indicates how much to increase the contrast (0 is none; 3 is
% typical; 20 is pushing it); mid-point indicates where midtones fall in the
% resultant image (0 is white; 50% is middle-gray; 100% is black). Set
% sharpen to MagickTrue to increase the image contrast otherwise the contrast
% is reduced.
%
% The format of the SigmoidalContrastImage method is:
%
% MagickBooleanType SigmoidalContrastImage(Image *image,
% const MagickBooleanType sharpen,const char *levels,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o sharpen: Increase or decrease image contrast.
%
% o contrast: strength of the contrast, the larger the number the more
% 'threshold-like' it becomes.
%
% o midpoint: midpoint of the function as a color value 0 to QuantumRange.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
ImageMagick 6 has a version of this function which uses LUTs.
*/
/*
Sigmoidal function Sigmoidal with inflexion point moved to b and "slope
constant" set to a.
The first version, based on the hyperbolic tangent tanh, when combined with
the scaling step, is an exact arithmetic clone of the the sigmoid function
based on the logistic curve. The equivalence is based on the identity
1/(1+exp(-t)) = (1+tanh(t/2))/2
(http://de.wikipedia.org/wiki/Sigmoidfunktion) and the fact that the
scaled sigmoidal derivation is invariant under affine transformations of
the ordinate.
The tanh version is almost certainly more accurate and cheaper. The 0.5
factor in the argument is to clone the legacy ImageMagick behavior. The
reason for making the define depend on atanh even though it only uses tanh
has to do with the construction of the inverse of the scaled sigmoidal.
*/
#if defined(MAGICKCORE_HAVE_ATANH)
#define Sigmoidal(a,b,x) ( tanh((0.5*(a))*((x)-(b))) )
#else
#define Sigmoidal(a,b,x) ( 1.0/(1.0+exp((a)*((b)-(x)))) )
#endif
/*
Scaled sigmoidal function:
( Sigmoidal(a,b,x) - Sigmoidal(a,b,0) ) /
( Sigmoidal(a,b,1) - Sigmoidal(a,b,0) )
See http://osdir.com/ml/video.image-magick.devel/2005-04/msg00006.html and
http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf. The limit
of ScaledSigmoidal as a->0 is the identity, but a=0 gives a division by
zero. This is fixed below by exiting immediately when contrast is small,
leaving the image (or colormap) unmodified. This appears to be safe because
the series expansion of the logistic sigmoidal function around x=b is
1/2-a*(b-x)/4+...
so that the key denominator s(1)-s(0) is about a/4 (a/2 with tanh).
*/
#define ScaledSigmoidal(a,b,x) ( \
(Sigmoidal((a),(b),(x))-Sigmoidal((a),(b),0.0)) / \
(Sigmoidal((a),(b),1.0)-Sigmoidal((a),(b),0.0)) )
/*
Inverse of ScaledSigmoidal, used for +sigmoidal-contrast. Because b
may be 0 or 1, the argument of the hyperbolic tangent (resp. logistic
sigmoidal) may be outside of the interval (-1,1) (resp. (0,1)), even
when creating a LUT from in gamut values, hence the branching. In
addition, HDRI may have out of gamut values.
InverseScaledSigmoidal is not a two-sided inverse of ScaledSigmoidal:
It is only a right inverse. This is unavoidable.
*/
static inline double InverseScaledSigmoidal(const double a,const double b,
  const double x)
{
  /*
    Right inverse of ScaledSigmoidal: undo the affine scaling, clamp the
    argument strictly inside the domain of the inverse (atanh needs (-1,1),
    the logistic inverse needs (0,1)), then invert.  The #if below selects
    the branch matching the Sigmoidal() definition in use.
  */
  const double sig0=Sigmoidal(a,b,0.0);
  const double sig1=Sigmoidal(a,b,1.0);
  const double argument=(sig1-sig0)*x+sig0;
  const double clamped=
    (
#if defined(MAGICKCORE_HAVE_ATANH)
      /* tanh branch: keep argument in (-1+eps, 1-eps) */
      argument < -1+MagickEpsilon
      ?
      -1+MagickEpsilon
      :
      ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument )
    );
  return(b+(2.0/a)*atanh(clamped));
#else
      /* logistic branch: keep argument in (eps, 1-eps) */
      argument < MagickEpsilon
      ?
      MagickEpsilon
      :
      ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument )
    );
  return(b-log(1.0/clamped-1.0)/a);
#endif
}
MagickExport MagickBooleanType SigmoidalContrastImage(Image *image,
  const MagickBooleanType sharpen,const double contrast,const double midpoint,
  ExceptionInfo *exception)
{
#define SigmoidalContrastImageTag  "SigmoidalContrast/Image"
/*
  Convenience macros: scaled sigmoidal (and its right inverse) mapped onto
  the quantum range and clamped to a valid Quantum.
*/
#define ScaledSig(x) ( ClampToQuantum(QuantumRange* \
  ScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) )
#define InverseScaledSig(x) ( ClampToQuantum(QuantumRange* \
  InverseScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) )

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Side effect: may clamp values unless contrast<MagickEpsilon, in which
    case nothing is done (avoids the a->0 division-by-zero in the scaled
    sigmoidal; see the comment above ScaledSigmoidal).
  */
  if (contrast < MagickEpsilon)
    return(MagickTrue);
  /*
    Sigmoidal-contrast enhance colormap: forward transform when sharpening,
    inverse transform otherwise, per updatable channel.
  */
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      if( sharpen != MagickFalse )
        for (i=0; i < (ssize_t) image->colors; i++)
        {
          if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].red=(MagickRealType) ScaledSig(
              image->colormap[i].red);
          if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].green=(MagickRealType) ScaledSig(
              image->colormap[i].green);
          if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].blue=(MagickRealType) ScaledSig(
              image->colormap[i].blue);
          if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].alpha=(MagickRealType) ScaledSig(
              image->colormap[i].alpha);
        }
      else
        for (i=0; i < (ssize_t) image->colors; i++)
        {
          if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].red=(MagickRealType) InverseScaledSig(
              image->colormap[i].red);
          if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].green=(MagickRealType) InverseScaledSig(
              image->colormap[i].green);
          if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].blue=(MagickRealType) InverseScaledSig(
              image->colormap[i].blue);
          if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].alpha=(MagickRealType) InverseScaledSig(
              image->colormap[i].alpha);
        }
    }
  /*
    Sigmoidal-contrast enhance image, one row per loop iteration (rows are
    independent, so the loop parallelizes; status/progress are shared).
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if( sharpen != MagickFalse )
          q[i]=ScaledSig(q[i]);
        else
          q[i]=InverseScaledSig(q[i]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
|
GB_unop__log1p_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__log1p_fc32_fc32)
// op(A') function: GB (_unop_tran__log1p_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = GB_clog1pf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_clog1pf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = GB_clog1pf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG1P || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/*
  Cx [p] = GB_clog1pf (Ax [p]) for all anz entries (single-precision
  complex log1p; see the unaryop comment at the top of this file).
  This file is auto-generated — code left byte-identical; comments only.
*/
GrB_Info GB (_unop_apply__log1p_fc32_fc32)
(
    GxB_FC32_t *Cx,               // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,    // A->b if A is bitmap, NULL otherwise
    int64_t anz,                  // number of entries to process
    int nthreads                  // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    /* operator/type combination compiled out; caller falls back to the
       generic kernel */
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        /* full/sparse case: every position 0..anz-1 holds an entry */
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = GB_clog1pf (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;   /* position p holds no entry */
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = GB_clog1pf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/*
  C = op (cast (A')): transpose A while applying the unary operator; the
  actual loop body is supplied by the shared template GB_unop_transpose.c,
  driven by the GB_* macros defined above.  Auto-generated file — code left
  byte-identical; comments only.
*/
GrB_Info GB (_unop_tran__log1p_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-thread workspaces
    const int64_t *restrict A_slice,    // how A is partitioned across tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    /* operator/type combination compiled out; caller uses generic kernel */
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
par_lr_interp.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "aux_interp.h"
#define MAX_C_CONNECTIONS 100
#define HAVE_COMMON_C 1
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildStdInterp
* Comment: The interpolatory weighting can be changed with the sep_weight
* variable. This can enable not separating negative and positive
* off diagonals in the weight formula.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildStdInterp(hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Int sep_weight,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_BigInt total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/* HYPRE_Int *col_map_offd_P = NULL;*/
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/* Full row information for columns of A that are off diag*/
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_BigInt *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
//HYPRE_BigInt *found;
HYPRE_Int loc_col;
HYPRE_Int full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_BigInt *Sop_j;
/* Variables to keep count of interpolatory points */
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter;
HYPRE_Int *ihat = NULL;
HYPRE_Int *ihat_offd = NULL;
HYPRE_Int *ipnt = NULL;
HYPRE_Int *ipnt_offd = NULL;
HYPRE_Int strong_f_marker = -2;
/* Interpolation weight variables */
HYPRE_Real *ahat = NULL;
HYPRE_Real *ahat_offd = NULL;
HYPRE_Real sum_pos, sum_pos_C, sum_neg, sum_neg_C, sum, sum_C;
HYPRE_Real diagonal, distribute;
HYPRE_Real alfa = 1.;
HYPRE_Real beta = 1.;
/* Loop variables */
// HYPRE_Int index;
HYPRE_Int start_indexing = 0;
HYPRE_Int i, i1, j1, jj, kk, k1;
HYPRE_Int cnt_c, cnt_f, cnt_c_offd, cnt_f_offd, indx;
HYPRE_BigInt big_k1;
/* Definitions */
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Real wall_time;
HYPRE_Real wall_1 = 0;
HYPRE_Real wall_2 = 0;
HYPRE_Real wall_3 = 0;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
if (debug_flag== 4) wall_time = time_getWallclockSeconds();
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Set up off processor information (specifically for neighbors of
* neighbors */
full_off_procNodes = 0;
if (num_procs > 1)
{
hypre_exchange_interp_data(
&CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
A, CF_marker, S, num_functions, dof_func, 0);
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
}
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixBigJ(Sop);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P);
if (n_fine)
{
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
coarse_counter = 0;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
for (i = 0; i < n_fine; i++)
{
P_diag_i[i] = jj_counter;
if (num_procs > 1)
{
P_offd_i[i] = jj_counter_offd;
}
if (CF_marker[i] >= 0)
{
jj_counter++;
fine_to_coarse[i] = coarse_counter;
coarse_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i, or C-points that stronly influence F-points
* that strongly influence i.
*--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{ /* i1 is a C point */
if (P_marker[i1] < P_diag_i[i])
{
P_marker[i1] = jj_counter;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{ /* i1 is a F point, loop through it's strong neighbors */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if (P_marker[k1] < P_diag_i[i])
{
P_marker[k1] = jj_counter;
jj_counter++;
}
}
}
if (num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if (col_offd_S_to_A)
{
k1 = col_offd_S_to_A[S_offd_j[kk]];
}
else
{
k1 = S_offd_j[kk];
}
if (CF_marker_offd[k1] >= 0)
{
if (P_marker_offd[k1] < P_offd_i[i])
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (col_offd_S_to_A)
{
i1 = col_offd_S_to_A[i1];
}
if (CF_marker_offd[i1] >= 0)
{
if (P_marker_offd[i1] < P_offd_i[i])
{
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{
/* F point; look at neighbors of i1. Sop contains global col
* numbers and entries that could be in S_diag or S_offd or
* neither. */
for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
if (big_k1 >= col_1 && big_k1 < col_n)
{ /* In S_diag */
loc_col = (HYPRE_Int)(big_k1-col_1);
if (CF_marker[loc_col] >= 0)
{
if (P_marker[loc_col] < P_diag_i[i])
{
P_marker[loc_col] = jj_counter;
jj_counter++;
}
}
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if (CF_marker_offd[loc_col] >= 0)
{
if (P_marker_offd[loc_col] < P_offd_i[i])
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
}
}
}
}
}
}
}
}
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d determine structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
P_diag_size = jj_counter;
P_offd_size = jj_counter_offd;
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P);
}
P_diag_i[n_fine] = jj_counter;
P_offd_i[n_fine] = jj_counter_offd;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/* Fine to coarse mapping */
if (num_procs > 1)
{
hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes, my_first_cpt,
fine_to_coarse_offd);
}
/* Initialize ahat, which is a modification to a, used in the standard
* interpolation routine. */
if (n_fine)
{
ahat = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST);
ihat = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
ipnt = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
ahat_offd = hypre_CTAlloc(HYPRE_Real, full_off_procNodes, HYPRE_MEMORY_HOST);
ihat_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
ipnt_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
ahat[i] = 0;
ihat[i] = -1;
}
for (i = 0; i < full_off_procNodes; i++)
{
P_marker_offd[i] = -1;
ahat_offd[i] = 0;
ihat_offd[i] = -1;
}
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
for (i = 0; i < n_fine; i++)
{
jj_begin_row = jj_counter;
if (num_procs > 1)
{
jj_begin_row_offd = jj_counter_offd;
}
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
if (debug_flag==4) wall_time = time_getWallclockSeconds();
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = i1;
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if (P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = k1;
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if (num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if (col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] >= 0)
{
if (P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (col_offd_S_to_A)
{
i1 = col_offd_S_to_A[i1];
}
if ( CF_marker_offd[i1] >= 0)
{
if (P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd]=i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
if (big_k1 >= col_1 && big_k1 < col_n)
{
loc_col = (HYPRE_Int)(big_k1-col_1);
if (CF_marker[loc_col] >= 0)
{
if (P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = loc_col;
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if (CF_marker_offd[loc_col] >= 0)
{
if (P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd]=loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
wall_1 += wall_time;
fflush(NULL);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds();
}
cnt_c = 0;
cnt_f = jj_end_row-jj_begin_row;
cnt_c_offd = 0;
cnt_f_offd = jj_end_row_offd-jj_begin_row_offd;
ihat[i] = cnt_f;
ipnt[cnt_f] = i;
ahat[cnt_f++] = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{ /* i1 is direct neighbor */
i1 = A_diag_j[jj];
if (P_marker[i1] != strong_f_marker)
{
indx = ihat[i1];
if (indx > -1)
{
ahat[indx] += A_diag_data[jj];
}
else if (P_marker[i1] >= jj_begin_row)
{
ihat[i1] = cnt_c;
ipnt[cnt_c] = i1;
ahat[cnt_c++] += A_diag_data[jj];
}
else if (CF_marker[i1] != -3)
{
ihat[i1] = cnt_f;
ipnt[cnt_f] = i1;
ahat[cnt_f++] += A_diag_data[jj];
}
}
else
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
{
distribute = A_diag_data[jj]/A_diag_data[A_diag_i[i1]];
for (kk = A_diag_i[i1]+1; kk < A_diag_i[i1+1]; kk++)
{
k1 = A_diag_j[kk];
indx = ihat[k1];
if (indx > -1)
ahat[indx] -= A_diag_data[kk]*distribute;
else if (P_marker[k1] >= jj_begin_row)
{
ihat[k1] = cnt_c;
ipnt[cnt_c] = k1;
ahat[cnt_c++] -= A_diag_data[kk]*distribute;
}
else
{
ihat[k1] = cnt_f;
ipnt[cnt_f] = k1;
ahat[cnt_f++] -= A_diag_data[kk]*distribute;
}
}
if (num_procs > 1)
{
for (kk = A_offd_i[i1]; kk < A_offd_i[i1+1]; kk++)
{
k1 = A_offd_j[kk];
indx = ihat_offd[k1];
if (num_functions == 1 || dof_func[i1] == dof_func_offd[k1])
{
if (indx > -1)
{
ahat_offd[indx] -= A_offd_data[kk]*distribute;
}
else if (P_marker_offd[k1] >= jj_begin_row_offd)
{
ihat_offd[k1] = cnt_c_offd;
ipnt_offd[cnt_c_offd] = k1;
ahat_offd[cnt_c_offd++] -= A_offd_data[kk]*distribute;
}
else
{
ihat_offd[k1] = cnt_f_offd;
ipnt_offd[cnt_f_offd] = k1;
ahat_offd[cnt_f_offd++] -= A_offd_data[kk]*distribute;
}
}
}
}
}
}
}
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if (P_marker_offd[i1] != strong_f_marker)
{
indx = ihat_offd[i1];
if (indx > -1)
ahat_offd[indx] += A_offd_data[jj];
else if (P_marker_offd[i1] >= jj_begin_row_offd)
{
ihat_offd[i1] = cnt_c_offd;
ipnt_offd[cnt_c_offd] = i1;
ahat_offd[cnt_c_offd++] += A_offd_data[jj];
}
else if (CF_marker_offd[i1] != -3)
{
ihat_offd[i1] = cnt_f_offd;
ipnt_offd[cnt_f_offd] = i1;
ahat_offd[cnt_f_offd++] += A_offd_data[jj];
}
}
else
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
{
distribute = A_offd_data[jj]/A_ext_data[A_ext_i[i1]];
for (kk = A_ext_i[i1]+1; kk < A_ext_i[i1+1]; kk++)
{
big_k1 = A_ext_j[kk];
if (big_k1 >= col_1 && big_k1 < col_n)
{ /*diag*/
loc_col = (HYPRE_Int)(big_k1 - col_1);
indx = ihat[loc_col];
if (indx > -1)
{
ahat[indx] -= A_ext_data[kk]*distribute;
}
else if (P_marker[loc_col] >= jj_begin_row)
{
ihat[loc_col] = cnt_c;
ipnt[cnt_c] = loc_col;
ahat[cnt_c++] -= A_ext_data[kk]*distribute;
}
else
{
ihat[loc_col] = cnt_f;
ipnt[cnt_f] = loc_col;
ahat[cnt_f++] -= A_ext_data[kk]*distribute;
}
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if (num_functions == 1 || dof_func_offd[loc_col] == dof_func_offd[i1])
{
indx = ihat_offd[loc_col];
if (indx > -1)
{
ahat_offd[indx] -= A_ext_data[kk]*distribute;
}
else if (P_marker_offd[loc_col] >= jj_begin_row_offd)
{
ihat_offd[loc_col] = cnt_c_offd;
ipnt_offd[cnt_c_offd] = loc_col;
ahat_offd[cnt_c_offd++] -= A_ext_data[kk]*distribute;
}
else
{
ihat_offd[loc_col] = cnt_f_offd;
ipnt_offd[cnt_f_offd] = loc_col;
ahat_offd[cnt_f_offd++] -= A_ext_data[kk]*distribute;
}
}
}
}
}
}
}
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
wall_2 += wall_time;
fflush(NULL);
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
diagonal = ahat[cnt_c];
ahat[cnt_c] = 0;
sum_pos = 0;
sum_pos_C = 0;
sum_neg = 0;
sum_neg_C = 0;
sum = 0;
sum_C = 0;
if (sep_weight == 1)
{
for (jj=0; jj < cnt_c; jj++)
{
if (ahat[jj] > 0)
{
sum_pos_C += ahat[jj];
}
else
{
sum_neg_C += ahat[jj];
}
}
if (num_procs > 1)
{
for (jj=0; jj < cnt_c_offd; jj++)
{
if (ahat_offd[jj] > 0)
{
sum_pos_C += ahat_offd[jj];
}
else
{
sum_neg_C += ahat_offd[jj];
}
}
}
sum_pos = sum_pos_C;
sum_neg = sum_neg_C;
for (jj=cnt_c+1; jj < cnt_f; jj++)
{
if (ahat[jj] > 0)
{
sum_pos += ahat[jj];
}
else
{
sum_neg += ahat[jj];
}
ahat[jj] = 0;
}
if (num_procs > 1)
{
for (jj=cnt_c_offd; jj < cnt_f_offd; jj++)
{
if (ahat_offd[jj] > 0)
{
sum_pos += ahat_offd[jj];
}
else
{
sum_neg += ahat_offd[jj];
}
ahat_offd[jj] = 0;
}
}
if (sum_neg_C*diagonal != 0)
{
alfa = sum_neg/sum_neg_C/diagonal;
}
if (sum_pos_C*diagonal != 0)
{
beta = sum_pos/sum_pos_C/diagonal;
}
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
j1 = ihat[P_diag_j[jj]];
if (ahat[j1] > 0)
{
P_diag_data[jj] = -beta*ahat[j1];
}
else
{
P_diag_data[jj] = -alfa*ahat[j1];
}
P_diag_j[jj] = fine_to_coarse[P_diag_j[jj]];
ahat[j1] = 0;
}
for (jj=0; jj < cnt_f; jj++)
{
ihat[ipnt[jj]] = -1;
}
if (num_procs > 1)
{
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
j1 = ihat_offd[P_offd_j[jj]];
if (ahat_offd[j1] > 0)
{
P_offd_data[jj] = -beta*ahat_offd[j1];
}
else
{
P_offd_data[jj] = -alfa*ahat_offd[j1];
}
ahat_offd[j1] = 0;
}
for (jj=0; jj < cnt_f_offd; jj++)
{
ihat_offd[ipnt_offd[jj]] = -1;
}
}
}
else
{
for (jj=0; jj < cnt_c; jj++)
{
sum_C += ahat[jj];
}
if (num_procs > 1)
{
for (jj=0; jj < cnt_c_offd; jj++)
{
sum_C += ahat_offd[jj];
}
}
sum = sum_C;
for (jj=cnt_c+1; jj < cnt_f; jj++)
{
sum += ahat[jj];
ahat[jj] = 0;
}
if (num_procs > 1)
{
for (jj=cnt_c_offd; jj < cnt_f_offd; jj++)
{
sum += ahat_offd[jj];
ahat_offd[jj] = 0;
}
}
if (sum_C*diagonal != 0)
{
alfa = sum/sum_C/diagonal;
}
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
j1 = ihat[P_diag_j[jj]];
P_diag_data[jj] = -alfa*ahat[j1];
P_diag_j[jj] = fine_to_coarse[P_diag_j[jj]];
ahat[j1] = 0;
}
for (jj=0; jj < cnt_f; jj++)
{
ihat[ipnt[jj]] = -1;
}
if (num_procs > 1)
{
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
j1 = ihat_offd[P_offd_j[jj]];
P_offd_data[jj] = -alfa*ahat_offd[j1];
ahat_offd[j1] = 0;
}
for (jj=0; jj < cnt_f_offd; jj++)
{
ihat_offd[ipnt_offd[jj]] = -1;
}
}
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
wall_3 += wall_time;
fflush(NULL);
}
}
}
if (debug_flag==4)
{
hypre_printf("Proc = %d fill part 1 %f part 2 %f part 3 %f\n",
my_id, wall_1, wall_2, wall_3);
fflush(NULL);
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P;
hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
/* This builds col_map, col_map should be monotone increasing and contain
* global numbers. */
if (P_offd_size)
{
hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
}
hypre_MatvecCommPkgCreate(P);
for (i=0; i < n_fine; i++)
{
if (CF_marker[i] == -3)
{
CF_marker[i] = -1;
}
}
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(ahat, HYPRE_MEMORY_HOST);
hypre_TFree(ihat, HYPRE_MEMORY_HOST);
hypre_TFree(ipnt, HYPRE_MEMORY_HOST);
if (full_off_procNodes)
{
hypre_TFree(ahat_offd, HYPRE_MEMORY_HOST);
hypre_TFree(ihat_offd, HYPRE_MEMORY_HOST);
hypre_TFree(ipnt_offd, HYPRE_MEMORY_HOST);
}
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1)
{
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
}
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildExtPIInterp
* Comment:
*--------------------------------------------------------------------------*/
/* Build the extended+i (ext+i) interpolation operator P on the host.
 *
 * Two-pass construction, OpenMP-threaded by row ranges:
 *   Pass 1: per thread, count nonzeros of P_diag / P_offd per row and build a
 *           local fine_to_coarse map; the per-thread partial row pointers and
 *           coarse counts are then stitched together with prefix-sum offsets.
 *   Pass 2: fill column indices and interpolation weights, distributing
 *           strong-F-point connections per the ext+i formula, then divide each
 *           row by -diagonal.
 *
 * Parameters:
 *   A               - fine-grid operator (ParCSR).
 *   CF_marker       - C/F splitting; >= 0 C-point, -1 F-point, -3 special
 *                     F-point excluded from interpolation (reset to -1 on exit).
 *   S               - strength-of-connection matrix matching A's sparsity subset.
 *   num_cpts_global - global coarse-point partitioning info.
 *   num_functions, dof_func - system/unknown-based interpolation support.
 *   debug_flag      - 4 enables wall-clock timing printouts.
 *   trunc_factor, max_elmts - truncation controls applied to P afterwards.
 *   col_offd_S_to_A - optional map from S's offd columns to A's offd columns.
 *   P_ptr           - output: the interpolation matrix P.
 *
 * Returns hypre_error_flag. */
HYPRE_Int
hypre_BoomerAMGBuildExtPIInterpHost(hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] -= hypre_MPI_Wtime();
#endif
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
/* [col_1, col_n) is this rank's global row/column ownership range; used to
 * classify global column numbers from A_ext/Sop as diag vs offd entries. */
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + local_numrows;
HYPRE_BigInt total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/*HYPRE_Int *col_map_offd_P = NULL;*/
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
/* P_marker / P_marker_offd are thread-private scratch arrays (allocated
 * inside the parallel region) recording, per column, the position in P's
 * row currently being built, or strong_f_marker for strong F neighbors. */
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/* Full row information for columns of A that are off diag*/
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_BigInt *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
HYPRE_Int loc_col;
HYPRE_Int full_off_procNodes;
/* Sop: strength info for off-processor rows; its column entries are global
 * numbers (>= 0) for this rank's diag range or -(offd_index+1) encodings. */
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_BigInt *Sop_j;
HYPRE_Int sgn = 1;
/* Variables to keep count of interpolatory points */
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter;
/* Interpolation weight variables */
HYPRE_Real sum, diagonal, distribute;
HYPRE_Int strong_f_marker;
/* Loop variables */
/*HYPRE_Int index;*/
HYPRE_Int start_indexing = 0;
HYPRE_Int i, i1, i2, jj, kk, k1, jj1;
HYPRE_BigInt big_k1;
/* Threading variables: per-thread nonzero/coarse counts gathered here and
 * prefix-summed by thread 0 to stitch the per-thread results together. */
HYPRE_Int my_thread_num, num_threads, start, stop;
HYPRE_Int * max_num_threads = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST);
HYPRE_Int * diag_offset;
HYPRE_Int * fine_to_coarse_offset;
HYPRE_Int * offd_offset;
/* Definitions */
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Real wall_time;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
my_first_cpt = num_cpts_global[0];
/* Last rank holds the global coarse-point total; broadcast it to all. */
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Set up off processor information (specifically for neighbors of
 * neighbors */
full_off_procNodes = 0;
if (num_procs > 1)
{
hypre_exchange_interp_data(
&CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
A, CF_marker, S, num_functions, dof_func, 1);
{
/* NOTE(review): this profile-timer increment sits inside the
 * num_procs > 1 branch, so in serial runs the timer started at
 * function entry is only stopped/restarted around truncation and at
 * return. Presumably intentional (accounting the exchange phase);
 * confirm against upstream hypre before changing. */
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
}
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixBigJ(Sop);
}
/*-----------------------------------------------------------------------
 * First Pass: Determine size of P and fill in fine_to_coarse mapping.
 *-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
 * Intialize counters and allocate mapping vector.
 *-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P);
if (n_fine)
{
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
/* This function is smart enough to check P_marker and P_marker_offd only,
 * and set them if they are not NULL. The other vectors are set regardless.*/
hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);
/*-----------------------------------------------------------------------
 * Initialize threading variables
 *-----------------------------------------------------------------------*/
max_num_threads[0] = hypre_NumThreads();
diag_offset = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST);
fine_to_coarse_offset = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST);
offd_offset = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST);
for (i=0; i < max_num_threads[0]; i++)
{
diag_offset[i] = 0;
fine_to_coarse_offset[i] = 0;
offd_offset[i] = 0;
}
/*-----------------------------------------------------------------------
 * Loop over fine grid.
 *-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,my_thread_num,num_threads,start,stop,coarse_counter,jj_counter,jj_counter_offd, P_marker, P_marker_offd,jj,kk,i1,k1,loc_col,jj_begin_row,jj_begin_row_offd,jj_end_row,jj_end_row_offd,diagonal,sum,sgn,jj1,i2,distribute,strong_f_marker, big_k1)
#endif
{
/* Parallelize by computing only over each thread's range of rows.
 *
 * The first large for loop computes ~locally~ for each thread P_diag_i,
 * P_offd_i and fine_to_coarse. Then, the arrays are stitched together
 * For eaxample the first phase would compute
 * P_diag_i = [0, 2, 4, 7, 2, 5, 6]
 * for two threads. P_diag_i[stop] points to the end of that
 * thread's data, but P_diag_i[start] points to the end of the
 * previous thread's row range. This is then stitched together at the
 * end to yield,
 * P_diag_i = [0, 2, 4, 7, 9, 14, 15].
 *
 * The second large for loop computes interpolation weights and is
 * relatively straight-forward to thread.
 */
/* initialize thread-wise variables */
strong_f_marker = -2;
coarse_counter = 0;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (n_fine)
{
/* Thread-private marker array over local columns; -1 means "not yet in
 * the current row of P". */
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
for (i = 0; i < n_fine; i++)
{ P_marker[i] = -1; }
}
if (full_off_procNodes)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
for (i = 0; i < full_off_procNodes; i++)
{ P_marker_offd[i] = -1;}
}
/* this thread's row range */
my_thread_num = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
start = (n_fine/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{ stop = n_fine; }
else
{ stop = (n_fine/num_threads)*(my_thread_num+1); }
/* loop over rows */
/* This loop counts the number of elements in P */
/* is done by counting the elmements in the index set C-hat */
for (i = start; i < stop; i++)
{
P_diag_i[i] = jj_counter;
if (num_procs > 1)
P_offd_i[i] = jj_counter_offd;
if (CF_marker[i] >= 0)
{
/* row in P corresponding to a coarse pt., will only require one element (1 on the diagonal). */
jj_counter++;
fine_to_coarse[i] = coarse_counter;
coarse_counter++;
}
/*--------------------------------------------------------------------
 * If i is an F-point, interpolation is from the C-points that
 * strongly influence i, or C-points that stronly influence F-points
 * that strongly influence i.
 *--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{ /* i1 is a C point */
/* P_marker < P_diag_i[i] means i1 not yet counted for row i */
if (P_marker[i1] < P_diag_i[i])
{
P_marker[i1] = jj_counter;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{ /* i1 is a F point, loop through it's strong neighbors */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if (P_marker[k1] < P_diag_i[i])
{
P_marker[k1] = jj_counter;
jj_counter++;
}
}
}
if (num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if (col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] >= 0)
{
if (P_marker_offd[k1] < P_offd_i[i])
{
/* mark the offd column as used so it survives colmap build */
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if (CF_marker_offd[i1] >= 0)
{
if (P_marker_offd[i1] < P_offd_i[i])
{
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{ /* F point; look at neighbors of i1. Sop contains global col
 * numbers and entries that could be in S_diag or S_offd or
 * neither. */
for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
if (big_k1 >= col_1 && big_k1 < col_n)
{ /* In S_diag */
loc_col = (HYPRE_Int)(big_k1-col_1);
if (P_marker[loc_col] < P_diag_i[i])
{
P_marker[loc_col] = jj_counter;
jj_counter++;
}
}
else
{
/* negative Sop entry encodes an offd column index */
loc_col = (HYPRE_Int)(-big_k1 - 1);
if (P_marker_offd[loc_col] < P_offd_i[i])
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
}
}
}
}
}
}
}
/*-----------------------------------------------------------------------
 * End loop over fine grid.
 *-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* Publish this thread's local totals; stitched into global offsets below. */
P_diag_i[stop] = jj_counter;
P_offd_i[stop] = jj_counter_offd;
fine_to_coarse_offset[my_thread_num] = coarse_counter;
diag_offset[my_thread_num] = jj_counter;
offd_offset[my_thread_num] = jj_counter_offd;
/* Stitch P_diag_i, P_offd_i and fine_to_coarse together */
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num == 0)
{
/* Calculate the offset for P_diag_i and P_offd_i for each thread */
/* (serial inclusive prefix sum over the per-thread counts) */
for (i = 1; i < num_threads; i++)
{
diag_offset[i] = diag_offset[i-1] + diag_offset[i];
fine_to_coarse_offset[i] = fine_to_coarse_offset[i-1] + fine_to_coarse_offset[i];
offd_offset[i] = offd_offset[i-1] + offd_offset[i];
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num > 0)
{
/* update row pointer array with offset,
 * making sure to update the row stop index */
for (i = start+1; i <= stop; i++)
{
P_diag_i[i] += diag_offset[my_thread_num-1];
P_offd_i[i] += offd_offset[my_thread_num-1];
}
/* update fine_to_coarse by offsetting with the offset
 * from the preceding thread */
for (i = start; i < stop; i++)
{
if (fine_to_coarse[i] >= 0)
{ fine_to_coarse[i] += fine_to_coarse_offset[my_thread_num-1]; }
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num == 0)
{
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d determine structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
 * Allocate arrays.
 *-----------------------------------------------------------------------*/
if (debug_flag== 4) wall_time = time_getWallclockSeconds();
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P);
}
}
/* Fine to coarse mapping */
if (num_procs > 1 && my_thread_num == 0)
{
hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes, my_first_cpt,
fine_to_coarse_offd);
}
/* Reset thread-private markers for the fill pass. */
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < full_off_procNodes; i++)
{
P_marker_offd[i] = -1;
}
/*-----------------------------------------------------------------------
 * Loop over fine grid points.
 *-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
for (i = start; i < stop; i++)
{
jj_begin_row = P_diag_i[i];
jj_begin_row_offd = P_offd_i[i];
jj_counter = jj_begin_row;
jj_counter_offd = jj_begin_row_offd;
/*--------------------------------------------------------------------
 * If i is a c-point, interpolation is the identity.
 *--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
 * If i is an F-point, build interpolation.
 *--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
/* Unique marker value for this row's strong F neighbors; decremented
 * per row so stale marks from earlier rows can never collide. */
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
 * If neighbor i1 is a C-point, set column number in P_diag_j
 * and initialize interpolation weight to zero.
 *--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if (P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[k1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if (num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if (col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] >= 0)
{
if (P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if ( CF_marker_offd[i1] >= 0)
{
if (P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
/* Find local col number */
if (big_k1 >= col_1 && big_k1 < col_n)
{
loc_col = (HYPRE_Int)(big_k1-col_1);
if (P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[loc_col];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if (P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd]=loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
/* A's diagonal entry is stored first in each CSR row. */
diagonal = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{ /* i1 is a c-point and strongly influences i, accumulate
 * a_(i,i1) into interpolation weight */
i1 = A_diag_j[jj];
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
else if (P_marker[i1] == strong_f_marker)
{
/* ext+i distribution: spread a_(i,i1) over i1's connections to
 * C-hat (and to i itself) with matching sign. */
sum = zero;
sgn = 1;
if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
/* Loop over row of A for point i1 and calculate the sum
 * of the connections to c-points that strongly influence i. */
for (jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if ((P_marker[i2] >= jj_begin_row || i2 == i) && (sgn*A_diag_data[jj1]) < 0)
sum += A_diag_data[jj1];
}
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
sum += A_offd_data[jj1];
}
}
if (sum != 0)
{
distribute = A_diag_data[jj]/sum;
/* Loop over row of A for point i1 and do the distribution */
for (jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
P_diag_data[P_marker[i2]] +=
distribute*A_diag_data[jj1];
if (i2 == i && (sgn*A_diag_data[jj1]) < 0)
diagonal += distribute*A_diag_data[jj1];
}
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
P_offd_data[P_marker_offd[i2]] +=
distribute*A_offd_data[jj1];
}
}
}
else
{
/* Nothing to distribute to: lump a_(i,i1) into the diagonal. */
diagonal += A_diag_data[jj];
}
}
/* neighbor i1 weakly influences i, accumulate a_(i,i1) into
 * diagonal */
else if (CF_marker[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if (P_marker_offd[i1] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
else if (P_marker_offd[i1] == strong_f_marker)
{
/* Distribute via i1's full (external) row from A_ext. */
sum = zero;
for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if (big_k1 >= col_1 && big_k1 < col_n)
{ /* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (P_marker[loc_col] >= jj_begin_row || loc_col == i)
sum += A_ext_data[jj1];
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if (P_marker_offd[loc_col] >= jj_begin_row_offd)
sum += A_ext_data[jj1];
}
}
if (sum != 0)
{
distribute = A_offd_data[jj] / sum;
for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if (big_k1 >= col_1 && big_k1 < col_n)
{ /* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (P_marker[loc_col] >= jj_begin_row)
P_diag_data[P_marker[loc_col]] += distribute*
A_ext_data[jj1];
if (loc_col == i)
diagonal += distribute*A_ext_data[jj1];
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if (P_marker_offd[loc_col] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[loc_col]] += distribute*
A_ext_data[jj1];
}
}
}
else
{
diagonal += A_offd_data[jj];
}
}
else if (CF_marker_offd[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
/* Finalize the row: w_ij = -(accumulated)/diagonal. */
if (diagonal)
{
for (jj = jj_begin_row; jj < jj_end_row; jj++)
P_diag_data[jj] /= -diagonal;
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
P_offd_data[jj] /= -diagonal;
}
}
strong_f_marker--;
}
/*-----------------------------------------------------------------------
 * End large for loop over nfine
 *-----------------------------------------------------------------------*/
if (n_fine)
{
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
}
}
/*-----------------------------------------------------------------------
 * End PAR_REGION
 *-----------------------------------------------------------------------*/
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d fill structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
 * Allocate arrays.
 *-----------------------------------------------------------------------*/
/* Assemble the ParCSR matrix P from the filled CSR parts. */
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
/* Row starts are borrowed from A; P must not free them. */
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P;
hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] -= hypre_MPI_Wtime();
#endif
/* Truncation may reallocate; refresh all cached pointers/sizes. */
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
/* This builds col_map, col_map should be monotone increasing and contain
 * global numbers. */
if (P_offd_size)
{
hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
}
hypre_MatvecCommPkgCreate(P);
/* Restore the -3 markers to ordinary F-points for the caller. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < n_fine; i++)
{
if (CF_marker[i] == -3)
{
CF_marker[i] = -1;
}
}
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(max_num_threads, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(diag_offset, HYPRE_MEMORY_HOST);
hypre_TFree(offd_offset, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse_offset, HYPRE_MEMORY_HOST);
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1)
{
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
}
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildExtPICCInterp
* Comment: Only use FF when there is no common c point.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildExtPICCInterp(hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_BigInt total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/*HYPRE_Int *col_map_offd_P = NULL;*/
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/*HYPRE_Int **ext_p, **ext_p_offd;*/
/*HYPRE_Int ccounter_offd;
HYPRE_Int *clist_offd;*/
HYPRE_Int common_c;
/* Full row information for columns of A that are off diag*/
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_BigInt *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
HYPRE_Int loc_col;
HYPRE_Int full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_BigInt *Sop_j;
HYPRE_Int sgn = 1;
/* Variables to keep count of interpolatory points */
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter;
/* Interpolation weight variables */
HYPRE_Real sum, diagonal, distribute;
HYPRE_Int strong_f_marker = -2;
/* Loop variables */
/*HYPRE_Int index;*/
HYPRE_Int start_indexing = 0;
HYPRE_Int i, i1, i2, jj, kk, k1, jj1;
HYPRE_BigInt big_k1;
/*HYPRE_Int ccounter;
HYPRE_Int *clist, ccounter;*/
/* Definitions */
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Set up off processor information (specifically for neighbors of
* neighbors */
full_off_procNodes = 0;
if (num_procs > 1)
{
hypre_exchange_interp_data(
&CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
A, CF_marker, S, num_functions, dof_func, 1);
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
}
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixBigJ(Sop);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P);
if (n_fine)
{
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
/*clist = hypre_CTAlloc(HYPRE_Int, MAX_C_CONNECTIONS);
for (i = 0; i < MAX_C_CONNECTIONS; i++)
clist[i] = 0;
if (num_procs > 1)
{
clist_offd = hypre_CTAlloc(HYPRE_Int, MAX_C_CONNECTIONS, HYPRE_MEMORY_HOST);
for (i = 0; i < MAX_C_CONNECTIONS; i++)
clist_offd[i] = 0;
}*/
hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
coarse_counter = 0;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
for (i = 0; i < n_fine; i++)
{
P_diag_i[i] = jj_counter;
if (num_procs > 1)
P_offd_i[i] = jj_counter_offd;
if (CF_marker[i] >= 0)
{
jj_counter++;
fine_to_coarse[i] = coarse_counter;
coarse_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i, or C-points that stronly influence F-points
* that strongly influence i.
*--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
/* Initialize ccounter for each f point */
/*ccounter = 0;
ccounter_offd = 0;*/
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* search through diag to find all c neighbors */
i1 = S_diag_j[jj];
if (CF_marker[i1] > 0)
{ /* i1 is a C point */
CF_marker[i1] = 2;
if (P_marker[i1] < P_diag_i[i])
{
/*clist[ccounter++] = i1;*/
P_marker[i1] = jj_counter;
jj_counter++;
}
}
}
/*qsort0(clist,0,ccounter-1);*/
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{ /* search through offd to find all c neighbors */
if (col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] > 0)
{ /* i1 is a C point direct neighbor */
CF_marker_offd[i1] = 2;
if (P_marker_offd[i1] < P_offd_i[i])
{
/*clist_offd[ccounter_offd++] = i1;*/
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
/*qsort0(clist_offd,0,ccounter_offd-1);*/
}
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* Search diag to find f neighbors and determine if common c point */
i1 = S_diag_j[jj];
if (CF_marker[i1] == -1)
{ /* i1 is a F point, loop through it's strong neighbors */
common_c = 0;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] == 2)
{
/*if (hypre_BinarySearch(clist,k1,ccounter) >= 0)
{*/
common_c = 1;
break;
/*kk = S_diag_i[i1+1];
}*/
}
}
if (num_procs > 1 && common_c == 0)
{ /* no common c point yet, check offd */
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if (col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] == 2)
{ /* k1 is a c point check if it is common */
/*if (hypre_BinarySearch(clist_offd,k1,ccounter_offd) >= 0)
{*/
common_c = 1;
break;
/*kk = S_offd_i[i1+1];
}*/
}
}
}
if (!common_c)
{ /* No common c point, extend the interp set */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] > 0)
{
if (P_marker[k1] < P_diag_i[i])
{
P_marker[k1] = jj_counter;
jj_counter++;
/*break;*/
}
}
}
if (num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if (col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] > 0)
{
if (P_marker_offd[k1] < P_offd_i[i])
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
/*break;*/
}
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if (CF_marker_offd[i1] == -1)
{ /* F point; look at neighbors of i1. Sop contains global col
* numbers and entries that could be in S_diag or S_offd or
* neither. */
common_c = 0;
for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{ /* Check if common c */
big_k1 = Sop_j[kk];
if (big_k1 >= col_1 && big_k1 < col_n)
{ /* In S_diag */
loc_col = (HYPRE_Int)(big_k1-col_1);
if (CF_marker[loc_col] == 2)
{
/*if (hypre_BinarySearch(clist,loc_col,ccounter) >= 0)
{*/
common_c = 1;
break;
/*kk = Sop_i[i1+1];
}*/
}
}
else
{
loc_col = (HYPRE_BigInt)(-big_k1 - 1);
if (CF_marker_offd[loc_col] == 2)
{
/*if (hypre_BinarySearch(clist_offd,loc_col,ccounter_offd) >=
0)
{*/
common_c = 1;
break;
/*kk = Sop_i[i1+1];
}*/
}
}
}
if (!common_c)
{
for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{ /* Check if common c */
big_k1 = Sop_j[kk];
if (big_k1 >= col_1 && big_k1 < col_n)
{ /* In S_diag */
loc_col = (HYPRE_Int)(big_k1-col_1);
if (P_marker[loc_col] < P_diag_i[i])
{
P_marker[loc_col] = jj_counter;
jj_counter++;
/*break;*/
}
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if (P_marker_offd[loc_col] < P_offd_i[i])
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
/*break;*/
}
}
}
}
}
}
}
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* search through diag to find all c neighbors */
i1 = S_diag_j[jj];
if (CF_marker[i1] == 2)
CF_marker[i1] = 1;
}
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{ /* search through offd to find all c neighbors */
if (col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] == 2)
{ /* i1 is a C point direct neighbor */
CF_marker_offd[i1] = 1;
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
P_diag_size = jj_counter;
P_offd_size = jj_counter_offd;
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P);
}
P_diag_i[n_fine] = jj_counter;
P_offd_i[n_fine] = jj_counter_offd;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*ccounter = start_indexing;
ccounter_offd = start_indexing;*/
/* Fine to coarse mapping */
if (num_procs > 1)
{
hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes, my_first_cpt,
fine_to_coarse_offd);
}
for (i = 0; i < n_fine; i++)
P_marker[i] = -1;
for (i = 0; i < full_off_procNodes; i++)
P_marker_offd[i] = -1;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
for (i = 0; i < n_fine; i++)
{
jj_begin_row = jj_counter;
if (num_procs > 1)
jj_begin_row_offd = jj_counter_offd;
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
/*ccounter = 0;
ccounter_offd = 0;*/
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* Search C points only */
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] > 0)
{
CF_marker[i1] = 2;
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
/*clist[ccounter++] = i1;*/
}
}
}
/*qsort0(clist,0,ccounter-1);*/
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
if (col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] > 0)
{
CF_marker_offd[i1] = 2;
if (P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
/*clist_offd[ccounter_offd++] = i1;*/
}
}
}
/*qsort0(clist_offd,0,ccounter_offd-1);*/
}
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* Search through F points */
i1 = S_diag_j[jj];
if (CF_marker[i1] == -1)
{
P_marker[i1] = strong_f_marker;
common_c = 0;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] == 2)
{
/*if (hypre_BinarySearch(clist,k1,ccounter) >= 0)
{*/
common_c = 1;
break;
/*kk = S_diag_i[i1+1];
}*/
}
}
if (num_procs > 1 && common_c == 0)
{ /* no common c point yet, check offd */
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if (col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] == 2)
{ /* k1 is a c point check if it is common */
/*if (hypre_BinarySearch(clist_offd,k1,ccounter_offd) >= 0)
{*/
common_c = 1;
break;
/*kk = S_offd_i[i1+1];
}*/
}
}
}
if (!common_c)
{ /* No common c point, extend the interp set */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if (P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[k1];
P_diag_data[jj_counter] = zero;
jj_counter++;
/*break;*/
}
}
}
if (num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if (col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] >= 0)
{
if (P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
/*break;*/
}
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if (CF_marker_offd[i1] == -1)
{ /* F points that are off proc */
P_marker_offd[i1] = strong_f_marker;
common_c = 0;
for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{ /* Check if common c */
big_k1 = Sop_j[kk];
if (big_k1 >= col_1 && big_k1 < col_n)
{ /* In S_diag */
loc_col = (HYPRE_Int)(big_k1-col_1);
if (CF_marker[loc_col] == 2)
{
/*if (hypre_BinarySearch(clist,loc_col,ccounter) >= 0)
{*/
common_c = 1;
break;
/*kk = Sop_i[i1+1];
}*/
}
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if (CF_marker_offd[loc_col] == 2)
{
/*if (hypre_BinarySearch(clist_offd,loc_col,ccounter_offd) >=
0)
{*/
common_c = 1;
break;
/*kk = Sop_i[i1+1];
}*/
}
}
}
if (!common_c)
{
for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
/* Find local col number */
if (big_k1 >= col_1 && big_k1 < col_n)
{
loc_col = (HYPRE_Int)(big_k1-col_1);
if (P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[loc_col];
P_diag_data[jj_counter] = zero;
jj_counter++;
/*break;*/
}
}
else
{
loc_col = (-big_k1 - 1);
if (P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd]=loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
/*break;*/
}
}
}
}
}
}
}
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* Search C points only */
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] == 2)
{
CF_marker[i1] = 1;
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
if (col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] == 2)
{
CF_marker_offd[i1] = 1;
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{ /* i1 is a c-point and strongly influences i, accumulate
* a_(i,i1) into interpolation weight */
i1 = A_diag_j[jj];
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
else if (P_marker[i1] == strong_f_marker)
{
sum = zero;
sgn = 1;
if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
for (jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if ((P_marker[i2] >= jj_begin_row || i2 == i) && (sgn*A_diag_data[jj1]) < 0)
sum += A_diag_data[jj1];
}
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
sum += A_offd_data[jj1];
}
}
if (sum != 0)
{
distribute = A_diag_data[jj]/sum;
/* Loop over row of A for point i1 and do the distribution */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
P_diag_data[P_marker[i2]] +=
distribute*A_diag_data[jj1];
if (i2 == i && (sgn*A_diag_data[jj1]) < 0)
diagonal += distribute*A_diag_data[jj1];
}
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
P_offd_data[P_marker_offd[i2]] +=
distribute*A_offd_data[jj1];
}
}
}
else
diagonal += A_diag_data[jj];
}
/* neighbor i1 weakly influences i, accumulate a_(i,i1) into
* diagonal */
else if (CF_marker[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if (P_marker_offd[i1] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
else if (P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
sgn = 1;
for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if (big_k1 >= col_1 && big_k1 < col_n)
{ /* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (P_marker[loc_col] >= jj_begin_row || loc_col == i)
sum += A_ext_data[jj1];
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if (P_marker_offd[loc_col] >= jj_begin_row_offd)
sum += A_ext_data[jj1];
}
}
if (sum != 0)
{
distribute = A_offd_data[jj] / sum;
for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if (big_k1 >= col_1 && big_k1 < col_n)
{ /* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (P_marker[loc_col] >= jj_begin_row)
P_diag_data[P_marker[loc_col]] += distribute*
A_ext_data[jj1];
if (loc_col == i)
diagonal += distribute*A_ext_data[jj1];
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if (P_marker_offd[loc_col] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[loc_col]] += distribute*
A_ext_data[jj1];
}
}
}
else
diagonal += A_offd_data[jj];
}
else if (CF_marker_offd[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
if (diagonal)
{
for (jj = jj_begin_row; jj < jj_end_row; jj++)
P_diag_data[jj] /= -diagonal;
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
P_offd_data[jj] /= -diagonal;
}
}
strong_f_marker--;
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P;
hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
/* This builds col_map, col_map should be monotone increasing and contain
* global numbers. */
if (P_offd_size)
{
hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
}
hypre_MatvecCommPkgCreate(P);
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
/*hypre_TFree(clist);*/
if (num_procs > 1)
{
/*hypre_TFree(clist_offd);*/
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1)
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildFFInterp
* Comment: Only use FF when there is no common c point.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildFFInterp(hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_BigInt total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/*HYPRE_Int *col_map_offd_P = NULL;*/
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/*HYPRE_Int ccounter_offd;*/
HYPRE_Int common_c;
/* Full row information for columns of A that are off diag*/
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_BigInt *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
HYPRE_Int loc_col;
HYPRE_Int full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_BigInt *Sop_j;
/* Variables to keep count of interpolatory points */
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter;
/* Interpolation weight variables */
HYPRE_Real sum, diagonal, distribute;
HYPRE_Int strong_f_marker = -2;
HYPRE_Int sgn = 1;
/* Loop variables */
/*HYPRE_Int index;*/
HYPRE_Int start_indexing = 0;
HYPRE_Int i, i1, i2, jj, kk, k1, jj1;
HYPRE_BigInt big_k1;
/*HYPRE_Int ccounter;
HYPRE_Int *clist, ccounter;*/
/* Definitions */
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Set up off processor information (specifically for neighbors of
* neighbors */
full_off_procNodes = 0;
if (num_procs > 1)
{
hypre_exchange_interp_data(
&CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
A, CF_marker, S, num_functions, dof_func, 1);
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
}
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixBigJ(Sop);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
    *  Initialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P);
if (n_fine)
{
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
coarse_counter = 0;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
for (i = 0; i < n_fine; i++)
{
P_diag_i[i] = jj_counter;
if (num_procs > 1)
P_offd_i[i] = jj_counter_offd;
if (CF_marker[i] >= 0)
{
jj_counter++;
fine_to_coarse[i] = coarse_counter;
coarse_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
    * strongly influence i, or C-points that strongly influence F-points
* that strongly influence i.
*--------------------------------------------------------------------*/
else
{
/* Initialize ccounter for each f point */
/*ccounter = 0;
ccounter_offd = 0;*/
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* search through diag to find all c neighbors */
i1 = S_diag_j[jj];
if (CF_marker[i1] > 0)
{ /* i1 is a C point */
CF_marker[i1] = 2;
if (P_marker[i1] < P_diag_i[i])
{
P_marker[i1] = jj_counter;
jj_counter++;
}
}
}
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{ /* search through offd to find all c neighbors */
if (col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] > 0)
{ /* i1 is a C point direct neighbor */
CF_marker_offd[i1] = 2;
if (P_marker_offd[i1] < P_offd_i[i])
{
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* Search diag to find f neighbors and determine if common c point */
i1 = S_diag_j[jj];
if (CF_marker[i1] < 0)
        { /* i1 is an F point, loop through its strong neighbors */
common_c = 0;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] == 2)
{
common_c = 1;
break;
}
}
if (num_procs > 1 && common_c == 0)
{ /* no common c point yet, check offd */
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if (col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] == 2)
{
common_c = 1;
break;
}
}
}
if (!common_c)
{ /* No common c point, extend the interp set */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] > 0)
{
if (P_marker[k1] < P_diag_i[i])
{
P_marker[k1] = jj_counter;
jj_counter++;
}
}
}
if (num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if (col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] > 0)
{
if (P_marker_offd[k1] < P_offd_i[i])
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if (CF_marker_offd[i1] < 0)
{ /* F point; look at neighbors of i1. Sop contains global col
* numbers and entries that could be in S_diag or S_offd or
* neither. */
common_c = 0;
for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{ /* Check if common c */
big_k1 = Sop_j[kk];
if (big_k1 >= col_1 && big_k1 < col_n)
{ /* In S_diag */
loc_col = (HYPRE_Int)(big_k1-col_1);
if (CF_marker[loc_col] == 2)
{
common_c = 1;
break;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if (CF_marker_offd[loc_col] == 2)
{
common_c = 1;
break;
}
}
}
if (!common_c)
{
for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{ /* Check if common c */
big_k1 = Sop_j[kk];
if (big_k1 >= col_1 && big_k1 < col_n)
{ /* In S_diag */
loc_col = (HYPRE_Int)(big_k1-col_1);
if (P_marker[loc_col] < P_diag_i[i])
{
P_marker[loc_col] = jj_counter;
jj_counter++;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if (P_marker_offd[loc_col] < P_offd_i[i])
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
}
}
}
}
}
}
}
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* search through diag to find all c neighbors */
i1 = S_diag_j[jj];
if (CF_marker[i1] == 2)
CF_marker[i1] = 1;
}
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{ /* search through offd to find all c neighbors */
if (col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] == 2)
{ /* i1 is a C point direct neighbor */
CF_marker_offd[i1] = 1;
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
P_diag_size = jj_counter;
P_offd_size = jj_counter_offd;
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P);
}
P_diag_i[n_fine] = jj_counter;
P_offd_i[n_fine] = jj_counter_offd;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*ccounter = start_indexing;
ccounter_offd = start_indexing;*/
/* Fine to coarse mapping */
if (num_procs > 1)
{
hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes, my_first_cpt,
fine_to_coarse_offd);
}
for (i = 0; i < n_fine; i++)
P_marker[i] = -1;
for (i = 0; i < full_off_procNodes; i++)
P_marker_offd[i] = -1;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
jj_begin_row_offd = 0;
for (i = 0; i < n_fine; i++)
{
jj_begin_row = jj_counter;
if (num_procs > 1)
jj_begin_row_offd = jj_counter_offd;
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
/*ccounter = 0;
ccounter_offd = 0;*/
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* Search C points only */
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] > 0)
{
CF_marker[i1] = 2;
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
if (col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] > 0)
{
CF_marker_offd[i1] = 2;
if (P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* Search through F points */
i1 = S_diag_j[jj];
if (CF_marker[i1] == -1)
{
P_marker[i1] = strong_f_marker;
common_c = 0;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] == 2)
{
common_c = 1;
break;
}
}
if (num_procs > 1 && common_c == 0)
{ /* no common c point yet, check offd */
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if (col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] == 2)
{
common_c = 1;
break;
}
}
}
if (!common_c)
{ /* No common c point, extend the interp set */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if (P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[k1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if (num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if (col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] >= 0)
{
if (P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if (CF_marker_offd[i1] == -1)
{ /* F points that are off proc */
P_marker_offd[i1] = strong_f_marker;
common_c = 0;
for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{ /* Check if common c */
big_k1 = Sop_j[kk];
if (big_k1 >= col_1 && big_k1 < col_n)
{ /* In S_diag */
loc_col = (HYPRE_Int)(big_k1-col_1);
if (CF_marker[loc_col] == 2)
{
common_c = 1;
break;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if (CF_marker_offd[loc_col] == 2)
{
common_c = 1;
break;
}
}
}
if (!common_c)
{
for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
/* Find local col number */
if (big_k1 >= col_1 && big_k1 < col_n)
{
loc_col = (HYPRE_Int)(big_k1-col_1);
if (P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[loc_col];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if (P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd]=loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
}
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* Search C points only */
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] == 2)
{
CF_marker[i1] = 1;
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
if (col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] == 2)
{
CF_marker_offd[i1] = 1;
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{ /* i1 is a c-point and strongly influences i, accumulate
* a_(i,i1) into interpolation weight */
i1 = A_diag_j[jj];
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
else if (P_marker[i1] == strong_f_marker)
{
sum = zero;
if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
/* Loop over row of A for point i1 and calculate the sum
             * of the connections to c-points that strongly influence i. */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
sum += A_diag_data[jj1];
}
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
sum += A_offd_data[jj1];
}
}
if (sum != 0)
{
distribute = A_diag_data[jj]/sum;
/* Loop over row of A for point i1 and do the distribution */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
P_diag_data[P_marker[i2]] +=
distribute*A_diag_data[jj1];
}
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
P_offd_data[P_marker_offd[i2]] +=
distribute*A_offd_data[jj1];
}
}
}
else
diagonal += A_diag_data[jj];
}
/* neighbor i1 weakly influences i, accumulate a_(i,i1) into
* diagonal */
else if (CF_marker[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if (P_marker_offd[i1] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
else if (P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if (big_k1 >= col_1 && big_k1 < col_n)
{ /* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (P_marker[loc_col] >= jj_begin_row)
sum += A_ext_data[jj1];
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if (P_marker_offd[loc_col] >= jj_begin_row_offd)
sum += A_ext_data[jj1];
}
}
if (sum != 0)
{
distribute = A_offd_data[jj] / sum;
for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if (big_k1 >= col_1 && big_k1 < col_n)
{ /* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (P_marker[loc_col] >= jj_begin_row)
P_diag_data[P_marker[loc_col]] += distribute*
A_ext_data[jj1];
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if (P_marker_offd[loc_col] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[loc_col]] += distribute*
A_ext_data[jj1];
}
}
}
else
diagonal += A_offd_data[jj];
}
else if (CF_marker_offd[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
if (diagonal)
{
for (jj = jj_begin_row; jj < jj_end_row; jj++)
P_diag_data[jj] /= -diagonal;
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
P_offd_data[jj] /= -diagonal;
}
}
strong_f_marker--;
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P;
hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
/* This builds col_map, col_map should be monotone increasing and contain
* global numbers. */
if (P_offd_size)
{
hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
}
hypre_MatvecCommPkgCreate(P);
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1)
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildFF1Interp
 * Comment: Only interpolate through an F-F connection when the two F points
 *          share no common C point.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildFF1Interp(hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_BigInt total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/*HYPRE_Int *col_map_offd_P = NULL;*/
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/*HYPRE_Int ccounter_offd;*/
HYPRE_Int common_c;
/* Full row information for columns of A that are off diag*/
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_BigInt *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
HYPRE_Int loc_col;
HYPRE_Int full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_BigInt *Sop_j;
/* Variables to keep count of interpolatory points */
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter;
/* Interpolation weight variables */
HYPRE_Real sum, diagonal, distribute;
HYPRE_Int strong_f_marker = -2;
HYPRE_Int sgn = 1;
/* Loop variables */
/*HYPRE_Int index;*/
HYPRE_Int start_indexing = 0;
HYPRE_Int i, i1, i2, jj, kk, k1, jj1;
HYPRE_BigInt big_k1;
/*HYPRE_Int ccounter;*/
HYPRE_Int found_c = 0;
/* Definitions */
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Set up off processor information (specifically for neighbors of
    * neighbors) */
full_off_procNodes = 0;
if (num_procs > 1)
{
hypre_exchange_interp_data(
&CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
A, CF_marker, S, num_functions, dof_func, 1);
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
}
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixBigJ(Sop);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
    *  Initialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P);
if (n_fine)
{
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
coarse_counter = 0;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
for (i = 0; i < n_fine; i++)
{
P_diag_i[i] = jj_counter;
if (num_procs > 1)
P_offd_i[i] = jj_counter_offd;
if (CF_marker[i] >= 0)
{
jj_counter++;
fine_to_coarse[i] = coarse_counter;
coarse_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
    * strongly influence i, or C-points that strongly influence F-points
* that strongly influence i.
*--------------------------------------------------------------------*/
else
{
/* Initialize ccounter for each f point */
/*ccounter = 0;
ccounter_offd = 0;*/
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* search through diag to find all c neighbors */
i1 = S_diag_j[jj];
if (CF_marker[i1] > 0)
{ /* i1 is a C point */
CF_marker[i1] = 2;
if (P_marker[i1] < P_diag_i[i])
{
P_marker[i1] = jj_counter;
jj_counter++;
}
}
}
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{ /* search through offd to find all c neighbors */
if (col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] > 0)
{ /* i1 is a C point direct neighbor */
CF_marker_offd[i1] = 2;
if (P_marker_offd[i1] < P_offd_i[i])
{
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* Search diag to find f neighbors and determine if common c point */
i1 = S_diag_j[jj];
if (CF_marker[i1] < 0)
        { /* i1 is an F point, loop through its strong neighbors */
common_c = 0;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] == 2)
{
common_c = 1;
break;
}
}
if (num_procs > 1 && common_c == 0)
{ /* no common c point yet, check offd */
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if (col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] == 2)
{ /* k1 is a c point check if it is common */
common_c = 1;
break;
}
}
}
if (!common_c)
{ /* No common c point, extend the interp set */
found_c = 0;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] > 0)
{
if (P_marker[k1] < P_diag_i[i])
{
P_marker[k1] = jj_counter;
jj_counter++;
found_c = 1;
break;
}
}
}
if (num_procs > 1 && !found_c)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if (col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] > 0)
{
if (P_marker_offd[k1] < P_offd_i[i])
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
break;
}
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if (CF_marker_offd[i1] < 0)
{ /* F point; look at neighbors of i1. Sop contains global col
* numbers and entries that could be in S_diag or S_offd or
* neither. */
common_c = 0;
for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{ /* Check if common c */
big_k1 = Sop_j[kk];
if (big_k1 >= col_1 && big_k1 < col_n)
{ /* In S_diag */
loc_col = (HYPRE_Int)(big_k1-col_1);
if (CF_marker[loc_col] == 2)
{
common_c = 1;
break;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if (CF_marker_offd[loc_col] == 2)
{
common_c = 1;
break;
}
}
}
if (!common_c)
{
for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{ /* Check if common c */
big_k1 = Sop_j[kk];
if (big_k1 >= col_1 && big_k1 < col_n)
{ /* In S_diag */
loc_col = (HYPRE_Int)(big_k1-col_1);
if (P_marker[loc_col] < P_diag_i[i])
{
P_marker[loc_col] = jj_counter;
jj_counter++;
break;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if (P_marker_offd[loc_col] < P_offd_i[i])
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
break;
}
}
}
}
}
}
}
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* search through diag to find all c neighbors */
i1 = S_diag_j[jj];
if (CF_marker[i1] == 2)
CF_marker[i1] = 1;
}
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{ /* search through offd to find all c neighbors */
if (col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] == 2)
{ /* i1 is a C point direct neighbor */
CF_marker_offd[i1] = 1;
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
P_diag_size = jj_counter;
P_offd_size = jj_counter_offd;
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P);
}
P_diag_i[n_fine] = jj_counter;
P_offd_i[n_fine] = jj_counter_offd;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*ccounter = start_indexing;
ccounter_offd = start_indexing;*/
/* Fine to coarse mapping */
if (num_procs > 1)
{
hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes, my_first_cpt,
fine_to_coarse_offd);
}
for (i = 0; i < n_fine; i++)
P_marker[i] = -1;
for (i = 0; i < full_off_procNodes; i++)
P_marker_offd[i] = -1;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
jj_begin_row_offd = 0;
for (i = 0; i < n_fine; i++)
{
jj_begin_row = jj_counter;
if (num_procs > 1)
jj_begin_row_offd = jj_counter_offd;
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
/*ccounter = 0;
ccounter_offd = 0;*/
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* Search C points only */
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] > 0)
{
CF_marker[i1] = 2;
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
if (col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] > 0)
{
CF_marker_offd[i1] = 2;
if (P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* Search through F points */
i1 = S_diag_j[jj];
if (CF_marker[i1] == -1)
{
P_marker[i1] = strong_f_marker;
common_c = 0;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] == 2)
{
common_c = 1;
break;
}
}
if (num_procs > 1 && common_c == 0)
{ /* no common c point yet, check offd */
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if (col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] == 2)
{ /* k1 is a c point check if it is common */
common_c = 1;
break;
}
}
}
if (!common_c)
{ /* No common c point, extend the interp set */
found_c = 0;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if (P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[k1];
P_diag_data[jj_counter] = zero;
jj_counter++;
found_c = 1;
break;
}
}
}
if (num_procs > 1 && !found_c)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if (col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] >= 0)
{
if (P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
break;
}
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if (CF_marker_offd[i1] == -1)
{ /* F points that are off proc */
P_marker_offd[i1] = strong_f_marker;
common_c = 0;
for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{ /* Check if common c */
big_k1 = Sop_j[kk];
if (big_k1 >= col_1 && big_k1 < col_n)
{ /* In S_diag */
loc_col = (HYPRE_Int)(big_k1-col_1);
if (CF_marker[loc_col] == 2)
{
common_c = 1;
break;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if (CF_marker_offd[loc_col] == 2)
{
common_c = 1;
break;
}
}
}
if (!common_c)
{
for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
/* Find local col number */
if (big_k1 >= col_1 && big_k1 < col_n)
{
loc_col = (HYPRE_Int)(big_k1-col_1);
if (P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[loc_col];
P_diag_data[jj_counter] = zero;
jj_counter++;
break;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if (P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd]=loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
break;
}
}
}
}
}
}
}
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* Search C points only */
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] == 2)
{
CF_marker[i1] = 1;
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
if (col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] == 2)
{
CF_marker_offd[i1] = 1;
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{ /* i1 is a c-point and strongly influences i, accumulate
* a_(i,i1) into interpolation weight */
i1 = A_diag_j[jj];
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
else if (P_marker[i1] == strong_f_marker)
{
sum = zero;
if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
/* Loop over row of A for point i1 and calculate the sum
* of the connections to c-points that strongly incluence i. */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
sum += A_diag_data[jj1];
}
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
sum += A_offd_data[jj1];
}
}
if (sum != 0)
{
distribute = A_diag_data[jj]/sum;
/* Loop over row of A for point i1 and do the distribution */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
P_diag_data[P_marker[i2]] +=
distribute*A_diag_data[jj1];
}
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
P_offd_data[P_marker_offd[i2]] +=
distribute*A_offd_data[jj1];
}
}
}
else
diagonal += A_diag_data[jj];
}
/* neighbor i1 weakly influences i, accumulate a_(i,i1) into
* diagonal */
else if (CF_marker[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if (P_marker_offd[i1] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
else if (P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if (big_k1 >= col_1 && big_k1 < col_n)
{ /* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (P_marker[loc_col] >= jj_begin_row)
sum += A_ext_data[jj1];
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if (P_marker_offd[loc_col] >= jj_begin_row_offd)
sum += A_ext_data[jj1];
}
}
if (sum != 0)
{
distribute = A_offd_data[jj] / sum;
for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if (big_k1 >= col_1 && big_k1 < col_n)
{ /* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if (P_marker[loc_col] >= jj_begin_row)
P_diag_data[P_marker[loc_col]] += distribute*
A_ext_data[jj1];
}
else
{
loc_col = - (HYPRE_Int)big_k1 - 1;
if (P_marker_offd[loc_col] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[loc_col]] += distribute*
A_ext_data[jj1];
}
}
}
else
diagonal += A_offd_data[jj];
}
else if (CF_marker_offd[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
if (diagonal)
{
for (jj = jj_begin_row; jj < jj_end_row; jj++)
P_diag_data[jj] /= -diagonal;
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
P_offd_data[jj] /= -diagonal;
}
}
strong_f_marker--;
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P;
hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
/* This builds col_map, col_map should be monotone increasing and contain
* global numbers. */
if (P_offd_size)
{
hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
}
hypre_MatvecCommPkgCreate(P);
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
/*hynre_TFree(clist);*/
if (num_procs > 1)
{
/*hypre_TFree(clist_offd);*/
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1)
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildExtInterpHost
 *
 * Host (CPU) implementation of the extended interpolation operator build.
 * Builds the interpolation matrix P for BoomerAMG using the "extended"
 * interpolation scheme: an F-point interpolates from the C-points that
 * strongly influence it, plus the C-points that strongly influence its
 * strongly-influencing F-neighbors (distance-2 C-points).
 *
 * Parameters:
 *   A               - fine-grid operator (ParCSR)
 *   CF_marker       - C/F splitting: >= 0 C-point, -1 F-point, -3 special
 *                     F-point excluded from interpolation (restored to -1
 *                     on exit)
 *   S               - strength-of-connection matrix matching A's structure
 *   num_cpts_global - global coarse-point partitioning
 *   num_functions   - number of functions (unknowns per grid point)
 *   dof_func        - function index per dof (used when num_functions > 1)
 *   debug_flag      - 4 enables wall-clock timing printouts
 *   trunc_factor    - relative drop tolerance for truncating P (0 = off)
 *   max_elmts       - max nonzeros kept per row of P (0 = unlimited)
 *   col_offd_S_to_A - optional map from S's offd columns to A's offd columns
 *   P_ptr           - output: the constructed interpolation matrix
 *
 * Returns hypre_error_flag.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildExtInterpHost(hypre_ParCSRMatrix *A,
                                  HYPRE_Int *CF_marker,
                                  hypre_ParCSRMatrix *S,
                                  HYPRE_BigInt *num_cpts_global,
                                  HYPRE_Int num_functions,
                                  HYPRE_Int *dof_func,
                                  HYPRE_Int debug_flag,
                                  HYPRE_Real trunc_factor,
                                  HYPRE_Int max_elmts,
                                  HYPRE_Int *col_offd_S_to_A,
                                  hypre_ParCSRMatrix **P_ptr)
{
   /* Communication Variables */
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   HYPRE_Int my_id, num_procs;
   HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A);
   /* Variables to store input variables */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   /*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
     HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
   /* [col_1, col_n) is this rank's global row/column ownership range;
    * used to classify global column indices as diag vs offd. */
   HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
   HYPRE_BigInt total_global_cpts, my_first_cpt;
   /* Variables to store strong connection matrix info */
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
   /* Interpolation matrix P */
   hypre_ParCSRMatrix *P;
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   HYPRE_Real *P_diag_data = NULL;
   HYPRE_Int *P_diag_i, *P_diag_j = NULL;
   HYPRE_Real *P_offd_data = NULL;
   HYPRE_Int *P_offd_i, *P_offd_j = NULL;
   /*HYPRE_Int *col_map_offd_P = NULL;*/
   HYPRE_Int P_diag_size;
   HYPRE_Int P_offd_size;
   /* P_marker/P_marker_offd record, per candidate column, the position in
    * P's row currently being filled (or a negative sentinel). */
   HYPRE_Int *P_marker = NULL;
   HYPRE_Int *P_marker_offd = NULL;
   HYPRE_Int *CF_marker_offd = NULL;
   HYPRE_Int *tmp_CF_marker_offd = NULL;
   HYPRE_Int *dof_func_offd = NULL;
   /* Full row information for columns of A that are off diag*/
   hypre_CSRMatrix *A_ext;
   HYPRE_Real *A_ext_data;
   HYPRE_Int *A_ext_i;
   HYPRE_BigInt *A_ext_j;
   HYPRE_Int *fine_to_coarse = NULL;
   HYPRE_BigInt *fine_to_coarse_offd = NULL;
   HYPRE_Int loc_col;
   HYPRE_Int full_off_procNodes;
   /* Sop: strength rows of off-processor points (global column numbers) */
   hypre_CSRMatrix *Sop;
   HYPRE_Int *Sop_i;
   HYPRE_BigInt *Sop_j;
   HYPRE_Int sgn = 1;
   /* Variables to keep count of interpolatory points */
   HYPRE_Int jj_counter, jj_counter_offd;
   HYPRE_Int jj_begin_row, jj_end_row;
   HYPRE_Int jj_begin_row_offd = 0;
   HYPRE_Int jj_end_row_offd = 0;
   HYPRE_Int coarse_counter;
   /* Interpolation weight variables */
   HYPRE_Real sum, diagonal, distribute;
   /* Unique (decreasing) negative tag marking strong F-neighbors of the
    * current row; kept below -2 so it never collides with row positions. */
   HYPRE_Int strong_f_marker = -2;
   /* Loop variables */
   /*HYPRE_Int index;*/
   HYPRE_Int start_indexing = 0;
   HYPRE_Int i, i1, i2, jj, kk, k1, jj1;
   HYPRE_BigInt big_k1;
   /* Definitions */
   HYPRE_Real zero = 0.0;
   HYPRE_Real one = 1.0;
   HYPRE_Real wall_time;
   hypre_ParCSRCommPkg *extend_comm_pkg = NULL;

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   /* BEGIN */
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);

#ifdef HYPRE_NO_GLOBAL_PARTITION
   my_first_cpt = num_cpts_global[0];
   /* Last rank knows the global C-point count; broadcast it. */
   if (my_id == (num_procs -1))
   {
      total_global_cpts = num_cpts_global[1];
   }
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
   my_first_cpt = num_cpts_global[my_id];
   total_global_cpts = num_cpts_global[num_procs];
#endif

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   /* Set up off processor information (specifically for neighbors of
    * neighbors */
   full_off_procNodes = 0;
   if (num_procs > 1)
   {
      /* Fetch CF markers, dof functions, full A rows (A_ext) and strength
       * rows (Sop) for all off-processor points reachable in two hops. */
      hypre_exchange_interp_data(
         &CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
         A, CF_marker, S, num_functions, dof_func, 1);
      {
#ifdef HYPRE_PROFILE
         hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
      }
      A_ext_i = hypre_CSRMatrixI(A_ext);
      A_ext_j = hypre_CSRMatrixBigJ(A_ext);
      A_ext_data = hypre_CSRMatrixData(A_ext);
      Sop_i = hypre_CSRMatrixI(Sop);
      Sop_j = hypre_CSRMatrixBigJ(Sop);
   }

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    * Intialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P);
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, memory_location_P);

   if (n_fine)
   {
      fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
      P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   }

   if (full_off_procNodes)
   {
      P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
      fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
      tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
   }

   hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
                         fine_to_coarse_offd, P_marker, P_marker_offd,
                         tmp_CF_marker_offd);

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   coarse_counter = 0;

   /*-----------------------------------------------------------------------
    * Loop over fine grid: count the nonzeros each row of P will need.
    * P_marker[k] < P_diag_i[i] means column k has not yet been counted
    * for row i (markers from previous rows are always smaller).
    *-----------------------------------------------------------------------*/
   for (i = 0; i < n_fine; i++)
   {
      P_diag_i[i] = jj_counter;
      if (num_procs > 1)
      {
         P_offd_i[i] = jj_counter_offd;
      }

      if (CF_marker[i] >= 0)
      {
         /* C-point: one entry (identity) */
         jj_counter++;
         fine_to_coarse[i] = coarse_counter;
         coarse_counter++;
      }

      /*--------------------------------------------------------------------
       * If i is an F-point, interpolation is from the C-points that
       * strongly influence i, or C-points that stronly influence F-points
       * that strongly influence i.
       *--------------------------------------------------------------------*/
      else if (CF_marker[i] != -3)
      {
         for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
         {
            i1 = S_diag_j[jj];
            if (CF_marker[i1] >= 0)
            { /* i1 is a C point */
               if (P_marker[i1] < P_diag_i[i])
               {
                  P_marker[i1] = jj_counter;
                  jj_counter++;
               }
            }
            else if (CF_marker[i1] != -3)
            { /* i1 is a F point, loop through it's strong neighbors */
               for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
               {
                  k1 = S_diag_j[kk];
                  if (CF_marker[k1] >= 0)
                  {
                     if (P_marker[k1] < P_diag_i[i])
                     {
                        P_marker[k1] = jj_counter;
                        jj_counter++;
                     }
                  }
               }
               if (num_procs > 1)
               {
                  for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
                  {
                     if (col_offd_S_to_A)
                     {
                        k1 = col_offd_S_to_A[S_offd_j[kk]];
                     }
                     else
                     {
                        k1 = S_offd_j[kk];
                     }
                     if (CF_marker_offd[k1] >= 0)
                     {
                        if (P_marker_offd[k1] < P_offd_i[i])
                        {
                           /* flag column k1 as actually used by P_offd */
                           tmp_CF_marker_offd[k1] = 1;
                           P_marker_offd[k1] = jj_counter_offd;
                           jj_counter_offd++;
                        }
                     }
                  }
               }
            }
         }
         /* Look at off diag strong connections of i */
         if (num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
            {
               i1 = S_offd_j[jj];
               if (col_offd_S_to_A)
               {
                  i1 = col_offd_S_to_A[i1];
               }
               if (CF_marker_offd[i1] >= 0)
               {
                  if (P_marker_offd[i1] < P_offd_i[i])
                  {
                     tmp_CF_marker_offd[i1] = 1;
                     P_marker_offd[i1] = jj_counter_offd;
                     jj_counter_offd++;
                  }
               }
               else if (CF_marker_offd[i1] != -3)
               { /* F point; look at neighbors of i1. Sop contains global col
                  * numbers and entries that could be in S_diag or S_offd or
                  * neither. */
                  for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
                  {
                     big_k1 = Sop_j[kk];
                     if (big_k1 >= col_1 && big_k1 < col_n)
                     { /* In S_diag */
                        loc_col = (HYPRE_Int)(big_k1-col_1);
                        if (P_marker[loc_col] < P_diag_i[i])
                        {
                           P_marker[loc_col] = jj_counter;
                           jj_counter++;
                        }
                     }
                     else
                     {
                        /* negative encoding: -(local offd col) - 1 */
                        loc_col = -(HYPRE_Int)big_k1 - 1;
                        if (P_marker_offd[loc_col] < P_offd_i[i])
                        {
                           P_marker_offd[loc_col] = jj_counter_offd;
                           tmp_CF_marker_offd[loc_col] = 1;
                           jj_counter_offd++;
                        }
                     }
                  }
               }
            }
         }
      }
   }

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d determine structure %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }
   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/
   if (debug_flag== 4)
   {
      wall_time = time_getWallclockSeconds();
   }

   P_diag_size = jj_counter;
   P_offd_size = jj_counter_offd;

   if (P_diag_size)
   {
      P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, memory_location_P);
      P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, memory_location_P);
   }

   if (P_offd_size)
   {
      P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, memory_location_P);
      P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, memory_location_P);
   }

   P_diag_i[n_fine] = jj_counter;
   P_offd_i[n_fine] = jj_counter_offd;

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /* Fine to coarse mapping */
   if (num_procs > 1)
   {
      hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
                                 full_off_procNodes, my_first_cpt,
                                 fine_to_coarse_offd);
   }

   for (i = 0; i < n_fine; i++)
   {
      P_marker[i] = -1;
   }

   for (i = 0; i < full_off_procNodes; i++)
   {
      P_marker_offd[i] = -1;
   }

   /*-----------------------------------------------------------------------
    * Second pass: loop over fine grid points and fill P's column indices
    * and weights. Structure discovery mirrors the first pass; weights are
    * then accumulated from A (classical interpolation with distribution of
    * strong-F connections).
    *-----------------------------------------------------------------------*/
   for (i = 0; i < n_fine; i++)
   {
      jj_begin_row = jj_counter;
      jj_begin_row_offd = jj_counter_offd;

      /*--------------------------------------------------------------------
       * If i is a c-point, interpolation is the identity.
       *--------------------------------------------------------------------*/
      if (CF_marker[i] >= 0)
      {
         P_diag_j[jj_counter] = fine_to_coarse[i];
         P_diag_data[jj_counter] = one;
         jj_counter++;
      }

      /*--------------------------------------------------------------------
       * If i is an F-point, build interpolation.
       *--------------------------------------------------------------------*/
      else if (CF_marker[i] != -3)
      {
         strong_f_marker--;
         for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
         {
            i1 = S_diag_j[jj];
            /*--------------------------------------------------------------
             * If neighbor i1 is a C-point, set column number in P_diag_j
             * and initialize interpolation weight to zero.
             *--------------------------------------------------------------*/
            if (CF_marker[i1] >= 0)
            {
               if (P_marker[i1] < jj_begin_row)
               {
                  P_marker[i1] = jj_counter;
                  P_diag_j[jj_counter] = fine_to_coarse[i1];
                  P_diag_data[jj_counter] = zero;
                  jj_counter++;
               }
            }
            else if (CF_marker[i1] != -3)
            {
               /* strong F-neighbor: tag it, then add its strong C-points */
               P_marker[i1] = strong_f_marker;
               for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
               {
                  k1 = S_diag_j[kk];
                  if (CF_marker[k1] >= 0)
                  {
                     if (P_marker[k1] < jj_begin_row)
                     {
                        P_marker[k1] = jj_counter;
                        P_diag_j[jj_counter] = fine_to_coarse[k1];
                        P_diag_data[jj_counter] = zero;
                        jj_counter++;
                     }
                  }
               }
               if (num_procs > 1)
               {
                  for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
                  {
                     if (col_offd_S_to_A)
                        k1 = col_offd_S_to_A[S_offd_j[kk]];
                     else
                        k1 = S_offd_j[kk];
                     if (CF_marker_offd[k1] >= 0)
                     {
                        if (P_marker_offd[k1] < jj_begin_row_offd)
                        {
                           P_marker_offd[k1] = jj_counter_offd;
                           P_offd_j[jj_counter_offd] = k1;
                           P_offd_data[jj_counter_offd] = zero;
                           jj_counter_offd++;
                        }
                     }
                  }
               }
            }
         }

         if ( num_procs > 1)
         {
            for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
            {
               i1 = S_offd_j[jj];
               if (col_offd_S_to_A)
                  i1 = col_offd_S_to_A[i1];
               if ( CF_marker_offd[i1] >= 0)
               {
                  if (P_marker_offd[i1] < jj_begin_row_offd)
                  {
                     P_marker_offd[i1] = jj_counter_offd;
                     P_offd_j[jj_counter_offd] = i1;
                     P_offd_data[jj_counter_offd] = zero;
                     jj_counter_offd++;
                  }
               }
               else if (CF_marker_offd[i1] != -3)
               {
                  P_marker_offd[i1] = strong_f_marker;
                  for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
                  {
                     big_k1 = Sop_j[kk];
                     /* Find local col number */
                     if (big_k1 >= col_1 && big_k1 < col_n)
                     {
                        loc_col = (HYPRE_Int)(big_k1-col_1);
                        if (P_marker[loc_col] < jj_begin_row)
                        {
                           P_marker[loc_col] = jj_counter;
                           P_diag_j[jj_counter] = fine_to_coarse[loc_col];
                           P_diag_data[jj_counter] = zero;
                           jj_counter++;
                        }
                     }
                     else
                     {
                        loc_col = -(HYPRE_Int)big_k1 - 1;
                        if (P_marker_offd[loc_col] < jj_begin_row_offd)
                        {
                           P_marker_offd[loc_col] = jj_counter_offd;
                           P_offd_j[jj_counter_offd]=loc_col;
                           P_offd_data[jj_counter_offd] = zero;
                           jj_counter_offd++;
                        }
                     }
                  }
               }
            }
         }

         jj_end_row = jj_counter;
         jj_end_row_offd = jj_counter_offd;

         /* Weight accumulation: start from a_ii, skip the diagonal entry */
         diagonal = A_diag_data[A_diag_i[i]];

         for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
         { /* i1 is a c-point and strongly influences i, accumulate
            * a_(i,i1) into interpolation weight */
            i1 = A_diag_j[jj];
            if (P_marker[i1] >= jj_begin_row)
            {
               P_diag_data[P_marker[i1]] += A_diag_data[jj];
            }
            else if (P_marker[i1] == strong_f_marker)
            {
               sum = zero;
               /* sign of a_(i1,i1) selects which connections participate in
                * the distribution (opposite-signed entries only) */
               sgn = 1;
               if (A_diag_data[A_diag_i[i1]] < 0)
               {
                  sgn = -1;
               }
               /* Loop over row of A for point i1 and calculate the sum
                * of the connections to c-points that strongly incluence i. */
               for (jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
               {
                  i2 = A_diag_j[jj1];
                  if ((P_marker[i2] >= jj_begin_row ) && (sgn*A_diag_data[jj1]) < 0)
                  {
                     sum += A_diag_data[jj1];
                  }
               }
               if (num_procs > 1)
               {
                  for (jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++)
                  {
                     i2 = A_offd_j[jj1];
                     if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0)
                     {
                        sum += A_offd_data[jj1];
                     }
                  }
               }
               if (sum != 0)
               {
                  distribute = A_diag_data[jj]/sum;
                  /* Loop over row of A for point i1 and do the distribution */
                  for (jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
                  {
                     i2 = A_diag_j[jj1];
                     if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
                     {
                        P_diag_data[P_marker[i2]] += distribute*A_diag_data[jj1];
                     }
                  }
                  if (num_procs > 1)
                  {
                     for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
                     {
                        i2 = A_offd_j[jj1];
                        if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0)
                        {
                           P_offd_data[P_marker_offd[i2]] += distribute*A_offd_data[jj1];
                        }
                     }
                  }
               }
               else
               {
                  /* nothing to distribute to: lump into the diagonal */
                  diagonal += A_diag_data[jj];
               }
            }
            /* neighbor i1 weakly influences i, accumulate a_(i,i1) into
             * diagonal */
            else if (CF_marker[i1] != -3)
            {
               if (num_functions == 1 || dof_func[i] == dof_func[i1])
               {
                  diagonal += A_diag_data[jj];
               }
            }
         }

         if (num_procs > 1)
         {
            for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
            {
               i1 = A_offd_j[jj];
               if (P_marker_offd[i1] >= jj_begin_row_offd)
               {
                  P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
               }
               else if (P_marker_offd[i1] == strong_f_marker)
               {
                  /* off-proc strong F-neighbor: use its full row from A_ext */
                  sum = zero;
                  for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
                  {
                     big_k1 = A_ext_j[jj1];
                     if (big_k1 >= col_1 && big_k1 < col_n)
                     { /* diag */
                        loc_col = (HYPRE_Int)(big_k1 - col_1);
                        if (P_marker[loc_col] >= jj_begin_row )
                        {
                           sum += A_ext_data[jj1];
                        }
                     }
                     else
                     {
                        loc_col = -(HYPRE_Int)big_k1 - 1;
                        if (P_marker_offd[loc_col] >= jj_begin_row_offd)
                        {
                           sum += A_ext_data[jj1];
                        }
                     }
                  }
                  if (sum != 0)
                  {
                     distribute = A_offd_data[jj] / sum;
                     for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
                     {
                        big_k1 = A_ext_j[jj1];
                        if (big_k1 >= col_1 && big_k1 < col_n)
                        { /* diag */
                           loc_col = (HYPRE_Int)(big_k1 - col_1);
                           if (P_marker[loc_col] >= jj_begin_row)
                           {
                              P_diag_data[P_marker[loc_col]] += distribute * A_ext_data[jj1];
                           }
                        }
                        else
                        {
                           loc_col = -(HYPRE_Int)big_k1 - 1;
                           if (P_marker_offd[loc_col] >= jj_begin_row_offd)
                           {
                              P_offd_data[P_marker_offd[loc_col]] += distribute*A_ext_data[jj1];
                           }
                        }
                     }
                  }
                  else
                  {
                     diagonal += A_offd_data[jj];
                  }
               }
               else if (CF_marker_offd[i1] != -3)
               {
                  if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
                  {
                     diagonal += A_offd_data[jj];
                  }
               }
            }
         }

         /* Normalize the row: w_ij = -accumulated / diagonal */
         if (diagonal)
         {
            for (jj = jj_begin_row; jj < jj_end_row; jj++)
            {
               P_diag_data[jj] /= -diagonal;
            }
            for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
            {
               P_offd_data[jj] /= -diagonal;
            }
         }
      }
      strong_f_marker--;
   }

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d fill structure %f\n", my_id, wall_time);
      fflush(NULL);
   }
   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/

   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;

   hypre_CSRMatrixMemoryLocation(P_diag) = memory_location_P;
   hypre_CSRMatrixMemoryLocation(P_offd) = memory_location_P;

   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      /* truncation may reallocate: refresh local pointers and sizes */
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }

   /* This builds col_map, col_map should be monotone increasing and contain
    * global numbers. */
   if (P_offd_size)
   {
      hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
   }

   hypre_MatvecCommPkgCreate(P);

   /* restore the -3 markers to plain F-points for the caller */
   for (i=0; i < n_fine; i++)
   {
      if (CF_marker[i] == -3)
      {
         CF_marker[i] = -1;
      }
   }

   *P_ptr = P;

   /* Deallocate memory */
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(P_marker, HYPRE_MEMORY_HOST);

   if (num_procs > 1)
   {
      hypre_CSRMatrixDestroy(Sop);
      hypre_CSRMatrixDestroy(A_ext);
      hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
      if (num_functions > 1)
      {
         hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
      }
      hypre_MatvecCommPkgDestroy(extend_comm_pkg);
   }

   return hypre_error_flag;
}
/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildExtInterp
 *
 * Dispatcher for the extended interpolation build: selects the host or
 * device implementation from A's memory location. All arguments are
 * forwarded unchanged; see hypre_BoomerAMGBuildExtInterpHost for their
 * meaning. Returns the error code of the selected implementation.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildExtInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
                              hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global,
                              HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag,
                              HYPRE_Real trunc_factor, HYPRE_Int max_elmts,
                              HYPRE_Int *col_offd_S_to_A,
                              hypre_ParCSRMatrix **P_ptr)
{
   HYPRE_Int ret = 0;
   HYPRE_ExecutionPolicy exec;

#if defined(HYPRE_USING_CUDA)
   /* NVTX range so the build shows up in profiler timelines */
   hypre_NvtxPushRange("ExtInterp");
#endif

   exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );

   if (exec == HYPRE_EXEC_HOST)
   {
      ret = hypre_BoomerAMGBuildExtInterpHost(A,CF_marker,S,num_cpts_global,num_functions,dof_func,
                                              debug_flag,trunc_factor,max_elmts,col_offd_S_to_A,P_ptr);
   }
#if defined(HYPRE_USING_CUDA)
   else
   {
      ret = hypre_BoomerAMGBuildExtInterpDevice(A,CF_marker,S,num_cpts_global,num_functions,dof_func,
                                                debug_flag,trunc_factor,max_elmts,col_offd_S_to_A,P_ptr);
   }

   hypre_NvtxPopRange();
#endif

   return ret;
}
/*-----------------------------------------------------------------------*/
/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildExtPIInterp
 *
 * Dispatcher for the extended+i interpolation build: selects the host or
 * device implementation from A's memory location. Note that the device
 * path does not take col_offd_S_to_A (its signature differs from the host
 * routine). Returns the error code of the selected implementation.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildExtPIInterp(hypre_ParCSRMatrix *A,
                                HYPRE_Int *CF_marker,
                                hypre_ParCSRMatrix *S,
                                HYPRE_BigInt *num_cpts_global,
                                HYPRE_Int num_functions,
                                HYPRE_Int *dof_func,
                                HYPRE_Int debug_flag,
                                HYPRE_Real trunc_factor,
                                HYPRE_Int max_elmts,
                                HYPRE_Int *col_offd_S_to_A,
                                hypre_ParCSRMatrix **P_ptr)
{
   HYPRE_Int ret = 0;
   HYPRE_ExecutionPolicy exec;

#if defined(HYPRE_USING_CUDA)
   /* NVTX range so the build shows up in profiler timelines */
   hypre_NvtxPushRange("ExtPIInterp");
#endif

   exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );

   if (exec == HYPRE_EXEC_HOST)
   {
      ret = hypre_BoomerAMGBuildExtPIInterpHost(A, CF_marker, S, num_cpts_global, num_functions, dof_func,
                                                debug_flag, trunc_factor, max_elmts, col_offd_S_to_A, P_ptr);
   }
#if defined(HYPRE_USING_CUDA)
   else
   {
      ret = hypre_BoomerAMGBuildExtPIInterpDevice(A, CF_marker, S, num_cpts_global, num_functions, dof_func,
                                                  debug_flag, trunc_factor, max_elmts, P_ptr);
   }

   hypre_NvtxPopRange();
#endif

   return ret;
}
|
triplet_iw.c | /* Copyright (C) 2016 Atsushi Togo */
/* All rights reserved. */
/* This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include <stddef.h>
#include <math.h>
#include <phonoc_utils.h>
#include <triplet_h/triplet.h>
#include <triplet_h/triplet_iw.h>
#include <tetrahedron_method.h>
/* Forward declarations of file-local helpers. */

/* Build the three (f1+f2, -f1+f2, f1-f2) frequency combinations at every
 * tetrahedron vertex for bands b1/b2. */
static void set_freq_vertices(double freq_vertices[3][24][4],
                              const double *frequencies,
                              TPLCONST size_t vertices[2][24][4],
                              const int num_band,
                              const int b1,
                              const int b2);
/* Evaluate the three tetrahedron integration weights at f0; returns 1 when
 * all three vanish (f0 outside every tetrahedron). */
static int set_g(double g[3],
                 const double f0,
                 TPLCONST double freq_vertices[3][24][4]);
/* Return 1 when f0 lies inside the [min, max] frequency range spanned by
 * the 24 tetrahedra vertices, else 0. */
static int in_tetrahedra(const double f0, TPLCONST double freq_vertices[24][4]);
/* Collect the grid-point indices of the 24 tetrahedra surrounding the
 * second and third q-points of a triplet. */
static void get_triplet_tetrahedra_vertices(
  size_t vertices[2][24][4],
  TPLCONST int tp_relative_grid_address[2][24][4][3],
  const int mesh[3],
  const size_t triplet[3],
  TPLCONST int (*bz_grid_address)[3],
  const size_t *bz_map);
/* Compute tetrahedron-method integration weights for one triplet.
 *
 * For every (band1, band2) pair and every sampling frequency point j, the
 * three channel weights g[0..2] are evaluated and stored into iw at
 * stride num_band_prod: slot 0 holds g0, slot 1 holds g1 - g2, and (when
 * num_iw == 3) slot 2 holds g0 + g1 + g2. iw_zero[adrs] is set to 1 when
 * all three weights vanish, so callers can skip those elements.
 * Parallelized over band pairs when openmp_per_bands is nonzero. */
void
tpi_get_integration_weight(double *iw,
                           char *iw_zero,
                           const double *frequency_points,
                           const size_t num_band0,
                           TPLCONST int tp_relative_grid_address[2][24][4][3],
                           const int mesh[3],
                           const size_t triplets[3],
                           const size_t num_triplets,
                           TPLCONST int (*bz_grid_address)[3],
                           const size_t *bz_map,
                           const double *frequencies,
                           const size_t num_band,
                           const size_t num_iw,
                           const int openmp_per_bands)
{
  size_t j, b1, b2, b12, num_band_prod, adrs_shift;
  size_t vertices[2][24][4];
  double g[3];
  double freq_vertices[3][24][4];

  /* Tetrahedra vertex grid points depend only on the triplet, not on the
   * bands, so they are computed once up front. */
  get_triplet_tetrahedra_vertices(vertices,
                                  tp_relative_grid_address,
                                  mesh,
                                  triplets,
                                  bz_grid_address,
                                  bz_map);
  /* Stride between the g0, g1-g2 and g0+g1+g2 planes of iw. */
  num_band_prod = num_triplets * num_band0 * num_band * num_band;
#pragma omp parallel for private(j, b1, b2, adrs_shift, g, freq_vertices) if (openmp_per_bands)
  for (b12 = 0; b12 < num_band * num_band; b12++) {
    /* Flattened band-pair index: b12 = b1 * num_band + b2. */
    b1 = b12 / num_band;
    b2 = b12 % num_band;
    set_freq_vertices
      (freq_vertices, frequencies, vertices, num_band, b1, b2);
    for (j = 0; j < num_band0; j++) {
      adrs_shift = j * num_band * num_band + b1 * num_band + b2;
      iw_zero[adrs_shift] = set_g(g, frequency_points[j], freq_vertices);
      iw[adrs_shift] = g[0];
      adrs_shift += num_band_prod;
      iw[adrs_shift] = g[1] - g[2];
      if (num_iw == 3) {
        adrs_shift += num_band_prod;
        iw[adrs_shift] = g[0] + g[1] + g[2];
      }
    }
  }
}
/* Smearing-method analogue of tpi_get_integration_weight: the three channel
 * weights are Gaussians of width sigma centered on the three frequency
 * combinations. When cutoff > 0 and f0 is farther than cutoff from all
 * three combinations, the weights are zero and iw_zero marks the element.
 * Storage layout matches the tetrahedron version: g0, then g1 - g2, then
 * (when num_iw == 3) g0 + g1 + g2, at stride const_adrs_shift. */
void tpi_get_integration_weight_with_sigma(double *iw,
                                           char *iw_zero,
                                           const double sigma,
                                           const double cutoff,
                                           const double *frequency_points,
                                           const size_t num_band0,
                                           const size_t triplet[3],
                                           const size_t const_adrs_shift,
                                           const double *frequencies,
                                           const size_t num_band,
                                           const size_t num_iw,
                                           const int openmp_per_bands)
{
  size_t fp_i, pair, band1, band2, adrs;
  double fpoint, freq1, freq2, w0, w1, w2;

#pragma omp parallel for private(fp_i, band1, band2, fpoint, freq1, freq2, w0, w1, w2, adrs) if (openmp_per_bands)
  for (pair = 0; pair < num_band * num_band; pair++) {
    /* Flattened band-pair index: pair = band1 * num_band + band2. */
    band1 = pair / num_band;
    band2 = pair % num_band;
    freq1 = frequencies[triplet[1] * num_band + band1];
    freq2 = frequencies[triplet[2] * num_band + band2];
    for (fp_i = 0; fp_i < num_band0; fp_i++) {
      fpoint = frequency_points[fp_i];
      adrs = fp_i * num_band * num_band + band1 * num_band + band2;
      if (cutoff > 0 &&
          fabs(fpoint - freq1 - freq2) > cutoff &&
          fabs(fpoint + freq1 - freq2) > cutoff &&
          fabs(fpoint - freq1 + freq2) > cutoff) {
        /* All three channels lie beyond the cutoff: zero weights. */
        iw_zero[adrs] = 1;
        w0 = 0;
        w1 = 0;
        w2 = 0;
      } else {
        iw_zero[adrs] = 0;
        w0 = gaussian(fpoint - freq1 - freq2, sigma);
        w1 = gaussian(fpoint + freq1 - freq2, sigma);
        w2 = gaussian(fpoint - freq1 + freq2, sigma);
      }
      iw[adrs] = w0;
      adrs += const_adrs_shift;
      iw[adrs] = w1 - w2;
      if (num_iw == 3) {
        adrs += const_adrs_shift;
        iw[adrs] = w0 + w1 + w2;
      }
    }
  }
}
/* Fill freq_vertices with the three frequency combinations (f1 + f2,
 * -f1 + f2, f1 - f2) for bands b1/b2 at every corner of the 24 tetrahedra.
 * Negative (imaginary-mode) frequencies are clamped to zero first. */
static void set_freq_vertices(double freq_vertices[3][24][4],
                              const double *frequencies,
                              TPLCONST size_t vertices[2][24][4],
                              const int num_band,
                              const int b1,
                              const int b2)
{
  int tet, corner;
  double fa, fb;

  for (tet = 0; tet < 24; tet++) {
    for (corner = 0; corner < 4; corner++) {
      fa = frequencies[vertices[0][tet][corner] * num_band + b1];
      fb = frequencies[vertices[1][tet][corner] * num_band + b2];
      /* clamp unstable (negative) frequencies */
      fa = (fa < 0) ? 0 : fa;
      fb = (fb < 0) ? 0 : fb;
      freq_vertices[0][tet][corner] = fa + fb;
      freq_vertices[1][tet][corner] = -fa + fb;
      freq_vertices[2][tet][corner] = fa - fb;
    }
  }
}
/* Evaluate the tetrahedron integration weight for each of the three
 * frequency-combination channels at f0. A channel's weight is zero when f0
 * falls outside the frequency range of its tetrahedra. Returns 1 when all
 * three channels are zero (caller may skip the element), else 0. */
static int set_g(double g[3],
                 const double f0,
                 TPLCONST double freq_vertices[3][24][4])
{
  int ch;
  int iw_zero = 1;

  for (ch = 0; ch < 3; ch++) {
    if (in_tetrahedra(f0, freq_vertices[ch])) {
      g[ch] = thm_get_integration_weight(f0, freq_vertices[ch], 'I');
      iw_zero = 0;
    } else {
      g[ch] = 0;
    }
  }

  return iw_zero;
}
/* Return 1 when f0 lies within [min, max] of all 24*4 vertex frequencies,
 * i.e. when at least one tetrahedron could contain f0; 0 otherwise. */
static int in_tetrahedra(const double f0, TPLCONST double freq_vertices[24][4])
{
    int it, iv;
    double f, fmin, fmax;

    fmin = freq_vertices[0][0];
    fmax = freq_vertices[0][0];
    for (it = 0; it < 24; it++) {
        for (iv = 0; iv < 4; iv++) {
            f = freq_vertices[it][iv];
            if (f < fmin) {
                fmin = f;
            }
            if (f > fmax) {
                fmax = f;
            }
        }
    }
    return (fmin <= f0 && f0 <= fmax);
}
/* For the two non-leading members of the triplet (triplet[1], triplet[2]),
 * collect the BZ grid-point indices of the 4 vertices of each of the 24
 * tetrahedra surrounding that grid point, as given by the relative grid
 * addresses. */
static void get_triplet_tetrahedra_vertices(
    size_t vertices[2][24][4],
    TPLCONST int tp_relative_grid_address[2][24][4][3],
    const int mesh[3],
    const size_t triplet[3],
    TPLCONST int (*bz_grid_address)[3],
    const size_t *bz_map)
{
    int side, it;

    for (side = 0; side < 2; side++) {
        for (it = 0; it < 24; it++) {
            thm_get_dense_neighboring_grid_points(vertices[side][it],
                                                  triplet[side + 1],
                                                  tp_relative_grid_address[side][it],
                                                  4,
                                                  mesh,
                                                  bz_grid_address,
                                                  bz_map);
        }
    }
}
|
pr1dmt.c | /*!
\file
\brief A parallel page-rank program using 1D distribution of the graph
\date Started 5/29/2013
\author George
*/
#define LOCKMEM 1
#define _LARGEFILE64_SOURCE
#include <GKlib.h>
#include <bdmpi.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>
/**************************************************************************/
/* data structures */
/**************************************************************************/
/* Run-time parameters and wall-clock timers for the page-rank driver. */
typedef struct {
  int npes, mype;       /* communicator size and this process' rank */
  BDMPI_Comm comm;      /* communicator used for all collectives/point-to-point */
  char *filename;       /* binary CSR input file (argv[1]) */
  int niters;           /* number of PR iterations (argv[2]) */
  int nthreads;         /* OpenMP threads per process (argv[3]) */
  /* timers */
  double totalTmr;      /* end-to-end time (between the barrier pairs in main) */
  double setupTmr;      /* SetupData() time */
  double compTmr;       /* local numeric work in ComputePR() */
  double commTmr;       /* send/recv/allreduce time in ComputePR() */
  double sort1Tmr;      /* reported in main; appears unused elsewhere in this file */
  double sort2Tmr;      /* sorting the remote-index pairs in SetupData() */
} params_t;
/* distributed CSR */
/* A 1D row-distributed CSR matrix plus the all-to-all communication
   metadata needed for the push-based page-rank. */
typedef struct {
  /* the total number of rows/non-zeros and their overall distribution */
  int gnrows, gncols;   /* global matrix dimensions */
  size_t gnnz;          /* global number of non-zeros */
  int *rowdist;         /* size npes+1; rank p owns rows rowdist[p]..rowdist[p+1]-1 */

  /* sendinfo */
  int nsend;            /* number of distinct remote rows referenced locally */
  size_t *scounts;      /* per-destination send counts (size npes) */
  size_t *sdispls;      /* send displacements (size npes+1) */
  int *sinds;           /* global indices of sent entries; freed in SetupData() */

  /* recvinfo */
  int nrecv;            /* total number of entries received */
  size_t *rcounts;      /* per-source receive counts (size npes) */
  size_t *rdispls;      /* receive displacements (size npes+1) */
  int *rinds;           /* local row indices updated by received values */

  gk_csr_t *mat;        /* the locally-owned portion of the matrix */
} dcsr_t;
/**************************************************************************/
/* prototypes */
/**************************************************************************/
dcsr_t *LoadData(params_t *params);
void WritePR(params_t *params, dcsr_t *dmat, double *prvec);
void SetupData(params_t *params, dcsr_t *dmat);
void CleanupData(params_t *params, dcsr_t *dmat);
double *ComputePR(params_t *params, dcsr_t *dmat);
/**************************************************************************/
/**************************************************************************/
/*! Reduces a local timer value to its maximum across all processes and
    prints it (with the supplied label) on the root process only. */
static void PrintMaxTimer(params_t *params, double current, const char *label)
{
  double max;

  BDMPI_Reduce(&current, &max, 1, BDMPI_DOUBLE, BDMPI_MAX, 0, params->comm);
  if (params->mype == 0 && max > 0)
    printf("%s: %10.4lf\n", label, max);
}

/*! Program entry point: initializes BDMPI, parses the command line, loads
    and sets up the distributed graph, runs the page-rank iterations,
    writes the result vector, and reports the per-timer maxima on rank 0.
    Usage: pr1dmt filename niters nthreads */
int main(int argc, char **argv)
{
  params_t *params;
  dcsr_t *dmat;
  double *prvec;

  /* unbuffered output so multi-process logs interleave sanely */
  setbuf(stdout, NULL);
  setbuf(stderr, NULL);

  BDMPI_Init(&argc, &argv);

  params = (params_t *)gk_malloc(sizeof(params_t), "params");
  memset(params, 0, sizeof(params_t));

  params->comm = BDMPI_COMM_WORLD;
  BDMPI_Comm_size(params->comm, &(params->npes));
  BDMPI_Comm_rank(params->comm, &(params->mype));

  if (argc != 4) {
    if (params->mype == 0)
      fprintf(stderr, "Usage: %s filename niters nthreads\n", argv[0]);
    BDMPI_Finalize();
    return EXIT_FAILURE;
  }

#ifdef NOOMP
  printf("Running pr1dmt without openmp\n");
#endif
#ifdef LOCKMEM
  printf("Using mlock/munlock\n");
#else
  printf("NOT using mlock/munlock\n");
#endif

  params->filename = gk_strdup(argv[1]);
  params->niters   = atoi(argv[2]);
  params->nthreads = atoi(argv[3]);

  omp_set_num_threads(params->nthreads);

  gk_clearwctimer(params->totalTmr);
  gk_clearwctimer(params->setupTmr);
  gk_clearwctimer(params->compTmr);
  gk_clearwctimer(params->commTmr);
  gk_clearwctimer(params->sort1Tmr);
  gk_clearwctimer(params->sort2Tmr);

  BDMPI_Barrier(params->comm);
  BDMPI_Barrier(params->comm);
  gk_startwctimer(params->totalTmr);

  dmat = LoadData(params);

  gk_startwctimer(params->setupTmr);
  SetupData(params, dmat);
  gk_stopwctimer(params->setupTmr);

  prvec = ComputePR(params, dmat);

  WritePR(params, dmat, prvec);
  CleanupData(params, dmat);

  BDMPI_Barrier(params->comm);
  BDMPI_Barrier(params->comm);
  gk_stopwctimer(params->totalTmr);

  /* print timing stats; the reduce arguments had been mangled from
     "&current" into "&curren;t" HTML-entity garbage, which broke the build */
  PrintMaxTimer(params, gk_getwctimer(params->sort1Tmr), " sort1Tmr");
  PrintMaxTimer(params, gk_getwctimer(params->sort2Tmr), " sort2Tmr");
  PrintMaxTimer(params, gk_getwctimer(params->setupTmr), " setupTmr");
  PrintMaxTimer(params, gk_getwctimer(params->compTmr),  " compTmr");
  PrintMaxTimer(params, gk_getwctimer(params->commTmr),  " commTmr");
  PrintMaxTimer(params, gk_getwctimer(params->totalTmr), " totalTmr");

  BDMPI_Finalize();

  return EXIT_SUCCESS;
}
/**************************************************************************/
/*! Reads a sparse matrix in binary CSR format, one process at a time.
\returns the local portion of the matrix.
*/
/**************************************************************************/
dcsr_t *LoadData(params_t *params)
{
  int mype=params->mype, npes=params->npes, token=1;
  int lrank, lsize;
  size_t i, p, gnnz, lnnz;
  ssize_t rsize;
  int fd, gnrows, gncols, lnrows;
  dcsr_t *dmat=NULL;
  BDMPI_Status status;
  off64_t fpos;
  ssize_t *rowptr;

  /* lrank/lsize are the node-local rank/size, used to serialize the file
     reads among the processes sharing a node */
  BDMPI_Comm_lrank(params->comm, &lrank);
  BDMPI_Comm_lsize(params->comm, &lsize);

  if (mype == 0) {
    if (!gk_fexists(params->filename))
      errexit("File %s does not exist!\n", params->filename);
  }

  dmat = (dcsr_t *)gk_malloc(sizeof(dcsr_t), "dmat");
  memset(dmat, 0, sizeof(dcsr_t));

  dmat->rowdist = gk_imalloc(npes+1, "rowdist");

  /* root determines the rowdist array so that it balances the lnnz's */
  if (mype == 0) {
    if ((fd = open(params->filename, O_RDONLY)) == -1)
      errexit("Failed opeing the file %s. [%s]\n", params->filename, strerror(errno));
    /* on-disk layout: nrows, ncols, rowptr[nrows+1], rowind[nnz], (rowval) */
    if (gk_read(fd, &gnrows, sizeof(int)) != sizeof(int))
      errexit("Failed to read the nrows from file %s!\n", params->filename);
    if (gk_read(fd, &gncols, sizeof(int)) != sizeof(int))
      errexit("Failed to read the ncols from file %s!\n", params->filename);

    rowptr = gk_zmalloc(gnrows+1, "rowptr");
    if (gk_read(fd, rowptr, sizeof(ssize_t)*(gnrows+1)) != sizeof(ssize_t)*(gnrows+1))
      errexit("Failed to read the rowptr from file %s!\n", params->filename);
    close(fd);

    /* populate the rowdist: give each remaining rank an equal share of the
       remaining non-zeros, then advance to the first row past that target */
    dmat->rowdist[0] = 0;
    for (i=0, p=0; p<npes; p++) {
      lnnz = rowptr[i] + (rowptr[gnrows] - rowptr[i] + npes - p - 1)/(npes-p);
      for (; i<gnrows; i++) {
        if (rowptr[i] >= lnnz)
          break;
      }
      dmat->rowdist[p+1] = i;
      //printf("%5zu %10zu %10d %10zu %10zu\n", p, i, gnrows, lnnz, rowptr[gnrows]);
    }
    gk_free((void **)&rowptr, LTERM);
  }

  /* broadcast rowdist */
  BDMPI_Bcast(dmat->rowdist, npes+1, BDMPI_INT, 0, params->comm);

  /* wait your turn (node-local token passing keeps one reader per node) */
  if (lrank != 0)
    BDMPI_Recv(&token, 1, BDMPI_INT, mype-1, 1, params->comm, &status);

  if ((fd = open(params->filename, O_RDONLY)) == -1)
    errexit("Failed opeing the file %s. [%s]\n", params->filename, strerror(errno));
  if (gk_read(fd, &gnrows, sizeof(int)) != sizeof(int))
    errexit("Failed to read the nrows from file %s!\n", params->filename);
  if (gk_read(fd, &gncols, sizeof(int)) != sizeof(int))
    errexit("Failed to read the ncols from file %s!\n", params->filename);

  /* seek to rowptr[gnrows] and read it as the global nnz */
  fpos = 2*sizeof(int) + gnrows*sizeof(ssize_t);
  if (lseek64(fd, fpos, SEEK_SET) == -1)
    errexit("Failed to lseek for %s. error: %s!\n", params->filename, strerror(errno));
  if (gk_read(fd, &gnnz, sizeof(size_t)) != sizeof(size_t))
    errexit("Failed to read the gnnz from file %s!\n", params->filename);

  dmat->gnrows = gnrows;
  dmat->gncols = gncols;
  dmat->gnnz   = gnnz;
  lnrows       = dmat->rowdist[mype+1]-dmat->rowdist[mype];

  dmat->mat = gk_csr_Create();
  dmat->mat->nrows = lnrows;
  dmat->mat->ncols = gncols;

  /* read the rowptr (this rank's slice, still in global offsets) */
  dmat->mat->rowptr = gk_zmalloc(lnrows+1, "dmat->mat->rowptr");
  fpos = 2*sizeof(int) + dmat->rowdist[mype]*sizeof(ssize_t);
  if (lseek64(fd, fpos, SEEK_SET) == -1)
    gk_errexit(SIGERR, "Failed to lseek for %s. error: %s!\n", params->filename, strerror(errno));
  if (gk_read(fd, dmat->mat->rowptr, sizeof(ssize_t)*(lnrows+1)) != sizeof(ssize_t)*(lnrows+1))
    gk_errexit(SIGERR, "Failed to read the rowptr from file %s!\n", params->filename);

  /* read the rowind */
  lnnz = dmat->mat->rowptr[lnrows]-dmat->mat->rowptr[0];
  dmat->mat->rowind = gk_imalloc(lnnz, "dmat->mat->rowind");
  fpos = 2*sizeof(int) + sizeof(ssize_t)*(gnrows+1) + sizeof(int)*dmat->mat->rowptr[0];
  if (lseek64(fd, fpos, SEEK_SET) == -1)
    gk_errexit(SIGERR, "Failed to lseek for %s. error: %s!\n", params->filename, strerror(errno));
  if ((rsize = gk_read(fd, dmat->mat->rowind, sizeof(int)*lnnz)) != (ssize_t)(sizeof(int)*lnnz))
    gk_errexit(SIGERR, "Failed to read the rowind from file %s [%zd %zd]!\n",
        params->filename, rsize, sizeof(float)*lnnz);

#ifdef XXX
  /* read the rowval */
  lnnz = dmat->mat->rowptr[lnrows]-dmat->mat->rowptr[0];
  dmat->mat->rowval = gk_fmalloc(lnnz, "dmat->mat->rowval");
  fpos = 2*sizeof(int) + sizeof(ssize_t)*(gnrows+1) + sizeof(int)*gnnz + sizeof(float)*dmat->mat->rowptr[0];
  if (lseek64(fd, fpos, SEEK_SET) == -1)
    gk_errexit(SIGERR, "Failed to lseek for %s. error: %s!\n", params->filename, strerror(errno));
  if ((rsize = gk_read(fd, dmat->mat->rowval, sizeof(float)*lnnz)) != (ssize_t)(sizeof(float)*lnnz))
    gk_errexit(SIGERR, "Failed to read the rowval from file %s [%zd %zd]!\n",
        params->filename, rsize, sizeof(float)*lnnz);
#endif

  /* localize adjust rowptr (make rowptr[0] == 0) */
  for (i=lnrows; i>0; i--)
    dmat->mat->rowptr[i] -= dmat->mat->rowptr[0];
  dmat->mat->rowptr[0] = 0;

  close(fd);

  /* pass the read token to the next rank on this node */
  if (lrank != lsize-1)
    BDMPI_Send(&token, 1, BDMPI_INT, mype+1, 1, params->comm);

  printf("[%3d] dmat->gnrows/lnrows: %d/%d, dmat->gncols/lncols: %d/%d, "
         "dmat->gnnz/lnnz: %zu/%zu [ts: %d]\n",
      mype,
      dmat->gnrows, dmat->mat->nrows,
      dmat->gncols, dmat->mat->ncols,
      dmat->gnnz, dmat->mat->rowptr[dmat->mat->nrows], (int)time(NULL));

  return dmat;
}
/**************************************************************************/
/*! Writes the page-rank vector. It just let each process write its portion
to the file in a round-robin fashion.
*/
/**************************************************************************/
/*! Writes the local portion of the page-rank vector, one process at a
    time in rank order: rank 0 truncates the file, everyone else appends
    after receiving a token from the previous rank.
    Fixed: the non-root branch used "%lf" while root used "%.8le", so a
    single output file mixed two formats/precisions — all ranks now write
    "%.8le"; the token value sent is also unified. */
void WritePR(params_t *params, dcsr_t *dmat, double *prvec)
{
  int npes=params->npes, mype=params->mype, dummy=0;
  size_t i;
  BDMPI_Status status;
  FILE *fpout;
  char outfile[1024];

  sprintf(outfile, "%s.prvec", params->filename);

  /* wait until the previous rank has finished writing its portion */
  if (mype != 0)
    BDMPI_Recv(&dummy, 1, BDMPI_INT, mype-1, 1, params->comm, &status);

  fpout = gk_fopen(outfile, (mype == 0 ? "w" : "a"), "outfile");
  for (i=0; i<dmat->mat->nrows; i++)
    fprintf(fpout, "%.8le\n", prvec[i]);
  gk_fclose(fpout);

  /* pass the token to the next rank */
  if (mype+1 < npes)
    BDMPI_Send(&dummy, 1, BDMPI_INT, mype+1, 1, params->comm);
}
/**************************************************************************/
/*! This function setups the various data-structures for communication and
then proceeds to create a stochastic matrix for PR calculations.
*/
/**************************************************************************/
void SetupData(params_t *params, dcsr_t *dmat)
{
  int npes=params->npes, mype=params->mype;
  int nrows, firstrow, lastrow, nunique;
  size_t i, j, p, npairs;
  ssize_t *rowptr;
  int *rowind;
  gk_ikv_t *pairs;

  nrows  = dmat->mat->nrows;
  rowptr = dmat->mat->rowptr;
  rowind = dmat->mat->rowind;

  firstrow = dmat->rowdist[mype];
  lastrow  = dmat->rowdist[mype+1];

  /* ============================================================ */
  /* SETUP SINFO */
  /* ============================================================ */
  /* determine the number of non-local non-zeros */
  for (npairs=0, i=0; i<rowptr[nrows]; i++) {
    if (rowind[i] < firstrow || rowind[i] >= lastrow)
      npairs++;
  }

  /* put them in a gk_ikv_t type so that you can renumber them afterwards;
     key = global column index, val = position in rowind */
  pairs = gk_ikvmalloc(npairs, "npairs");
  for (npairs=0, i=0; i<rowptr[nrows]; i++) {
    if (rowind[i] < firstrow || rowind[i] >= lastrow) {
      pairs[npairs].key = rowind[i];
      pairs[npairs].val = i;
      npairs++;
    }
    else { /* renumber the local index */
      rowind[i] -= firstrow;
    }
  }

  gk_startwctimer(params->sort2Tmr);
  gk_ikvsorti(npairs, pairs);
  gk_stopwctimer(params->sort2Tmr);

  /* determine the unique remote indices and renumber them; remote column
     k is mapped to local column nrows + (rank of k among unique remotes).
     NOTE(review): if npairs == 0 (no remote non-zeros at all), pairs[0]
     is read out of bounds here -- presumably every partition references
     at least one remote vertex; confirm. */
  rowind[pairs[0].val] = nrows;
  for (nunique=0, i=1; i<npairs; i++) {
    if (pairs[i-1].key != pairs[i].key)
      nunique++;
    rowind[pairs[i].val] = nrows + nunique;
  }
  nunique++;

  /* allocate memory for the sinfo (we implement a push-based algorithm) */
  dmat->nsend   = nunique;
  dmat->scounts = gk_zumalloc(npes, "scounts");
  dmat->sdispls = gk_zumalloc(npes+1, "sdispls");
  dmat->sinds   = gk_imalloc(nunique, "sinds");

  /* copy the unique indices into dmat->sinds (sorted, global numbering) */
  dmat->sinds[0] = pairs[0].key;
  for (nunique=1, i=1; i<npairs; i++) {
    if (pairs[i-1].key != pairs[i].key)
      dmat->sinds[nunique++] = pairs[i].key;
  }

  /* determine scounts/sdispls: sinds is sorted, so each rank's slice is
     the run of indices below its rowdist upper bound */
  dmat->sdispls[0] = 0;
  for (i=0, p=0; p<npes; p++) {
    for (; i<nunique; i++) {
      if (dmat->sinds[i] >= dmat->rowdist[p+1])
        break;
    }
    dmat->sdispls[p+1] = i;
    dmat->scounts[p] = dmat->sdispls[p+1]-dmat->sdispls[p];
  }

  gk_free((void **)&pairs, LTERM);

  /* ============================================================ */
  /* SETUP RINFO */
  /* ============================================================ */
  /* allocate memory for rcounts and perform an all-to-all to get the data */
  dmat->rcounts = gk_zumalloc(npes, "rcounts");
  BDMPI_Alltoall(dmat->scounts, 1, BDMPI_SIZE_T, dmat->rcounts, 1, BDMPI_SIZE_T, params->comm);

  /* allocate memory for rdispls and fill it */
  dmat->rdispls = gk_zumalloc(npes+1, "rdispls");
  dmat->rdispls[0] = 0;
  for (i=0; i<npes; i++)
    dmat->rdispls[i+1] = dmat->rdispls[i] + dmat->rcounts[i];

  dmat->nrecv = dmat->rdispls[npes];

  /* allocate memory for rinds and populate it via an all-to-all */
  dmat->rinds = gk_imalloc(dmat->nrecv, "rinds");
  BDMPI_Alltoallv(dmat->sinds, dmat->scounts, dmat->sdispls, BDMPI_INT,
      dmat->rinds, dmat->rcounts, dmat->rdispls, BDMPI_INT,
      params->comm);

  /* free sinds, as they will not be used again */
  gk_free((void **)&dmat->sinds, LTERM);

  /* localize the indices in dmat->rinds */
  for (i=0; i<dmat->nrecv; i++)
    dmat->rinds[i] -= firstrow;

  printf("[%3d] nsend: %d, nrecv: %d[ts: %d]\n", mype, dmat->nsend,
      dmat->nrecv,(int)time(NULL));

  /* create the scaling weights (1/outdegree) and transpose the matrix:
     the CSC index is what the push-based ComputePR() iterates over */
  dmat->mat->rwgts = gk_fmalloc(nrows, "rwgts");
  for (i=0; i<nrows; i++)
    dmat->mat->rwgts[i] = (rowptr[i+1]-rowptr[i] > 0 ? 1.0/(rowptr[i+1]-rowptr[i]) : 0.0);

  dmat->mat->ncols = nrows + dmat->nsend;
  gk_csr_CreateIndex(dmat->mat, GK_CSR_COL);
  gk_free((void **)&dmat->mat->rowptr, &dmat->mat->rowind, &dmat->mat->rowval, LTERM);

  return;
}
/**************************************************************************/
/*! This function deallocates all the memory that was used */
/**************************************************************************/
/*! Releases the local matrix, the distribution array, the send/receive
    communication metadata, and finally the dcsr_t wrapper itself. */
void CleanupData(params_t *params, dcsr_t *dmat)
{
  gk_csr_Free(&(dmat->mat));
  gk_free((void **)&dmat->rowdist, LTERM);
  gk_free((void **)&dmat->scounts, &dmat->sdispls, LTERM);
  gk_free((void **)&dmat->rcounts, &dmat->rdispls, &dmat->rinds, LTERM);
  gk_free((void **)&dmat, LTERM);

  return;
}
/**************************************************************************/
/*! This function computes the page-rank scores using a push approach */
/**************************************************************************/
double *ComputePR(params_t *params, dcsr_t *dmat)
{
  int npes=params->npes, mype=params->mype;
  size_t iter, i, j, p, nrows, ncols, nsend;
  int *colind, *rinds, k;
  ssize_t *colptr;
  double *pr1, *pr2, *tpr, dtmp;
  double lambda=.2, rprob, lrmsd, grmsd;
  float *rwgts;
  BDMPI_Status status;

  nsend  = dmat->nsend;
  nrows  = dmat->mat->nrows;
  ncols  = dmat->mat->ncols;   /* ncols == nrows + nsend (set in SetupData) */
  colptr = dmat->mat->colptr;
  colind = dmat->mat->colind;
  rwgts  = dmat->mat->rwgts;

  rprob = 1.0/dmat->gnrows;
  pr1 = gk_dsmalloc(nrows, rprob, "pr");   /* uniform initial scores */

  /* get into the PR iteration
     (note: the loop bound is <=, so niters+1 iterations are performed --
     presumably intentional so the final report at iter==niters fires) */
  for (iter=0; iter<=params->niters; iter++) {
    BDMPI_Barrier(params->comm); /* this can be removed... */

    /* tpr[0..nrows-1] accumulates local vertices; tpr[nrows..ncols-1]
       accumulates partial results destined for remote vertices */
    tpr = gk_dmalloc(ncols, "tpr");

#ifdef LOCKMEM
    GKWARN(BDMPI_mlock(tpr, ncols*sizeof(double)) == 0);
    GKWARN(BDMPI_mlock(pr1, nrows*sizeof(double)) == 0);
    GKWARN(BDMPI_mlock(colptr, (ncols+1)*sizeof(ssize_t)) == 0);
    GKWARN(BDMPI_mlock(colind, colptr[ncols]*sizeof(int)) == 0);
    GKWARN(BDMPI_mlock(rwgts, nrows*sizeof(float)) == 0);
#endif

    /****************************************************************/
    /* computation phase */
    /****************************************************************/
    gk_startwctimer(params->compTmr);

    /* push random-walk scores to the outlinks: tpr[i] gathers the
       degree-scaled scores of the local in-neighbors of column i */
#ifndef NOOMP
#pragma omp parallel for default(none), \
  private(i, j, k, dtmp), \
  shared(ncols, colptr, tpr, rwgts, colind, pr1)
#endif
    for (i=0; i<ncols; i++) {
      for (dtmp=0.0, j=colptr[i]; j<colptr[i+1]; j++) {
        k = colind[j];
        dtmp += rwgts[k]*pr1[k];
      }
      tpr[i] = dtmp;
    }
    gk_stopwctimer(params->compTmr);

#ifdef LOCKMEM
    GKWARN(BDMPI_munlockall() == 0);
#endif

    /****************************************************************/
    /* communication phase */
    /****************************************************************/
    gk_startwctimer(params->commTmr);

    /* send your partial results; shift tpr to the remote section so that
       sdispls[] offsets (which are relative to the unique remote list)
       index it directly */
    tpr += nrows;
#ifdef LOCKMEM
    GKWARN(BDMPI_mlock(tpr, nsend*sizeof(double)) == 0);
#endif
    for (j=1; j<npes; j++) {
      p = (mype+j)%npes;   /* stagger destinations to avoid hot-spotting */
      BDMPI_Send(tpr+dmat->sdispls[p], dmat->scounts[p], BDMPI_DOUBLE,
          p, 2000, params->comm);
    }
#ifdef LOCKMEM
    GKWARN(BDMPI_munlock(tpr, nsend*sizeof(double)) == 0);
#endif
    tpr -= nrows;   /* undo the shift before freeing */

    /* keep the partial results for your own vertices in tpr */
    pr2 = gk_dcopy(nrows, tpr, gk_dmalloc(nrows, "pr2"));
    gk_free((void **)&tpr, LTERM);

    /* receive and incorporate remote partial results, in arrival order */
    for (j=1; j<npes; j++) {
      BDMPI_Probe(BDMPI_ANY_SOURCE, 2000, params->comm, &status);
      p = status.BDMPI_SOURCE;
      tpr = gk_dmalloc(nrows, "tpr");
      BDMPI_Recv(tpr, nrows, BDMPI_DOUBLE, p, 2000, params->comm, &status);

      /* incorporate remote updates: rinds maps the p-th rank's entries
         to local row indices (localized in SetupData) */
      gk_startwctimer(params->compTmr);
      rinds = dmat->rinds + dmat->rdispls[p];
#ifdef LOCKMEM
      GKWARN(BDMPI_mlock(rinds, dmat->rcounts[p]*sizeof(int)) == 0);
      GKWARN(BDMPI_mlock(pr2, nrows*sizeof(double)) == 0);
#endif
      for (i=0; i<dmat->rcounts[p]; i++)
        pr2[rinds[i]] += tpr[i];
#ifdef LOCKMEM
      GKWARN(BDMPI_munlock(rinds, dmat->rcounts[p]*sizeof(int)) == 0);
      GKWARN(BDMPI_munlock(pr2, nrows*sizeof(double)) == 0);
#endif
      gk_stopwctimer(params->compTmr);

      gk_free((void **)&tpr, LTERM);
    }
    gk_stopwctimer(params->commTmr);

    /* apply the restart condition */
#ifdef LOCKMEM
    GKWARN(BDMPI_mlock(pr2, nrows*sizeof(double)) == 0);
#endif
    for (i=0; i<nrows; i++)
      pr2[i] = lambda*rprob + (1.0-lambda)*pr2[i];
#ifdef LOCKMEM
    GKWARN(BDMPI_munlock(pr2, nrows*sizeof(double)) == 0);
#endif

    printf("[pe %d] Finished Iter: %5zu [ts: %d]\n",mype,iter,(int)time(NULL));

    /* do some reporting every 10 iterations and on the last one */
    if (iter%10 == 0 || iter == params->niters) {
#ifdef LOCKMEM
      GKWARN(BDMPI_mlock(pr1, nrows*sizeof(double)) == 0);
      GKWARN(BDMPI_mlock(pr2, nrows*sizeof(double)) == 0);
#endif
      /* compute the difference */
      for (lrmsd=0.0, i=0; i<nrows; i++)
        lrmsd += (pr1[i]-pr2[i])*(pr1[i]-pr2[i]);
#ifdef LOCKMEM
      GKWARN(BDMPI_munlock(pr1, nrows*sizeof(double)) == 0);
      GKWARN(BDMPI_munlock(pr2, nrows*sizeof(double)) == 0);
#endif
      gk_free((void **)&pr1, LTERM);
      pr1 = pr2;

      /* get the global rmsd across all processors */
      gk_startwctimer(params->commTmr);
      BDMPI_Allreduce(&lrmsd, &grmsd, 1, BDMPI_DOUBLE, BDMPI_SUM, params->comm);
      grmsd = sqrt(grmsd);
      gk_stopwctimer(params->commTmr);

      if (mype == 0)
        printf("Iter: %5zu, grmsd: %.6le[ts: %d]\n", iter, grmsd,
            (int)time(NULL));
    }
    else {
      gk_free((void **)&pr1, LTERM);
      pr1 = pr2;
    }
  }

  return pr1;
}
|
py-cky.h | // py-cky.h
//
// (c) Mark Johnson, 27th January 2006, last modified 2nd May, 2013
#ifndef PY_CKY_H
#define PY_CKY_H
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdlib>
// #include <ext/hash_map>
#include <iostream>
#include <map>
#include <set>
#include <sstream>
#include <utility>
#include <vector>
#include <tr1/unordered_map>
#include <omp.h>
#include "earley.h"
#include "gammadist.h"
#include "mt19937ar.h"
#include "slice-sampler.h"
#include "sym.h"
#include "xtree.h"
#include "trie.h"
#include "utility.h"
extern int debug;
//! Suppose there are n samples occupying m tables.
//! Then the probability that the n+1 sample occupies
//! table 1 <= k <= m is:
//!
//! P(x_{n+1} = k) = (n_k - a)/(n + b)
//!
//! and the probability that the n+1 sample occupies
//! the new table m+1 is:
//!
//! P(x_{n+1} = m+1) = (m*a + b)/(n + b)
//!
//! The probability of a configuration in which a
//! restaurant contains n customers at m tables,
//! with n_k customers at table k is:
//!
//!
//! a^{-m} G(m+b/a) G(b) G(n_k-a)
//! -------- ------ \prod_{k=1}^m --------
//! G(b/a) G(n+b) G(1-a)
//!
//! where G is the Gamma function.
//! power() computes x^y, short-circuiting the common y == 1 case to
//! avoid the transcendental pow()/powf() call.
inline float power(float x, float y) {
  if (y == 1)
    return x;
  return powf(x, y);
}

inline double power(double x, double y) {
  if (y == 1)
    return x;
  return pow(x, y);
}
#ifndef QUADPREC
typedef double F;
#else
#include "quadmath.h"
typedef __float128 F;
inline __float128 power(__float128 x, __float128 y) { return y == 1 ? x : pow(double(x), double(y)); }
inline __float128 log(__float128 x) {return log(double(x));}
#endif
#define MIN_PROB_VALUE -70000000
// inline long double power(long double x, long double y) { return powl(x, y); }
typedef symbol S;
typedef std::vector<S> Ss;
typedef std::vector<Ss> Sss;
typedef std::map<S,F> S_F;
// typedef tr1::unordered_map<S,F> S_F;
typedef std::pair<S,Ss> SSs;
typedef std::map<SSs,F> SSs_F;
//! readline_symbols() reads all of the symbols on the current
//! line into syms
//
//! readline_symbols() replaces the contents of syms with the
//! whitespace-separated tokens of the next line read from is.
//
inline
std::istream& readline_symbols(std::istream& is, Ss& syms) {
  syms.clear();
  std::string line;
  if (!std::getline(is, line))
    return is;
  std::istringstream tokens(line);
  std::string tok;
  while (tokens >> tok)
    syms.push_back(tok);
  return is;
} // readline_symbols()
//! A default_value_type{} object is used to read an object from a stream,
//! assigning a default value if the read fails. Users should not need to
//! construct such objects, but should use the default_value() function instead.
//
template <typename object_type, typename default_type>
struct default_value_type {
  object_type& object;              //!< the caller's object to read into
  const default_type defaultvalue;  //!< value assigned when extraction fails
  default_value_type(object_type& object, const default_type defaultvalue)
    : object(object), defaultvalue(defaultvalue) { }
};
//! default_value() is used to read an object from a stream, assigning a
//! default value if the read fails. It returns a default_value_type{}
//! object, which does the actual reading.
//
template <typename object_type, typename default_type>
default_value_type<object_type,default_type>
default_value(object_type& object, const default_type defaultvalue=default_type()) {
  // Factory wrapper so callers never name default_value_type<> explicitly.
  typedef default_value_type<object_type,default_type> result_type;
  return result_type(object, defaultvalue);
}
//! This operator>>() reads default_value_type{} from an input stream.
//
template <typename object_type, typename default_type>
std::istream& operator>> (std::istream& is,
                          default_value_type<object_type, default_type> dv) {
  if (!is)
    return is;                                   // stream already bad: no-op
  if (!(is >> dv.object)) {
    is.clear(is.rdstate() & ~std::ios::failbit); // recover from the failed read
    dv.object = dv.defaultvalue;                 // and substitute the default
  }
  return is;
}
// inline F random1() { return rand()/(RAND_MAX+1.0); }
//! random1() returns a uniform deviate drawn from the Mersenne-Twister
//! generator with 53-bit resolution.
inline F random1() { return mt_genrand_res53(); }
//! A pycfg_type is a CKY parser for a py-cfg
//
struct pycfg_type {
pycfg_type()
: estimate_theta_flag(false), predictive_parse_filter(false),
default_weight(1), default_pya(1e-1), default_pyb(1e3),
pya_beta_a(0), pya_beta_b(0), pyb_gamma_s(0), pyb_gamma_c(0) { }
typedef unsigned int U;
typedef std::pair<U,U> UU;
typedef std::map<S,U> S_U;
typedef std::map<S,UU> S_UU;
typedef tr1::unordered_map<S,S_F> S_S_F;
typedef trie<S, S_F> St_S_F;
typedef St_S_F::const_iterator Stit;
typedef catcounttree_type tree;
typedef std::set<tree*> sT;
typedef trie<S,sT> St_sT;
typedef std::vector<tree*> Ts;
typedef std::map<S,Ts> S_Ts;
//! If estimate_theta_flag is true, then we estimate the generator
//! rule weights using a Dirichlet prior
//
bool estimate_theta_flag;
//! If predictive_parse_filter is true, then first do a deterministic
//! Earley parse of each sentence and use this to filter the nondeterministic
//! CKY parses
//
bool predictive_parse_filter;
//! predictive_parse_filter_grammar is the grammar used by the Earley parser
//
earley::grammar predictive_parse_filter_grammar;
//! start is the start symbol of the grammar
//
S start;
//! rhs_parent_weight maps the right-hand sides of rules
//! to rule parent and rule weight
//
St_S_F rhs_parent_weight;
St_S_F filtered_rhs_parent_weight;
//! unarychild_parent_weight maps unary children to a vector
//! of parent-weight pairs
//
S_S_F unarychild_parent_weight;
S_S_F filtered_unarychild_parent_weight;
//! parent_weight maps parents to the sum of their rule weights
//
S_F parent_weight;
//! default_weight is the default weight for rules with no explicit
//! weight. Used when grammar is read in.
//
F default_weight;
//! rule_priorweight is the prior weight of rule
//
SSs_F rule_priorweight;
//! parent_priorweight is the prior weight the parent
//
S_F parent_priorweight;
//! terms_pytrees maps terminal strings to their PY trees
//
St_sT terms_pytrees;
St_sT filtered_terms_pytrees;
//! parent_pyn maps parents to the number of times they have been expanded
//
S_U parent_pyn;
//! parent_pym maps parents to the number of distinct PY tables for parent
//
S_U parent_pym;
//! rule table
std::map<S, std::vector<Ss> > rules_look_up;
F default_pya; //!< default value for pya
F default_pyb; //!< default value for pyb
F pya_beta_a; //!< alpha parameter of Beta prior on pya
F pya_beta_b; //!< beta parameter of Beta prior on pya
F pyb_gamma_s; //!< s parameter of Gamma prior on pyb
F pyb_gamma_c; //!< c parameter of Gamma prior on pyb
S_F parent_pya; //!< pya value for parent
S_F parent_pyb; //!< pyb value for parent
//! get_pya() returns the pya value for this parent, falling back to
//! default_pya when no per-parent override has been set.
//
F get_pya(S parent) const {
  S_F::const_iterator cit = parent_pya.find(parent);
  if (cit != parent_pya.end())
    return cit->second;
  return default_pya;
} // pycfg_type::get_pya()
//! set_pya() sets the pya value for this parent and returns the value it
//! replaces. Setting the default value removes any per-parent override.
//
F set_pya(S parent, F pya) {
  S_F::iterator it = parent_pya.find(parent);
  const F old_pya = (it == parent_pya.end()) ? default_pya : it->second;
  if (pya != default_pya)
    parent_pya[parent] = pya;       // install/overwrite the override
  else if (it != parent_pya.end())
    parent_pya.erase(it);           // default requested: drop the override
  return old_pya;
} // pycfg_type::set_pya()
//! get_pyb() returns the pyb value for this parent, falling back to
//! default_pyb when no per-parent override has been set.
//
F get_pyb(S parent) const {
  S_F::const_iterator cit = parent_pyb.find(parent);
  if (cit != parent_pyb.end())
    return cit->second;
  return default_pyb;
} // pycfg_type::get_pyb()
//! sum_pym() returns the total number of PY tables summed over all parents.
//
U sum_pym() const {
  U total = 0;
  for (S_U::const_iterator it = parent_pym.begin();
       it != parent_pym.end(); ++it)
    total += it->second;
  return total;
} // pycfg_type::sum_pym()
//! terms_pytrees_size() returns the total number of trees stored in
//! terms_pytrees, accumulated by walking the trie with a helper functor.
//
U terms_pytrees_size() const {
  U ntrees = 0;
  terms_pytrees.for_each(terms_pytrees_size_helper(ntrees));
  return ntrees;
} // pycfg_type::terms_pytrees_size()
//! Functor passed to terms_pytrees.for_each() that accumulates the
//! number of stored trees into a caller-owned counter.
struct terms_pytrees_size_helper {
  U& size;  // accumulator owned by terms_pytrees_size()
  terms_pytrees_size_helper(U& size) : size(size) { }
  template <typename Words, typename TreePtrs>
  void operator() (const Words& words, const TreePtrs& tps) {
    size += tps.size();
    // Jackie: TreePtrs -- set of trees
  } // pycfg_type::terms_pytrees_size_helper::operator()
}; // pycfg_type::terms_pytrees_size_helper{}
//! rule_weight() returns the weight of rule parent --> rhs, or 0 when
//! the rule is unknown. Unary rules live in unarychild_parent_weight;
//! longer right-hand sides live in the rhs_parent_weight trie.
//
template <typename rhs_type>
F rule_weight(S parent, const rhs_type& rhs) const {
  assert(!rhs.empty());
  if (rhs.size() == 1) {
    S_S_F::const_iterator cit = unarychild_parent_weight.find(rhs[0]);
    if (cit == unarychild_parent_weight.end())
      return 0;
    return dfind(cit->second, parent);
  }
  // rhs.size() > 1
  Stit tit = rhs_parent_weight.find(rhs);
  if (tit == rhs_parent_weight.end())
    return 0;
  return dfind(tit->data, parent);
} // pycfg_type::rule_weight()
//! rule_prob() returns the probability of rule parent --> rhs, i.e. its
//! weight normalized by the parent's total rule weight.
//
template <typename rhs_type>
F rule_prob(S parent, const rhs_type& rhs) const {
  assert(!rhs.empty());
  const F pweight = afind(parent_weight, parent);
  const F rweight = rule_weight(parent, rhs);
  assert(rweight > 0);
  assert(pweight > 0);
  return rweight/pweight;
} // pycfg_type::rule_prob()
//! PLUT2BProb() returns the product of the rule probabilities rewriting
//! each PLU-top symbol to its PLU-bottom symbol sequence.
F PLUT2BProb(const Ss& plu_top, const Sss& plu_btm) {
  assert(plu_top.size() == plu_btm.size());
  F prob = 1;
  for (size_t k = 0; k < plu_top.size(); ++k) {
    const F ruleprob = rule_prob(plu_top[k], plu_btm[k]);
    if (debug == -1)
      std::cerr << "tree_prob "
                << plu_top[k] << " --> " << plu_btm[k]
                << ", prob = " << ruleprob << std::endl;
    prob *= ruleprob;
  }
  return prob;
}
//! tree_prob() returns the probability of the tree under the current
//! model
//!
//! Uses the Pitman-Yor predictive probabilities documented at the top of
//! this file: an existing cached table contributes (n_k - a)/(n + b) and
//! a new table contributes (m*a + b)/(n + b) times the base (rule)
//! probability of the subtree.
//
F tree_prob(const tree* tp) const {
  if (tp -> children.empty())
    return 1;   // terminal node
  F pya = get_pya(tp->cat);
  if (pya == 1) { // no cache
    // pya == 1 disables caching: the probability is just the product of
    // the rule probabilities of the whole subtree
    F prob = 1;
    Ss children;
    cforeach(tree::ptrs_type, it, tp->children) {
      children.push_back((*it)->cat);
      prob *= tree_prob(*it);
    }
    prob *= rule_prob(tp->cat, children);
    return prob;
  }
  F pyb = get_pyb(tp->cat);
  U pym = dfind(parent_pym, tp->cat);  // number of tables for this parent
  U pyn = dfind(parent_pyn, tp->cat);  // number of customers for this parent
  if (tp->count > 0) { // existing node
    // tree occupies a cached table: P = (count - a)/(n + b)
    assert(tp->count <= pyn);
    assert(pym > 0);
    F prob = (tp->count - pya)/(pyn + pyb);
    assert(finite(prob)); assert(prob > 0); assert(prob <= 1);
    return prob;
  }
  // new node: new-table probability times the base probability of the
  // subtree under the grammar
  F prob = (pym * pya + pyb)/(pyn + pyb);
  assert(finite(prob)); assert(prob > 0); assert(prob <= 1);
  Ss children;
  cforeach(tree::ptrs_type, it, tp->children) {
    children.push_back((*it)->cat);
    prob *= tree_prob(*it);
  }
  prob *= rule_prob(tp->cat, children);
  if (prob < 0) {
    std::cerr << "## pycfg_type::tree_prob(" << *tp << ") = "
              << prob << std::endl;
  }
  assert(finite(prob)); assert(prob <= 1); assert(prob >= 0);
  // assert(prob > 0);
  return prob;
} // pycfg_type::tree_prob()
//! incrrule() increments the weight of the rule parent --> rhs by
//! `weight`, returning the probability of this rule under the grammar
//! as it was BEFORE the increment.
//
template <typename rhs_type>
F incrrule(S parent, const rhs_type& rhs, F weight = 1) {
  assert(!rhs.empty());
  assert(weight >= 0);
  // snapshot the parent total, then bump it
  F& parentweight = parent_weight[parent];
  const F old_parentweight = parentweight;
  parentweight += weight;
  // snapshot the rule weight, then bump it (unary rules and longer
  // right-hand sides live in different tables)
  F old_rhsweight;
  if (rhs.size() == 1) {
    F& w = unarychild_parent_weight[rhs[0]][parent];
    old_rhsweight = w;
    w += weight;
  }
  else { // rhs.size() > 1
    F& w = rhs_parent_weight[rhs][parent];
    old_rhsweight = w;
    w += weight;
  }
  assert(old_parentweight >= 0);
  assert(old_rhsweight >= 0);
  return old_rhsweight/old_parentweight;
} // incrrule()
//! decrrule() decrements the weight of rule parent --> rhs,
//! returning the probability of this rule under the new grammar,
//! and deletes the rule if it has weight 0.
//!
//! \param parent the left-hand-side nonterminal
//! \param rhs    the right-hand-side symbol sequence (non-empty)
//! \param weight the amount to subtract (non-negative)
//
template <typename rhs_type>
F decrrule(S parent, const rhs_type& rhs, F weight = 1) {
  assert(weight >= 0);
  assert(!rhs.empty());
  F rhsweight;
  F parentweight = (parent_weight[parent] -= weight);
  assert(parentweight >= 0);
  if (parentweight == 0) {
    parent_weight.erase(parent);
  }
  if (rhs.size() == 1) {  // unary rule
    S_F& parent1_weight = unarychild_parent_weight[rhs[0]];
    rhsweight = (parent1_weight[parent] -= weight);
    assert(rhsweight >= 0);
    if (rhsweight == 0) {
      parent1_weight.erase(parent);
      if (parent1_weight.empty()) {
        unarychild_parent_weight.erase(rhs[0]);
      }
    }
  }
  else { // non-unary rule
    S_F& parent1_weight = rhs_parent_weight[rhs];
    rhsweight = (parent1_weight[parent] -= weight);
    assert(rhsweight >= 0);  // fixed: this assert was missing here,
                             // unlike the otherwise-identical unary branch
    if (rhsweight == 0) {
      parent1_weight.erase(parent);
      if (parent1_weight.empty()) {
        rhs_parent_weight.erase(rhs);
      }
    }
  }
  // NOTE(review): when parentweight reaches 0 this division yields
  // NaN/inf; presumably callers never use the return value in that case
  // -- confirm.
  return rhsweight/parentweight;
} // pycfg_type::decrrule()
//! IncreasePLUT2B() increments the weight of each rule rewriting a
//! PLU-top symbol to its PLU-bottom sequence, returning the product of
//! the rule probabilities under the old grammar.
F IncreasePLUT2B(const Ss& TOPs, const Sss& BTMs) {
  assert(TOPs.size() == BTMs.size());
  const F weight = 1;
  F prob = 1;
  for (size_t i = 0; i < TOPs.size(); ++i) {
    assert(BTMs[i].size()> 0);
    const F ruleprob = incrrule(TOPs[i], BTMs[i],
                                estimate_theta_flag * weight);
    if (debug == -1) {
      std::cerr << "Increase " << TOPs[i]
                << " --> " << BTMs[i]
                << ", prob = " << ruleprob << std::endl;
    }
    prob *= ruleprob;
  }
  return prob;
}
//! DecreasePLUT2B() decrements the weight of each rule rewriting a
//! PLU-top symbol to its PLU-bottom sequence, returning the product of
//! the rule probabilities under the new grammar.
F DecreasePLUT2B(const Ss& TOPs, const Sss& BTMs) {
  assert(TOPs.size() == BTMs.size());
  const F weight = 1;
  F prob = 1;
  for (size_t i = 0; i < TOPs.size(); ++i) {
    assert(BTMs[i].size()> 0);
    const F ruleprob = decrrule(TOPs[i], BTMs[i],
                                estimate_theta_flag * weight);
    if (debug == -1) {
      std::cerr << "Decrease " << TOPs[i]
                << " --> " << BTMs[i]
                << ", prob = " << ruleprob << std::endl;
    }
    prob *= ruleprob;
  }
  return prob;
}
//! incrtree() increments the cache for tp, increments
//! the rules if the cache count is appropriate, and returns
//! the probability of this tree under the original model.
//
F incrtree(tree* tp) {
U weight = 1; // added by Jackie
if (tp -> children.empty()) {
return 1; // terminal node
}
assert(weight >= 0);
F pya = get_pya(tp->cat); // PY cache statistics
F pyb = get_pyb(tp->cat);
if (pya == 1) { // don't table this category
// Untabled category: charge the rule probability for this node and
// recurse into all children.
F prob = 1;
{
Ss children;
cforeach (tree::ptrs_type, it, tp -> children)
children.push_back((*it)->cat);
prob *= incrrule(tp->cat, children, estimate_theta_flag*weight);
}
cforeach (tree::ptrs_type, it, tp->children)
prob *= incrtree(*it);
return prob;
}
else if (tp->count > 0) { // old PY table entry
// Reseat at an existing table: probability (count - a)/(n + b).
// The subtree below was already counted when the table was created,
// so there is no recursion here.
U& pyn = parent_pyn[tp->cat];
F prob = (tp->count - pya)/(pyn + pyb);
assert(finite(prob)); assert(prob > 0); assert(prob <= 1);
tp->count += weight; // increment entry count
pyn += weight; // increment PY count
return prob;
}
else { // new PY table entry
// Register the tree under its terminal yield so decrtree() can find
// and remove it later.
{
Ss terms;
tp->terminals(terms);
bool inserted ATTRIBUTE_UNUSED = terms_pytrees[terms].insert(tp).second;
assert(inserted);
}
U& pym = parent_pym[tp->cat];
U& pyn = parent_pyn[tp->cat];
F prob = (pym*pya + pyb)/(pyn + pyb); // select new table
assert(finite(prob)); assert(prob > 0); assert(prob <= 1);
tp->count += weight; // increment count
pym += 1; // one more PY table entry
pyn += weight; // increment PY count
// A brand-new table also pays the base-distribution cost: the rule at
// this node plus the recursive cost of seating every child.
{
Ss children;
cforeach (tree::ptrs_type, it, tp->children)
children.push_back((*it)->cat);
prob *= incrrule(tp->cat, children, estimate_theta_flag*weight);
}
cforeach (tree::ptrs_type, it, tp->children)
prob *= incrtree(*it);
return prob;
}
} // pycfg_type::incrtree()
// added by Jackie
//! retrieve_rule_counts() copies the current weight of every rule rooted in
//! one of the given PLU-top categories into current_rule_counts, and the
//! total weight of each such category into current_parent_counts.
void retrieve_rule_counts(Ss& plu_top, trie<S, std::map<S, float> >& current_rule_counts, std::map<S, float>& current_parent_counts) {
// De-duplicate the PLU-top symbols before scanning their rules.
std::set<symbol> unique_plu(plu_top.begin(), plu_top.end());
std::set<symbol>::iterator plu_iter = unique_plu.begin();
for (; plu_iter != unique_plu.end(); ++plu_iter) {
current_parent_counts[*plu_iter] = parent_weight[*plu_iter];
Sss& rhss = rules_look_up[*plu_iter];
for (size_t i = 0; i < rhss.size(); ++i) {
if (rhss[i].size() == 1) {
// Unary rules are indexed by their single child symbol.
S_F& parent_counts = unarychild_parent_weight[rhss[i][0]];
if (parent_counts.find(*plu_iter) != parent_counts.end()) {
float count = float(parent_counts[*plu_iter]);
current_rule_counts[rhss[i]][*plu_iter] = count;
}
}
else {
// NOTE(review): the trie find() result is dereferenced without an
// end() check -- assumes every rhs in rules_look_up is still present
// in rhs_parent_weight; confirm this invariant.
S_F& parent_counts = rhs_parent_weight.find(rhss[i]) -> data;
if (parent_counts.find(*plu_iter) != parent_counts.end()) {
float count = float(parent_counts[*plu_iter]);
current_rule_counts[rhss[i]][*plu_iter] = count;
}
}
}
}
}
// added by Jackie
//! retrieve_rule_weights() stores the log relative frequency of every rule
//! rooted in a PLU-top category whose probability is at least `min` into
//! rule_weights_snapshot, and returns the number of rules retained.
//! Rules below the threshold are counted (locally) as eliminated.
int retrieve_rule_weights(Ss& plu_top, trie<S, std::map<S, float> >& rule_weights_snapshot, float min = 0) {
std::set<symbol> unique_plu(plu_top.begin(), plu_top.end());
std::set<symbol>::iterator plu_iter = unique_plu.begin();
int counter = 0;
int eliminated_rule = 0;
for (; plu_iter != unique_plu.end(); ++plu_iter) {
Sss& rhss = rules_look_up[*plu_iter];
for (size_t i = 0; i < rhss.size(); ++i) {
if (rhss[i].size() == 1) {
S_F& parent_counts = unarychild_parent_weight[rhss[i][0]];
if (parent_counts.find(*plu_iter) != parent_counts.end()) {
float prob = float(parent_counts[*plu_iter]) / float(afind(parent_weight, *plu_iter));
if (prob >= min) {
++counter;
// Store in log space; MIN_PROB_VALUE substitutes for log(0)
// (only reachable when min <= 0).
prob = prob == 0 ? MIN_PROB_VALUE : log(prob);
rule_weights_snapshot[rhss[i]][*plu_iter] = prob;
// std::cout << "rule(" << *plu_iter << " --> " << rhss[i] << ") = " << prob << std::endl;
}
else {
++eliminated_rule;
}
}
}
else {
// NOTE(review): unchecked trie find()->data, same assumption as in
// retrieve_rule_counts() -- confirm the rhs is always present.
S_F& parent_counts = rhs_parent_weight.find(rhss[i]) -> data;
if (parent_counts.find(*plu_iter) != parent_counts.end()) {
float prob = float(parent_counts[*plu_iter]) / float(afind(parent_weight, *plu_iter));
if (prob >= min) {
++counter;
prob = prob == 0 ? MIN_PROB_VALUE : log(prob);
rule_weights_snapshot[rhss[i]][*plu_iter] = prob;
// std::cout << "rule(" << *plu_iter << " --> " << rhss[i] << ") = " << prob << std::endl;
}
else {
++eliminated_rule;
}
}
}
}
}
// std::cerr << "# eliminated rules = " << eliminated_rule << endl;
return counter;
}
//! filter_rules() rebuilds the filtered_* copies of the unary rules, the
//! longer (rhs-trie) rules and the cached PY trees, keeping only entries
//! whose probability is at least thres.  Returns the fraction of rules
//! eliminated (0 when the grammar is empty).
float filter_rules(F thres) {
  unsigned int total_rule_num = 0;
  unsigned int eliminated_rule_num = 0;
  // Filter the unary rules: keep child --> parent entries whose relative
  // frequency reaches the threshold.
  filtered_unarychild_parent_weight.clear();
  cforeach(S_S_F, it, unarychild_parent_weight) {
    S child = it -> first;
    const S_F& rules = it -> second;
    cforeach(S_F, ptr, rules) {
      S parent = ptr -> first;
      F count = ptr -> second;
      F rule_prob = count / afind(parent_weight, parent);
      if (rule_prob >= thres) {
        filtered_unarychild_parent_weight[child][parent] = count;
      }
      else {
        ++eliminated_rule_num;
      }
      ++total_rule_num;
    }
  }
  // Filter the longer rules and the cached PY trees via functors; both
  // also update the running totals.
  filtered_rhs_parent_weight.clear();
  rhs_parent_weight.for_each(EliminateBinaryRules((*this), total_rule_num, \
                                                  eliminated_rule_num, \
                                                  thres, \
                                                  filtered_rhs_parent_weight));
  filtered_terms_pytrees.clear();
  terms_pytrees.for_each(EliminateCachedTables((*this), total_rule_num, \
                                               eliminated_rule_num, \
                                               thres, filtered_terms_pytrees));
  // Guard against an empty grammar: 0/0 in float would yield NaN.
  if (total_rule_num == 0)
    return 0.0f;
  return (float) eliminated_rule_num / total_rule_num;
}
//! CountRuleNums{} is a trie functor that accumulates the total number of
//! (parent, weight) rule entries into an external counter.
struct CountRuleNums {
  unsigned int& total_rule_num;  // external accumulator, shared by reference
  CountRuleNums(unsigned int& t)
    : total_rule_num(t) {}
  //! Each entry in `rules` is one rule; the rhs key itself is not needed,
  //! so count in O(1) with size() instead of iterating.
  template <typename KeyType, typename DataType>
  void operator() (const KeyType& children, const DataType& rules) {
    total_rule_num += rules.size();
  }
};
struct EliminateCachedTables {
const pycfg_type& g;
unsigned int& total_rule_num;
unsigned int& eliminated_rule_num;
F thres;
St_sT& filtered_terms_pytrees;
EliminateCachedTables(pycfg_type& grammar, unsigned int& t, unsigned int& e, \
F p, St_sT& trees)
: g(grammar), total_rule_num(t), eliminated_rule_num(e), \
thres(p), filtered_terms_pytrees(trees) {
}
template <typename KeyType, typename Trees>
void operator() (const KeyType& children, const Trees& trees) {
cforeach (typename Trees, it, trees) {
F pya = g.get_pya((*it) -> cat);
F pyb = g.get_pyb((*it) -> cat);
U pym = dfind(g.parent_pym, (*it) -> cat);
U pyn = dfind(g.parent_pyn, (*it) -> cat);
F rule_prob;
if ((*it) -> count > 0) {
rule_prob = ((*it) -> count - pya) / (pyn + pyb);
}
else {
// std::cerr << "getting things from 0 " << std::endl;
rule_prob = (pym * pya + pyb) / (pyn + pyb);
}
if (rule_prob >= thres) {
filtered_terms_pytrees[children].insert((*it));
// std::cerr << "accepting " << (*it) -> cat
// << " --> " << children << " = " << rule_prob << std::endl;
}
else {
// std::cerr << "eliminating " << (*it) -> cat
// << " --> " << children << " = " << rule_prob << std::endl;
++eliminated_rule_num;
}
++total_rule_num;
}
}
};
struct EliminateBinaryRules {
const pycfg_type& g;
unsigned int& total_rule_num;
unsigned int& eliminated_rule_num;
F thres;
St_S_F& filtered_rhs_parent_weight;
EliminateBinaryRules(pycfg_type& grammar, \
unsigned int& t, \
unsigned int& e, \
F p, \
St_S_F& binary_rules)
: g(grammar), total_rule_num(t), \
eliminated_rule_num(e), \
thres(p), \
filtered_rhs_parent_weight(binary_rules) {
}
template <typename KeyType, typename DataType>
void operator() (const KeyType& children, const DataType& rules) {
cforeach (typename DataType, it, rules) {
S parent = it -> first;
F count = it -> second;
F rule_prob = count / afind(g.parent_weight, parent);
if (rule_prob >= thres) {
filtered_rhs_parent_weight[children][parent] = count;
// std::cerr << "accepting " << parent << " --> "
// << children << " = " << rule_prob << std::endl;
}
else {
// std::cerr << "eliminating " << parent << " --> "
// << children << " = " << rule_prob << std::endl;
++eliminated_rule_num;
}
++total_rule_num;
}
}
};
// added by Jackie
//! find_rule_prob_thres() recursively searches the tree for the branch with
//! the lowest probability, recording that branch's parent, rhs and
//! probability in the out-parameters.  plu_btms supplies (and is consumed
//! for) the expansions of PLU-top nodes.
void find_rule_prob_thres(tree* tp, Sss& plu_btms, \
S& parent, Ss& rhs, F& min_prob) {
// Check whether the non-terminal is cached
F pya = get_pya(tp -> cat);
if (pya == 1) {
if (tp -> is_plu_top()) {
// PLU-top node: its expansion comes from the front of plu_btms,
// which is consumed here (left-to-right order is assumed).
Ss children = plu_btms[0];
assert(children.size() > 0);
plu_btms.erase(plu_btms.begin());
F branch_prob = rule_prob(tp -> cat, children);
// std::cerr << "branch_prob (" << tp -> cat << " --> " << children << " = " << branch_prob << std::endl;
if (branch_prob < min_prob) {
min_prob = branch_prob;
parent = tp -> cat;
rhs = children;
}
}
else {
// Untabled non-PLU node: score the rule at this node, then recurse.
Ss children;
cforeach(tree::ptrs_type, it, tp -> children) {
children.push_back((*it) -> cat);
}
F branch_prob = rule_prob(tp -> cat, children);
if (branch_prob < min_prob) {
min_prob = branch_prob;
parent = tp -> cat;
rhs = children;
}
// std::cerr << "branch_prob (" << tp -> cat << " --> " << children << " = " << branch_prob << std::endl;
cforeach(tree::ptrs_type, it, tp -> children) {
find_rule_prob_thres(*it, plu_btms, parent, rhs, min_prob);
}
}
}
else {
// Adapted category: score via the PY cache statistics.
F pyb = get_pyb(tp -> cat);
U pym = dfind(parent_pym, tp -> cat);
U pyn = dfind(parent_pyn, tp -> cat);
if (tp -> count > 0) { // existing node
assert(tp -> count <= pyn);
assert(pym > 0);
F branch_prob = (tp -> count - pya) / (pyn + pyb);
Ss children;
tp -> terminals(children);
if (branch_prob < min_prob) {
min_prob = branch_prob;
parent = tp -> cat;
rhs = children;
}
// std::cerr << "branch_prob (" << tp -> cat << " --> " << children << " = " << branch_prob << std::endl;
// For a cached subtree only the leaves are revisited; the interior
// was already paid for when the table entry was created.
std::vector<catcounttree_type*> leaf_nodes;
tp -> terminal_ptrs(leaf_nodes);
for (size_t i = 0; i < leaf_nodes.size(); ++i) {
find_rule_prob_thres(leaf_nodes[i], plu_btms, parent, rhs, min_prob);
}
}
else {
// std::cerr << "0 scenario" << std::endl;
// New-table case: new-table probability times the rule probability.
F branch_prob = (pym * pya + pyb) / (pyn + pyb);
Ss children;
cforeach(tree::ptrs_type, it, tp -> children) {
children.push_back((*it) -> cat);
}
branch_prob *= rule_prob(tp -> cat, children);
if (branch_prob < min_prob) {
min_prob = branch_prob;
parent = tp -> cat;
rhs = children;
}
cforeach(tree::ptrs_type, it, tp -> children) {
find_rule_prob_thres((*it), plu_btms, parent, rhs, min_prob);
}
}
}
}
// added by Jackie
// find the lowest probable rule for plu_top -> plu_btm
//! Returns the minimum relative frequency over all plu_top[i] --> plu_btm[i]
//! rules, recording the minimizing parent and rhs in the out-parameters.
float find_rule_prob_thres(Ss& plu_top, Sss& plu_btm, S& parent, Ss& rhs) {
if (plu_top.size() != plu_btm.size()) {
std::cerr << "plu top size != plu btm size in find_min_prob_rule" << std::endl;
}
// Sentinel larger than any probability; the asserts below require at
// least one rule to be examined so that min_prob ends up in [0, 1].
float min_prob = 100000;
for (size_t i = 0; i < plu_top.size(); ++i) {
if (plu_btm[i].size() == 1) {
float rule_prob = \
float(unarychild_parent_weight[plu_btm[i][0]][plu_top[i]]) / float(afind(parent_weight, plu_top[i])) ;
if (rule_prob < min_prob) {
min_prob = rule_prob;
parent = plu_top[i];
rhs = plu_btm[i];
}
}
else {
// NOTE(review): unchecked trie find()->data, as elsewhere -- assumes
// the rhs is present in rhs_parent_weight.
float rule_prob = \
float((rhs_parent_weight.find(plu_btm[i]) -> data)[plu_top[i]]) / float(afind(parent_weight, plu_top[i]));
if (rule_prob < min_prob) {
min_prob = rule_prob;
parent = plu_top[i];
rhs = plu_btm[i];
}
}
}
// min_prob *= float(random1());
assert(min_prob >= 0);
assert(min_prob <= 1);
return min_prob;
}
//! decrtree() decrements the cache for tp, decrements
//! the rules if the cache count is appropriate, and returns
//! the probability of this tree under the new model.
//
F decrtree(tree* tp) {
U weight = 1; // Modified by Jackie
if (tp -> children.empty()) {
return 1; // terminal node
}
F pya = get_pya(tp->cat); // PY cache statistics
if (pya == 1) { // don't table this category
// Untabled category: remove the rule count at this node and recurse.
F prob = 1;
{
Ss children;
cforeach (tree::ptrs_type, it, tp->children) {
children.push_back((*it)->cat);
}
F ruleprob = decrrule(tp->cat, children, estimate_theta_flag*weight);
assert(ruleprob > 0);
prob *= ruleprob;
}
cforeach (tree::ptrs_type, it, tp->children) {
prob *= decrtree(*it);
}
return prob;
}
// Adapted category: remove one customer from this tree's table.
assert(weight <= tp->count);
tp -> count -= weight;
assert(afind(parent_pyn, tp->cat) >= weight);
const U pyn = (parent_pyn[tp->cat] -= weight);
F pyb = get_pyb(tp->cat);
if (tp->count > 0) { // old PY table entry
// Table survives: its subtree stays counted, so no recursion.
assert(pyn > 0);
F prob = (tp->count - pya)/(pyn + pyb);
assert(finite(prob)); assert(prob > 0); assert(prob <= 1);
return prob;
}
else { // tp->count == 0, remove PY table entry
// Unregister the tree from the yield-indexed cache.
{
Ss terms;
tp->terminals(terms);
sT& pytrees = terms_pytrees[terms];
sT::size_type nerased ATTRIBUTE_UNUSED = pytrees.erase(tp);
assert(nerased == 1);
if (pytrees.empty()) {
terms_pytrees.erase(terms);
}
}
// Bug: when pym or pyn goes to zero and the parent is erased,
// and then the reference to pym or pyn becomes a dangling reference
// U& pym = parent_pym[tp->cat];
// pym -= 1; // reduce cache count
// Take pym by value before any erase so the computation below never
// reads through an erased map entry.
assert(parent_pym.count(tp->cat) > 0);
const U pym = --parent_pym[tp->cat];
if (pym == 0) {
parent_pym.erase(tp->cat);
}
if (pyn == 0) {
parent_pyn.erase(tp->cat);
}
F prob = (pym*pya + pyb)/(pyn + pyb); // select new table
assert(finite(prob)); assert(prob > 0); assert(prob <= 1);
// Removing the table also refunds the base-distribution cost: the rule
// at this node plus the recursive cost of the children.
{
Ss children;
cforeach (tree::ptrs_type, it, tp->children)
children.push_back((*it)->cat);
prob *= decrrule(tp->cat, children, estimate_theta_flag*weight);
}
assert(prob > 0);
cforeach (tree::ptrs_type, it, tp->children) {
prob *= decrtree(*it);
}
// assert(prob > 0);
return prob;
}
} // pycfg_type::decrtree()
//! read() reads a grammar from an input stream (implements >> )
//! Each line has the form "[weight [pya [pyb]]] parent --> rhs...";
//! missing fields fall back to the configured defaults.  The parent of
//! the first rule becomes the start symbol.
//
std::istream& read(std::istream& is) {
start = symbol::undefined();
F weight = default_weight;
F pya = default_pya;
F pyb = default_pyb;
S parent;
// default_value() leaves the default in place when the field is absent;
// the ">> \" -->\"" extraction consumes the literal arrow token
// (presumably a project-provided stream manipulator -- confirm).
while (is >> default_value(weight, default_weight)
>> default_value(pya, default_pya)
>> default_value(pyb, default_pyb)
>> parent >> " -->") {
if (weight<=0)
weight=default_weight;
if (start.is_undefined())
start = parent;
Ss rhs;
readline_symbols(is, rhs);
rules_look_up[parent].push_back(rhs);
if (debug >= 100000)
std::cerr << "# " << weight << '\t' << parent << " --> " << rhs << std::endl;
// Seed the rule weights and remember the prior weights separately.
incrrule(parent, rhs, weight);
if (pya != default_pya)
parent_pya[parent] = pya;
if (pyb != default_pyb)
parent_pyb[parent] = pyb;
rule_priorweight[SSs(parent,rhs)] += weight;
parent_priorweight[parent] += weight;
}
return is;
} // pycfg_type::read()
//! write() writes a grammar (implements << )
//! The start symbol's rules are printed first, then every other parent's.
//
std::ostream& write(std::ostream& os) const {
  assert(start.is_defined());
  write_rules(os, start);
  S_F::const_iterator it = parent_weight.begin();
  for (; it != parent_weight.end(); ++it) {
    if (it->first != start)
      write_rules(os, it->first);
  }
  return os;
} // pycfg_type::write()
//! write_rules() writes all rules and cached trees rooted in parent.
std::ostream& write_rules(std::ostream& os, S parent) const {
  // Longer right-hand sides first, via the trie functor.
  rhs_parent_weight.for_each(write_rule(os, parent));
  // Then the unary rules, which are indexed by child symbol.
  S_S_F::const_iterator it0 = unarychild_parent_weight.begin();
  for (; it0 != unarychild_parent_weight.end(); ++it0) {
    const S child = it0->first;
    const S_F& weights = it0->second;
    S_F::const_iterator it1 = weights.begin();
    for (; it1 != weights.end(); ++it1) {
      if (it1->first == parent)
        os << it1->second << '\t' << parent
           << " --> " << child << std::endl;
    }
  }
  // Dump the PY cache with compact tree printing temporarily disabled.
  const bool old_compact_trees_flag = compact_trees; // save old flag
  compact_trees = false;
  terms_pytrees.for_each(write_pycache(os, parent));
  compact_trees = old_compact_trees_flag;
  return os;
} // pycfg_type::write_rules()
//! write_rule{} writes a single rule
//
struct write_rule {
std::ostream& os;
S parent;
write_rule(std::ostream& os, symbol parent) : os(os), parent(parent) { }
template <typename Keys, typename Value>
void operator() (const Keys& rhs, const Value& parentweights) {
cforeach (typename Value, pwit, parentweights)
if (pwit->first == parent) {
os << pwit->second << '\t' << parent << " -->";
cforeach (typename Keys, rhsit, rhs)
os << ' ' << *rhsit;
os << std::endl;
}
} // pycfg_type::write_rule::operator()
}; // pycfg_type::write_rule{}
//! write_pycache{} writes the cache entries for a category
//
struct write_pycache {
std::ostream& os;
S parent;
write_pycache(std::ostream& os, S parent) : os(os), parent(parent) { }
template <typename Words, typename TreePtrs>
void operator() (const Words& words, const TreePtrs& tps) {
cforeach (typename TreePtrs, tpit, tps)
if ((*tpit)->cat == parent)
os << (*tpit) << std::endl;
} // pycfg_type::write_pycache::operator()
}; // pycfg_type::write_pycache{}
//! logPcorpus() returns the log probability of the corpus trees
//! (rule weights relative to their priors, plus the PY adaptor terms).
//
F logPcorpus() const {
F logP = 0;
// grammar part
// Gamma-function terms for each rule's current weight vs its prior weight.
cforeach (SSs_F, it, rule_priorweight) {
S parent = it->first.first;
const Ss& rhs = it->first.second;
F priorweight = it->second;
F weight = rule_weight(parent, rhs);
logP += lgamma(weight) - lgamma(priorweight);
}
if (debug >= 5000)
TRACE1(logP);
// Matching normalization terms for each parent's total weight.
cforeach (S_F, it, parent_priorweight) {
S parent = it->first;
F priorweight = it->second;
F weight =dfind(parent_weight, parent);
logP += lgamma(priorweight) - lgamma(weight);
}
if (debug >= 5000)
TRACE1(logP);
assert(logP <= 0);
// PY adaptor part
// Per-category seating terms: the b-normalization plus one factor
// log(i*a + b) for each of the pym tables.
cforeach (S_U, it, parent_pyn) {
S parent = it->first;
U pyn = it->second;
U pym = afind(parent_pym, parent);
F pya = get_pya(parent);
F pyb = get_pyb(parent);
logP += lgamma(pyb) - lgamma(pyn+pyb);
for (U i = 0; i < pym; ++i)
logP += log(i*pya + pyb);
}
if (debug >= 5000)
TRACE1(logP);
// Per-table terms, accumulated over the cached trees.
terms_pytrees.for_each(logPcache(*this, logP));
if (debug >= 5000)
TRACE1(logP);
assert(logP <= 0);
return logP;
} // pycfg_type::logPcorpus()
struct logPcache {
const pycfg_type& g;
F& logP;
logPcache(const pycfg_type& g, F& logP) : g(g), logP(logP) { }
template <typename Words, typename TreePtrs>
void operator() (const Words& words, const TreePtrs& tps) {
cforeach (typename TreePtrs, it, tps) {
S parent = (*it)->cat;
U count = (*it)->count;
F pya = g.get_pya(parent);
logP += lgamma(count-pya) - lgamma(1-pya);
}
} // pycfg_type::logPcache::operator()
}; // pycfg_type::logPcache{}
//! logPrior() returns the prior probability of the PY a and b values.
//! Nothing is accumulated unless the Gamma prior on b is proper.
//
F logPrior() const {
  F sumLogP = 0;
  if (pyb_gamma_s > 0 && pyb_gamma_c > 0) {
    S_U::const_iterator it = parent_pyn.begin();
    for (; it != parent_pyn.end(); ++it) {
      const S parent = it->first;
      const F pya = get_pya(parent);
      assert(pya >= 0);
      assert(pya <= 1);
      const F pyb = get_pyb(parent);
      assert(pyb >= 0);
      // Beta prior on a only when it is proper and a is resampled (a > 0).
      if (pya_beta_a > 0 && pya_beta_b > 0 && pya > 0) {
        const F logP = pya_logPrior(pya, pya_beta_a, pya_beta_b);
        if (debug >= 2000)
          TRACE5(parent, logP, pya, pya_beta_a, pya_beta_b);
        sumLogP += logP;
      }
      const F logP = pyb_logPrior(pyb, pyb_gamma_c, pyb_gamma_s);
      if (debug >= 2000)
        TRACE5(parent, logP, pyb, pyb_gamma_c, pyb_gamma_s);
      sumLogP += logP;
    }
  }
  return sumLogP;
} // pycfg_type::logPrior()
//! pya_logPrior() calculates the Beta prior on pya.
//
static F pya_logPrior(F pya, F pya_beta_a, F pya_beta_b) {
  // Log Beta(a, b) density evaluated at pya.
  return lbetadist(pya, pya_beta_a, pya_beta_b);
} // pycfg_type::pya_logPrior()
//! pyb_logPrior() calculates the prior probability of pyb
//! wrt the Gamma prior on pyb.
//
static F pyb_logPrior(F pyb, F pyb_gamma_c, F pyb_gamma_s) {
  // Log Gamma(c, s) density evaluated at pyb.
  return lgammadist(pyb, pyb_gamma_c, pyb_gamma_s);
} // pcfg_type::pyb_logPrior()
//////////////////////////////////////////////////////////////////////
// //
// Resample pyb //
// //
//////////////////////////////////////////////////////////////////////
//! resample_pyb_type{} is a function object that returns the part of log prob that depends on pyb.
//! This includes the Gamma prior on pyb, but doesn't include e.g. the rule probabilities
//! (as these are a constant factor)
//
struct resample_pyb_type {
typedef double F;
U pyn, pym;
F pya, pyb_gamma_c, pyb_gamma_s;
resample_pyb_type(U pyn, U pym, F pya, F pyb_gamma_c, F pyb_gamma_s)
: pyn(pyn), pym(pym), pya(pya), pyb_gamma_c(pyb_gamma_c), pyb_gamma_s(pyb_gamma_s)
{ }
//! operator() returns the part of the log posterior probability that depends on pyb
//
F operator() (F pyb) const {
assert(pyb > 0);
F logPrior = pyb_logPrior(pyb, pyb_gamma_c, pyb_gamma_s); //!< prior for pyb
F logProb = 0;
logProb += (pya == 0 ? pym*log(pyb) : pym*log(pya) + lgamma(pym + pyb/pya) - lgamma(pyb/pya));
logProb += lgamma(pyb) - lgamma(pyn+pyb);
return logProb+logPrior;
}
}; // pcfg_type::resample_pyb_type{}
//! resample_pyb() samples new values for pyb for each adapted nonterminal.
//! It returns the log prior prob of new b values.
//
void resample_pyb() {
U niterations = 20; //!< number of resampling iterations
// std::cerr << "\n## resample_pyb(), initial parent_pya = " << parent_pya << ", parent_pyb = " << parent_pyb << std::endl;
// Only categories with customers (an entry in parent_pyn) are resampled.
cforeach (S_U, it, parent_pyn) {
S parent = it->first;
U pyn = it->second;
U pym = afind(parent_pym, parent);
F pya = get_pya(parent);
F pyb = get_pyb(parent);
resample_pyb_type pyb_logP(pyn, pym, pya, pyb_gamma_c, pyb_gamma_s);
// Slice-sample pyb on (0, inf) from the conditional posterior;
// the commented call below is the alternative bounded sampler.
// pyb = slice_sampler1d(pyb_logP, pyb, random1, 0.0, std::numeric_limits<double>::infinity(), 0.0, niterations, 100*niterations);
pyb = slice_sampler1dp(pyb_logP, pyb, random1, 1, niterations);
parent_pyb[parent] = pyb;
// parent_bap[parent].first += naccepted;
// parent_bap[parent].second += nproposed;
}
} // pcfg_type::resample_pyb()
//////////////////////////////////////////////////////////////////////
// //
// Resample pya and pyb //
// //
//////////////////////////////////////////////////////////////////////
//! resample_pya_type{} calculates the part of the log prob that depends on pya.
//! This includes the Beta prior on pya, but doesn't include e.g. the rule probabilities
//! (as these are a constant factor)
//
struct resample_pya_type {
  U pyn, pym;
  F pyb, pya_beta_a, pya_beta_b;
  const Ts& trees;
  resample_pya_type(U pyn, U pym, F pyb, F pya_beta_a, F pya_beta_b, const Ts& trees)
    : pyn(pyn), pym(pym), pyb(pyb), pya_beta_a(pya_beta_a), pya_beta_b(pya_beta_b), trees(trees)
  { }
  //! operator() returns the part of the log posterior probability that depends on pya
  //
  F operator() (F pya) const {
    const F prior = pya_logPrior(pya, pya_beta_a, pya_beta_b);
    F logProb = 0;
    // Per-table terms lgamma(count - a) - lgamma(1 - a), with the second
    // factor hoisted out of the loop.
    const F lgamma1a = lgamma(1-pya);
    Ts::const_iterator it = trees.begin();
    for (; it != trees.end(); ++it) {
      const U count = (*it)->count;
      logProb += lgamma(count-pya) - lgamma1a;
    }
    // Table-count term; a == 0 degenerates to m*log(b).
    logProb += (pya == 0 ? pym*log(pyb) : pym*log(pya) + lgamma(pym + pyb/pya) - lgamma(pyb/pya));
    return prior + logProb;
  } // pycfg_type::resample_pya_type::operator()
}; // pycfg_type::resample_pya_type{}
//! resample_pya() samples new values for pya for each adapted nonterminal
//! using the cached trees grouped per parent in parent_trees.
//
void resample_pya(const S_Ts& parent_trees) {
U niterations = 20; //!< number of resampling iterations
// std::cerr << "\n## Initial parent_pya = " << parent_pya << ", parent_pyb = " << parent_pyb << std::endl;
cforeach (S_U, it, parent_pyn) {
S parent = it->first;
F pya = get_pya(parent);
if (pya == 0) // if this nonterminal has pya == 0, then don't resample
continue;
F pyb = get_pyb(parent);
U pyn = it->second;
U pym = afind(parent_pym, parent);
const Ts& trees = afind(parent_trees, parent);
resample_pya_type pya_logP(pyn, pym, pyb, pya_beta_a, pya_beta_b, trees);
// Slice-sample pya on (0, 1]; the lower bound avoids evaluating at 0.
pya = slice_sampler1d(pya_logP, pya, random1, std::numeric_limits<double>::min(), 1.0, 0.0, niterations);
parent_pya[parent] = pya;
}
} // pycfg_type::resample_pya()
//! resample_pyab_parent_trees_helper{} constructs parent_trees from terms_pytrees.
//
struct resample_pyab_parent_trees_helper {
S_Ts& parent_trees;
resample_pyab_parent_trees_helper(S_Ts& parent_trees) : parent_trees(parent_trees) { }
template <typename Words, typename TreePtrs>
void operator() (const Words& words, const TreePtrs& tps) {
cforeach (typename TreePtrs, it, tps) {
S parent = (*it)->cat;
parent_trees[parent].push_back(*it);
}
} // pycfg_type::resample_pyab_parent_trees_helper::operator()
}; // pycfg_type::resample_pyab_parent_trees_helper{}
//! resample_pyab() resamples both pya and pyb for each adapted nonterminal.
//
void resample_pyab() {
const U niterations = 5; //!< number of alternating samples of pya and pyb
S_Ts parent_trees;
terms_pytrees.for_each(resample_pyab_parent_trees_helper(parent_trees));
for (U i=0; i<niterations; ++i) {
resample_pyb();
resample_pya(parent_trees);
}
resample_pyb();
} // pycfg_type::resample_pyab()
//! write_adaptor_parameters() writes out adaptor parameters to a file
//! (one " parent pym pyn pya pyb" group per adapted category).
//
std::ostream& write_adaptor_parameters(std::ostream& os) const {
  S_F::const_iterator it = parent_priorweight.begin();
  for (; it != parent_priorweight.end(); ++it) {
    const S parent = it->first;
    const F pya = get_pya(parent);
    if (pya == 1) // unadapted category: nothing to report
      continue;
    const U pym = dfind(parent_pym, parent);
    const U pyn = dfind(parent_pyn, parent);
    const F pyb = get_pyb(parent);
    os << ' ' << parent << ' ' << pym << ' ' << pyn
       << ' ' << pya << ' ' << pyb;
  }
  return os;
} // pycfg_type::write_adaptor_parameters()
//! initialize_predictive_parse_filter() initializes the predictive
//! parse filter by building the grammar that the Earley parser requires
//
void initialize_predictive_parse_filter() {
predictive_parse_filter = true;
cforeach (SSs_F, it, rule_priorweight) {
const SSs& rule = it->first;
const Ss& children = rule.second;
assert(!children.empty());
S child1 = children.front();
predictive_parse_filter_grammar.add_rule(it->first,
children.size() == 1
&& !parent_priorweight.count(child1));
}
} // pycfg_type::initialize_predictive_parse_filter();
}; // pycfg_type{}
//! operator>> (pycfg_type&) reads a pycfg_type g, setting g.start
//! to the parent of the first rule read.
//
inline
std::istream& operator>> (std::istream& is, pycfg_type& g) {
return g.read(is); // all parsing is delegated to pycfg_type::read()
} // operator>> (pycfg_type&)
//! operator<< (pycfg_type&) writes the grammar via pycfg_type::write().
inline
std::ostream& operator<< (std::ostream& os, const pycfg_type& g) {
return g.write(os);
} // operator<< (pycfg_type&)
//! hash<Stit> hashes a trie iterator by the address of the node it points
//! at, i.e. by cell identity rather than cell contents.
// NOTE(review): std::tr1 and std::unary_function are pre-C++11 facilities;
// migrating to std::hash would touch all call sites -- left as-is here.
namespace std { namespace tr1 {
template <> struct hash<pycfg_type::Stit>
: public std::unary_function<pycfg_type::Stit, std::size_t> {
size_t operator()(const pycfg_type::Stit t) const
{
return size_t(&(*t));
} // ext::hash<pycfg_type::Stit>::operator()
}; // ext::hash<pycfg_type::Stit>{}
} } // namespace std::tr1
static const F unaryclosetolerance = 1e-7; // convergence tolerance for unary closure
//! pycky{} is a CKY-style chart parser over a Pitman-Yor adapted grammar.
class pycky {
public:
const pycfg_type& g; // the grammar being parsed with (not owned)
F anneal; // annealing factor (1 = no annealing)
pycky(const pycfg_type& g, F anneal=1) : g(g), anneal(anneal) { }
typedef pycfg_type::tree tree;
typedef pycfg_type::U U;
typedef pycfg_type::S_S_F S_S_F;
typedef pycfg_type::St_S_F St_S_F;
typedef pycfg_type::Stit Stit;
typedef std::vector<S_F> S_Fs;
typedef tr1::unordered_map<Stit,F> Stit_F;
typedef std::vector<Stit_F> Stit_Fs;
typedef pycfg_type::sT sT;
typedef pycfg_type::St_sT St_sT;
typedef St_sT::const_iterator StsTit;
typedef std::map<StsTit, F> StsTit_F; // modified by Jackie
typedef std::vector<StsTit> StsTits;
typedef std::set<S> sS;
//! index() returns the location of cell in cells[]
//
static U index(U i, U j) { return j*(j-1)/2+i; }
//! ncells() returns the number of cells required for sentence of length n
//
static U ncells(U n) { return n*(n+1)/2; }
Ss terminals; // the sentence currently being parsed
S_Fs inactives; // per chart cell: category -> inside probability
Stit_Fs actives; // per chart cell: partial (trie) rule match -> probability
std::vector<StsTit_F> tree_leaf_probs; // modified by Jackie; per cell: cached-tree trie node -> expansion prob
std::vector<sS> top_terminals; // added by Jackie; per cell: reachable PLU-top symbols
StsTits pytits;
typedef std::vector<sS> sSs;
sSs predicteds;
// added by Jackie
//! get_top_terminals() return plu_tops for plu_bottoms
void get_top_terminals(const S_F& expansion, sS& top_terminals);
// added by Jackie
//! get_top_terminals() return plu_tops for plu_bottoms
void get_top_terminals(const S& plu_btm, \
sS& top_terminals, float min_prob);
// added by Jackie
//! get_top_terminals() return plu_tops for plu_bottoms
void get_top_terminals(const S& plu_btm, sS& top_terminals, S_S_F&);
// added by Jackie
//! get_trees() return plu_tops for plu_bottoms
void get_trees(const sS& top_terminals, StsTit_F& trees, \
S_F& probs, St_sT&);
// added by Jackie
// ! get_expansion_prob() returns prob(plu_top -> plu_btm) for a terminal
void get_expansion_prob(const S& terminal, S_F& prob, S_S_F&);
// added by Jackie
//! extend_inside()
/*
template <typename terminals_type>
F extend_inside(const terminals_type& terminals, float min_prob) {
return extend_inside(terminals, g.start, min_prob);
}
*/
// added by Jackie
//! extend_inside()
//! Convenience overload: parses starting from the grammar's start symbol.
template <typename terminals_type>
F extend_inside(const terminals_type& terminals, \
S_S_F& unary_rules, \
St_S_F& rhs_rules, \
St_sT& terms) {
return extend_inside(terminals, g.start, unary_rules, rhs_rules, terms);
}
// added by Jackie
//! extend_inside() constructs the inside table, and returns the probability
//! of the start symbol rewriting to the terminals.  The supplied rule maps
//! and cached-tree trie replace the grammar's own tables, so the chart can
//! be built against a filtered/extended grammar.
template <typename terminals_type>
F extend_inside(const terminals_type& terminals0, S start, \
S_S_F& unary_rules, St_S_F& rhs_rules, St_sT& terms) {
terminals = terminals0;
if (debug >= 10000) {
std::cerr << "# cky::extend_inside() terminals = " << terminals << std::endl;
}
U n = terminals.size();
// Reset all per-sentence chart structures to n*(n+1)/2 cells.
top_terminals.clear(); // to store top terminals and to get tables
top_terminals.resize(ncells(n));
inactives.clear(); // for unary rules
inactives.resize(ncells(n));
actives.clear(); // for multinary rules
actives.resize(ncells(n));
tree_leaf_probs.clear(); // to store prob(top -> btm)
tree_leaf_probs.resize(ncells(n));
// Phase 1: seed the length-1 cells from the terminals.
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (U i = 0; i < n; ++i) { // terminals
inactives[index(i, i + 1)][terminals[i]] = 1;
// get prob(plu_top -> plu_btm) for the terminals
S_F terminal_expansion_prob;
get_expansion_prob(terminals[i], terminal_expansion_prob, unary_rules);
// get top_terminals
get_top_terminals(terminals[i], top_terminals[index(i, i + 1)], unary_rules);
// get trees that expand to top_terminals
StsTit_F& tree_leaf_prob = tree_leaf_probs[index(i, i + 1)];
get_trees(top_terminals[index(i, i + 1)], tree_leaf_prob, terminal_expansion_prob, terms);
// Fold the matching cached PY trees into the cell's inside scores.
StsTit_F::const_iterator tree_iter = tree_leaf_prob.begin();
for (; tree_iter != tree_leaf_prob.end(); ++tree_iter) {
if ((tree_iter -> first) != terms.end()) {
add_pycache((tree_iter -> first) -> data, \
inactives[index(i, i + 1)], tree_iter -> second);
}
}
inside_unaryclose(inactives[index(i, i + 1)], actives[index(i, i + 1)], NULL, unary_rules, rhs_rules);
if (debug >= 20000) {
std::cerr << "# cky::extend_inside() inactives[" << i << "," << i+1 << "] = "
<< inactives[index(i,i+1)] << std::endl;
}
if (debug >= 20100) {
std::cerr << "# cky::extend_inside() actives[" << i << "," << i+1 << "] = "
<< actives[index(i,i+1)] << std::endl;
}
}
/*
for (U i = 0; i < n ; ++i) {
std::cerr << "termianl[" << i << "] = " << terminals[i] << std::endl;
std::cerr << "# top_terminals[" << index(i, i + 1) << "] = " << top_terminals[index(i, i + 1)].size() << std::endl;
}
*/
// Phase 2: fill wider spans bottom-up; cells of equal width are
// independent, so each diagonal can be processed in parallel.
for (U gap = 2; gap <= n; ++gap) { // non-terminals
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (U left = 0; left <= n - gap; ++left) {
U right = left + gap;
S_F& parentinactives = inactives[index(left, right)];
Stit_F& parentactives = actives[index(left, right)];
// Combine every split point: active (partial) left edge + completed
// right constituent.
for (U mid = left + 1; mid < right; ++mid) {
const S_F& rightinactives = inactives[index(mid, right)];
if (rightinactives.empty()) {
continue;
}
Stit_F& leftactives = actives[index(left, mid)];
cforeach (Stit_F, itleft, leftactives) {
const Stit leftactive = itleft -> first;
const F leftprob = itleft -> second;
cforeach (S_F, itright, rightinactives) {
S rightinactive = itright -> first;
const F rightprob = itright -> second;
const Stit parentactive = leftactive -> find1(rightinactive);
if (parentactive != leftactive -> end()) {
F leftrightprob = leftprob * rightprob;
cforeach (S_F, itparent, parentactive -> data) {
S parent = itparent -> first;
// "T_" parents are scored identically; the branch only
// names the rule probability explicitly.
if (parent.string_reference().find("T_") != std::string::npos) {
F rule_prob = (itparent -> second) / afind(g.parent_weight, parent);
parentinactives[parent] += leftrightprob * power(rule_prob, anneal);
}
else {
parentinactives[parent] += leftrightprob * \
power(itparent -> second / afind(g.parent_weight, parent), anneal);
}
}
if (!parentactive -> key_trie.empty())
parentactives[parentactive] += leftrightprob;
}
}
}
}
// PY correction
// Scale each adapted category by its new-table probability.
foreach (S_F, it, parentinactives) {
F pya = g.get_pya(it -> first); // PY cache statistics
if (pya == 1.0) {
continue;
}
F pyb = g.get_pyb(it -> first);
U pym = dfind(g.parent_pym, it -> first);
U pyn = dfind(g.parent_pyn, it -> first);
it -> second *= power((pym * pya + pyb) / (pyn + pyb), anneal);
}
StsTit_F& tree_leaf_prob = tree_leaf_probs[index(left, right)];
if (gap <= 2) {
get_top_terminals(parentinactives, top_terminals[index(left, right)]);
get_trees(top_terminals[index(left, right)], tree_leaf_prob, parentinactives, terms);
}
// need to get all the py trees for this cell
// get top_terminals that map to one terminal and two terminals
// Extend cached-tree paths from the last one or two split points.
StsTit_F temp;
for (U mid = right - 1 ; mid >= right - 2 && mid > left ; --mid) {
const StsTit_F& tables = tree_leaf_probs[index(left, mid)];
const S_F& plu_btm_probs = inactives[index(mid, right)];
StsTit_F::const_iterator t_iter = tables.begin();
for (; t_iter != tables.end(); ++t_iter) {
if ((t_iter -> first) != terms.end()) {
const std::map<S, trie<S, sT> >& next_tables = (t_iter -> first) -> key_trie;
std::map<S, trie<S, sT> >::const_iterator next_ptr = next_tables.begin();
for (; next_ptr != next_tables.end(); ++next_ptr) {
if (&(next_ptr -> second) != terms.end()) {
F tree_expand_prob = (t_iter -> second) * dfind(plu_btm_probs, next_ptr -> first);
if (tree_expand_prob > 0) {
tree_leaf_prob[&(next_ptr -> second)] += tree_expand_prob;
}
}
}
}
}
}
// Fold the matching cached PY trees into this cell's inside scores.
StsTit_F::const_iterator iter = tree_leaf_prob.begin();
for (; iter != tree_leaf_prob.end(); ++iter) {
if ((iter -> first) != terms.end()) {
add_pycache((iter -> first) -> data, parentinactives, iter -> second);
}
}
inside_unaryclose(parentinactives, parentactives, NULL, unary_rules, rhs_rules);
if (debug >= 20000) {
std::cerr << "# cky::extend_inside() inactives[" << left << "," << right
<< "] = " << parentinactives << std::endl;
}
if (debug >= 20100)
std::cerr << "# cky::extend_inside() actives[" << left << "," << right << "] = "
<< parentactives << std::endl;
}
}
if (inactives[index(0, n)].find(start) == inactives[index(0, n)].end()) {
std::cerr << "Can't find anything" << std::endl;
}
return dfind(inactives[index(0, n)], start);
} // pycky::extend_inside()
// added by Jackie
//! add_pycache()
//! add_pycache() -- weighted variant: for each cached tree in tps whose root
//! category is Pitman-Yor adapted (pya != 1), add the annealed probability of
//! re-using that cached table, scaled by the caller-supplied prob, into
//! inactives.  Categories with pya == 1 have no PY cache and are skipped.
void add_pycache(const sT& tps, S_F& inactives, const F prob) const {
cforeach (sT, it, tps) {
symbol cat = (*it) -> cat;
F pya = g.get_pya(cat); // PY cache statistics
if (pya == 1.0)
continue; // pya == 1 disables the Pitman-Yor cache for this category
F pyb = g.get_pyb(cat);
U pyn = dfind(g.parent_pyn, cat); // total customer count for cat
// CRP table-reuse mass (count - pya)/(pyn + pyb), annealed, times prob
inactives[cat] += power( ((*it)->count - pya)/(pyn + pyb), anneal) * prob;
}
} // pycky::add_cache()
//! add_pycache() -- unweighted variant (prob implicitly 1): same PY
//! table-reuse contribution as the three-argument overload above, added
//! directly into inactives for each cached tree in tps.
void add_pycache(const sT& tps, S_F& inactives) const {
cforeach (sT, it, tps) {
symbol cat = (*it) -> cat;
F pya = g.get_pya(cat); // PY cache statistics
if (pya == 1.0)
continue; // no PY cache for this category
F pyb = g.get_pyb(cat);
U pyn = dfind(g.parent_pyn, cat); // total customer count for cat
inactives[cat] += power(((*it) -> count - pya) / (pyn + pyb), anneal);
}
} // pycky::add_cache()
//! inside_unaryclose() computes the closure of the unary rules over
//! `inactives`, iterating until the largest relative increase in any
//! parent's mass falls below unaryclosetolerance, then seeds `actives`
//! with every completed category that can begin a binary rule in rhs_rules.
//! NOTE(review): `predictedparents` is never read in this variant, so the
//! Earley predictive filter is not applied here -- confirm intentional.
void inside_unaryclose(S_F& inactives, \
Stit_F& actives, \
const sS* predictedparents, \
const S_S_F& unary_rules, \
const St_S_F& rhs_rules) const {
F delta = 1;
S_F delta_prob1 = inactives;
S_F delta_prob0;
while (delta > unaryclosetolerance) {
delta = 0;
// only the mass added in the previous pass is re-propagated
delta_prob0 = delta_prob1;
delta_prob1.clear();
cforeach (S_F, it0, delta_prob0) {
S child = it0 -> first;
S_S_F::const_iterator it = unary_rules.find(child);
if (it != unary_rules.end()) {
const S_F& parent_count = it -> second;
cforeach (S_F, it1, parent_count) {
S parent = it1 -> first;
if (parent.string_reference().find("T_") != std::string::npos) {
// "T_"-prefixed parents: rule weight normalized by the parent's
// total weight, with the PY new-table correction
// (pym*pya + pyb)/(pyn + pyb) when pya != 1
F prob = it0 -> second;
F pya = g.get_pya(parent);
F expansion_prob;
if (pya == 1) {
expansion_prob = it1 -> second / afind(g.parent_weight, parent);
}
else {
F pyb = g.get_pyb(parent);
U pym = dfind(g.parent_pym, parent);
U pyn = dfind(g.parent_pyn, parent);
expansion_prob = (it1 -> second) / afind(g.parent_weight, parent) \
* (pym * pya + pyb) / (pyn + pyb);
}
prob *= power(expansion_prob, anneal);
delta_prob1[parent] += prob;
// relative growth of this parent's total drives convergence;
// note inactives[parent] is updated as a side effect here
delta = std::max(delta, prob/(inactives[parent] += prob));
}
else {
// NOTE(review): this branch computes the same quantity as the
// "T_" branch above (identical PY correction, only annealed as a
// single power) -- confirm the split on the "T_" prefix is needed.
F prob = it0 -> second; // child's prob so far
F pya = g.get_pya(parent);
if (pya == 1)
prob *= power(it1 -> second/afind(g.parent_weight, parent), anneal); // prob(parent -> child)
else {
F pyb = g.get_pyb(parent);
U pym = dfind(g.parent_pym, parent);
U pyn = dfind(g.parent_pyn, parent);
prob *= power(it1 -> second / afind(g.parent_weight, parent) * (pym * pya + pyb) / (pyn + pyb), anneal);
}
delta_prob1[parent] += prob;
delta = std::max(delta, prob/(inactives[parent] += prob));
}
}
}
}
}
// every completed category that is the first child of some binary rule
// becomes an active (dotted) edge with the same probability mass
cforeach (S_F, it0, inactives) {
Stit it1 = rhs_rules.find1(it0 -> first);
if (it1 != rhs_rules.end())
actives[it1] += it0 -> second;
}
} // pycky::inside_unaryclose()
// added by Jackie
void random_plu(Sss&, StsTit, const F prob, U left, U right, St_sT& terms);
// added by Jackie
tree* extend_random_inactive(const S parent, F parentprob,
const U left, const U right, Sss& new_terminals, \
S_S_F& unary_rules, St_S_F& rhs_rules, St_sT& terms);
// added by Jackie
void extend_random_active(const Stit parent, F parentprob, const U left, const U right, \
tree::ptrs_type& siblings, Sss& new_terminals, \
S_S_F& unary_rules, St_S_F& rhs_rules, St_sT& terms);
// added by Jackie
//! extend_random_tree() samples a tree rooted in s spanning the whole
//! terminal string, using the chart built by a preceding extend_inside()
//! call (afind aborts if s has no inside probability for the full span).
tree* extend_random_tree(S s, Sss& new_terminals, S_S_F& unary_rules, St_S_F& rhs_rules, St_sT& terms) {
U n = terminals.size();
return extend_random_inactive(s, afind(inactives[index(0, n)], s), 0, n, new_terminals, unary_rules, rhs_rules, terms);
}
// added by Jackie
//! Convenience overload: samples a tree rooted in the grammar's start
//! symbol g.start by delegating to the overload above.
tree* extend_random_tree(Sss& new_plu_btm, \
S_S_F& unary_rules, \
St_S_F& rhs_rules, \
St_sT& terms) {
return extend_random_tree(g.start, new_plu_btm, unary_rules, rhs_rules, terms); }
//! inside() constructs the inside table, and returns the probability
//! of the start symbol rewriting to the terminals.
//
//! inside() constructs the CKY inside chart for terminals0 and returns the
//! inside probability of `start` rewriting to the whole string.  Refills the
//! member charts inactives/actives/pytits as a side effect; random_tree()
//! and random_inactive() read those charts, so inside() must run first.
template <typename terminals_type>
F inside(const terminals_type& terminals0, S start) {
terminals = terminals0;
if (debug >= 10000)
std::cerr << "# cky::inside() terminals = " << terminals << std::endl;
U n = terminals.size();
if (g.predictive_parse_filter) {
// optional Earley top-down filter: restricts which parents may enter
// each chart cell (checked again in the binary-combination loop below)
earley(g.predictive_parse_filter_grammar, start, terminals, predicteds);
if (!predicteds[index(0,n)].count(start))
std::cerr << "## " << HERE << " Error: earley parse failed, terminals = "
<< terminals << std::endl << exit_failure;
}
// reset all three charts to one (empty) entry per span
inactives.clear();
inactives.resize(ncells(n));
actives.clear();
actives.resize(ncells(n));
pytits.clear();
pytits.resize(ncells(n));
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (U i = 0; i < n; ++i) { // terminals
// base case: each length-1 span rewrites to its terminal with prob 1;
// pytits caches the PY tree-trie position reached by this terminal
pytits[index(i, i+1)] = g.terms_pytrees.find1(terminals[i]); // PY cache
inactives[index(i,i+1)][terminals[i]] = 1;
StsTit& pytit = pytits[index(i,i+1)];
if (pytit != g.terms_pytrees.end())
add_pycache(pytit->data, inactives[index(i,i+1)]);
inside_unaryclose(inactives[index(i,i+1)], actives[index(i,i+1)], NULL, g.unarychild_parent_weight, g.rhs_parent_weight);
if (debug >= 20000)
std::cerr << "# cky::inside() inactives[" << i << "," << i+1 << "] = "
<< inactives[index(i,i+1)] << std::endl;
if (debug >= 20100)
std::cerr << "# cky::inside() actives[" << i << "," << i+1 << "] = "
<< actives[index(i,i+1)] << std::endl;
if (debug >= 20100) {
std::cerr << "# cky::inside() pytits[" << i << "," << i+1 << "] = ";
if (pytits[index(i, i+1)] == g.terms_pytrees.end())
std::cerr << "()" << std::endl;
else
std::cerr << pytits[index(i, i+1)]->data << std::endl;
}
}
// wider spans in order of increasing width; cells of equal width are
// independent, which is what makes the inner loop parallelizable
for (U gap = 2; gap <= n; ++gap) // non-terminals
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (U left = 0; left <= n-gap; ++left) {
U right = left + gap;
sS* predictedparents = g.predictive_parse_filter ? &predicteds[index(left,right)] : NULL;
// extend the PY tree-trie position of the (left, right-1) span by one
// terminal to get this span's position in the cached-trees trie
const StsTit& pytit0 = pytits[index(left, right-1)];
StsTit& pytit = pytits[index(left, right)];
if (pytit0 == g.terms_pytrees.end())
pytit = g.terms_pytrees.end();
else
pytit = pytit0->find1(terminals[right-1]);
S_F& parentinactives = inactives[index(left,right)];
Stit_F& parentactives = actives[index(left,right)];
// binary combination: active (left,mid) edge + inactive (mid,right) edge
for (U mid = left+1; mid < right; ++mid) {
const S_F& rightinactives = inactives[index(mid,right)];
if (rightinactives.empty())
continue;
Stit_F& leftactives = actives[index(left,mid)];
cforeach (Stit_F, itleft, leftactives) {
const Stit leftactive = itleft->first;
const F leftprob = itleft->second;
cforeach (S_F, itright, rightinactives) {
S rightinactive = itright->first;
const F rightprob = itright->second;
const Stit parentactive = leftactive->find1(rightinactive);
if (parentactive != leftactive->end()) {
F leftrightprob = leftprob * rightprob;
cforeach (S_F, itparent, parentactive->data) {
S parent = itparent->first;
if (g.predictive_parse_filter && !predictedparents->count(parent))
continue;
parentinactives[parent] += leftrightprob * power(itparent->second/afind(g.parent_weight, parent), anneal);
}
if (!parentactive->key_trie.empty())
parentactives[parentactive] += leftrightprob;
}
}
}
}
// PY correction: scale each parent by the new-table probability
// (pym*pya + pyb)/(pyn + pyb); skipped when pya == 1 (PY disabled)
foreach (S_F, it, parentinactives) {
F pya = g.get_pya(it->first); // PY cache statistics
if (pya == 1.0)
continue;
F pyb = g.get_pyb(it->first);
U pym = dfind(g.parent_pym, it->first);
U pyn = dfind(g.parent_pyn, it->first);
it->second *= power( (pym*pya + pyb)/(pyn + pyb), anneal);
}
// add the mass of cached whole-subtrees matching this span, if any
if (pytit != g.terms_pytrees.end())
add_pycache(pytit->data, parentinactives);
inside_unaryclose(parentinactives, parentactives, predictedparents, g.unarychild_parent_weight, g.rhs_parent_weight);
if (debug >= 20000)
std::cerr << "# cky::inside() inactives[" << left << "," << right
<< "] = " << parentinactives << std::endl;
if (debug >= 20100)
std::cerr << "# cky::inside() actives[" << left << "," << right << "] = "
<< parentactives << std::endl;
if (debug >= 20100) {
std::cerr << "# cky::inside() pytits[" << left << "," << right << "] = ";
if (pytits[index(left, right)] == g.terms_pytrees.end())
std::cerr << "()" << std::endl;
else
std::cerr << pytits[index(left, right)]->data << std::endl;
}
}
// 0 if start cannot derive the whole string (dfind default)
return dfind(inactives[index(0,n)], start);
} // pycky::inside()
//! random_tree() returns a random parse tree for terminals
//! random_tree(s) samples a parse rooted in s over the whole terminal
//! string from the chart built by the last inside() call; afind aborts
//! if s has no inside probability for the full span.
tree* random_tree(S s) {
U n = terminals.size();
return random_inactive(s, afind(inactives[index(0, n)], s), 0, n);
}
// pycky::random_tree
//! Convenience overload rooted in the grammar's start symbol.
tree* random_tree() { return random_tree(g.start); }
//! random_inactive() returns a random expansion for an inactive edge
//
//! random_inactive() samples one expansion of an inactive edge
//! (parent, left, right) by inverse-CDF sampling: a threshold is drawn in
//! [0, parentprob) and candidate expansions are accumulated -- cached PY
//! subtrees first, then unary rules, then binary rules -- until the running
//! total crosses the threshold.  Returns a newly allocated node, or a
//! pointer into the PY cache when a cached subtree is selected.
tree* random_inactive(const S parent, F parentprob,
const U left, const U right) const {
if (left + 1 == right && parent == terminals[left])
return new tree(parent); // leaf: span of one matching terminal
F probthreshold = parentprob * random1();
F probsofar = 0;
F pya = g.get_pya(parent);
F rulefactor = 1;
if (pya != 1) { // get tree from cache
F pyb = g.get_pyb(parent);
U pyn = dfind(g.parent_pyn, parent);
const StsTit& pytit = pytits[index(left, right)];
if (pytit != g.terms_pytrees.end())
cforeach (sT, it, pytit->data) {
if ((*it)->cat != parent)
continue;
// CRP: probability of re-using this cached table
probsofar += power( ((*it)->count - pya)/(pyn + pyb), anneal);
if (probsofar >= probthreshold)
return *it; // NOTE: returns the cached tree itself, not a copy
}
// new-table factor multiplies every freshly expanded rule below
U pym = dfind(g.parent_pym, parent);
rulefactor = (pym*pya + pyb)/(pyn + pyb);
}
// tree won't come from cache, so cons up new node
tree* tp = new tree(parent);
rulefactor /= afind(g.parent_weight, parent); // normalize rule weights
const S_F& parentinactives = inactives[index(left, right)];
// try unary rules
cforeach (S_F, it0, parentinactives) {
S child = it0->first;
F childprob = it0->second;
S_S_F::const_iterator it1 = g.unarychild_parent_weight.find(child);
if (it1 != g.unarychild_parent_weight.end()) {
const S_F& parent1_weight = it1->second;
probsofar += childprob * power(dfind(parent1_weight, parent)*rulefactor, anneal);
if (probsofar >= probthreshold) {
tp->children.push_back(random_inactive(child, childprob, left, right));
return tp;
}
}
}
// try binary rules
for (U mid = left+1; mid < right; ++mid) {
const Stit_F& leftactives = actives[index(left,mid)];
const S_F& rightinactives = inactives[index(mid,right)];
cforeach (Stit_F, itleft, leftactives) {
const Stit leftactive = itleft->first;
const F leftprob = itleft->second;
cforeach (S_F, itright, rightinactives) {
S rightinactive = itright->first;
const F rightprob = itright->second;
const Stit parentactive = leftactive->find1(rightinactive);
if (parentactive != leftactive->end()) {
S_F::const_iterator it = parentactive->data.find(parent);
if (it != parentactive->data.end()) {
probsofar += leftprob * rightprob * power(it->second*rulefactor, anneal);
if (probsofar >= probthreshold) {
// left children are reconstructed from the active edge,
// the right child recursively from its inactive edge
random_active(leftactive, leftprob, left, mid, tp->children);
tp->children.push_back(random_inactive(rightinactive, rightprob, mid, right));
return tp;
}
}
}
}
}
}
// falling through means rounding left probsofar short of the threshold
std::cerr << "\n## Error in pycky::random_inactive(), parent = " << parent
<< ", left = " << left << ", right = " << right
<< ", probsofar = " << probsofar
<< " still below probthreshold = " << probthreshold
<< std::endl;
return tp;
} // pycky::random_inactive()
void random_active(const Stit parent, F parentprob, \
const U left, const U right,
tree::ptrs_type& siblings) const ;
}; // pycky{}
//! resample_pycache_helper{} is applied to every (words, cached-trees)
//! entry of g.terms_pytrees by resample_pycache().  For each cached tree it
//! proposes a fresh parse of the tree's own terminal string and accepts or
//! rejects it with a Metropolis-Hastings step, keeping the grammar's
//! counts (incrtree/decrtree) consistent throughout.
struct resample_pycache_helper {
typedef catcounttree_type tree;
pycfg_type& g; // grammar whose counts are updated in place
pycky& p; // parser used to build proposals
resample_pycache_helper(pycfg_type& g, pycky& p) : g(g), p(p) { }
template <typename Words, typename TreePtrs>
void operator() (const Words& words, TreePtrs& tps) {
foreach (typename TreePtrs, tit, tps) {
tree* tp0 = *tit;
// NOTE(review): this local deliberately-or-not shadows the `words`
// parameter, which is therefore unused -- confirm intended
Ss words;
tp0 -> terminals(words); // plu_top
S start = tp0 -> category(); // word
// temporarily disable the PY cache for the root category so the
// proposal is drawn from the base grammar; restored at loop end
F old_pya = g.set_pya(start, 1.0);
F pi0 = g.decrtree(tp0); // remove tp0's counts; pi0 = its probability
if (pi0 < 0)
std::cerr << "## pi0 = " << pi0 << ", tp0 = " << tp0 << std::endl;
assert(pi0 >= 0);
F r0 = g.tree_prob(tp0); // proposal density of the current tree
assert(r0 >= 0);
F tprob = p.inside(words, start); // parse string
if (tprob <= 0)
std::cerr << "## Error in resample_pycache(): words = " << words << ", tprob = " << tprob
<< ", tp0 = " << tp0 << std::endl << "## g = " << g << std::endl;
assert(tprob >= 0);
tree* tp1 = p.random_tree(start); // proposal tree
F r1 = g.tree_prob(tp1);
assert(r1 >= 0);
if (tp0->generalize() == tp1->generalize()) { // ignore top count
// same tree modulo top counts: keep tp0, discard the proposal
g.incrtree(tp0);
tp1->selective_delete();
}
else { // *tp1 != *tp0, do acceptance rejection
F pi1 = g.incrtree(tp1);
F pi1r0 = pi1 * r0;
F pi0r1 = pi0 * r1;
F accept = (pi0r1 > 0) ? power(pi1r0/pi0r1, p.anneal) : 2.0; // accept if there has been an underflow
if (random1() <= accept) {
// modified by Jackie
// accepted: graft tp1's leaf contents into tp0's leaves so
// external pointers to tp0 stay valid, then discard tp1
std::vector<catcounttree_type* > tp0_leaf_nodes, tp1_leaf_nodes;
tp0 -> terminal_ptrs(tp0_leaf_nodes);
tp1 -> terminal_ptrs(tp1_leaf_nodes);
if (tp0_leaf_nodes.size() != tp1_leaf_nodes.size()) {
std::cerr << "tp1 leaf node number != tp0 leaf node number" << std::endl;
exit(-1);
}
else if (tp0_leaf_nodes.size() == 0) {
std::cerr << "tp0 leaf node number == 0!" << std::endl;
exit(-1);
}
else {
for (size_t i = 0; i < tp0_leaf_nodes.size(); ++i) {
std::swap(*tp0_leaf_nodes[i], *tp1_leaf_nodes[i]);
}
}
tp0->generalize().swap(tp1->generalize()); // don't swap top counts
tp1->selective_delete();
}
else { // don't accept
g.decrtree(tp1);
g.incrtree(tp0);
tp1->selective_delete();
}
}
g.set_pya(tp0->category(), old_pya); // restore the PY cache setting
}
} // resample_pycache_helper::operator()
}; // resample_pycache_helper{}
//! resample_pycache() resamples the strings associated with each cache
//! resample_pycache() resamples every cached tree in g's PY cache by
//! applying resample_pycache_helper to each entry of p.g.terms_pytrees.
inline void resample_pycache(pycfg_type& g, pycky& p) {
resample_pycache_helper h(g, p);
p.g.terms_pytrees.for_each(h);
} // resample_pycache()
#endif // PY_CKY_H
|
dz1z1.c | # include <stdlib.h>
# include <stdio.h>
# include <math.h>
# include <time.h>
# include <omp.h>
#include "common.h"
int main ( int argc, char *argv[] );
double f ( double x );
int sequential ( int argc, char *argv[], double *result, double *time );
int parallel ( int argc, char *argv[], double *result, double *time );
/*
 * f: the integrand 50 / (pi * (2500*x^2 + 1)).
 * Its integral over [0, 10] is (1/pi)*atan(500) ~ 0.4993633810764567.
 */
double f ( double x ) {
  const double pi = 3.141592653589793;
  return 50.0 / ( pi * ( 2500.0 * x * x + 1.0 ) );
}
/*
 * sequential: single-threaded estimate of the integral of f over [a, b]
 * using n equally spaced sample points.
 *
 * argv (when argc == 4): argv[1] = N (point count), argv[2] = A, argv[3] = B;
 * otherwise the defaults N = 1e8, [0, 10] are used.
 * Outputs: *result = estimate, *time = elapsed wall-clock seconds.
 * Returns 0 on success, 1 on invalid N.
 */
int sequential ( int argc, char *argv[], double *result, double *time ) {
  double a;
  double b;
  double error;
  double exact = 0.49936338107645674464;
  int i;
  int n;
  double total;
  double wtime;
  double x;

  if (argc != 4) {
    n = 100000000;
    a = 0.0;
    b = 10.0;
  } else {
    n = atoi(argv[1]);
    /* bug fix: the bounds are real numbers; atoi silently truncated
       arguments such as "0.5" to 0 */
    a = atof(argv[2]);
    b = atof(argv[3]);
  }
  if (n < 2) {
    /* the sample spacing below divides by (n - 1) */
    fprintf(stderr, "sequential: need at least 2 sample points (n = %d)\n", n);
    return 1;
  }

  printf ( "\n" );
  printf ( "QUAD sequential:\n" );
  printf ( "  Estimate the integral of f(x) from A to B.\n" );
  printf ( "  f(x) = 50 / ( pi * ( 2500 * x * x + 1 ) ).\n" );
  printf ( "\n" );
  printf ( "  A        = %f\n", a );
  printf ( "  B        = %f\n", b );
  printf ( "  N        = %d\n", n );
  printf ( "  Exact    = %24.16f\n", exact );

  wtime = omp_get_wtime ( );

  total = 0.0;
  for ( i = 0; i < n; i++ )
  {
    /* i-th sample interpolates linearly between a (i = 0) and b (i = n-1) */
    x = ( ( double ) ( n - i - 1 ) * a + ( double ) ( i ) * b ) / ( double ) ( n - 1 );
    total = total + f ( x );
  }

  wtime = omp_get_wtime ( ) - wtime;

  total = ( b - a ) * total / ( double ) n;
  error = fabs ( total - exact );

  printf ( "\n" );
  printf ( "  Estimate = %24.16f\n", total );
  *result = total;
  printf ( "  Error    = %e\n", error );
  printf ( "  Time     = %f\n", wtime );
  *time = wtime;
  printf ( "\n" );
  printf ( "  Normal end of execution.\n" );
  printf ( "\n" );

  return 0;
}
/*
 * parallel: OpenMP estimate of the integral of f over [a, b] using n
 * equally spaced sample points, with a manual per-thread chunking scheme
 * and a sum reduction.  Same argv contract and outputs as sequential().
 * Returns 0 on success, 1 on invalid N.
 */
int parallel ( int argc, char *argv[], double *result, double *time ) {
  double a;
  double b;
  double error;
  double exact = 0.49936338107645674464;
  int i;
  int n;
  double total;
  double wtime;
  double x;

  if (argc != 4) {
    n = 100000000;
    a = 0.0;
    b = 10.0;
  } else {
    n = atoi(argv[1]);
    /* bug fix: the bounds are real numbers; atoi silently truncated them */
    a = atof(argv[2]);
    b = atof(argv[3]);
  }
  if (n < 2) {
    /* the sample spacing below divides by (n - 1) */
    fprintf(stderr, "parallel: need at least 2 sample points (n = %d)\n", n);
    return 1;
  }

  printf ( "\n" );
  printf ( "QUAD parallel:\n" );
  printf ( "  Estimate the integral of f(x) from A to B.\n" );
  printf ( "  f(x) = 50 / ( pi * ( 2500 * x * x + 1 ) ).\n" );
  printf ( "\n" );
  printf ( "  A        = %f\n", a );
  printf ( "  B        = %f\n", b );
  printf ( "  N        = %d\n", n );
  printf ( "  Exact    = %24.16f\n", exact );

  wtime = omp_get_wtime ( );

  total = 0.0;
  #pragma omp parallel \
    private(i, x) \
    reduction(+:total)
  {
    double num_threads = omp_get_num_threads();
    int thread_id = omp_get_thread_num();
    int chunk_size = ceil(n/num_threads);
    int iter_start = thread_id * chunk_size;
    int iter_end = (thread_id + 1) * chunk_size;
    /* bug fix: with ceil() the last chunk can extend past n, which
       previously summed samples outside [a, b] and skewed the estimate;
       clamp so every thread stays within [0, n) */
    if (iter_end > n) {
      iter_end = n;
    }
    for ( i = iter_start; i < iter_end; i++ )
    {
      x = ( ( double ) ( n - i - 1 ) * a + ( double ) ( i ) * b ) / ( double ) ( n - 1 );
      total = total + f ( x );
    }
  }

  wtime = omp_get_wtime ( ) - wtime;

  total = ( b - a ) * total / ( double ) n;
  error = fabs ( total - exact );

  printf ( "\n" );
  printf ( "  Estimate = %24.16f\n", total );
  *result = total;
  printf ( "  Error    = %e\n", error );
  printf ( "  Time     = %f\n", wtime );
  *time = wtime;
  printf ( "\n" );
  printf ( "  Normal end of execution.\n" );
  printf ( "\n" );

  return 0;
}
/*
 * Entry point: runs the parallel estimate, then the sequential one, and
 * hands both results and timings to finish_1() (declared in common.h --
 * presumably a comparison/report helper; confirm its contract there).
 *
 * bug fix: the address-of expressions had been corrupted into the HTML
 * entity form "¶llel_result" / "¶llel_time" (mangled "&para..."), which
 * does not compile; restored "&parallel_result" and "&parallel_time".
 */
int main ( int argc, char *argv[]) {
  double sequential_result, parallel_result, sequential_time, parallel_time;
  int err;

  err = parallel(argc, argv, &parallel_result, &parallel_time);
  if (err) { return err; }
  err = sequential(argc, argv, &sequential_result, &sequential_time);
  if (err) { return err; }

  finish_1(sequential_result, parallel_result, sequential_time, parallel_time);
  return 0;
}
|
GB_binop__pair_uint16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pair_uint16)
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pair_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__pair_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pair_uint16)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: uint16_t
// A type: uint16_t
// A pattern? 1
// B type: uint16_t
// B pattern? 1
// BinaryOp: cij = 1
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
;
// true if values of A are not used
#define GB_A_IS_PATTERN \
1 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
;
// true if values of B are not used
#define GB_B_IS_PATTERN \
1 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = 1 ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PAIR || GxB_NO_UINT16 || GxB_NO_PAIR_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A and B are all dense.  The loop body lives in the
// included template, specialized by the GB_* macros defined above; for the
// PAIR operator GB_BINOP sets every entry of C to 1.
// (auto-generated file -- comments only; do not hand-edit the logic)
void GB (_Cdense_ewise3_noaccum__pair_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse: accumulate via the included
// subassign template.  Returns GrB_NO_VALUE when this operator/type
// combination is compiled out (GB_DISABLE), GrB_SUCCESS otherwise.
GrB_Info GB (_Cdense_accumB__pair_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix via the included
// subassign template.  Returns GrB_NO_VALUE when compiled out.
GrB_Info GB (_Cdense_accumb__pair_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable: the inner block above already returned (generated code)
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd C = A+B (optionally masked by M or !M) for the PAIR operator on
// uint16: the work happens in the included add template.  For eWiseUnion the
// alpha/beta scalars are unpacked first; workspaces declared here are freed
// by GB_FREE_WORKSPACE after the template runs.
GrB_Info GB (_AaddB__pair_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint16_t alpha_scalar ;
uint16_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
profile.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP RRRR OOO FFFFF IIIII L EEEEE %
% P P R R O O F I L E %
% PPPP RRRR O O FFF I L EEE %
% P R R O O F I L E %
% P R R OOO F IIIII LLLLL EEEEE %
% %
% %
% MagickCore Image Profile Methods %
% %
% Software Design %
% John Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/cache.h"
#include "magick/color.h"
#include "magick/colorspace-private.h"
#include "magick/configure.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/hashmap.h"
#include "magick/image.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/resource_.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/utility.h"
#if defined(MAGICKCORE_LCMS_DELEGATE)
#if defined(MAGICKCORE_HAVE_LCMS_LCMS2_H)
#include <wchar.h>
#include <lcms/lcms2.h>
#elif defined(MAGICKCORE_HAVE_LCMS2_H)
#include <wchar.h>
#include "lcms2.h"
#elif defined(MAGICKCORE_HAVE_LCMS_LCMS_H)
#include <lcms/lcms.h>
#else
#include "lcms.h"
#endif
#endif
/*
Define declarations.
*/
#if !defined(LCMS_VERSION) || (LCMS_VERSION < 2000)
#define cmsSigCmykData icSigCmykData
#define cmsSigGrayData icSigGrayData
#define cmsSigLabData icSigLabData
#define cmsSigLuvData icSigLuvData
#define cmsSigRgbData icSigRgbData
#define cmsSigXYZData icSigXYZData
#define cmsSigYCbCrData icSigYCbCrData
#define cmsSigLinkClass icSigLinkClass
#define cmsColorSpaceSignature icColorSpaceSignature
#define cmsUInt32Number DWORD
#define cmsSetLogErrorHandler(handler) cmsSetErrorHandler(handler)
#define cmsCreateTransformTHR(context,source_profile,source_type, \
target_profile,target_type,intent,flags) cmsCreateTransform(source_profile, \
source_type,target_profile,target_type,intent,flags);
#define cmsOpenProfileFromMemTHR(context,profile,length) \
cmsOpenProfileFromMem(profile,length)
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageProfiles() clones one or more image profiles.
%
% The format of the CloneImageProfiles method is:
%
% MagickBooleanType CloneImageProfiles(Image *image,
% const Image *clone_image)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone_image: the clone image.
%
*/
/*
  Copy all profiles from clone_image to image: the deprecated color/IPTC
  members are shallow-copied, while the profile splay-tree is deep-cloned.
  NOTE(review): after this call image->color_profile.info aliases storage
  reachable from clone_image, not from image's own cloned tree -- confirm
  callers never free the deprecated pointers independently.
*/
MagickExport MagickBooleanType CloneImageProfiles(Image *image,
  const Image *clone_image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clone_image != (const Image *) NULL);
  assert(clone_image->signature == MagickSignature);
  /*
    Shallow-copy the deprecated profile members.
  */
  image->color_profile.length=clone_image->color_profile.length;
  image->color_profile.info=clone_image->color_profile.info;
  image->iptc_profile.length=clone_image->iptc_profile.length;
  image->iptc_profile.info=clone_image->iptc_profile.info;
  if (clone_image->profiles == (void *) NULL)
    return(MagickTrue);
  /*
    Replace any existing profile map with a deep clone of the source map.
  */
  if (image->profiles != (void *) NULL)
    DestroyImageProfiles(image);
  image->profiles=CloneSplayTree((SplayTreeInfo *) clone_image->profiles,
    (void *(*)(void *)) ConstantString,(void *(*)(void *)) CloneStringInfo);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e l e t e I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeleteImageProfile() deletes a profile from the image by its name.
%
% The format of the DeleteImageProfile method is:
%
%      MagickBooleanType DeleteImageProfile(Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
/*
  Remove the named profile from the image's profile map.  Returns MagickFalse
  when the image has no profile map or the name is not present.  The
  deprecated color_profile/iptc_profile members are cleared for the legacy
  "icc" and "iptc" names.
*/
MagickExport MagickBooleanType DeleteImageProfile(Image *image,const char *name)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return(MagickFalse);
  /*
    A name matches at most one of the deprecated members.
  */
  if (LocaleCompare(name,"icc") == 0)
    {
      image->color_profile.length=0;
      image->color_profile.info=(unsigned char *) NULL;
    }
  else
    if (LocaleCompare(name,"iptc") == 0)
      {
        image->iptc_profile.length=0;
        image->iptc_profile.info=(unsigned char *) NULL;
      }
  return(DeleteNodeFromSplayTree((SplayTreeInfo *) image->profiles,name));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageProfiles() releases memory associated with an image profile map.
%
%  The format of the DestroyImageProfiles method is:
%
% void DestroyImageProfiles(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Release the image's profile map, if any, and reset the member to NULL.
*/
MagickExport void DestroyImageProfiles(Image *image)
{
  if (image->profiles == (SplayTreeInfo *) NULL)
    return;
  image->profiles=DestroySplayTree((SplayTreeInfo *) image->profiles);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageProfile() gets a profile associated with an image by name.
%
% The format of the GetImageProfile method is:
%
% const StringInfo *GetImageProfile(const Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
/*
  Look up a profile by name in the image's profile map; NULL when the map is
  absent or the name is not found.  The name is copied into a bounded key
  buffer before the splay-tree lookup.
*/
MagickExport const StringInfo *GetImageProfile(const Image *image,
  const char *name)
{
  char
    key[MaxTextExtent];

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((StringInfo *) NULL);
  (void) CopyMagickString(key,name,MaxTextExtent);
  return((const StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
    image->profiles,key));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t N e x t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNextImageProfile() gets the next profile name for an image.
%
% The format of the GetNextImageProfile method is:
%
% char *GetNextImageProfile(const Image *image)
%
% A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport char *GetNextImageProfile(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return((char *) NULL);
return((char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->profiles));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P r o f i l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ProfileImage() associates, applies, or removes an ICM, IPTC, or generic
% profile with / to / from an image. If the profile is NULL, it is removed
% from the image otherwise added or applied. Use a name of '*' and a profile
% of NULL to remove all profiles from the image.
%
% ICC and ICM profiles are handled as follows: If the image does not have
% an associated color profile, the one you provide is associated with the
% image and the image pixels are not transformed. Otherwise, the colorspace
% transform defined by the existing and new profile are applied to the image
% pixels and the new profile is associated with the image.
%
% The format of the ProfileImage method is:
%
% MagickBooleanType ProfileImage(Image *image,const char *name,
% const void *datum,const size_t length,const MagickBooleanType clone)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: Name of profile to add or remove: ICC, IPTC, or generic profile.
%
% o datum: the profile data.
%
% o length: the length of the profile.
%
% o clone: should be MagickFalse.
%
*/
#if defined(MAGICKCORE_LCMS_DELEGATE)
/*
  Free the per-thread pixel buffers and the buffer table itself; always
  returns NULL so callers can write `pixels=DestroyPixelThreadSet(pixels);`.
*/
static unsigned short **DestroyPixelThreadSet(unsigned short **pixels)
{
  ssize_t
    n;

  assert(pixels != (unsigned short **) NULL);
  for (n=0; n < (ssize_t) GetMagickResourceLimit(ThreadResource); n++)
  {
    if (pixels[n] == (unsigned short *) NULL)
      continue;
    pixels[n]=(unsigned short *) RelinquishMagickMemory(pixels[n]);
  }
  return((unsigned short **) RelinquishMagickMemory(pixels));
}
/*
  Allocate one scanline buffer (columns*channels shorts) per worker thread.
  On any allocation failure the partially-built set is torn down and NULL is
  returned.  The table is zeroed first so DestroyPixelThreadSet() can safely
  walk it after a partial failure.
*/
static unsigned short **AcquirePixelThreadSet(const size_t columns,
  const size_t channels)
{
  size_t
    number_threads;

  ssize_t
    n;

  unsigned short
    **pixels;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixels=(unsigned short **) AcquireQuantumMemory(number_threads,
    sizeof(*pixels));
  if (pixels == (unsigned short **) NULL)
    return((unsigned short **) NULL);
  (void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels));
  for (n=0; n < (ssize_t) number_threads; n++)
  {
    pixels[n]=(unsigned short *) AcquireQuantumMemory(columns,channels*
      sizeof(**pixels));
    if (pixels[n] == (unsigned short *) NULL)
      return(DestroyPixelThreadSet(pixels));
  }
  return(pixels);
}
/*
  Delete every per-thread CMS transform and release the table; always
  returns NULL for convenient reassignment.
*/
static cmsHTRANSFORM *DestroyTransformThreadSet(cmsHTRANSFORM *transform)
{
  ssize_t
    n;

  assert(transform != (cmsHTRANSFORM *) NULL);
  for (n=0; n < (ssize_t) GetMagickResourceLimit(ThreadResource); n++)
  {
    if (transform[n] == (cmsHTRANSFORM) NULL)
      continue;
    cmsDeleteTransform(transform[n]);
  }
  return((cmsHTRANSFORM *) RelinquishMagickMemory(transform));
}
/*
  Build one CMS color transform per worker thread (lcms transforms are not
  shareable across threads).  The table is zeroed before population so a
  partial failure can be unwound by DestroyTransformThreadSet(); NULL is
  returned on any failure.
*/
static cmsHTRANSFORM *AcquireTransformThreadSet(Image *image,
  const cmsHPROFILE source_profile,const cmsUInt32Number source_type,
  const cmsHPROFILE target_profile,const cmsUInt32Number target_type,
  const int intent,const cmsUInt32Number flags)
{
  cmsHTRANSFORM
    *transform;

  size_t
    number_threads;

  ssize_t
    n;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  transform=(cmsHTRANSFORM *) AcquireQuantumMemory(number_threads,
    sizeof(*transform));
  if (transform == (cmsHTRANSFORM *) NULL)
    return((cmsHTRANSFORM *) NULL);
  (void) ResetMagickMemory(transform,0,number_threads*sizeof(*transform));
  for (n=0; n < (ssize_t) number_threads; n++)
  {
    transform[n]=cmsCreateTransformTHR(image,source_profile,source_type,
      target_profile,target_type,intent,flags);
    if (transform[n] == (cmsHTRANSFORM) NULL)
      return(DestroyTransformThreadSet(transform));
  }
  return(transform);
}
#endif
#if defined(MAGICKCORE_LCMS_DELEGATE)
#if defined(LCMS_VERSION) && (LCMS_VERSION >= 2000)
/*
  lcms2 error callback: log the library message and, when a context image
  was registered with the transform, record a colorspace warning on it.
*/
static void LCMSExceptionHandler(cmsContext context,cmsUInt32Number severity,
  const char *message)
{
  Image
    *image;

  (void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%u, %s",
    severity,message != (char *) NULL ? message : "no message");
  image=(Image *) context;
  if (image == (Image *) NULL)
    return;
  (void) ThrowMagickException(&image->exception,GetMagickModule(),
    ImageWarning,"UnableToTransformColorspace","`%s'",image->filename);
}
#else
/*
  lcms1 error callback: log the library message; returning 1 tells lcms the
  error was handled.
*/
static int LCMSExceptionHandler(int severity,const char *message)
{
  const char
    *text;

  text=(message != (char *) NULL) ? message : "no message";
  (void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%d, %s",
    severity,text);
  return(1);
}
#endif
#endif
/*
  Associate, apply, or remove a profile.  Three modes:
    1. datum == NULL or length == 0: delete profiles whose names match the
       (comma-separated, glob-capable) `name' pattern.
    2. non-ICC name: simply attach the profile via SetImageProfile().
    3. "icc"/"icm": if the image already carries a color profile, build an
       lcms transform from the existing profile to the new one and convert
       every pixel; otherwise just attach it.
*/
MagickExport MagickBooleanType ProfileImage(Image *image,const char *name,
  const void *datum,const size_t length,
  const MagickBooleanType magick_unused(clone))
{
#define ProfileImageTag  "Profile/Image"
/* Close any open profiles before throwing; relies on source_profile and
   target_profile being in scope at the expansion site. */
#define ThrowProfileException(severity,tag,context) \
{ \
  if (source_profile != (cmsHPROFILE) NULL) \
    (void) cmsCloseProfile(source_profile); \
  if (target_profile != (cmsHPROFILE) NULL) \
    (void) cmsCloseProfile(target_profile); \
  ThrowBinaryException(severity,tag,context); \
}

  MagickBooleanType
    status;

  StringInfo
    *profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(name != (const char *) NULL);
  if ((datum == (const void *) NULL) || (length == 0))
    {
      char
        **arguments,
        *names;

      int
        number_arguments;

      register ssize_t
        i;

      /*
        Delete image profile(s).  `name' may be a comma-separated list of
        glob patterns; a "!" prefix excludes a name from deletion.
      */
      names=ConstantString(name);
      (void) SubstituteString(&names,","," ");
      arguments=StringToArgv(names,&number_arguments);
      names=DestroyString(names);
      if (arguments == (char **) NULL)
        return(MagickTrue);
      ResetImageProfileIterator(image);
      for (name=GetNextImageProfile(image); name != (const char *) NULL; )
      {
        /* argv[0] is the program-name slot from StringToArgv; skip it. */
        for (i=1; i < (ssize_t) number_arguments; i++)
        {
          if ((*arguments[i] == '!') &&
              (LocaleCompare(name,arguments[i]+1) == 0))
            break;
          if (GlobExpression(name,arguments[i],MagickTrue) != MagickFalse)
            {
              /* Deleting invalidates the iterator; restart it. */
              (void) DeleteImageProfile(image,name);
              ResetImageProfileIterator(image);
              break;
            }
        }
        name=GetNextImageProfile(image);
      }
      for (i=0; i < (ssize_t) number_arguments; i++)
        arguments[i]=DestroyString(arguments[i]);
      arguments=(char **) RelinquishMagickMemory(arguments);
      return(MagickTrue);
    }
  /*
    Add a ICC, IPTC, or generic profile to the image.
  */
  status=MagickTrue;
  profile=AcquireStringInfo((size_t) length);
  SetStringInfoDatum(profile,(unsigned char *) datum);
  if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
    status=SetImageProfile(image,name,profile);
  else
    {
      const StringInfo
        *icc_profile;

      icc_profile=GetImageProfile(image,"icc");
      if ((icc_profile != (const StringInfo *) NULL) &&
          (CompareStringInfo(icc_profile,profile) == 0))
        {
          const char
            *value;

          value=GetImageProperty(image,"exif:ColorSpace");
          (void) value;
          /* Future.
          if (LocaleCompare(value,"1") != 0)
            (void) SetsRGBImageProfile(image);
          value=GetImageProperty(image,"exif:InteroperabilityIndex");
          if (LocaleCompare(value,"R98.") != 0)
            (void) SetsRGBImageProfile(image);
          value=GetImageProperty(image,"exif:InteroperabilityIndex");
          if (LocaleCompare(value,"R03.") != 0)
            (void) SetAdobeRGB1998ImageProfile(image);
          */
          icc_profile=GetImageProfile(image,"icc");
        }
      /* Identical profile already attached: nothing to transform. */
      if ((icc_profile != (const StringInfo *) NULL) &&
          (CompareStringInfo(icc_profile,profile) == 0))
        {
          profile=DestroyStringInfo(profile);
          return(MagickTrue);
        }
#if !defined(MAGICKCORE_LCMS_DELEGATE)
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (LCMS)",
        image->filename);
#else
      {
        cmsHPROFILE
          source_profile;

        /*
          Transform pixel colors as defined by the color profiles.
        */
        cmsSetLogErrorHandler(LCMSExceptionHandler);
        source_profile=cmsOpenProfileFromMemTHR(image,
          GetStringInfoDatum(profile),(cmsUInt32Number)
          GetStringInfoLength(profile));
        /* NOTE(review): this throw path leaks `profile' -- it returns via
           ThrowBinaryException without DestroyStringInfo(profile). */
        if (source_profile == (cmsHPROFILE) NULL)
          ThrowBinaryException(ResourceLimitError,
            "ColorspaceColorProfileMismatch",name);
        if ((cmsGetDeviceClass(source_profile) != cmsSigLinkClass) &&
            (icc_profile == (StringInfo *) NULL))
          status=SetImageProfile(image,name,profile);
        else
          {
            CacheView
              *image_view;

            ColorspaceType
              source_colorspace,
              target_colorspace;

            cmsColorSpaceSignature
              signature;

            cmsHPROFILE
              target_profile;

            cmsHTRANSFORM
              *restrict transform;

            cmsUInt32Number
              flags,
              source_type,
              target_type;

            ExceptionInfo
              *exception;

            int
              intent;

            /* NOTE(review): this declaration shadows the outer `status';
               the outer variable keeps its MagickTrue value and is what the
               function ultimately returns on this path. */
            MagickBooleanType
              status;

            MagickOffsetType
              progress;

            size_t
              source_channels,
              target_channels;

            ssize_t
              y;

            unsigned short
              **restrict source_pixels,
              **restrict target_pixels;

            exception=(&image->exception);
            target_profile=(cmsHPROFILE) NULL;
            if (icc_profile != (StringInfo *) NULL)
              {
                /*
                  The image already has a color profile: convert from it
                  (source) to the newly supplied profile (target).
                */
                target_profile=source_profile;
                source_profile=cmsOpenProfileFromMemTHR(image,
                  GetStringInfoDatum(icc_profile),(cmsUInt32Number)
                  GetStringInfoLength(icc_profile));
                if (source_profile == (cmsHPROFILE) NULL)
                  ThrowProfileException(ResourceLimitError,
                    "ColorspaceColorProfileMismatch",name);
              }
            /*
              Map the source profile's color space onto an ImageMagick
              colorspace, an lcms pixel layout, and a channel count.
            */
            switch (cmsGetColorSpace(source_profile))
            {
              case cmsSigCmykData:
              {
                source_colorspace=CMYKColorspace;
                source_type=(cmsUInt32Number) TYPE_CMYK_16;
                source_channels=4;
                break;
              }
              case cmsSigGrayData:
              {
                source_colorspace=GRAYColorspace;
                source_type=(cmsUInt32Number) TYPE_GRAY_16;
                source_channels=1;
                break;
              }
              case cmsSigLabData:
              {
                source_colorspace=LabColorspace;
                source_type=(cmsUInt32Number) TYPE_Lab_16;
                source_channels=3;
                break;
              }
              case cmsSigLuvData:
              {
                source_colorspace=YUVColorspace;
                source_type=(cmsUInt32Number) TYPE_YUV_16;
                source_channels=3;
                break;
              }
              case cmsSigRgbData:
              {
                source_colorspace=sRGBColorspace;
                source_type=(cmsUInt32Number) TYPE_RGB_16;
                source_channels=3;
                break;
              }
              case cmsSigXYZData:
              {
                source_colorspace=XYZColorspace;
                source_type=(cmsUInt32Number) TYPE_XYZ_16;
                source_channels=3;
                break;
              }
              case cmsSigYCbCrData:
              {
                source_colorspace=YCbCrColorspace;
                source_type=(cmsUInt32Number) TYPE_YCbCr_16;
                source_channels=3;
                break;
              }
              default:
              {
                source_colorspace=UndefinedColorspace;
                source_type=(cmsUInt32Number) TYPE_RGB_16;
                source_channels=3;
                break;
              }
            }
            /* With no target profile this is a device-link transform; use
               the source profile's PCS as the output signature. */
            signature=cmsGetPCS(source_profile);
            if (target_profile != (cmsHPROFILE) NULL)
              signature=cmsGetColorSpace(target_profile);
            switch (signature)
            {
              case cmsSigCmykData:
              {
                target_colorspace=CMYKColorspace;
                target_type=(cmsUInt32Number) TYPE_CMYK_16;
                target_channels=4;
                break;
              }
              case cmsSigLabData:
              {
                target_colorspace=LabColorspace;
                target_type=(cmsUInt32Number) TYPE_Lab_16;
                target_channels=3;
                break;
              }
              case cmsSigGrayData:
              {
                target_colorspace=GRAYColorspace;
                target_type=(cmsUInt32Number) TYPE_GRAY_16;
                target_channels=1;
                break;
              }
              case cmsSigLuvData:
              {
                target_colorspace=YUVColorspace;
                target_type=(cmsUInt32Number) TYPE_YUV_16;
                target_channels=3;
                break;
              }
              case cmsSigRgbData:
              {
                target_colorspace=sRGBColorspace;
                target_type=(cmsUInt32Number) TYPE_RGB_16;
                target_channels=3;
                break;
              }
              case cmsSigXYZData:
              {
                target_colorspace=XYZColorspace;
                target_type=(cmsUInt32Number) TYPE_XYZ_16;
                target_channels=3;
                break;
              }
              case cmsSigYCbCrData:
              {
                target_colorspace=YCbCrColorspace;
                target_type=(cmsUInt32Number) TYPE_YCbCr_16;
                target_channels=3;
                break;
              }
              default:
              {
                target_colorspace=UndefinedColorspace;
                target_type=(cmsUInt32Number) TYPE_RGB_16;
                target_channels=3;
                break;
              }
            }
            /*
              Sanity checks: the source profile must agree with the image's
              current colorspace.
            */
            if ((source_colorspace == UndefinedColorspace) ||
                (target_colorspace == UndefinedColorspace))
              ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
                name);
            if ((source_colorspace == GRAYColorspace) &&
                (IsGrayImage(image,exception) == MagickFalse))
              ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
                name);
            if ((source_colorspace == CMYKColorspace) &&
                (image->colorspace != CMYKColorspace))
              ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
                name);
            if ((source_colorspace == XYZColorspace) &&
                (image->colorspace != XYZColorspace))
              ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
                name);
            if ((source_colorspace == YCbCrColorspace) &&
                (image->colorspace != YCbCrColorspace))
              ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
                name);
            if ((source_colorspace != CMYKColorspace) &&
                (source_colorspace != GRAYColorspace) &&
                (source_colorspace != LabColorspace) &&
                (source_colorspace != XYZColorspace) &&
                (source_colorspace != YCbCrColorspace) &&
                (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse))
              ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch",
                name);
            switch (image->rendering_intent)
            {
              case AbsoluteIntent: intent=INTENT_ABSOLUTE_COLORIMETRIC; break;
              case PerceptualIntent: intent=INTENT_PERCEPTUAL; break;
              case RelativeIntent: intent=INTENT_RELATIVE_COLORIMETRIC; break;
              case SaturationIntent: intent=INTENT_SATURATION; break;
              default: intent=INTENT_PERCEPTUAL; break;
            }
            flags=cmsFLAGS_HIGHRESPRECALC;
#if defined(cmsFLAGS_BLACKPOINTCOMPENSATION)
            if (image->black_point_compensation != MagickFalse)
              flags|=cmsFLAGS_BLACKPOINTCOMPENSATION;
#endif
            transform=AcquireTransformThreadSet(image,source_profile,
              source_type,target_profile,target_type,intent,flags);
            if (transform == (cmsHTRANSFORM *) NULL)
              ThrowProfileException(ImageError,"UnableToCreateColorTransform",
                name);
            /*
              Transform image as dictated by the source & target image profiles.
            */
            source_pixels=AcquirePixelThreadSet(image->columns,source_channels);
            target_pixels=AcquirePixelThreadSet(image->columns,target_channels);
            if ((source_pixels == (unsigned short **) NULL) ||
                (target_pixels == (unsigned short **) NULL))
              {
                transform=DestroyTransformThreadSet(transform);
                ThrowProfileException(ResourceLimitError,
                  "MemoryAllocationFailed",image->filename);
              }
            if (SetImageStorageClass(image,DirectClass) == MagickFalse)
              {
                target_pixels=DestroyPixelThreadSet(target_pixels);
                source_pixels=DestroyPixelThreadSet(source_pixels);
                transform=DestroyTransformThreadSet(transform);
                if (source_profile != (cmsHPROFILE) NULL)
                  (void) cmsCloseProfile(source_profile);
                if (target_profile != (cmsHPROFILE) NULL)
                  (void) cmsCloseProfile(target_profile);
                return(MagickFalse);
              }
            /* CMYK needs the colormap/index machinery set up before the
               pixel loop writes the black channel via indexes. */
            if (target_colorspace == CMYKColorspace)
              (void) SetImageColorspace(image,target_colorspace);
            status=MagickTrue;
            progress=0;
            image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
            for (y=0; y < (ssize_t) image->rows; y++)
            {
              const int
                id = GetOpenMPThreadId();

              MagickBooleanType
                sync;

              register IndexPacket
                *restrict indexes;

              register ssize_t
                x;

              register PixelPacket
                *restrict q;

              register unsigned short
                *p;

              if (status == MagickFalse)
                continue;
              q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
                exception);
              if (q == (PixelPacket *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              indexes=GetCacheViewAuthenticIndexQueue(image_view);
              /*
                Pack one row into this thread's 16-bit source buffer.
              */
              p=source_pixels[id];
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                *p++=ScaleQuantumToShort(GetPixelRed(q));
                if (source_channels > 1)
                  {
                    *p++=ScaleQuantumToShort(GetPixelGreen(q));
                    *p++=ScaleQuantumToShort(GetPixelBlue(q));
                  }
                if (source_channels > 3)
                  *p++=ScaleQuantumToShort(GetPixelIndex(indexes+x));
                q++;
              }
              cmsDoTransform(transform[id],source_pixels[id],target_pixels[id],
                (unsigned int) image->columns);
              /*
                Unpack the transformed row back into the pixel cache.  For a
                1-channel (gray) target, red is replicated into green/blue;
                the multi-channel branches below overwrite those otherwise.
              */
              p=target_pixels[id];
              q-=image->columns;
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                SetPixelRed(q,ScaleShortToQuantum(*p));
                SetPixelGreen(q,GetPixelRed(q));
                SetPixelBlue(q,GetPixelRed(q));
                p++;
                if (target_channels > 1)
                  {
                    SetPixelGreen(q,ScaleShortToQuantum(*p));
                    p++;
                    SetPixelBlue(q,ScaleShortToQuantum(*p));
                    p++;
                  }
                if (target_channels > 3)
                  {
                    SetPixelIndex(indexes+x,ScaleShortToQuantum(*p));
                    p++;
                  }
                q++;
              }
              sync=SyncCacheViewAuthenticPixels(image_view,exception);
              if (sync == MagickFalse)
                status=MagickFalse;
              if (image->progress_monitor != (MagickProgressMonitor) NULL)
                {
                  MagickBooleanType
                    proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ProfileImage)
#endif
                  proceed=SetImageProgress(image,ProfileImageTag,progress++,
                    image->rows);
                  if (proceed == MagickFalse)
                    status=MagickFalse;
                }
            }
            image_view=DestroyCacheView(image_view);
            (void) SetImageColorspace(image,target_colorspace);
            /*
              Record the resulting image type from the target signature.
            */
            switch (signature)
            {
              case cmsSigRgbData:
              {
                image->type=image->matte == MagickFalse ? TrueColorType :
                  TrueColorMatteType;
                break;
              }
              case cmsSigCmykData:
              {
                image->type=image->matte == MagickFalse ? ColorSeparationType :
                  ColorSeparationMatteType;
                break;
              }
              case cmsSigGrayData:
              {
                image->type=image->matte == MagickFalse ? GrayscaleType :
                  GrayscaleMatteType;
                break;
              }
              default:
                break;
            }
            target_pixels=DestroyPixelThreadSet(target_pixels);
            source_pixels=DestroyPixelThreadSet(source_pixels);
            transform=DestroyTransformThreadSet(transform);
            /* Device-link profiles are consumed by the transform, not kept. */
            if (cmsGetDeviceClass(source_profile) != cmsSigLinkClass)
              status=SetImageProfile(image,name,profile);
            if (target_profile != (cmsHPROFILE) NULL)
              (void) cmsCloseProfile(target_profile);
          }
        (void) cmsCloseProfile(source_profile);
      }
#endif
    }
  /* SetImageProfile() clones the profile, so the local copy is released. */
  profile=DestroyStringInfo(profile);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m o v e I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemoveImageProfile() removes a named profile from the image and returns its
% value.
%
% The format of the RemoveImageProfile method is:
%
% void *RemoveImageProfile(Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
/*
  Detach the named profile from the image and hand ownership of its
  StringInfo to the caller; NULL when the image has no profile map or the
  name is absent.  The deprecated members are cleared for the legacy "icc"
  and "iptc" names.
*/
MagickExport StringInfo *RemoveImageProfile(Image *image,const char *name)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((StringInfo *) NULL);
  /*
    A name matches at most one of the deprecated members.
  */
  if (LocaleCompare(name,"icc") == 0)
    {
      image->color_profile.length=0;
      image->color_profile.info=(unsigned char *) NULL;
    }
  else
    if (LocaleCompare(name,"iptc") == 0)
      {
        image->iptc_profile.length=0;
        image->iptc_profile.info=(unsigned char *) NULL;
      }
  return((StringInfo *) RemoveNodeFromSplayTree((SplayTreeInfo *)
    image->profiles,name));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t P r o f i l e I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImageProfileIterator() resets the image profile iterator. Use it in
% conjunction with GetNextImageProfile() to iterate over all the profiles
% associated with an image.
%
% The format of the ResetImageProfileIterator method is:
%
% ResetImageProfileIterator(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void ResetImageProfileIterator(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->profiles == (SplayTreeInfo *) NULL)
return;
ResetSplayTreeIterator((SplayTreeInfo *) image->profiles);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageProfile() adds a named profile to the image. If a profile with the
% same name already exists, it is replaced. This method differs from the
% ProfileImage() method in that it does not apply CMS color profiles.
%
% The format of the SetImageProfile method is:
%
% MagickBooleanType SetImageProfile(Image *image,const char *name,
% const StringInfo *profile)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name, for example icc, exif, and 8bim (8bim is the
% Photoshop wrapper for iptc profiles).
%
% o profile: A StringInfo structure that contains the named profile.
%
*/
static void *DestroyProfile(void *profile)
{
return((void *) DestroyStringInfo((StringInfo *) profile));
}
/*
  Read one byte from the resource stream into *quantum and return the
  advanced cursor.  No bounds checking: the caller guarantees availability.
*/
static inline const unsigned char *ReadResourceByte(const unsigned char *p,
  unsigned char *quantum)
{
  *quantum=p[0];
  return(p+1);
}
/*
  Copy `count' bytes from the resource stream into quantum[] and return the
  advanced cursor.  A non-positive count copies nothing and returns p
  unchanged.  No bounds checking: the caller guarantees availability.
*/
static inline const unsigned char *ReadResourceBytes(const unsigned char *p,
  const ssize_t count,unsigned char *quantum)
{
  ssize_t
    remaining;

  for (remaining=count; remaining > 0; remaining--)
    *quantum++=(*p++);
  return(p);
}
/*
  Decode a big-endian 32-bit unsigned value from the resource stream into
  *quantum and return the advanced cursor.  Each byte is widened to size_t
  *before* shifting: the previous code shifted the int-promoted byte, so a
  leading byte >= 0x80 overflowed into the sign bit (undefined behavior)
  and sign-extended into the upper half of a 64-bit size_t.
*/
static inline const unsigned char *ReadResourceLong(const unsigned char *p,
  size_t *quantum)
{
  *quantum=((size_t) *p++) << 24;
  *quantum|=((size_t) *p++) << 16;
  *quantum|=((size_t) *p++) << 8;
  *quantum|=(size_t) *p++;
  return(p);
}
/*
  Decode a big-endian 16-bit unsigned value from the resource stream into
  *quantum and return the advanced cursor.
*/
static inline const unsigned char *ReadResourceShort(const unsigned char *p,
  unsigned short *quantum)
{
  unsigned short
    value;

  value=(unsigned short) (p[0] << 8);
  value|=(unsigned short) p[1];
  *quantum=value;
  return(p+2);
}
/*
  Walk a Photoshop 8BIM resource block and extract embedded profiles
  (IPTC, ICC, EXIF, XMP) plus the image resolution, attaching each via
  SetImageProfile().  Each record is: "8BIM" signature, 2-byte id, a
  Pascal-style name (1 length byte + bytes, padded to even total), then a
  4-byte big-endian payload length and the payload (padded to even length).
*/
static MagickBooleanType GetProfilesFromResourceBlock(Image *image,
  const StringInfo *resource_block)
{
  const unsigned char
    *datum;

  register const unsigned char
    *p;

  size_t
    length;

  StringInfo
    *profile;

  unsigned char
    length_byte;

  size_t
    count;

  unsigned short
    id;

  datum=GetStringInfoDatum(resource_block);
  length=GetStringInfoLength(resource_block);
  /* -16 keeps at least signature+id+name-length+count readable; blocks
     shorter than 16 bytes never enter the loop. */
  for (p=datum; p < (datum+length-16); )
  {
    if (LocaleNCompare((char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=ReadResourceShort(p,&id);
    p=ReadResourceByte(p,&length_byte);
    p+=length_byte;
    /* Pascal name (length byte + bytes) is padded to an even total. */
    if (((length_byte+1) & 0x01) != 0)
      p++;
    if (p > (datum+length-4))
      break;
    p=ReadResourceLong(p,&count);
    /* NOTE(review): bounds check -- `datum+length-count' can wrap for very
       large count; the `count > length' clause limits exposure but does not
       preclude pointer-arithmetic overflow in theory. */
    if ((p > (datum+length-count)) || (count > length))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        unsigned short
          resolution;

        /*
          Resolution.
        */
        /* +6 skips the unit/fraction fields of each ResolutionInfo entry. */
        p=ReadResourceShort(p,&resolution)+6;
        image->x_resolution=(double) resolution;
        p=ReadResourceShort(p,&resolution)+6;
        image->y_resolution=(double) resolution;
        break;
      }
      case 0x0404:
      {
        /*
          IPTC Profile
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfile(image,"iptc",profile);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x040c:
      {
        /*
          Thumbnail.
        */
        p+=count;
        break;
      }
      case 0x040f:
      {
        /*
          ICC Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfile(image,"icc",profile);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0422:
      {
        /*
          EXIF Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfile(image,"exif",profile);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0424:
      {
        /*
          XMP Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfile(image,"xmp",profile);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      default:
      {
        p+=count;
        break;
      }
    }
    /* Payloads are padded to even length. */
    if ((count & 0x01) != 0)
      p++;
  }
  return(MagickTrue);
}
/*
  Attach (or replace) a named profile on the image without applying any CMS
  transform.  The profile is cloned into the splay tree; the deprecated
  color_profile/iptc_profile members are kept in sync for legacy names, and
  8BIM blocks are additionally scanned for embedded sub-profiles.
*/
MagickExport MagickBooleanType SetImageProfile(Image *image,const char *name,
  const StringInfo *profile)
{
  char
    key[MaxTextExtent],
    property[MaxTextExtent];

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Lazily create the profile map; keys are freed with the tree, values
     with DestroyProfile(). */
  if (image->profiles == (SplayTreeInfo *) NULL)
    image->profiles=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
      DestroyProfile);
  (void) CopyMagickString(key,name,MaxTextExtent);
  status=AddValueToSplayTree((SplayTreeInfo *) image->profiles,
    ConstantString(key),CloneStringInfo(profile));
  if ((status != MagickFalse) &&
      ((LocaleCompare(name,"icc") == 0) || (LocaleCompare(name,"icm") == 0)))
    {
      const StringInfo
        *icc_profile;

      /*
        Continue to support deprecated color profile member.
      */
      /* The deprecated member aliases the tree-owned clone's datum; it is
         valid only as long as that entry remains in the tree. */
      icc_profile=GetImageProfile(image,name);
      if (icc_profile != (const StringInfo *) NULL)
        {
          image->color_profile.length=GetStringInfoLength(icc_profile);
          image->color_profile.info=GetStringInfoDatum(icc_profile);
        }
    }
  if ((status != MagickFalse) &&
      ((LocaleCompare(name,"iptc") == 0) || (LocaleCompare(name,"8bim") == 0)))
    {
      const StringInfo
        *iptc_profile;

      /*
        Continue to support deprecated IPTC profile member.
      */
      iptc_profile=GetImageProfile(image,name);
      if (iptc_profile != (const StringInfo *) NULL)
        {
          image->iptc_profile.length=GetStringInfoLength(iptc_profile);
          image->iptc_profile.info=GetStringInfoDatum(iptc_profile);
        }
      /* 8BIM blocks may embed IPTC/ICC/EXIF/XMP sub-profiles; extract them. */
      (void) GetProfilesFromResourceBlock(image,profile);
    }
  /*
    Inject profile into image properties.
  */
  (void) FormatLocaleString(property,MaxTextExtent,"%s:sans",name);
  (void) GetImageProperty(image,property);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImageProfiles() synchronizes image properties with the image profiles.
% Currently we only support updating the EXIF resolution and orientation.
%
% The format of the SyncImageProfiles method is:
%
% MagickBooleanType SyncImageProfiles(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static inline int ReadProfileByte(unsigned char **p,size_t *length)
{
  /*
    Consume and return one byte from the profile stream, or EOF once the
    remaining length is exhausted.  Advances *p and decrements *length.
  */
  int
    byte;

  if (*length == 0)
    return(EOF);
  byte=(int) **p;
  (*p)++;
  (*length)--;
  return(byte);
}
static inline unsigned short ReadProfileShort(const EndianType endian,
  unsigned char *buffer)
{
  /*
    Decode a 16-bit unsigned value from buffer, honoring the requested byte
    order (LSBEndian reads little-endian, anything else reads big-endian).
  */
  unsigned short
    value;

  if (endian == LSBEndian)
    value=(unsigned short) ((buffer[1] << 8) | buffer[0]);
  else
    value=(unsigned short) ((buffer[0] << 8) | buffer[1]);
  return((unsigned short) (value & 0xffff));
}
static inline size_t ReadProfileLong(const EndianType endian,
  unsigned char *buffer)
{
  /*
    Decode a 32-bit unsigned value from buffer, honoring the requested byte
    order; the result is masked to 32 bits regardless of sizeof(size_t).
  */
  size_t
    value;

  if (endian == LSBEndian)
    value=(size_t) ((buffer[3] << 24) | (buffer[2] << 16) |
      (buffer[1] << 8) | buffer[0]);
  else
    value=(size_t) ((buffer[0] << 24) | (buffer[1] << 16) |
      (buffer[2] << 8) | buffer[3]);
  return(value & 0xffffffff);
}
static inline void WriteProfileLong(const EndianType endian,
  const size_t value,unsigned char *p)
{
  /*
    Encode the low 32 bits of value at p in the requested byte order.
  */
  unsigned char
    octets[4];

  if (endian == LSBEndian)
    {
      octets[0]=(unsigned char) value;
      octets[1]=(unsigned char) (value >> 8);
      octets[2]=(unsigned char) (value >> 16);
      octets[3]=(unsigned char) (value >> 24);
    }
  else
    {
      octets[0]=(unsigned char) (value >> 24);
      octets[1]=(unsigned char) (value >> 16);
      octets[2]=(unsigned char) (value >> 8);
      octets[3]=(unsigned char) value;
    }
  (void) CopyMagickMemory(p,octets,4);
}
/*
  Encode a 16-bit value at p in the requested byte order.  Declared
  static inline for consistency with the sibling Read/WriteProfile*
  helpers above (the original was plain static).
*/
static inline void WriteProfileShort(const EndianType endian,
  const unsigned short value,unsigned char *p)
{
  unsigned char
    buffer[2];

  if (endian == LSBEndian)
    {
      buffer[0]=(unsigned char) value;
      buffer[1]=(unsigned char) (value >> 8);
      (void) CopyMagickMemory(p,buffer,2);
      return;
    }
  buffer[0]=(unsigned char) (value >> 8);
  buffer[1]=(unsigned char) value;
  (void) CopyMagickMemory(p,buffer,2);
}
MagickExport MagickBooleanType SyncImageProfiles(Image *image)
{
#define MaxDirectoryStack 16
#define EXIF_DELIMITER "\n"
#define EXIF_NUM_FORMATS 12
#define TAG_EXIF_OFFSET 0x8769
#define TAG_INTEROP_OFFSET 0xa005

  /*
    One stack frame per pending EXIF image file directory (IFD).
  */
  typedef struct _DirectoryInfo
  {
    unsigned char
      *directory;

    size_t
      entry;
  } DirectoryInfo;

  DirectoryInfo
    directory_stack[MaxDirectoryStack];

  EndianType
    endian;

  size_t
    entry,
    length,
    number_entries;

  SplayTreeInfo
    *exif_resources;

  ssize_t
    id,
    level,
    offset;

  /*
    Bytes per component for each EXIF format code (1..12); index 0 is unused.
  */
  static int
    format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8};

  StringInfo
    *profile;

  unsigned char
    *directory,
    *exif;

  /*
    Set EXIF resolution tag.
  */
  profile=(StringInfo *) GetImageProfile(image,"EXIF");
  if (profile == (StringInfo *) NULL)
    return(MagickTrue);
  length=GetStringInfoLength(profile);
  exif=GetStringInfoDatum(profile);
  /*
    Scan for the "Exif\0\0" signature that precedes the TIFF header.
  */
  while (length != 0)
  {
    if (ReadProfileByte(&exif,&length) != 0x45)
      continue;
    if (ReadProfileByte(&exif,&length) != 0x78)
      continue;
    if (ReadProfileByte(&exif,&length) != 0x69)
      continue;
    if (ReadProfileByte(&exif,&length) != 0x66)
      continue;
    if (ReadProfileByte(&exif,&length) != 0x00)
      continue;
    if (ReadProfileByte(&exif,&length) != 0x00)
      continue;
    break;
  }
  if (length < 16)
    return(MagickFalse);
  /*
    TIFF header: byte-order mark "II" (0x4949, little-endian) or "MM"
    (0x4D4D, big-endian), followed by the 0x002a magic number.
  */
  id=(ssize_t) ReadProfileShort(LSBEndian,exif);
  endian=LSBEndian;
  if (id == 0x4949)
    endian=LSBEndian;
  else
    if (id == 0x4D4D)
      endian=MSBEndian;
    else
      return(MagickFalse);
  if (ReadProfileShort(endian,exif+2) != 0x002a)
    return(MagickFalse);
  /*
    This is the offset to the first IFD.
  */
  offset=(ssize_t) ((int) ReadProfileLong(endian,exif+4));
  if ((offset < 0) || ((size_t) offset >= length))
    return(MagickFalse);
  directory=exif+offset;
  level=0;
  entry=0;
  /*
    Track entries already visited so cyclic IFD offsets cannot loop forever.
  */
  exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL,
    (void *(*)(void *)) NULL,(void *(*)(void *)) NULL);
  do
  {
    if (level > 0)
      {
        /*
          Pop the next pending directory from the stack.
        */
        level--;
        directory=directory_stack[level].directory;
        entry=directory_stack[level].entry;
      }
    /*
      Determine how many entries there are in the current IFD.
    */
    number_entries=ReadProfileShort(endian,directory);
    for ( ; entry < number_entries; entry++)
    {
      register unsigned char
        *p,
        *q;

      size_t
        number_bytes;

      ssize_t
        components,
        format,
        tag_value;

      /*
        Each IFD entry is 12 bytes: tag (2), format (2), component count (4),
        then the value or an offset to it (4).
      */
      q=(unsigned char *) (directory+2+(12*entry));
      if (GetValueFromSplayTree(exif_resources,q) == q)
        break;  /* entry already visited: cycle detected */
      (void) AddValueToSplayTree(exif_resources,q,q);
      tag_value=(ssize_t) ReadProfileShort(endian,q);
      format=(ssize_t) ReadProfileShort(endian,q+2);
      if ((format-1) >= EXIF_NUM_FORMATS)
        break;
      components=(ssize_t) ((int) ReadProfileLong(endian,q+4));
      number_bytes=(size_t) components*format_bytes[format];
      if ((ssize_t) number_bytes < components)
        break;  /* prevent overflow */
      if (number_bytes <= 4)
        p=q+8;  /* value small enough to be stored inline in the entry */
      else
        {
          ssize_t
            offset;

          /*
            The directory entry contains an offset.
          */
          offset=(ssize_t) ((int) ReadProfileLong(endian,q+8));
          if ((ssize_t) (offset+number_bytes) < offset)
            continue;  /* prevent overflow */
          if ((size_t) (offset+number_bytes) > length)
            continue;
          p=(unsigned char *) (exif+offset);
        }
      /*
        Rewrite the tags we synchronize from the Image struct: XResolution
        (0x011a), YResolution (0x011b), Orientation (0x0112) and
        ResolutionUnit (0x0128).
      */
      switch (tag_value)
      {
        case 0x011a:
        {
          (void) WriteProfileLong(endian,(size_t) (image->x_resolution+0.5),p);
          (void) WriteProfileLong(endian,1UL,p+4);
          break;
        }
        case 0x011b:
        {
          (void) WriteProfileLong(endian,(size_t) (image->y_resolution+0.5),p);
          (void) WriteProfileLong(endian,1UL,p+4);
          break;
        }
        case 0x0112:
        {
          if (number_bytes == 4)
            {
              (void) WriteProfileLong(endian,(size_t) image->orientation,p);
              break;
            }
          (void) WriteProfileShort(endian,(unsigned short) image->orientation,
            p);
          break;
        }
        case 0x0128:
        {
          /* EXIF resolution units are offset by one from image->units. */
          if (number_bytes == 4)
            {
              (void) WriteProfileLong(endian,(size_t) (image->units+1),p);
              break;
            }
          (void) WriteProfileShort(endian,(unsigned short) (image->units+1),p);
          break;
        }
        default:
          break;
      }
      if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET))
        {
          ssize_t
            offset;

          /*
            Descend into the sub-IFD: push the remainder of the current
            directory and the sub-IFD (and the chained IFD that follows the
            entry table, if any) for later processing.
          */
          offset=(ssize_t) ((int) ReadProfileLong(endian,p));
          if (((size_t) offset < length) && (level < (MaxDirectoryStack-2)))
            {
              directory_stack[level].directory=directory;
              entry++;
              directory_stack[level].entry=entry;
              level++;
              directory_stack[level].directory=exif+offset;
              directory_stack[level].entry=0;
              level++;
              if ((directory+2+(12*number_entries)) > (exif+length))
                break;
              offset=(ssize_t) ((int) ReadProfileLong(endian,directory+2+(12*
                number_entries)));
              if ((offset != 0) && ((size_t) offset < length) &&
                  (level < (MaxDirectoryStack-2)))
                {
                  directory_stack[level].directory=exif+offset;
                  directory_stack[level].entry=0;
                  level++;
                }
            }
          break;
        }
    }
  } while (level > 0);
  exif_resources=DestroySplayTree(exif_resources);
  return(MagickTrue);
}
|
GB_unop__identity_uint64_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: (none)
// op(A') function: GB_unop_tran__identity_uint64_uint64
// C type: uint64_t
// A type: uint64_t
// cast: uint64_t cij = aij
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint64_t z = aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
uint64_t *Cx, // Cx and Ax may be aliased
const uint64_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint64_t aij = Ax [p] ;
uint64_t z = aij ;
Cx [p] = z ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose a uint64 matrix with the IDENTITY operator.
// The actual loop lives in the shared template GB_unop_transpose.c,
// specialized by the GB_* macros defined earlier in this file.
GrB_Info GB_unop_tran__identity_uint64_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    // NOTE(review): presumably per-row counts from a prior symbolic phase
    // (this function runs GB_PHASE_2_OF_2) — confirm in GB_unop_transpose.c.
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    // this operator/type pairing was compiled out via GB_control.h
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
core_dtrsm_blasfeo.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from core_blas/core_ztrsm.c, normal z -> d, Thu Aug 8 17:24:56 2019
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"
#include "blasfeo_d_aux.h"
/***************************************************************************//**
*
* @ingroup core_trsm
*
* Solves one of the matrix equations
*
* \f[ op( A )\times X = \alpha B, \f] or
* \f[ X \times op( A ) = \alpha B, \f]
*
* where op( A ) is one of:
* \f[ op( A ) = A, \f]
* \f[ op( A ) = A^T, \f]
 * \f[ op( A ) = A^H, \f] (the conjugate transpose; identical to \f$ A^T \f$ in this real-arithmetic variant)
*
* alpha is a scalar, X and B are m-by-n matrices, and
* A is a unit or non-unit, upper or lower triangular matrix.
* The matrix X overwrites B.
*
*******************************************************************************
*
* @param[in] side
* - PlasmaLeft: op(A)*X = B,
* - PlasmaRight: X*op(A) = B.
*
* @param[in] uplo
* - PlasmaUpper: A is upper triangular,
* - PlasmaLower: A is lower triangular.
*
* @param[in] transa
* - PlasmaNoTrans: A is not transposed,
* - PlasmaTrans: A is transposed,
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] diag
* - PlasmaNonUnit: A has non-unit diagonal,
* - PlasmaUnit: A has unit diagonal.
*
* @param[in] m
* The number of rows of the matrix B. m >= 0.
*
* @param[in] n
* The number of columns of the matrix B. n >= 0.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* The lda-by-ka triangular matrix,
* where ka = m if side = PlasmaLeft,
* and ka = n if side = PlasmaRight.
* If uplo = PlasmaUpper, the leading k-by-k upper triangular part
* of the array A contains the upper triangular matrix, and the
* strictly lower triangular part of A is not referenced.
* If uplo = PlasmaLower, the leading k-by-k lower triangular part
* of the array A contains the lower triangular matrix, and the
* strictly upper triangular part of A is not referenced.
* If diag = PlasmaUnit, the diagonal elements of A are also not
* referenced and are assumed to be 1.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,k).
*
* @param[in,out] B
* On entry, the ldb-by-n right hand side matrix B.
* On exit, if return value = 0, the ldb-by-n solution matrix X.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,m).
*
******************************************************************************/
/*
  Triangular solve via BLASFEO packed-matrix kernels.

  NOTE(review): the side/uplo/transa/diag arguments are accepted for API
  compatibility but are NOT honored here — the call below is hard-coded to
  blasfeo_dtrsm_rltn, i.e. Right side, Lower triangular, Transposed A,
  Non-unit diagonal.  Confirm that every caller uses exactly that
  configuration, or dispatch on the arguments to the other blasfeo_dtrsm_*
  variants.
*/
__attribute__((weak))
void plasma_core_dtrsm_blasfeo(plasma_enum_t side, plasma_enum_t uplo,
                       plasma_enum_t transa, plasma_enum_t diag,
                       int m, int n,
                       double alpha, struct blasfeo_dmat *sA, int ai, int aj,
                       struct blasfeo_dmat *sB, int bi, int bj)
{
    // Original column-major CBLAS reference, kept for comparison:
    // cblas_dtrsm(CblasColMajor,
    //             (CBLAS_SIDE)side, (CBLAS_UPLO)uplo,
    //             (CBLAS_TRANSPOSE)transa, (CBLAS_DIAG)diag,
    //             m, n,
    //             (alpha), A, lda,
    //             B, ldb);
    // Solve in place: output matrix is the same sB/bi/bj as the right side.
    blasfeo_dtrsm_rltn(m, n, alpha, sA, ai, aj, sB, bi, bj, sB, bi, bj);
}
/******************************************************************************/
/*
  OpenMP-task wrapper around plasma_core_dtrsm_blasfeo: registers the task's
  data dependences with the runtime and defers the solve.
*/
void plasma_core_omp_dtrsm_blasfeo(
    plasma_enum_t side, plasma_enum_t uplo,
    plasma_enum_t transa, plasma_enum_t diag,
    int m, int n,
    double alpha, const struct blasfeo_dmat *sA, int ai, int aj,
    struct blasfeo_dmat *sB, int bi, int bj,
    plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Order of the triangular factor A: m for a left-side solve, n for right.
    // (request is unreferenced in this body; kept for the PLASMA task API.)
    int ak;
    if (side == PlasmaLeft)
        ak = m;
    else
        ak = n;

    // Copy the matrix descriptors by value so the deferred task does not
    // dereference caller-owned pointers after this function returns.
    struct blasfeo_dmat sA2, sB2;
    sA2 = *sA;
    sB2 = *sB;

    // Raw data pointers and panel strides, used only to express the task's
    // depend clauses (array-section syntax needs plain pointers).
    double *A = sA->pA;
    int sda = sA->cn;
    double *B = sB->pA;
    int sdb = sB->cn;

    // Original leading-dimension based clauses, kept for comparison:
    // #pragma omp task depend(in:A[0:lda*ak]) \
    //                  depend(inout:B[0:ldb*n])
    #pragma omp task depend(in:A[0:sda*ak]) \
                     depend(inout:B[0:sdb*n])
    {
        // Skip the computation if an earlier task in this sequence failed.
        if (sequence->status == PlasmaSuccess)
            plasma_core_dtrsm_blasfeo(side, uplo,
                              transa, diag,
                              m, n,
                              alpha, &sA2, ai, aj,
                              &sB2, bi, bj);
    }
}
|
a.19.1.c | /* { dg-do run } */
int x, *p = &x;
extern void abort (void);
/* Store through q, then flush: makes the write to *q — along with the
   shared, accessible variables x and p — visible to other threads.  */
void
f1 (int *q)
{
  *q = 1;
#pragma omp flush
  /* x, p, and *q are flushed */
  /* because they are shared and accessible */
  /* q is not flushed because it is not shared. */
}
/* Same contract as f1, but relies on the implicit flushes of the two
   barriers that bracket the store to *q.  */
void
f2 (int *q)
{
#pragma omp barrier
  *q = 2;
#pragma omp barrier
  /* a barrier implies a flush */
  /* x, p, and *q are flushed */
  /* because they are shared and accessible */
  /* q is not flushed because it is not shared. */
}
/* Each of the two threads adds j (== 1 after f1) and then
   i + j + *p + n (== 1 + 2 + 1 + 10 after f2) to the reduction,
   so the expected result is 2 * (1 + 14) == 30.  */
int
g (int n)
{
  int i = 1, j, sum = 0;
  *p = 1;
#pragma omp parallel reduction(+: sum) num_threads(2)
  {
    f1 (&j);
    /* i, n and sum were not flushed */
    /* because they were not accessible in f1 */
    /* j was flushed because it was accessible */
    sum += j;
    f2 (&j);
    /* i, n, and sum were not flushed */
    /* because they were not accessible in f2 */
    /* j was flushed because it was accessible */
    sum += i + j + *p + n;
  }
  return sum;
}
int
main ()
{
  /* The reduction computed by g() must be exactly 30 for two threads.  */
  if (g (10) != 30)
    abort ();
  return 0;
}
|
omp.cats | /* ****** ****** */
/*
(*
** Permission to use, copy, modify, and distribute this software for any
** purpose with or without fee is hereby granted, provided that the above
** copyright notice and this permission notice appear in all copies.
**
** THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
** WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
** MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
** ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
** WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
** ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
** OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*)
*/
/* ****** ****** */
/*
** Author: Brandon Barker
** Authoremail: bbarkerATgmailDOTcom
** Start time: May, 2014
*/
/* ****** ****** */
/*
** Author: Hongwei Xi
** Authoremail: gmhwxiATgmailDOTcom
** Start time: June, 2014
*/
/* ****** ****** */
#ifndef OPENMP_OMP_CATS
#define OPENMP_OMP_CATS
/* ****** ****** */
#include <omp.h>
/* ****** ****** */
//
// BB: Utilities
//
// Classic two-step stringification: STR turns its argument into a string
// literal; STRINGIFY expands macro arguments first, then stringifies.
#define atscntrb_openmp_STR(x) #x
#define atscntrb_openmp_STRINGIFY(x) atscntrb_openmp_STR(x)
// CONCATFUN(X, Y) expands to "X ( Y )"; used below to synthesize pragma
// text such as "omp parallel private (tid)".
#define atscntrb_openmp_CONCATFUN(X, Y) X ( Y )
//
/* ****** ****** */
// Thin wrappers over the standard OpenMP runtime API, for the ATS bindings.
#define \
atscntrb_openmp_omp_get_num_procs() omp_get_num_procs()
/* ****** ****** */
#define \
atscntrb_openmp_omp_get_num_threads() omp_get_num_threads()
#define \
atscntrb_openmp_omp_set_num_threads(n) omp_set_num_threads(n)
/* ****** ****** */
#define \
atscntrb_openmp_omp_get_thread_num() omp_get_thread_num()
/* ****** ****** */
//
// Emits "#pragma omp barrier" via _Pragma so it can appear in macro output.
#define \
atscntrb_openmp_pragma_omp_barrier() \
_Pragma(atscntrb_openmp_STRINGIFY(omp barrier))
//
/* ****** ****** */
//
// #pragma omp parallel private(tid)
//
// _beg opens the parallel region (note the trailing "{"); every use must be
// paired with a matching _end, which closes the region.
#define \
atscntrb_openmp_pragma_omp_parallel_private_beg(tid) \
_Pragma(atscntrb_openmp_STRINGIFY(atscntrb_openmp_CONCATFUN(omp parallel private, tid))) {
//
#define \
atscntrb_openmp_pragma_omp_parallel_private_end(tid) }
//
/* ****** ****** */
#endif // ifndef OPENMP_OMP_CATS
/* ****** ****** */
/* end of [omp.cats] */
|
GB_unop__tgamma_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__tgamma_fp64_fp64)
// op(A') function: GB (_unop_tran__tgamma_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = tgamma (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = tgamma (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = tgamma (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TGAMMA || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = tgamma (Ax [p]) for every entry of A, in parallel.
GrB_Info GB (_unop_apply__tgamma_fp64_fp64)
(
    double *Cx,                 // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // this operator/type pairing was compiled out via GB_control.h
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is not bitmap: every position p holds a live entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = tgamma (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;    // skip positions absent from the bitmap
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = tgamma (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = tgamma (A'): transpose, typecast, and apply the unary operator.  The
// loop lives in the shared template GB_unop_transpose.c, specialized by the
// GB_* macros defined earlier in this file.
GrB_Info GB (_unop_tran__tgamma_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    // this operator/type pairing was compiled out via GB_control.h
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
ActiveRotatingFilter.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/ActiveRotatingFilter.c"
#else
static int orn_(ARF_MappingRotate)(lua_State *L)
{
THTensor *weight = luaT_checkudata(L, 2, torch_Tensor);
THTensor *indices = luaT_checkudata(L, 3, torch_Tensor);
THTensor *output = luaT_checkudata(L, 4, torch_Tensor);
const uint8 kW = lua_tonumber(L, 5);
const uint8 kH = lua_tonumber(L, 6);
const uint16 nInputPlane = lua_tonumber(L, 7);
const uint16 nOutputPlane = lua_tonumber(L, 8);
const uint8 nOrientation = lua_tonumber(L, 9);
const uint8 nRotation = lua_tonumber(L, 10);
real *weightData = THTensor_(data)(weight);
real *indicesData = THTensor_(data)(indices);
real *outputData = THTensor_(data)(output);
const uint16 nEntry = nOrientation * kH * kW;
uint16 i, j, l;
uint8 k;
#pragma omp parallel for private(i)
for (i = 0; i < nOutputPlane; i++) {
for (j = 0; j < nInputPlane; j++) {
for (l = 0; l < nEntry; l++) {
real val = *(weightData++);
for (k = 0; k < nRotation; k++) {
uint16 index = (uint16)(*(indicesData + l * nRotation + k)) - 1;
real *target = outputData + i * (nRotation * nInputPlane * nEntry)
+ k * (nInputPlane * nEntry)
+ j * (nEntry)
+ index;
*target = val;
}
}
}
}
return 0;
}
static int orn_(ARF_MappingAlign)(lua_State *L)
{
THTensor *weight = luaT_checkudata(L, 2, torch_Tensor);
THTensor *indices = luaT_checkudata(L, 3, torch_Tensor);
THTensor *gradWeight = luaT_checkudata(L, 4, torch_Tensor);
const uint8 kW = lua_tonumber(L, 5);
const uint8 kH = lua_tonumber(L, 6);
const uint16 nInputPlane = lua_tonumber(L, 7);
const uint16 nOutputPlane = lua_tonumber(L, 8);
const uint8 nOrientation = lua_tonumber(L, 9);
const uint8 nRotation = lua_tonumber(L, 10);
real *weightData = THTensor_(data)(weight);
real *indicesData = THTensor_(data)(indices);
real *gradWeightData = THTensor_(data)(gradWeight);
const uint16 nEntry = nOrientation * kH * kW;
uint16 i, j, l;
uint8 k;
#pragma omp parallel for private(i)
for (i = 0; i < nOutputPlane; i++) {
for (j = 0; j < nInputPlane; j++) {
for (l = 0; l < nEntry; l++) {
real *val = weightData++;
for (k = 0; k < nRotation; k++) {
uint16 index = (uint16)(*(indicesData + l * nRotation + k)) - 1;
real *target = gradWeightData + i * (nRotation * nInputPlane * nEntry)
+ k * (nInputPlane * nEntry)
+ j * (nEntry)
+ index;
*val += *target;
}
}
}
}
return 0;
}
static int orn_(ARF_Rotate)(lua_State *L)
{
THTensor *weight = luaT_checkudata(L, 2, torch_Tensor);
THTensor *indices = luaT_checkudata(L, 3, torch_Tensor);
THTensor *factors = luaT_checkudata(L, 4, torch_Tensor);
THTensor *weightBuffer = luaT_checkudata(L, 5, torch_Tensor);
THTensor *buffer = luaT_checkudata(L, 6, torch_Tensor);
const uint8 srcW = lua_tonumber(L, 7);
const uint8 srcH = lua_tonumber(L, 8);
const uint8 dstW = lua_tonumber(L, 9);
const uint8 dstH = lua_tonumber(L, 10);
const uint16 nInputPlane = lua_tonumber(L, 11);
const uint16 nOutputPlane = lua_tonumber(L, 12);
const uint8 nOrientation = lua_tonumber(L, 13);
const uint8 nRotation = lua_tonumber(L, 14);
real *weightData = THTensor_(data)(weight);
real *indicesData = THTensor_(data)(indices);
real *factorsData = THTensor_(data)(factors);
real *bufferData = THTensor_(data)(buffer);
real *weightBufferData = THTensor_(data)(weightBuffer);
real *src;
real *target;
real *elements;
const uint16 srcEntry = srcH * srcW;
const uint16 dstEntry = dstH * dstW;
uint16 i, j, m;
uint8 l, n, k;
target = (nOrientation == 1) ? weightBufferData : bufferData;
#pragma omp parallel for private(i)
for (i = 0; i < nOutputPlane; i++) {
for (k = 0; k < nRotation; k++) {
for (j = 0; j < nInputPlane; j++) {
for (l = 0; l < nOrientation; l++) {
src = weightData + i * (nInputPlane * nOrientation * srcEntry)
+ j * (nOrientation * srcEntry)
+ l * srcEntry;
for (m = 0; m < dstEntry; m++) {
elements = indicesData + k * (dstEntry * 8) + m * 8;
*(target++) = *(src + (uint8)elements[1]) * elements[0]
+ *(src + (uint8)elements[3]) * elements[2]
+ *(src + (uint8)elements[5]) * elements[4]
+ *(src + (uint8)elements[7]) * elements[6];
}
}
}
}
}
if (nOrientation == 1)
return 0;
target = weightBufferData;
#pragma omp parallel for private(i)
for (i = 0; i < nOutputPlane; i++) {
for (k = 0; k < nRotation; k++) {
for (j = 0; j < nInputPlane; j++) {
for (l = 0; l < nOrientation; l++) {
for (m = 0; m < dstEntry; m++) {
src = bufferData + i * (nRotation * nInputPlane * nOrientation * dstEntry)
+ k * (nInputPlane * nOrientation * dstEntry)
+ j * (nOrientation * dstEntry)
+ m;
elements = factorsData + k * (nOrientation * nOrientation)
+ l * nOrientation;
*target = 0.0f;
for (n = 0; n < nOrientation; n++) {
*target += *(src + n * dstEntry) * elements[n];
}
target++;
}
}
}
}
}
return 0;
}
static int orn_(ARF_Align)(lua_State *L)
{
THTensor *gradWeight = luaT_checkudata(L, 2, torch_Tensor);
THTensor *indices = luaT_checkudata(L, 3, torch_Tensor);
THTensor *factors = luaT_checkudata(L, 4, torch_Tensor);
THTensor *buffer = luaT_checkudata(L, 5, torch_Tensor);
THTensor *gradWeightBuffer = luaT_checkudata(L, 6, torch_Tensor);
const uint8 srcW = lua_tonumber(L, 7);
const uint8 srcH = lua_tonumber(L, 8);
const uint8 dstW = lua_tonumber(L, 9);
const uint8 dstH = lua_tonumber(L, 10);
const uint16 nInputPlane = lua_tonumber(L, 11);
const uint16 nOutputPlane = lua_tonumber(L, 12);
const uint8 nOrientation = lua_tonumber(L, 13);
const uint8 nRotation = lua_tonumber(L, 14);
real *gradWeightData = THTensor_(data)(gradWeight);
real *indicesData = THTensor_(data)(indices);
real *factorsData = THTensor_(data)(factors);
real *gradWeightBufferData = THTensor_(data)(gradWeightBuffer);
real *bufferData = THTensor_(data)(buffer);
real *src;
real *target;
real *elements;
const uint8 srcEntry = srcH * srcW;
const uint8 dstEntry = dstH * dstW;
uint16 i, j, m;
uint8 l, n, k;
if (nOrientation > 1) {
target = bufferData;
#pragma omp parallel for private(i)
for (i = 0; i < nOutputPlane; i++) {
for (k = 0; k < nRotation; k++) {
for (j = 0; j < nInputPlane; j++) {
for (l = 0; l < nOrientation; l++) {
for (m = 0; m < dstEntry; m++) {
src = gradWeightBufferData + i * (nRotation * nInputPlane * nOrientation * dstEntry)
+ k * (nInputPlane * nOrientation * dstEntry)
+ j * (nOrientation * dstEntry)
+ m;
elements = factorsData + k * (nOrientation * nOrientation)
+ l * nOrientation;
*target = 0.0f;
for (n = 0; n < nOrientation; n++) {
*target += *(src + n * dstEntry) * elements[n];
}
target++;
}
}
}
}
}
}
else {
bufferData = gradWeightBufferData;
}
target = gradWeightData;
#pragma omp parallel for private(i)
for (i = 0; i < nOutputPlane; i++) {
for (j = 0; j < nInputPlane; j++) {
for (l = 0; l < nOrientation; l++) {
for (m = 0; m < srcEntry; m++) {
for (k = 0; k < nRotation; k++) {
src = bufferData + i * (nRotation * nInputPlane * nOrientation * dstEntry)
+ k * (nInputPlane * nOrientation * dstEntry)
+ j * (nOrientation * dstEntry)
+ l * dstEntry;
elements = indicesData + k * (srcEntry * 8) + m * 8;
*target += *(src + (uint8)elements[1]) * elements[0]
+ *(src + (uint8)elements[3]) * elements[2]
+ *(src + (uint8)elements[5]) * elements[4]
+ *(src + (uint8)elements[7]) * elements[6];
}
target++;
}
}
}
}
return 0;
}
static const struct luaL_Reg orn_(ARF__) [] = {
{"ARF_MappingRotate", orn_(ARF_MappingRotate)},
{"ARF_MappingAlign", orn_(ARF_MappingAlign)},
{"ARF_Rotate", orn_(ARF_Rotate)},
{"ARF_Align", orn_(ARF_Align)},
{NULL, NULL}
};
static void orn_(ARF_init)(lua_State *L)
{
luaT_pushmetatable(L, torch_Tensor);
luaT_registeratname(L, orn_(ARF__), "orn");
lua_pop(L,1);
}
#endif
|
hello-omp.c | #include <stdio.h>
#include <omp.h>
int main( int argc, char **argv ) {
    /* Each thread in the parallel region reports its own id. */
    #pragma omp parallel
    {
        printf( "Hello from OMP thread %d\n", omp_get_thread_num() );
    }
    return 0;
}
|
GrB_Matrix_serialize.c | //------------------------------------------------------------------------------
// GrB_Matrix_serialize: copy a matrix into a serialized array of bytes
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// serialize a GrB_Matrix into a blob of bytes
// This method is similar to GxB_Matrix_serialize. In contrast with the GrB*
// method, this method requires the user application to allocate the blob
// first, which must be non-NULL on input. The required size of the blob is
// computed by GrB_Matrix_serializeSize. Example usage:
/*
void *blob = NULL ;
GrB_Index blob_size = 0 ;
GrB_Matrix A, B = NULL ;
// construct a matrix A, then serialize it:
GrB_Matrix_serializeSize (&blob_size, A) ; // loose upper bound
blob = malloc (blob_size) ; // user mallocs the blob
GrB_Matrix_serialize (blob, &blob_size, A) ; // returns actual size
blob = realloc (blob, blob_size) ; // user can shrink the blob
GrB_Matrix_deserialize (&B, atype, blob, blob_size) ;
free (blob) ; // user frees the blob
*/
#include "GB.h"
#include "GB_serialize.h"
GrB_Info GrB_Matrix_serialize       // serialize a GrB_Matrix to a blob
(
    // output:
    void *blob,                     // the blob, already allocated on input
    // input/output:
    GrB_Index *blob_size_handle,    // size of the blob on input.  On output,
                                    // the # of bytes used in the blob.
    // input:
    GrB_Matrix A                    // matrix to serialize
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GB_WHERE1 ("GrB_Matrix_serialize (blob, &blob_size, A)") ;
    GB_BURBLE_START ("GrB_Matrix_serialize") ;
    GB_RETURN_IF_NULL (blob) ;
    GB_RETURN_IF_NULL (blob_size_handle) ;
    GB_RETURN_IF_NULL_OR_FAULTY (A) ;

    // no descriptor, so assume the default method
    int method = GxB_DEFAULT ;

    // Context will hold the default # of threads, which can be controlled
    // by GxB_Global_Option_set.

    //--------------------------------------------------------------------------
    // serialize the matrix into the preallocated blob
    //--------------------------------------------------------------------------

    size_t blob_size = (size_t) (*blob_size_handle) ;
    GrB_Info info = GB_serialize ((GB_void **) &blob, &blob_size, A, method,
        Context) ;
    if (info == GrB_SUCCESS)
    {
        // report back how many bytes of the caller's blob were actually used
        (*blob_size_handle) = (GrB_Index) blob_size ;
    }
    GB_BURBLE_END ;
    // NOTE(review): presumably a memory fence so the finished blob is visible
    // to all user threads — confirm against the other GrB_* entry points.
    #pragma omp flush
    return (info) ;
}
|
GB_binop__min_fp64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__min_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__min_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__min_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__min_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__min_fp64)
// A*D function (colscale): GB (_AxD__min_fp64)
// D*A function (rowscale): GB (_DxB__min_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__min_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__min_fp64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__min_fp64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__min_fp64)
// C=scalar+B GB (_bind1st__min_fp64)
// C=scalar+B' GB (_bind1st_tran__min_fp64)
// C=A+scalar GB (_bind2nd__min_fp64)
// C=A'+scalar GB (_bind2nd_tran__min_fp64)
// C type: double
// A type: double
// A pattern? 0
// B type: double
// B pattern? 0
// BinaryOp: cij = fmin (aij, bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = fmin (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MIN || GxB_NO_FP64 || GxB_NO_MIN_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense, with MIN as the accum op.
// All the work (and the GB_DISABLE check, if any) lives in the template;
// the macros defined above select the fp64/MIN specialization.
void GB (_Cdense_ewise3_accum__min_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B (no accumulation) where all three matrices are dense; the "+"
// here is the MIN binary op, applied entrywise by the included template.
void GB (_Cdense_ewise3_noaccum__min_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B with MIN accum: B is sparse/hyper, C is dense.  B_ek_slicing
// partitions B's entries into B_ntasks tasks over B_nthreads threads.
// Returns GrB_NO_VALUE when this specialization is compiled out.
GrB_Info GB (_Cdense_accumB__min_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b with MIN accum, where b is a scalar passed untyped via p_bwork
// and C is dense.  Returns GrB_NO_VALUE when compiled out.
GrB_Info GB (_Cdense_accumb__min_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE: unreachable (the block above already returned); artifact of the
// code generator, kept verbatim.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale: D is diagonal, so C(i,j) = min(A(i,j), D(j,j))
// for this MIN specialization.  A_ek_slicing partitions A's entries.
GrB_Info GB (_AxD__min_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C has the same pattern as A; the template fills C->x directly
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale: D is diagonal, so C(i,j) = min(D(i,i), B(i,j))
// for this MIN specialization.
GrB_Info GB (_DxB__min_fp64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C has the same pattern as B; the template fills C->x directly
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (set union of patterns), optionally masked with M
// (structural/complemented per the flags).  The "+" is MIN.  When
// is_eWiseUnion is true, alpha/beta scalars substitute for entries
// missing in A or B respectively.
GrB_Info GB (_AaddB__min_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix slicing workspaces, freed by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// alpha/beta are read only when is_eWiseUnion is true
double alpha_scalar ;
double beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((double *) alpha_scalar_in)) ;
beta_scalar = (*((double *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B (set intersection of patterns), with an
// optional mask; C is sparse or hypersparse.  The multiply op is MIN.
GrB_Info GB (_AemultB_08__min_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full.  MIN is commutative, so the GB_BINOP_FLIP branch below is
// compiled out and flipxy needs no special handling.
GrB_Info GB (_AemultB_02__min_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hyper and both A
// and B are bitmap/full; M_ek_slicing partitions the mask entries.
GrB_Info GB (_AemultB_04__min_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult producing a bitmap C: C=A.*B, C<M>=A.*B, or C<!M>=A.*B,
// with the MIN multiply op; ewise_method selects the template variant.
GrB_Info GB (_AemultB_bitmap__min_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = min(x, Bx): apply the MIN operator with the scalar bound to the
// first argument.  Entries absent from B's bitmap (Bb) are skipped.
GrB_Info GB (_bind1st__min_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// view the untyped buffers as double arrays, and fetch the scalar
double *Cx = (double *) Cx_output ;
double *Bx = (double *) Bx_input ;
double x = (*((double *) x_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// only entries present in B's bitmap are written
if (GBB (Bb, p))
{
double bval = GBX (Bx, p, false) ;
Cx [p] = fmin (x, bval) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = min(Ax, y): apply the MIN operator with the scalar bound to the
// second argument.  Entries absent from A's bitmap (Ab) are skipped.
GrB_Info GB (_bind2nd__min_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// view the untyped buffers as double arrays, and fetch the scalar
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// only entries present in A's bitmap are written
if (GBB (Ab, p))
{
double aval = GBX (Ax, p, false) ;
Cx [p] = fmin (aval, y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = fmin (x, aij) ; \
}
// C = min(x, A'): transpose A and apply MIN with the scalar bound first.
// GB_CAST_OP (defined just above) does the per-entry work inside the
// transpose template; GB_ATYPE is temporarily redefined because here A
// is the *second* operand of z = f(x,y).
GrB_Info GB (_bind1st_tran__min_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the rest of this file (preprocessor-only; this
// trailing code emits no statements)
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = fmin (aij, y) ; \
}
// C = min(A', y): transpose A and apply MIN with the scalar bound
// second.  The per-entry work is the GB_CAST_OP macro defined just
// above, expanded inside the transpose template.
GrB_Info GB (_bind2nd_tran__min_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ellipticStressPartialAxHex3D.c | extern "C"
// Stress-formulation elliptic operator Aq = A*q on 3-D hexahedral
// spectral elements (p_Nq^3 nodes each).  For every listed element it
// computes the physical gradients of the packed (u,v,w) fields, forms
// the symmetric-stress fluxes scaled by lambda[0] and the quadrature
// weight JW, applies the transposed derivative matrices, and adds the
// lambda[1]-weighted mass term.
//
// Nelements   number of elements to process
// offset      stride between the u, v, w components inside q / Aq
// loffset     stride between the per-component lambda coefficients
// elementList global element index for each entry processed
// vgeo        geometric factors (p_Nvgeo values per quadrature node)
// D           1-D differentiation matrix, row-major p_Nq x p_Nq
// S           unused in this kernel — presumably kept for signature
//             compatibility with sibling kernels (TODO confirm)
// lambda      lambda[0*offset + c*loffset]: stress coefficient,
//             lambda[1*offset + c*loffset]: mass coefficient, c=0,1,2
// q           input fields; Aq: output (overwritten, not accumulated)
void FUNC(ellipticStressPartialAxHex3D)(const dlong &Nelements,
                                        const dlong &offset,
                                        const dlong &loffset,
                                        const dlong* __restrict__ elementList,
                                        const dfloat* __restrict__ vgeo,
                                        const dfloat* __restrict__ D,
                                        const dfloat* __restrict__ S,
                                        const dfloat* __restrict__ lambda,
                                        const dfloat* __restrict__ q,
                                        dfloat* __restrict__ Aq)
{
  // 1-D differentiation matrix, shared read-only by all elements
  dfloat s_D[p_Nq][p_Nq];
  // element-local copies of the three input components
  dfloat s_U[p_Nq][p_Nq][p_Nq];
  dfloat s_V[p_Nq][p_Nq][p_Nq];
  dfloat s_W[p_Nq][p_Nq][p_Nq];
  // stress fluxes in reference (r,s,t) directions for each component
  dfloat s_SUr[p_Nq][p_Nq][p_Nq];
  dfloat s_SUs[p_Nq][p_Nq][p_Nq];
  dfloat s_SUt[p_Nq][p_Nq][p_Nq];
  dfloat s_SVr[p_Nq][p_Nq][p_Nq];
  dfloat s_SVs[p_Nq][p_Nq][p_Nq];
  dfloat s_SVt[p_Nq][p_Nq][p_Nq];
  dfloat s_SWr[p_Nq][p_Nq][p_Nq];
  dfloat s_SWs[p_Nq][p_Nq][p_Nq];
  dfloat s_SWt[p_Nq][p_Nq][p_Nq];
  for(int j = 0; j < p_Nq; ++j)
    for(int i = 0; i < p_Nq; ++i)
      s_D[j][i] = D[j * p_Nq + i];
#ifdef __NEKRS__OMP__
  // the scratch arrays are per-element working storage, hence private
#pragma omp parallel for private(s_U, s_V, s_W, s_SUr, s_SUs, s_SUt, s_SVr, s_SVs, s_SVt, s_SWr, s_SWs, s_SWt)
#endif
  for(dlong elem = 0; elem < Nelements; ++elem) {
    const dlong e = elementList[elem];
    // material coefficients are loop-invariant: load them once per
    // element instead of once per quadrature node
    const dfloat u_lam0 = lambda[0 * offset + 0 * loffset];
    const dfloat v_lam0 = lambda[0 * offset + 1 * loffset];
    const dfloat w_lam0 = lambda[0 * offset + 2 * loffset];
    const dfloat u_lam1 = lambda[1 * offset + 0 * loffset];
    const dfloat v_lam1 = lambda[1 * offset + 1 * loffset];
    const dfloat w_lam1 = lambda[1 * offset + 2 * loffset];
    // gather the three components of this element into local storage
    for(int k = 0; k < p_Nq; ++k)
      for(int j = 0; j < p_Nq; ++j)
        for(int i = 0; i < p_Nq; ++i) {
          const dlong id = e * p_Np + k * p_Nq * p_Nq + j * p_Nq + i;
          s_U[k][j][i] = q[id + 0 * offset];
          s_V[k][j][i] = q[id + 1 * offset];
          s_W[k][j][i] = q[id + 2 * offset];
        }
    // first pass: reference-space derivatives -> physical gradients ->
    // scaled stress fluxes, stored per direction
    for(int k = 0; k < p_Nq; ++k)
      for(int j = 0; j < p_Nq; ++j)
        for(int i = 0; i < p_Nq; ++i) {
          const dlong gid = i + j * p_Nq + k * p_Nq * p_Nq + e * p_Np * p_Nvgeo;
          const dfloat rx = vgeo[gid + p_RXID * p_Np];
          const dfloat ry = vgeo[gid + p_RYID * p_Np];
          const dfloat rz = vgeo[gid + p_RZID * p_Np];
          const dfloat sx = vgeo[gid + p_SXID * p_Np];
          const dfloat sy = vgeo[gid + p_SYID * p_Np];
          const dfloat sz = vgeo[gid + p_SZID * p_Np];
          const dfloat tx = vgeo[gid + p_TXID * p_Np];
          const dfloat ty = vgeo[gid + p_TYID * p_Np];
          const dfloat tz = vgeo[gid + p_TZID * p_Np];
          const dfloat JW = vgeo[gid + p_JWID * p_Np];
          // compute 1D derivatives
          dfloat ur = 0.f, us = 0.f, ut = 0.f;
          dfloat vr = 0.f, vs = 0.f, vt = 0.f;
          dfloat wr = 0.f, ws = 0.f, wt = 0.f;
          for(int m = 0; m < p_Nq; ++m) {
            const dfloat Dim = s_D[i][m]; // Dr
            const dfloat Djm = s_D[j][m]; // Ds
            const dfloat Dkm = s_D[k][m]; // Dt
            ur += Dim * s_U[k][j][m];
            us += Djm * s_U[k][m][i];
            ut += Dkm * s_U[m][j][i];
            //
            vr += Dim * s_V[k][j][m];
            vs += Djm * s_V[k][m][i];
            vt += Dkm * s_V[m][j][i];
            //
            wr += Dim * s_W[k][j][m];
            ws += Djm * s_W[k][m][i];
            wt += Dkm * s_W[m][j][i];
          }
          // chain rule: reference derivatives -> physical derivatives
          const dfloat dudx = rx * ur + sx * us + tx * ut;
          const dfloat dudy = ry * ur + sy * us + ty * ut;
          const dfloat dudz = rz * ur + sz * us + tz * ut;
          const dfloat dvdx = rx * vr + sx * vs + tx * vt;
          const dfloat dvdy = ry * vr + sy * vs + ty * vt;
          const dfloat dvdz = rz * vr + sz * vs + tz * vt;
          const dfloat dwdx = rx * wr + sx * ws + tx * wt;
          const dfloat dwdy = ry * wr + sy * ws + ty * wt;
          const dfloat dwdz = rz * wr + sz * ws + tz * wt;
          // symmetric stress components, weighted by JW and lambda[0]
          const dfloat s11 = u_lam0 * JW * (dudx + dudx);
          const dfloat s12 = u_lam0 * JW * (dudy + dvdx);
          const dfloat s13 = u_lam0 * JW * (dudz + dwdx);
          const dfloat s21 = v_lam0 * JW * (dvdx + dudy);
          const dfloat s22 = v_lam0 * JW * (dvdy + dvdy);
          const dfloat s23 = v_lam0 * JW * (dvdz + dwdy);
          const dfloat s31 = w_lam0 * JW * (dwdx + dudz);
          const dfloat s32 = w_lam0 * JW * (dwdy + dvdz);
          const dfloat s33 = w_lam0 * JW * (dwdz + dwdz);
          // project the stresses back to reference directions
          s_SUr[k][j][i] = rx * s11 + ry * s12 + rz * s13;
          s_SUs[k][j][i] = sx * s11 + sy * s12 + sz * s13;
          s_SUt[k][j][i] = tx * s11 + ty * s12 + tz * s13;
          //
          s_SVr[k][j][i] = rx * s21 + ry * s22 + rz * s23;
          s_SVs[k][j][i] = sx * s21 + sy * s22 + sz * s23;
          s_SVt[k][j][i] = tx * s21 + ty * s22 + tz * s23;
          //
          s_SWr[k][j][i] = rx * s31 + ry * s32 + rz * s33;
          s_SWs[k][j][i] = sx * s31 + sy * s32 + sz * s33;
          s_SWt[k][j][i] = tx * s31 + ty * s32 + tz * s33;
        }
    // second pass: apply transposed derivative matrices and add the
    // lambda[1]-weighted mass term, then write the result
    for(int k = 0; k < p_Nq; ++k)
      for(int j = 0; j < p_Nq; ++j)
        for(int i = 0; i < p_Nq; ++i) {
          dfloat r_Au = 0.f, r_Av = 0.f, r_Aw = 0.f;
          for(int m = 0; m < p_Nq; m++) {
            const dfloat Dim = s_D[m][i]; // Dr'
            const dfloat Djm = s_D[m][j]; // Ds'
            const dfloat Dkm = s_D[m][k]; // Dt'
            r_Au += Dim * s_SUr[k][j][m];
            r_Au += Djm * s_SUs[k][m][i];
            r_Au += Dkm * s_SUt[m][j][i];
            r_Av += Dim * s_SVr[k][j][m];
            r_Av += Djm * s_SVs[k][m][i];
            r_Av += Dkm * s_SVt[m][j][i];
            r_Aw += Dim * s_SWr[k][j][m];
            r_Aw += Djm * s_SWs[k][m][i];
            r_Aw += Dkm * s_SWt[m][j][i];
          }
          const dlong id = e * p_Np + k * p_Nq * p_Nq + j * p_Nq + i;
          const dlong gid = i + j * p_Nq + k * p_Nq * p_Nq + e * p_Np * p_Nvgeo;
          const dfloat JW = vgeo[gid + p_JWID * p_Np];
          Aq[id + 0 * offset] = r_Au + u_lam1 * JW * s_U[k][j][i];
          Aq[id + 1 * offset] = r_Av + v_lam1 * JW * s_V[k][j][i];
          Aq[id + 2 * offset] = r_Aw + w_lam1 * JW * s_W[k][j][i];
        }
  }
}
|
triangle_mesh.h | /**
* Copyright (c) 2017 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#pragma once
#include "saiga/core/geometry/aabb.h"
#include "saiga/core/geometry/triangle.h"
#include "saiga/core/geometry/vertex.h"
#include "saiga/core/math/math.h"
#include "saiga/core/util/assert.h"
#include "Mesh.h"
#include <algorithm>
#include <cstring>
#include <numeric>
namespace Saiga
{
/*
* Data structur for simple triangle meshes.
* Can be turned into a IndexedVertexBuffer for drawing with OpenGL
*/
/**
 * Indexed triangle mesh: vertices (from Mesh<vertex_t>) plus a list of
 * Face index triples.  Can be turned into an IndexedVertexBuffer for
 * drawing with OpenGL.
 */
template <typename vertex_t, typename index_t>
class TriangleMesh : public Mesh<vertex_t>
{
   public:
    using VertexType = vertex_t;
    using IndexType  = index_t;

    using Base = Mesh<vertex_t>;
    using Base::aabb;
    using Base::addVertex;
    using Base::size;
    using Base::vertices;

    // One triangle, stored as three indices into 'vertices'.
    struct SAIGA_ALIGN(4) Face
    {
        index_t v1, v2, v3;
        Face() {}
        Face(const index_t& v1, const index_t& v2, const index_t& v3) : v1(v1), v2(v2), v3(v3) {}
        index_t& operator[](int idx)
        {
            // assume index_t alignment: v1,v2,v3 are laid out contiguously
            return *((&v1) + idx);
        }
        const index_t& operator[](int idx) const
        {
            // assume index_t alignment
            return *((&v1) + idx);
        }
    };

    // Applies 'trafo' (w = 0, i.e. rotation/scale only) to every vertex normal.
    void transformNormal(const mat4& trafo);

    /*
     * Deletes all vertices and faces.
     */
    void clear()
    {
        Base::clear();
        faces.resize(0);
    }

    /*
     * Adds face to mesh.
     * The indices of the face should match existing vertices
     * return: index of new face
     */
    int addFace(const Face& f)
    {
        faces.push_back(f);
        return faces.size() - 1;
    }
    int addFace(index_t f[3]) { return addFace(Face(f[0], f[1], f[2])); }
    int addFace(index_t v0, index_t v1, index_t v2) { return addFace(Face(v0, v1, v2)); }

    /*
     * Adds given vertices and the 2 corresponding triangles to mesh
     */
    void addQuad(vertex_t verts[4]);
    void addTriangle(vertex_t verts[3]);

    /*
     * Adds 2 Triangles given by 4 vertices and form a quad.
     * The vertices should be orderd counter clockwise
     */
    void addQuad(index_t inds[4]);

    /*
     * Subdivides the triangle at index 'face' into 4 triangles.
     * The new triangles will be added to the mesh and the old will be overwritten
     */
    void subdivideFace(int face);

    /*
     * Inverts the triangle at index 'face'.
     * The order of the indices will be reversed.
     */
    void invertFace(int face);
    void invertMesh();

    /*
     * Converts the index face data structur to a simple triangle list.
     */
    void toTriangleList(std::vector<Triangle>& output);

    /*
     * Adds the complete mesh 'other' to the current mesh.
     */
    void addMesh(const TriangleMesh<vertex_t, index_t>& other);
    template <typename mesh_vertex_t, typename mesh_index_t>
    void addMesh(const TriangleMesh<mesh_vertex_t, mesh_index_t>& other);

    /**
     * Computes the per vertex normal by weighting each face normal by its surface area.
     */
    void computePerVertexNormal();

    /**
     * Removes all vertices that are not referenced by a triangle.
     * Computes the new vertex indices for each triangle.
     */
    void removeUnusedVertices();

    /**
     * Computes the size in bytes for this triangle mesh.
     */
    size_t size();
    void free();
    int numIndices() { return faces.size() * 3; }
    bool isValid();

    /**
     * Sorts the vertices by (x,y,z) lexical.
     * The face indices are correct to match the new vertices.
     */
    void sortVerticesByPosition();

    /**
     * Removes subsequent vertices if they have identical position.
     * It make sense to call it after 'sortVerticesByPosition'.
     *
     * The face indices are updated accordingly.
     */
    void removeSubsequentDuplicates();

    /**
     * Removes all triangles, which reference a vertex twice
     */
    void removeDegenerateFaces();

    template <typename v, typename i>
    friend std::ostream& operator<<(std::ostream& os, const TriangleMesh<v, i>& dt);

    // Flattens 'faces' into a plain index list (3 entries per face).
    // Relies on Face's v1,v2,v3 being contiguous (see operator[]).
    std::vector<index_t> getIndexList()
    {
        std::vector<index_t> indices(numIndices());
        // guard: an empty mesh must not dereference faces[0]
        if (!faces.empty())
        {
            std::copy(&faces[0].v1, &faces[0].v1 + numIndices(), indices.data());
        }
        return indices;
    }

    /**
     * Writes this mesh in OFF format to the given output stream.
     */
    void saveMeshOff(std::ostream& strm);

   public:
    //    std::vector<vertex_t> vertices;
    std::vector<Face> faces;
};
// Rotates every vertex normal by 'trafo' (extended with w = 0, so any
// translation part is ignored); the stored w of each normal is preserved.
template <typename vertex_t, typename index_t>
void TriangleMesh<vertex_t, index_t>::transformNormal(const mat4& trafo)
{
    for (vertex_t& vert : vertices)
    {
        const vec4 rotated = trafo * make_vec4(make_vec3(vert.normal), 0);
        vert.normal        = make_vec4(make_vec3(rotated), vert.normal[3]);
    }
}
// Appends the four corners and triangulates them fan-wise from the first
// corner: (0,1,2) and (0,2,3).
template <typename vertex_t, typename index_t>
void TriangleMesh<vertex_t, index_t>::addQuad(vertex_t verts[])
{
    const int base = vertices.size();
    for (int k = 0; k < 4; ++k) addVertex(verts[k]);
    faces.emplace_back(base, base + 1, base + 2);
    faces.emplace_back(base, base + 2, base + 3);
}
// Appends the three vertices and one face referencing them.
template <typename vertex_t, typename index_t>
void TriangleMesh<vertex_t, index_t>::addTriangle(vertex_t verts[])
{
    const int base = vertices.size();
    for (int k = 0; k < 3; ++k) addVertex(verts[k]);
    faces.emplace_back(base, base + 1, base + 2);
}
// Splits the (counter-clockwise) quad into the triangles (0,1,2) and (2,3,0).
template <typename vertex_t, typename index_t>
void TriangleMesh<vertex_t, index_t>::addQuad(index_t inds[])
{
    faces.emplace_back(inds[0], inds[1], inds[2]);
    faces.emplace_back(inds[2], inds[3], inds[0]);
}
// Subdivides face f into 4 triangles by inserting the three edge
// midpoints.  'face' is copied by value first because addVertex /
// push_back may reallocate the underlying vectors.
template <typename vertex_t, typename index_t>
void TriangleMesh<vertex_t, index_t>::subdivideFace(int f)
{
Face face = faces[f];
// _TM_POS(xs) reads the position of corner xs of the *copied* face
#define _TM_POS(xs) (vertices[face.xs].position)
// create 3 new vertices in the middle of the edges
// locals v1,v2,v3 are the indices of the midpoints of edges
// (v1,v2), (v1,v3) and (v2,v3) respectively
int v1 = addVertex(vertex_t(vec4((_TM_POS(v1) + _TM_POS(v2)) / 2.0f)));
int v2 = addVertex(vertex_t(vec4((_TM_POS(v1) + _TM_POS(v3)) / 2.0f)));
int v3 = addVertex(vertex_t(vec4((_TM_POS(v2) + _TM_POS(v3)) / 2.0f)));
// three corner triangles appended, center triangle replaces the original
faces.push_back(Face(face.v2, v3, v1));
faces.push_back(Face(face.v3, v2, v3));
faces.push_back(Face(v1, v3, v2));
faces[f] = Face(face.v1, v1, v2);
// NOTE(review): _TM_POS stays #defined after this function (no #undef)
}
// Reverses the winding order of face f by exchanging its first and last
// vertex index.
template <typename vertex_t, typename index_t>
void TriangleMesh<vertex_t, index_t>::invertFace(int f)
{
    Face& face        = faces[f];
    const index_t tmp = face.v1;
    face.v1           = face.v3;
    face.v3           = tmp;
}
// Flips the winding of every face and negates every vertex normal, so
// the whole mesh faces the opposite direction.
template <typename vertex_t, typename index_t>
void TriangleMesh<vertex_t, index_t>::invertMesh()
{
    for (Face& face : faces)
    {
        const index_t tmp = face.v1;
        face.v1           = face.v3;
        face.v3           = tmp;
    }
    for (vertex_t& vert : vertices)
    {
        vert.normal = -vert.normal;
    }
}
// Appends one explicit Triangle per face to 'output' (which is not
// cleared first).
template <typename vertex_t, typename index_t>
void TriangleMesh<vertex_t, index_t>::toTriangleList(std::vector<Triangle>& output)
{
    for (const Face& f : faces)
    {
        Triangle tri;
        tri.a = make_vec3(vertices[f.v1].position);
        tri.b = make_vec3(vertices[f.v2].position);
        tri.c = make_vec3(vertices[f.v3].position);
        output.push_back(tri);
    }
}
// Appends the whole mesh 'other' to this mesh.  The vertices are copied
// in one bulk insert (the original pushed them one by one, paying a
// per-element copy and repeated reallocation); the appended faces are
// re-indexed relative to the new vertex base.
template <typename vertex_t, typename index_t>
void TriangleMesh<vertex_t, index_t>::addMesh(const TriangleMesh<vertex_t, index_t>& other)
{
    const int oldVertexCount = static_cast<int>(this->vertices.size());
    this->vertices.insert(this->vertices.end(), other.vertices.begin(), other.vertices.end());
    faces.reserve(faces.size() + other.faces.size());
    for (Face f : other.faces)
    {
        f.v1 += oldVertexCount;
        f.v2 += oldVertexCount;
        f.v3 += oldVertexCount;
        this->addFace(f);
    }
}
// Appends a mesh with different vertex/index types.  Each vertex is
// converted via the loop variable's implicit mesh_vertex_t -> vertex_t
// conversion; face indices go through addFace(v0,v1,v2), converting
// mesh_index_t -> index_t.
template <typename vertex_t, typename index_t>
template <typename mesh_vertex_t, typename mesh_index_t>
void TriangleMesh<vertex_t, index_t>::addMesh(const TriangleMesh<mesh_vertex_t, mesh_index_t>& other)
{
int oldVertexCount = this->vertices.size();
for (vertex_t v : other.vertices)
{
this->vertices.push_back(v);
}
// f is a copy of the other mesh's face; shift its indices to the new base
for (auto f : other.faces)
{
f.v1 += oldVertexCount;
f.v2 += oldVertexCount;
f.v3 += oldVertexCount;
this->addFace(f.v1, f.v2, f.v3);
}
}
// Approximate memory footprint in bytes: allocated (capacity) storage of
// both arrays plus the object itself.
template <typename vertex_t, typename index_t>
size_t TriangleMesh<vertex_t, index_t>::size()
{
    const size_t faceBytes   = faces.capacity() * sizeof(Face);
    const size_t vertexBytes = vertices.capacity() * sizeof(vertex_t);
    return faceBytes + vertexBytes + sizeof(TriangleMesh<vertex_t, index_t>);
}
// Drops all elements and releases the heap storage (clear() alone would
// keep the capacity allocated).
template <typename vertex_t, typename index_t>
void TriangleMesh<vertex_t, index_t>::free()
{
    vertices.clear();
    vertices.shrink_to_fit();
    faces.clear();
    faces.shrink_to_fit();
}
// Computes smooth per-vertex normals: each face normal contributes to
// its three vertices weighted by the face's surface area (the cross
// product's length), then the sums are normalized.  The normals' w
// component is kept intact because it might be used by the application.
template <typename vertex_t, typename index_t>
void TriangleMesh<vertex_t, index_t>::computePerVertexNormal()
{
    // reset the xyz part of every normal to zero
    for (int i = 0; i < (int)vertices.size(); ++i)
    {
        vec3 n             = make_vec3(0);
        vertices[i].normal = make_vec4(n, vertices[i].normal[3]);
    }
    // accumulate area-weighted face normals
    for (int i = 0; i < (int)faces.size(); ++i)
    {
        Face& f = faces[i];
        vec3 a  = make_vec3(vertices[f.v1].position);
        vec3 b  = make_vec3(vertices[f.v2].position);
        vec3 c  = make_vec3(vertices[f.v3].position);
        vec3 n  = cross(b - a, c - a);
        // Note: do not normalize here because the length is the surface area
        vertices[f.v1].normal += make_vec4(n, 0);
        vertices[f.v2].normal += make_vec4(n, 0);
        vertices[f.v3].normal += make_vec4(n, 0);
    }
    // normalize the accumulated sums
    for (int i = 0; i < (int)vertices.size(); ++i)
    {
        vec3 n = make_vec3(vertices[i].normal);
        // A vertex referenced by no face (or only degenerate faces) has a
        // zero sum; normalizing it would produce NaNs, so keep it zero.
        if (n[0] != 0 || n[1] != 0 || n[2] != 0) n = normalize(n);
        vertices[i].normal = make_vec4(n, vertices[i].normal[3]);
    }
}
// Rebuilds the vertex array keeping only vertices referenced by at least
// one face, and rewrites every face index in place.
// vmap[old] == -1 means "not yet emitted"; otherwise it is the new index.
template <typename vertex_t, typename index_t>
void TriangleMesh<vertex_t, index_t>::removeUnusedVertices()
{
    std::vector<int> vmap(vertices.size(), -1);
    auto vcopy = vertices;
    vertices.clear();
    for (int i = 0; i < (int)faces.size(); ++i)
    {
        auto& f = faces[i];
        // inner counter renamed to 'k': the original shadowed the outer 'i'
        for (int k = 0; k < 3; ++k)
        {
            auto& v = f[k];
            if (vmap[v] == -1)
            {
                // first time this vertex is seen: append it and record
                // its new index
                int count = vertices.size();
                vmap[v]   = count;
                vertices.push_back(vcopy[v]);
            }
            v = vmap[v];
        }
    }
}
// A mesh is valid when every face references only existing vertices.
template <typename vertex_t, typename index_t>
bool TriangleMesh<vertex_t, index_t>::isValid()
{
    for (const Face& f : faces)
    {
        for (int k = 0; k < 3; ++k)
        {
            if (f[k] < 0 || f[k] >= vertices.size()) return false;
        }
    }
    return true;
}
// Sorts the vertices lexicographically by (x,y,z) and rewrites the face
// indices to match the new order.
template <typename vertex_t, typename index_t>
void TriangleMesh<vertex_t, index_t>::sortVerticesByPosition()
{
// tmp_indices : sorted position -> old vertex index (the permutation)
// tmp_indices2: old vertex index -> sorted position (its inverse),
//               used below to rewrite the face indices
std::vector<int> tmp_indices(vertices.size());
std::vector<int> tmp_indices2(vertices.size());
std::iota(tmp_indices.begin(), tmp_indices.end(), 0);
std::sort(tmp_indices.begin(), tmp_indices.end(), [&](int a, int b) {
auto p1 = vertices[a].position;
auto p2 = vertices[b].position;
return std::tie(p1[0], p1[1], p1[2]) < std::tie(p2[0], p2[1], p2[2]);
});
// build the sorted vertex array and the inverse permutation in one pass
std::vector<vertex_t> new_vertices(vertices.size());
for (int i = 0; i < (int)new_vertices.size(); ++i)
{
new_vertices[i] = vertices[tmp_indices[i]];
tmp_indices2[tmp_indices[i]] = i;
}
for (auto& f : faces)
{
f.v1 = tmp_indices2[f.v1];
f.v2 = tmp_indices2[f.v2];
f.v3 = tmp_indices2[f.v3];
}
vertices.swap(new_vertices);
}
// Collapses runs of vertices with identical position into a single
// vertex and remaps the face indices accordingly.  Only *subsequent*
// duplicates are merged — call sortVerticesByPosition() first to remove
// all duplicates.
template <typename vertex_t, typename index_t>
void TriangleMesh<vertex_t, index_t>::removeSubsequentDuplicates()
{
    if (vertices.size() <= 1) return;
    // tmp_indices[i]: new index of old vertex i
    std::vector<int> tmp_indices(vertices.size());
    std::vector<vertex_t> new_vertices;
    int currentIdx = -1;
    vec4 currentPos;
    for (int i = 0; i < (int)vertices.size(); ++i)
    {
        auto p = vertices[i].position;
        // Test i == 0 first: currentPos is uninitialized until the first
        // vertex is accepted (the original compared against it before the
        // i == 0 check, reading an indeterminate value).
        if (i == 0 || p != currentPos)
        {
            new_vertices.push_back(vertices[i]);
            currentIdx++;
            currentPos = p;
        }
        tmp_indices[i] = currentIdx;
    }
    // new_vertices already holds the representative of every run, so no
    // second fix-up pass over the vertices is needed.
    for (auto& f : faces)
    {
        f.v1 = tmp_indices[f.v1];
        f.v2 = tmp_indices[f.v2];
        f.v3 = tmp_indices[f.v3];
    }
    vertices.swap(new_vertices);
}
// Removes every face that references the same vertex more than once.
template <typename vertex_t, typename index_t>
void TriangleMesh<vertex_t, index_t>::removeDegenerateFaces()
{
    auto isDegenerate = [](const Face& f) {
        return f.v1 == f.v2 || f.v1 == f.v3 || f.v2 == f.v3;
    };
    // erase-remove idiom: compact the kept faces, then drop the tail
    auto newEnd = std::remove_if(faces.begin(), faces.end(), isDegenerate);
    faces.erase(newEnd, faces.end());
}
// Writes the mesh in the ASCII OFF format: header, vertex count line,
// one position per vertex, then one "3 i j k" line per face.
// '\n' is used instead of std::endl: the bytes written are identical but
// the stream is no longer flushed after every single line.
template <typename vertex_t, typename index_t>
void TriangleMesh<vertex_t, index_t>::saveMeshOff(std::ostream& strm)
{
    strm << "OFF" << '\n';
    // first line: number of vertices, number of faces, number of edges (can be ignored)
    strm << vertices.size() << " " << faces.size() << " 0" << '\n';
    for (auto& v : vertices)
    {
        strm << v.position[0] << " " << v.position[1] << " " << v.position[2] << '\n';
    }
    for (auto& f : faces)
    {
        strm << "3"
             << " " << f[0] << " " << f[1] << " " << f[2] << '\n';
    }
}
// Prints a short human-readable summary: vertex and face counts.
template <typename vertex_t, typename index_t>
std::ostream& operator<<(std::ostream& os, const TriangleMesh<vertex_t, index_t>& dt)
{
    os << "TriangleMesh. V=";
    os << dt.vertices.size();
    os << " F=";
    os << dt.faces.size();
    return os;
}
} // namespace Saiga
|
gmm_uborder_fun.c | /*
*
* gmm_uborder_fun.c
*
* Code generation for function 'gmm_uborder_fun'
*
*/
/* Include files */
#include "gmm_uborder_fun.h"
#include "fetch_thresholds.h"
#include "fetch_thresholds_emxutil.h"
#include "rt_nonfinite.h"
#include <math.h>
/* Function Definitions */
// Upper border of a 2-component Gaussian mixture at x: the maximum of
// the two amplitude-scaled component pdfs.  Mirrors MATLAB max()
// semantics: a NaN first component loses to a non-NaN second one.
double __anon_fcn(const double mu[2], const double sig[2], const double amp[2],
double x)
{
double varargout_1;
double varargin_1[2];
// varargin_1[i] = amp[i] * normpdf(x, mu[i], sig[i])
normpdfs(x, mu, sig, amp, varargin_1);
if ((varargin_1[0] < varargin_1[1]) || (rtIsNaN(varargin_1[0]) && (!rtIsNaN
(varargin_1[1])))) {
varargout_1 = varargin_1[1];
} else {
varargout_1 = varargin_1[0];
}
return varargout_1;
}
/*
* function [ vals ] = normpdfs(x, mu, sig, amp)
*/
/*
 * function [ vals ] = normpdfs(x, mu, sig, amp)
 *
 * Variable-size variant: vals(i) = amp(i) * normpdf(x, mu(i), sig(i))
 * for every mixture component i (x is scalar, so vals is n-by-1).
 * A non-positive sigma yields NaN, matching MATLAB's normpdf.
 */
void b_normpdfs(double x, const emxArray_real_T *mu, const emxArray_real_T *sig,
const emxArray_real_T *amp, emxArray_real_T *vals)
{
int ub_loop;
int loop_ub;
int i;
double t;
/* 'gmm_uborder_fun:14' n = length(mu); */
/* 'gmm_uborder_fun:15' vals = NaN(n, length(x)); */
/* resize vals to length(mu) and prefill with NaN */
ub_loop = vals->size[0];
vals->size[0] = mu->size[0];
emxEnsureCapacity_real_T(vals, ub_loop);
loop_ub = mu->size[0];
for (ub_loop = 0; ub_loop < loop_ub; ub_loop++) {
vals->data[ub_loop] = rtNaN;
}
ub_loop = mu->size[0] - 1;
#pragma omp parallel for \
num_threads(omp_get_max_threads()) \
private(t)
for (i = 0; i <= ub_loop; i++) {
/* 'gmm_uborder_fun:17' vals(i, :) = amp(i) * normpdf(x, mu(i), sig(i)); */
if (sig->data[i] > 0.0) {
t = (x - mu->data[i]) / sig->data[i];
/* 2.5066282746310002 = sqrt(2*pi) */
t = exp(-0.5 * t * t) / (2.5066282746310002 * sig->data[i]);
} else {
t = rtNaN;
}
vals->data[i] = amp->data[i] * t;
}
}
/*
* function [ vals ] = normpdfs(x, mu, sig, amp)
*/
/*
 * function [ vals ] = normpdfs(x, mu, sig, amp)
 *
 * Fixed-size (2-component) variant:
 * vals[i] = amp[i] * normpdf(x, mu[i], sig[i]); a non-positive sigma
 * yields NaN, matching MATLAB's normpdf.
 */
void normpdfs(double x, const double mu[2], const double sig[2], const double
amp[2], double vals[2])
{
int i;
double t;
#pragma omp parallel for \
num_threads(omp_get_max_threads()) \
private(t)
for (i = 0; i < 2; i++) {
/* 'gmm_uborder_fun:17' vals(i, :) = amp(i) * normpdf(x, mu(i), sig(i)); */
if (sig[i] > 0.0) {
/* standardize, then scale by 1/(sqrt(2*pi)*sigma) */
const double z = (x - mu[i]) / sig[i];
t = exp(-0.5 * z * z) / (2.5066282746310002 * sig[i]);
} else {
t = rtNaN;
}
vals[i] = amp[i] * t;
}
}
/* End of code generation (gmm_uborder_fun.c) */
|
GB_binop__ne_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ne_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__ne_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__ne_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__ne_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ne_uint8)
// A*D function (colscale): GB (_AxD__ne_uint8)
// D*A function (rowscale): GB (_DxB__ne_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__ne_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__ne_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ne_uint8)
// C=scalar+B GB (_bind1st__ne_uint8)
// C=scalar+B' GB (_bind1st_tran__ne_uint8)
// C=A+scalar GB (_bind2nd__ne_uint8)
// C=A'+scalar GB (_bind2nd_tran__ne_uint8)
// C type: bool
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_NE || GxB_NO_UINT8 || GxB_NO_NE_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// NE is not one of those ops, so no dense C += A+B kernel is generated for
// it (it appears as "GB ((none))" in the kernel list at the top of this
// file) and the whole definition is compiled out with #if 0.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense (see the section banner above);
// the actual loop lives in the included template, specialized by the GB_*
// macros defined at the top of this file.
GrB_Info GB (_Cdense_ewise3_noaccum__ne_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// kernel disabled at compile time by the GxB_NO_* flags in GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B (dense accumulate).  Note the inner "#if 0": no accumulator kernel
// is generated for the NE operator, so when enabled at all this function is
// a stub that leaves C untouched and reports success.
GrB_Info GB (_Cdense_accumB__ne_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, scalar accumulate.  As with C += B above, the inner "#if 0"
// compiles the real work out for the NE operator, so this is a stub that
// returns GrB_SUCCESS without modifying C.
GrB_Info GB (_Cdense_accumb__ne_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D: each entry of column j of A is
// combined with D(j,j) via the NE operator (GB_BINOP above).
GrB_Info GB (_AxD__ne_uint8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C holds the boolean results of the comparison
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D: each entry of row i of B is combined
// with D(i,i) via the NE operator (GB_BINOP above).
GrB_Info GB (_DxB__ne_uint8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C holds the boolean results of the comparison
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, optionally masked by M (or its complement).  The
// workspaces declared below are used by the template to slice M, A and B
// into tasks, and are released by GB_FREE_WORK once the template is done.
GrB_Info GB (_AaddB__ne_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B with optional mask, where the result C is
// sparse or hypersparse; all work happens in the included meta template.
GrB_Info GB (_AemultB_08__ne_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP is 0 for NE (see the macro above), so only
// the unflipped branch at the bottom is compiled for this operator.
GrB_Info GB (_AemultB_02__ne_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant.  For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where the mask M is sparse/hyper and
// both A and B are bitmap/full; the loop lives in the included template.
GrB_Info GB (_AemultB_04__ne_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult producing a bitmap C: C=A.*B, C<M>=A.*B, or C<!M>=A.*B; all
// cases are handled by the included bitmap template.
GrB_Info GB (_AemultB_bitmap__ne_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__ne_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx [p] = (x != Bx [p]) for every entry present in B; entries absent
// according to GBB (Bb, p) are left untouched.
bool *Cx = (bool *) Cx_output ;
const uint8_t x = (*((const uint8_t *) x_input)) ;
const uint8_t *Bx = (const uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (GBB (Bb, p))
{
const uint8_t bij = GBX (Bx, p, false) ;
Cx [p] = (x != bij) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__ne_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx [p] = (Ax [p] != y) for every entry present in A; entries absent
// according to GBB (Ab, p) are left untouched.
const uint8_t *Ax = (const uint8_t *) Ax_input ;
const uint8_t y = (*((const uint8_t *) y_input)) ;
bool *Cx = (bool *) Cx_output ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (GBB (Ab, p))
{
const uint8_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij != y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below: while transposing A
// into C it computes cij = (x != aij) with x bound as the first argument.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
GrB_Info GB (_bind1st_tran__ne_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code generated after this function
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below: while transposing A
// into C it computes cij = (aij != y) with y bound as the second argument.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
GrB_Info GB (_bind2nd_tran__ne_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
PVMappingFilterTime24h.h | /* * MIT License
*
* © ESI Group, 2015
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
*
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
*
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef PVFILTER_PVMAPPINGFILTERTIME24H_H
#define PVFILTER_PVMAPPINGFILTERTIME24H_H
#include <inendi/PVMappingFilter.h>
#include <pvkernel/rush/PVNraw.h>
#include <boost/date_time/posix_time/posix_time.hpp>
#include <unicode/calendar.h>
#include <pvcop/types/datetime.h>
#include <omp.h>
using namespace icu_67;
namespace Inendi
{
// Mapping filter: maps each value of a datetime column to its time of day,
// expressed in milliseconds since midnight (0 .. 24*60*60*1000 - 1), as
// shown by the bounds returned in get_minmax() below.
class PVMappingFilterTime24h : public PVMappingFilter
{
public:
PVMappingFilterTime24h();
public:
// Computes the mapping for column `col` of `nraw`; the storage layout of
// the input depends on the column's formatter, hence the three branches.
pvcop::db::array operator()(PVCol const col, PVRush::PVNraw const& nraw) override
{
auto f = nraw.column(col).formatter();
const pvcop::db::array& array = nraw.column(col);
// one uint32 (milliseconds since midnight) per input row
pvcop::db::array dest("number_uint32", array.size());
auto& dest_array = dest.to_core_array<uint32_t>();
if (std::string(f->name()) == "datetime") {
// second-resolution timestamps stored as uint32
auto& core_array = array.to_core_array<uint32_t>();
#pragma omp parallel for
for (size_t row = 0; row < array.size(); row++) {
tm local_tm;
const time_t t = static_cast<int64_t>(core_array[row]);
// split the timestamp with gmtime_r; no sub-second part at this resolution
pvcop::types::formatter_datetime::gmtime_r(&t, &local_tm);
dest_array[row] =
(((local_tm.tm_hour * 60) + local_tm.tm_min) * 60 + local_tm.tm_sec) * 1000;
}
} else if (std::string(f->name()) == "datetime_us") {
// microsecond-resolution timestamps stored as boost ptime
auto& core_array = array.to_core_array<boost::posix_time::ptime>();
#pragma omp parallel for
for (size_t row = 0; row < array.size(); row++) {
const boost::posix_time::ptime t = core_array[row];
const auto& tod = t.time_of_day();
// keep millisecond precision: seconds*1000 + fractional/1000
dest_array[row] = (tod.total_seconds() * 1000) + (tod.fractional_seconds() / 1000);
}
} else {
assert(std::string(f->name()) == "datetime_ms" && "Unknown datetime formatter");
auto& core_array = array.to_core_array<uint64_t>();
// One ICU Calendar per potential OpenMP thread, created up-front so
// that threads never share a Calendar instance inside the parallel
// region below.
std::vector<std::unique_ptr<Calendar>> calendars;
for (size_t i = 0; i < (size_t)omp_get_max_threads(); i++) {
UErrorCode err = U_ZERO_ERROR;
calendars.emplace_back(Calendar::createInstance(err));
if (not U_SUCCESS(err)) {
throw std::runtime_error("Can't create calendar to compute mapping");
}
}
#pragma omp parallel
{
// each thread uses the calendar matching its thread id
std::unique_ptr<Calendar>& cal = calendars[omp_get_thread_num()];
UErrorCode err = U_ZERO_ERROR;
#pragma omp for
for (size_t row = 0; row < array.size(); row++) {
cal->setTime(core_array[row], err);
if (not U_SUCCESS(err)) {
// on any ICU error the row keeps its default mapping value
continue;
}
int32_t millisec = cal->get(UCAL_MILLISECOND, err);
if (not U_SUCCESS(err)) {
continue;
}
int32_t sec = cal->get(UCAL_SECOND, err);
if (not U_SUCCESS(err)) {
continue;
}
int32_t min = cal->get(UCAL_MINUTE, err);
if (not U_SUCCESS(err)) {
continue;
}
int32_t hour = cal->get(UCAL_HOUR_OF_DAY, err);
if (not U_SUCCESS(err)) {
continue;
}
dest_array[row] = ((sec + (min * 60) + (hour * 60 * 60)) * 1000) + millisec;
}
}
}
return dest;
}
std::unordered_set<std::string> list_usable_type() const override { return {"time"}; }
QString get_human_name() const override { return QString("24h"); }
// Mapping bounds: a full day in milliseconds, i.e. [0, 86400000).
pvcop::db::array get_minmax(pvcop::db::array const&, pvcop::db::selection const&) const override
{
pvcop::db::array res("number_uint32", 2);
auto res_array = res.to_core_array<uint32_t>();
res_array[0] = 0;
res_array[1] = (24 * 60 * 60 * 1000) - 1;
return res;
}
CLASS_FILTER_NOPARAM(PVMappingFilterTime24h)
};
}
#endif
|
replicate_model_part_utility.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Miguel Maso Sotomayor
//
#ifndef KRATOS_REPLICATE_MODEL_PART_UTILITY
#define KRATOS_REPLICATE_MODEL_PART_UTILITY
// System includes
// External includes
// Project includes
#include "includes/model_part.h"
namespace Kratos
{
///@addtogroup ShallowWaterApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @ingroup ShallowWaterApplication
* @class ReplicateModelPartUtility
* @brief This utility replicates a model part to print the topography in the post-process
*/
class KRATOS_API(SHALLOW_WATER_APPLICATION) ReplicateModelPartUtility
{
public:
///@name Type Definitions
///@{
typedef std::size_t IndexType;
typedef Geometry<Node<3>>::PointsArrayType NodesArrayType;
/// Pointer definition of ReplicateModelPartUtility
KRATOS_CLASS_POINTER_DEFINITION(ReplicateModelPartUtility);
///@}
///@name Life Cycle
///@{
/// Constructor.
ReplicateModelPartUtility(ModelPart& rOriginModelPart, ModelPart& rDestinationModelPart, bool ReplicateSubModelParts = false);
/// Destructor.
~ReplicateModelPartUtility() = default;
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
 * @brief It creates a copy of the origin model part and all the nodes, elements, conditions, properties, it also copies the ProcessInfo.
 */
void Replicate();
/**
 * @brief This method copies a historical (solution-step) variable from the origin model part to the destination model part for post-process purpose.
 * @param rVariable The variable to copy
 * @note Replicate() must have been called first so that mReplicatedNodesMap holds a replicated node for every origin node id.
 */
template<class TVarType>
void TransferVariable(const TVarType& rVariable)
{
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(mrOriginModelPart.NumberOfNodes()); ++i)
{
const auto it_node = mrOriginModelPart.NodesBegin() + i;
// Use at() rather than operator[]: operator[] default-inserts a null
// pointer for an unknown id, which both hides the error and mutates
// the map concurrently (a data race inside this parallel loop).
// at() is read-only and fails loudly; binding by const& also avoids
// copying the smart pointer on every iteration.
const auto& dest_node = mReplicatedNodesMap.at(it_node->Id());
dest_node->FastGetSolutionStepValue(rVariable) = it_node->FastGetSolutionStepValue(rVariable);
}
}
/**
 * @brief This method copies a non-historical (nodal data value) variable from the origin model part to the destination model part for post-process purpose.
 * @param rVariable The variable to copy
 * @note Replicate() must have been called first so that mReplicatedNodesMap holds a replicated node for every origin node id.
 */
template<class TVarType>
void TransferNonHistoricalVariable(const TVarType& rVariable)
{
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(mrOriginModelPart.NumberOfNodes()); ++i)
{
const auto it_node = mrOriginModelPart.NodesBegin() + i;
// at() instead of operator[]: read-only lookup, no racy default-insert
const auto& dest_node = mReplicatedNodesMap.at(it_node->Id());
dest_node->SetValue(rVariable, it_node->GetValue(rVariable));
}
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const
{
return "ReplicateModelPartUtility";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const
{
rOStream << Info();
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const {}
///@}
///@name Friends
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
ModelPart& mrOriginModelPart;
ModelPart& mrDestinationModelPart;
const bool mReplicateSubModelParts;
// Maps origin node ids to their replicated counterparts; presumably
// populated by Replicate() — it must be filled before the Transfer*
// methods are used.
std::unordered_map<IndexType, Node<3>::Pointer> mReplicatedNodesMap;
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
void GetMaximumIds(IndexType& rUniqueNodeId, IndexType& rUniqueElemId, IndexType& rUniqueCondId, IndexType& rUniquePropId);
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
ReplicateModelPartUtility& operator=(ReplicateModelPartUtility const& rOther) = delete;
/// Copy constructor.
ReplicateModelPartUtility(ReplicateModelPartUtility const& rOther) = delete;
///@}
}; // Class ReplicateModelPartUtility
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function (declaration only; no definition in this header)
inline std::istream& operator >> (std::istream& rIStream,
ReplicateModelPartUtility& rThis);
/// output stream function: prints the utility's info line, a newline (with
/// flush, via std::endl), then its data.
inline std::ostream& operator << (std::ostream& rOStream,
const ReplicateModelPartUtility& rThis)
{
rThis.PrintInfo(rOStream);
rOStream << std::endl;
rThis.PrintData(rOStream);
return rOStream;
}
///@}
///@} addtogroup block
} // namespace Kratos.
#endif // KRATOS_REPLICATE_MODEL_PART_UTILITY defined
|
CGAL_normEst.h | /* License Information
*
* Copyright (C) 2012 Boulch Alexandre, Ecole Nationale des Ponts et Chaussees -
* Ecole des Ponts ParisTech
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* Note that this library relies on external libraries subject to their own license.
* To use this software, you are subject to the dependencies license, these licenses applies to the dependency
* ONLY and NOT this code.
* Please refer below to the web sites for license informations.
*
* OPENMP (http://openmp.org/)
* CGAL (http://www.cgal.org/) see CGAL Licence Term
*/
#ifndef NORM_EST_CGAL_OMP_H
#define NORM_EST_CGAL_OMP_H
#if defined(_OPENMP) && defined(USE_OPENMP_FOR_NORMEST)
#include <omp.h>
#endif
#include <CGAL/Simple_cartesian.h>
#include <CGAL/Point_3.h>
#include <CGAL/Aff_transformation_3.h>
#include <CGAL/Vector_3.h>
#include <CGAL/Orthogonal_k_neighbor_search.h>
#include <CGAL/Kd_tree.h>
#include <CGAL/Search_traits_3.h>
#include <CGAL/Fuzzy_sphere.h>
#include <iostream>
#include <time.h>
#include <vector>
#include <map>
/*!
* \file CGAL_normEst.h
* \brief Normal Estimator for point clouds
* \author Alexandre Boulch
* \version 0.1
*
* Normal estimator based on the publication from SGP2012
* "Fast and Robust Normal Estimator for Point Clouds"
*/
/*!
* \class CGAL_Normal_Estimator
* \brief Class grouping different variant of the algorithm
*/
class CGAL_Normal_Estimator{
private:
public:
/*!
 * \class My_Triplet
 * \brief minimal fixed-size container holding exactly three values of type T
 */
template <typename T>
class My_Triplet{
private:
T m_items[3];
public:
My_Triplet(){}
My_Triplet(T a, T b, T c){
m_items[0] = a;
m_items[1] = b;
m_items[2] = c;
}
// element access, i in [0,2]; the non-const overload returns a mutable
// reference so elements can be assigned through it
T& operator()(int i){return m_items[i];}
T operator()(int i) const {return m_items[i];}
};
typedef CGAL::Simple_cartesian<float> Kernel;
typedef typename CGAL::Point_3<Kernel> Point3;
typedef typename CGAL::Vector_3<Kernel> Vector3;
typedef typename CGAL::Aff_transformation_3<Kernel> Matrix3;
typedef typename CGAL::Search_traits_3<Kernel> TreeTraits;
typedef My_Triplet<Point3> Triplet;
typedef typename CGAL::Orthogonal_k_neighbor_search<TreeTraits> Neighbor_search;
typedef typename CGAL::Kd_tree<TreeTraits> Tree;
typedef typename CGAL::Fuzzy_sphere<TreeTraits> Fuzzy_sphere;
typedef typename std::vector<Point3>::iterator vecPt3Iterator;
typedef typename std::vector<Vector3>::iterator vecVec3Iterator;
typedef typename std::vector<Triplet>::iterator vecTripIterator;
typedef typename Neighbor_search::iterator Neighbor_search_iterator;
enum{MEAN=0, /*!<MEAN value 0, used for normal computation by mean*/
BEST=1, /*!<BEST value 1, used for normal computation by best confidence*/
CLUSTER=2, /*!<CLUSTER value 2, used for normal computation by clustering*/
POINTS=100, /*<POINTS value 100, used for method choice, triplets by random selection in the neighborhood*/
UNIF=101, /*!<UNIF value 101, used for method choice, triplets by uniform selection in the neighborhood sphere*/
CUBES=102,/*!<CUBES value 102, used for method choice, triplets by selection with cubes discretization*/
KNN=200,/*!<KNN value 200, neighborhood selection type, k-nearest neighbors*/
RADIUS=201 /*!<RADIUS value 201, neighborhood selection type, range search*/
};
/*!
 * \brief Constructor
 * @param points : input point cloud, held by reference (not copied)
 * @param normals : container that will receive the estimated normals,
 *                  held by reference
 * All tunable parameters are reset to their hard-coded defaults.
 */
CGAL_Normal_Estimator(std::vector<Point3> &points, std::vector<Vector3> &normals):
pts(points),nls(normals){
set_default_parameters();
}
// --- Parameter accessors ------------------------------------------------
// Each parameter exposes a mutable-reference getter (usable as a setter)
// and a const value getter.
int& number_of_planes(){return n_planes;}
int number_of_planes() const{return n_planes;}
int& accum_slices(){return n_phi;}
int accum_slices() const{return n_phi;}
int& rotation_number(){return n_rot;}
int rotation_number() const{return n_rot;}
int& normal_selection_mode(){return selection_type;}
int normal_selection_mode() const{return selection_type;}
float& cluster_angle_rad(){return tol_angle_rad;}
float cluster_angle_rad() const{return tol_angle_rad;}
int& minimal_neighbor_number_for_range_search(){return lower_neighbor_bound_neighbors;}
int minimal_neighbor_number_for_range_search() const{return lower_neighbor_bound_neighbors;}
float& small_radius_fact(){return small_radius_factor;}
float small_radius_fact() const{return small_radius_factor;}
int& number_of_cubes(){return n_cubes;}
int number_of_cubes() const{return n_cubes;}
// direct access to the stored point / normal clouds
std::vector<Point3>& point_cloud(){return pts;}
std::vector<Point3> point_cloud()const{return pts;}
std::vector<Vector3>& normal_cloud(){return nls;}
std::vector<Vector3> normal_cloud() const{return nls;}
/*!
 * \brief Runs the normal estimation on the stored point cloud.
 * @param method : triplet-sampling strategy (POINTS, UNIF or CUBES)
 * @param neighborhood_type : KNN (k nearest neighbors) or RADIUS (range search)
 * @param neighborhood_size : k for KNN (truncated to int) or the radius for RADIUS
 *
 * Dispatches to the matching private worker (points_knn, points_radius,
 * unif_knn, unif_radius, cubes_knn or cubes_radius) and logs the chosen
 * configuration on std::cout.  Invalid parameters only print an error
 * message; no exception is thrown.
 */
void estimate(int method=POINTS, int neighborhood_type=KNN, float neighborhood_size=200){
std::cout << "Normal_Estimation ";
switch(method){
case POINTS:
std::cout << "Points ";
switch(neighborhood_type){
case KNN:
std::cout << "Knn=";
std::cout << (int)neighborhood_size << std::endl;
points_knn((int)neighborhood_size);
break;
case RADIUS:
std::cout << "radius=";
std::cout << neighborhood_size << std::endl;
points_radius(neighborhood_size);
break;
default:
std::cout << "Parameter Error : bad neighborhood type" << std::endl;
break;
}
break;
case UNIF:
std::cout << "Unif ";
switch(neighborhood_type){
case KNN:
std::cout << "Knn=";
std::cout << (int)neighborhood_size << std::endl;
unif_knn((int)neighborhood_size);
break;
case RADIUS:
std::cout << "radius=";
std::cout << neighborhood_size << std::endl;
unif_radius(neighborhood_size);
break;
default:
std::cout << "Parameter Error : bad neighborhood type" << std::endl;
break;
}
break;
case CUBES:
std::cout << "Cubes ";
switch(neighborhood_type){
case KNN:
std::cout << "Knn=";
std::cout << (int)neighborhood_size << std::endl;
cubes_knn((int)neighborhood_size);
break;
case RADIUS:
std::cout << "radius=";
std::cout << neighborhood_size << std::endl;
cubes_radius(neighborhood_size);
break;
default:
std::cout << "Parameter Error : bad neighborhood type" << std::endl;
break;
}
break;
default:
std::cout << "Parameter Error : bad method" << std::endl;
break;
}
}
private:
float PI;/*!<Pi value used in the class*/
int lower_neighbor_bound_neighbors; /*!<lower_neighbor_bound_neighbors minimal number of neighbors in radius search*/
int n_planes;/*!< Plane number to draw*/
int n_phi;/*!< Accumulator discretization parameter*/
int n_rot;/*!< Rotation number*/
float tol_angle_rad;/*!< Angle parameter for cluster normal selection*/
float small_radius_factor;/*!< Factor such that small_radius (for uniform search) = big_radius / small_radius_factor*/
int n_cubes;/*!< Number of cubes in one dimension*/
int selection_type;/*!< Type of selection of normals (1: best, 2: cluster, default: mean)*/
std::vector<Point3>& pts;/*!< Point cloud*/
std::vector<Vector3>& nls;/*!< Normal cloud*/
/*!
 * Resets every tunable parameter to its hard-coded default value.
 */
void set_default_parameters(){
PI = 3.14159265f;
// sampling / accumulator defaults
n_planes = 700;
n_phi = 15;
n_rot = 5;
n_cubes = 4;
// neighborhood and normal-selection defaults
lower_neighbor_bound_neighbors = 10;
small_radius_factor = 4;
tol_angle_rad = 0.79;
selection_type = CLUSTER;
}
/*!
 * fills a vector of random rotation matrix and their inverse
 * @param rotMat : table matrices to fill with rotations
 * @param rotMatInv : table matrices to fill with inverse rotations
 * @param rotations : number of rotations
 */
inline void generate_rotation_matrix(std::vector<Matrix3> &rotMat, std::vector<Matrix3> &rotMatInv, int rotations)
{
rotMat.clear();
rotMatInv.clear();
if(rotations==0){
// no rotation requested: a single identity matrix (its own inverse)
Matrix3 rMat(1,0,0,0,1,0,0,0,1);
rotMat.push_back(rMat);
rotMatInv.push_back(rMat);
}else{
for(int i=0; i<rotations; i++){
// three Euler angles drawn uniformly in [0, 2*pi)
float theta = (rand()+0.f)/RAND_MAX * 2* 3.14159265f;
float phi = (rand()+0.f)/RAND_MAX * 2* 3.14159265f;
float psi = (rand()+0.f)/RAND_MAX * 2* 3.14159265f;
// elementary rotations and their inverses (transposed sign on sin)
Matrix3 Rt(1, 0, 0,0, cos(theta), -sin(theta), 0, sin(theta), cos(theta));
Matrix3 Rph(cos(phi),0, sin(phi),0,1,0,-sin(phi),0, cos(phi));
Matrix3 Rps(cos(psi), -sin(psi), 0, sin(psi), cos(psi),0,0,0,1);
Matrix3 Rtinv(1, 0, 0,0, cos(theta) , sin(theta),0, -sin(theta), cos(theta));
Matrix3 Rphinv(cos(phi) , 0, -sin(phi),0, 1, 0,sin(phi), 0, cos(phi));
Matrix3 Rpsinv(cos(psi) , sin(psi), 0, -sin(psi), cos(psi), 0, 0, 0, 1);
// composed rotation; the inverse composes the inverse factors in
// reverse order, so rMatInv * rMat is the identity
Matrix3 rMat = Rt*Rph*Rps;
Matrix3 rMatInv = Rpsinv*Rphinv*Rtinv;
rotMat.push_back(rMat);
rotMatInv.push_back(rMatInv);
}
}
}
/*!
 * fills a vector with points drawn uniformly inside the unit ball
 * @param points : table of points to fill with random points
 * @param point_number : number of points
 */
inline void generate_random_points_vector(std::vector<Point3> &points, int point_number){
points.resize(point_number);
for(int idx=0; idx<point_number; ++idx){
// rejection sampling: draw from the cube [-1,1]^3 until the sample
// lands inside the unit ball
float px, py, pz;
while(true){
px = ((rand()+0.f)/RAND_MAX)*2-1;
py = ((rand()+0.f)/RAND_MAX)*2-1;
pz = ((rand()+0.f)/RAND_MAX)*2-1;
if(px*px + py*py + pz*pz <= 1) break;
}
points[idx] = Point3(px,py,pz);
}
}
/*!
 * fills a vector with random integers from rand()
 * @param vecInt : table of integers to fill
 * @param point_number : number of integers to draw
 */
inline void generate_random_int_vector(std::vector<int> &vecInt, int point_number){
vecInt.resize(point_number);
for(int &value : vecInt){
value = rand();
}
}
/*!
 * generates a list of triplets without double entry, according to the combinatorial number system
 * @param triplets : table of 3-vector to fill with the indexes of the points
 * @param number_of_points : number of points to consider
 * @param plane_number : number of triplets to generate
 * @param vecInt : table of random int
 */
inline void list_of_triplets(std::vector<Vector3> &triplets,
const int &number_of_points,
const unsigned int &plane_number,
std::vector<int> &vecInt){
/*
 * Here we take care of not using twice the same plane
 * For that we use the combinatorial number system
 */
//computing the number of permutations
// total number of distinct unordered triplets: C(number_of_points, 3)
unsigned long long total_comb = number_of_points;
total_comb *=(number_of_points-1);
total_comb *= (number_of_points-2);
total_comb/=6;
// binomial tables C(i,3) and C(i,2), used below to decode a combinatorial
// index into a triplet; the recurrence C(i,k) = C(i-1,k)*i/(i-k) is exact
// in integer arithmetic
std::vector<unsigned long long> tab_binome_3(number_of_points+1);
std::vector<unsigned long long> tab_binome_2(number_of_points+1);
for(int i=0; i<number_of_points+1; i++){
if(i>3){
tab_binome_3[i] = tab_binome_3[i-1] * i /(i-3);
}else if(i==3){
tab_binome_3[i] = 1;
}else{
tab_binome_3[i] = 0;
}
if(i>2){
tab_binome_2[i] = tab_binome_2[i-1] * i /(i-2);
}else if(i==2){
tab_binome_2[i] = 1;
}else{
tab_binome_2[i] = 0;
}
}
// start from the identity assignment of combinatorial indexes (mod the
// number of available combinations)
std::vector<unsigned long long> comb_idx(plane_number);
for(int i=0; i<plane_number; i++){
comb_idx[i] = i%total_comb;
}
if(total_comb < RAND_MAX){
// shuffle by random transpositions driven by the pre-drawn ints in
// vecInt; swaps targeting positions >= plane_number are remembered in
// table_next so no combination is used twice
std::map<int,int> table_next;
for(int i=0; i<plane_number; i++){
int temp_idx = vecInt[i%vecInt.size()]%total_comb;
if(temp_idx < plane_number){
int temp = comb_idx[i];
comb_idx[i] = comb_idx[temp_idx];
comb_idx[temp_idx] = temp;
}else{
std::map<int,int>::iterator itmap = table_next.find(temp_idx);
if(itmap != table_next.end()){
int temp = comb_idx[i];
comb_idx[i] = itmap->second;
itmap->second = temp;
}else{
comb_idx[i] = temp_idx;
table_next.insert(std::pair<int,int>(temp_idx,i));
}
}
}
}else{
// total_comb exceeds RAND_MAX: build a wide-enough random number by
// concatenating several of the pre-drawn ints, then shuffle as above
unsigned long long ref_RAND_MAX = RAND_MAX;
int size_test = 0;
while(ref_RAND_MAX < total_comb){
size_test++;
ref_RAND_MAX*=RAND_MAX;
}
std::map<unsigned long long ,unsigned long long > table_next;
int pos=0;
for(int i=0; i<plane_number; i++){
//generating a random int
unsigned long long random_int=vecInt[pos%vecInt.size()];
pos++;
for(int j=0; j<size_test; j++){
random_int += ((unsigned long long)vecInt[pos%vecInt.size()])*RAND_MAX*(j+1);
pos++;
}
random_int = random_int % total_comb;
if(random_int < plane_number){
int temp = comb_idx[i];
comb_idx[i] = comb_idx[random_int];
comb_idx[random_int] = temp;
}else{
std::map<unsigned long long,unsigned long long>::iterator itmap = table_next.find(random_int);
if(itmap != table_next.end()){
int temp = comb_idx[i];
comb_idx[i] = itmap->second;
itmap->second = temp;
}else{
comb_idx[i] = random_int;
table_next.insert(std::pair<unsigned long long,unsigned long long>(random_int,i));
}
}
}
}
//getting the triplets from the numbers
// decode each combinatorial index idx = C(c0,3) + C(c1,2) + C(c2,1) into a
// strictly decreasing triplet (c0 > c1 > c2) by greedily taking the largest
// binomial coefficient not exceeding the remainder
triplets.resize(plane_number);
for(int pos = 0; pos< plane_number; pos++){
int comb[3];
unsigned long long idx = comb_idx[pos];
int pos_temp = 0;
while(tab_binome_3[pos_temp]<=idx){
pos_temp++;
}
pos_temp --;
comb[0] = pos_temp;
idx -= tab_binome_3[pos_temp];
if(idx==0){
// remainder exhausted: the two smallest indexes are forced
comb[1] = 1;
comb[2] = 0;
triplets[pos] = Vector3(comb[0], comb[1], comb[2]);
continue;
}
pos_temp = 0;
while(tab_binome_2[pos_temp]<=idx){
pos_temp++;
}
pos_temp --;
comb[1] = pos_temp;
idx -= tab_binome_2[pos_temp];
if(idx==0){
comb[2] = 0;
triplets[pos] = Vector3(comb[0], comb[1], comb[2]);
continue;
}
pos_temp = 0;
while(pos_temp!=idx){
pos_temp++;
}
comb[2] = pos_temp;
triplets[pos] = Vector3(comb[0], comb[1], comb[2]);
}
}
/*!
* generates a vector of n integers (cube indices) with probability depending on the intersection with the unit sphere
* Each cube of the n_cubes^3 grid is weighted by the fraction of its 8 corners lying inside the unit sphere
* (a cheap approximation of the intersected volume), then n indices are drawn by inverse-CDF sampling.
* @param cubes_idx : table of int to fill
* @param n : number of integers to be drawn
*/
inline void generate_cube_vector(std::vector<int> &cubes_idx, int n){
//probabilities of picking a cube (weighted by an approximation of its volume intersecting the sphere)
std::vector<float> probas(n_cubes*n_cubes*n_cubes);
float step = 2.f/n_cubes;
float xmin = -(n_cubes/2.f)*step;
float ymin = -(n_cubes/2.f)*step;
float zmin = -(n_cubes/2.f)*step;
float sum_prob = 0;
for(int k=0; k<n_cubes; k++){
for(int j=0; j<n_cubes; j++){
for(int i=0; i<n_cubes; i++){
float x1 = xmin + i*step;
float y1 = ymin + j*step;
float z1 = zmin + k*step;
float prob = 0;
//test the 8 corners of the cube; each corner inside the unit sphere adds 1/8
//(bit c&1 selects the x face, c&2 the y face, c&4 the z face — same corner
// order as the previous unrolled version, and 0.125 increments are exact in float)
for(int c=0; c<8; c++){
float cx = (c&1) ? x1+step : x1;
float cy = (c&2) ? y1+step : y1;
float cz = (c&4) ? z1+step : z1;
Vector3 pt(cx,cy,cz);
if(pt*pt<= 1){
prob += 0.125f;
}
}
probas[i +j*n_cubes +k*n_cubes*n_cubes] = prob;
sum_prob+= prob;
}
}
}
//normalize and turn into a cumulative distribution
probas[0] /=sum_prob;
for(int i=1; i<n_cubes*n_cubes*n_cubes; i++){
probas[i] /= sum_prob;
probas[i] += probas[i-1];
}
//draw the cubes according to the cumulative probas (binary search per draw)
cubes_idx.resize(n);
for(int i=0; i<n; i++) {
float pos = (rand()+0.f)/RAND_MAX;
int begin = 0;
int end = n_cubes*n_cubes*n_cubes-1;
int temp = (begin+end)/2;
while(temp != begin){
if(probas[temp] < pos){
begin = temp;
}else{
end = temp;
}
temp = (begin+end)/2;
}
cubes_idx[i] = end;
}
}
/*!
* assigns points to the table of small cubes
* @param cubes : table of table of indices of points belonging to cubes
* @param radius : radius of the neighborhood sphere
* @param refPoint : point where the normal is computed
* @param points : table of neighborhood points (transformed) — overwritten here
* @param points2 : table of neighborhood points (original) — read only
* @param rotMat : random rotation applied to both the points and the reference point
*/
inline void assign_points_to_cubes( std::vector<int> cubes[],
float radius, Point3 &refPoint, std::vector<Point3> &points, std::vector<Point3> &points2,
Matrix3 &rotMat
){
//side length of one cell of the n_cubes^3 grid covering the sphere's bounding box
float step = 2.f/n_cubes * radius;
//the rotated reference point is loop-invariant: compute it once
//(previously recomputed for every point of the neighborhood)
Point3 refPoint2 = refPoint;
refPoint2 = refPoint2.transform(rotMat);
for(unsigned int i=0; i<points2.size(); i++){
//rotate the original point and cache the transformed copy
points[i] = points2[i].transform(rotMat);
//map the rotated point to grid coordinates, clamped to [0, n_cubes-1]
int x = std::max(0,std::min(int((points[i].x() - refPoint2.x())/step +(n_cubes/2.f)),n_cubes-1));
int y = std::max(0,std::min(int((points[i].y() - refPoint2.y())/step +(n_cubes/2.f)),n_cubes-1));
int z = std::max(0,std::min(int((points[i].z() - refPoint2.z())/step +(n_cubes/2.f)),n_cubes-1));
cubes[x+y*n_cubes+z*n_cubes*n_cubes].push_back(i);
}
}
/*!
* fills a vector with triplets of int (point indices), one triplet per plane to draw
* @param triplets - table of triplets of points to fill (resized to n_planes)
* @param cubes - table of table of point indices belonging to the cubes
* @param cubes_idx - table of random indices of cubes
* @param cubes_idx - consumed sequentially; if exhausted before n_planes triplets are
*   built, the remaining triplets fall back to pure rand() draws (second loop)
* @param vecInt - table of random int, consumed in lockstep with cubes_idx
*/
inline void generate_cubes_triplets(std::vector<Vector3> &triplets,
std::vector<int> cubes[], std::vector<int> &cubes_idx, std::vector<int> &vecInt){
triplets.resize(n_planes);
vecVec3Iterator ittrip = triplets.begin();
std::vector<int>::iterator itcube = cubes_idx.begin();
std::vector<int>::iterator itint = vecInt.begin();
// idx = how many of the current triplet's slots are filled (0..2)
int idx = 0;
// coord holds the point indices of the triplet under construction
int coord[3];
while(ittrip != triplets.end() && itcube!= cubes_idx.end()){
// skip empty cubes: the random cube draw may land on a cell with no points
if(cubes[*itcube].size()!=0){
// pick a random point from the selected cube
int new_idx = cubes[*itcube][(*itint)%cubes[*itcube].size()];
// reject duplicates within the same triplet
bool is_valid = true;
for(int i=0; i<idx; i++){
if(new_idx == coord[i]){
is_valid = false;
}
}
if(is_valid){
coord[idx] = new_idx;
idx ++;
}
// triplet complete: store it and start a new one
if(idx == 3){
idx =0;
*ittrip = Vector3(coord[0], coord[1], coord[2]);
ittrip ++;
}
}
itcube++;
itint++;
}
// fallback: the pre-drawn randomness ran out before all triplets were built;
// finish the remaining ones with direct rand() draws
while(ittrip != triplets.end()){
int picked_points = 0;
while(picked_points < 3){
int pos = rand()%(n_cubes*n_cubes*n_cubes);
if(cubes[pos].size()==0){
continue;
}
// NOTE(review): this inner `idx` shadows the outer `idx` above — harmless here
// (the outer one is no longer used) but worth renaming.
int idx = rand()%cubes[pos].size();
bool is_valid = true;
for(int i=0; i<picked_points; i++){
if(cubes[pos][idx] == coord[i]){
is_valid = false;
}
}
if(is_valid){
coord[picked_points] = cubes[pos][idx];
picked_points++;
}
}
(*ittrip) = Vector3(coord[0], coord[1], coord[2]);
ittrip++;
}
}
/*! compute a random triplet on the sphere of radius
* Draws uniform points in the ball of radius1 around pt (rejection sampling in the
* cube), then picks a random neighbor of each drawn point within radius2, rejecting
* duplicates, until 3 distinct points are found.
* @param tree - kdtree of the neighborhood
* @param radius1 - radius of the neighborhood sphere
* @param radius2 - small radius for triplet searching
* @param triplet - triplet to fill with the selected points
* @param pt - point where the normal is computed
*/
inline void find_a_triplet(Tree &tree, float radius1, float radius2, Triplet &triplet,
Point3 &pt){
int picked_points = 0;
while(picked_points < 3){
//picking point in the unit ball
/*
*For fast results we pick them in the cube and discard bad points
*/
float x,y,z;
do{
x = ((rand()+0.f)/RAND_MAX)*2-1;
y = ((rand()+0.f)/RAND_MAX)*2-1;
z = ((rand()+0.f)/RAND_MAX)*2-1;
}while(x*x + y*y +z*z >1);
//scale to radius1 and center on pt
x *= radius1;
y *= radius1;
z *= radius1;
x += pt.x();
y += pt.y();
z += pt.z();
//renamed from `pt`: the local previously shadowed the reference parameter `pt`
Point3 center(x,y,z);
Fuzzy_sphere s_query(center,radius2);
std::vector<Point3> points_search;
tree.search(std::back_inserter(points_search), s_query);
if(points_search.size()!=0){
Point3 new_Point = points_search[rand()%points_search.size()];
//reject a point already present in the triplet
bool is_valid = true;
for(int i=0; i<picked_points; i++){
if(new_Point == triplet(i)){
is_valid = false;
}
}
if(is_valid){
triplet(picked_points) = new_Point;
picked_points ++;
}
}
}
}
/*!
* fills a vector of Point Triplets
* Consumes the pre-drawn random points/ints in lockstep; if they run out before
* plane_number triplets are complete, the remainder is built by find_a_triplet.
* @param triplets - table of triplets to fill
* @param plane_number - number of triplets
* @param radius1 - radius of the neighborhood sphere
* @param radius2 - small radius for triplet selection
* @param tree - kdtree of the neighborhood
* @param pt - point where the normal is computed
* @param points - table of random points in the unit ball
* @param vecInt - table of random integers
*/
inline void generate_list_of_triplets(std::vector<Triplet> &triplets,
int plane_number, float radius1,
float radius2,Tree &tree,
Point3 & pt,
std::vector<Point3> &points,
std::vector<int> &vecInt){
triplets.resize(plane_number);
vecTripIterator ittrip = triplets.begin();
vecPt3Iterator itpoints = points.begin();
std::vector<int>::iterator itint = vecInt.begin();
// idx = slots of the current triplet already filled (0..2)
int idx = 0;
while(ittrip != triplets.end() && itpoints!= points.end()){
//getting coordinates: scale the unit-ball sample to radius1 and center on pt
float x,y,z;
x = (*itpoints).x()*radius1 + pt.x();
y = (*itpoints).y()*radius1 + pt.y();
z = (*itpoints).z()*radius1 + pt.z();
//searching neighbors of the point
Point3 refPoint(x,y,z);
Fuzzy_sphere s_query(refPoint,radius2);
std::vector<Point3> points_search;
tree.search(std::back_inserter(points_search), s_query);
//testing the validity (reject duplicates within the same triplet)
if(points_search.size()!=0){
Point3 new_point = points_search[(*itint)%points_search.size()];
bool is_valid = true;
for(int i=0; i<idx; i++){
if(new_point == (*ittrip)(i)){
is_valid = false;
}
}
if(is_valid){
(*ittrip)(idx) = new_point;
idx ++;
}
if(idx == 3){
idx =0;
ittrip ++;
}
}
itpoints++;
itint++;
}
//fallback: pre-drawn randomness exhausted, finish with live rand() draws
while(ittrip != triplets.end()){
find_a_triplet(tree, radius1, radius2, *ittrip,pt);
ittrip++;
}
}
/*!
* Compute the normal by filling an accumulator for a given neighborhood
* @param d1 - First dimension of the accumulator
* @param d2 - Second dimension of the accumulator
* @param points - table of neighbors
* @param n - index of the point where the normal is computed
* @param triplets - table of triplets (points indices)
* @param conf_interv - table of confidence intervals
*/
float normal_at_point(
const int d1, const int d2,
std::vector<Point3> &points,
int n,
std::vector<Vector3> &triplets,
std::vector<float> &conf_interv){
if(points.size() < 3){
nls[n]=Vector3(0,0,0);
return 0;
}
//creation and initialization accumulators
float *votes = new float[d1*d2];
Vector3 *votesV = new Vector3[d1*d2];
for(int i=0; i<d1; i++){
for(int j=0; j<d2; j++){
votes[i+j*d1]=0;
votesV[i+j*d1] = Vector3(0,0,0);
}
}
float max1 = 0, max2=0;
int i1=0, i2=0;
int j1=0, j2=0;
float votes_val;
for(int n_try=0; n_try< n_planes; n_try++){
int p0 = triplets[n_try][0];
int p1 = triplets[n_try][1];
int p2 = triplets[n_try][2];
Vector3 v1 = points[p1]-points[p0];
Vector3 v2 = points[p2]-points[p0];
Vector3 Pn = CGAL::cross_product(v1,v2);
Pn = Pn / sqrt(Pn*Pn);
if(Pn*(points[p0]-CGAL::ORIGIN)>0){
Pn = -Pn;
}
float phi;
phi = acos((float)Pn[2]);
float dphi = PI/n_phi;
int posp, post;
posp = int(floor( (phi+dphi/2.) *n_phi/ PI));
if(posp == 0 || posp== n_phi){
post =0;
}else{
float theta = acos((float)Pn[0]/sqrt(float(Pn[0]*Pn[0]+Pn[1]*Pn[1])));
if(Pn[1]<0){
theta *= -1;
theta += 2*PI;
}
float dtheta = PI/(n_phi*sin(posp*dphi));
post = (int)(floor((theta+dtheta/2)/dtheta))%(2*n_phi);
}
post = std::max(0,std::min(2*n_phi-1,post));
posp = std::max(0,std::min(n_phi,posp));
votes[post+posp*d1] += 1.;
votesV[post+posp*d1] = Pn + votesV[post+posp*d1];
max1 = votes[i1+j1*d1]/(n_try+1);
max2 = votes[i2+j2*d1]/(n_try+1);
votes_val = votes[post+posp*d1]/(n_try+1);
if(votes_val > max1){
max2 = max1;
i2 = i1;
j2 = j1;
max1 = votes_val;
i1 = post;
j1 = posp;
}else if(votes_val>max2 && post!= i1 && posp!=j1){
max2 = votes_val;
i2 = post;
j2 = posp;
}
if(max1-conf_interv[n_try] > max2){
break;
}
}
nls[n] = votesV[i1+j1*d1] / sqrt(votesV[i1+j1*d1]*votesV[i1+j1*d1]);
delete[] votes;
delete[] votesV;
return max1;
}
/*!
* Compute the normal by filling an accumulator for a given neighborhood
* @param d1 - First dimension of the accumulator
* @param d2 - Second dimension of the accumulator
* @param points_size - size of the neighborhood
* @param n - index of the point where the normal is computed
* @param triplets - table of triplets (Points coordinates)
* @param conf_interv - table of confidence intervals
*/
float normal_at_point(
const int d1, const int d2,
int points_size,
int n,
std::vector<Triplet> &triplets,
std::vector<float> &conf_interv){
if(points_size < 3){
nls[n]=Vector3(0,0,0);
return 0;
}
//creation and initialization accumulators
float *votes = new float[d1*d2];
Vector3 *votesV = new Vector3[d1*d2];
for(int i=0; i<d1; i++){
for(int j=0; j<d2; j++){
votes[i+j*d1]=0;
votesV[i+j*d1] = Vector3(0,0,0);
}
}
float max1 = 0, max2=0;
int i1=0, i2=0;
int j1=0, j2=0;
float votes_val;
for(int n_try=0; n_try< n_planes; n_try++){
Point3 p0 = triplets[n_try](0);
Point3 p1 = triplets[n_try](1);
Point3 p2 = triplets[n_try](2);
Vector3 v1 = p1-p0;
Vector3 v2 = p2-p0;
Vector3 Pn = CGAL::cross_product(v1,v2);
Pn = Pn / sqrt(Pn*Pn);
if(Pn*(p0-CGAL::ORIGIN)>0){
Pn = -Pn;
}
float phi;
phi = acos((float)Pn[2]);
float dphi = PI/n_phi;
int posp, post;
posp = int(floor( (phi+dphi/2.) *n_phi/ PI));
if(posp == 0 || posp== n_phi){
post =0;
}else{
float theta = acos((float)Pn[0]/sqrt(float(Pn[0]*Pn[0]+Pn[1]*Pn[1])));
if(Pn[1]<0){
theta *= -1;
theta += 2*PI;
}
float dtheta = PI/(n_phi*sin(posp*dphi));
post = (int)(floor((theta+dtheta/2)/dtheta))%(2*n_phi);
}
post = std::max(0,std::min(2*n_phi-1,post));
posp = std::max(0,std::min(n_phi,posp));
votes[post+posp*d1] += 1.;
votesV[post+posp*d1] = Pn + votesV[post+posp*d1];
max1 = votes[i1+j1*d1]/(n_try+1);
max2 = votes[i2+j2*d1]/(n_try+1);
votes_val = votes[post+posp*d1]/(n_try+1);
if(votes_val > max1){
max2 = max1;
i2 = i1;
j2 = j1;
max1 = votes_val;
i1 = post;
j1 = posp;
}else if(votes_val>max2 && post!= i1 && posp!=j1){
max2 = votes_val;
i2 = post;
j2 = posp;
}
if(max1-conf_interv[n_try] > max2){
break;
}
}
nls[n]= votesV[i1+j1*d1]/sqrt(votesV[i1+j1*d1]*votesV[i1+j1*d1]);
delete[] votes;
delete[] votesV;
return max1;
}
/*!
* Compute the normal depending of the estimation choice (mean, best, cluster)
* All per-rotation normals are first flipped into the hemisphere of the first one,
* then combined according to the member `selection_type`:
*   1 = best (highest confidence), 2 = "mb" (greedy angular clustering, best cluster),
*   default = confidence-weighted mean. The result is normalized before return.
* @param rotations - number of rotations
* @param normals_vec - table of estimated normals for the point (reoriented in place)
* @param normals_conf - table of the confidence of normals
*/
inline Vector3 normal_selection(int &rotations,
std::vector<Vector3> &normals_vec, std::vector<float> &normals_conf){
std::vector<bool> normals_use(rotations);
//init normals_use and reorient normals (consistent hemisphere w.r.t. normals_vec[0])
normals_use[0] = true;
for(int i=1; i<rotations; i++){
normals_use[i] = true;
if(normals_vec[0] *normals_vec[i]<0){
normals_vec[i] = -normals_vec[i];
}
}
Vector3 normal_final;
switch(selection_type){
case 1: //best: keep the single most confident normal
{
float confidence_final=0;
for(int i=0; i<rotations; i++){
if(normals_conf[i]>confidence_final){
confidence_final = normals_conf[i];
normal_final = normals_vec[i];
}
}
}
break;
case 2: //mb: greedy clustering by angular proximity, pick the heaviest cluster
{
std::vector<std::pair<Vector3, float> > normals_fin;
int number_to_test = rotations;
while(number_to_test>0){
//getting the max among the normals not yet assigned to a cluster
float max_conf=0;
int idx = 0;
for(int i=0; i<rotations; i++){
if(normals_use[i] && normals_conf[i]> max_conf){
max_conf = normals_conf[i];
idx = i;
}
}
//start a new cluster seeded by the max, weighted by its confidence
normals_fin.push_back(std::pair<Vector3, float>(normals_vec[idx]*normals_conf[idx], normals_conf[idx]));
normals_use[idx] = false;
number_to_test--;
//absorb every remaining normal within tol_angle_rad of the seed
for(int i=0; i<rotations; i++){
if(normals_use[i] && acos(normals_vec[idx]*normals_vec[i])< tol_angle_rad){
normals_use[i] = false;
number_to_test --;
normals_fin.back().first = normals_fin.back().first+normals_vec[i]*normals_conf[i];
normals_fin.back().second += normals_conf[i];
}
}
}
//keep the cluster with the highest total confidence
normal_final = normals_fin[0].first;
float conf_fin = normals_fin[0].second;
for(unsigned int i=1; i<normals_fin.size(); i++){
if(normals_fin[i].second> conf_fin){
conf_fin = normals_fin[i].second;
normal_final = normals_fin[i].first;
}
}
}
break;
default: //mean: confidence-weighted average of all rotations
{
normal_final = normals_conf[0]*normals_vec[0];
for(int i=1; i<rotations; i++){
normal_final = normal_final + normals_conf[i]*normals_vec[i];
}
}
break;
}
//return the unit-length result
return normal_final /sqrt(normal_final*normal_final);
}
/*!
* \brief Knn search, Points draw of the planes
* Normal Estimation using a k-nearest neighbor search, planes are drawn directly upon the points of the neighborhood
* For each input point: gather its k nearest neighbors, then for each random rotation
* vote for a normal via normal_at_point and finally combine the per-rotation normals
* with normal_selection. Results are written to the member `nls`.
* @param neighbor_number : number of neighbors
*/
void points_knn(int neighbor_number){
//initialize the random number generator
srand((unsigned int)time(NULL));
nls.resize(pts.size());
//dimensions of the accumulator
const int d1 = 2*n_phi;
const int d2 = n_phi+1;
//creation of the rotation matrices and their inverses
std::vector<Matrix3> rotMat;
std::vector<Matrix3> rotMatInv;
generate_rotation_matrix(rotMat,rotMatInv, n_rot*200);
//n_rot==0 means "no extra rotations": still run one (identity-equivalent) pass
int rotations;
if(n_rot==0){
rotations = 1;
}else{
rotations = n_rot;
}
//confidence intervals (2 intervals length)
std::vector<float> conf_interv(n_planes);
for(int i=0; i<n_planes; i++){
conf_interv[i] = 2.f/std::sqrt(i+1.f);
}
std::vector<int> vecInt;
generate_random_int_vector(vecInt, 1000000);
//creating the list of triplets (one slice of n_planes triplets per rotation)
std::vector<Vector3> trip;
if(rotations <=1){
list_of_triplets(trip, neighbor_number,n_planes,vecInt);
}else{
list_of_triplets(trip, neighbor_number,rotations*n_planes,vecInt);
}
//NOTE: the braces below are split across the #if/#else on purpose — with OpenMP each
//thread builds its own kdtree inside the parallel region; do not "rebalance" them.
#if defined(_OPENMP) && defined(USE_OPENMP_FOR_NORMEST)
#pragma omp parallel
{
Tree tree(pts.begin(), pts.end());
#pragma omp for schedule(guided)
#else
Tree tree(pts.begin(), pts.end());
#endif
for(int n=0; n<(int)pts.size(); n++){
std::vector<Point3>points(neighbor_number);
std::vector<Point3>points2(neighbor_number);
int points_size = 0;
//getting the list of neighbors
Neighbor_search search(tree, pts[n], neighbor_number);
for(Neighbor_search_iterator it = search.begin(); it != search.end() && points_size < neighbor_number; ++it){
points[points_size] = it->first;
points2[points_size] = it->first;
points_size++;
}
std::vector<Vector3> normals_vec(rotations);
std::vector<float> normals_conf(rotations);
for(int i=0; i<rotations; i++){
//slice of triplets dedicated to this rotation
vecVec3Iterator first = trip.begin() + i*n_planes;
vecVec3Iterator last = trip.begin() + (i+1)*n_planes;
std::vector<Vector3> triplets(first, last);
//rotate the neighborhood (points2 keeps the originals)
for(unsigned int pt= 0; pt < points_size; pt++){
points[pt]=points2[pt].transform(rotMat[(n+i)%rotMat.size()]);
}
normals_conf[i] = normal_at_point(d1, d2,points, n, triplets, conf_interv);
//rotate the estimated normal back into the original frame
normals_vec[i] = nls[n].transform(rotMatInv[(n+i)%rotMat.size()]);
}
nls[n] = normal_selection( rotations, normals_vec,normals_conf);
}
#if defined(_OPENMP) && defined(USE_OPENMP_FOR_NORMEST)
}
#endif
}
/*!
* \brief Radius search, Points draw of the planes
* Normal Estimation using a range neighbor search, planes are drawn directly upon the points of the neighborhood
* Like points_knn but the neighborhood is a sphere of the given radius; if it holds
* fewer than lower_neighbor_bound_neighbors points, a knn fallback fills it instead.
* @param radius : range radius for neighborhood search
*/
void points_radius(float radius){
//initialize the random number generator
srand((unsigned int)time(NULL));
nls.resize(pts.size());
//dimensions of the accumulator
const int d1 = 2*n_phi;
const int d2 = n_phi+1;
//creation of the rotation matrices and their inverses
std::vector<Matrix3> rotMat;
std::vector<Matrix3> rotMatInv;
generate_rotation_matrix(rotMat,rotMatInv, n_rot*200);
//n_rot==0 means "no extra rotations": still run one pass
int rotations;
if(n_rot==0){
rotations = 1;
}else{
rotations = n_rot;
}
//confidence intervals (2 intervals length)
std::vector<float> conf_interv(n_planes);
for(int i=0; i<n_planes; i++){
conf_interv[i] = 2.f/std::sqrt(i+1.f);
}
std::vector<int> vecInt;
generate_random_int_vector(vecInt, 1000000);
//NOTE: braces split across the #if/#else are intentional (per-thread kdtree under OpenMP)
#if defined(_OPENMP) && defined(USE_OPENMP_FOR_NORMEST)
#pragma omp parallel
{
Tree tree(pts.begin(), pts.end());
#pragma omp for schedule(guided)
#else
Tree tree(pts.begin(), pts.end());
#endif
for(int n=0; n<(int)pts.size(); n++){
//std::cout << n<<std::endl;
std::vector<Point3>points2;
Fuzzy_sphere s_query(pts[n],radius);
//getting the list of neighbors
tree.search(std::back_inserter(points2), s_query);
int points_size = (int) points2.size();
//fallback: neighborhood too sparse, use knn instead of the radius query
if(points_size <lower_neighbor_bound_neighbors){
points2.clear();
//getting the list of neighbors
Neighbor_search search(tree, pts[n], lower_neighbor_bound_neighbors);
for(Neighbor_search_iterator it = search.begin(); it != search.end() && points_size < lower_neighbor_bound_neighbors; ++it){
points2.push_back(it->first);
points_size++;
}
points_size = points2.size();
}
//skip points whose neighborhood cannot supply n_planes distinct ordered triplets
unsigned long int max_number_comb = points_size;
max_number_comb*=points_size-1;
max_number_comb*=points_size-2;
if(max_number_comb < n_planes){continue;}
//creating the list of triplets (one slice of n_planes triplets per rotation)
std::vector<Vector3> trip;
if(rotations <=1){
list_of_triplets(trip, points_size,n_planes,vecInt);
}else{
list_of_triplets(trip, points_size,rotations*n_planes,vecInt);
}
std::vector<Vector3> normals_vec(rotations);
std::vector<float> normals_conf(rotations);
std::vector<Point3> points(points_size);
for(int i=0; i<rotations; i++){
vecVec3Iterator first = trip.begin() + i*n_planes;
vecVec3Iterator last = trip.begin() + (i+1)*n_planes;
std::vector<Vector3> triplets(first, last);
//rotate the neighborhood (points2 keeps the originals)
for(unsigned int pt= 0; pt < points_size; pt++){
points[pt]=points2[pt].transform(rotMat[(n+i)%rotMat.size()]);
}
normals_conf[i] = normal_at_point( d1, d2,points, n, triplets, conf_interv);
//rotate the estimated normal back into the original frame
normals_vec[i] = nls[n].transform(rotMatInv[(n+i)%rotMat.size()]);
}
nls[n] = normal_selection( rotations, normals_vec,normals_conf);
}
#if defined(_OPENMP) && defined(USE_OPENMP_FOR_NORMEST)
}
#endif
}
/*!
* \brief Knn search, Uniform draw of the planes
* Normal Estimation using a k-nearest neighbor search, planes are drawn uniformly in the neighborhood sphere
* Triplets are built from uniform samples of the neighborhood ball (via a local kdtree)
* rather than directly from the neighbor points.
* @param neighbor_number : number of neighbors
*/
void unif_knn(int neighbor_number)
{
//initialize the random number generator
srand((unsigned int)time(NULL));
//resizing the normal point cloud
nls.resize(pts.size());
Tree tree(pts.begin(), pts.end());
//dimensions of the accumulator
const int d1 = 2*n_phi;
const int d2 = n_phi+1;
//confidence intervals (2 intervals length)
std::vector<float> conf_interv(n_planes);
for(int i=0; i<n_planes; i++){
conf_interv[i] = 2.f/std::sqrt(i+1.f);
}
//creation of the rotation matrices and their inverses
std::vector<Matrix3> rotMat;
std::vector<Matrix3> rotMatInv;
generate_rotation_matrix(rotMat,rotMatInv, n_rot*200);
//n_rot==0 means "no extra rotations": still run one pass
int rotations;
if(n_rot==0){
rotations = 1;
}else{
rotations = n_rot;
}
//creation of vector of int and points (pre-drawn randomness, shared by all threads)
std::vector<Point3> points_rand;
std::vector<int> vecInt;
generate_random_points_vector(points_rand, 1000000);
generate_random_points_vector — unit-ball samples; vecInt — raw ints
generate_random_int_vector(vecInt, 1000000);
#if defined(_OPENMP) && defined(USE_OPENMP_FOR_NORMEST)
#pragma omp parallel for schedule(guided)
#endif
for(int n=0; n<(int) pts.size(); n++){
std::vector<Point3>points(neighbor_number);
int points_size = 0;
float radius = 0;
//the shared kdtree query is not thread-safe here: serialize it
#if defined(_OPENMP) && defined(USE_OPENMP_FOR_NORMEST)
#pragma omp critical
#endif
{
//getting the list of neighbors (radius tracks the squared distance of the farthest)
Neighbor_search search(tree, pts[n], neighbor_number);
for(Neighbor_search_iterator it = search.begin(); it != search.end() && points_size < neighbor_number; ++it){
points[points_size] = it->first;
points_size++;
if(radius < it->second){
radius = it->second;
}
}
}
//not enough neighbors: leave nls[n] untouched
if(points_size != neighbor_number){continue;}
radius = sqrt(radius);
float s_radius = radius / small_radius_factor;
//point cloud of neighbors and kdtree creation
Tree tree_neighbors(points.begin(), points.end());
//creating the list of triplets (one slice of n_planes per rotation)
std::vector<Triplet> trip;
generate_list_of_triplets(trip,rotations*n_planes,radius,s_radius,tree_neighbors,pts[n], points_rand,vecInt);
std::vector<Vector3> normals_vec(rotations);
std::vector<float> normals_conf(rotations);
for(int i=0; i<rotations; i++){
vecTripIterator first = trip.begin() + i*n_planes;
vecTripIterator last = trip.begin() + (i+1)*n_planes;
std::vector<Triplet> triplets(first, last);
//rotate each triplet's three points into the rotation frame
for(unsigned int tr=0; tr <triplets.size(); tr++){
triplets[tr](0)=triplets[tr](0).transform(rotMat[(n+i)%rotMat.size()]);
triplets[tr](1)=triplets[tr](1).transform(rotMat[(n+i)%rotMat.size()]);
triplets[tr](2)=triplets[tr](2).transform(rotMat[(n+i)%rotMat.size()]);
}
normals_conf[i] = normal_at_point( d1, d2,points_size, n, triplets, conf_interv);
//rotate the estimated normal back into the original frame
normals_vec[i] =nls[n].transform(rotMatInv[(n+i)%rotMat.size()]);
}
nls[n] = normal_selection( rotations, normals_vec,normals_conf);
}
}
/*!
* \brief Radius search, Uniform draw of the planes
* Normal Estimation using a range neighbor search, planes are drawn uniformly in the neighborhood sphere
* Like unif_knn but the neighborhood is a sphere of the given radius; sparse
* neighborhoods fall back to a knn query of lower_neighbor_bound_neighbors points.
* @param radius : range radius for neighborhood search
*/
void unif_radius(float radius)
{
//initialize the random number generator
srand((unsigned int)time(NULL));
//resizing the normal point cloud
nls.resize(pts.size());
Tree tree(pts.begin(), pts.end());
//dimensions of the accumulator
const int d1 = 2*n_phi;
const int d2 = n_phi+1;
//confidence intervals (2 intervals length)
std::vector<float> conf_interv(n_planes);
for(int i=0; i<n_planes; i++){
conf_interv[i] = 2.f/std::sqrt(i+1.f);
}
//creation of the rotation matrices and their inverses
std::vector<Matrix3> rotMat;
std::vector<Matrix3> rotMatInv;
generate_rotation_matrix(rotMat,rotMatInv, n_rot*200);
//n_rot==0 means "no extra rotations": still run one pass
int rotations;
if(n_rot==0){
rotations = 1;
}else{
rotations = n_rot;
}
//creation of vector of int and points (pre-drawn randomness, shared by all threads)
std::vector<Point3> points_rand;
std::vector<int> vecInt;
generate_random_points_vector(points_rand, 1000000);
generate_random_int_vector(vecInt, 1000000);
#if defined(_OPENMP) && defined(USE_OPENMP_FOR_NORMEST)
#pragma omp parallel for schedule(guided)
#endif
for(int n=0; n<(int) pts.size(); n++){
std::vector<Point3> points;
Fuzzy_sphere s_query(pts[n],radius);
//the shared kdtree query is not thread-safe here: serialize it
#if defined(_OPENMP) && defined(USE_OPENMP_FOR_NORMEST)
#pragma omp critical
#endif
{
tree.search(std::back_inserter(points), s_query);
}
int points_size = points.size();
float radius2 = radius;
//fallback: neighborhood too sparse, switch to knn and recompute the radius
if(points_size <lower_neighbor_bound_neighbors){
radius2 = 0;
points.clear();
#if defined(_OPENMP) && defined(USE_OPENMP_FOR_NORMEST)
#pragma omp critical
#endif
{
//getting the list of neighbors (radius2 tracks the squared farthest distance)
Neighbor_search search(tree, pts[n], lower_neighbor_bound_neighbors);
for(Neighbor_search_iterator it = search.begin(); it != search.end() && points_size < lower_neighbor_bound_neighbors; ++it){
points.push_back(it->first);
points_size++;
if(radius2 < it->second){
radius2 = it->second;
}
}
}
points_size = points.size();
}
float s_radius = radius2 / small_radius_factor;
//point cloud of neighbors and kdtree creation
Tree tree_neighbors(points.begin(), points.end());
//creating the list of triplets (one slice of n_planes per rotation)
std::vector<Triplet> trip;
generate_list_of_triplets(trip,rotations*n_planes,radius2,s_radius,tree_neighbors,pts[n], points_rand,vecInt);
std::vector<Vector3> normals_vec(rotations);
std::vector<float> normals_conf(rotations);
for(int i=0; i<rotations; i++){
vecTripIterator first = trip.begin() + i*n_planes;
vecTripIterator last = trip.begin() + (i+1)*n_planes;
std::vector<Triplet> triplets(first, last);
//rotate each triplet's three points into the rotation frame
for(unsigned int tr=0; tr <triplets.size(); tr++){
triplets[tr](0)=triplets[tr](0).transform(rotMat[(n+i)%rotMat.size()]);
triplets[tr](1)=triplets[tr](1).transform(rotMat[(n+i)%rotMat.size()]);
triplets[tr](2)=triplets[tr](2).transform(rotMat[(n+i)%rotMat.size()]);
}
normals_conf[i] = normal_at_point( d1, d2,points_size, n, triplets, conf_interv);
//rotate the estimated normal back into the original frame
normals_vec[i] = nls[n].transform(rotMatInv[(n+i)%rotMat.size()]);
}
nls[n] = normal_selection( rotations, normals_vec,normals_conf);
}
}
/*!
* \brief Knn search, Cubes draw of the planes
* Normal Estimation using a k-nearest neighbor search, planes are drawn using a cubic discretization of the neighborhood sphere
* Neighbors are binned into an n_cubes^3 grid; triplets pick one random point per
* randomly-drawn cube (cubes weighted by sphere intersection, see generate_cube_vector).
* @param neighbor_number : number of neighbors
*/
void cubes_knn(int neighbor_number)
{
//resizing the normal point cloud
nls.resize(pts.size());
//initialize the random number generator
srand((unsigned int)time(NULL));
//dimensions of the accumulator
const int d1 = 2*n_phi;
const int d2 = n_phi+1;
//confidence intervals (2 intervals length)
std::vector<float> conf_interv(n_planes);
for(int i=0; i<n_planes; i++){
conf_interv[i] = 2.f/std::sqrt(i+1.f);
}
//creation of the rotation matrices and their inverses
std::vector<Matrix3> rotMat;
std::vector<Matrix3> rotMatInv;
generate_rotation_matrix(rotMat,rotMatInv, n_rot*200);
//n_rot==0 means "no extra rotations": still run one pass
int rotations;
if(n_rot==0){
rotations = 1;
}else{
rotations = n_rot;
}
//creation of vector of cubes and int (pre-drawn randomness, shared by all threads)
std::vector<int> cubes_idx;
std::vector<int> vecInt;
generate_cube_vector(cubes_idx, 1000000);
generate_random_int_vector(vecInt, 1000000);
//NOTE: braces split across the #if/#else are intentional (per-thread kdtree under OpenMP)
#if defined(_OPENMP) && defined(USE_OPENMP_FOR_NORMEST)
#pragma omp parallel
{
Tree tree(pts.begin(), pts.end());
#pragma omp for schedule(guided)
#else
Tree tree(pts.begin(), pts.end());
#endif
for(int n=0; n<(int)pts.size(); n++){
std::vector<Point3>points2(neighbor_number);
std::vector<Point3> points(neighbor_number);
int points_size = 0;
float radius = 0;
//getting the list of neighbors (radius tracks the squared farthest distance)
Neighbor_search search(tree, pts[n], neighbor_number);
for(Neighbor_search_iterator it = search.begin(); it != search.end() && points_size < neighbor_number; ++it){
points2[points_size] = it->first;
points[points_size] = it->first;
points_size++;
if(radius < it->second){
radius = it->second;
}
}
//not enough neighbors: leave nls[n] untouched
if(points_size != neighbor_number){continue;}
radius = sqrt(radius);
std::vector<int>* cubes = new std::vector<int>[n_cubes*n_cubes*n_cubes];
std::vector<Vector3> triplets;
std::vector<Vector3> normals_vec(rotations);
std::vector<float> normals_conf(rotations);
for(int i=0; i<rotations; i++){
//bin the rotated neighborhood into the cube grid, then draw triplets from it
assign_points_to_cubes(cubes,radius,pts[n], points,points2,rotMat[(n+i)%rotMat.size()]);
generate_cubes_triplets(triplets, cubes, cubes_idx, vecInt);
//cout << "1" << endl;
normals_conf[i] = normal_at_point( d1, d2,points, n, triplets, conf_interv);
//cout << "2" << endl;
//restore the unrotated points for the next rotation
for(unsigned int pt= 0; pt < points_size; pt++){
points[pt]=points2[pt];
}
//rotate the estimated normal back into the original frame
normals_vec[i]= nls[n].transform(rotMatInv[(n+i)%rotMat.size()]);
}
nls[n] = normal_selection( rotations, normals_vec,normals_conf);
delete[] cubes;
}
#if defined(_OPENMP) && defined(USE_OPENMP_FOR_NORMEST)
}
#endif
}
/*!
* \brief Radius search, Cubes draw of the planes
* Normal Estimation using a range neighbor search, planes are drawn using a cubic discretization of the neighborhood sphere
* Like cubes_knn but the neighborhood is a sphere of the given radius; sparse
* neighborhoods fall back to a knn query of lower_neighbor_bound_neighbors points.
* @param radius : range radius for neighborhood search
*/
void cubes_radius(float radius)
{
//resizing the normal point cloud
nls.resize(pts.size());
//initialize the random number generator
srand((unsigned int)time(NULL));
//dimensions of the accumulator
const int d1 = 2*n_phi;
const int d2 = n_phi+1;
//confidence intervals (2 intervals length)
std::vector<float> conf_interv(n_planes);
for(int i=0; i<n_planes; i++){
conf_interv[i] = 2.f/std::sqrt(i+1.f);
}
//creation of the rotation matrices and their inverses
std::vector<Matrix3> rotMat;
std::vector<Matrix3> rotMatInv;
generate_rotation_matrix(rotMat,rotMatInv, n_rot*200);
//n_rot==0 means "no extra rotations": still run one pass
int rotations;
if(n_rot==0){
rotations = 1;
}else{
rotations = n_rot;
}
//creation of vector of cubes and int (pre-drawn randomness, shared by all threads)
std::vector<int> cubes_idx;
std::vector<int> vecInt;
generate_cube_vector(cubes_idx, 1000000);
generate_random_int_vector(vecInt, 1000000);
//NOTE: braces split across the #if/#else are intentional (per-thread kdtree under OpenMP)
#if defined(_OPENMP) && defined(USE_OPENMP_FOR_NORMEST)
#pragma omp parallel
{
Tree tree(pts.begin(), pts.end());
#pragma omp for schedule(guided)
#else
Tree tree(pts.begin(), pts.end());
#endif
for(int n=0; n<(int)pts.size(); n++){
std::vector<Point3>points2;
Fuzzy_sphere s_query(pts[n],radius);
tree.search(std::back_inserter(points2), s_query);
int points_size = points2.size();
float radius2 = radius;
//fallback: neighborhood too sparse, switch to knn and recompute the radius
if(points_size <lower_neighbor_bound_neighbors){
radius2 = 0;
points2.clear();
//getting the list of neighbors (radius2 tracks the squared farthest distance)
Neighbor_search search(tree, pts[n], lower_neighbor_bound_neighbors);
for(Neighbor_search_iterator it = search.begin(); it != search.end() && points_size < lower_neighbor_bound_neighbors; ++it){
points2.push_back(it->first);
points_size++;
if(radius2 < it->second){
radius2 = it->second;
}
}
points_size = points2.size();
}
std::vector<int>* cubes = new std::vector<int>[n_cubes*n_cubes*n_cubes];
std::vector<Vector3> triplets;
std::vector<Vector3> normals_vec(rotations);
std::vector<float> normals_conf(rotations);
std::vector<Point3> points(points_size);
for(int i=0; i<rotations; i++){
//bin the rotated neighborhood into the cube grid, then draw triplets from it
assign_points_to_cubes(cubes,radius2,pts[n], points,points2,rotMat[(n+i)%rotMat.size()]);
generate_cubes_triplets(triplets, cubes, cubes_idx, vecInt);
//cout << "1" << endl;
normals_conf[i] = normal_at_point( d1, d2,points, n, triplets, conf_interv);
//cout << "2" << endl;
//restore the unrotated points for the next rotation
for(unsigned int pt= 0; pt < points_size; pt++){
points[pt]=points2[pt];
}
//rotate the estimated normal back into the original frame
normals_vec[i] = nls[n].transform(rotMatInv[(n+i)%rotMat.size()]);
}
nls[n] = normal_selection( rotations, normals_vec,normals_conf);
delete[] cubes;
}
#if defined(_OPENMP) && defined(USE_OPENMP_FOR_NORMEST)
}
#endif
}
};
#endif
|
convolution_1x1_pack4to1.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// A 1x1 stride-1 convolution is a plain GEMM over all spatial positions:
// collapse the (w, h) plane into a single row of w*h columns and delegate
// to the packed im2col-SGEMM kernel.
static void conv1x1s1_sgemm_pack4to1_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    const int spatial = bottom_blob.w * bottom_blob.h;

    // shallow copy: only the logical shape changes, the data is shared
    Mat bottom_im2col = bottom_blob;
    bottom_im2col.w = spatial;
    bottom_im2col.h = 1;

    im2col_sgemm_pack4to1_msa(bottom_im2col, top_blob, kernel, _bias, opt);
}
// 1x1 stride-2 convolution: first shrink the input by copying every second
// element in both directions (pack4 layout), then run the stride-1 sgemm kernel.
static void conv1x1s2_pack4to1_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
// after reading outw columns with stride 2 we advanced 2*outw floats-of-4;
// skip the rest of the row (w - 2*outw) plus the whole next row (w), x4 for pack4
const int tailstep = (w - 2 * outw + w) * 4;
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < channels; p++)
{
const float* r0 = bottom_blob.channel(p);
float* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
// copy one pack4 element, then skip the next one (horizontal stride 2)
v4f32 _val = (v4f32)__msa_ld_w(r0, 0);
__msa_st_w((v4i32)_val, outptr, 0);
r0 += 4 * 2;
outptr += 4;
}
r0 += tailstep;
}
}
conv1x1s1_sgemm_pack4to1_msa(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
simd_utils_avx_int32.h | /*
* Project : SIMD_Utils
* Version : 0.1.12
* Author : JishinMaster
* Licence : BSD-2
*/
#pragma once
#include <stdint.h>
#include "immintrin.h"
#ifdef __AVX2__
// dst[i] = src1[i] + src2[i] over len int32 elements, 8 lanes per AVX2 step.
static inline void add256s(int32_t *src1, int32_t *src2, int32_t *dst, int len)
{
    const int vec_len = (len / AVX_LEN_INT32) * AVX_LEN_INT32;

    if (areAligned3((uintptr_t) (src1), (uintptr_t) (src2), (uintptr_t) (dst), AVX_LEN_BYTES)) {
        for (int idx = 0; idx < vec_len; idx += AVX_LEN_INT32) {
            __m256i a = _mm256_load_si256((__m256i *) (src1 + idx));
            __m256i b = _mm256_load_si256((__m256i *) (src2 + idx));
            _mm256_store_si256((__m256i *) (dst + idx), _mm256_add_epi32(a, b));
        }
    } else {
        for (int idx = 0; idx < vec_len; idx += AVX_LEN_INT32) {
            __m256i a = _mm256_loadu_si256((__m256i *) (src1 + idx));
            __m256i b = _mm256_loadu_si256((__m256i *) (src2 + idx));
            _mm256_storeu_si256((__m256i *) (dst + idx), _mm256_add_epi32(a, b));
        }
    }

    // Scalar tail for the remaining len % AVX_LEN_INT32 elements.
    for (int idx = vec_len; idx < len; idx++) {
        dst[idx] = src1[idx] + src2[idx];
    }
}
// dst[i] = src1[i] * src2[i] (low 32 bits of the product, as in the
// scalar tail below).
// BUG FIX: the previous code used _mm256_mul_epi32, which multiplies only
// the even-indexed int32 lanes and widens to 64-bit products — it does NOT
// perform an element-wise int32 multiply. _mm256_mullo_epi32 returns the
// low 32 bits of every lane's product, matching src1[i] * src2[i].
static inline void mul256s(int32_t *src1, int32_t *src2, int32_t *dst, int len)
{
    int stop_len = len / AVX_LEN_INT32;
    stop_len *= AVX_LEN_INT32;

    if (areAligned3((uintptr_t) (src1), (uintptr_t) (src2), (uintptr_t) (dst), AVX_LEN_BYTES)) {
        for (int i = 0; i < stop_len; i += AVX_LEN_INT32) {
            _mm256_store_si256((__m256i *) (dst + i), _mm256_mullo_epi32(_mm256_load_si256((__m256i *) (src1 + i)), _mm256_load_si256((__m256i *) (src2 + i))));
        }
    } else {
        for (int i = 0; i < stop_len; i += AVX_LEN_INT32) {
            _mm256_storeu_si256((__m256i *) (dst + i), _mm256_mullo_epi32(_mm256_loadu_si256((__m256i *) (src1 + i)), _mm256_loadu_si256((__m256i *) (src2 + i))));
        }
    }

    // Scalar tail.
    for (int i = stop_len; i < len; i++) {
        dst[i] = src1[i] * src2[i];
    }
}
// dst[i] = src1[i] - src2[i] over len int32 elements, 8 lanes per AVX2 step.
static inline void sub256s(int32_t *src1, int32_t *src2, int32_t *dst, int len)
{
    const int vec_len = (len / AVX_LEN_INT32) * AVX_LEN_INT32;

    if (areAligned3((uintptr_t) (src1), (uintptr_t) (src2), (uintptr_t) (dst), AVX_LEN_BYTES)) {
        for (int idx = 0; idx < vec_len; idx += AVX_LEN_INT32) {
            __m256i a = _mm256_load_si256((__m256i *) (src1 + idx));
            __m256i b = _mm256_load_si256((__m256i *) (src2 + idx));
            _mm256_store_si256((__m256i *) (dst + idx), _mm256_sub_epi32(a, b));
        }
    } else {
        for (int idx = 0; idx < vec_len; idx += AVX_LEN_INT32) {
            __m256i a = _mm256_loadu_si256((__m256i *) (src1 + idx));
            __m256i b = _mm256_loadu_si256((__m256i *) (src2 + idx));
            _mm256_storeu_si256((__m256i *) (dst + idx), _mm256_sub_epi32(a, b));
        }
    }

    // Scalar tail.
    for (int idx = vec_len; idx < len; idx++) {
        dst[idx] = src1[idx] - src2[idx];
    }
}
// dst[i] = src[i] + value (broadcast scalar add).
static inline void addc256s(int32_t *src, int32_t value, int32_t *dst, int len)
{
    const int vec_len = (len / AVX_LEN_INT32) * AVX_LEN_INT32;
    const v8si vval = _mm256_set1_epi32(value);  // value replicated in all 8 lanes

    if (areAligned2((uintptr_t) (src), (uintptr_t) (dst), AVX_LEN_BYTES)) {
        for (int idx = 0; idx < vec_len; idx += AVX_LEN_INT32) {
            __m256i s = _mm256_load_si256((__m256i *) (src + idx));
            _mm256_store_si256((__m256i *) (dst + idx), _mm256_add_epi32(vval, s));
        }
    } else {
        for (int idx = 0; idx < vec_len; idx += AVX_LEN_INT32) {
            __m256i s = _mm256_loadu_si256((__m256i *) (src + idx));
            _mm256_storeu_si256((__m256i *) (dst + idx), _mm256_add_epi32(vval, s));
        }
    }

    // Scalar tail.
    for (int idx = vec_len; idx < len; idx++) {
        dst[idx] = src[idx] + value;
    }
}
// Experimental
static inline void copy256s(int32_t *src, int32_t *dst, int len)
{
int stop_len = len / AVX_LEN_INT32;
stop_len *= AVX_LEN_INT32;
#ifdef OMP
#pragma omp parallel for schedule(auto)
#endif
for (int i = 0; i < stop_len; i += AVX_LEN_INT32) {
_mm256_store_si256((__m256i *) (dst + i), _mm256_load_si256((__m256i *) (src + i)));
}
for (int i = stop_len; i < len; i++) {
dst[i] = src[i];
}
}
static inline void copy256s_2(int32_t *src, int32_t *dst, int len)
{
int stop_len = len / (2 * AVX_LEN_INT32);
stop_len *= (2 * AVX_LEN_INT32);
#ifdef OMP
#pragma omp parallel for schedule(auto)
#endif
for (int i = 0; i < stop_len; i += 2 * AVX_LEN_INT32) {
__m256i tmp1 = _mm256_load_si256((__m256i *) (src + i));
__m256i tmp2 = _mm256_load_si256((__m256i *) (src + i + AVX_LEN_INT32));
_mm256_store_si256((__m256i *) (dst + i), tmp1);
_mm256_store_si256((__m256i *) (dst + i + AVX_LEN_INT32), tmp2);
}
for (int i = stop_len; i < len; i++) {
dst[i] = src[i];
}
}
// Non-temporal (streaming) copy: stores bypass the cache, useful for
// large buffers that will not be re-read soon.
// NOTE(review): _mm256_stream_load_si256 / _mm256_stream_si256 require
// 32-byte-aligned pointers; there is no unaligned fallback here, so the
// caller must guarantee alignment.
static inline void fast_copy256s(int32_t *src, int32_t *dst, int len)
{
    int stop_len = len / AVX_LEN_INT32;
    stop_len *= AVX_LEN_INT32;

#ifdef OMP
#pragma omp parallel for schedule(auto)
#endif
    for (int i = 0; i < stop_len; i += AVX_LEN_INT32) {
        _mm256_stream_si256((__m256i *) (dst + i), _mm256_stream_load_si256((__m256i *) (src + i)));
    }
    // Fence so the weakly-ordered streaming stores are globally visible
    // before the function returns.
    _mm_mfence();

    // Scalar tail.
    for (int i = stop_len; i < len; i++) {
        dst[i] = src[i];
    }
}
// Streaming copy, two AVX vectors per iteration.
// NOTE(review): stream load/store require 32-byte alignment — caller's
// responsibility, no fallback.
static inline void fast_copy256s_2(int32_t *src, int32_t *dst, int len)
{
    int stop_len = len / (2 * AVX_LEN_INT32);
    stop_len *= (2 * AVX_LEN_INT32);

#ifdef OMP
#pragma omp parallel for schedule(auto)
#endif
    for (int i = 0; i < stop_len; i += 2 * AVX_LEN_INT32) {
        __m256i tmp1 = _mm256_stream_load_si256((__m256i *) (src + i));
        __m256i tmp2 = _mm256_stream_load_si256((__m256i *) (src + i + AVX_LEN_INT32));
        _mm256_stream_si256((__m256i *) (dst + i), tmp1);
        _mm256_stream_si256((__m256i *) (dst + i + AVX_LEN_INT32), tmp2);
    }
    // Order the weakly-ordered streaming stores before returning.
    _mm_mfence();

    // Scalar tail.
    for (int i = stop_len; i < len; i++) {
        dst[i] = src[i];
    }
}
// Streaming copy, four AVX vectors per iteration.
// NOTE(review): stream load/store require 32-byte alignment — caller's
// responsibility, no fallback.
static inline void fast_copy256s_4(int32_t *src, int32_t *dst, int len)
{
    int stop_len = len / (4 * AVX_LEN_INT32);
    stop_len *= (4 * AVX_LEN_INT32);

#ifdef OMP
#pragma omp parallel for schedule(auto)
#endif
    for (int i = 0; i < stop_len; i += 4 * AVX_LEN_INT32) {
        __m256i tmp1 = _mm256_stream_load_si256((__m256i *) (src + i));
        __m256i tmp2 = _mm256_stream_load_si256((__m256i *) (src + i + AVX_LEN_INT32));
        __m256i tmp3 = _mm256_stream_load_si256((__m256i *) (src + i + 2 * AVX_LEN_INT32));
        __m256i tmp4 = _mm256_stream_load_si256((__m256i *) (src + i + 3 * AVX_LEN_INT32));
        _mm256_stream_si256((__m256i *) (dst + i), tmp1);
        _mm256_stream_si256((__m256i *) (dst + i + AVX_LEN_INT32), tmp2);
        _mm256_stream_si256((__m256i *) (dst + i + 2 * AVX_LEN_INT32), tmp3);
        _mm256_stream_si256((__m256i *) (dst + i + 3 * AVX_LEN_INT32), tmp4);
    }
    // Order the weakly-ordered streaming stores before returning.
    _mm_mfence();

    // Scalar tail.
    for (int i = stop_len; i < len; i++) {
        dst[i] = src[i];
    }
}
#endif
// |a - b| per signed int16 lane. max(a,b) - min(a,b) yields the same
// (wrapping) result in every lane as the original compare/mask selection,
// including the extreme case INT16_MAX - INT16_MIN.
static inline __m256i _mm256_absdiff_epi16(__m256i a, __m256i b)
{
    const __m256i hi = _mm256_max_epi16(a, b);
    const __m256i lo = _mm256_min_epi16(a, b);
    return _mm256_sub_epi16(hi, lo);
}
// |a - b| per signed int32 lane, as max(a,b) - min(a,b); wraps identically
// to the original compare/mask formulation on overflow.
static inline __m256i _mm256_absdiff_epi32(__m256i a, __m256i b)
{
    const __m256i hi = _mm256_max_epi32(a, b);
    const __m256i lo = _mm256_min_epi32(a, b);
    return _mm256_sub_epi32(hi, lo);
}
// |a - b| per signed int8 lane, as max(a,b) - min(a,b); equivalent to the
// compare-and-select formulation, including wraparound cases.
static inline __m256i _mm256_absdiff_epi8(__m256i a, __m256i b)
{
    const __m256i hi = _mm256_max_epi8(a, b);
    const __m256i lo = _mm256_min_epi8(a, b);
    return _mm256_sub_epi8(hi, lo);
}
// dst[i] = |src1[i] - src2[i]| for int16 data, 16 lanes per AVX2 step.
static inline void absdiff16s_256s(int16_t *src1, int16_t *src2, int16_t *dst, int len)
{
    const int vec_len = (len / AVX_LEN_INT16) * AVX_LEN_INT16;

    if (areAligned3((uintptr_t) (src1), (uintptr_t) (src2), (uintptr_t) (dst), AVX_LEN_BYTES)) {
        for (int idx = 0; idx < vec_len; idx += AVX_LEN_INT16) {
            _mm256_store_si256((__m256i *) (dst + idx),
                               _mm256_absdiff_epi16(_mm256_load_si256((__m256i *) (src1 + idx)),
                                                    _mm256_load_si256((__m256i *) (src2 + idx))));
        }
    } else {
        for (int idx = 0; idx < vec_len; idx += AVX_LEN_INT16) {
            _mm256_storeu_si256((__m256i *) (dst + idx),
                                _mm256_absdiff_epi16(_mm256_loadu_si256((__m256i *) (src1 + idx)),
                                                     _mm256_loadu_si256((__m256i *) (src2 + idx))));
        }
    }

    // Scalar tail (int promotion makes the subtraction exact before abs).
    for (int idx = vec_len; idx < len; idx++) {
        dst[idx] = abs(src1[idx] - src2[idx]);
    }
}
// Power spectrum of interleaved 16-bit complex samples:
// dst[i] = re[i]^2 + im[i]^2, accumulated as int32.
static inline void powerspect16s_256s_interleaved(complex16s_t *src, int32_t *dst, int len)
{
    int stop_len = len / AVX_LEN_INT32;
    stop_len *= AVX_LEN_INT32;

    // j indexes the int16 view of src and advances twice as fast as the
    // int32 output index i (two int16 per complex sample).
    int j = 0;
    if (areAligned2((uintptr_t) (src), (uintptr_t) (dst), AVX_LEN_BYTES)) {
        for (int i = 0; i < stop_len; i += AVX_LEN_INT32) {
            __m256i reim = _mm256_load_si256((__m256i *)((const int16_t *)src + j));
            // print8i(reim); printf("\n");
            // madd_epi16 multiplies adjacent int16 pairs and sums each pair
            // into one int32 lane: exactly re*re + im*im for interleaved data.
            _mm256_store_si256((__m256i*)(dst + i), _mm256_madd_epi16 (reim, reim));
            j += AVX_LEN_INT16;
        }
    } else {
        for (int i = 0; i < stop_len; i += AVX_LEN_INT32) {
            __m256i reim = _mm256_loadu_si256((__m256i *)((const int16_t *)src + j));
            _mm256_storeu_si256((__m256i*)(dst + i), _mm256_madd_epi16 (reim, reim));
            j += AVX_LEN_INT16;
        }
    }

    // Scalar tail, widened to int32 before squaring to avoid overflow.
    for (int i = stop_len; i < len; i++) {
        dst[i] = (int32_t)src[i].re * (int32_t)src[i].re + (int32_t)src[i].im * (int32_t)src[i].im;
    }
}
|
nr_sgx_direct.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <math.h>
//#include <omp.h>
#include "config.h"
#include "cint.h"
#include "nr_direct.h"
#define MAX(I,J) ((I) > (J) ? (I) : (J))
/* Per-thread accumulation buffer for one J/K-type contraction. */
typedef struct {
    int ncomp;      /* number of integral components */
    int v_dims[3];  /* AO counts of the i-, j- and k (grid) shell slices */
    double *data;   /* accumulator; layout depends on the operator type */
} SGXJKArray;

/* Virtual table bundling the callbacks for one contraction pattern.
 * The driver calls set0 before and send after each grid shell, and
 * finalize once per thread under a critical section. */
typedef struct {
    SGXJKArray *(*allocate)(int *shls_slice, int *ao_loc, int ncomp);
    void (*contract)(double *eri, double *dm, SGXJKArray *vjk,
                     int i0, int i1, int j0, int j1, int k0);
    void (*set0)(SGXJKArray *, int);
    void (*send)(SGXJKArray *, int, double *);
    void (*finalize)(SGXJKArray *, double *);
    void (*sanity_check)(int *shls_slice);
} SGXJKOperator;
int GTOmax_shell_dim(const int *ao_loc, const int *shls_slice, int ncenter);
int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter,
int *atm, int natm, int *bas, int nbas, double *env);
/*
 * Unpack the integral-engine environment (basis data, shell slices, AO
 * offsets) and select the prescreening callback.  Kept as a macro so the
 * SGXdot_nrs1/SGXdot_nrs2 drivers below share one declaration list.
 */
#define DECLARE_ALL \
    const int *atm = envs->atm; \
    const int *bas = envs->bas; \
    const double *env = envs->env; \
    const int natm = envs->natm; \
    const int nbas = envs->nbas; \
    const int *ao_loc = envs->ao_loc; \
    const int *shls_slice = envs->shls_slice; \
    const CINTOpt *cintopt = envs->cintopt; \
    const int ish0 = shls_slice[0]; \
    const int ish1 = shls_slice[1]; \
    const int jsh0 = shls_slice[2]; \
    const int jsh1 = shls_slice[3]; \
    const int ksh0 = shls_slice[4]; \
    const int ioff = ao_loc[ish0]; /* AO offset of the first i shell */ \
    const int joff = ao_loc[jsh0]; /* AO offset of the first j shell */ \
    int i0, j0, i1, j1, ish, jsh, idm; \
    int shls[3]; \
    int (*fprescreen)(); \
    if (vhfopt) { \
        fprescreen = vhfopt->fprescreen; \
    } else { \
        fprescreen = CVHFnoscreen; \
    } \
/*
 * s1 (no permutational symmetry) kernel: for the given grid shell ksh,
 * loop over all (ish, jsh) pairs, compute the 3-center integrals that
 * survive prescreening, and feed them to each operator's contract().
 */
void SGXdot_nrs1(int (*intor)(), SGXJKOperator **jkop, SGXJKArray **vjk,
                 double **dms, double *buf, double *cache, int n_dm, int ksh,
                 CVHFOpt *vhfopt, IntorEnvs *envs)
{
    DECLARE_ALL;

    shls[2] = ksh0 + ksh;

    for (ish = ish0; ish < ish1; ish++) {
    for (jsh = jsh0; jsh < jsh1; jsh++) {
        shls[0] = ish;
        shls[1] = jsh;
        /* intor returns nonzero only when it wrote integrals into buf */
        if ((*fprescreen)(shls, vhfopt, atm, bas, env)
            && (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env,
                        cintopt, cache)) {
            /* AO index ranges of this shell pair, relative to the slice */
            i0 = ao_loc[ish ] - ioff;
            j0 = ao_loc[jsh ] - joff;
            i1 = ao_loc[ish+1] - ioff;
            j1 = ao_loc[jsh+1] - joff;
            for (idm = 0; idm < n_dm; idm++) {
                jkop[idm]->contract(buf, dms[idm], vjk[idm],
                                    i0, i1, j0, j1, ksh);
            }
        }
    } }
}
/*
 * s2 (ish >= jsh) kernel: same as SGXdot_nrs1 but only the lower
 * triangle of shell pairs is visited; the contract() implementations
 * for s2 fold in the (j,i) contribution themselves.
 */
void SGXdot_nrs2(int (*intor)(), SGXJKOperator **jkop, SGXJKArray **vjk,
                 double **dms, double *buf, double *cache, int n_dm, int ksh,
                 CVHFOpt *vhfopt, IntorEnvs *envs)
{
    DECLARE_ALL;

    shls[2] = ksh0 + ksh;

    for (ish = ish0; ish < ish1; ish++) {
    for (jsh = jsh0; jsh <= ish; jsh++) {
        shls[0] = ish;
        shls[1] = jsh;
        if ((*fprescreen)(shls, vhfopt, atm, bas, env)
            && (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env,
                        cintopt, cache)) {
            i0 = ao_loc[ish ] - ioff;
            j0 = ao_loc[jsh ] - joff;
            i1 = ao_loc[ish+1] - ioff;
            j1 = ao_loc[jsh+1] - joff;
            for (idm = 0; idm < n_dm; idm++) {
                jkop[idm]->contract(buf, dms[idm], vjk[idm],
                                    i0, i1, j0, j1, ksh);
            }
        }
    } }
}
/*
 * Driver for the semi-numerical exchange (sgX) contraction.
 * Grid shells [ksh0, ksh1) are distributed dynamically over OpenMP
 * threads; each thread owns private SGXJKArray accumulators, and the
 * per-operator finalize() merges them into vjk under a critical section.
 */
void SGXnr_direct_drv(int (*intor)(), void (*fdot)(), SGXJKOperator **jkop,
                      double **dms, double **vjk, int n_dm, int ncomp,
                      int *shls_slice, int *ao_loc,
                      CINTOpt *cintopt, CVHFOpt *vhfopt,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
    IntorEnvs envs = {natm, nbas, atm, bas, env, shls_slice, ao_loc, NULL,
                      cintopt, ncomp};

    const int ksh0 = shls_slice[4];
    const int ksh1 = shls_slice[5];
    int nksh = ksh1 - ksh0;
    /* largest shell dimension -> worst-case integral block is di*di*ncomp */
    int di = GTOmax_shell_dim(ao_loc, shls_slice, 2);
    int cache_size = GTOmax_cache_size(intor, shls_slice, 2,
                                       atm, natm, bas, nbas, env);

#pragma omp parallel default(none) \
        shared(intor, fdot, jkop, ao_loc, shls_slice, \
               dms, vjk, n_dm, ncomp, nbas, vhfopt, envs, \
               nksh, di, cache_size)
{
    int i, ksh;
    /* thread-private accumulators, one per density matrix */
    SGXJKArray *v_priv[n_dm];
    for (i = 0; i < n_dm; i++) {
        v_priv[i] = jkop[i]->allocate(shls_slice, ao_loc, ncomp);
    }
    double *buf = malloc(sizeof(double) * di*di*ncomp);
    double *cache = malloc(sizeof(double) * cache_size);
#pragma omp for nowait schedule(dynamic, 1)
    for (ksh = 0; ksh < nksh; ksh++) {
        for (i = 0; i < n_dm; i++) {
            jkop[i]->set0(v_priv[i], ksh);
        }
        (*fdot)(intor, jkop, v_priv, dms, buf, cache, n_dm, ksh,
                vhfopt, &envs);
        for (i = 0; i < n_dm; i++) {
            jkop[i]->send(v_priv[i], ksh, vjk[i]);
        }
    }
    /* serialize the reduction of thread-private results into vjk */
#pragma omp critical
{
    for (i = 0; i < n_dm; i++) {
        jkop[i]->finalize(v_priv[i], vjk[i]);
    }
}
    free(buf);
    free(cache);
}
}
/*
 * Build the shell-pair screening matrix q_cond[ish,jsh] used by the
 * prescreen callback: the largest |overlap integral| of each shell pair,
 * with same-center pairs forced to 1 (see the comment in the loop).
 */
void SGXsetnr_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
                         int *ao_loc, int *atm, int natm,
                         int *bas, int nbas, double *env)
{
    if (opt->q_cond) {
        free(opt->q_cond);
    }
    nbas = opt->nbas;
    double *q_cond = (double *)malloc(sizeof(double) * nbas*nbas);
    opt->q_cond = q_cond;

    int shls_slice[] = {0, nbas};
    int cache_size = GTOmax_cache_size(intor, shls_slice, 1,
                                       atm, natm, bas, nbas, env);
#pragma omp parallel default(none) \
        shared(intor, q_cond, ao_loc, atm, natm, bas, nbas, env, cache_size)
{
    double qtmp, tmp;
    int ij, i, j, di, dj, ish, jsh;
    int shls[2];
    /* di = largest shell dimension, sizes the integral buffer below */
    di = 0;
    for (ish = 0; ish < nbas; ish++) {
        dj = ao_loc[ish+1] - ao_loc[ish];
        di = MAX(di, dj);
    }
    double *cache = malloc(sizeof(double) * (di*di + cache_size));
    double *buf = cache + cache_size;
#pragma omp for schedule(dynamic, 4)
    /* flattened lower-triangle loop; decode (ish, jsh) from ij */
    for (ij = 0; ij < nbas*(nbas+1)/2; ij++) {
        ish = (int)(sqrt(2*ij+.25) - .5 + 1e-7);
        jsh = ij - ish*(ish+1)/2;
        if (bas(ATOM_OF,ish) == bas(ATOM_OF,jsh)) {
            // If two shells are on the same center, their
            // overlap integrals may be zero due to symmetry.
            // But their contributions to sgX integrals should
            // be recognized.
            q_cond[ish*nbas+jsh] = 1;
            q_cond[jsh*nbas+ish] = 1;
            continue;
        }
        shls[0] = ish;
        shls[1] = jsh;
        qtmp = 1e-100;  /* nonzero floor so log/compare never sees 0 */
        if (0 != (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env,
                          NULL, cache)) {
            di = ao_loc[ish+1] - ao_loc[ish];
            dj = ao_loc[jsh+1] - ao_loc[jsh];
            for (i = 0; i < di; i++) {
            for (j = 0; j < dj; j++) {
                tmp = fabs(buf[i+di*j]);
                qtmp = MAX(qtmp, tmp);
            } }
        }
        q_cond[ish*nbas+jsh] = qtmp;
        q_cond[jsh*nbas+ish] = qtmp;
    }
    free(cache);
}
}
/* Overlap-based shell-pair prescreen: keep the pair only when its
 * screening bound exceeds the SCF cutoff.  Without screening data
 * (opt == NULL) every pair must be computed. */
int SGXnr_ovlp_prescreen(int *shls, CVHFOpt *opt,
                         int *atm, int *bas, double *env)
{
    if (opt == NULL) {
        return 1;
    }
    const int ish = shls[0];
    const int jsh = shls[1];
    const int nshell = opt->nbas;
    assert(opt->q_cond);
    assert(ish < nshell);
    assert(jsh < nshell);
    return opt->q_cond[ish*nshell+jsh] > opt->direct_scf_cutoff;
}
/* Contraction task kinds: J-type reducing to a scalar per grid point
 * (JTYPE1), J-type accumulating a full (i,j) matrix (JTYPE2), and K-type
 * accumulating a vector over i per grid point (KTYPE1). */
#define JTYPE1  1
#define JTYPE2  2
#define KTYPE1  3

/* Generate the allocate/set0/send boilerplate for one operator `label`
 * with task kind `task`; the kind selects the accumulator layout. */
#define ALLOCATE(label, task) \
    static SGXJKArray *SGXJKOperator_allocate_##label(int *shls_slice, int *ao_loc, int ncomp) \
    { \
        SGXJKArray *jkarray = malloc(sizeof(SGXJKArray)); \
        jkarray->v_dims[0]  = ao_loc[shls_slice[1]] - ao_loc[shls_slice[0]]; \
        jkarray->v_dims[1]  = ao_loc[shls_slice[3]] - ao_loc[shls_slice[2]]; \
        jkarray->v_dims[2]  = ao_loc[shls_slice[5]] - ao_loc[shls_slice[4]]; \
        if (task == JTYPE1) { \
            jkarray->data = malloc(sizeof(double) * ncomp); \
        } else if (task == JTYPE2) { \
            jkarray->data = calloc(ncomp * jkarray->v_dims[0] * jkarray->v_dims[1], sizeof(double)); \
        } else { \
            jkarray->data = malloc(sizeof(double) * ncomp * jkarray->v_dims[0]); \
        } \
        jkarray->ncomp = ncomp; \
        return jkarray; \
    } \
    static void SGXJKOperator_set0_##label(SGXJKArray *jkarray, int k) \
    { \
        int ncomp = jkarray->ncomp; \
        int i; \
        double *data = jkarray->data; \
        if (task == JTYPE1) { \
            for (i = 0; i < ncomp; i++) { \
                data[i] = 0; \
            } \
        } else if (task == KTYPE1) { \
            for (i = 0; i < ncomp * jkarray->v_dims[0]; i++) { \
                data[i] = 0; \
            } \
        } \
    } \
    static void SGXJKOperator_send_##label(SGXJKArray *jkarray, int k, double *out) \
    { \
        int ncomp = jkarray->ncomp; \
        int i, icomp; \
        double *data = jkarray->data; \
        int ni = jkarray->v_dims[0]; \
        int nk = jkarray->v_dims[2]; \
        if (task == JTYPE1) { \
            for (i = 0; i < ncomp; i++) { \
                out[i*nk+k] = data[i]; \
            } \
        } else if (task == KTYPE1) { \
            for (icomp = 0; icomp < ncomp; icomp++) { \
                for (i = 0; i < ni; i++) { \
                    out[k*ni+i] = data[i]; \
                } \
                out += nk * ni; \
                data += ni; \
            } \
        } \
    } \
    static void SGXJKOperator_final_##label(SGXJKArray *jkarray, double *out) \
    { \
        int i; \
        double *data = jkarray->data; \
        if (task == JTYPE2) { \
            for (i = 0; i < jkarray->ncomp * jkarray->v_dims[0] * jkarray->v_dims[1]; i++) { \
                out[i] += data[i]; \
            } \
        } \
        SGXJKOperator_deallocate(jkarray); \
    }
/* Instantiate the boilerplate for `fname` and define its SGXJKOperator
 * vtable; `type` picks the s1/s2 sanity check. */
#define ADD_OP(fname, task, type) \
    ALLOCATE(fname, task) \
    SGXJKOperator SGX##fname = {SGXJKOperator_allocate_##fname, fname, \
        SGXJKOperator_set0_##fname, SGXJKOperator_send_##fname, \
        SGXJKOperator_final_##fname, \
        SGXJKOperator_sanity_check_##type}
/* Release an SGXJKArray: its accumulator buffer, then the struct itself. */
static void SGXJKOperator_deallocate(SGXJKArray *jkarray)
{
    free(jkarray->data);
    free(jkarray);
}
/* s1 (no permutational symmetry) imposes no constraint on the slices. */
static void SGXJKOperator_sanity_check_s1(int *shls_slice)
{
}
/* s2 symmetry folds (i,j) with (j,i), so the bra and ket shell ranges
 * must coincide; abort otherwise. */
static void SGXJKOperator_sanity_check_s2(int *shls_slice)
{
    const int same_range = (shls_slice[0] == shls_slice[2]) &&
                           (shls_slice[1] == shls_slice[3]);
    if (same_range) {
        return;
    }
    fprintf(stderr, "Fail at s2\n");
    exit(1);
}
/* J-type, s1: for the fixed grid point, accumulate
 * sum_{ij} eri[i,j] * dm[j,i] into one scalar per component. */
static void nrs1_ijg_ji_g(double *eri, double *dm, SGXJKArray *out,
                          int i0, int i1, int j0, int j1, int k0)
{
    const int ldm = out->v_dims[0];  /* leading dimension of dm */
    double *acc = out->data;
    int pq = 0;  /* running index into the (i,j) integral block */
    for (int icomp = 0; icomp < out->ncomp; icomp++) {
        double total = 0;
        for (int j = j0; j < j1; j++) {
            for (int i = i0; i < i1; i++) {
                total += eri[pq++] * dm[j*ldm+i];
            }
        }
        acc[icomp] += total;
    }
}
ADD_OP(nrs1_ijg_ji_g, JTYPE1, s1);
/* J-type, s2: like nrs1_ijg_ji_g but folds in the mirrored (j,i)
 * contribution of each off-diagonal shell pair. */
static void nrs2_ijg_ji_g(double *eri, double *dm, SGXJKArray *out,
                          int i0, int i1, int j0, int j1, int k0)
{
    /* Diagonal shell pair: there is no distinct mirror image to fold. */
    if (i0 == j0) {
        return nrs1_ijg_ji_g(eri, dm, out, i0, i1, j0, j1, k0);
    }
    const int ldm = out->v_dims[0];
    double *acc = out->data;
    int pq = 0;
    for (int icomp = 0; icomp < out->ncomp; icomp++) {
        double total = 0;
        for (int j = j0; j < j1; j++) {
            for (int i = i0; i < i1; i++) {
                total += eri[pq++] * (dm[j*ldm+i] + dm[i*ldm+j]);
            }
        }
        acc[icomp] += total;
    }
}
ADD_OP(nrs2_ijg_ji_g, JTYPE1, s2);
/* J-type (matrix form), s1: accumulate eri[i,j] * dm[g] into the
 * per-component (i,j) matrix for the fixed grid point k0. */
static void nrs1_ijg_g_ij(double *eri, double *dm, SGXJKArray *out,
                          int i0, int i1, int j0, int j1, int k0)
{
    const int ni = out->v_dims[0];
    const int nj = out->v_dims[1];
    double *acc = out->data;
    int pq = 0;
    for (int icomp = 0; icomp < out->ncomp; icomp++) {
        for (int j = j0; j < j1; j++) {
            for (int i = i0; i < i1; i++) {
                acc[i*nj+j] += eri[pq++] * dm[k0];
            }
        }
        acc += ni * nj;  /* next component's matrix */
    }
}
ADD_OP(nrs1_ijg_g_ij, JTYPE2, s1);
/* s2 variant of the g_ij operator: reuses the s1 kernels (the folded
 * matrix is assembled elsewhere) but enforces the s2 slice check. */
SGXJKOperator SGXnrs2_ijg_g_ij = {SGXJKOperator_allocate_nrs1_ijg_g_ij,
    nrs1_ijg_g_ij, SGXJKOperator_set0_nrs1_ijg_g_ij,
    SGXJKOperator_send_nrs1_ijg_g_ij, SGXJKOperator_final_nrs1_ijg_g_ij,
    SGXJKOperator_sanity_check_s2};
/* K-type, s1: acc[i] += sum_j eri[i,j] * dm[g,j] for the fixed grid
 * point k0, one vector of length v_dims[0] per component. */
static void nrs1_ijg_gj_gi(double *eri, double *dm, SGXJKArray *out,
                           int i0, int i1, int j0, int j1, int k0)
{
    const int ldm = out->v_dims[1];  /* row length of dm in the j index */
    double *acc = out->data;
    int pq = 0;
    for (int icomp = 0; icomp < out->ncomp; icomp++) {
        for (int j = j0; j < j1; j++) {
            for (int i = i0; i < i1; i++) {
                acc[i] += eri[pq++] * dm[k0*ldm+j];
            }
        }
        acc += out->v_dims[0];  /* next component's vector */
    }
}
ADD_OP(nrs1_ijg_gj_gi, KTYPE1, s1);
/* K-type, s2: as nrs1_ijg_gj_gi but each off-diagonal eri element also
 * feeds the mirrored accumulation acc[j] += eri * dm[g,i].
 * (For s2 the i and j slices coincide, so v_dims[0] == v_dims[1].) */
static void nrs2_ijg_gj_gi(double *eri, double *dm, SGXJKArray *out,
                           int i0, int i1, int j0, int j1, int k0)
{
    /* Diagonal shell pair: fall back to the one-sided kernel. */
    if (i0 == j0) {
        return nrs1_ijg_gj_gi(eri, dm, out, i0, i1, j0, j1, k0);
    }
    const int ldm = out->v_dims[0];
    double *acc = out->data;
    int pq = 0;
    for (int icomp = 0; icomp < out->ncomp; icomp++) {
        for (int j = j0; j < j1; j++) {
            for (int i = i0; i < i1; i++) {
                acc[i] += eri[pq] * dm[k0*ldm+j];
                acc[j] += eri[pq] * dm[k0*ldm+i];
                pq++;
            }
        }
        acc += out->v_dims[0];
    }
}
ADD_OP(nrs2_ijg_gj_gi, KTYPE1, s2);
|
GB_binop__max_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__max_uint8
// A.*B function (eWiseMult): GB_AemultB__max_uint8
// A*D function (colscale): GB_AxD__max_uint8
// D*A function (rowscale): GB_DxB__max_uint8
// C+=B function (dense accum): GB_Cdense_accumB__max_uint8
// C+=b function (dense accum): GB_Cdense_accumb__max_uint8
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__max_uint8
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__max_uint8
// C=scalar+B GB_bind1st__max_uint8
// C=scalar+B' GB_bind1st_tran__max_uint8
// C=A+scalar GB_bind2nd__max_uint8
// C=A'+scalar GB_bind2nd_tran__max_uint8
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = GB_IMAX (aij, bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_IMAX (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MAX || GxB_NO_UINT8 || GxB_NO_MAX_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

// Auto-generated wrapper: all computation lives in the shared template,
// specialized by the MAX/uint8_t macros defined at the top of this file.
void GB_Cdense_ewise3_accum__max_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Returns GrB_NO_VALUE when this operator/type combination is compiled out
// (GB_DISABLE), so the caller falls back to the generic kernel.
GrB_Info GB_Cdense_ewise3_noaccum__max_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// The *_slice arrays give the per-task entry partition of B (from
// GB_ek_slice); the template iterates those slices in parallel.
GrB_Info GB_Cdense_accumB__max_uint8
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__max_uint8
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable (the block above already returned); kept as emitted by
    // the code generator — this file must not be edited by hand
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__max_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed view of C's values for the template
    uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__max_uint8
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed view of C's values for the template
    uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// Free the per-matrix ek_slice workspaces that GB_add_template allocates.
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}

GrB_Info GB_AaddB__max_uint8
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace pointers owned by the template; freed via GB_FREE_ALL
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__max_uint8
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace pointers owned by the template; freed via GB_FREE_ALL
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__max_uint8
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present per the Bb bitmap (GBB)
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = Bx [p] ;
        Cx [p] = GB_IMAX (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__max_uint8
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present per the Ab bitmap (GBB)
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = Ax [p] ;
        Cx [p] = GB_IMAX (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint8_t aij = Ax [pA] ; \
    Cx [pC] = GB_IMAX (x, aij) ; \
}

GrB_Info GB_bind1st_tran__max_uint8
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for the rest of the file
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint8_t aij = Ax [pA] ; \
    Cx [pC] = GB_IMAX (aij, y) ; \
}

GrB_Info GB_bind2nd_tran__max_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_assign_zombie2.c | //------------------------------------------------------------------------------
// GB_assign_zombie2: delete all entries in C(i,:) for GB_assign
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// C(i,:)<!> = anything: GrB_Row_assign or GrB_Col_assign with an empty
// complemented mask requires all entries in C(i,:) to be deleted.
// C must be sparse or hypersparse.
#include "GB_assign.h"
#include "GB_assign_zombie.h"
// Delete all live entries in row C(i,:) by converting them to zombies.
// C must be sparse or hypersparse (CSC: this deletes column i of each vector's
// row index list).  Deletion is logical: the row index is flipped via GB_FLIP
// and the zombie count is updated; the entries are physically removed later.
void GB_assign_zombie2
(
GrB_Matrix C,
const int64_t i,
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (!GB_IS_FULL (C)) ;
ASSERT (!GB_IS_BITMAP (C)) ;
ASSERT (GB_ZOMBIES_OK (C)) ;
ASSERT (!GB_JUMBLED (C)) ; // binary search is used
ASSERT (!GB_PENDING (C)) ;
//--------------------------------------------------------------------------
// get C
//--------------------------------------------------------------------------
const int64_t *restrict Cp = C->p ;
int64_t *restrict Ci = C->i ;
const int64_t Cnvec = C->nvec ;
int64_t nzombies = C->nzombies ;
// snapshot of the zombie count before this call: the binary search only
// needs to unflip indices of zombies that existed on entry
const int64_t zorig = nzombies ;
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (Cnvec, chunk, nthreads_max) ;
// oversubscribe tasks (64 per thread) for dynamic load balancing, since
// vectors can have very different lengths
int ntasks = (nthreads == 1) ? 1 : (64 * nthreads) ;
//--------------------------------------------------------------------------
// C(i,:) = empty
//--------------------------------------------------------------------------
int taskid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
// each task handles a contiguous range [kfirst,klast) of vectors
int64_t kfirst, klast ;
GB_PARTITION (kfirst, klast, Cnvec, taskid, ntasks) ;
for (int64_t k = kfirst ; k < klast ; k++)
{
//------------------------------------------------------------------
// find C(i,j)
//------------------------------------------------------------------
int64_t pC = Cp [k] ;
int64_t pC_end = Cp [k+1] ;
int64_t pright = pC_end - 1 ;
bool found, is_zombie ;
// binary search for row index i in Ci [pC..pright], treating
// already-flipped (zombie) indices transparently
GB_BINARY_SEARCH_ZOMBIE (i, Ci, pC, pright, found, zorig,
is_zombie) ;
//------------------------------------------------------------------
// if found and not a zombie, mark it as a zombie
//------------------------------------------------------------------
if (found && !is_zombie)
{
ASSERT (i == Ci [pC]) ;
nzombies++ ;
// flip the row index to turn the entry into a zombie
Ci [pC] = GB_FLIP (i) ;
}
}
}
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
C->nzombies = nzombies ;
}
|
convolution_7x7.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
// 7x7 convolution, stride 1, NEON-optimized.
// For each output channel p: initialize with bias, then accumulate the 7x7
// correlation of every input channel q.  Kernel layout is [outch][inch][49].
// The inner row loop processes 4 output columns at a time on ARM (inline asm
// on aarch64+clang and armv7; intrinsics on aarch64+gcc), with a scalar tail.
static void conv7x7s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
// output channels are independent: parallelize over p
#pragma omp parallel for
for (int p=0; p<outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
for (int q=0; q<inch; q++)
{
float* outptr = out;
const float* img0 = bottom_blob.channel(q);
const float* kernel0 = kernel + p*inch*49 + q*49;
// r0..r6: the 7 input rows contributing to the current output row
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* r3 = img0 + w*3;
const float* r4 = img0 + w*4;
const float* r5 = img0 + w*5;
const float* r6 = img0 + w*6;
// k0..k6: the 7 kernel rows (7 floats each)
const float* k0 = kernel0;
const float* k1 = kernel0 + 7;
const float* k2 = kernel0 + 14;
const float* k3 = kernel0 + 21;
const float* k4 = kernel0 + 28;
const float* k5 = kernel0 + 35;
const float* k6 = kernel0 + 42;
int i = 0;
for (; i < outh; i++)
{
#if __ARM_NEON
// nn iterations of 4 output columns each; remain = scalar tail
int nn = outw >> 2;
int remain = outw - (nn << 2);
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
// preload the 7 kernel rows into q-registers; note the names overlap
// (e.g. _k4567 / _k78910) because each row is read as two 4-lane loads
float32x4_t _k0123 = vld1q_f32(k0);
float32x4_t _k4567 = vld1q_f32(k0 + 4);
float32x4_t _k78910 = vld1q_f32(k1);
float32x4_t _k11121314 = vld1q_f32(k1 + 4);
float32x4_t _k14151617 = vld1q_f32(k2);
float32x4_t _k18192021 = vld1q_f32(k2 + 4);
float32x4_t _k21222324 = vld1q_f32(k3);
float32x4_t _k25262728 = vld1q_f32(k3 + 4);
float32x4_t _k28293031 = vld1q_f32(k4);
float32x4_t _k32333435 = vld1q_f32(k4 + 4);
float32x4_t _k35363738 = vld1q_f32(k5);
float32x4_t _k39404142 = vld1q_f32(k5 + 4);
float32x4_t _k42434445 = vld1q_f32(k6);
float32x4_t _k46474849 = vld1q_f32(k6 + 4);
#ifdef __clang__ // __ARM_NEON && __aarch64__ && __clang__
if (nn > 0)
{
asm volatile(
// v0: input / final output
// v1 v2 v3: = ri0 ri4 ri0n , i <- 1-7
// v4 = ri1 / ri3 / ri6
// v5 = ri2 / ri5
// v9 = intermediate sum register
"0: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.4s}, [%1] \n"
//i = 1
"prfm pldl1keep, [%2, #384] \n"
"ld1 {v1.4s, v2.4s, v3.4s}, [%2] \n"
"add %2, %2, #16 \n"
"ext v4.16b, v1.16b, v2.16b, #4 \n"
"fmul v9.4s, v1.4s, %18.s[0] \n"
"ext v5.16b, v1.16b, v2.16b, #8 \n"
"fmla v0.4s, v4.4s, %18.s[1] \n"
"ext v4.16b, v1.16b, v2.16b, #12 \n"
"fmla v9.4s, v5.4s, %18.s[2] \n"
"ext v5.16b, v2.16b, v3.16b, #4 \n"
"fmla v0.4s, v4.4s, %18.s[3] \n"
"ext v4.16b, v2.16b, v3.16b, #8 \n"
"fmla v9.4s, v2.4s, %19.s[0] \n"
"fmla v0.4s, v5.4s, %19.s[1] \n"
"fmla v9.4s, v4.4s, %19.s[2] \n"
//i = 2
"prfm pldl1keep, [%3, #384] \n"
"ld1 {v1.4s, v2.4s, v3.4s}, [%3] \n" // v1 v2 v3: = r10 r14 r10n
"add %3, %3, #16 \n"
"ext v4.16b, v1.16b, v2.16b, #4 \n" // v4 = r11
"fmla v9.4s, v1.4s, %20.s[0] \n" // *+ r10
"ext v5.16b, v1.16b, v2.16b, #8 \n" // v5 = r12
"fmla v0.4s, v4.4s, %20.s[1] \n" // *+ r11
"ext v4.16b, v1.16b, v2.16b, #12 \n" // v4 = r13
"fmla v9.4s, v5.4s, %20.s[2] \n" // *+ r12
"ext v5.16b, v2.16b, v3.16b, #4 \n" // v5 = r15
"fmla v0.4s, v4.4s, %20.s[3] \n" // *+ r13
"ext v4.16b, v2.16b, v3.16b, #8 \n" // v4 = r16
"fmla v9.4s, v2.4s, %21.s[0] \n" // *+ r14
"fmla v0.4s, v5.4s, %21.s[1] \n" // *+ r15
"fmla v9.4s, v4.4s, %21.s[2] \n" // *+ r16
//i = 3
"prfm pldl1keep, [%4, #384] \n"
"ld1 {v1.4s, v2.4s, v3.4s}, [%4] \n"
"add %4, %4, #16 \n"
"ext v4.16b, v1.16b, v2.16b, #4 \n"
"fmla v9.4s, v1.4s, %22.s[0] \n"
"ext v5.16b, v1.16b, v2.16b, #8 \n"
"fmla v0.4s, v4.4s, %22.s[1] \n"
"ext v4.16b, v1.16b, v2.16b, #12 \n"
"fmla v9.4s, v5.4s, %22.s[2] \n"
"ext v5.16b, v2.16b, v3.16b, #4 \n"
"fmla v0.4s, v4.4s, %22.s[3] \n"
"ext v4.16b, v2.16b, v3.16b, #8 \n"
"fmla v9.4s, v2.4s, %23.s[0] \n"
"fmla v0.4s, v5.4s, %23.s[1] \n"
"fmla v9.4s, v4.4s, %23.s[2] \n"
//i = 4
"prfm pldl1keep, [%5, #384] \n"
"ld1 {v1.4s, v2.4s, v3.4s}, [%5] \n"
"add %5, %5, #16 \n"
"ext v4.16b, v1.16b, v2.16b, #4 \n"
"fmla v9.4s, v1.4s, %24.s[0] \n"
"ext v5.16b, v1.16b, v2.16b, #8 \n"
"fmla v0.4s, v4.4s, %24.s[1] \n"
"ext v4.16b, v1.16b, v2.16b, #12 \n"
"fmla v9.4s, v5.4s, %24.s[2] \n"
"ext v5.16b, v2.16b, v3.16b, #4 \n"
"fmla v0.4s, v4.4s, %24.s[3] \n"
"ext v4.16b, v2.16b, v3.16b, #8 \n"
"fmla v9.4s, v2.4s, %25.s[0] \n"
"fmla v0.4s, v5.4s, %25.s[1] \n"
"fmla v9.4s, v4.4s, %25.s[2] \n"
//i = 5
"prfm pldl1keep, [%6, #384] \n"
"ld1 {v1.4s, v2.4s, v3.4s}, [%6] \n"
"add %6, %6, #16 \n"
"ext v4.16b, v1.16b, v2.16b, #4 \n"
"fmla v9.4s, v1.4s, %26.s[0] \n"
"ext v5.16b, v1.16b, v2.16b, #8 \n"
"fmla v0.4s, v4.4s, %26.s[1] \n"
"ext v4.16b, v1.16b, v2.16b, #12 \n"
"fmla v9.4s, v5.4s, %26.s[2] \n"
"ext v5.16b, v2.16b, v3.16b, #4 \n"
"fmla v0.4s, v4.4s, %26.s[3] \n"
"ext v4.16b, v2.16b, v3.16b, #8 \n"
"fmla v9.4s, v2.4s, %27.s[0] \n"
"fmla v0.4s, v5.4s, %27.s[1] \n"
"fmla v9.4s, v4.4s, %27.s[2] \n"
//i = 6
"prfm pldl1keep, [%7, #384] \n"
"ld1 {v1.4s, v2.4s, v3.4s}, [%7] \n"
"add %7, %7, #16 \n"
"ext v4.16b, v1.16b, v2.16b, #4 \n"
"fmla v9.4s, v1.4s, %28.s[0] \n"
"ext v5.16b, v1.16b, v2.16b, #8 \n"
"fmla v0.4s, v4.4s, %28.s[1] \n"
"ext v4.16b, v1.16b, v2.16b, #12 \n"
"fmla v9.4s, v5.4s, %28.s[2] \n"
"ext v5.16b, v2.16b, v3.16b, #4 \n"
"fmla v0.4s, v4.4s, %28.s[3] \n"
"ext v4.16b, v2.16b, v3.16b, #8 \n"
"fmla v9.4s, v2.4s, %29.s[0] \n"
"fmla v0.4s, v5.4s, %29.s[1] \n"
"fmla v9.4s, v4.4s, %29.s[2] \n"
//i = 7
"prfm pldl1keep, [%8, #384] \n"
"ld1 {v1.4s, v2.4s, v3.4s}, [%8] \n"
"add %8, %8, #16 \n"
"ext v4.16b, v1.16b, v2.16b, #4 \n"
"fmla v9.4s, v1.4s, %30.s[0] \n"
"ext v5.16b, v1.16b, v2.16b, #8 \n"
"fmla v0.4s, v4.4s, %30.s[1] \n"
"ext v4.16b, v1.16b, v2.16b, #12 \n"
"fmla v9.4s, v5.4s, %30.s[2] \n"
"ext v5.16b, v2.16b, v3.16b, #4 \n"
"fmla v0.4s, v4.4s, %30.s[3] \n"
"ext v4.16b, v2.16b, v3.16b, #8 \n"
"fmla v9.4s, v2.4s, %31.s[0] \n"
"fmla v0.4s, v5.4s, %31.s[1] \n"
"fmla v9.4s, v4.4s, %31.s[2] \n"
"fadd v0.4s, v0.4s, v9.4s \n"
"st1 {v0.4s}, [%1], #16 \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(r5), // %7
"=r"(r6) // %8
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(r5),
"8"(r6),
"w"(_k0123), // %18
"w"(_k4567), // %19
"w"(_k78910), // %20
"w"(_k11121314), // %21
"w"(_k14151617), // %22
"w"(_k18192021), // %23
"w"(_k21222324), // %24
"w"(_k25262728), // %25
"w"(_k28293031), // %26
"w"(_k32333435), // %27
"w"(_k35363738), // %28
"w"(_k39404142), // %29
"w"(_k42434445), // %30
"w"(_k46474849) // %31
: "cc", "memory","v0", "v1", "v2", "v3", "v4", "v5", "v9"
);
}
#else // __ARM_NEON && __aarch64__ defined, but __clang__ not defined
// When compiled with gcc, gcc does not accept over 30 operands
for (; nn>0; nn--)
{
float32x4_t _sum = vld1q_f32(outptr);
float32x4_t _r00 = vld1q_f32(r0);// 0 1 2 3
float32x4_t _r04 = vld1q_f32(r0 + 4);// 4 5 6 7
float32x4_t _r00n = vld1q_f32(r0 + 8);// 8 9 10 11
float32x4_t _r01 = vextq_f32(_r00, _r04, 1);// 1 2 3 4
float32x4_t _r02 = vextq_f32(_r00, _r04, 2);// 2 3 4 5
float32x4_t _r03 = vextq_f32(_r00, _r04, 3);// 3 4 5 6
float32x4_t _r05 = vextq_f32(_r04, _r00n, 1);// 5 6 7 8
float32x4_t _r06 = vextq_f32(_r04, _r00n, 2);// 6 7 8 9
_sum = vfmaq_laneq_f32(_sum, _r00, _k0123, 0);
_sum = vfmaq_laneq_f32(_sum, _r01, _k0123, 1);
_sum = vfmaq_laneq_f32(_sum, _r02, _k0123, 2);
_sum = vfmaq_laneq_f32(_sum, _r03, _k0123, 3);
_sum = vfmaq_laneq_f32(_sum, _r04, _k4567, 0);
_sum = vfmaq_laneq_f32(_sum, _r05, _k4567, 1);
_sum = vfmaq_laneq_f32(_sum, _r06, _k4567, 2);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r14 = vld1q_f32(r1 + 4);
float32x4_t _r10n = vld1q_f32(r1 + 8);
float32x4_t _r11 = vextq_f32(_r10, _r14, 1);
float32x4_t _r12 = vextq_f32(_r10, _r14, 2);
float32x4_t _r13 = vextq_f32(_r10, _r14, 3);
float32x4_t _r15 = vextq_f32(_r14, _r10n, 1);
float32x4_t _r16 = vextq_f32(_r14, _r10n, 2);
_sum = vfmaq_laneq_f32(_sum, _r10, _k78910, 0);
_sum = vfmaq_laneq_f32(_sum, _r11, _k78910, 1);
_sum = vfmaq_laneq_f32(_sum, _r12, _k78910, 2);
_sum = vfmaq_laneq_f32(_sum, _r13, _k78910, 3);
_sum = vfmaq_laneq_f32(_sum, _r14, _k11121314, 0);
_sum = vfmaq_laneq_f32(_sum, _r15, _k11121314, 1);
_sum = vfmaq_laneq_f32(_sum, _r16, _k11121314, 2);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r24 = vld1q_f32(r2 + 4);
float32x4_t _r20n = vld1q_f32(r2 + 8);
float32x4_t _r21 = vextq_f32(_r20, _r24, 1);
float32x4_t _r22 = vextq_f32(_r20, _r24, 2);
float32x4_t _r23 = vextq_f32(_r20, _r24, 3);
float32x4_t _r25 = vextq_f32(_r24, _r20n, 1);
float32x4_t _r26 = vextq_f32(_r24, _r20n, 2);
_sum = vfmaq_laneq_f32(_sum, _r20, _k14151617, 0);
_sum = vfmaq_laneq_f32(_sum, _r21, _k14151617, 1);
_sum = vfmaq_laneq_f32(_sum, _r22, _k14151617, 2);
_sum = vfmaq_laneq_f32(_sum, _r23, _k14151617, 3);
_sum = vfmaq_laneq_f32(_sum, _r24, _k18192021, 0);
_sum = vfmaq_laneq_f32(_sum, _r25, _k18192021, 1);
_sum = vfmaq_laneq_f32(_sum, _r26, _k18192021, 2);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _r34 = vld1q_f32(r3 + 4);
float32x4_t _r30n = vld1q_f32(r3 + 8);
float32x4_t _r31 = vextq_f32(_r30, _r34, 1);
float32x4_t _r32 = vextq_f32(_r30, _r34, 2);
float32x4_t _r33 = vextq_f32(_r30, _r34, 3);
float32x4_t _r35 = vextq_f32(_r34, _r30n, 1);
float32x4_t _r36 = vextq_f32(_r34, _r30n, 2);
_sum = vfmaq_laneq_f32(_sum, _r30, _k21222324, 0);
_sum = vfmaq_laneq_f32(_sum, _r31, _k21222324, 1);
_sum = vfmaq_laneq_f32(_sum, _r32, _k21222324, 2);
_sum = vfmaq_laneq_f32(_sum, _r33, _k21222324, 3);
_sum = vfmaq_laneq_f32(_sum, _r34, _k25262728, 0);
_sum = vfmaq_laneq_f32(_sum, _r35, _k25262728, 1);
_sum = vfmaq_laneq_f32(_sum, _r36, _k25262728, 2);
float32x4_t _r40 = vld1q_f32(r4);
float32x4_t _r44 = vld1q_f32(r4 + 4);
float32x4_t _r40n = vld1q_f32(r4 + 8);
float32x4_t _r41 = vextq_f32(_r40, _r44, 1);
float32x4_t _r42 = vextq_f32(_r40, _r44, 2);
float32x4_t _r43 = vextq_f32(_r40, _r44, 3);
float32x4_t _r45 = vextq_f32(_r44, _r40n, 1);
float32x4_t _r46 = vextq_f32(_r44, _r40n, 2);
_sum = vfmaq_laneq_f32(_sum, _r40, _k28293031, 0);
_sum = vfmaq_laneq_f32(_sum, _r41, _k28293031, 1);
_sum = vfmaq_laneq_f32(_sum, _r42, _k28293031, 2);
_sum = vfmaq_laneq_f32(_sum, _r43, _k28293031, 3);
_sum = vfmaq_laneq_f32(_sum, _r44, _k32333435, 0);
_sum = vfmaq_laneq_f32(_sum, _r45, _k32333435, 1);
_sum = vfmaq_laneq_f32(_sum, _r46, _k32333435, 2);
float32x4_t _r50 = vld1q_f32(r5);
float32x4_t _r54 = vld1q_f32(r5 + 4);
float32x4_t _r50n = vld1q_f32(r5 + 8);
float32x4_t _r51 = vextq_f32(_r50, _r54, 1);
float32x4_t _r52 = vextq_f32(_r50, _r54, 2);
float32x4_t _r53 = vextq_f32(_r50, _r54, 3);
float32x4_t _r55 = vextq_f32(_r54, _r50n, 1);
float32x4_t _r56 = vextq_f32(_r54, _r50n, 2);
_sum = vfmaq_laneq_f32(_sum, _r50, _k35363738, 0);
_sum = vfmaq_laneq_f32(_sum, _r51, _k35363738, 1);
_sum = vfmaq_laneq_f32(_sum, _r52, _k35363738, 2);
_sum = vfmaq_laneq_f32(_sum, _r53, _k35363738, 3);
_sum = vfmaq_laneq_f32(_sum, _r54, _k39404142, 0);
_sum = vfmaq_laneq_f32(_sum, _r55, _k39404142, 1);
_sum = vfmaq_laneq_f32(_sum, _r56, _k39404142, 2);
float32x4_t _r60 = vld1q_f32(r6);
float32x4_t _r64 = vld1q_f32(r6 + 4);
float32x4_t _r60n = vld1q_f32(r6 + 8);
float32x4_t _r61 = vextq_f32(_r60, _r64, 1);
float32x4_t _r62 = vextq_f32(_r60, _r64, 2);
float32x4_t _r63 = vextq_f32(_r60, _r64, 3);
float32x4_t _r65 = vextq_f32(_r64, _r60n, 1);
float32x4_t _r66 = vextq_f32(_r64, _r60n, 2);
_sum = vfmaq_laneq_f32(_sum, _r60, _k42434445, 0);
_sum = vfmaq_laneq_f32(_sum, _r61, _k42434445, 1);
_sum = vfmaq_laneq_f32(_sum, _r62, _k42434445, 2);
_sum = vfmaq_laneq_f32(_sum, _r63, _k42434445, 3);
_sum = vfmaq_laneq_f32(_sum, _r64, _k46474849, 0);
_sum = vfmaq_laneq_f32(_sum, _r65, _k46474849, 1);
_sum = vfmaq_laneq_f32(_sum, _r66, _k46474849, 2);
vst1q_f32(outptr, _sum);
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
r5 += 4;
r6 += 4;
outptr += 4;
}
#endif // __clang__
#else //__aarch32__
// armv7: kernel rows are reloaded from k0 inside the loop (register
// pressure); %9 walks through all 49 weights then is rewound by 168 bytes
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%1, #256] \n"
"vld1.f32 {d24-d25}, [%1] \n"// _sum
// "veor q13, q13 \n"// _sum2 = 0;
// "veor q14, q14 \n"// _sum3 = 0;
// "veor q15, q15 \n"// _sum4 = 0;
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k0123 k4567
"add %9, #28 \n"
"pld [%2, #128] \n"
"vld1.f32 {d0-d1}, [%2]! \n"// q0 = 0 1 2 3
"vmla.f32 q12, q0, d8[0] \n"
"pld [%2, #256] \n"
"vld1.f32 {d4-d7}, [%2] \n"// q2 = 4 5 6 7 q3 = 8 9 10 11
"vmul.f32 q13, q2, d10[0] \n"
"vext.32 q1, q0, q2, #1 \n"// q1 = 1 2 3 4
"vext.32 q10, q2, q3, #1 \n"// q10= 5 6 7 8
"vmul.f32 q14, q1, d8[1] \n"
"vmul.f32 q15, q10, d10[1] \n"
"vext.32 q8, q0, q2, #2 \n"// q8 = 2 3 4 5
"vext.32 q11, q2, q3, #2 \n"// q11= 6 7 8 9
"vmla.f32 q12, q8, d9[0] \n"
"vmla.f32 q13, q11, d11[0] \n"
"vext.32 q9, q0, q2, #3 \n"// q9 = 3 4 5 6
"vmla.f32 q14, q9, d9[1] \n"
"pld [%9, #256] \n"
"vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k78910 k11121314
"add %9, #28 \n"
"pld [%3, #128] \n"
"vld1.f32 {d0-d1}, [%3]! \n"
"vmla.f32 q15, q0, d12[0] \n"
"pld [%3, #256] \n"
"vld1.f32 {d4-d7}, [%3] \n"
"vmla.f32 q12, q2, d14[0] \n"
"vext.32 q1, q0, q2, #1 \n"
"vext.32 q10, q2, q3, #1 \n"
"vmla.f32 q13, q1, d12[1] \n"
"vmla.f32 q14, q10, d14[1] \n"
"vext.32 q8, q0, q2, #2 \n"
"vext.32 q11, q2, q3, #2 \n"
"vmla.f32 q15, q8, d13[0] \n"
"vmla.f32 q12, q11, d15[0] \n"
"vext.32 q9, q0, q2, #3 \n"
"vmla.f32 q13, q9, d13[1] \n"
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k14151617 k18192021
"add %9, #28 \n"
"pld [%4, #128] \n"
"vld1.f32 {d0-d1}, [%4]! \n"
"vmla.f32 q14, q0, d8[0] \n"
"pld [%4, #256] \n"
"vld1.f32 {d4-d7}, [%4] \n"
"vmla.f32 q15, q2, d10[0] \n"
"vext.32 q1, q0, q2, #1 \n"
"vext.32 q10, q2, q3, #1 \n"
"vmla.f32 q12, q1, d8[1] \n"
"vmla.f32 q13, q10, d10[1] \n"
"vext.32 q8, q0, q2, #2 \n"
"vext.32 q11, q2, q3, #2 \n"
"vmla.f32 q14, q8, d9[0] \n"
"vmla.f32 q15, q11, d11[0] \n"
"vext.32 q9, q0, q2, #3 \n"
"vmla.f32 q12, q9, d9[1] \n"
"pld [%9, #256] \n"
"vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k21222324 k25262728
"add %9, #28 \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5]! \n"
"vmla.f32 q13, q0, d12[0] \n"
"pld [%5, #256] \n"
"vld1.f32 {d4-d7}, [%5] \n"
"vmla.f32 q14, q2, d14[0] \n"
"vext.32 q1, q0, q2, #1 \n"
"vext.32 q10, q2, q3, #1 \n"
"vmla.f32 q15, q1, d12[1] \n"
"vmla.f32 q12, q10, d14[1] \n"
"vext.32 q8, q0, q2, #2 \n"
"vext.32 q11, q2, q3, #2 \n"
"vmla.f32 q13, q8, d13[0] \n"
"vmla.f32 q14, q11, d15[0] \n"
"vext.32 q9, q0, q2, #3 \n"
"vmla.f32 q15, q9, d13[1] \n"
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k28293031 k32333435
"add %9, #28 \n"
"pld [%6, #128] \n"
"vld1.f32 {d0-d1}, [%6]! \n"
"vmla.f32 q12, q0, d8[0] \n"
"pld [%6, #256] \n"
"vld1.f32 {d4-d7}, [%6] \n"
"vmla.f32 q13, q2, d10[0] \n"
"vext.32 q1, q0, q2, #1 \n"
"vext.32 q10, q2, q3, #1 \n"
"vmla.f32 q14, q1, d8[1] \n"
"vmla.f32 q15, q10, d10[1] \n"
"vext.32 q8, q0, q2, #2 \n"
"vext.32 q11, q2, q3, #2 \n"
"vmla.f32 q12, q8, d9[0] \n"
"vmla.f32 q13, q11, d11[0] \n"
"vext.32 q9, q0, q2, #3 \n"
"vmla.f32 q14, q9, d9[1] \n"
"pld [%9, #256] \n"
"vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k35363738 k39404142
"add %9, #28 \n"
"pld [%7, #128] \n"
"vld1.f32 {d0-d1}, [%7]! \n"
"vmla.f32 q15, q0, d12[0] \n"
"pld [%7, #256] \n"
"vld1.f32 {d4-d7}, [%7] \n"
"vmla.f32 q12, q2, d14[0] \n"
"vext.32 q1, q0, q2, #1 \n"
"vext.32 q10, q2, q3, #1 \n"
"vmla.f32 q13, q1, d12[1] \n"
"vmla.f32 q14, q10, d14[1] \n"
"vext.32 q8, q0, q2, #2 \n"
"vext.32 q11, q2, q3, #2 \n"
"vmla.f32 q15, q8, d13[0] \n"
"vmla.f32 q12, q11, d15[0] \n"
"vext.32 q9, q0, q2, #3 \n"
"vmla.f32 q13, q9, d13[1] \n"
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k42434445 k46474849
"sub %9, #168 \n"// restore k0
"pld [%8, #128] \n"
"vld1.f32 {d0-d1}, [%8]! \n"
"vmla.f32 q14, q0, d8[0] \n"
"pld [%8, #256] \n"
"vld1.f32 {d4-d7}, [%8] \n"
"vmla.f32 q15, q2, d10[0] \n"
"vext.32 q1, q0, q2, #1 \n"
"vext.32 q10, q2, q3, #1 \n"
"vmla.f32 q12, q1, d8[1] \n"
"vmla.f32 q13, q10, d10[1] \n"
"vext.32 q8, q0, q2, #2 \n"
"vext.32 q11, q2, q3, #2 \n"
"vmla.f32 q14, q8, d9[0] \n"
"vmla.f32 q15, q11, d11[0] \n"
"vext.32 q9, q0, q2, #3 \n"
"vmla.f32 q12, q9, d9[1] \n"
"vadd.f32 q13, q13, q14 \n"
"vadd.f32 q13, q13, q15 \n"
"vadd.f32 q12, q12, q13 \n"
"vst1.f32 {d24-d25}, [%1]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(r5), // %7
"=r"(r6), // %8
"=r"(k0) // %9
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(r5),
"8"(r6),
"9"(k0)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
// scalar tail: one output column per iteration, full 7x7 accumulation
for (; remain>0; remain--)
{
float sum = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r0[5] * k0[5];
sum += r0[6] * k0[6];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r1[5] * k1[5];
sum += r1[6] * k1[6];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r2[5] * k2[5];
sum += r2[6] * k2[6];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r3[5] * k3[5];
sum += r3[6] * k3[6];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
sum += r4[5] * k4[5];
sum += r4[6] * k4[6];
sum += r5[0] * k5[0];
sum += r5[1] * k5[1];
sum += r5[2] * k5[2];
sum += r5[3] * k5[3];
sum += r5[4] * k5[4];
sum += r5[5] * k5[5];
sum += r5[6] * k5[6];
sum += r6[0] * k6[0];
sum += r6[1] * k6[1];
sum += r6[2] * k6[2];
sum += r6[3] * k6[3];
sum += r6[4] * k6[4];
sum += r6[5] * k6[5];
sum += r6[6] * k6[6];
*outptr += sum;
r0++;
r1++;
r2++;
r3++;
r4++;
r5++;
r6++;
outptr++;
}
// advance each row pointer past the 6-column kernel overlap (w = outw + 6
// for stride 1), so r0..r6 point at the start of the next output row
r0 += 6;
r1 += 6;
r2 += 6;
r3 += 6;
r4 += 6;
r5 += 6;
r6 += 6;
}
}
}
}
static void conv7x7s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int tailstep = w - 2*outw + w;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for
for (int p=0; p<outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
for (int q=0; q<inch; q++)
{
float* outptr = out;
const float* img0 = bottom_blob.channel(q);
const float* kernel0 = kernel + p*inch*49 + q*49;
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* r3 = img0 + w*3;
const float* r4 = img0 + w*4;
const float* r5 = img0 + w*5;
const float* r6 = img0 + w*6;
const float* k0 = kernel0;
const float* k1 = kernel0 + 7;
const float* k2 = kernel0 + 14;
const float* k3 = kernel0 + 21;
const float* k4 = kernel0 + 28;
const float* k5 = kernel0 + 35;
const float* k6 = kernel0 + 42;
int i = 0;
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw - (nn << 2);
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
float32x4_t _k0123 = vld1q_f32(k0);
float32x4_t _k4567 = vld1q_f32(k0 + 4);
float32x4_t _k78910 = vld1q_f32(k1);
float32x4_t _k11121314 = vld1q_f32(k1 + 4);
float32x4_t _k14151617 = vld1q_f32(k2);
float32x4_t _k18192021 = vld1q_f32(k2 + 4);
float32x4_t _k21222324 = vld1q_f32(k3);
float32x4_t _k25262728 = vld1q_f32(k3 + 4);
float32x4_t _k28293031 = vld1q_f32(k4);
float32x4_t _k32333435 = vld1q_f32(k4 + 4);
float32x4_t _k35363738 = vld1q_f32(k5);
float32x4_t _k39404142 = vld1q_f32(k5 + 4);
float32x4_t _k42434445 = vld1q_f32(k6);
float32x4_t _k46474849 = vld1q_f32(k6 + 4);
#ifdef __clang__ // __ARM_NEON && __aarch64__ && __clang__
if (nn > 0)
{
asm volatile(
// v0: input / final output
// v1 v2: = _ri0/_ri1 first
// v3 v4: = then _r0_8101214/_r0_9111315
// v5 = ri2 / ri4 / ri6
// v6 = ri3 / ri5
// v9 = intermediate sum register
"0: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.4s}, [%1] \n"
//i = 1
"prfm pldl1keep, [%2, #512] \n"
"ld2 {v1.4s, v2.4s}, [%2] \n" // v1 v2 = _r00 _r01
"add %2, %2, #32 \n"
"ld2 {v3.4s, v4.4s}, [%2] \n" // v3 v4 = _r0_8101214 / _r0_9111315
"fmul v9.4s, v1.4s, %18.s[0] \n" // *+ _r00
"ext v5.16b, v1.16b, v3.16b, #4 \n" // v5 = _r02
"fmla v0.4s, v2.4s, %18.s[1] \n" // *+ _r01
"ext v6.16b, v2.16b, v4.16b, #4 \n" // v6 = _r03
"fmla v9.4s, v5.4s, %18.s[2] \n" // *+ _r02
"ext v5.16b, v1.16b, v3.16b, #8 \n" // v5 = _r04
"fmla v0.4s, v6.4s, %18.s[3] \n" // *+ _r03
"ext v6.16b, v2.16b, v4.16b, #8 \n" // v6 = _r05
"fmla v9.4s, v5.4s, %19.s[0] \n" // *+ _r04
"ext v5.16b, v1.16b, v3.16b, #12 \n" // v5 = _r06
"fmla v0.4s, v6.4s, %19.s[1] \n" // *+ _r05
"fmla v9.4s, v5.4s, %19.s[2] \n" // *+ _r06
//i = 2
"prfm pldl1keep, [%3, #512] \n"
"ld2 {v1.4s, v2.4s}, [%3] \n"
"add %3, %3, #32 \n"
"ld2 {v3.4s, v4.4s}, [%3] \n"
"fmla v9.4s, v1.4s, %20.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #4 \n"
"fmla v0.4s, v2.4s, %20.s[1] \n"
"ext v6.16b, v2.16b, v4.16b, #4 \n"
"fmla v9.4s, v5.4s, %20.s[2] \n"
"ext v5.16b, v1.16b, v3.16b, #8 \n"
"fmla v0.4s, v6.4s, %20.s[3] \n"
"ext v6.16b, v2.16b, v4.16b, #8 \n"
"fmla v9.4s, v5.4s, %21.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #12 \n"
"fmla v0.4s, v6.4s, %21.s[1] \n"
"fmla v9.4s, v5.4s, %21.s[2] \n"
//i = 3
"prfm pldl1keep, [%4, #512] \n"
"ld2 {v1.4s, v2.4s}, [%4] \n"
"add %4, %4, #32 \n"
"ld2 {v3.4s, v4.4s}, [%4] \n"
"fmla v9.4s, v1.4s, %22.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #4 \n"
"fmla v0.4s, v2.4s, %22.s[1] \n"
"ext v6.16b, v2.16b, v4.16b, #4 \n"
"fmla v9.4s, v5.4s, %22.s[2] \n"
"ext v5.16b, v1.16b, v3.16b, #8 \n"
"fmla v0.4s, v6.4s, %22.s[3] \n"
"ext v6.16b, v2.16b, v4.16b, #8 \n"
"fmla v9.4s, v5.4s, %23.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #12 \n"
"fmla v0.4s, v6.4s, %23.s[1] \n"
"fmla v9.4s, v5.4s, %23.s[2] \n"
//i = 4
"prfm pldl1keep, [%5, #512] \n"
"ld2 {v1.4s, v2.4s}, [%5] \n"
"add %5, %5, #32 \n"
"ld2 {v3.4s, v4.4s}, [%5] \n"
"fmla v9.4s, v1.4s, %24.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #4 \n"
"fmla v0.4s, v2.4s, %24.s[1] \n"
"ext v6.16b, v2.16b, v4.16b, #4 \n"
"fmla v9.4s, v5.4s, %24.s[2] \n"
"ext v5.16b, v1.16b, v3.16b, #8 \n"
"fmla v0.4s, v6.4s, %24.s[3] \n"
"ext v6.16b, v2.16b, v4.16b, #8 \n"
"fmla v9.4s, v5.4s, %25.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #12 \n"
"fmla v0.4s, v6.4s, %25.s[1] \n"
"fmla v9.4s, v5.4s, %25.s[2] \n"
//i = 5
"prfm pldl1keep, [%6, #512] \n"
"ld2 {v1.4s, v2.4s}, [%6] \n"
"add %6, %6, #32 \n"
"ld2 {v3.4s, v4.4s}, [%6] \n"
"fmla v9.4s, v1.4s, %26.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #4 \n"
"fmla v0.4s, v2.4s, %26.s[1] \n"
"ext v6.16b, v2.16b, v4.16b, #4 \n"
"fmla v9.4s, v5.4s, %26.s[2] \n"
"ext v5.16b, v1.16b, v3.16b, #8 \n"
"fmla v0.4s, v6.4s, %26.s[3] \n"
"ext v6.16b, v2.16b, v4.16b, #8 \n"
"fmla v9.4s, v5.4s, %27.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #12 \n"
"fmla v0.4s, v6.4s, %27.s[1] \n"
"fmla v9.4s, v5.4s, %27.s[2] \n"
//i = 6
"prfm pldl1keep, [%7, #512] \n"
"ld2 {v1.4s, v2.4s}, [%7] \n"
"add %7, %7, #32 \n"
"ld2 {v3.4s, v4.4s}, [%7] \n"
"fmla v9.4s, v1.4s, %28.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #4 \n"
"fmla v0.4s, v2.4s, %28.s[1] \n"
"ext v6.16b, v2.16b, v4.16b, #4 \n"
"fmla v9.4s, v5.4s, %28.s[2] \n"
"ext v5.16b, v1.16b, v3.16b, #8 \n"
"fmla v0.4s, v6.4s, %28.s[3] \n"
"ext v6.16b, v2.16b, v4.16b, #8 \n"
"fmla v9.4s, v5.4s, %29.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #12 \n"
"fmla v0.4s, v6.4s, %29.s[1] \n"
"fmla v9.4s, v5.4s, %29.s[2] \n"
//i = 7
"prfm pldl1keep, [%8, #512] \n"
"ld2 {v1.4s, v2.4s}, [%8] \n"
"add %8, %8, #32 \n"
"ld2 {v3.4s, v4.4s}, [%8] \n"
"fmla v9.4s, v1.4s, %30.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #4 \n"
"fmla v0.4s, v2.4s, %30.s[1] \n"
"ext v6.16b, v2.16b, v4.16b, #4 \n"
"fmla v9.4s, v5.4s, %30.s[2] \n"
"ext v5.16b, v1.16b, v3.16b, #8 \n"
"fmla v0.4s, v6.4s, %30.s[3] \n"
"ext v6.16b, v2.16b, v4.16b, #8 \n"
"fmla v9.4s, v5.4s, %31.s[0] \n"
"ext v5.16b, v1.16b, v3.16b, #12 \n"
"fmla v0.4s, v6.4s, %31.s[1] \n"
"fmla v9.4s, v5.4s, %31.s[2] \n"
"fadd v0.4s, v0.4s, v9.4s \n"
"st1 {v0.4s}, [%1], #16 \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(r5), // %7
"=r"(r6) // %8
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(r5),
"8"(r6),
"w"(_k0123), // %18
"w"(_k4567), // %19
"w"(_k78910), // %20
"w"(_k11121314), // %21
"w"(_k14151617), // %22
"w"(_k18192021), // %23
"w"(_k21222324), // %24
"w"(_k25262728), // %25
"w"(_k28293031), // %26
"w"(_k32333435), // %27
"w"(_k35363738), // %28
"w"(_k39404142), // %29
"w"(_k42434445), // %30
"w"(_k46474849) // %31
: "cc", "memory","v0", "v1", "v2", "v3", "v4", "v5", "v6", "v9"
);
}
#else // __ARM_NEON && __aarch64__ defined, but __clang__ not defined
// GCC rejects inline-assembly statements with more than 30 operands, so when not
// compiling with clang we fall back to the equivalent NEON-intrinsics loop below.
for (; nn>0; nn--)
{
float32x4_t _sum = vld1q_f32(outptr);
float32x4x2_t _r00_02461357 = vld2q_f32(r0);
float32x4x2_t _r00nx2 = vld2q_f32(r0 + 8);
float32x4_t _r0_8101214 = _r00nx2.val[0];// 8 10 12 14
float32x4_t _r0_9111315 = _r00nx2.val[1];// 9 11 13 15
float32x4_t _r00 = _r00_02461357.val[0];// 0 2 4 6
float32x4_t _r01 = _r00_02461357.val[1];// 1 3 5 7
float32x4_t _r02 = vextq_f32(_r00, _r0_8101214, 1);// 2 4 6 8
float32x4_t _r03 = vextq_f32(_r01, _r0_9111315, 1);// 3 5 7 9
float32x4_t _r04 = vextq_f32(_r00, _r0_8101214, 2);// 4 6 8 10
float32x4_t _r05 = vextq_f32(_r01, _r0_9111315, 2);// 5 7 9 11
float32x4_t _r06 = vextq_f32(_r00, _r0_8101214, 3);// 6 8 10 12
_sum = vfmaq_laneq_f32(_sum, _r00, _k0123, 0);
_sum = vfmaq_laneq_f32(_sum, _r01, _k0123, 1);
_sum = vfmaq_laneq_f32(_sum, _r02, _k0123, 2);
_sum = vfmaq_laneq_f32(_sum, _r03, _k0123, 3);
_sum = vfmaq_laneq_f32(_sum, _r04, _k4567, 0);
_sum = vfmaq_laneq_f32(_sum, _r05, _k4567, 1);
_sum = vfmaq_laneq_f32(_sum, _r06, _k4567, 2);
float32x4x2_t _r10_02461357 = vld2q_f32(r1);
float32x4x2_t _r10nx2 = vld2q_f32(r1 + 8);
float32x4_t _r1_8101214 = _r10nx2.val[0];
float32x4_t _r1_9111315 = _r10nx2.val[1];
float32x4_t _r10 = _r10_02461357.val[0];
float32x4_t _r11 = _r10_02461357.val[1];
float32x4_t _r12 = vextq_f32(_r10, _r1_8101214, 1);
float32x4_t _r13 = vextq_f32(_r11, _r1_9111315, 1);
float32x4_t _r14 = vextq_f32(_r10, _r1_8101214, 2);
float32x4_t _r15 = vextq_f32(_r11, _r1_9111315, 2);
float32x4_t _r16 = vextq_f32(_r10, _r1_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r10, _k78910, 0);
_sum = vfmaq_laneq_f32(_sum, _r11, _k78910, 1);
_sum = vfmaq_laneq_f32(_sum, _r12, _k78910, 2);
_sum = vfmaq_laneq_f32(_sum, _r13, _k78910, 3);
_sum = vfmaq_laneq_f32(_sum, _r14, _k11121314, 0);
_sum = vfmaq_laneq_f32(_sum, _r15, _k11121314, 1);
_sum = vfmaq_laneq_f32(_sum, _r16, _k11121314, 2);
float32x4x2_t _r20_02461357 = vld2q_f32(r2);
float32x4x2_t _r20nx2 = vld2q_f32(r2 + 8);
float32x4_t _r2_8101214 = _r20nx2.val[0];
float32x4_t _r2_9111315 = _r20nx2.val[1];
float32x4_t _r20 = _r20_02461357.val[0];
float32x4_t _r21 = _r20_02461357.val[1];
float32x4_t _r22 = vextq_f32(_r20, _r2_8101214, 1);
float32x4_t _r23 = vextq_f32(_r21, _r2_9111315, 1);
float32x4_t _r24 = vextq_f32(_r20, _r2_8101214, 2);
float32x4_t _r25 = vextq_f32(_r21, _r2_9111315, 2);
float32x4_t _r26 = vextq_f32(_r20, _r2_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r20, _k14151617, 0);
_sum = vfmaq_laneq_f32(_sum, _r21, _k14151617, 1);
_sum = vfmaq_laneq_f32(_sum, _r22, _k14151617, 2);
_sum = vfmaq_laneq_f32(_sum, _r23, _k14151617, 3);
_sum = vfmaq_laneq_f32(_sum, _r24, _k18192021, 0);
_sum = vfmaq_laneq_f32(_sum, _r25, _k18192021, 1);
_sum = vfmaq_laneq_f32(_sum, _r26, _k18192021, 2);
float32x4x2_t _r30_02461357 = vld2q_f32(r3);
float32x4x2_t _r30nx2 = vld2q_f32(r3 + 8);
float32x4_t _r3_8101214 = _r30nx2.val[0];
float32x4_t _r3_9111315 = _r30nx2.val[1];
float32x4_t _r30 = _r30_02461357.val[0];
float32x4_t _r31 = _r30_02461357.val[1];
float32x4_t _r32 = vextq_f32(_r30, _r3_8101214, 1);
float32x4_t _r33 = vextq_f32(_r31, _r3_9111315, 1);
float32x4_t _r34 = vextq_f32(_r30, _r3_8101214, 2);
float32x4_t _r35 = vextq_f32(_r31, _r3_9111315, 2);
float32x4_t _r36 = vextq_f32(_r30, _r3_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r30, _k21222324, 0);
_sum = vfmaq_laneq_f32(_sum, _r31, _k21222324, 1);
_sum = vfmaq_laneq_f32(_sum, _r32, _k21222324, 2);
_sum = vfmaq_laneq_f32(_sum, _r33, _k21222324, 3);
_sum = vfmaq_laneq_f32(_sum, _r34, _k25262728, 0);
_sum = vfmaq_laneq_f32(_sum, _r35, _k25262728, 1);
_sum = vfmaq_laneq_f32(_sum, _r36, _k25262728, 2);
float32x4x2_t _r40_02461357 = vld2q_f32(r4);
float32x4x2_t _r40nx2 = vld2q_f32(r4 + 8);
float32x4_t _r4_8101214 = _r40nx2.val[0];
float32x4_t _r4_9111315 = _r40nx2.val[1];
float32x4_t _r40 = _r40_02461357.val[0];
float32x4_t _r41 = _r40_02461357.val[1];
float32x4_t _r42 = vextq_f32(_r40, _r4_8101214, 1);
float32x4_t _r43 = vextq_f32(_r41, _r4_9111315, 1);
float32x4_t _r44 = vextq_f32(_r40, _r4_8101214, 2);
float32x4_t _r45 = vextq_f32(_r41, _r4_9111315, 2);
float32x4_t _r46 = vextq_f32(_r40, _r4_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r40, _k28293031, 0);
_sum = vfmaq_laneq_f32(_sum, _r41, _k28293031, 1);
_sum = vfmaq_laneq_f32(_sum, _r42, _k28293031, 2);
_sum = vfmaq_laneq_f32(_sum, _r43, _k28293031, 3);
_sum = vfmaq_laneq_f32(_sum, _r44, _k32333435, 0);
_sum = vfmaq_laneq_f32(_sum, _r45, _k32333435, 1);
_sum = vfmaq_laneq_f32(_sum, _r46, _k32333435, 2);
float32x4x2_t _r50_02461357 = vld2q_f32(r5);
float32x4x2_t _r50nx2 = vld2q_f32(r5 + 8);
float32x4_t _r5_8101214 = _r50nx2.val[0];
float32x4_t _r5_9111315 = _r50nx2.val[1];
float32x4_t _r50 = _r50_02461357.val[0];
float32x4_t _r51 = _r50_02461357.val[1];
float32x4_t _r52 = vextq_f32(_r50, _r5_8101214, 1);
float32x4_t _r53 = vextq_f32(_r51, _r5_9111315, 1);
float32x4_t _r54 = vextq_f32(_r50, _r5_8101214, 2);
float32x4_t _r55 = vextq_f32(_r51, _r5_9111315, 2);
float32x4_t _r56 = vextq_f32(_r50, _r5_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r50, _k35363738, 0);
_sum = vfmaq_laneq_f32(_sum, _r51, _k35363738, 1);
_sum = vfmaq_laneq_f32(_sum, _r52, _k35363738, 2);
_sum = vfmaq_laneq_f32(_sum, _r53, _k35363738, 3);
_sum = vfmaq_laneq_f32(_sum, _r54, _k39404142, 0);
_sum = vfmaq_laneq_f32(_sum, _r55, _k39404142, 1);
_sum = vfmaq_laneq_f32(_sum, _r56, _k39404142, 2);
float32x4x2_t _r60_02461357 = vld2q_f32(r6);
float32x4x2_t _r60nx2 = vld2q_f32(r6 + 8);
float32x4_t _r6_8101214 = _r60nx2.val[0];
float32x4_t _r6_9111315 = _r60nx2.val[1];
float32x4_t _r60 = _r60_02461357.val[0];
float32x4_t _r61 = _r60_02461357.val[1];
float32x4_t _r62 = vextq_f32(_r60, _r6_8101214, 1);
float32x4_t _r63 = vextq_f32(_r61, _r6_9111315, 1);
float32x4_t _r64 = vextq_f32(_r60, _r6_8101214, 2);
float32x4_t _r65 = vextq_f32(_r61, _r6_9111315, 2);
float32x4_t _r66 = vextq_f32(_r60, _r6_8101214, 3);
_sum = vfmaq_laneq_f32(_sum, _r60, _k42434445, 0);
_sum = vfmaq_laneq_f32(_sum, _r61, _k42434445, 1);
_sum = vfmaq_laneq_f32(_sum, _r62, _k42434445, 2);
_sum = vfmaq_laneq_f32(_sum, _r63, _k42434445, 3);
_sum = vfmaq_laneq_f32(_sum, _r64, _k46474849, 0);
_sum = vfmaq_laneq_f32(_sum, _r65, _k46474849, 1);
_sum = vfmaq_laneq_f32(_sum, _r66, _k46474849, 2);
vst1q_f32(outptr, _sum);
r0 += 8;
r1 += 8;
r2 += 8;
r3 += 8;
r4 += 8;
r5 += 8;
r6 += 8;
outptr += 4;
}
#endif // __clang__
#else
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%1, #256] \n"
"vld1.f32 {d26-d27}, [%1] \n"// _sum
// "veor q14, q14 \n"// _sum2 = 0;
// "veor q15, q15 \n"// _sum3 = 0;
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k0123 k4567
"add %9, #28 \n"
"pld [%2, #512] \n"
"vld2.f32 {d0-d3}, [%2]! \n"// q0 = 0 2 4 6 q1 = 1 3 5 7
"vmla.f32 q13, q0, d8[0] \n"
"vmul.f32 q14, q1, d8[1] \n"
"vld2.f32 {d4-d7}, [%2] \n"// q2 = 8 10 12 14 q3 = 9 11 13 15
"vext.32 q8, q0, q2, #1 \n"// q8 = 2 4 6 8
"vext.32 q9, q1, q3, #1 \n"// q9 = 3 5 7 9
"vmul.f32 q15, q8, d9[0] \n"
"vmla.f32 q13, q9, d9[1] \n"
"vext.32 q10, q0, q2, #2 \n"// q10= 4 6 8 10
"vext.32 q11, q1, q3, #2 \n"// q11= 5 7 9 11
"vmla.f32 q14, q10, d10[0] \n"
"vmla.f32 q15, q11, d10[1] \n"
"vext.32 q12, q0, q2, #3 \n"// q12= 6 8 10 12
"vmla.f32 q13, q12, d11[0] \n"
"pld [%9, #256] \n"
"vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k78910 k11121314
"add %9, #28 \n"
"pld [%3, #512] \n"
"vld2.f32 {d0-d3}, [%3]! \n"
"vmla.f32 q14, q0, d12[0] \n"
"vmla.f32 q15, q1, d12[1] \n"
"vld2.f32 {d4-d7}, [%3] \n"
"vext.32 q8, q0, q2, #1 \n"
"vext.32 q9, q1, q3, #1 \n"
"vmla.f32 q13, q8, d13[0] \n"
"vmla.f32 q14, q9, d13[1] \n"
"vext.32 q10, q0, q2, #2 \n"
"vext.32 q11, q1, q3, #2 \n"
"vmla.f32 q15, q10, d14[0] \n"
"vmla.f32 q13, q11, d14[1] \n"
"vext.32 q12, q0, q2, #3 \n"
"vmla.f32 q14, q12, d15[0] \n"
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k14151617 k18192021
"add %9, #28 \n"
"pld [%4, #512] \n"
"vld2.f32 {d0-d3}, [%4]! \n"
"vmla.f32 q15, q0, d8[0] \n"
"vmla.f32 q13, q1, d8[1] \n"
"vld2.f32 {d4-d7}, [%4] \n"
"vext.32 q8, q0, q2, #1 \n"
"vext.32 q9, q1, q3, #1 \n"
"vmla.f32 q14, q8, d9[0] \n"
"vmla.f32 q15, q9, d9[1] \n"
"vext.32 q10, q0, q2, #2 \n"
"vext.32 q11, q1, q3, #2 \n"
"vmla.f32 q13, q10, d10[0] \n"
"vmla.f32 q14, q11, d10[1] \n"
"vext.32 q12, q0, q2, #3 \n"
"vmla.f32 q15, q12, d11[0] \n"
"pld [%9, #256] \n"
"vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k21222324 k25262728
"add %9, #28 \n"
"pld [%5, #512] \n"
"vld2.f32 {d0-d3}, [%5]! \n"
"vmla.f32 q13, q0, d12[0] \n"
"vmla.f32 q14, q1, d12[1] \n"
"vld2.f32 {d4-d7}, [%5] \n"
"vext.32 q8, q0, q2, #1 \n"
"vext.32 q9, q1, q3, #1 \n"
"vmla.f32 q15, q8, d13[0] \n"
"vmla.f32 q13, q9, d13[1] \n"
"vext.32 q10, q0, q2, #2 \n"
"vext.32 q11, q1, q3, #2 \n"
"vmla.f32 q14, q10, d14[0] \n"
"vmla.f32 q15, q11, d14[1] \n"
"vext.32 q12, q0, q2, #3 \n"
"vmla.f32 q13, q12, d15[0] \n"
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k28293031 k32333435
"add %9, #28 \n"
"pld [%6, #512] \n"
"vld2.f32 {d0-d3}, [%6]! \n"
"vmla.f32 q14, q0, d8[0] \n"
"vmla.f32 q15, q1, d8[1] \n"
"vld2.f32 {d4-d7}, [%6] \n"
"vext.32 q8, q0, q2, #1 \n"
"vext.32 q9, q1, q3, #1 \n"
"vmla.f32 q13, q8, d9[0] \n"
"vmla.f32 q14, q9, d9[1] \n"
"vext.32 q10, q0, q2, #2 \n"
"vext.32 q11, q1, q3, #2 \n"
"vmla.f32 q15, q10, d10[0] \n"
"vmla.f32 q13, q11, d10[1] \n"
"vext.32 q12, q0, q2, #3 \n"
"vmla.f32 q14, q12, d11[0] \n"
"pld [%9, #256] \n"
"vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k35363738 k39404142
"add %9, #28 \n"
"pld [%7, #512] \n"
"vld2.f32 {d0-d3}, [%7]! \n"
"vmla.f32 q15, q0, d12[0] \n"
"vmla.f32 q13, q1, d12[1] \n"
"vld2.f32 {d4-d7}, [%7] \n"
"vext.32 q8, q0, q2, #1 \n"
"vext.32 q9, q1, q3, #1 \n"
"vmla.f32 q14, q8, d13[0] \n"
"vmla.f32 q15, q9, d13[1] \n"
"vext.32 q10, q0, q2, #2 \n"
"vext.32 q11, q1, q3, #2 \n"
"vmla.f32 q13, q10, d14[0] \n"
"vmla.f32 q14, q11, d14[1] \n"
"vext.32 q12, q0, q2, #3 \n"
"vmla.f32 q15, q12, d15[0] \n"
"pld [%9, #256] \n"
"vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k42434445 k46474849
"sub %9, #168 \n"// restore k0
"pld [%8, #512] \n"
"vld2.f32 {d0-d3}, [%8]! \n"
"vmla.f32 q13, q0, d8[0] \n"
"vmla.f32 q14, q1, d8[1] \n"
"vld2.f32 {d4-d7}, [%8] \n"
"vext.32 q8, q0, q2, #1 \n"
"vext.32 q9, q1, q3, #1 \n"
"vmla.f32 q15, q8, d9[0] \n"
"vmla.f32 q13, q9, d9[1] \n"
"vext.32 q10, q0, q2, #2 \n"
"vext.32 q11, q1, q3, #2 \n"
"vmla.f32 q14, q10, d10[0] \n"
"vmla.f32 q15, q11, d10[1] \n"
"vext.32 q12, q0, q2, #3 \n"
"vmla.f32 q13, q12, d11[0] \n"
"vadd.f32 q14, q14, q15 \n"
"vadd.f32 q13, q13, q14 \n"
"vst1.f32 {d26-d27}, [%1]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4), // %6
"=r"(r5), // %7
"=r"(r6), // %8
"=r"(k0) // %9
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"7"(r5),
"8"(r6),
"9"(k0)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
float sum = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r0[5] * k0[5];
sum += r0[6] * k0[6];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r1[5] * k1[5];
sum += r1[6] * k1[6];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r2[5] * k2[5];
sum += r2[6] * k2[6];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r3[5] * k3[5];
sum += r3[6] * k3[6];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
sum += r4[5] * k4[5];
sum += r4[6] * k4[6];
sum += r5[0] * k5[0];
sum += r5[1] * k5[1];
sum += r5[2] * k5[2];
sum += r5[3] * k5[3];
sum += r5[4] * k5[4];
sum += r5[5] * k5[5];
sum += r5[6] * k5[6];
sum += r6[0] * k6[0];
sum += r6[1] * k6[1];
sum += r6[2] * k6[2];
sum += r6[3] * k6[3];
sum += r6[4] * k6[4];
sum += r6[5] * k6[5];
sum += r6[6] * k6[6];
*outptr += sum;
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
r4 += 2;
r5 += 2;
r6 += 2;
outptr++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
r5 += tailstep;
r6 += tailstep;
}
}
}
}
|
GB_binop__bxor_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bxor_int8
// A.*B function (eWiseMult): GB_AemultB__bxor_int8
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__bxor_int8
// C+=b function (dense accum): GB_Cdense_accumb__bxor_int8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bxor_int8
// C=scalar+B GB_bind1st__bxor_int8
// C=scalar+B' GB_bind1st_tran__bxor_int8
// C=A+scalar GB_bind2nd__bxor_int8
// C=A'+scalar GB_bind2nd_tran__bxor_int8
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij) ^ (bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x) ^ (y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BXOR || GxB_NO_INT8 || GxB_NO_BXOR_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense, using cij = aij ^ bij (BXOR, int8_t),
// with no accumulator.  Returns GrB_NO_VALUE when this kernel is compiled out
// (GB_DISABLE), in which case the caller falls back to the generic method.
GrB_Info GB_Cdense_ewise3_noaccum__bxor_int8
(
GrB_Matrix C,           // output, already dense
const GrB_Matrix A,     // first input
const GrB_Matrix B,     // second input
const int nthreads      // # of threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the loop body comes from the shared template, specialized by the
// GB_* macros defined at the top of this file
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C with the BXOR
// int8_t operator.  The *_slice arrays describe the partition of B into
// ntasks parallel tasks (produced by GB_ek_slice; see GB_ek_slice.h).
GrB_Info GB_Cdense_accumB__bxor_int8
(
GrB_Matrix C,                                // dense matrix to accumulate into
const GrB_Matrix B,                          // sparse input
const int64_t *GB_RESTRICT kfirst_slice,     // task partition of B
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,                            // # of parallel tasks
const int nthreads                           // # of threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// loop body from the shared subassign template, specialized via the
// GB_* macros at the top of this file
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C with the BXOR int8_t
// operator.  Returns GrB_NO_VALUE when this kernel is compiled out
// (GB_DISABLE), in which case the caller falls back to the generic method.
//
// Fix: the original had two `return (GrB_SUCCESS)` statements, one inside the
// braced scope and one after it; the second was unreachable.  A single exit
// point is kept here, matching the structure of GB_Cdense_accumB above.
GrB_Info GB_Cdense_accumb__bxor_int8
(
GrB_Matrix C,              // dense matrix to accumulate into
const GB_void *p_bwork,    // pointer to the scalar b (holds an int8_t)
const int nthreads         // # of threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
// loop body from the shared subassign template, specialized via the
// GB_* macros at the top of this file
#include "GB_dense_subassign_22_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (node)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, where "+" is cij = aij ^ bij (BXOR, int8_t).
// The C_to_* maps and TaskList come from the add phase-1 analysis; this
// function performs the numerical phase (GB_PHASE_2_OF_2).
GrB_Info GB_AaddB__bxor_int8
(
GrB_Matrix C,                          // output matrix
const GrB_Matrix M,                    // optional mask (may be NULL)
const bool Mask_struct,                // if true, use M structurally
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,                   // presumably: C's hyperlist is M's -- see GB_add
const int64_t *GB_RESTRICT C_to_M,     // per-vector mappings used by the template
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,  // parallel task descriptors
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// loop body from the shared add template, specialized via the GB_* macros
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, where ".*" is cij = aij ^ bij
// (BXOR, int8_t).  Performs the numerical phase (GB_PHASE_2_OF_2) of emult.
GrB_Info GB_AemultB__bxor_int8
(
GrB_Matrix C,                          // output matrix
const GrB_Matrix M,                    // optional mask (may be NULL)
const bool Mask_struct,                // if true, use M structurally
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,     // per-vector mappings used by the template
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,  // parallel task descriptors
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// loop body from the shared emult template, specialized via the GB_* macros
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x ^ Bx [p] for all p: apply the BXOR int8_t operator with the
// scalar x bound as its first operand.
GrB_Info GB_bind1st__bxor_int8
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
const int8_t scalar = (*((int8_t *) x_input)) ;
const int8_t *Bx = (const int8_t *) Bx_input ;
int8_t *Cx = (int8_t *) Cx_output ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
Cx [p] = (scalar) ^ (Bx [p]) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] ^ y for all p: apply the BXOR int8_t operator with the
// scalar y bound as its second operand.
GrB_Info GB_bind2nd__bxor_int8
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
const int8_t *Ax = (const int8_t *) Ax_input ;
const int8_t scalar = (*((int8_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
Cx [p] = (Ax [p]) ^ (scalar) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (x) ^ (aij) ; \
}
// C = op (x, A'): transpose A and apply cij = x ^ aij, with the scalar x
// bound as the first operand (see the GB_CAST_OP macro defined just above).
GrB_Info GB_bind1st_tran__bxor_int8
(
GrB_Matrix C,
const GB_void *x_input,              // pointer to the bound scalar (int8_t)
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,     // workspace for the transpose
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,  // partition of A across naslice slices
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the remainder of this file
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (aij) ^ (y) ; \
}
// C = op (A', y): transpose A and apply cij = aij ^ y, with the scalar y
// bound as the second operand (see the GB_CAST_OP macro defined just above).
GrB_Info GB_bind2nd_tran__bxor_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,              // pointer to the bound scalar (int8_t)
int64_t *GB_RESTRICT *Rowcounts,     // workspace for the transpose
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,  // partition of A across naslice slices
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
LambdaRenderer.h | #ifndef LambdaRenderer_H
#define LambdaRenderer_H
#include "Renderers/Renderer.h"
#include "Cameras/Camera.h"
/*
----------------------READ ME----------------------------
More or less written as a concept, to see whether this is a good direction
to be heading in.
I'm documenting everything that remains to be done so it stays clear,
as most of this is totally foreign to me.
Goal: to implement a diffuse object.
Knowing how this^ works, we continue with MORE advanced classes/methods.
( See http://people.csail.mit.edu/wojciech/BRDFValidation/ExperimentalValidation-talk.pdf
slide 19. )
This seems like a big step, so we need to discuss it in the morning.
/Michal
---------------------------------------------------------
*/
// The two constants below are somehow connected to the color-matching curves;
// since I have no clue how those work, I'm leaving them as they are.
#define SAMPLING_RESOLUTION 5
#define NR_OF_WAVELENGTHS (1+(400/SAMPLING_RESOLUTION))
template<int W,int H>
class LambdaRenderer : public Renderer<W,H>{
public:
    // Spectral-renderer skeleton: traces one camera ray per pixel, and is
    // intended to eventually sample NR_OF_WAVELENGTHS wavelengths (380-780 nm,
    // step SAMPLING_RESOLUTION) per ray before converting the spectrum to RGB.
    LambdaRenderer(const std::shared_ptr<LightModel> &lightModel, const std::shared_ptr<Camera> &camera)
        : Renderer<W,H>(lightModel, camera){
    }

    // Fill `image` (W*H pixels, row-major) with the traced color of each pixel.
    // The wavelength loop below is still scaffolding; only the plain RGB
    // trace at the end writes into the image.
    void compute(Vec3<float> *image) const{
        // STEP 0 (planned): give every OpenMP thread its own Mersenne Twister
        // (mt19937) seed and pass it through the whole BRDF pipeline.
        //   http://en.wikipedia.org/wiki/Mersenne_twister
        //   http://www.pgroup.com/lit/articles/insider/v2n2a4.htm
        // The Mersenne Twister is widely described as having a very good
        // distribution and being suitable for Monte Carlo tracing.
        #pragma omp parallel for
        for (int row = 0; row < H; ++row){
            for (int col = 0; col < W; ++col){
                const int pixel = col + W*row;
                // STEP 1 (planned): per-wavelength radiance. The eventual trace
                // should take the camera ray, the wavelength, and the per-thread
                // RNG:  tot_rad[l] = lightModel_->trace(ray, l, prng);
                // For now the sampled spectrum is never used and stays zero.
                float radiance[NR_OF_WAVELENGTHS] = {0.0f};
                for(int l = 0; l < NR_OF_WAVELENGTHS; l++){
                    float lambda = 380.0f + SAMPLING_RESOLUTION * l;
                    //radiance[l] += Renderer<W,H>::lightModel_->radiance(ray, l);
                }
                // STEP 2 (planned):
                //   image[pixel] = ConvertSpectrumToRGBVector(radiance);
                // Current behaviour: ordinary (non-spectral) trace of the
                // camera ray through raster space.
                Ray ray = Renderer<W,H>::camera_->rasterSpace(col,row,W,H);
                image[pixel] = Renderer<W,H>::lightModel_->trace(ray);
            }
        }
    }
};
#endif |
macroCalls.c | //Extracted from SMG2000
// Test case (extracted from SMG2000) exercising macros used inside an OpenMP
// "#pragma" directive.
// NOTE(review): standard C preprocessors generally do not macro-expand the
// tokens of a #pragma directive, so whether HYPRE_SMP_PRIVATE is expanded
// inside the private() clause below is compiler/tool dependent -- handling
// that case correctly is the point of this test.
int a[100][100];
// Fills a[i][j] = i + j via the privatized temporaries hypre__nx/hypre__ny.
void foo()
{
int i,j;
int hypre__nx,hypre__ny;
// a macro defined in terms of another macro, both referenced by the pragma
#define HYPRE_BOX_SMP_PRIVATE i,j
#define HYPRE_SMP_PRIVATE \
HYPRE_BOX_SMP_PRIVATE,hypre__nx,hypre__ny
#pragma omp parallel for private (HYPRE_SMP_PRIVATE)
for (i=0;i<100; i++)
for (j=0;j<100; j++)
{
hypre__nx =i;
hypre__ny=j;
a[i][j]=hypre__nx+hypre__ny;
}
}
|
GB_unaryop__minv_fp32_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_fp32_int8
// op(A') function: GB_tran__minv_fp32_int8
// C type: float
// A type: int8_t
// cast: float cij = (float) aij
// unaryop: cij = (1.0F)/aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = (1.0F)/x ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_FP32 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = 1.0F / (float) Ax [p] for all p: typecast int8_t to float, then
// apply the multiplicative-inverse (MINV) operator.  Note that an input of 0
// produces +Inf under IEEE float division.
GrB_Info GB_unop__minv_fp32_int8
(
float *restrict Cx,          // output array
const int8_t *restrict Ax,   // input array
int64_t anz,                 // number of entries
int nthreads                 // # of threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// expands to: int8_t aij = Ax [p] ; float z = (float) aij ; Cx [p] = 1.0F/z
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast int8_t to float, and apply the
// MINV operator, cij = 1/(float) aij.
GrB_Info GB_tran__minv_fp32_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,                // workspace for the transpose
GBI_single_iterator Iter,
const int64_t *restrict A_slice,    // partition of A across naslice slices
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// numerical phase of the shared transpose template, specialized via the
// GB_* macros at the top of this file
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__carg_fp64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__carg_fp64_fc64)
// op(A') function: GB (_unop_tran__carg_fp64_fc64)
// C type: double
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = (aij)
// unaryop: cij = carg (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = carg (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = (aij) ; \
Cx [pC] = carg (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_CARG || GxB_NO_FP64 || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = carg (Ax [p]) for every entry present in A: the complex argument
// (phase angle) of each GxB_FC64_t entry, as a double.
GrB_Info GB (_unop_apply__carg_fp64_fc64)
(
double *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// full case: all anz entries are present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
Cx [p] = carg (Ax [p]) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b.
// Only positions flagged in Ab hold entries.
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (Ab [p])
{
Cx [p] = carg (Ax [p]) ;
}
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A and apply cij = carg (aij), producing a
// double (fp64) matrix from a double-complex (fc64) input.
GrB_Info GB (_unop_tran__carg_fp64_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,      // workspace for the transpose
const int64_t *restrict A_slice,    // partition of A across the workspaces
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// loop body from the shared transpose template, specialized via the
// GB_* macros at the top of this file
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
|
repeat_base.h | // ==========================================================================
// SeqAn - The Library for Sequence Analysis
// ==========================================================================
// Copyright (c) 2006-2013, Knut Reinert, FU Berlin
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of Knut Reinert or the FU Berlin nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
//
// ==========================================================================
// Author: David Weese <david.weese@fu-berlin.de>
// ==========================================================================
#ifndef SEQAN_HEADER_REPEAT_BASE_H
#define SEQAN_HEADER_REPEAT_BASE_H
#if SEQAN_ENABLE_PARALLELISM
#include <seqan/parallel.h>
#endif // #if SEQAN_ENABLE_PARALLELISM
namespace seqan {
/**
.Class.Repeat
..summary:Store information about a repeat.
..cat:Index
..signature:Repeat<TPos, TPeriod>
..param.TPos:Type to use for storing positions.
...metafunction:Metafunction.Value
..param.TPeriod:Type to use for storing the repeat period.
...default:1
...metafunction:Metafunction.Size
..include:seqan/index.h
..see:Function.findRepeats
.Memvar.Repeat#beginPosition
..summary:The begin position of the repeat of type $TPos$.
..class:Class.Repeat
.Memvar.Repeat#endPosition
..summary:The end position of the repeat of type $TPos$.
..class:Class.Repeat
.Memvar.Repeat#period
..summary:The period of the repeat of type $TSize$.
..class:Class.Repeat
*/
// Record describing one repeat region: the half-open interval
// [beginPosition, endPosition) and the period of the repeated unit.
template <typename TPos, typename TPeriod>
struct Repeat {
TPos beginPosition;   // first position of the repeat
TPos endPosition;     // position behind the last character of the repeat
TPeriod period;       // length of the repeated unit
};
// Metafunction: the value (position) type of a Repeat.
template <typename TPos, typename TPeriod>
struct Value< Repeat<TPos, TPeriod> > {
typedef TPos Type;
};
// Metafunction: the size (period) type of a Repeat.
template <typename TPos, typename TPeriod>
struct Size< Repeat<TPos, TPeriod> > {
typedef TPeriod Type;
};
// Search parameters handed to the repeat finder through the index cargo.
template <typename TSize>
struct RepeatFinderParams {
TSize minRepeatLen;   // minimal length a reported repeat must have
TSize maxPeriod;      // maximal period a reported repeat may have
};
// custom TSpec for our customized wotd-Index
struct TRepeatFinder;
// The cargo of the repeat-finder index carries the search parameters
// (minRepeatLen, maxPeriod) used by the node predicates below.
template <typename TText>
struct Cargo<Index<TText, IndexWotd<TRepeatFinder> > >
{
typedef Index<TText, IndexWotd<TRepeatFinder> > TIndex;
typedef typename Size<TIndex>::Type TSize;
typedef RepeatFinderParams<TSize> Type;
};
// Node predicate: a node is kept iff its total repeat mass -- the number
// of occurrences times the representative length -- reaches the configured
// minimum repeat length.
template <typename TText, typename TSpec>
bool nodePredicate(Iter<Index<TText, IndexWotd<TRepeatFinder> >, TSpec> &it)
{
    return cargo(container(it)).minRepeatLen <= countOccurrences(it) * repLength(it);
}
// Monotonic hull predicate: descend into a node only while the length of
// its representative does not exceed the configured maximal period.
template <typename TText, typename TSpec>
bool nodeHullPredicate(Iter<Index<TText, IndexWotd<TRepeatFinder> >, TSpec> &it)
{
    return cargo(container(it)).maxPeriod >= repLength(it);
}
// Strict weak ordering on positions, delegating to posLess().
// NOTE: the argument/result typedefs formerly inherited from
// std::binary_function are spelled out explicitly, because
// std::binary_function was deprecated in C++11 and removed in C++17.
template <typename TPos>
struct RepeatLess_
{
	typedef TPos first_argument_type;
	typedef TPos second_argument_type;
	typedef bool result_type;

	// key less; const so it can be invoked through a const comparator
	// (e.g. when stored inside a std::map).
	inline bool operator() (TPos const &a, TPos const &b) const {
		return posLess(a, b);
	}
};
// Returns true iff the given alphabet value is a "mask" character whose
// runs are always reported as repeats regardless of length.
// Default: no value of an unspecialized alphabet is masked.
template <typename TValue>
inline bool _repeatMaskValue(TValue const &)
{
// TODO(holtgrew): Maybe use unknownValue<TValue>() instead of specializing for all alphabets, especially since we have Rna5 now and might want Rna5Q later.
return false;
}
// Dna5: the unknown character 'N' is the mask value.
template <>
inline bool _repeatMaskValue(Dna5 const &val)
{
return val == unknownValue<Dna5>(); // 'N'
}
// Dna5Q: the unknown character 'N' is the mask value.
template <>
inline bool _repeatMaskValue(Dna5Q const &val)
{
return val == unknownValue<Dna5Q>(); // 'N'
}
// Iupac: the unknown character 'N' is the mask value.
template <>
inline bool _repeatMaskValue(Iupac const &val)
{
return val == unknownValue<Iupac>(); // 'N'
}
/*
template <>
inline bool _repeatMaskValue(AminoAcid val)
{
return val == 'X';
}
*/
/**
.Function.findRepeats
..summary:Search for repeats in a text.
..cat:Index
..signature:findRepeats(repeatString, text, minRepeatLength[, maxPeriod])
..param.repeatString:A @Class.String@ of @Class.Repeat@ objects.
..param.text:The text to search repeats in.
...type:Class.String
...type:Class.StringSet
..param.minRepeatLength:The minimum length each reported repeat must have.
..param.maxPeriod:Optionally, the maximal period that reported repeats can have.
...default:1
..remarks:Subsequences of undefined values/$N$s will always be reported.
..example.text:The following demonstrates finding repeats of period 1.
..example.code:
String<Repeat<unsigned, unsigned> > repeats;
Dna5String text = "CGATAAAACTNN";
// repeat 0 AAAA
// repeat 1 NN
findRepeats(repeats, text, 3);
// ==> length(repeats) == 2
// ==> repeats[0] == {beginPosition: 4, endPosition: 8, period: 1}
// ==> repeats[1] == {beginPosition: 11, endPosition: 13, period: 1}
..see:Function.unknownValue
..include:seqan/index.h
..see:Class.Repeat
*/
// TODO(holtgrew): minRepeatLength is 1-off.
// period-1 optimization
// Find all period-1 repeats (maximal runs of a single character) in `text`
// and store them in `repString`. Runs of mask characters (see
// _repeatMaskValue, e.g. 'N') are always reported; other runs only if
// longer than minRepeatLen. With SEQAN_ENABLE_PARALLELISM and a long
// enough text, the text is split into per-thread chunks whose partial
// results are merged ("mended") at the splice points afterwards.
template <typename TRepeatStore, typename TString, typename TRepeatSize>
inline void findRepeats(TRepeatStore &repString, TString const &text, TRepeatSize minRepeatLen)
{
typedef typename Value<TRepeatStore>::Type TRepeat;
typedef typename Iterator<TString const>::Type TIterator;
typedef typename Size<TString>::Type TSize;
#if SEQAN_ENABLE_PARALLELISM
typedef typename Value<TString>::Type TValue;
// Parallelize only if every thread gets at least ~2*minRepeatLen characters.
if (length(text) > (TSize)(omp_get_max_threads() * 2 * minRepeatLen)) {
// std::cerr << ">>> PARALLEL WABOOGIE!" << std::endl;
// std::cerr << "omp_get_max_threads() == " << omp_get_max_threads() << std::endl;
// Parallel case.
// NOTE(holtgrew): The minimum text length check above makes it impossible that more than two chunks are
// required to form an otherwise too short repeat.
// TODO(holtgrew): Load balancing? Probably not worth it.
String<TSize> splitters;
String<TRepeatStore> threadLocalStores;
// Each threads finds repeats on its chunk in parallel.
#pragma omp parallel
{
// We have to determine the number of available threads at this point. We will use the number of thread
// local stores to determin the number of available threads later on.
#pragma omp master
{
// std::cerr << "omp_get_num_threads() == " << omp_get_num_threads() << std::endl;
computeSplitters(splitters, length(text), omp_get_num_threads());
resize(threadLocalStores, omp_get_num_threads());
} // end of #pragma omp master
#pragma omp barrier
int const t = omp_get_thread_num();
TRepeatStore & store = threadLocalStores[t];
TRepeat rep;
rep.beginPosition = 0;
rep.endPosition = 0;
rep.period = 1;
// Flags used for force-adding repeats for the chunks that have a left/right neighbour.
// A forced border repeat is later either merged with its neighbour's or
// discarded in the "too short" pass below.
bool forceFirst = t > 0;
bool forceLast = (t + 1) < omp_get_num_threads();
// #pragma omp critical
// std::cerr << "omp_get_num_threads() == " << omp_get_num_threads() << std::endl;
TIterator it = iter(text, splitters[t], Standard());
TIterator itEnd = iter(text, splitters[t + 1], Standard());
if (it != itEnd)
{
// Scan this thread's chunk; repLeft/repRight are chunk-relative positions.
TValue last = *it;
TSize repLeft = 0;
TSize repRight = 1;
for (++it; it != itEnd; ++it, ++repRight)
{
if (*it != last)
{
// #pragma omp critical
// std::cerr << "t == " << t << ", last == " << last << ", repRight = " << repRight << ", repLeft == " << repLeft << ", minRepeatLen = " << minRepeatLen << ", forceFirst = " << forceFirst << std::endl;
if (_repeatMaskValue(last) || (TRepeatSize)(repRight - repLeft) > minRepeatLen || forceFirst)
{
forceFirst = false;
// insert repeat (positions made absolute with the chunk offset)
rep.beginPosition = splitters[t] + repLeft;
rep.endPosition = splitters[t] + repRight;
// #pragma omp critical
// std::cerr << " t == " << t << ", append" << std::endl;
appendValue(store, rep);
}
repLeft = repRight;
last = *it;
}
}
// #pragma omp critical
// std::cerr << "t == " << t << ", last == " << last << ", repRight = " << repRight << ", repLeft == " << repLeft << ", minRepeatLen = " << minRepeatLen << ", forceLast = " << forceLast << std::endl;
if (_repeatMaskValue(last) || (TRepeatSize)(repRight - repLeft) > minRepeatLen || forceLast)
{
// Insert repeat but only if it is not already in there.
// NOTE(review): back(store) holds absolute positions (splitters[t] + ...)
// while repLeft/repRight are chunk-relative here, so this duplicate check
// can apparently only ever match for t == 0 -- confirm this is intended.
if (empty(store) || (back(store).beginPosition != repLeft && back(store).endPosition != repRight))
{
rep.beginPosition = splitters[t] + repLeft;
rep.endPosition = splitters[t] + repRight;
// #pragma omp critical
// std::cerr << " t == " << t << ", append" << std::endl;
appendValue(store, rep);
}
}
}
} // end of #pragma omp parallel
// std::cerr << ",-- REPEATS BEFORE MENDING\n";
// for (unsigned i = 0; i < length(threadLocalStores); ++i)
// {
// std::cerr << "| i = " << i << std::endl;
// for (unsigned j = 0; j < length(threadLocalStores[i]); ++j)
// std::cerr << "| threadLocalStores[" << i << "][" << j << "] == {" << threadLocalStores[i][j].beginPosition << ", " << threadLocalStores[i][j].endPosition << "}" << std::endl;
// }
// std::cerr << "`--" << std::endl;
// Mend the splice points.
//
// We will copy out infixes described by fromPositions.
// fromPositions[i] = (first, behind-last) index of the repeats of thread i
// that survive the mending and make it into the final result.
String<Pair<TSize> > fromPositions;
resize(fromPositions, length(threadLocalStores));
for (unsigned i = 0; i < length(fromPositions); ++i)
{
fromPositions[i].i1 = 0;
fromPositions[i].i2 = length(threadLocalStores[i]);
}
// First, merge repeats spanning blocks. Do this iteratively until all has been merged.
bool anyChange;
do
{
anyChange = false;
int lastNonEmpty = -1;
for (unsigned i = 0; i < length(threadLocalStores); ++i)
{
if (fromPositions[i].i1 == fromPositions[i].i2)
continue; // Skip empty buckets.
if (lastNonEmpty != -1)
{
// Merge the previous bucket's last repeat into this bucket's first one
// when they touch at the splice point and repeat the same character.
bool const adjacent = back(threadLocalStores[lastNonEmpty]).endPosition == front(threadLocalStores[i]).beginPosition;
bool const charsEqual = text[back(threadLocalStores[lastNonEmpty]).beginPosition] == text[front(threadLocalStores[i]).beginPosition];
if (adjacent && charsEqual)
{
anyChange = true;
back(threadLocalStores[lastNonEmpty]).endPosition = front(threadLocalStores[i]).endPosition;
fromPositions[i].i1 += 1;
}
}
if (fromPositions[i].i1 != fromPositions[i].i2)
lastNonEmpty = i;
}
}
while (anyChange);
// Then, remove any repeats in the beginning and end of blocks that are too short.
for (unsigned i = 0; i < length(threadLocalStores); ++i)
{
if (fromPositions[i].i1 == fromPositions[i].i2)
continue;
unsigned j = fromPositions[i].i1;
TRepeatSize len = threadLocalStores[i][j].endPosition - threadLocalStores[i][j].beginPosition;
if (!_repeatMaskValue(text[threadLocalStores[i][j].beginPosition]) && // Never remove mask value.
len <= minRepeatLen)
fromPositions[i].i1 += 1;
if (fromPositions[i].i1 == fromPositions[i].i2)
continue;
j = fromPositions[i].i2 - 1;
len = threadLocalStores[i][j].endPosition - threadLocalStores[i][j].beginPosition;
if (!_repeatMaskValue(text[threadLocalStores[i][j].beginPosition]) && // Never remove mask value.
len <= minRepeatLen)
fromPositions[i].i2 -= 1;
}
// Last, build splitters for output in parallel.
String<unsigned> outSplitters;
appendValue(outSplitters, 0);
for (unsigned i = 0; i < length(threadLocalStores); ++i)
appendValue(outSplitters, back(outSplitters) + fromPositions[i].i2 - fromPositions[i].i1);
// std::cerr << ",-- REPEATS AFTER MENDING\n";
// for (unsigned i = 0; i < length(threadLocalStores); ++i)
// {
// std::cerr << "| i = " << i << std::endl;
// std::cerr << "`--, fromPositions[" << i << "] = (" << fromPositions[i].i1 << ", " << fromPositions[i].i2 << std::endl;
// for (unsigned j = 0; j < length(threadLocalStores[i]); ++j)
// std::cerr << " | threadLocalStores[" << i << "][" << j << "] == {" << threadLocalStores[i][j].beginPosition << ", " << threadLocalStores[i][j].endPosition << "}" << std::endl;
// }
// std::cerr << " `--" << std::endl;
// Allocate memory.
clear(repString);
resize(repString, back(outSplitters));
// Copy back the repeats in parallel.
unsigned nt = length(threadLocalStores);
(void) nt; // Otherwise, GCC 4.6 warns, does not see it used in pragma clause below.
#pragma omp parallel num_threads(nt)
{
int const t = omp_get_thread_num();
arrayCopy(iter(threadLocalStores[t], fromPositions[t].i1, Standard()),
iter(threadLocalStores[t], fromPositions[t].i2, Standard()),
iter(repString, outSplitters[t], Standard()));
} // end of #pragma omp parallel
} else {
#endif // #if SEQAN_ENABLE_PARALLELISM
// Sequential case: a single linear scan tracking the current run length.
TRepeat rep;
rep.period = 1;
clear(repString);
TIterator it = begin(text, Standard());
TIterator itEnd = end(text, Standard());
if (it == itEnd) return;
TSize repLen = 1;
for (++it; it != itEnd; ++it)
{
if (*it != *(it-1))
{
if (_repeatMaskValue(*(it-1)) || repLen > (TSize)minRepeatLen)
{
// insert repeat
rep.endPosition = it - begin(text, Standard());
rep.beginPosition = rep.endPosition - repLen;
// ::std::cerr<<"left:"<<rep.beginPosition<<" right:"<<rep.endPosition<<" length:"<<posSub(rep.endPosition,rep.beginPosition)<<" period:"<<rep.period<<::std::endl;
appendValue(repString, rep);
}
repLen = 1;
} else
++repLen;
}
// Flush the final run, which extends to the end of the text.
if (_repeatMaskValue(*(it-1)) || repLen > (TSize)minRepeatLen)
{
// insert repeat
rep.endPosition = length(text);
rep.beginPosition = rep.endPosition - repLen;
// ::std::cerr<<"left:"<<rep.beginPosition<<" right:"<<rep.endPosition<<" length:"<<posSub(rep.endPosition,rep.beginPosition)<<" period:"<<rep.period<<::std::endl;
appendValue(repString, rep);
}
#if SEQAN_ENABLE_PARALLELISM
}
#endif // #if SEQAN_ENABLE_PARALLELISM
// #pragma omp critical
// {
// std::cerr << "thread #" << omp_get_thread_num() << " REPEATS:";
// for (unsigned i = 0; i < length(repString); ++i) {
// std::cerr << " (" << repString[i].beginPosition << ", " << repString[i].endPosition << ", " << repString[i].period << ")";
// }
// std::cerr << std::endl;
// }
}
// TODO(holtgrew): Why for TString const and StringSet<> const?
template <typename TRepeatStore, typename TString, typename TSpec, typename TRepeatSize>
inline void findRepeats(TRepeatStore &repString, StringSet<TString, TSpec> const &text, TRepeatSize minRepeatLen)
{
typedef typename Value<TRepeatStore>::Type TRepeat;
typedef typename Iterator<TString>::Type TIterator;
typedef typename Value<TString>::Type TValue;
typedef typename Size<TString>::Type TSize;
TRepeat rep;
rep.period = 1;
clear(repString);
for (unsigned i = 0; i < length(text); ++i)
{
TIterator it = begin(text[i], Standard());
TIterator itEnd = end(text[i], Standard());
if (it == itEnd) continue;
TValue last = *it;
TSize repLeft = 0;
TSize repRight = 1;
rep.beginPosition.i1 = i;
rep.endPosition.i1 = i;
for (++it; it != itEnd; ++it, ++repRight)
{
if (last != *it)
{
if (_repeatMaskValue(last) || (TRepeatSize)(repRight - repLeft) > minRepeatLen)
{
// insert repeat
rep.beginPosition.i2 = repLeft;
rep.endPosition.i2 = repRight;
// ::std::cerr<<"left:"<<rep.beginPosition<<" right:"<<rep.endPosition<<" length:"<<posSub(rep.endPosition,rep.beginPosition)<<" period:"<<rep.period<<::std::endl;
appendValue(repString, rep);
}
repLeft = repRight;
last = *it;
}
}
if (_repeatMaskValue(last) || (TRepeatSize)(repRight - repLeft) > minRepeatLen)
{
// insert repeat
rep.beginPosition.i2 = repLeft;
rep.endPosition.i2 = repRight;
// ::std::cerr<<"left:"<<rep.beginPosition<<" right:"<<rep.endPosition<<" length:"<<posSub(rep.endPosition,rep.beginPosition)<<" period:"<<rep.period<<::std::endl;
appendValue(repString, rep);
}
}
}
// main function
// Find repeats with period up to maxPeriod by traversing a wotd suffix
// tree built over the text; the period-1 case is delegated to the
// specialized linear-scan overload above.
template <typename TRepeatStore, typename TText, typename TRepeatSize, typename TPeriodSize>
void findRepeats(TRepeatStore &repString, TText const &text, TRepeatSize minRepeatLen, TPeriodSize maxPeriod)
{
typedef Index<TText, IndexWotd<TRepeatFinder> > TIndex;
typedef typename Size<TIndex>::Type TSize;
typedef typename Iterator<TIndex, TopDown<ParentLinks<> > >::Type TNodeIterator;
typedef typename Fibre<TIndex, FibreSA>::Type const TSA;
typedef typename Infix<TSA>::Type TOccString;
typedef typename Iterator<TOccString>::Type TOccIterator;
typedef typename Value<TRepeatStore>::Type TRepeat;
typedef typename Value<TOccString>::Type TOcc;
// Repeats are collected in a map keyed by begin position so the final
// result comes out sorted (via RepeatLess_/posLess).
typedef ::std::map<TOcc,TRepeat,RepeatLess_<TOcc> > TRepeatList;
if (maxPeriod < 1) return;
if (maxPeriod == 1)
{
// Delegate to the specialized period-1 scan.
findRepeats(repString, text, minRepeatLen);
return;
}
TIndex index(text);
TRepeatList list;
// set repeat finder parameters
cargo(index).minRepeatLen = minRepeatLen;
cargo(index).maxPeriod = maxPeriod;
TNodeIterator nodeIt(index);
TOccIterator itA, itB, itRepBegin, itEnd;
TRepeat rep;
for (; !atEnd(nodeIt); goNext(nodeIt))
{
if (isRoot(nodeIt)) continue;
// get occurrences
TOccString occ = getOccurrences(nodeIt);
itA = begin(occ, Standard());
itEnd = end(occ, Standard());
itRepBegin = itB = itA;
TSize repLen = repLength(nodeIt); // representative length
// NOTE(review): nodes whose representative already reaches minRepeatLen
// are skipped here -- presumably their repeats are handled at a
// shallower node; confirm against the SeqAn repeat-finder design.
if ((TSize)minRepeatLen <= repLen) continue;
TSize diff, period = 0; // period of current repeat
TSize repeatLen = 0; // overall length of current repeat
TSize minLen = minRepeatLen - repLen; // minimum repeat length minus length of representative
// Scan the sorted occurrence positions; equally spaced consecutive
// occurrences in the same sequence form one tandem repeat.
for (++itB; itB != itEnd; ++itB)
{
diff = posSub(*itB, *itA);
if (diff != period || getSeqNo(*itA) != getSeqNo(*itB))
{
// is the repeat long enough?
if (repeatLen >= minLen)
// is the repeat self overlapping or connected?
if (parentRepLength(nodeIt) < period && period <= repLen)
{
// insert repeat
rep.beginPosition = *itRepBegin;
rep.endPosition = posAdd(*itA, period);
rep.period = period;
// ::std::cerr<<"left:"<<rep.beginPosition<<" right:"<<rep.endPosition<<" length:"<<posSub(rep.endPosition,rep.beginPosition)<<" period:"<<rep.period<<::std::endl;
list.insert(::std::pair<TOcc,TRepeat>(rep.beginPosition, rep));
}
itRepBegin = itA;
period = diff;
repeatLen = 0;
}
repeatLen += period;
itA = itB;
}
// is the last repeat long enough?
if (repeatLen >= minLen)
// is the repeat self overlapping or connected?
if (parentRepLength(nodeIt) < period && period <= repLen)
{
// insert repeat
rep.beginPosition = *itRepBegin;
rep.endPosition = posAdd(*itA, period);
rep.period = period;
// ::std::cerr<<"left:"<<rep.beginPosition<<" right:"<<rep.endPosition<<" length:"<<posSub(rep.endPosition,rep.beginPosition)<<" period:"<<rep.period<<::std::endl;
list.insert(::std::pair<TOcc,TRepeat>(rep.beginPosition, rep));
}
}
// copy low-complex regions to result string
clear(repString);
reserve(repString, list.size(), Exact());
typename TRepeatList::const_iterator lit = list.begin();
typename TRepeatList::const_iterator litEnd = list.end();
for (TSize i = 0; lit != litEnd; ++lit, ++i)
appendValue(repString, (*lit).second);
}
} // namespace seqan
#endif
|
AsagiReader.h | /**
* @file
* This file is part of SeisSol.
*
* @author Sebastian Rettenberger (sebastian.rettenberger AT tum.de, http://www5.in.tum.de/wiki/index.php/Sebastian_Rettenberger)
*
* @section LICENSE
* Copyright (c) 2016-2017, SeisSol Group
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* @section DESCRIPTION
* Velocity field reader Fortran interface
*/
#ifndef ASAGIREADER_H
#define ASAGIREADER_H
#include "Parallel/MPI.h"
#include <asagi.h>
#include <easi/util/AsagiReader.h>
#include "utils/env.h"
#include "utils/logger.h"
#include "AsagiModule.h"
#include "Monitoring/instrumentation.fpp"
namespace seissol
{
namespace asagi
{
/** NUMA communication strategies, selectable via SEISSOL_ASAGI_NUMA_MODE. */
enum NUMACache_Mode
{
NUMA_OFF, NUMA_ON, NUMA_CACHE
};
/**
 * Opens ASAGI grids for easi, configured through environment variables
 * that share the prefix passed to the constructor.
 */
class AsagiReader : public easi::AsagiReader
{
private:
	/** Prefix for environment variables */
	const std::string m_envPrefix;

	/** Number of threads used by ASAGI (determined in open(); 0 before) */
	unsigned int m_asagiThreads;

#ifdef USE_MPI
	/** MPI communicator used by ASAGI */
	MPI_Comm m_comm;
#endif

public:
	AsagiReader( const char* envPrefix
#ifdef USE_MPI
		, MPI_Comm comm = seissol::MPI::mpi.comm()
#endif
	) : m_envPrefix(envPrefix),
		// Give the thread count a defined value so numberOfThreads()
		// cannot return an indeterminate value if it is queried before
		// open() has been called.
		m_asagiThreads(0)
#ifdef USE_MPI
		, m_comm(comm)
#endif
	{}

	/** Opens the ASAGI grid for the given netCDF file and variable. */
	virtual ::asagi::Grid* open(char const* file, char const* varname);

	/** Thread count ASAGI was configured with (set during open()). */
	virtual unsigned numberOfThreads() const { return m_asagiThreads; }

private:
	static NUMACache_Mode getNUMAMode();
};
/**
*
* @param file File name of the netCDF file
* @param varname The variable name in the netCDF file
* @return ASAGI grid
*/
::asagi::Grid* AsagiReader::open(const char* file, const char* varname) {
SCOREP_USER_REGION("AsagiReader_open", SCOREP_USER_REGION_TYPE_FUNCTION);
const int rank = seissol::MPI::mpi.rank();
::asagi::Grid* grid = ::asagi::Grid::createArray();
// <prefix>_SPARSE selects ASAGI's cache-only grid layout.
if (utils::Env::get<bool>((m_envPrefix + "_SPARSE").c_str(), false)) {
grid->setParam("GRID", "CACHE");
}
// Set MPI mode
if (AsagiModule::mpiMode() != MPI_OFF) {
#ifdef USE_MPI
::asagi::Grid::Error err = grid->setComm(m_comm);
if (err != ::asagi::Grid::SUCCESS)
logError() << "Could not set ASAGI communicator:" << err;
#endif // USE_MPI
if (AsagiModule::mpiMode() == MPI_COMM_THREAD)
grid->setParam("MPI_COMMUNICATION", "THREAD");
}
// Set NUMA mode
// Thread count: explicit <prefix>_NUM_THREADS override, else all threads;
// clamped to the number of threads actually available.
m_asagiThreads = utils::Env::get((m_envPrefix + "_NUM_THREADS").c_str(), 0u);
if (m_asagiThreads == 0)
m_asagiThreads = AsagiModule::totalThreads();
else if (static_cast<int>(m_asagiThreads) > AsagiModule::totalThreads()) {
logWarning(rank) << "Only" << AsagiModule::totalThreads()
<< "threads can be used for ASAGI initialization.";
m_asagiThreads = AsagiModule::totalThreads();
}
if (AsagiModule::mpiMode() == MPI_COMM_THREAD)
m_asagiThreads--; // one thread is used for communication
// NOTE(review): if totalThreads() == 1 while MPI_COMM_THREAD is active,
// the decrement above leaves 0 worker threads -- confirm this combination
// is excluded upstream.
grid->setThreads(m_asagiThreads);
switch (getNUMAMode()) {
case NUMA_ON:
grid->setParam("NUMA_COMMUNICATION", "ON");
break;
case NUMA_OFF:
grid->setParam("NUMA_COMMUNICATION", "OFF");
break;
case NUMA_CACHE:
grid->setParam("NUMA_COMMUNICATION", "CACHE");
break;
}
// Set vertex centered grid
grid->setParam("VALUE_POSITION", "VERTEX_CENTERED");
// Set additional parameters
std::string blockSize = utils::Env::get((m_envPrefix + "_BLOCK_SIZE").c_str(), "64");
grid->setParam("BLOCK_SIZE_0", blockSize.c_str());
grid->setParam("BLOCK_SIZE_1", blockSize.c_str());
grid->setParam("BLOCK_SIZE_2", blockSize.c_str());
std::string cacheSize = utils::Env::get((m_envPrefix + "_CACHE_SIZE").c_str(), "128");
grid->setParam("CACHE_SIZE", cacheSize.c_str());
grid->setParam("VARIABLE", varname);
bool abort = false;
// Read the data
//SCOREP_RECORDING_OFF();
#ifdef _OPENMP
// All configured threads enter grid->open() together. Threads only ever
// store `true` into the shared flag, so concurrent writes agree.
#pragma omp parallel shared(abort) num_threads(m_asagiThreads)
#endif // _OPENMP
{
::asagi::Grid::Error err = grid->open(file);
if (err != ::asagi::Grid::SUCCESS)
abort = true;
}
//SCOREP_RECORDING_ON();
if (abort) {
// NOTE(review): logError() presumably aborts the run, which would make
// the return below unreachable -- confirm.
logError() << "Could not open " << file << " with ASAGI.";
return nullptr;
}
return grid;
}
// Translates the SEISSOL_ASAGI_NUMA_MODE environment variable ("ON",
// "OFF" or "CACHE"; default "ON") into the corresponding enum value.
// Any other string is reported as an error.
NUMACache_Mode AsagiReader::getNUMAMode()
{
	const char* modeName = utils::Env::get("SEISSOL_ASAGI_NUMA_MODE", "ON");
	if (strcmp(modeName, "CACHE") == 0)
		return NUMA_CACHE;
	if (strcmp(modeName, "OFF") == 0)
		return NUMA_OFF;
	if (strcmp(modeName, "ON") == 0)
		return NUMA_ON;
	logError() << "Unknown NUMA mode:" << modeName;
	return NUMA_OFF;
}
}
}
#endif // ASAGIREADER_H
|
DRB016-outputdep-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
The loop in this example cannot be parallelized.
This pattern has two pair of dependencies:
1. loop carried output dependence
x = .. :
2. loop carried true dependence due to:
.. = x;
x = ..;
Data race pairs: we allow two pairs to preserve the original code pattern.
1. x@73:12 vs. x@74:5
2. x@74:5 vs. x@74:5
*/
#include <stdio.h>
int a[100]; /* shared output array */
int main()
{
int len=100;
int i,x=10;
/* INTENTIONAL data race (DataRaceBench "yes" kernel): the shared scalar x
 * is read by "a[i] = x" and written by "x = i" from multiple threads with
 * no synchronization, producing loop-carried true and output dependences.
 * The loop is not parallelizable; do not "fix" the race -- exhibiting it
 * is the purpose of this benchmark. */
#pragma omp parallel for schedule(dynamic)
for (i=0;i<len;i++)
{
a[i] = x;
x=i;
}
printf("x=%d",x);
return 0;
}
|
elementary.h | /*!
* \file elementary.h
* \author Jun Yoshida
* \copyright (c) Jun Yoshida 2019
* The project is released under BSD3 License.
* \detail
* Defining elementary operations on matrices.
*/
#ifndef NUMERIC_LINEARALGEBRA_INTEGRAL_ELEMENTARY_H
#define NUMERIC_LINEARALGEBRA_INTEGRAL_ELEMENTARY_H
#include "common.h"
/*!
* Transposition of matrices
*/
static inline
void transpose(matrix_type * restrict mat)
{
/* Swapping the dimensions and the strides reinterprets the same storage
 * as the transposed matrix; no element data is moved. */
SWAP_UNSAFE(mat->c, mat->r);
SWAP_UNSAFE(mat->Xc, mat->Xr);
}
/*!******************************
* \section elem_row_op
* Elementary row oprations
********************************/
/*! Exchange rows i1 and i2 of mat element-wise (no-op when i1 == i2). */
static inline
void swap_rows(size_t i1, size_t i2, matrix_type * restrict mat)
{
    if (i1 == i2)
        return;
    /* Columns are independent, so the swaps may run concurrently. */
#pragma omp parallel for
    for (size_t col = 0; col < mat->c; ++col) {
        SWAP_UNSAFE( MATRIX_AT(*mat, i1, col), MATRIX_AT(*mat, i2, col) );
    }
}
/*! Exchange rows i1 and i2 using upside-down row indexing (no-op when
 *  i1 == i2). */
static inline
void swap_rows_ud(size_t i1, size_t i2, matrix_type * restrict mat)
{
    if (i1 == i2)
        return;
    /* Columns are independent, so the swaps may run concurrently. */
#pragma omp parallel for
    for (size_t col = 0; col < mat->c; ++col) {
        SWAP_UNSAFE( MATRIX_UDAT(*mat, i1, col), MATRIX_UDAT(*mat, i2, col) );
    }
}
/*! Multiply every entry of row i by the scalar s. */
static inline
void scalar_row(size_t i, target_type s, matrix_type * restrict mat)
{
#pragma omp parallel for
    for (size_t col = 0; col < mat->c; ++col) {
        MATRIX_AT(*mat, i, col) *= s;
    }
}
/*! Multiply every entry of row i (upside-down indexing) by the scalar s. */
static inline
void scalar_row_ud(size_t i, target_type s, matrix_type * restrict mat)
{
#pragma omp parallel for
    for (size_t col = 0; col < mat->c; ++col) {
        MATRIX_UDAT(*mat, i, col) *= s;
    }
}
/*! Row axpy: row(i_dest) += s * row(i_src). The multiply-add is carried
 *  out in the wide intermediate type. */
static inline
void axpy_rows(target_type s, size_t i_src, size_t i_dest, matrix_type * restrict mat )
{
#pragma omp parallel for
    for (size_t col = 0; col < mat->c; ++col) {
        target_type_huge term = MATRIX_AT(*mat, i_src, col);
        term *= s;
        term += MATRIX_AT(*mat, i_dest, col);
        MATRIX_AT(*mat, i_dest, col) = term;
    }
}
/*! Row axpy with upside-down indexing: row(i_dest) += s * row(i_src).
 *  The multiply-add is carried out in the wide intermediate type. */
static inline
void axpy_rows_ud(target_type s, size_t i_src, size_t i_dest, matrix_type * restrict mat )
{
#pragma omp parallel for
    for (size_t col = 0; col < mat->c; ++col) {
        target_type_huge term = MATRIX_UDAT(*mat, i_src, col);
        term *= s;
        term += MATRIX_UDAT(*mat, i_dest, col);
        MATRIX_UDAT(*mat, i_dest, col) = term;
    }
}
/*!******************************
* \section elem_col_op
* Elementary column oprations
********************************/
/*! Exchange columns j1 and j2 of mat element-wise (no-op when j1 == j2). */
static inline
void swap_columns(size_t j1, size_t j2, matrix_type * restrict mat)
{
    if (j1 == j2)
        return;
    /* Rows are independent, so the swaps may run concurrently. */
#pragma omp parallel for
    for (size_t row = 0; row < mat->r; ++row) {
        SWAP_UNSAFE( MATRIX_AT(*mat, row, j1), MATRIX_AT(*mat, row, j2) );
    }
}
/*! Exchange columns j1 and j2 using right-to-left column indexing (no-op
 *  when j1 == j2). */
static inline
void swap_columns_rl(size_t j1, size_t j2, matrix_type * restrict mat)
{
    if (j1 == j2)
        return;
    /* Rows are independent, so the swaps may run concurrently. */
#pragma omp parallel for
    for (size_t row = 0; row < mat->r; ++row) {
        SWAP_UNSAFE( MATRIX_RLAT(*mat, row, j1), MATRIX_RLAT(*mat, row, j2) );
    }
}
/*! Multiply every entry of column j by the scalar s. */
static inline
void scalar_column(size_t j, target_type s, matrix_type * restrict mat)
{
#pragma omp parallel for
    for (size_t row = 0; row < mat->r; ++row) {
        MATRIX_AT(*mat, row, j) *= s;
    }
}
/*! Multiply every entry of column j (right-to-left indexing) by the
 *  scalar s. */
static inline
void scalar_column_rl(size_t j, target_type s, matrix_type * restrict mat)
{
#pragma omp parallel for
    for (size_t row = 0; row < mat->r; ++row) {
        MATRIX_RLAT(*mat, row, j) *= s;
    }
}
/*! Column axpy: col(j_dest) += s * col(j_src). The multiply-add is
 *  carried out in the wide intermediate type. */
static inline
void axpy_columns(target_type s, size_t j_src, size_t j_dest, matrix_type * restrict mat )
{
#pragma omp parallel for
    for (size_t row = 0; row < mat->r; ++row) {
        target_type_huge term = MATRIX_AT(*mat, row, j_src);
        term *= s;
        term += MATRIX_AT(*mat, row, j_dest);
        MATRIX_AT(*mat, row, j_dest) = term;
    }
}
/*! Column axpy with right-to-left indexing: col(j_dest) += s * col(j_src).
 *  The multiply-add is carried out in the wide intermediate type. */
static inline
void axpy_columns_rl(target_type s, size_t j_src, size_t j_dest, matrix_type * restrict mat )
{
#pragma omp parallel for
    for (size_t row = 0; row < mat->r; ++row) {
        target_type_huge term = MATRIX_RLAT(*mat, row, j_src);
        term *= s;
        term += MATRIX_RLAT(*mat, row, j_dest);
        MATRIX_RLAT(*mat, row, j_dest) = term;
    }
}
#endif
|
collective_reduction.c | /*****************************************************************************
* *
* Mixed-mode OpenMP/MPI MicroBenchmark Suite - Version 1.0 *
* *
* produced by *
* *
* Mark Bull, Jim Enright and Fiona Reid *
* *
* at *
* *
* Edinburgh Parallel Computing Centre *
* *
* email: markb@epcc.ed.ac.uk, fiona@epcc.ed.ac.uk *
* *
* *
* Copyright 2012, The University of Edinburgh *
* *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); *
* you may not use this file except in compliance with the License. *
* You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* *
****************************************************************************/
/*-----------------------------------------------------------*/
/* Implements the collective reduce and allreduce mixed */
/* mode OpenMP/MPI benchmarks. */
/*-----------------------------------------------------------*/
#include "collective_reduction.h"
/*-----------------------------------------------------------*/
/* reduction */
/* */
/* Driver subroutine for the reduce and allReduce */
/* benchmarks. */
/*-----------------------------------------------------------*/
int reduction(int benchmarkType){
int dataSizeIter, sizeofBuf;
/* Initialise repsToDo to defaultReps */
repsToDo = defaultReps;
/* Start loop over data sizes */
dataSizeIter = minDataSize; /* initialise dataSizeIter */
while (dataSizeIter <= maxDataSize){
/* allocate space for the main data arrays.. */
allocateReduceData(dataSizeIter);
/* Perform benchmark warm-up */
if (benchmarkType == REDUCE){
reduceKernel(warmUpIters, dataSizeIter);
/* Master process tests if reduce was a success */
if (myMPIRank == 0){
testReduce(dataSizeIter, benchmarkType);
}
}
else if (benchmarkType == ALLREDUCE){
/* calculate sizeofBuf for test */
sizeofBuf = dataSizeIter * numThreads;
allReduceKernel(warmUpIters, dataSizeIter);
/* all processes need to perform unit test */
testReduce(sizeofBuf, benchmarkType);
}
/* Initialise the benchmark */
benchComplete = FALSE;
/* Execute benchmark until target time is reached */
while (benchComplete != TRUE){
/* Start timer */
MPI_Barrier(comm);
startTime = MPI_Wtime();
/* Execute reduce for repsToDo repetitions */
if (benchmarkType == REDUCE){
reduceKernel(repsToDo, dataSizeIter);
}
else if (benchmarkType == ALLREDUCE){
allReduceKernel(repsToDo, dataSizeIter);
}
/* Stop timer */
MPI_Barrier(comm);
finishTime = MPI_Wtime();
totalTime = finishTime - startTime;
/* Test if target time was reached with the number of reps */
if (myMPIRank==0){
benchComplete = repTimeCheck(totalTime, repsToDo);
}
/* Ensure all procs have the same value of benchComplete */
/* and repsToDo */
MPI_Bcast(&benchComplete, 1, MPI_INT, 0, comm);
MPI_Bcast(&repsToDo, 1, MPI_INT, 0, comm);
}
/* Master process sets benchmark result for reporting */
if (myMPIRank == 0){
setReportParams(dataSizeIter, repsToDo, totalTime);
printReport();
}
/* Free allocated data */
freeReduceData();
/* Double dataSize and loop again */
dataSizeIter = dataSizeIter * 2;
}
return 0;
}
/*-----------------------------------------------------------*/
/* reduceKernel */
/* */
/* Implements the reduce mixed mode benchmark. */
/* Each thread under every MPI process combines its local */
/* buffer. This is then sent to the master MPI process to */
/* get the overall reduce value. */
/*-----------------------------------------------------------*/
/*-----------------------------------------------------------*/
/* reduceKernel                                              */
/*                                                           */
/* Implements the reduce mixed mode benchmark.               */
/* Each thread under every MPI process fills its own slice   */
/* of tempBuf, the threads combine the slices locally, and   */
/* MPI_Reduce then sums across processes onto the master.    */
/*-----------------------------------------------------------*/
int reduceKernel(int totalReps, int dataSize){
    int repIter, i, j;

    /* FIX: start at 0 so exactly totalReps repetitions execute.
     * The loop previously started at 1 and ran totalReps-1 reps,
     * skewing the reported time-per-rep and disagreeing with
     * allReduceKernel, which starts at 0. */
    for (repIter=0; repIter<totalReps; repIter++){
        /* Manually perform the reduction between OpenMP threads.
         * NOTE: myThreadID is referenced under default(none), so it is
         * presumably threadprivate — declared elsewhere in the suite. */
#pragma omp parallel default(none) \
   private(i,j) \
   shared(tempBuf,globalIDarray,dataSize,numThreads) \
   shared(localReduceBuf)
        {
            /* 1) Initialise tempBuf: with a static chunk of dataSize,
             * each thread writes its global ID into one (or more)
             * contiguous dataSize-long slice(s). */
#pragma omp for schedule(static,dataSize)
            for(i=0; i<(numThreads * dataSize); i++){
                tempBuf[i] = globalIDarray[myThreadID];
            }

            /* 2) Element-wise sum of the numThreads slices into
             * localReduceBuf. */
#pragma omp for
            for(i=0; i<dataSize; i++){
                localReduceBuf[i] = 0;
                for (j=0; j<numThreads; j++){
                    localReduceBuf[i] += tempBuf[(j*dataSize)+i];
                }
            }
        }

        /* Combine the per-process partial sums onto the master rank. */
        MPI_Reduce(localReduceBuf, globalReduceBuf, dataSize,\
            MPI_INT, MPI_SUM, 0, comm);

        /* Copy globalReduceBuf into the master thread's portion of
         * finalReduceBuf — only on rank 0, where MPI_Reduce defines
         * the receive buffer. */
        if (myMPIRank==0) {
            for (i=0; i<dataSize; i++){
                finalReduceBuf[i] = globalReduceBuf[i];
            }
        }
    }

    return 0;
}
/*-----------------------------------------------------------*/
/* allReduce */
/* */
/* Implements the allreduce mixed mode benchmark. */
/* Each thread under every MPI process combines its local */
/* buffer. All MPI processes then combine this value to */
/* the overall reduction value at each process. */
/*-----------------------------------------------------------*/
int allReduceKernel(int totalReps, int dataSize){
int repIter, i, j;
int startPos;
for (repIter=0; repIter<totalReps; repIter++){
/* Manually perform the reduction between OpenMP threads */
#pragma omp parallel default(none) \
private(i,j) \
shared(tempBuf,globalIDarray,dataSize,numThreads) \
shared(localReduceBuf)
{
/* 1) Intialise the tempBuf array */
#pragma omp for schedule(static,dataSize)
for(i=0; i<(numThreads * dataSize); i++){
tempBuf[i] = globalIDarray[myThreadID];
}
/* 2) Reduce tempBuf into localReduceBuf */
#pragma omp for
for(i=0; i<dataSize; i++){
localReduceBuf[i] = 0;
for (j=0; j<numThreads; j++){
localReduceBuf[i] += tempBuf[(j*dataSize)+i];
}
}
}
/* Perform an all reduce of localReduceBuf across
* the MPI processes.
*/
MPI_Allreduce(localReduceBuf, globalReduceBuf, \
dataSize, MPI_INTEGER, MPI_SUM, comm);
/* Each thread copies globalReduceBuf into its portion
* of finalReduceBuf.
*/
#pragma omp parallel default(none) \
private(i,startPos) \
shared(dataSize,finalReduceBuf,globalReduceBuf)
{
/* Calculate the start of each threads portion
* of finalReduceBuf.
*/
startPos = (myThreadID * dataSize);
for (i=0; i<dataSize; i++){
finalReduceBuf[startPos + i] = globalReduceBuf[i];
}
}
}
return 0;
}
/*-----------------------------------------------------------*/
/* allocateReduceData */
/* */
/* Allocate memory for the main data arrays in the */
/* reduction operation. */
/*-----------------------------------------------------------*/
/*-----------------------------------------------------------*/
/* allocateReduceData                                        */
/*                                                           */
/* Allocates the four global buffers used by the reduction   */
/* benchmarks for the given per-process data size.           */
/*-----------------------------------------------------------*/
int allocateReduceData(int bufferSize){
    /* tempBuf and finalReduceBuf hold one dataSize-long slice per
     * OpenMP thread; the reduce buffers hold a single slice. */
    const int threadedSize = bufferSize * numThreads;

    localReduceBuf  = (int *) malloc(bufferSize * sizeof(int));
    globalReduceBuf = (int *) malloc(bufferSize * sizeof(int));
    tempBuf         = (int *) malloc(threadedSize * sizeof(int));
    finalReduceBuf  = (int *) malloc(threadedSize * sizeof(int));

    return 0;
}
/*-----------------------------------------------------------*/
/* freeReduceData */
/* */
/* Free allocated memory for main data arrays. */
/*-----------------------------------------------------------*/
/*-----------------------------------------------------------*/
/* freeReduceData                                            */
/*                                                           */
/* Frees the buffers allocated by allocateReduceData.        */
/*-----------------------------------------------------------*/
int freeReduceData(){
    free(localReduceBuf);
    free(globalReduceBuf);
    free(tempBuf);
    free(finalReduceBuf);
    /* Reset the globals so an accidental second free (or a stray use
     * after free) fails fast instead of corrupting the heap — the
     * driver loop re-allocates these every data-size iteration. */
    localReduceBuf = NULL;
    globalReduceBuf = NULL;
    tempBuf = NULL;
    finalReduceBuf = NULL;

    return 0;
}
/*-----------------------------------------------------------*/
/* testReduce */
/* */
/* Verifies that the reduction benchmarks worked correctly. */
/*-----------------------------------------------------------*/
/*-----------------------------------------------------------*/
/* testReduce                                                */
/*                                                           */
/* Verifies the reduction benchmarks: every element of       */
/* finalReduceBuf must equal the sum of all global thread    */
/* IDs, i.e. 0 + 1 + ... + (numMPIprocs*numThreads - 1).     */
/*-----------------------------------------------------------*/
int testReduce(int bufferSize, int benchmarkType){
    int elem, passed, globalVerdict;
    int expectedSum, numGlobalIDs;

    passed = TRUE;

    /* Expected value: sum of every global thread ID. */
    numGlobalIDs = (numMPIprocs * numThreads);
    expectedSum = 0;
    for (elem=0; elem<numGlobalIDs; elem++){
        expectedSum = expectedSum + elem;
    }

    /* A single mismatching element fails the whole test. */
    for (elem=0; elem<bufferSize; elem++){
        if (finalReduceBuf[elem] != expectedSum){
            passed = FALSE;
        }
    }

    if (benchmarkType == ALLREDUCE){
        /* Combine every rank's verdict on the master with a logical AND. */
        MPI_Reduce(&passed, &globalVerdict, 1, MPI_INT, MPI_LAND, 0, comm);
        if (myMPIRank == 0){
            setTestOutcome(globalVerdict);
        }
    }
    else{
        /* For REDUCE only the master calls this routine, so its own
         * verdict is the outcome. */
        setTestOutcome(passed);
    }

    return 0;
}
|
requantize_relu_pack8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Fused requantize + ReLU for int8 blobs in pack8 layout (8 channel lanes per
// element). Each int32 input v becomes int8(relu(v * scale_in [+ bias]) * scale_out),
// with the two scales (and the bias) folded ahead of the loops — see the
// identities below. Channels are processed in parallel with OpenMP.
static void requantize_relu_pack8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& scale_in_data, const Mat& scale_out_data, const Mat& bias_data, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;
    int size = w * h;
    // A table size of 1 means one value broadcast to every channel,
    // otherwise the table is per-channel (8 lanes per pack8 channel).
    int scale_in_data_size = scale_in_data.w;
    int scale_out_data_size = scale_out_data.w;
    int bias_data_size = bias_data.w;

    // int8(relu(v * scale_in) * scale_out)
    // int8_relu(v * (scale_in * scale_out))

    // int8(relu(v * scale_in + bias) * scale_out)
    // int8_relu(v * (scale_in * scale_out) + (bias * scale_out))

    if (bias_data_size == 0)
    {
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < channels; q++)
        {
            const int* intptr = bottom_blob.channel(q);
            signed char* ptr = top_blob.channel(q);

            // Load this channel's 8 scale lanes as two float32x4 halves and
            // fold scale_in * scale_out once, outside the element loop.
            float32x4_t _scale_in0 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8);
            float32x4_t _scale_in1 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8 + 4);
            float32x4_t _scale_out0 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8);
            float32x4_t _scale_out1 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8 + 4);

            float32x4_t _scale0 = vmulq_f32(_scale_in0, _scale_out0);
            float32x4_t _scale1 = vmulq_f32(_scale_in1, _scale_out1);

            int i = 0;
#if __aarch64__
            // aarch64: unroll 4 pack8 elements (32 ints) per iteration.
            for (; i + 3 < size; i += 4)
            {
                float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr));
                float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr + 4));
                float32x4_t _v2 = vcvtq_f32_s32(vld1q_s32(intptr + 8));
                float32x4_t _v3 = vcvtq_f32_s32(vld1q_s32(intptr + 12));
                float32x4_t _v4 = vcvtq_f32_s32(vld1q_s32(intptr + 16));
                float32x4_t _v5 = vcvtq_f32_s32(vld1q_s32(intptr + 20));
                float32x4_t _v6 = vcvtq_f32_s32(vld1q_s32(intptr + 24));
                float32x4_t _v7 = vcvtq_f32_s32(vld1q_s32(intptr + 28));
                _v0 = vmulq_f32(_v0, _scale0);
                _v1 = vmulq_f32(_v1, _scale1);
                _v2 = vmulq_f32(_v2, _scale0);
                _v3 = vmulq_f32(_v3, _scale1);
                _v4 = vmulq_f32(_v4, _scale0);
                _v5 = vmulq_f32(_v5, _scale1);
                _v6 = vmulq_f32(_v6, _scale0);
                _v7 = vmulq_f32(_v7, _scale1);
                // float2int8relu clamps negatives to zero and narrows to int8.
                vst1_s8(ptr, float2int8relu(_v0, _v1));
                vst1_s8(ptr + 8, float2int8relu(_v2, _v3));
                vst1_s8(ptr + 16, float2int8relu(_v4, _v5));
                vst1_s8(ptr + 24, float2int8relu(_v6, _v7));

                intptr += 32;
                ptr += 32;
            }
#endif // __aarch64__
            // 2 pack8 elements (16 ints) per iteration.
            for (; i + 1 < size; i += 2)
            {
                float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr));
                float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr + 4));
                float32x4_t _v2 = vcvtq_f32_s32(vld1q_s32(intptr + 8));
                float32x4_t _v3 = vcvtq_f32_s32(vld1q_s32(intptr + 12));
                _v0 = vmulq_f32(_v0, _scale0);
                _v1 = vmulq_f32(_v1, _scale1);
                _v2 = vmulq_f32(_v2, _scale0);
                _v3 = vmulq_f32(_v3, _scale1);
                vst1_s8(ptr, float2int8relu(_v0, _v1));
                vst1_s8(ptr + 8, float2int8relu(_v2, _v3));

                intptr += 16;
                ptr += 16;
            }
            // Scalar tail: one pack8 element (8 ints) at a time.
            for (; i < size; i++)
            {
                float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr));
                float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr + 4));
                _v0 = vmulq_f32(_v0, _scale0);
                _v1 = vmulq_f32(_v1, _scale1);
                vst1_s8(ptr, float2int8relu(_v0, _v1));

                intptr += 8;
                ptr += 8;
            }
        }
    }
    else
    {
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < channels; q++)
        {
            const int* intptr = bottom_blob.channel(q);
            signed char* ptr = top_blob.channel(q);

            // Per-channel scales and bias; bias is pre-multiplied by
            // scale_out (see the folded identity above).
            float32x4_t _scale_in0 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8);
            float32x4_t _scale_in1 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8 + 4);
            float32x4_t _scale_out0 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8);
            float32x4_t _scale_out1 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8 + 4);
            float32x4_t _bias0 = bias_data_size == 1 ? vdupq_n_f32(bias_data[0]) : vld1q_f32((const float*)bias_data + q * 8);
            float32x4_t _bias1 = bias_data_size == 1 ? vdupq_n_f32(bias_data[0]) : vld1q_f32((const float*)bias_data + q * 8 + 4);

            float32x4_t _scale0 = vmulq_f32(_scale_in0, _scale_out0);
            float32x4_t _scale1 = vmulq_f32(_scale_in1, _scale_out1);
            _bias0 = vmulq_f32(_bias0, _scale_out0);
            _bias1 = vmulq_f32(_bias1, _scale_out1);

            int i = 0;
#if __aarch64__
            // aarch64: unroll 4 pack8 elements, fused multiply-add with bias.
            for (; i + 3 < size; i += 4)
            {
                float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr));
                float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr + 4));
                float32x4_t _v2 = vcvtq_f32_s32(vld1q_s32(intptr + 8));
                float32x4_t _v3 = vcvtq_f32_s32(vld1q_s32(intptr + 12));
                float32x4_t _v4 = vcvtq_f32_s32(vld1q_s32(intptr + 16));
                float32x4_t _v5 = vcvtq_f32_s32(vld1q_s32(intptr + 20));
                float32x4_t _v6 = vcvtq_f32_s32(vld1q_s32(intptr + 24));
                float32x4_t _v7 = vcvtq_f32_s32(vld1q_s32(intptr + 28));
                _v0 = vfmaq_f32(_bias0, _v0, _scale0);
                _v1 = vfmaq_f32(_bias1, _v1, _scale1);
                _v2 = vfmaq_f32(_bias0, _v2, _scale0);
                _v3 = vfmaq_f32(_bias1, _v3, _scale1);
                _v4 = vfmaq_f32(_bias0, _v4, _scale0);
                _v5 = vfmaq_f32(_bias1, _v5, _scale1);
                _v6 = vfmaq_f32(_bias0, _v6, _scale0);
                _v7 = vfmaq_f32(_bias1, _v7, _scale1);
                vst1_s8(ptr, float2int8relu(_v0, _v1));
                vst1_s8(ptr + 8, float2int8relu(_v2, _v3));
                vst1_s8(ptr + 16, float2int8relu(_v4, _v5));
                vst1_s8(ptr + 24, float2int8relu(_v6, _v7));

                intptr += 32;
                ptr += 32;
            }
#endif // __aarch64__
            for (; i + 1 < size; i += 2)
            {
#if __aarch64__
                float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr));
                float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr + 4));
                float32x4_t _v2 = vcvtq_f32_s32(vld1q_s32(intptr + 8));
                float32x4_t _v3 = vcvtq_f32_s32(vld1q_s32(intptr + 12));
                _v0 = vfmaq_f32(_bias0, _v0, _scale0);
                _v1 = vfmaq_f32(_bias1, _v1, _scale1);
                _v2 = vfmaq_f32(_bias0, _v2, _scale0);
                _v3 = vfmaq_f32(_bias1, _v3, _scale1);
                vst1_s8(ptr, float2int8relu(_v0, _v1));
                vst1_s8(ptr + 8, float2int8relu(_v2, _v3));

                intptr += 16;
                ptr += 16;
#else  // __aarch64__
                // armv7 path: same math hand-scheduled — load 16 ints,
                // convert, v = bias + v*scale (vmla), round-convert to s32,
                // saturating-narrow to s16 then s8, ReLU via vmax with zero,
                // store 16 int8 results. intptr/ptr advance via the
                // post-increment addressing in vldm/vst1.
                asm volatile(
                    "pld        [%0, #512]      \n"
                    "vldm       %0!, {d8-d15}   \n"
                    "vmov       q0, %q6         \n"
                    "vmov       q1, %q7         \n"
                    "vmov       q2, %q6         \n"
                    "vmov       q3, %q7         \n"
                    "vcvt.f32.s32 q4, q4        \n"
                    "vcvt.f32.s32 q5, q5        \n"
                    "vcvt.f32.s32 q6, q6        \n"
                    "vcvt.f32.s32 q7, q7        \n"
                    "veor       q8, q8          \n" // _zero
                    "vmla.f32   q0, q4, %q4     \n"
                    "vmla.f32   q1, q5, %q5     \n"
                    "vmla.f32   q2, q6, %q4     \n"
                    "vmla.f32   q3, q7, %q5     \n"
                    "vcvtr.s32.f32 s0, s0       \n"
                    "vcvtr.s32.f32 s1, s1       \n"
                    "vcvtr.s32.f32 s2, s2       \n"
                    "vcvtr.s32.f32 s3, s3       \n"
                    "vcvtr.s32.f32 s4, s4       \n"
                    "vcvtr.s32.f32 s5, s5       \n"
                    "vcvtr.s32.f32 s6, s6       \n"
                    "vcvtr.s32.f32 s7, s7       \n"
                    "vcvtr.s32.f32 s8, s8       \n"
                    "vcvtr.s32.f32 s9, s9       \n"
                    "vcvtr.s32.f32 s10, s10     \n"
                    "vcvtr.s32.f32 s11, s11     \n"
                    "vcvtr.s32.f32 s12, s12     \n"
                    "vcvtr.s32.f32 s13, s13     \n"
                    "vcvtr.s32.f32 s14, s14     \n"
                    "vcvtr.s32.f32 s15, s15     \n"
                    "vqmovn.s32 d8, q0          \n"
                    "vqmovn.s32 d9, q1          \n"
                    "vqmovn.s32 d10, q2         \n"
                    "vqmovn.s32 d11, q3         \n"
                    "vqmovn.s16 d8, q4          \n"
                    "vqmovn.s16 d9, q5          \n"
                    "vmax.s8    q4, q4, q8      \n"
                    "vst1.s8    {d8-d9}, [%1 :128]! \n"
                    : "=r"(intptr),
                    "=r"(ptr)
                    : "0"(intptr),
                    "1"(ptr),
                    "w"(_scale0), // %4
                    "w"(_scale1), // %5
                    "w"(_bias0),  // %6
                    "w"(_bias1)   // %7
                    : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8");
#endif // __aarch64__
            }
            for (; i < size; i++)
            {
#if __aarch64__
                // NOTE(review): this tail uses vmlaq_f32 while the loops above
                // use vfmaq_f32 — both compute bias + v*scale; vfma fuses the
                // rounding. Results may differ in the last ulp before clamping.
                float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr));
                float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr + 4));
                _v0 = vmlaq_f32(_bias0, _v0, _scale0);
                _v1 = vmlaq_f32(_bias1, _v1, _scale1);
                vst1_s8(ptr, float2int8relu(_v0, _v1));

                intptr += 8;
                ptr += 8;
#else  // __aarch64__
                // armv7 scalar tail: one pack8 element, same pipeline as above.
                asm volatile(
                    "pld        [%0, #256]      \n"
                    "vld1.s32   {d4-d7}, [%0 :128]! \n"
                    "vmov       q0, %q6         \n"
                    "vmov       q1, %q7         \n"
                    "vcvt.f32.s32 q2, q2        \n"
                    "vcvt.f32.s32 q3, q3        \n"
                    "veor       d8, d8          \n" // _zero
                    "vmla.f32   q0, q2, %q4     \n"
                    "vmla.f32   q1, q3, %q5     \n"
                    "vcvtr.s32.f32 s0, s0       \n"
                    "vcvtr.s32.f32 s1, s1       \n"
                    "vcvtr.s32.f32 s2, s2       \n"
                    "vcvtr.s32.f32 s3, s3       \n"
                    "vcvtr.s32.f32 s4, s4       \n"
                    "vcvtr.s32.f32 s5, s5       \n"
                    "vcvtr.s32.f32 s6, s6       \n"
                    "vcvtr.s32.f32 s7, s7       \n"
                    "vqmovn.s32 d4, q0          \n"
                    "vqmovn.s32 d5, q1          \n"
                    "vqmovn.s16 d4, q2          \n"
                    "vmax.s8    d4, d4, d8      \n"
                    "vst1.s8    {d4}, [%1 :64]! \n"
                    : "=r"(intptr),
                    "=r"(ptr)
                    : "0"(intptr),
                    "1"(ptr),
                    "w"(_scale0), // %4
                    "w"(_scale1), // %5
                    "w"(_bias0),  // %6
                    "w"(_bias1)   // %7
                    : "memory", "q0", "q1", "q2", "q3", "q4");
#endif // __aarch64__
            }
        }
    }
}
|
depth-metrics.h | // License: Apache 2.0. See LICENSE file in root directory.
// Copyright(c) 2017 Intel Corporation. All Rights Reserved.
//
// Plane Fit implementation follows http://www.ilikebigbits.com/blog/2015/3/2/plane-from-points algorithm
#pragma once
#include <vector>
#include <mutex>
#include <array>
#include <imgui.h>
#include <librealsense2/rsutil.h>
#include <librealsense2/rs.hpp>
#include "rendering.h"
namespace rs2
{
namespace depth_quality
{
// Aggregated results of analyzing one depth frame: a plane fitted over the
// ROI plus derived distance/tilt metrics (filled by analyze_depth_image).
struct snapshot_metrics
{
    int width;   // frame width, pixels
    int height;  // frame height, pixels

    rs2::region_of_interest roi;  // region the plane was fitted over

    float distance;  // camera-to-plane distance along the plane normal, mm (= -p.d * 1000)
    float angle;     // |angle| between plane normal and optical axis, degrees
    float angle_x;   // x component of the camera axis projected onto the plane (tilt direction)
    float angle_y;   // y component of that projection

    plane p;  // fitted plane coefficients: a*x + b*y + c*z + d = 0
    std::array<float3, 4> plane_corners;  // plane intersections with rays through the 4 ROI corners
};
// One named metric sample produced by the metrics callback.
struct single_metric_data
{
    single_metric_data(std::string name, float val) :
        val(val), name(name) {}

    float val;         // metric value
    std::string name;  // metric identifier
};
// Per-frame metrics callback: receives the ROI point cloud, the fitted plane,
// camera parameters and ground truth, and — when `record` is set — appends
// computed metrics to `samples`.
using callback_type = std::function<void(
    const std::vector<rs2::float3>& points,
    const plane p,
    const rs2::region_of_interest roi,
    const float baseline_mm,
    const float focal_length_pixels,
    const int ground_thruth_mm,
    const bool plane_fit,
    const float plane_fit_to_ground_truth_mm,
    const float distance_mm,
    bool record,
    std::vector<single_metric_data>& samples)>;
// Build the plane a*x + b*y + c*z + d = 0 with the given normal (a,b,c)
// passing through `point` (d = -normal . point).
inline plane plane_from_point_and_normal(const rs2::float3& point, const rs2::float3& normal)
{
    const float d = -(normal.x * point.x + normal.y * point.y + normal.z * point.z);
    return { normal.x, normal.y, normal.z, d };
}
//Based on: http://www.ilikebigbits.com/blog/2015/3/2/plane-from-points
// Least-squares plane fit through a 3D point cloud.
// FIX: take the vector by const reference (it was passed by value, copying
// the entire ROI point cloud on every call) and iterate by const reference.
// Throws std::runtime_error with fewer than 3 points; returns the null plane
// {0,0,0,0} when the points are degenerate (collinear/coincident).
inline plane plane_from_points(const std::vector<rs2::float3>& points)
{
    if (points.size() < 3) throw std::runtime_error("Not enough points to calculate plane");

    // Centroid — the fitted plane passes through it.
    rs2::float3 sum = { 0,0,0 };
    for (const auto& point : points) sum = sum + point;

    rs2::float3 centroid = sum / float(points.size());

    // Accumulate the symmetric covariance terms around the centroid.
    double xx = 0, xy = 0, xz = 0, yy = 0, yz = 0, zz = 0;
    for (const auto& point : points) {
        rs2::float3 temp = point - centroid;
        xx += temp.x * temp.x;
        xy += temp.x * temp.y;
        xz += temp.x * temp.z;
        yy += temp.y * temp.y;
        yz += temp.y * temp.z;
        zz += temp.z * temp.z;
    }

    // Solve along the axis with the largest determinant for numerical stability.
    double det_x = yy*zz - yz*yz;
    double det_y = xx*zz - xz*xz;
    double det_z = xx*yy - xy*xy;

    double det_max = std::max({ det_x, det_y, det_z });
    if (det_max <= 0) return{ 0, 0, 0, 0 };

    rs2::float3 dir{};
    if (det_max == det_x)
    {
        float a = static_cast<float>((xz*yz - xy*zz) / det_x);
        float b = static_cast<float>((xy*yz - xz*yy) / det_x);
        dir = { 1, a, b };
    }
    else if (det_max == det_y)
    {
        float a = static_cast<float>((yz*xz - xy*zz) / det_y);
        float b = static_cast<float>((xy*xz - yz*xx) / det_y);
        dir = { a, 1, b };
    }
    else
    {
        float a = static_cast<float>((yz*xy - xz*yy) / det_z);
        float b = static_cast<float>((xz*xy - yz*xx) / det_z);
        dir = { a, b, 1 };
    }

    return plane_from_point_and_normal(centroid, dir.normalize());
}
// Deproject pixel (x, y) at the given depth `distance` into a 3D point
// (written to `output`) and return its signed distance from plane `p` —
// the sign tells which side of the plane the point lies on.
inline double evaluate_pixel(const plane& p, const rs2_intrinsics* intrin, float x, float y, float distance, float3& output)
{
    float pixel[2] = { x, y };
    rs2_deproject_pixel_to_point(&output.x, intrin, pixel, distance);
    return evaluate_plane(p, output);
}
// Bisection search for the 3D point where the ray through pixel (x, y)
// crosses plane `p`, over the depth interval [min, max]. Returns {0,0,0}
// when both interval ends lie on the same side of the plane.
inline float3 approximate_intersection(const plane& p, const rs2_intrinsics* intrin, float x, float y, float min, float max)
{
    float3 point;
    for (;;)
    {
        // Signed plane distance at the far end (also fills `point`).
        auto d_far = evaluate_pixel(p, intrin, x, y, max, point);
        // Converged: interval is narrower than 1 mm-scale tolerance.
        if (fabs(max - min) < 1e-3) return point;
        auto d_near = evaluate_pixel(p, intrin, x, y, min, point);
        // Same sign at both ends — no crossing inside the interval.
        if (d_far * d_near > 0) return{ 0, 0, 0 };

        // Keep the half-interval that still brackets the sign change.
        auto avg = (max + min) / 2;
        auto d_mid = evaluate_pixel(p, intrin, x, y, avg, point);
        if (d_mid * d_near < 0)
            max = avg;
        else
            min = avg;
    }
}
// Convenience overload: search over the full [0, 1000] depth range
// (same distance units as evaluate_pixel / rs2_deproject_pixel_to_point).
inline float3 approximate_intersection(const plane& p, const rs2_intrinsics* intrin, float x, float y)
{
    return approximate_intersection(p, intrin, x, y, 0.f, 1000.f);
}
// Fit a plane to the valid depth pixels inside `roi` and derive the snapshot
// metrics (camera-to-plane distance, tilt angles, ROI-corner intersections).
// The raw data is also forwarded to `callback` so registered metrics can be
// computed and, when `record` is set, appended to `samples`.
// Returns a partially-filled result (only width/height/roi) when the ROI has
// too few valid pixels or the points don't span a plane.
inline snapshot_metrics analyze_depth_image(
    const rs2::video_frame& frame,
    float units, float baseline_mm,
    const rs2_intrinsics * intrin,
    rs2::region_of_interest roi,
    const int ground_truth_mm,
    bool plane_fit_present,
    std::vector<single_metric_data>& samples,
    bool record,
    callback_type callback)
{
    // Depth frames are 16-bit raw values; `units` converts raw -> distance.
    auto pixels = (const uint16_t*)frame.get_data();
    const auto w = frame.get_width();
    const auto h = frame.get_height();

    snapshot_metrics result{ w, h, roi, {} };

    // Guards roi_pixels; only needed if the parallel loop below is re-enabled.
    std::mutex m;

    std::vector<rs2::float3> roi_pixels;

    //#pragma omp parallel for - TODO optimization envisaged
    for (int y = roi.min_y; y < roi.max_y; ++y)
        for (int x = roi.min_x; x < roi.max_x; ++x)
        {
            // Zero raw depth means "no data" — skip invalid pixels.
            auto depth_raw = pixels[y*w + x];

            if (depth_raw)
            {
                // units is float
                float pixel[2] = { float(x), float(y) };
                float point[3];
                auto distance = depth_raw * units;

                // Deproject the pixel into a 3D point in camera space.
                rs2_deproject_pixel_to_point(point, intrin, pixel, distance);

                std::lock_guard<std::mutex> lock(m);
                roi_pixels.push_back({ point[0], point[1], point[2] });
            }
        }

    if (roi_pixels.size() < 3) { // Not enough pixels in RoI to fit a plane
        return result;
    }

    plane p = plane_from_points(roi_pixels);

    if (p == plane{ 0, 0, 0, 0 }) { // The points in RoI don't span a valid plane
        return result;
    }

    // Calculate intersection of the plane fit with a ray along the center of ROI
    // that by design coincides with the center of the frame
    float3 plane_fit_pivot = approximate_intersection(p, intrin, intrin->width / 2.f, intrin->height / 2.f);
    float plane_fit_to_gt_offset_mm = (ground_truth_mm > 0.f) ? (plane_fit_pivot.z * 1000 - ground_truth_mm) : 0;

    result.p = p;
    // Where the plane meets the rays through the four ROI corners.
    result.plane_corners[0] = approximate_intersection(p, intrin, float(roi.min_x), float(roi.min_y));
    result.plane_corners[1] = approximate_intersection(p, intrin, float(roi.max_x), float(roi.min_y));
    result.plane_corners[2] = approximate_intersection(p, intrin, float(roi.max_x), float(roi.max_y));
    result.plane_corners[3] = approximate_intersection(p, intrin, float(roi.min_x), float(roi.max_y));

    // Distance of origin (the camera) from the plane is encoded in parameter D of the plane
    // The parameter represents the euclidian distance (along plane normal) from camera to the plane
    result.distance = static_cast<float>(-p.d * 1000);
    // Angle can be calculated from param C
    result.angle = static_cast<float>(std::acos(std::abs(p.c)) / M_PI * 180.);

    callback(roi_pixels, p, roi, baseline_mm, intrin->fx, ground_truth_mm, plane_fit_present,
        plane_fit_to_gt_offset_mm, result.distance, record, samples);

    // Calculate normal
    auto n = float3{ p.a, p.b, p.c };
    auto cam = float3{ 0.f, 0.f, -1.f };
    auto dot = n * cam;
    // Project the camera axis onto the plane — its x/y components give
    // the tilt direction reported as angle_x/angle_y.
    auto u = cam - n * dot;

    result.angle_x = u.x;
    result.angle_y = u.y;

    return result;
}
}
}
|
mandelbrot_area2.c | #include<stdio.h>
#include<omp.h>
#define NPOINTS 1000
#define MXITR 1000
/* Complex number as a plain pair of doubles (real, imaginary). */
struct d_complex {
    double r;
    double i;
};

void testpoint (struct d_complex);

struct d_complex c;   /* scratch sample point; privatized per thread in main's parallel loop */
int numoutside = 0;   /* count of sampled points that escape the Mandelbrot set */
int main(){
int i, j;
double area, error, eps = 1.0e-5;
#pragma omp parallel for default(shared) private(c, j) firstprivate(eps)
for (i=0; i<NPOINTS;i++){
for (j=0; j<NPOINTS; j++) {
c.r = -2.0+2.5*(double)(i)/(double)(NPOINTS)+eps;
c.i = 1.125*(double)(j)/(double)(NPOINTS)+eps;
testpoint(c);
}
}
area = 2.0*2.5*1.125*(double)(NPOINTS*NPOINTS-numoutside)/(double)(NPOINTS*NPOINTS);
error = area/(double)NPOINTS;
printf("area = %f\nerror = %f\n", area, error);
return 0;
}
void testpoint(struct d_complex c){
struct d_complex z;
int iter;
double temp;
z = c;
for (iter=0; iter<MXITR; iter++){
temp = (z.r*z.r)-(z.i*z.i)+c.r;
z.i = z.r*z.i*2+c.i;
z.r = temp;
if ((z.r*z.r + z.i*z.i)>4.0){
#pragma omp atomic
numoutside++;
break;
}
}
}
|
raytracer.h | #ifndef H_RAYTRACER
#define H_RAYTRACER
#include <omp.h>
#include <math.h>
#include "types.h"
#include "constants.h"
#include "application.h"
#include "globals.h"
#include "kernels.h"
#ifndef _OPENMP
#warning Openmp not enabled!
#endif
// CPU ray tracer backend: renders the scene into a float RGBA buffer with
// an OpenMP-parallel per-pixel loop and uploads it to the luminance texture.
class Raytracer : public Application
{
private:
    float* screenBuffer;  // RGBA float framebuffer, 4 * NR_PIXELS entries (allocated in Init)
    uint max_depth;       // recursion budget for radiance(); set each frame in Render()

    // Recursively trace `ray`; `iteration` is the current bounce depth.
    float3 radiance(const Ray& ray, int iteration = 0);

public:
    SceneBuffers sceneBuffers;  // raw views into the Scene's data, wired up in Init()

    Raytracer(Scene& scene, GLuint luminanceTexture, GLuint albedoTexture)
        : Application(scene, luminanceTexture, albedoTexture) {}
    virtual void Init() override;
    virtual void Render(const Camera& camera, float currentTime, float frameTime, bool shouldClear) override;
    virtual void Finish() override {}
};
// Wire the flat scene buffers to the Scene's storage, allocate the CPU
// framebuffer, and fill the (unused) albedo texture with white.
void Raytracer::Init()
{
    sceneBuffers.vertices = scene.allVertices.data();
    sceneBuffers.vertexData = scene.allVertexData.data();
    sceneBuffers.instances = scene.instances;
    sceneBuffers.models = scene.models.data();
    sceneBuffers.materials = scene.materials.data();
    sceneBuffers.topBvh = scene.topLevelBVH.data();
    sceneBuffers.spheres = scene.spheres.data();
    sceneBuffers.num_spheres = scene.spheres.size();
    sceneBuffers.planes = scene.planes.data();
    sceneBuffers.num_planes = scene.planes.size();
    screenBuffer = (float*)malloc(4 * NR_PIXELS * sizeof(float));

#ifdef _OPENMP
    omp_set_num_threads(8);
#endif

    // Initialize the albedo texture to white because we won't use it anyway.
    for(uint i=0; i<NR_PIXELS*4; i++)
    {
        screenBuffer[i] = 1.0f;
    }
    glBindTexture(GL_TEXTURE_2D, albedoTexture);
    // FIX: upload to the ALBEDO texture. glTextureSubImage2D is a DSA call
    // that targets the texture object passed as its first argument (the bind
    // above is irrelevant to it); the previous code passed luminanceTexture,
    // writing the white buffer there and leaving albedo uninitialized.
    glTextureSubImage2D(albedoTexture, 0, 0, 0, WINDOW_WIDTH, WINDOW_HEIGHT, GL_RGBA, GL_FLOAT, screenBuffer);
    glBindTexture(GL_TEXTURE_2D, 0);
}
void Raytracer::Render(const Camera& camera, float currentTime, float frameTime, bool shouldClear)
{
max_depth = shouldClear ? 2 : 7;
#pragma omp parallel for schedule (dynamic)
for(uint i=0; i<NR_PIXELS; i++)
{
uint y = i / WINDOW_WIDTH;
uint x = i % WINDOW_WIDTH;
const Ray ray = camera.getRay(x, y);
float3 color = radiance(ray);
screenBuffer[x * 4 + y * 4 * WINDOW_WIDTH + 0] = color.x;
screenBuffer[x * 4 + y * 4 * WINDOW_WIDTH + 1] = color.y;
screenBuffer[x * 4 + y * 4 * WINDOW_WIDTH + 2] = color.z;
screenBuffer[x * 4 + y * 4 * WINDOW_WIDTH + 3] = 1;
}
glBindTexture(GL_TEXTURE_2D, luminanceTexture);
glTextureSubImage2D(luminanceTexture, 0, 0, 0, WINDOW_WIDTH, WINDOW_HEIGHT, GL_RGBA, GL_FLOAT, screenBuffer);
glBindTexture(GL_TEXTURE_2D, 0);
}
// Whitted-style recursive radiance estimate along `ray`: direct lighting from
// point lights plus perfect reflection/refraction, cut off at max_depth bounces.
float3 Raytracer::radiance(const Ray& ray, int iteration)
{
    // Bounce budget exhausted — contribute nothing.
    if (iteration >= max_depth) return make_float3(0);
    HitInfo hitInfo = traverseTopLevel<false>(sceneBuffers, ray);
    // Miss: constant sky color.
    if (!hitInfo.intersected()) return make_float3(0.2, 0.3, 0.6);
    const Instance* instance = sceneBuffers.instances + hitInfo.instance_id;
    float3 intersectionPos = ray.origin + hitInfo.t * ray.direction;
    float3 originalNormal = getColliderNormal(sceneBuffers, hitInfo, intersectionPos);
    if (hitInfo.primitive_type == TRIANGLE)
    {
        // Rotate the triangle normal by the instance transform (w = 0,
        // so translation is ignored).
        originalNormal = normalize(instance->transform.mul(originalNormal, 0.0f));
    }
    // Hitting the back side (e.g. from inside a glass object): flip the
    // shading normal so it faces the ray.
    bool inside = dot(ray.direction, originalNormal) > 0;
    const float3 colliderNormal = inside ? -originalNormal : originalNormal;
    const uint material_id = getColliderMaterialID(sceneBuffers, hitInfo, instance);
    Material material = sceneBuffers.materials[material_id];
    float3 diffuse_color = make_float3(0);
    float3 refract_color = make_float3(0);
    float3 reflect_color = make_float3(0);
    if (hitInfo.primitive_type == PLANE)
    {
        // Procedural checkerboard on planes, 4-unit cells in x/z.
        uint px = (uint)(fabsf(intersectionPos.x/4));
        uint py = (uint)(fabsf(intersectionPos.z/4));
        material.diffuse_color = (px + py)%2 == 0 ? make_float3(1) : make_float3(0.2);
    }

    // Energy split between transmitted, reflected and diffuse components.
    float transmission = material.transmit;
    float reflect = material.reflect;
    float diffuse = 1 - transmission - reflect;

    if (diffuse > 0) {
        // Direct lighting: one shadow ray per point light.
        for (int i = 0; i < scene.pointLights.size(); i++) {
            const PointLight &light = scene.pointLights[i];
            float3 fromLight = intersectionPos - light.pos;

            // we occlude ourselves
            if (dot(fromLight, colliderNormal) >= 0) continue;

            float dis2light2 = dot(fromLight, fromLight);
            float dis2light = sqrtf(dis2light2);
            fromLight /= dis2light;
            // EPS offsets on origin and length avoid self-intersection
            // ("shadow acne") at both endpoints.
            Ray shadowRay(light.pos + EPS * fromLight, fromLight, 0);
            shadowRay.length = dis2light - 2 * EPS;
            HitInfo shadowHit = traverseTopLevel<true>(sceneBuffers, shadowRay);
            if (!shadowHit.intersected()) {
                // Lambert cosine term with inverse-square distance falloff.
                diffuse_color += light.color * dot(-fromLight, colliderNormal) / dis2light2;
            }
        }
    }
    if (transmission > 0)
    {
        // getRefractRay may shift some transmitted energy to reflection
        // (presumably Fresnel / total internal reflection — confirm there).
        float changed_reflection = 0;
        Ray refractRay = getRefractRay(ray, colliderNormal, intersectionPos, material, inside, changed_reflection);
        transmission -= changed_reflection;
        reflect += changed_reflection;
        if (transmission > 0) {
            refract_color = radiance(refractRay, iteration+1);
            if (inside)
            {
                // Take away any absorpted light using Beer's law. when leaving the object
                float3 c = material.absorption;
                refract_color = refract_color * make_float3(expf(-c.x * hitInfo.t), expf(-c.y *hitInfo.t), expf(-c.z * hitInfo.t));
            }
        }
    }
    if (reflect > 0)
    {
        Ray reflectRay = getReflectRay(ray, colliderNormal, intersectionPos);
        reflect_color = radiance(reflectRay, iteration+1);
    }
    // Weighted combination, tinted by the surface diffuse color.
    return material.diffuse_color * (diffuse * diffuse_color + transmission * refract_color + reflect * reflect_color);
}
#endif
|
ft.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 3.0 structured OpenMP C versions - FT
This benchmark is an OpenMP C version of the NPB FT code.
The OpenMP C 2.3 versions are derived by RWCP from the serial Fortran versions
in "NPB 2.3-serial" developed by NAS. 3.0 translation is performed by the UVSQ.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Authors: D. Bailey
W. Saphir
OpenMP C version: S. Satoh
3.0 structure translation: M. Popov
--------------------------------------------------------------------*/
#include "../common/npb-C.h"
/* global variables */
#include "global.h"
/* function declarations */
static void evolve(dcomplex u0[NZ][NY][NX], dcomplex u1[NZ][NY][NX],
int t, int indexmap[NZ][NY][NX], int d[3]);
static void compute_initial_conditions(dcomplex u0[NZ][NY][NX], int d[3]);
static void ipow46(double a, int exponent, double *result);
static void setup(void);
static void compute_indexmap(int indexmap[NZ][NY][NX], int d[3]);
static void print_timers(void);
static void fft(int dir, dcomplex x1[NZ][NY][NX], dcomplex x2[NZ][NY][NX]);
static void cffts1(int is, int d[3], dcomplex x[NZ][NY][NX],
dcomplex xout[NZ][NY][NX],
dcomplex y0[NX][FFTBLOCKPAD],
dcomplex y1[NX][FFTBLOCKPAD]);
static void cffts2(int is, int d[3], dcomplex x[NZ][NY][NX],
dcomplex xout[NZ][NY][NX],
dcomplex y0[NX][FFTBLOCKPAD],
dcomplex y1[NX][FFTBLOCKPAD]);
static void cffts3(int is, int d[3], dcomplex x[NZ][NY][NX],
dcomplex xout[NZ][NY][NX],
dcomplex y0[NX][FFTBLOCKPAD],
dcomplex y1[NX][FFTBLOCKPAD]);
static void fft_init (int n);
static void cfftz (int is, int m, int n, dcomplex x[NX][FFTBLOCKPAD],
dcomplex y[NX][FFTBLOCKPAD]);
static void fftz2 (int is, int l, int m, int n, int ny, int ny1,
dcomplex u[NX], dcomplex x[NX][FFTBLOCKPAD],
dcomplex y[NX][FFTBLOCKPAD]);
static int ilog2(int n);
static void checksum(int i, dcomplex u1[NZ][NY][NX], int d[3]);
static void verify (int d1, int d2, int d3, int nt,
boolean *verified, char *class);
/*--------------------------------------------------------------------
c FT benchmark
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Driver for the FT benchmark.
c Runs the whole problem once untimed (to touch all data and amortize
c startup costs), then re-runs it with every phase timed: setup,
c forward FFT, and niter iterations of evolve / inverse FFT / checksum.
c Finally verifies the checksums and prints the performance report.
c-------------------------------------------------------------------*/
int main(int argc, char **argv) {
/*c-------------------------------------------------------------------
c-------------------------------------------------------------------*/
int i, ierr;  /* NOTE(review): ierr is declared but never used here */
/*------------------------------------------------------------------
c u0, u1, u2 are the main arrays in the problem.
c Depending on the decomposition, these arrays will have different
c dimensions. To accomodate all possibilities, we allocate them as
c one-dimensional arrays and pass them to subroutines for different
c views
c - u0 contains the initial (transformed) initial condition
c - u1 and u2 are working arrays
c - indexmap maps i,j,k of u0 to the correct i^2+j^2+k^2 for the
c time evolution operator.
c-----------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Large arrays are in common so that they are allocated on the
c heap rather than the stack. This common block is not
c referenced directly anywhere else. Padding is to avoid accidental
c cache problems, since all array sizes are powers of two.
c-------------------------------------------------------------------*/
static dcomplex u0[NZ][NY][NX];
static dcomplex pad1[3];
static dcomplex u1[NZ][NY][NX];
static dcomplex pad2[3];
static dcomplex u2[NZ][NY][NX];
static dcomplex pad3[3];
static int indexmap[NZ][NY][NX];
int iter;
int nthreads = 1;
double total_time, mflops;
boolean verified;
char class;
/*--------------------------------------------------------------------
c Run the entire problem once to make sure all data is touched.
c This reduces variable startup costs, which is important for such a
c short benchmark. The other NPB 2 implementations are similar.
c-------------------------------------------------------------------*/
for (i = 0; i < T_MAX; i++) {
timer_clear(i);
}
setup();
compute_indexmap(indexmap, dims[2]);
compute_initial_conditions(u1, dims[0]);
fft_init (dims[0][0]);
fft(1, u1, u0);
/*--------------------------------------------------------------------
c Start over from the beginning. Note that all operations must
c be timed, in contrast to other benchmarks.
c-------------------------------------------------------------------*/
for (i = 0; i < T_MAX; i++) {
timer_clear(i);
}
timer_start(T_TOTAL);
if (TIMERS_ENABLED == TRUE) timer_start(T_SETUP);
compute_indexmap(indexmap, dims[2]);
compute_initial_conditions(u1, dims[0]);
fft_init (dims[0][0]);
if (TIMERS_ENABLED == TRUE) {
timer_stop(T_SETUP);
}
if (TIMERS_ENABLED == TRUE) {
timer_start(T_FFT);
}
/* forward transform of the initial condition: u1 -> u0 */
fft(1, u1, u0);
if (TIMERS_ENABLED == TRUE) {
timer_stop(T_FFT);
}
/* timed iterations: evolve in Fourier space, inverse-transform,
   and checksum the result of each step */
for (iter = 1; iter <= niter; iter++) {
if (TIMERS_ENABLED == TRUE) {
timer_start(T_EVOLVE);
}
evolve(u0, u1, iter, indexmap, dims[0]);
if (TIMERS_ENABLED == TRUE) {
timer_stop(T_EVOLVE);
}
if (TIMERS_ENABLED == TRUE) {
timer_start(T_FFT);
}
fft(-1, u1, u2);
if (TIMERS_ENABLED == TRUE) {
timer_stop(T_FFT);
}
if (TIMERS_ENABLED == TRUE) {
timer_start(T_CHECKSUM);
}
checksum(iter, u2, dims[0]);
if (TIMERS_ENABLED == TRUE) {
timer_stop(T_CHECKSUM);
}
}
verify(NX, NY, NZ, niter, &verified, &class);
/* NOTE(review): this brace block is not an OpenMP parallel region
   (the "end parallel" comment suggests one existed once);
   omp_get_num_threads() called outside a parallel region returns 1,
   so the reported thread count may be wrong — confirm upstream. */
{
#if defined(_OPENMP)
nthreads = omp_get_num_threads();
#endif /* _OPENMP */
} /* end parallel */
timer_stop(T_TOTAL);
total_time = timer_read(T_TOTAL);
/* MFLOPS from the NPB operation-count model for FT */
if( total_time != 0.0) {
mflops = 1.0e-6*(double)(NTOTAL) *
(14.8157+7.19641*log((double)(NTOTAL))
+ (5.23518+7.21113*log((double)(NTOTAL)))*niter)
/total_time;
} else {
mflops = 0.0;
}
c_print_results("FT", class, NX, NY, NZ, niter, nthreads,
total_time, mflops, " floating point", verified,
NPBVERSION, COMPILETIME,
CS1, CS2, CS3, CS4, CS5, CS6, CS7);
if (TIMERS_ENABLED == TRUE) print_timers();
/* no explicit return; C99 main falls through with implicit return 0 */
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c evolve u0 -> u1 (t time steps) in fourier space:
c u1[iz][iy][ix] = u0[iz][iy][ix] * ex[t * indexmap[iz][iy][ix]]
c over the d[0] x d[1] x d[2] sub-volume; u0 is left untouched.
c-------------------------------------------------------------------*/
static void evolve(dcomplex u0[NZ][NY][NX], dcomplex u1[NZ][NY][NX],
                   int t, int indexmap[NZ][NY][NX], int d[3]) {
    int ix, iy, iz;

#pragma omp parallel for private(ix, iy, iz)
    for (iz = 0; iz < d[2]; iz++) {
        for (iy = 0; iy < d[1]; iy++) {
            for (ix = 0; ix < d[0]; ix++) {
                /* multiply by the precomputed time-evolution factor */
                crmul(u1[iz][iy][ix], u0[iz][iy][ix],
                      ex[t*indexmap[iz][iy][ix]]);
            }
        }
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Fill u0 with pseudo-random initial conditions, one z-plane at a
c time. The plane seeds are advanced with randlc() so each plane's
c values are independent of how many values earlier planes consumed.
c u0  output field
c d   NOTE(review): unused — extents are read from the global dims[0]
c-------------------------------------------------------------------*/
static void compute_initial_conditions(dcomplex u0[NZ][NY][NX], int d[3]) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Fill in array u0 with initial conditions from
c random number generator
c-------------------------------------------------------------------*/
int k;
double x0, start, an, dummy;
/* static scratch buffer: large enough for one z-plane of complex
   pairs; NOTE(review): static storage makes this non-reentrant */
static double tmp[NX*2*MAXDIM+1];
int i,j,t;
start = SEED;
/*--------------------------------------------------------------------
c Jump to the starting element for our first plane.
c-------------------------------------------------------------------*/
/* an = A^(offset) mod 2^46: skip the RNG ahead to this rank's data */
ipow46(A, (zstart[0]-1)*2*NX*NY + (ystart[0]-1)*2*NX, &an);
dummy = randlc(&start, an);
/* an = A^(2*NX*NY): the per-plane seed advance factor */
ipow46(A, 2*NX*NY, &an);
/*--------------------------------------------------------------------
c Go through by z planes filling in one square at a time.
c-------------------------------------------------------------------*/
for (k = 0; k < dims[0][2]; k++) {
x0 = start;
/* generate one full plane (real+imag interleaved) into tmp */
vranlc(2*NX*dims[0][1], &x0, A, tmp);
t = 1;
for (j = 0; j < dims[0][1]; j++)
for (i = 0; i < NX; i++) {
u0[k][j][i].real = tmp[t++];
u0[k][j][i].imag = tmp[t++];
}
/* advance the seed to the next plane.
   NOTE(review): the condition is always true here since k < dims[0][2]
   inside the loop; presumably "k != dims[0][2]-1" was intended to skip
   the final (unused) advance — harmless either way, confirm upstream */
if (k != dims[0][2]) dummy = randlc(&start, an);
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c compute a^exponent mod 2^46 by square-and-multiply.
c Each modular multiplication is performed by randlc(), which
c multiplies its two arguments mod 2^46 in place; the final product
c is stored in *result.
c-------------------------------------------------------------------*/
static void ipow46(double a, int exponent, double *result) {
    double ignored, base, acc;
    int remaining;

/*--------------------------------------------------------------------
c Use
c   a^n = a^(n/2)*a^(n/2)  if n even, else
c   a^n = a*a^(n-1)        if n odd
c-------------------------------------------------------------------*/
    /* a^0 = 1 */
    *result = 1;
    if (exponent == 0) return;

    base = a;
    acc = 1;
    remaining = exponent;

    while (remaining > 1) {
        if (remaining % 2 == 0) {
            ignored = randlc(&base, base);  /* square the base */
            remaining = remaining / 2;
        } else {
            ignored = randlc(&acc, base);   /* fold one factor into acc */
            remaining = remaining - 1;
        }
    }
    /* one factor of base is still pending: acc *= base (mod 2^46) */
    ignored = randlc(&acc, base);
    *result = acc;
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Print the benchmark banner, set the iteration count, and initialize
c the global dims[][] and {x,y,z}{start,end}[] bookkeeping plus the
c FFT blocking factors. (Unused locals ierr, j, fstatus removed.)
c-------------------------------------------------------------------*/
static void setup(void) {
    int i;

    printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version"
           " - FT Benchmark\n\n");
    niter = NITER_DEFAULT;
    printf(" Size : %3dx%3dx%3d\n", NX, NY, NZ);
    printf(" Iterations : %7d\n", niter);
/* 1004 format(' Number of processes : ', i7)
 1005 format(' Processor array : ', i3, 'x', i3)
 1006 format(' WARNING: compiled for ', i5, ' processes. ',
 > ' Will not verify. ')*/
    /* all three layouts use the full, undecomposed grid */
#pragma omp parallel for
    for (i = 0; i < 3; i++) {
        dims[i][0] = NX;
        dims[i][1] = NY;
        dims[i][2] = NZ;
    }
    /* single "process": every layout owns the whole 1-based range */
#pragma omp parallel for
    for (i = 0; i < 3; i++) {
        xstart[i] = 1;
        xend[i]   = NX;
        ystart[i] = 1;
        yend[i]   = NY;
        zstart[i] = 1;
        zend[i]   = NZ;
    }
/*--------------------------------------------------------------------
c Set up info for blocking of ffts and transposes. This improves
c performance on cache-based systems. Blocking involves
c working on a chunk of the problem at a time, taking chunks
c along the first, second, or third dimension.
c
c - In cffts1 blocking is on 2nd dimension (with fft on 1st dim)
c - In cffts2/3 blocking is on 1st dimension (with fft on 2nd and 3rd dims)
c Since 1st dim is always in processor, we'll assume it's long enough
c (default blocking factor is 16 so min size for 1st dim is 16)
c The only case we have to worry about is cffts1 in a 2d decomposition.
c so the blocking factor should not be larger than the 2nd dimension.
c-------------------------------------------------------------------*/
    fftblock = FFTBLOCK_DEFAULT;
    fftblockpad = FFTBLOCKPAD_DEFAULT;
    /* guard only takes effect if the assignment above ever changes */
    if (fftblock != FFTBLOCK_DEFAULT) fftblockpad = fftblock+3;
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Fill indexmap[k][j][i] with ibar^2+jbar^2+kbar^2 (the wavenumber
c magnitude used as the time-evolution exponent) and precompute the
c ex[] table of evolution factors exp(ap)^n for n = 0..EXPMAX.
c indexmap  output table
c d         NOTE(review): unused — extents come from the global dims[2]
c-------------------------------------------------------------------*/
static void compute_indexmap(int indexmap[NZ][NY][NX], int d[3]) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c compute function from local (i,j,k) to ibar^2+jbar^2+kbar^2
c for time evolution exponent.
c-------------------------------------------------------------------*/
int i, j, k, ii, ii2, jj, ij2, kk;
double ap;
/*--------------------------------------------------------------------
c basically we want to convert the fortran indices
c 1 2 3 4 5 6 7 8
c to
c 0 1 2 3 -4 -3 -2 -1
c The following magic formula does the trick:
c mod(i-1+n/2, n) - n/2
c-------------------------------------------------------------------*/
/* NOTE(review): the pragmas below nest parallel-for regions; with
   nested parallelism disabled (the usual OpenMP default) only the
   outermost loop is actually parallelized — confirm intent */
#pragma omp parallel for private(i ,j ,k ,ii ,ii2 ,jj ,ij2 ,kk )
for (i = 0; i < dims[2][0]; i++) {
ii = (i+1+xstart[2]-2+NX/2)%NX - NX/2;
ii2 = ii*ii;
#pragma omp parallel for private(j) firstprivate(k ,ii ,ii2 ,jj ,ij2 ,kk ,i )
for (j = 0; j < dims[2][1]; j++) {
jj = (j+1+ystart[2]-2+NY/2)%NY - NY/2;
ij2 = jj*jj+ii2;
#pragma omp parallel for private(k) firstprivate(j ,ii ,ii2 ,jj ,ij2 ,kk ,i )
for (k = 0; k < dims[2][2]; k++) {
kk = (k+1+zstart[2]-2+NZ/2)%NZ - NZ/2;
indexmap[k][j][i] = kk*kk+ij2;
}
}
}
/*--------------------------------------------------------------------
c compute array of exponentials for time evolution.
c-------------------------------------------------------------------*/
/* ex[n] = exp(ap)^n, built incrementally from ex[1] */
ap = - 4.0 * ALPHA * PI * PI;
ex[0] = 1.0;
ex[1] = exp(ap);
for (i = 2; i <= EXPMAX; i++) {
ex[i] = ex[i-1]*ex[1];
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Print the elapsed time of every timer slot that accumulated any
c time (slots that were never started read 0.0 and are skipped).
c-------------------------------------------------------------------*/
static void print_timers(void) {
    int i;
    char *tstrings[] = { " total ",
                         " setup ",
                         " fft ",
                         " evolve ",
                         " checksum ",
                         " fftlow ",
                         " fftcopy " };

    for (i = 0; i < T_MAX; i++) {
        if (timer_read(i) != 0.0) {
            /* fixed: the parenthesis after the name was '(' in the
               original format string, leaving it unbalanced */
            printf("timer %2d(%16s) :%10.6f\n", i, tstrings[i], timer_read(i));
        }
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Full 3-D FFT: apply the three 1-D transform passes in dimension
c order 1,2,3 for dir == 1 (forward) and 3,2,1 otherwise (inverse,
c with conjugated twiddles via is = -1). The first two passes are
c in-place in x1; the last writes into x2.
c-------------------------------------------------------------------*/
static void fft(int dir, dcomplex x1[NZ][NY][NX], dcomplex x2[NZ][NY][NX]) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/* scratch buffers handed to the cffts passes.
   NOTE(review): the cffts1/2/3 bodies declare their own local y0/y1
   arrays that shadow these parameters, so these are effectively
   unused — confirm upstream */
dcomplex y0[NX][FFTBLOCKPAD];
dcomplex y1[NX][FFTBLOCKPAD];
/*--------------------------------------------------------------------
c note: args x1, x2 must be different arrays
c note: args for cfftsx are (direction, layout, xin, xout, scratch)
c xin/xout may be the same and it can be somewhat faster
c if they are
c-------------------------------------------------------------------*/
if (dir == 1) {
cffts1(1, dims[0], x1, x1, y0, y1); /* x1 -> x1 */
cffts2(1, dims[1], x1, x1, y0, y1); /* x1 -> x1 */
cffts3(1, dims[2], x1, x2, y0, y1); /* x1 -> x2 */
} else {
cffts3(-1, dims[2], x1, x1, y0, y1); /* x1 -> x1 */
cffts2(-1, dims[1], x1, x1, y0, y1); /* x1 -> x1 */
cffts1(-1, dims[0], x1, x2, y0, y1); /* x1 -> x2 */
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c 1-D FFTs along the first (x) dimension: for each z-plane, copy a
c block of fftblock rows into the y0 work array (transposing so the
c transform direction is contiguous per column), run cfftz, and copy
c the result to xout.
c is     +1 forward / -1 inverse transform
c d      extents of the region to transform
c y0,y1  NOTE(review): shadowed by the local arrays below; unused
c-------------------------------------------------------------------*/
static void cffts1(int is, int d[3], dcomplex x[NZ][NY][NX],
dcomplex xout[NZ][NY][NX],
dcomplex y0[NX][FFTBLOCKPAD],
dcomplex y1[NX][FFTBLOCKPAD]) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
int logd[3];
int i, j, k, jj;
/* log2 of each extent, needed by cfftz */
#pragma omp parallel for
for (i = 0; i < 3; i++) {
logd[i] = ilog2(d[i]);
}
{
/* block-local scratch; shadows the y0/y1 parameters.
   NOTE(review): declared outside the parallel-for below, so all
   threads of the k loop share one copy — in the original NPB code
   these live inside a parallel region (making them per-thread);
   as written this is a race if the k loop runs multi-threaded */
dcomplex y0[NX][FFTBLOCKPAD];
dcomplex y1[NX][FFTBLOCKPAD];
#pragma omp parallel for private(i ,j ,k ,jj )
for (k = 0; k < d[2]; k++) {
for (jj = 0; jj <= d[1] - fftblock; jj+=fftblock) {
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
/* gather: x[k][jj..jj+fftblock)[i] -> y0[i][j] (transposed) */
#pragma omp parallel for private(j) firstprivate(i ,jj ,k )
for (j = 0; j < fftblock; j++) {
#pragma omp parallel for private(i) firstprivate(jj ,j ,k )
for (i = 0; i < d[0]; i++) {
y0[i][j].real = x[k][j+jj][i].real;
y0[i][j].imag = x[k][j+jj][i].imag;
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
cfftz (is, logd[0],
d[0], y0, y1);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
/* scatter the transformed block back (un-transpose) */
#pragma omp parallel for private(j) firstprivate(fftblock ,i ,jj ,x ,k )
for (j = 0; j < fftblock; j++) {
for (i = 0; i < d[0]; i++) {
xout[k][j+jj][i].real = y0[i][j].real;
xout[k][j+jj][i].imag = y0[i][j].imag;
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c 1-D FFTs along the second (y) dimension: for each z-plane, copy a
c block of fftblock columns into y0 (the y direction becomes the
c leading index of the work array), run cfftz, and copy back.
c is     +1 forward / -1 inverse transform
c d      extents of the region to transform
c y0,y1  NOTE(review): shadowed by the local arrays below; unused
c-------------------------------------------------------------------*/
static void cffts2(int is, int d[3], dcomplex x[NZ][NY][NX],
dcomplex xout[NZ][NY][NX],
dcomplex y0[NX][FFTBLOCKPAD],
dcomplex y1[NX][FFTBLOCKPAD]) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
int logd[3];
int i, j, k, ii;
/* log2 of each extent, needed by cfftz */
#pragma omp parallel for
for (i = 0; i < 3; i++) {
logd[i] = ilog2(d[i]);
}
{
/* block-local scratch; shadows the y0/y1 parameters.
   NOTE(review): shared by all threads of the k loop below — a race
   if that loop runs multi-threaded; in the original NPB code these
   are per-thread (declared inside a parallel region) */
dcomplex y0[NX][FFTBLOCKPAD];
dcomplex y1[NX][FFTBLOCKPAD];
#pragma omp parallel for private(i ,j ,k ,ii )
for (k = 0; k < d[2]; k++) {
for (ii = 0; ii <= d[0] - fftblock; ii+=fftblock) {
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
/* gather: x[k][j][ii..ii+fftblock) -> y0[j][i] */
#pragma omp parallel for private(j) firstprivate(i ,ii ,x ,fftblock ,k )
for (j = 0; j < d[1]; j++) {
#pragma omp parallel for private(i) firstprivate(ii ,x ,fftblock ,j ,k )
for (i = 0; i < fftblock; i++) {
y0[j][i].real = x[k][j][i+ii].real;
y0[j][i].imag = x[k][j][i+ii].imag;
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
cfftz (is, logd[1],
d[1], y0, y1);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
/* scatter the transformed block back */
#pragma omp parallel for private(j) firstprivate(i ,ii ,x ,fftblock ,k )
for (j = 0; j < d[1]; j++) {
for (i = 0; i < fftblock; i++) {
xout[k][j][i+ii].real = y0[j][i].real;
xout[k][j][i+ii].imag = y0[j][i].imag;
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c 1-D FFTs along the third (z) dimension: for each y-row, copy a
c block of fftblock columns across all z into y0 (z becomes the
c leading index of the work array), run cfftz, and copy back.
c is     +1 forward / -1 inverse transform
c d      extents of the region to transform
c y0,y1  NOTE(review): shadowed by the local arrays below; unused
c-------------------------------------------------------------------*/
static void cffts3(int is, int d[3], dcomplex x[NZ][NY][NX],
dcomplex xout[NZ][NY][NX],
dcomplex y0[NX][FFTBLOCKPAD],
dcomplex y1[NX][FFTBLOCKPAD]) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
int logd[3];
int i, j, k, ii;
/* log2 of each extent, needed by cfftz */
#pragma omp parallel for
for (i = 0;i < 3; i++) {
logd[i] = ilog2(d[i]);
}
{
/* block-local scratch; shadows the y0/y1 parameters.
   NOTE(review): shared by all threads of the j loop below — a race
   if that loop runs multi-threaded; in the original NPB code these
   are per-thread (declared inside a parallel region) */
dcomplex y0[NX][FFTBLOCKPAD];
dcomplex y1[NX][FFTBLOCKPAD];
#pragma omp parallel for private(i ,j ,k ,ii )
for (j = 0; j < d[1]; j++) {
for (ii = 0; ii <= d[0] - fftblock; ii+=fftblock) {
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
/* gather: x[k][j][ii..ii+fftblock) over all k -> y0[k][i] */
#pragma omp parallel for private(k) firstprivate(i ,ii ,j )
for (k = 0; k < d[2]; k++) {
#pragma omp parallel for private(i) firstprivate(ii ,k ,j )
for (i = 0; i < fftblock; i++) {
y0[k][i].real = x[k][j][i+ii].real;
y0[k][i].imag = x[k][j][i+ii].imag;
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
cfftz (is, logd[2],
d[2], y0, y1);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
/* scatter the transformed block back */
#pragma omp parallel for private(k) firstprivate(i ,ii ,x ,fftblock ,j )
for (k = 0; k < d[2]; k++) {
for (i = 0; i < fftblock; i++) {
xout[k][j][i+ii].real = y0[k][i].real;
xout[k][j][i+ii].imag = y0[k][i].imag;
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c compute the roots-of-unity array u[] that will be used for
c subsequent FFTs. u[0].real holds m = log2(n); for each FFT level
c j = 1..m, the block u[ku .. ku+ln-1] holds exp(i * pi * k / ln)
c for k = 0..ln-1, laid out so each level is accessed with stride
c one. (The unused local 'nu' from the original was removed.)
c-------------------------------------------------------------------*/
static void fft_init (int n) {
    int m, ku, i, j, ln;
    double t, ti;

/*--------------------------------------------------------------------
c Initialize the U array with sines and cosines in a manner that permits
c stride one access at each FFT iteration.
c-------------------------------------------------------------------*/
    m = ilog2(n);
    u[0].real = (double)m;   /* stash m so cfftz can validate its input */
    u[0].imag = 0.0;
    ku = 1;                  /* start of the current level's twiddles */
    ln = 1;                  /* number of twiddles at this level */
    for (j = 1; j <= m; j++) {
        t = PI / ln;
        for (i = 0; i <= ln - 1; i++) {
            ti = i * t;
            u[i+ku].real = cos(ti);
            u[i+ku].imag = sin(ti);
        }
        ku = ku + ln;
        ln = 2 * ln;
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void cfftz (int is, int m, int n, dcomplex x[NX][FFTBLOCKPAD],
dcomplex y[NX][FFTBLOCKPAD]) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Computes NY N-point complex-to-complex FFTs of X using an algorithm due
c to Swarztrauber. X is both the input and the output array, while Y is a
c scratch array. It is assumed that N = 2^M. Before calling CFFTZ to
c perform FFTs, the array U must be initialized by calling CFFTZ with IS
c set to 0 and M set to MX, where MX is the maximum value of M for any
c subsequent call.
c-------------------------------------------------------------------*/
int i,j,l,mx;
/*--------------------------------------------------------------------
c Check if input parameters are invalid.
c-------------------------------------------------------------------*/
/* fft_init() stored log2(max n) in u[0].real; a transform longer
   than that would read past the initialized twiddle table */
mx = (int)(u[0].real);
if ((is != 1 && is != -1) || m < 1 || m > mx) {
printf("CFFTZ: Either U has not been initialized, or else\n"
"one of the input parameters is invalid%5d%5d%5d\n",
is, m, mx);
exit(1);
}
/*--------------------------------------------------------------------
c Perform one variant of the Stockham FFT.
c-------------------------------------------------------------------*/
/* ping-pong between x and y two levels at a time; for odd m the
   "break" leaves the final level's output in y */
for (l = 1; l <= m; l+=2) {
fftz2 (is, l, m, n, fftblock, fftblockpad, u, x, y);
if (l == m) break;
fftz2 (is, l + 1, m, n, fftblock, fftblockpad, u, y, x);
}
/*--------------------------------------------------------------------
c Copy Y to X.
c-------------------------------------------------------------------*/
/* odd number of levels: result is in y, so move it back into x,
   which is the caller-visible output */
if (m % 2 == 1) {
for (j = 0; j < n; j++) {
#pragma omp parallel for private(i) firstprivate(fftblock ,j )
for (i = 0; i < fftblock; i++) {
x[j][i].real = y[j][i].real;
x[j][i].imag = y[j][i].imag;
}
}
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void fftz2 (int is, int l, int m, int n, int ny, int ny1,
dcomplex u[NX], dcomplex x[NX][FFTBLOCKPAD],
dcomplex y[NX][FFTBLOCKPAD]) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Performs the L-th iteration of the second variant of the Stockham FFT.
c is  transform direction: >= 1 uses u as-is, otherwise the twiddle
c     is conjugated (inverse transform)
c ny  number of independent transforms done in lockstep (the block
c     width); ny1 (the padded width) is accepted but not referenced
c-------------------------------------------------------------------*/
int k,n1,li,lj,lk,ku,i,j,i11,i12,i21,i22;
dcomplex u1,x11,x21;
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
n1 = n / 2;
/* lk = 2^(l-1): butterfly span at this level
   ("2 << (p-1)" is 2^p; the p == 0 case is handled separately) */
if (l-1 == 0) {
lk = 1;
} else {
lk = 2 << ((l - 1)-1);
}
/* li = 2^(m-l): number of butterfly groups at this level */
if (m-l == 0) {
li = 1;
} else {
li = 2 << ((m - l)-1);
}
lj = 2 * lk;
ku = li;   /* level-l twiddles start at u[li] (see fft_init layout) */
for (i = 0; i < li; i++) {
/* input pair starts (i11, i12) and output pair starts (i21, i22)
   for butterfly group i */
i11 = i * lk;
i12 = i11 + n1;
i21 = i * lj;
i22 = i21 + lk;
/* pick the group's twiddle factor, conjugated for the inverse */
if (is >= 1) {
u1.real = u[ku+i].real;
u1.imag = u[ku+i].imag;
} else {
u1.real = u[ku+i].real;
u1.imag = -u[ku+i].imag;
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k = 0; k < lk; k++) {
for (j = 0; j < ny; j++) {
double x11real, x11imag;
double x21real, x21imag;
x11real = x[i11+k][j].real;
x11imag = x[i11+k][j].imag;
x21real = x[i12+k][j].real;
x21imag = x[i12+k][j].imag;
/* butterfly: y[i21] = x11 + x21, y[i22] = u1*(x11 - x21) */
y[i21+k][j].real = x11real + x21real;
y[i21+k][j].imag = x11imag + x21imag;
y[i22+k][j].real = u1.real * (x11real - x21real)
- u1.imag * (x11imag - x21imag);
y[i22+k][j].imag = u1.real * (x11imag - x21imag)
+ u1.imag * (x11real - x21real);
}
}
}
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Integer ceiling of log base 2: returns the smallest e such that
c 2^e >= n (0 for n == 1). Expects n >= 1.
c-------------------------------------------------------------------*/
static int ilog2(int n) {
    int power, result;

    if (n == 1) return 0;

    result = 1;
    power = 2;
    /* double 'power' until it reaches or exceeds n */
    while (power < n) {
        power <<= 1;
        result++;
    }
    return result;
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Accumulate the complex checksum of 1024 pseudo-randomly strided
c elements of u1, fold it into the running sums[i], normalize by the
c total number of grid points, and print the result.
c i   iteration number (indexes the sums[] history)
c u1  field to checksum
c d   unused; the owned extents come from the global start/end arrays
c
c Fix: the original parallel-for updated a shared dcomplex 'chk' via
c cadd and used shared q,r,s — a data race. The accumulation now uses
c scalar OpenMP reductions with q,r,s private.
c-------------------------------------------------------------------*/
static void checksum(int i, dcomplex u1[NZ][NY][NX], int d[3]) {
    int j, q, r, s;
    double chk_real = 0.0;
    double chk_imag = 0.0;

#pragma omp parallel for private(q,r,s) reduction(+:chk_real,chk_imag)
    for (j = 1; j <= 1024; j++) {
        q = j%NX+1;
        if (q >= xstart[0] && q <= xend[0]) {
            r = (3*j)%NY+1;
            if (r >= ystart[0] && r <= yend[0]) {
                s = (5*j)%NZ+1;
                if (s >= zstart[0] && s <= zend[0]) {
                    /* 1-based (q,r,s) mapped to local 0-based indices */
                    chk_real += u1[s-zstart[0]][r-ystart[0]][q-xstart[0]].real;
                    chk_imag += u1[s-zstart[0]][r-ystart[0]][q-xstart[0]].imag;
                }
            }
        }
    }
    /* fold this iteration's checksum into the running sum */
    sums[i].real += chk_real;
    sums[i].imag += chk_imag;
    /* complex % real */
    sums[i].real = sums[i].real/(double)(NTOTAL);
    sums[i].imag = sums[i].imag/(double)(NTOTAL);
    printf("T = %5d Checksum = %22.12e %22.12e\n",
           i, sums[i].real, sums[i].imag);
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void verify (int d1, int d2, int d3, int nt,
boolean *verified, char *class) {
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
int ierr, size, i;
double err, epsilon;
/*--------------------------------------------------------------------
c Sample size reference checksums
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Class S size reference checksums
c-------------------------------------------------------------------*/
double vdata_real_s[6+1] = { 0.0,
5.546087004964e+02,
5.546385409189e+02,
5.546148406171e+02,
5.545423607415e+02,
5.544255039624e+02,
5.542683411902e+02 };
double vdata_imag_s[6+1] = { 0.0,
4.845363331978e+02,
4.865304269511e+02,
4.883910722336e+02,
4.901273169046e+02,
4.917475857993e+02,
4.932597244941e+02 };
/*--------------------------------------------------------------------
c Class W size reference checksums
c-------------------------------------------------------------------*/
double vdata_real_w[6+1] = { 0.0,
5.673612178944e+02,
5.631436885271e+02,
5.594024089970e+02,
5.560698047020e+02,
5.530898991250e+02,
5.504159734538e+02 };
double vdata_imag_w[6+1] = { 0.0,
5.293246849175e+02,
5.282149986629e+02,
5.270996558037e+02,
5.260027904925e+02,
5.249400845633e+02,
5.239212247086e+02 };
/*--------------------------------------------------------------------
c Class A size reference checksums
c-------------------------------------------------------------------*/
double vdata_real_a[6+1] = { 0.0,
5.046735008193e+02,
5.059412319734e+02,
5.069376896287e+02,
5.077892868474e+02,
5.085233095391e+02,
5.091487099959e+02 };
double vdata_imag_a[6+1] = { 0.0,
5.114047905510e+02,
5.098809666433e+02,
5.098144042213e+02,
5.101336130759e+02,
5.104914655194e+02,
5.107917842803e+02 };
/*--------------------------------------------------------------------
c Class B size reference checksums
c-------------------------------------------------------------------*/
double vdata_real_b[20+1] = { 0.0,
5.177643571579e+02,
5.154521291263e+02,
5.146409228649e+02,
5.142378756213e+02,
5.139626667737e+02,
5.137423460082e+02,
5.135547056878e+02,
5.133910925466e+02,
5.132470705390e+02,
5.131197729984e+02,
5.130070319283e+02,
5.129070537032e+02,
5.128182883502e+02,
5.127393733383e+02,
5.126691062020e+02,
5.126064276004e+02,
5.125504076570e+02,
5.125002331720e+02,
5.124551951846e+02,
5.124146770029e+02 };
double vdata_imag_b[20+1] = { 0.0,
5.077803458597e+02,
5.088249431599e+02,
5.096208912659e+02,
5.101023387619e+02,
5.103976610617e+02,
5.105948019802e+02,
5.107404165783e+02,
5.108576573661e+02,
5.109577278523e+02,
5.110460304483e+02,
5.111252433800e+02,
5.111968077718e+02,
5.112616233064e+02,
5.113203605551e+02,
5.113735928093e+02,
5.114218460548e+02,
5.114656139760e+02,
5.115053595966e+02,
5.115415130407e+02,
5.115744692211e+02 };
/*--------------------------------------------------------------------
c Class C size reference checksums
c-------------------------------------------------------------------*/
double vdata_real_c[20+1] = { 0.0,
5.195078707457e+02,
5.155422171134e+02,
5.144678022222e+02,
5.140150594328e+02,
5.137550426810e+02,
5.135811056728e+02,
5.134569343165e+02,
5.133651975661e+02,
5.132955192805e+02,
5.132410471738e+02,
5.131971141679e+02,
5.131605205716e+02,
5.131290734194e+02,
5.131012720314e+02,
5.130760908195e+02,
5.130528295923e+02,
5.130310107773e+02,
5.130103090133e+02,
5.129905029333e+02,
5.129714421109e+02 };
double vdata_imag_c[20+1] = { 0.0,
5.149019699238e+02,
5.127578201997e+02,
5.122251847514e+02,
5.121090289018e+02,
5.121143685824e+02,
5.121496764568e+02,
5.121870921893e+02,
5.122193250322e+02,
5.122454735794e+02,
5.122663649603e+02,
5.122830879827e+02,
5.122965869718e+02,
5.123075927445e+02,
5.123166486553e+02,
5.123241541685e+02,
5.123304037599e+02,
5.123356167976e+02,
5.123399592211e+02,
5.123435588985e+02,
5.123465164008e+02 };
epsilon = 1.0e-12;
*verified = TRUE;
*class = 'U';
if (d1 == 64 &&
d2 == 64 &&
d3 == 64 &&
nt == 6) {
*class = 'S';
for (i = 1; i <= nt; i++) {
err = (get_real(sums[i]) - vdata_real_s[i]) / vdata_real_s[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
err = (get_imag(sums[i]) - vdata_imag_s[i]) / vdata_imag_s[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
}
} else if (d1 == 128 &&
d2 == 128 &&
d3 == 32 &&
nt == 6) {
*class = 'W';
for (i = 1; i <= nt; i++) {
err = (get_real(sums[i]) - vdata_real_w[i]) / vdata_real_w[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
err = (get_imag(sums[i]) - vdata_imag_w[i]) / vdata_imag_w[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
}
} else if (d1 == 256 &&
d2 == 256 &&
d3 == 128 &&
nt == 6) {
*class = 'A';
for (i = 1; i <= nt; i++) {
err = (get_real(sums[i]) - vdata_real_a[i]) / vdata_real_a[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
err = (get_imag(sums[i]) - vdata_imag_a[i]) / vdata_imag_a[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
}
} else if (d1 == 512 &&
d2 == 256 &&
d3 == 256 &&
nt == 20) {
*class = 'B';
for (i = 1; i <= nt; i++) {
err = (get_real(sums[i]) - vdata_real_b[i]) / vdata_real_b[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
err = (get_imag(sums[i]) - vdata_imag_b[i]) / vdata_imag_b[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
}
} else if (d1 == 512 &&
d2 == 512 &&
d3 == 512 &&
nt == 20) {
*class = 'C';
for (i = 1; i <= nt; i++) {
err = (get_real(sums[i]) - vdata_real_c[i]) / vdata_real_c[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
err = (get_imag(sums[i]) - vdata_imag_c[i]) / vdata_imag_c[i];
if (fabs(err) > epsilon) {
*verified = FALSE;
break;
}
}
}
if (*class != 'U') {
printf("Result verification successful\n");
} else {
printf("Result verification failed\n");
}
printf("class = %1c\n", *class);
}
|
GB_binop__land_int8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__land_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__land_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__land_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__land_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__land_int8)
// A*D function (colscale): GB (_AxD__land_int8)
// D*A function (rowscale): GB (_DxB__land_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__land_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__land_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__land_int8)
// C=scalar+B GB (_bind1st__land_int8)
// C=scalar+B' GB (_bind1st_tran__land_int8)
// C=A+scalar GB (_bind2nd__land_int8)
// C=A'+scalar GB (_bind2nd_tran__land_int8)
// C type: int8_t
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = ((aij != 0) && (bij != 0))
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) && (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LAND || GxB_NO_INT8 || GxB_NO_LAND_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
/* C = A+B where all three matrices are dense; the actual loop comes
   from the included template, specialized by the GB_* macros above. */
void GB (_Cdense_ewise3_noaccum__land_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__land_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    // C += B where C is dense; the loop lives in the included subassign
    // template (method 23), specialized for the LAND_INT8 operator.
    #if GB_DISABLE
    // this kernel was compiled out via GxB_NO_* flags
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__land_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    // C += b for a scalar b, with C dense (subassign method 22).
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable; emitted by the code generator
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__land_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    // C = A*D: scale the columns of A by the diagonal matrix D, writing the
    // int8 results directly into C->x via the colscale template.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__land_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    // C = D*B: scale the rows of B by the diagonal matrix D, writing the
    // int8 results directly into C->x via the rowscale template.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__land_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseAdd / eWiseUnion: C = A+B with an optional (possibly structural,
    // possibly complemented) mask M; entries present in both A and B are
    // combined with the LAND op (see GB_BINOP above).
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // for eWiseUnion, alpha/beta stand in for entries missing from A or B
    int8_t alpha_scalar ;
    int8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__land_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseMult method 08: C = A.*B (optionally masked) where C is
    // sparse or hypersparse; the traversal lives in the included meta file.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__land_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    // eWiseMult method 02: C<#> = A.*B where A is sparse/hyper and B is
    // bitmap/full.  GB_BINOP_FLIP is 0 for LAND (commutative), so only the
    // non-flipped branch below is compiled in for this kernel.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__land_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    // eWiseMult method 04: C<M> = A.*B with M sparse/hyper and both A and B
    // bitmap/full; the work is sliced over M by the caller.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__land_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseMult producing a bitmap C: C=A.*B, C<M>=A.*B, or C<!M>=A.*B.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__land_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    // Cx [p] = land (x, Bx [p]) for all bnz entries, binding the scalar x as
    // the first operand; Bb is B's bitmap (entries absent from it are skipped).
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int8_t bij = GBX (Bx, p, false) ;
        // logical AND of the two int8 values, stored as 0 or 1
        Cx [p] = ((x != 0) && (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__land_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    // Cx [p] = land (Ax [p], y) for all anz entries, binding the scalar y as
    // the second operand; Ab is A's bitmap (entries absent from it are skipped).
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int8_t aij = GBX (Ax, p, false) ;
        // logical AND of the two int8 values, stored as 0 or 1
        Cx [p] = ((aij != 0) && (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) && (aij != 0)) ; \
}
GrB_Info GB (_bind1st_tran__land_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // C = op (x, A'): transpose A and apply z = land (x, aij) through the
    // GB_CAST_OP macro defined just above this function.
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for code that follows (generator artifact: the
    // value is identical, so this is a benign redefinition)
    #undef GB_ATYPE
    #define GB_ATYPE \
    int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) && (y != 0)) ; \
}
GrB_Info GB (_bind2nd_tran__land_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // C = op (A', y): transpose A and apply z = land (aij, y) through the
    // GB_CAST_OP macro defined just above this function.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
odd-even-merge-sort_omp_beta.c | #include<stdio.h>
#include<stdlib.h>
#include<time.h>
#include<limits.h>
#include<omp.h>
#define MAX(a,b) ((a<b)?b:a)
#define MIN(a,b) ((a>=b)?b:a)
#define ODD(A,n,i) A[n+2*i]
#define EVEN(A,n,i) A[n+2*i+1]
/* Dump the index row l..r-1 followed by the matching values of A,
 * each row on its own line, 3 columns wide per entry. */
void print_array(int *A, int l, int r)
{
    putchar('\n');
    for (int idx = l; idx < r; ++idx)
        printf("%3d ", idx);
    putchar('\n');
    for (int idx = l; idx < r; ++idx)
        printf("%3d ", A[idx]);
    putchar('\n');
}
/* Allocate an n-element array and fill it with pseudo-random values in
 * [0, n), seeding the PRNG from the wall clock. Caller frees the result. */
int *generate(int n)
{
    int *out = (int *)malloc(sizeof(int) * n);
    srand(time(NULL));
    for (int k = 0; k < n; ++k)
        out[k] = rand() % n;
    return out;
}
/* Return a freshly malloc'd duplicate of the first n ints of A.
 * Caller owns (and frees) the returned buffer. */
int *copy(int *A, int n)
{
    int *dup = (int *)malloc(sizeof(int) * n);
    for (int k = n; k-- > 0; )
        dup[k] = A[k];
    return dup;
}
/* qsort comparator for int: returns <0, 0, >0 as *a is less than, equal to,
 * or greater than *b, as the qsort contract requires.
 * Fix: the previous version returned only 0 or 1, which is not a valid
 * three-way comparison (a<b was reported as "equal"), so qsort could
 * produce an incorrectly ordered array and validate() could report a
 * spurious Failure. */
int compare(const void *a, const void *b)
{
    const int x = *(const int *)a;
    const int y = *(const int *)b;
    return (x > y) - (x < y);
}
/* Compare two n-element arrays element-wise; print "Failure" at the first
 * mismatch, otherwise "Success!". */
void validate(int *A1, int *A2, int n)
{
    for (int i = 0; i < n; ++i)
    {
        if (A1[i] != A2[i])
        {
            printf("Failure\n");
            return;
        }
    }
    printf("Success!\n");
}
void odd_even_merge_sort_omp(int *A,int l,int c,int r);
void odd_even_merge_sort(int *A,int l,int c,int r);
void odd_even_merge(int *A,int l,int c,int r);
void odd_even_merge2(int *A,int s);
/* Floor of the base-2 logarithm: index of the highest set bit of n.
 * Returns -1 when n == 0 (no bits set). */
int log_2(int n)
{
    int bits = -1;
    for (; n != 0; n >>= 1)
        ++bits;
    return bits;
}
//int unsort[32]={2,3,18,9,23,11,4,25,0,13,6,21,14,27,1,10,15,5,16,17,8,24,22,12,19,29,26,30,28,7,31,20};
//int unsort_array[16]={2,3,9,11,4,13,6,14,1,10,15,5,16,8,12,7};
//int unsort_array[8]={2,3,4,6,1,5,8,7};
/* Driver: generate N random ints (first CLI argument, default 32), sort one
 * copy with the parallel odd-even merge sort and another with qsort, check
 * that both agree, and report the timings and speedup.
 * NOTE(review): the network sort appears to require N to be a power of two
 * and a multiple of 4*threads — confirm before changing the default.
 * Fixes: free the two heap buffers before exit (they leaked), and make the
 * default-size literal const. */
int main(int argc, char* argv[])
{
    const char *dflt = "32";
    int N = atoi(argc == 2 ? argv[1] : dflt);
    int *unpsort = generate(N);
    int *unqsort = copy(unpsort, N);

    /* time the parallel odd-even merge sort */
    double odd_even_parallel_t = omp_get_wtime();
    odd_even_merge_sort_omp(unpsort, 0, N/2, N);
    odd_even_parallel_t = omp_get_wtime() - odd_even_parallel_t;

    /* time the libc qsort reference */
    double qsort_t = omp_get_wtime();
    qsort(unqsort, N, sizeof(int), &compare);
    qsort_t = omp_get_wtime() - qsort_t;

    validate(unpsort, unqsort, N);
    printf("qsort=%lf,Parallel = %lf sec (%lf times speedup)\n",
           qsort_t,
           odd_even_parallel_t, (qsort_t/odd_even_parallel_t));

    free(unpsort);
    free(unqsort);
    return 0;
}
void odd_even_merge(int *A,int l,int c,int r)
{
/* Merge the two sorted halves A[l..c) and A[c..r) with Batcher's odd-even
 * merge:
 *  1. merge the odd-position elements of both halves into D,
 *  2. merge the even-position elements into E,
 *  3. interleave D and E with one compare-exchange per adjacent pair.
 * ODD/EVEN are file-level macros that index relative to a base offset.
 * NOTE(review): assumes both halves have the same length n = c-l and that
 * n is even (each half contributes N = n/2 odd and even elements). */
int n=c-l;
int N=n/2;
int *D=(int *)malloc(sizeof(int)*n);
int *E=(int *)malloc(sizeof(int)*n);
/* step 1: two-way merge of the odd-position elements of both halves */
int t0=0,t1=0;
for(int i=0;i<n;i++)
{
if( t0 == N || ( t1 != N && ODD(A,l,t0) > ODD(A,c,t1) ) )
D[i]=ODD(A,c,t1++);
else
D[i]=ODD(A,l,t0++);
}
/* step 2: two-way merge of the even-position elements of both halves */
int t2=0,t3=0;
for(int i=0;i<n;i++)
{
if( t2 == N || ( t3 != N && EVEN(A,l,t2) > EVEN(A,c,t3)) )
E[i]=EVEN(A,c,t3++);
else
E[i]=EVEN(A,l,t2++);
}
/* step 3: interleave — first of D and last of E land directly; every
 * in-between pair (D[i], E[i-1]) needs one final compare-exchange */
A[l]=D[0];
for(int i=1;i<n;i++)
{
A[l+2*i-1]=MIN(D[i],E[i-1]);
A[l+2*i]=MAX(D[i],E[i-1]);
}
A[r-1]=E[n-1];
free(D);
free(E);
}
/* Sorting network for the four elements A[s..s+3]; the base case of the
 * odd-even merge sort. Equivalent to the original MIN/MAX macro form:
 * sort each input pair, then the overall min/max are the min of mins /
 * max of maxes, and the two middle slots are the ordered pair formed by
 * the max-of-mins and the min-of-maxes. */
void odd_even_merge2(int *A, int s)
{
    const int a = A[s+0], b = A[s+1], c = A[s+2], d = A[s+3];
    const int lo01 = (a < b) ? a : b, hi01 = (a < b) ? b : a;
    const int lo23 = (c < d) ? c : d, hi23 = (c < d) ? d : c;
    const int midlo = (lo01 < lo23) ? lo23 : lo01;  /* max of the two mins  */
    const int midhi = (hi01 < hi23) ? hi01 : hi23;  /* min of the two maxes */
    A[s+0] = (lo01 < lo23) ? lo01 : lo23;
    A[s+1] = (midlo < midhi) ? midlo : midhi;
    A[s+2] = (midlo < midhi) ? midhi : midlo;
    A[s+3] = (hi01 < hi23) ? hi23 : hi01;
}
void odd_even_merge_sort(int *A,int l,int c,int r)
{
/* Recursively sort A[l..r) (c is the midpoint) with Batcher's odd-even
 * merge sort. Recursion bottoms out at 4-element blocks, which are sorted
 * directly by the odd_even_merge2() network.
 * NOTE(review): the base cases imply r-l must be a power of two >= 4 —
 * confirm callers guarantee this. */
if(r-l==4)
{
odd_even_merge2(A,l);
return;
}
if(c-l==4)
{
/* both halves are exactly 4 wide: sort each directly, then merge */
odd_even_merge2(A,l);
odd_even_merge2(A,c);
odd_even_merge(A,l,c,r);
return;
}
odd_even_merge_sort(A,l,(l+c)/2,c);
odd_even_merge_sort(A,c,(c+r)/2,r);
odd_even_merge(A,l,c,r);
}
void odd_even_merge_sort_omp(int *A,int l,int c,int r)
{
/* Parallel driver: split A[0..r) into one chunk per OpenMP thread, sort
 * each chunk in parallel, then merge adjacent chunk pairs in a halving
 * tree, finishing with one sequential merge of the two final halves.
 * NOTE(review): assumes r is a multiple of the thread count, the thread
 * count is a power of two, and each chunk width is a power of two >= 4;
 * l and c are only used by the final merge. */
int j=1;        /* chunk-width multiplier for the current round */
int size;
/* every thread stores the same value, so the unsynchronized write is
 * benign; effectively size = number of threads in the team */
#pragma omp parallel shared(size)
{
size = omp_get_num_threads();
}
for(int i = size ; i > 1 ; i=i/2)
{
if(j==1)
{
/* first round: sort each of the `size` base chunks of width r/size */
#pragma omp parallel for shared(A,size) firstprivate(l,c,r)
for(int k=0; k < i ; k++)
{
int L=k*(r/size)*j;
int R=(k+1)*(r/size)*j;
int C=(L+R)/2;
odd_even_merge_sort(A,L,C,R);
}
}
else
{
/* later rounds: merge adjacent sorted chunks of width (r/size)*j */
#pragma omp parallel for shared(A,size) firstprivate(l,c,r)
for(int k=0; k < i ; k++)
{
int L=k*(r/size)*j;
int R=(k+1)*(r/size)*j;
int C=(L+R)/2;
odd_even_merge(A,L,C,R);
}
}
j=j<<1;
}
/* final sequential merge of the two sorted halves */
odd_even_merge(A,l,c,r);
}
|
accelerate_kernel_c.c | /*Crown Copyright 2012 AWE.
*
* This file is part of CloverLeaf.
*
* CloverLeaf is free software: you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the
* Free Software Foundation, either version 3 of the License, or (at your option)
* any later version.
*
* CloverLeaf is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License along with
* CloverLeaf. If not, see http://www.gnu.org/licenses/. */
/**
* @brief C acceleration kernel
* @author Wayne Gaudin
* @details The pressure and viscosity gradients are used to update the
* velocity field.
*/
#include <stdio.h>
#include <stdlib.h>
#include "ftocmacros.h"
#include <math.h>
/* Fortran-callable acceleration kernel: update the node-centred velocity
 * fields (xvel1, yvel1) from xvel0/yvel0 using the pressure and viscosity
 * gradients over one timestep dt.
 * All array arguments are Fortran-ordered with 2-cell halos, addressed via
 * the FTNREF2D macro; cell-centred fields (density0, pressure, viscosity,
 * volume, yarea) use pitch x_max+4, node/face-centred fields (xvel*, yvel*,
 * xarea) use pitch x_max+5.
 * Fix: removed the unused local variable `err`. */
void accelerate_kernel_c_(int *xmin,int *xmax,int *ymin,int *ymax,
                      double *dbyt,
                      double *xarea,
                      double *yarea,
                      double *volume,
                      double *density0,
                      double *pressure,
                      double *viscosity,
                      double *xvel0,
                      double *yvel0,
                      double *xvel1,
                      double *yvel1)
{
  int x_min=*xmin;
  int x_max=*xmax;
  int y_min=*ymin;
  int y_max=*ymax;
  double dt=*dbyt;
  int j,k;
  double nodal_mass;
  double stepby_mass_s;

#pragma omp parallel
 {
#pragma omp for private(nodal_mass,j, stepby_mass_s)
  for (k=y_min;k<=y_max+1;k++) {
#pragma ivdep
    for (j=x_min;j<=x_max+1;j++) {
      /* nodal mass: average of the four surrounding cell masses */
      nodal_mass=(density0[FTNREF2D(j-1,k-1,x_max+4,x_min-2,y_min-2)]*volume[FTNREF2D(j-1,k-1,x_max+4,x_min-2,y_min-2)]
                 +density0[FTNREF2D(j  ,k-1,x_max+4,x_min-2,y_min-2)]*volume[FTNREF2D(j  ,k-1,x_max+4,x_min-2,y_min-2)]
                 +density0[FTNREF2D(j  ,k  ,x_max+4,x_min-2,y_min-2)]*volume[FTNREF2D(j  ,k  ,x_max+4,x_min-2,y_min-2)]
                 +density0[FTNREF2D(j-1,k  ,x_max+4,x_min-2,y_min-2)]*volume[FTNREF2D(j-1,k  ,x_max+4,x_min-2,y_min-2)])
                 *0.25;
      stepby_mass_s=0.5*dt/nodal_mass;

      /* pressure-gradient contribution to the velocity update */
      xvel1[FTNREF2D(j  ,k  ,x_max+5,x_min-2,y_min-2)]=xvel0[FTNREF2D(j  ,k  ,x_max+5,x_min-2,y_min-2)]
            -stepby_mass_s
            *(xarea[FTNREF2D(j  ,k  ,x_max+5,x_min-2,y_min-2)]
            *(pressure[FTNREF2D(j  ,k  ,x_max+4,x_min-2,y_min-2)]-pressure[FTNREF2D(j-1,k  ,x_max+4,x_min-2,y_min-2)])
            +xarea[FTNREF2D(j  ,k-1,x_max+5,x_min-2,y_min-2)]
            *(pressure[FTNREF2D(j  ,k-1,x_max+4,x_min-2,y_min-2)]-pressure[FTNREF2D(j-1,k-1,x_max+4,x_min-2,y_min-2)]));
      yvel1[FTNREF2D(j  ,k  ,x_max+5,x_min-2,y_min-2)]=yvel0[FTNREF2D(j  ,k  ,x_max+5,x_min-2,y_min-2)]
            -stepby_mass_s
            *(yarea[FTNREF2D(j  ,k  ,x_max+4,x_min-2,y_min-2)]
            *(pressure[FTNREF2D(j  ,k  ,x_max+4,x_min-2,y_min-2)]-pressure[FTNREF2D(j  ,k-1,x_max+4,x_min-2,y_min-2)])
            +yarea[FTNREF2D(j-1,k  ,x_max+4,x_min-2,y_min-2)]
            *(pressure[FTNREF2D(j-1,k  ,x_max+4,x_min-2,y_min-2)]-pressure[FTNREF2D(j-1,k-1,x_max+4,x_min-2,y_min-2)]));

      /* viscosity-gradient contribution, applied to the updated velocity */
      xvel1[FTNREF2D(j  ,k  ,x_max+5,x_min-2,y_min-2)]=xvel1[FTNREF2D(j  ,k  ,x_max+5,x_min-2,y_min-2)]
            -stepby_mass_s
            *(xarea[FTNREF2D(j  ,k  ,x_max+5,x_min-2,y_min-2)]
            *(viscosity[FTNREF2D(j  ,k  ,x_max+4,x_min-2,y_min-2)]-viscosity[FTNREF2D(j-1,k  ,x_max+4,x_min-2,y_min-2)])
            +xarea[FTNREF2D(j  ,k-1,x_max+5,x_min-2,y_min-2)]
            *(viscosity[FTNREF2D(j  ,k-1,x_max+4,x_min-2,y_min-2)]-viscosity[FTNREF2D(j-1,k-1,x_max+4,x_min-2,y_min-2)]));
      yvel1[FTNREF2D(j  ,k  ,x_max+5,x_min-2,y_min-2)]=yvel1[FTNREF2D(j  ,k  ,x_max+5,x_min-2,y_min-2)]
            -stepby_mass_s
            *(yarea[FTNREF2D(j  ,k  ,x_max+4,x_min-2,y_min-2)]
            *(viscosity[FTNREF2D(j  ,k  ,x_max+4,x_min-2,y_min-2)]-viscosity[FTNREF2D(j  ,k-1,x_max+4,x_min-2,y_min-2)])
            +yarea[FTNREF2D(j-1,k  ,x_max+4,x_min-2,y_min-2)]
            *(viscosity[FTNREF2D(j-1,k  ,x_max+4,x_min-2,y_min-2)]-viscosity[FTNREF2D(j-1,k-1,x_max+4,x_min-2,y_min-2)]));
    }
  }
 }
}
|
oyranos_cmm_oyra_image_expose.c | /** @file oyranos_cmm_oyra_image_expose.c
*
* Oyranos is an open source Color Management System
*
* @par Copyright:
* 2016 (C) Kai-Uwe Behrmann
*
* @brief expose module for Oyranos
* @internal
* @author Kai-Uwe Behrmann <ku.b@gmx.de>
* @par License:
* new BSD <http://www.opensource.org/licenses/BSD-3-Clause>
* @since 2016/04/11
*/
#include "oyCMMapi4_s.h"
#include "oyCMMapi7_s.h"
#include "oyCMMui_s.h"
#include "oyConnectorImaging_s.h"
#include "oyRectangle_s.h"
#include "oyRectangle_s_.h"
#include "oyranos_cmm.h"
#include "oyranos_cmm_oyra.h"
#include "oyranos_helper.h"
#include "oyranos_i18n.h"
#include "oyranos_string.h"
#include <math.h>
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#ifdef HAVE_POSIX
#include <stdint.h> /* UINT32_MAX */
#endif
/* OY_IMAGE_EXPOSE_REGISTRATION */
/* OY_IMAGE_EXPOSE_REGISTRATION ----------------------------------------------*/
/* Scale the channel values c[] by the exposure factor, clipping against
 * range_max while trying to preserve the channel ratios (hue).
 * c         : channel values, modified in place
 * sig       : ICC color space signature of c
 * range_max : maximum representable channel value (e.g. 255 for 8 bit)
 * expose    : multiplicative exposure factor
 * NOTE(review): min/max/mid are ints, so double channel values are
 * truncated during the scan — presumably acceptable for the integer
 * sample ranges used by the caller; confirm for float pipelines. */
void oySensibleClip ( double * c, icColorSpaceSignature sig, int range_max, double expose )
{
  int max = 0, max_pos = 0,
      mid, mid_pos,
      min = range_max, min_pos = 0,
      i,
      n = oyICCColorSpaceGetChannelCount(sig);
  /* for Lab and YCbCr only the first (lightness/luma) channel is exposed */
  if(sig == icSigLabData ||
     sig == icSigYCbCrData)
    n = 1;
  /* locate the smallest and largest channel */
  for(i = 0; i < n; ++i)
  {
    if(max < c[i]) { max = c[i]; max_pos = i; }
    if(min > c[i]) { min = c[i]; min_pos = i; }
  }
  if( min * expose > range_max)        /* every channel overflows: saturate */
    for(i = 0; i < n; ++i)
      c[i] = range_max;
  else if(max * expose <= range_max)   /* nothing overflows: plain scaling */
    for(i = 0; i < n; ++i)
      c[i] *= expose;
  else if(n > 1)                       /* partial overflow: clip max, remap */
  {
    double exposed_min = min * expose;
    double mid_part;
    double exposed_mid;
    /* the channel that is neither min nor max
     * NOTE(review): chosen from {0,1,2}, i.e. this branch assumes n == 3 */
    mid_pos = min_pos != 0 && max_pos != 0 ? 0 : min_pos != 1 && max_pos != 1 ? 1 : 2;
    mid = c[mid_pos];
    mid_part = (double)( mid - min )/(double)( max - min );
    c[min_pos] = exposed_min + 0.5;    /* +0.5 rounds on a later int cast */
    exposed_mid = exposed_min + mid_part * (range_max - exposed_min);
    c[mid_pos] = exposed_mid + 0.5;
    c[max_pos] = range_max;
  }
}
/** @brief implement oyCMMFilter_GetNext_f()
 *
 *  Run the "expose" filter: pull pixels from the input node, then multiply
 *  each sample by the node's "expose" option. RGB-like color spaces are
 *  clipped per pixel through oySensibleClip(); other layouts are scaled
 *  per channel with a simple clamp.
 *
 *  Fixes over the previous revision:
 *  - the byte-swapped oyUINT32 store used oyByteSwapUInt16() on a 32 bit
 *    value; it now uses oyByteSwapUInt32()
 *  - an inner redeclaration of ticket_roi fetched a second reference from
 *    oyPixelAccess_GetArrayROI() that was never released; the outer
 *    reference is reused instead
 *
 *  @version Oyranos: 0.9.6
 *  @date 2016/04/04
 *  @since 2013/06/10 (Oyranos: 0.9.5)
 */
int      oyraFilter_ImageExposeRun   ( oyFilterPlug_s    * requestor_plug,
                                       oyPixelAccess_s   * ticket )
{
  int result = 0, error = 0;
  oyFilterSocket_s * socket = 0;
  oyFilterNode_s * input_node = 0,
                 * node = 0;
  oyFilterPlug_s * plug = 0;
  oyImage_s * image = 0;
  int dirty = 0;

  socket = oyFilterPlug_GetSocket( requestor_plug );
  node = oyFilterSocket_GetNode( socket );
  image = (oyImage_s*)oyFilterSocket_GetData( socket );
  if(!image)
  {
    result = 1;
    goto clean_expose1;
  }

  if(oy_debug)
    oyra_msg( oyMSG_WARN, (oyStruct_s*)ticket, OY_DBG_FORMAT_
             "image [%d](%d)\n",OY_DBG_ARGS_,oyStruct_GetId((oyStruct_s*)image),oyImage_GetWidth(image) );

  {
    oyRectangle_s * ticket_roi = oyPixelAccess_GetArrayROI( ticket );
    double expose = 1.0;
    oyOptions_s * node_opts = oyFilterNode_GetOptions( node, 0 );

    if(!node_opts)
      dirty = 1;
    if(dirty)
    {
      result = dirty;
      goto clean_expose2;
    }

    plug = oyFilterNode_GetPlug( node, 0 );
    /* select node */
    input_node = oyFilterNode_GetPlugNode( node, 0 );

    /* find filters own expose factor */
    error = oyOptions_FindDouble( node_opts,
                                  "//" OY_TYPE_STD "/expose/expose",
                                  0, &expose );
    if(error) WARNc2_S("%s %d", _("found issues"),error);

    if(oy_debug > 2)
      oyra_msg( oyMSG_WARN, (oyStruct_s*)ticket, OY_DBG_FORMAT_
               "%s expose: %f",OY_DBG_ARGS_, oyPixelAccess_Show(ticket), expose);

    if(expose != 1.0)
    {
      oyImage_s * output_image = oyPixelAccess_GetOutputImage( ticket );
      oyArray2d_s * array_out = oyPixelAccess_GetArray( ticket );
      oyProfile_s * p = oyImage_GetProfile( output_image );
      icColorSpaceSignature sig = oyProfile_GetSignature( p, oySIGNATURE_COLOR_SPACE );
      int layout_dst = oyImage_GetPixelLayout( output_image, oyLAYOUT );
      int channels_dst = oyToChannels_m( layout_dst );
      int byte_swap = oyToByteswap_m( layout_dst );
      int ticket_array_pix_width;

      /* avoid division by zero */
      if(!channels_dst) channels_dst = 1;

      ticket_array_pix_width = oyArray2d_GetWidth( array_out ) / channels_dst;

      {
        int w,h,x,y, i, start_x,start_y;
        unsigned int max = 1;
        /* reuse the ticket_roi reference obtained above; a second
         * oyPixelAccess_GetArrayROI() call here leaked its reference */
        oyRectangle_s_ roi_= {oyOBJECT_RECTANGLE_S,0,0,0, 0,0,0,0};
        oyRectangle_s * roi = (oyRectangle_s*)&roi_;
        uint8_t ** array_out_data;
        /* get pixel layout infos for copying */
        oyDATATYPE_e data_type_out = oyToDataType_m( layout_dst );
        int bps_out = oyDataTypeGetSize( data_type_out );

        /* get the source pixels */
        result = oyFilterNode_Run( input_node, plug, ticket );

        /* get the channel buffers */
        array_out_data = oyArray2d_GetData( array_out );
        w = oyArray2d_GetWidth( array_out ) / channels_dst;
        h = oyArray2d_GetHeight( array_out );
        /* the ROI is stored normalized; scale it to pixel coordinates */
        oyRectangle_SetByRectangle( roi, ticket_roi );
        oyRectangle_Scale( roi, ticket_array_pix_width );
        start_x = OY_ROUND(roi_.x);
        start_y = OY_ROUND(roi_.y);

        /* maximum sample value for the integer types, used for clipping */
        switch(data_type_out)
        {
          case oyUINT8:  max = 255; break;
          case oyUINT16: max = 65535; break;
          case oyUINT32: max = UINT32_MAX; break;
          default: break;
        }

        /* expose the samples */
#if defined(USE_OPENMP)
#pragma omp parallel for private(x,y,i)
#endif
        for(y = start_y; y < h; ++y)
        {
          for(x = start_x; x < w; ++x)
          {
            if( (sig == icSigRgbData ||
                 sig == icSigXYZData ||
                 sig == icSigLabData ||
                 sig == icSigYCbCrData)
                && channels_dst >= 3)
            {
              double rgb[3], v;
              /* read the first three channels as doubles */
              for(i = 0; i < 3; ++i)
              {
                switch(data_type_out)
                {
                  case oyUINT8:
                       rgb[i] = array_out_data[y][x*channels_dst*bps_out + i*bps_out];
                       break;
                  case oyUINT16:
                       {
                         uint16_t v = *((uint16_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]);
                         if(byte_swap) v = oyByteSwapUInt16(v);
                         rgb[i] = v;
                       }
                       break;
                  case oyUINT32:
                       {
                         uint32_t v = *((uint32_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]);
                         if(byte_swap) v = oyByteSwapUInt32(v);
                         rgb[i] = v;
                       }
                       break;
                  case oyHALF:
                       /* NOTE(review): reads the raw half bits, not a
                        * decoded value — kept as in the original */
                       v = *((uint16_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]);
                       rgb[i] = v;
                       break;
                  case oyFLOAT:
                       v = *((float*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]);
                       rgb[i] = v;
                       break;
                  case oyDOUBLE:
                       v = *((double*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]);
                       rgb[i] = v;
                       break;
                }
              }

              /* scale with hue-preserving clipping */
              oySensibleClip ( rgb, sig, max, expose );

              /* write the exposed channels back */
              for(i = 0; i < 3; ++i)
              {
                v = rgb[i];
                switch(data_type_out)
                {
                  case oyUINT8:
                       array_out_data[y][x*channels_dst*bps_out + i*bps_out] = v;
                       break;
                  case oyUINT16:
                       { uint16_t u16 = v;
                         *((uint16_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) = byte_swap ? oyByteSwapUInt16(u16) : u16;
                       }
                       break;
                  case oyUINT32:
                       { uint32_t u32 = v;
                         /* fix: was oyByteSwapUInt16() on a 32 bit value */
                         *((uint32_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) = byte_swap ? oyByteSwapUInt32(u32) : u32;
                       }
                       break;
                  case oyHALF:
                       *((uint16_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) = v;
                       break;
                  case oyFLOAT:
                       *((float*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) = v;
                       break;
                  case oyDOUBLE:
                       *((double*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) = v;
                       break;
                }
              }
            }
            else
            /* non-RGB-like layouts: scale every channel independently */
            for(i = 0; i < channels_dst; ++i)
            {
              int v;
              switch(data_type_out)
              {
                case oyUINT8:
                     v = array_out_data[y][x*channels_dst*bps_out + i*bps_out] * expose;
                     if(v > 255) v = 255;
                     array_out_data[y][x*channels_dst*bps_out + i*bps_out] = v;
                     break;
                case oyUINT16:
                     v = *((uint16_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]);
                     if(byte_swap) v = oyByteSwapUInt16(v);
                     v *= expose;
                     if(v > 65535) v = 65535;
                     *((uint16_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) = byte_swap ? oyByteSwapUInt16(v) : v;
                     break;
                case oyUINT32:
                     /* NOTE(review): no byte swap or clamp here — kept as
                      * in the original; confirm against big-endian use */
                     *((uint32_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) *= expose;
                     break;
                case oyHALF:
                     *((uint16_t*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) *= expose;
                     break;
                case oyFLOAT:
                     *((float*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) *= expose;
                     break;
                case oyDOUBLE:
                     *((double*)&array_out_data[y][x*channels_dst*bps_out + i*bps_out]) *= expose;
                     break;
              }
            }
          }
        }
      }
      oyArray2d_Release( &array_out );
      oyImage_Release( &output_image );
      oyProfile_Release( &p );
    } else /* expose == 1.0 */
    {
      /* identity: just forward the request to the input node */
      result = oyFilterNode_Run( input_node, plug, ticket );
    }

    clean_expose2:
    oyOptions_Release( &node_opts );
    oyFilterPlug_Release( &plug );
    oyRectangle_Release( &ticket_roi );
    oyFilterNode_Release( &input_node );
  }

  clean_expose1:
  oyImage_Release( &image );
  oyFilterSocket_Release( &socket );
  oyFilterNode_Release( &node );

  return result;
}
#define OY_IMAGE_EXPOSE_REGISTRATION OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD OY_SLASH "expose"
/** @brief oyra oyCMMapi7_s implementation
 *
 *  a filter providing a expose image filter
 *
 *  Build the api7 (processing) object: one imaging plug and one imaging
 *  socket, both accepting 1..255 interwoven channels of any supported
 *  data type, with oyraFilter_ImageExposeRun as the run function.
 *
 *  @version Oyranos: 0.9.5
 *  @since   2013/06/14 (Oyranos: 0.9.5)
 *  @date    2013/06/14
 */
oyCMMapi_s * oyraApi7ImageExposeCreate(void)
{
  oyCMMapi7_s * expose7;
  int32_t cmm_version[3] = {OYRANOS_VERSION_A,OYRANOS_VERSION_B,OYRANOS_VERSION_C},
          module_api[3]  = {OYRANOS_VERSION_A,OYRANOS_VERSION_B,OYRANOS_VERSION_C};
  /* 0-terminated list; only the first 6 entries are registered below */
  static oyDATATYPE_e data_types[7] = {oyUINT8, oyUINT16, oyUINT32,
                                       oyHALF, oyFLOAT, oyDOUBLE, 0};
  oyConnectorImaging_s * plug = oyConnectorImaging_New(0),
                       * socket = oyConnectorImaging_New(0);
  static oyConnectorImaging_s * plugs[2] = {0,0},
                              * sockets[2] = {0,0};
  plugs[0] = plug;
  sockets[0] = socket;

  /* describe the single input connector */
  oyConnectorImaging_SetDataTypes( plug, data_types, 6 );
  oyConnectorImaging_SetReg( plug, "//" OY_TYPE_STD "/manipulator.data" );
  oyConnectorImaging_SetMatch( plug, oyFilterSocket_MatchImagingPlug );
  oyConnectorImaging_SetTexts( plug, oyCMMgetImageConnectorPlugText,
                               oy_image_connector_texts );
  oyConnectorImaging_SetIsPlug( plug, 1 );
  oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_MAX_COLOR_OFFSET, -1 );
  oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_MIN_CHANNELS_COUNT, 1 );
  oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_MAX_CHANNELS_COUNT, 255 );
  oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_MIN_COLOR_COUNT, 1 );
  oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_MAX_COLOR_COUNT, 255 );
  oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_CAN_INTERWOVEN, 1 );
  oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_CAN_PREMULTIPLIED_ALPHA, 1 );
  oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_CAN_NONPREMULTIPLIED_ALPHA, 1 );
  oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_ID, 1 );

  /* describe the single output connector (same capabilities) */
  oyConnectorImaging_SetDataTypes( socket, data_types, 6 );
  oyConnectorImaging_SetReg( socket, "//" OY_TYPE_STD "/manipulator.data" );
  oyConnectorImaging_SetMatch( socket, oyFilterSocket_MatchImagingPlug );
  oyConnectorImaging_SetTexts( socket, oyCMMgetImageConnectorSocketText,
                               oy_image_connector_texts );
  oyConnectorImaging_SetIsPlug( socket, 0 );
  oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_MAX_COLOR_OFFSET, -1 );
  oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_MIN_CHANNELS_COUNT, 1 );
  oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_MAX_CHANNELS_COUNT, 255 );
  oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_MIN_COLOR_COUNT, 1 );
  oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_MAX_COLOR_COUNT, 255 );
  oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_CAN_INTERWOVEN, 1 );
  oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_CAN_PREMULTIPLIED_ALPHA, 1 );
  oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_CAN_NONPREMULTIPLIED_ALPHA, 1 );
  oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_ID, 1 );

  expose7 = oyCMMapi7_Create (  oyraCMMInit, oyraCMMMessageFuncSet,
                                OY_IMAGE_EXPOSE_REGISTRATION,
                                cmm_version, module_api,
                                NULL,
                                oyraFilter_ImageExposeRun,
                                (oyConnector_s**)plugs, 1, 0,
                                (oyConnector_s**)sockets, 1, 0,
                                0, 0 );
  return (oyCMMapi_s*) expose7;
}
/** @brief UI text callback for the expose filter
 *
 *  Provide the "name", "help" and "category" texts of the image_expose
 *  filter at nick/name/description granularity.
 *
 *  Fix: the "category" description claimed the filter "is used to reduce
 *  pixels" — apparently copied from the scale filter; it now agrees with
 *  the filter's own help text about adapting brightness.
 */
const char * oyraApi4UiImageExposeGetText (
                                       const char        * select,
                                       oyNAME_e            type,
                                       oyStruct_s        * context OY_UNUSED )
{
  if(strcmp(select,"name") == 0)
  {
    if(type == oyNAME_NICK)
      return "image_expose";
    else if(type == oyNAME_NAME)
      return _("Image[expose]");
    else if(type == oyNAME_DESCRIPTION)
      return _("Expose Image Filter Object");
  } else if(strcmp(select,"help") == 0)
  {
    if(type == oyNAME_NICK)
      return "help";
    else if(type == oyNAME_NAME)
      return _("The filter adapts pixel brightness.");
    else if(type == oyNAME_DESCRIPTION)
    {
      /* allocated once and cached for the lifetime of the process */
      static char * help_desc = NULL;
      if(!help_desc)
        oyStringAddPrintf( &help_desc, 0,0, "%s",
        _("The filter expects a \"expose\" double option and will process the data accordingly.")
        );
      return help_desc;
    }
  } else if(strcmp(select,"category") == 0)
  {
    if(type == oyNAME_NICK)
      return "category";
    else if(type == oyNAME_NAME)
      return _("Image/Simple Image[expose]");
    else if(type == oyNAME_DESCRIPTION)
      return _("The filter is used to adapt pixel brightness.");
  }
  return 0;
}
/** @brief oyra oyCMMapi4_s implementation
 *
 *  a filter providing a expose image filter
 *
 *  Build the api4 (UI/metadata) object for the expose filter; the actual
 *  pixel processing is registered separately through the api7 object.
 *
 *  @version Oyranos: 0.9.5
 *  @since   2013/06/14 (Oyranos: 0.9.5)
 *  @date    2013/06/14
 */
oyCMMapi_s * oyraApi4ImageExposeCreate(void)
{
  /* text keys served by oyraApi4UiImageExposeGetText */
  static const char * oyra_api4_ui_image_expose_texts[] = {"name", "help", "category", 0};
  oyCMMui_s * ui = oyCMMui_Create( "Image/Simple Image[expose]", /* category */
                                   oyraApi4UiImageExposeGetText,
                                   oyra_api4_ui_image_expose_texts, 0 );
  int32_t cmm_version[3] = {OYRANOS_VERSION_A,OYRANOS_VERSION_B,OYRANOS_VERSION_C},
          module_api[3]  = {OYRANOS_VERSION_A,OYRANOS_VERSION_B,OYRANOS_VERSION_C};

  oyCMMapi4_s * expose4 = oyCMMapi4_Create( oyraCMMInit, oyraCMMMessageFuncSet,
                                            OY_IMAGE_EXPOSE_REGISTRATION,
                                            cmm_version, module_api,
                                            NULL,
                                            NULL,
                                            NULL,
                                            ui,
                                            NULL );
  return (oyCMMapi_s*)expose4;
}
/* OY_IMAGE_EXPOSE_REGISTRATION ----------------------------------------------*/
/* ---------------------------------------------------------------------------*/
|
brute.c | #include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
uint32_t table[3][4][256] = {
/* [[[cog
import cog, differentials
def arrout(arr):
return str(arr).replace('[', '{').replace(']', '}')
cog.outl(arrout(differentials.tes) + ',')
ds1 = [[te[a] ^ te[a^1] for a in range(256)] for te in differentials.tes]
cog.outl(arrout(ds1) + ',')
ds2 = [[te[a] ^ te[a^128] for a in range(256)] for te in differentials.tes]
cog.outl(arrout(ds2))
]]] */
{{3328402341, 4168907908, 4000806809, 4135287693, 4294111757, 3597364157, 3731845041, 2445657428, 1613770832, 33620227, 3462883241, 1445669757, 3892248089, 3050821474, 1303096294, 3967186586, 2412431941, 528646813, 2311702848, 4202528135, 4026202645, 2992200171, 2387036105, 4226871307, 1101901292, 3017069671, 1604494077, 1169141738, 597466303, 1403299063, 3832705686, 2613100635, 1974974402, 3791519004, 1033081774, 1277568618, 1815492186, 2118074177, 4126668546, 2211236943, 1748251740, 1369810420, 3521504564, 4193382664, 3799085459, 2883115123, 1647391059, 706024767, 134480908, 2512897874, 1176707941, 2646852446, 806885416, 932615841, 168101135, 798661301, 235341577, 605164086, 461406363, 3756188221, 3454790438, 1311188841, 2142417613, 3933566367, 302582043, 495158174, 1479289972, 874125870, 907746093, 3698224818, 3025820398, 1537253627, 2756858614, 1983593293, 3084310113, 2108928974, 1378429307, 3722699582, 1580150641, 327451799, 2790478837, 3117535592, 0, 3253595436, 1075847264, 3825007647, 2041688520, 3059440621, 3563743934, 2378943302, 1740553945, 1916352843, 2487896798, 2555137236, 2958579944, 2244988746, 3151024235, 3320835882, 1336584933, 3992714006, 2252555205, 2588757463, 1714631509, 293963156, 2319795663, 3925473552, 67240454, 4269768577, 2689618160, 2017213508, 631218106, 1269344483, 2723238387, 1571005438, 2151694528, 93294474, 1066570413, 563977660, 1882732616, 4059428100, 1673313503, 2008463041, 2950355573, 1109467491, 537923632, 3858759450, 4260623118, 3218264685, 2177748300, 403442708, 638784309, 3287084079, 3193921505, 899127202, 2286175436, 773265209, 2479146071, 1437050866, 4236148354, 2050833735, 3362022572, 3126681063, 840505643, 3866325909, 3227541664, 427917720, 2655997905, 2749160575, 1143087718, 1412049534, 999329963, 193497219, 2353415882, 3354324521, 1807268051, 672404540, 2816401017, 3160301282, 369822493, 2916866934, 3688947771, 1681011286, 1949973070, 336202270, 2454276571, 201721354, 1210328172, 3093060836, 2680341085, 3184776046, 
1135389935, 3294782118, 965841320, 831886756, 3554993207, 4068047243, 3588745010, 2345191491, 1849112409, 3664604599, 26054028, 2983581028, 2622377682, 1235855840, 3630984372, 2891339514, 4092916743, 3488279077, 3395642799, 4101667470, 1202630377, 268961816, 1874508501, 4034427016, 1243948399, 1546530418, 941366308, 1470539505, 1941222599, 2546386513, 3421038627, 2715671932, 3899946140, 1042226977, 2521517021, 1639824860, 227249030, 260737669, 3765465232, 2084453954, 1907733956, 3429263018, 2420656344, 100860677, 4160157185, 470683154, 3261161891, 1781871967, 2924959737, 1773779408, 394692241, 2579611992, 974986535, 664706745, 3655459128, 3958962195, 731420851, 571543859, 3530123707, 2849626480, 126783113, 865375399, 765172662, 1008606754, 361203602, 3387549984, 2278477385, 2857719295, 1344809080, 2782912378, 59542671, 1503764984, 160008576, 437062935, 1707065306, 3622233649, 2218934982, 3496503480, 2185314755, 697932208, 1512910199, 504303377, 2075177163, 2824099068, 1841019862, 739644986}, {2781242211, 2230877308, 2582542199, 2381740923, 234877682, 3184946027, 2984144751, 1418839493, 1348481072, 50462977, 2848876391, 2102799147, 434634494, 1656084439, 3863849899, 2599188086, 1167051466, 2636087938, 1082771913, 2281340285, 368048890, 3954334041, 3381544775, 201060592, 3963727277, 1739838676, 4250903202, 3930435503, 3206782108, 4149453988, 2531553906, 1536934080, 3262494647, 484572669, 2923271059, 1783375398, 1517041206, 1098792767, 49674231, 1334037708, 1550332980, 4098991525, 886171109, 150598129, 2481090929, 1940642008, 1398944049, 1059722517, 201851908, 1385547719, 1699095331, 1587397571, 674240536, 2704774806, 252314885, 3039795866, 151914247, 908333586, 2602270848, 1038082786, 651029483, 1766729511, 3447698098, 2682942837, 454166793, 2652734339, 1951935532, 775166490, 758520603, 3000790638, 4004797018, 4217086112, 4137964114, 1299594043, 1639438038, 3464344499, 2068982057, 1054729187, 1901997871, 2534638724, 4121318227, 1757008337, 0, 750906861, 1614815264, 
535035132, 3363418545, 3988151131, 3201591914, 1183697867, 3647454910, 1265776953, 3734260298, 3566750796, 3903871064, 1250283471, 1807470800, 717615087, 3847203498, 384695291, 3313910595, 3617213773, 1432761139, 2484176261, 3481945413, 283769337, 100925954, 2180939647, 4037038160, 1148730428, 3123027871, 3813386408, 4087501137, 4267549603, 3229630528, 2315620239, 2906624658, 3156319645, 1215313976, 82966005, 3747855548, 3245848246, 1974459098, 1665278241, 807407632, 451280895, 251524083, 1841287890, 1283575245, 337120268, 891687699, 801369324, 3787349855, 2721421207, 3431482436, 959321879, 1469301956, 4065699751, 2197585534, 1199193405, 2898814052, 3887750493, 724703513, 2514908019, 2696962144, 2551808385, 3516813135, 2141445340, 1715741218, 2119445034, 2872807568, 2198571144, 3398190662, 700968686, 3547052216, 1009259540, 2041044702, 3803995742, 487983883, 1991105499, 1004265696, 1449407026, 1316239930, 504629770, 3683797321, 168560134, 1816667172, 3837287516, 1570751170, 1857934291, 4014189740, 2797888098, 2822345105, 2754712981, 936633572, 2347923833, 852879335, 1133234376, 1500395319, 3084545389, 2348912013, 1689376213, 3533459022, 3762923945, 3034082412, 4205598294, 133428468, 634383082, 2949277029, 2398386810, 3913789102, 403703816, 3580869306, 2297460856, 1867130149, 1918643758, 607656988, 4049053350, 3346248884, 1368901318, 600565992, 2090982877, 2632479860, 557719327, 3717614411, 3697393085, 2249034635, 2232388234, 2430627952, 1115438654, 3295786421, 2865522278, 3633334344, 84280067, 33027830, 303828494, 2747425121, 1600795957, 4188952407, 3496589753, 2434238086, 1486471617, 658119965, 3106381470, 953803233, 334231800, 3005978776, 857870609, 3151128937, 1890179545, 2298973838, 2805175444, 3056442267, 574365214, 2450884487, 550103529, 1233637070, 4289353045, 2018519080, 2057691103, 2399374476, 4166623649, 2148108681, 387583245, 3664101311, 836232934, 3330556482, 3100665960, 3280093505, 2955516313, 2002398509, 287182607, 3413881008, 4238890068, 3597515707, 
975967766}, {1671808611, 2089089148, 2006576759, 2072901243, 4061003762, 1807603307, 1873927791, 3310653893, 810573872, 16974337, 1739181671, 729634347, 4263110654, 3613570519, 2883997099, 1989864566, 3393556426, 2191335298, 3376449993, 2106063485, 4195741690, 1508618841, 1204391495, 4027317232, 2917941677, 3563566036, 2734514082, 2951366063, 2629772188, 2767672228, 1922491506, 3227229120, 3082974647, 4246528509, 2477669779, 644500518, 911895606, 1061256767, 4144166391, 3427763148, 878471220, 2784252325, 3845444069, 4043897329, 1905517169, 3631459288, 827548209, 356461077, 67897348, 3344078279, 593839651, 3277757891, 405286936, 2527147926, 84871685, 2595565466, 118033927, 305538066, 2157648768, 3795705826, 3945188843, 661212711, 2999812018, 1973414517, 152769033, 2208177539, 745822252, 439235610, 455947803, 1857215598, 1525593178, 2700827552, 1391895634, 994932283, 3596728278, 3016654259, 695947817, 3812548067, 795958831, 2224493444, 1408607827, 3513301457, 0, 3979133421, 543178784, 4229948412, 2982705585, 1542305371, 1790891114, 3410398667, 3201918910, 961245753, 1256100938, 1289001036, 1491644504, 3477767631, 3496721360, 4012557807, 2867154858, 4212583931, 1137018435, 1305975373, 861234739, 2241073541, 1171229253, 4178635257, 33948674, 2139225727, 1357946960, 1011120188, 2679776671, 2833468328, 1374921297, 2751356323, 1086357568, 2408187279, 2460827538, 2646352285, 944271416, 4110742005, 3168756668, 3066132406, 3665145818, 560153121, 271589392, 4279952895, 4077846003, 3530407890, 3444343245, 202643468, 322250259, 3962553324, 1608629855, 2543990167, 1154254916, 389623319, 3294073796, 2817676711, 2122513534, 1028094525, 1689045092, 1575467613, 422261273, 1939203699, 1621147744, 2174228865, 1339137615, 3699352540, 577127458, 712922154, 2427141008, 2290289544, 1187679302, 3995715566, 3100863416, 339486740, 3732514782, 1591917662, 186455563, 3681988059, 3762019296, 844522546, 978220090, 169743370, 1239126601, 101321734, 611076132, 1558493276, 3260915650, 3547250131, 
2901361580, 1655096418, 2443721105, 2510565781, 3828863972, 2039214713, 3878868455, 3359869896, 928607799, 1840765549, 2374762893, 3580146133, 1322425422, 2850048425, 1823791212, 1459268694, 4094161908, 3928346602, 1706019429, 2056189050, 2934523822, 135794696, 3134549946, 2022240376, 628050469, 779246638, 472135708, 2800834470, 3032970164, 3327236038, 3894660072, 3715932637, 1956440180, 522272287, 1272813131, 3185336765, 2340818315, 2323976074, 1888542832, 1044544574, 3049550261, 1722469478, 1222152264, 50660867, 4127324150, 236067854, 1638122081, 895445557, 1475980887, 3117443513, 2257655686, 3243809217, 489110045, 2662934430, 3778599393, 4162055160, 2561878936, 288563729, 1773916777, 3648039385, 2391345038, 2493985684, 2612407707, 505560094, 2274497927, 3911240169, 3460925390, 1442818645, 678973480, 3749357023, 2358182796, 2717407649, 2306869641, 219617805, 3218761151, 3862026214, 1120306242, 1756942440, 1103331905, 2578459033, 762796589, 252780047, 2966125488, 1425844308, 3151392187, 372911126}, {1667474886, 2088535288, 2004326894, 2071694838, 4075949567, 1802223062, 1869591006, 3318043793, 808472672, 16843522, 1734846926, 724270422, 4278065639, 3621216949, 2880169549, 1987484396, 3402253711, 2189597983, 3385409673, 2105378810, 4210693615, 1499065266, 1195886990, 4042263547, 2913856577, 3570689971, 2728590687, 2947541573, 2627518243, 2762274643, 1920112356, 3233831835, 3082273397, 4261223649, 2475929149, 640051788, 909531756, 1061110142, 4160160501, 3435941763, 875846760, 2779116625, 3857003729, 4059105529, 1903268834, 3638064043, 825316194, 353713962, 67374088, 3351728789, 589522246, 3284360861, 404236336, 2526454071, 84217610, 2593830191, 117901582, 303183396, 2155911963, 3806477791, 3958056653, 656894286, 2998062463, 1970642922, 151591698, 2206440989, 741110872, 437923380, 454765878, 1852748508, 1515908788, 2694904667, 1381168804, 993742198, 3604373943, 3014905469, 690584402, 3823320797, 791638366, 2223281939, 1398011302, 3520161977, 0, 3991743681, 
538992704, 4244381667, 2981218425, 1532751286, 1785380564, 3419096717, 3200178535, 960056178, 1246420628, 1280103576, 1482221744, 3486468741, 3503319995, 4025428677, 2863326543, 4227536621, 1128514950, 1296947098, 859002214, 2240123921, 1162203018, 4193849577, 33687044, 2139062782, 1347481760, 1010582648, 2678045221, 2829640523, 1364325282, 2745433693, 1077985408, 2408548869, 2459086143, 2644360225, 943212656, 4126475505, 3166494563, 3065430391, 3671750063, 555836226, 269496352, 4294908645, 4092792573, 3537006015, 3452783745, 202118168, 320025894, 3974901699, 1600119230, 2543297077, 1145359496, 387397934, 3301201811, 2812801621, 2122220284, 1027426170, 1684319432, 1566435258, 421079858, 1936954854, 1616945344, 2172753945, 1330631070, 3705438115, 572679748, 707427924, 2425400123, 2290647819, 1179044492, 4008585671, 3099120491, 336870440, 3739122087, 1583276732, 185277718, 3688593069, 3772791771, 842159716, 976899700, 168435220, 1229577106, 101059084, 606366792, 1549591736, 3267517855, 3553849021, 2897014595, 1650632388, 2442242105, 2509612081, 3840161747, 2038008818, 3890688725, 3368567691, 926374254, 1835907034, 2374863873, 3587531953, 1313788572, 2846482505, 1819063512, 1448540844, 4109633523, 3941213647, 1701162954, 2054852340, 2930698567, 134748176, 3132806511, 2021165296, 623210314, 774795868, 471606328, 2795958615, 3031746419, 3334885783, 3907527627, 3722280097, 1953799400, 522133822, 1263263126, 3183336545, 2341176845, 2324333839, 1886425312, 1044267644, 3048588401, 1718004428, 1212733584, 50529542, 4143317495, 235803164, 1633788866, 892690282, 1465383342, 3115962473, 2256965911, 3250673817, 488449850, 2661202215, 3789633753, 4177007595, 2560144171, 286339874, 1768537042, 3654906025, 2391705863, 2492770099, 2610673197, 505291324, 2273808917, 3924369609, 3469625735, 1431699370, 673740880, 3755965093, 2358021891, 2711746649, 2307489801, 218961690, 3217021541, 3873845719, 1111672452, 1751693520, 1094828930, 2576986153, 757954394, 252645662, 2964376443, 
1414855848, 3149649517, 370555436}},
{{1042226977, 1042226977, 403442708, 403442708, 697932208, 697932208, 1336584933, 1336584933, 1647391059, 1647391059, 2555137236, 2555137236, 1378429307, 1378429307, 2715671932, 2715671932, 2420656344, 2420656344, 1941222599, 1941222599, 1571005438, 1571005438, 1974974402, 1974974402, 4068047243, 4068047243, 437062935, 437062935, 1882732616, 1882732616, 2142417613, 2142417613, 2487896798, 2487896798, 1907733956, 1907733956, 302582043, 302582043, 1983593293, 1983593293, 965841320, 965841320, 672404540, 672404540, 1235855840, 1235855840, 1210328172, 1210328172, 2646852446, 2646852446, 3688947771, 3688947771, 126783113, 126783113, 631218106, 631218106, 706024767, 706024767, 3294782118, 3294782118, 2211236943, 2211236943, 2512897874, 2512897874, 260737669, 260737669, 1815492186, 1815492186, 3933566367, 3933566367, 4026202645, 4026202645, 3530123707, 3530123707, 3395642799, 3395642799, 2412431941, 2412431941, 1303096294, 1303096294, 528646813, 528646813, 3253595436, 3253595436, 2749160575, 2749160575, 3488279077, 3488279077, 1503764984, 1503764984, 361203602, 361203602, 201721354, 201721354, 899127202, 899127202, 2118074177, 2118074177, 2723238387, 2723238387, 470683154, 470683154, 2008463041, 2008463041, 1673313503, 1673313503, 4202528135, 4202528135, 3630984372, 3630984372, 1849112409, 1849112409, 4294111757, 4294111757, 2244988746, 2244988746, 504303377, 504303377, 2177748300, 2177748300, 336202270, 336202270, 3992714006, 3992714006, 3320835882, 3320835882, 1109467491, 1109467491, 2579611992, 2579611992, 3858759450, 3858759450, 2345191491, 2345191491, 2790478837, 2790478837, 3328402341, 3328402341, 2252555205, 2252555205, 1916352843, 1916352843, 3563743934, 3563743934, 3655459128, 3655459128, 1033081774, 1033081774, 268961816, 268961816, 806885416, 806885416, 1269344483, 1269344483, 1135389935, 1135389935, 461406363, 461406363, 3151024235, 3151024235, 3218264685, 3218264685, 1613770832, 1613770832, 2655997905, 2655997905, 4034427016, 4034427016, 571543859, 571543859, 
2278477385, 2278477385, 134480908, 134480908, 563977660, 563977660, 1580150641, 1580150641, 3025820398, 3025820398, 2958579944, 2958579944, 3588745010, 3588745010, 1949973070, 1949973070, 1008606754, 1008606754, 1042226977, 1042226977, 1470539505, 1470539505, 2680341085, 2680341085, 369822493, 369822493, 1874508501, 1874508501, 3832705686, 3832705686, 1781871967, 1781871967, 3597364157, 3597364157, 4160157185, 4160157185, 33620227, 33620227, 2622377682, 2622377682, 3184776046, 3184776046, 2521517021, 2521517021, 3958962195, 3958962195, 2824099068, 2824099068, 3354324521, 3354324521, 2387036105, 2387036105, 495158174, 495158174, 840505643, 840505643, 160008576, 160008576, 2075177163, 2075177163, 874125870, 874125870, 293963156, 293963156, 3698224818, 3698224818, 765172662, 765172662, 4126668546, 4126668546, 1512910199, 1512910199, 327451799, 327451799, 2992200171, 2992200171, 1412049534, 1412049534, 2883115123, 2883115123, 1143087718, 1143087718, 3554993207, 3554993207, 1101901292, 1101901292}, {557719327, 557719327, 337120268, 337120268, 2955516313, 2955516313, 3847203498, 3847203498, 1398944049, 1398944049, 3566750796, 3566750796, 2068982057, 2068982057, 2090982877, 2090982877, 3633334344, 3633334344, 3346248884, 3346248884, 4267549603, 4267549603, 3262494647, 3262494647, 2347923833, 2347923833, 387583245, 387583245, 1215313976, 1215313976, 3447698098, 3447698098, 3734260298, 3734260298, 3295786421, 3295786421, 454166793, 454166793, 1299594043, 1299594043, 2822345105, 2822345105, 1009259540, 1009259540, 3762923945, 3762923945, 1816667172, 1816667172, 1587397571, 1587397571, 1004265696, 1004265696, 2298973838, 2298973838, 3123027871, 3123027871, 1059722517, 1059722517, 2797888098, 2797888098, 1334037708, 1334037708, 1385547719, 1385547719, 2232388234, 2232388234, 1517041206, 1517041206, 2682942837, 2682942837, 368048890, 368048890, 3151128937, 3151128937, 2949277029, 2949277029, 1167051466, 1167051466, 3863849899, 3863849899, 2636087938, 2636087938, 750906861, 
750906861, 2141445340, 2141445340, 634383082, 634383082, 4166623649, 4166623649, 2450884487, 2450884487, 168560134, 168560134, 2721421207, 2721421207, 1098792767, 1098792767, 4087501137, 4087501137, 303828494, 303828494, 3245848246, 3245848246, 3747855548, 3747855548, 2281340285, 2281340285, 3034082412, 3034082412, 1500395319, 1500395319, 234877682, 234877682, 1250283471, 1250283471, 287182607, 287182607, 1283575245, 1283575245, 504629770, 504629770, 384695291, 384695291, 717615087, 717615087, 1665278241, 1665278241, 1486471617, 1486471617, 451280895, 451280895, 1133234376, 1133234376, 4121318227, 4121318227, 2781242211, 2781242211, 3313910595, 3313910595, 1265776953, 1265776953, 3201591914, 3201591914, 953803233, 953803233, 2923271059, 2923271059, 403703816, 403703816, 674240536, 674240536, 3813386408, 3813386408, 4014189740, 4014189740, 2602270848, 2602270848, 1807470800, 1807470800, 1841287890, 1841287890, 1348481072, 1348481072, 3516813135, 3516813135, 2297460856, 2297460856, 857870609, 857870609, 1233637070, 1233637070, 201851908, 201851908, 3156319645, 3156319645, 1901997871, 1901997871, 4004797018, 4004797018, 3903871064, 3903871064, 852879335, 852879335, 1316239930, 1316239930, 574365214, 574365214, 557719327, 557719327, 4049053350, 4049053350, 1570751170, 1570751170, 487983883, 487983883, 3580869306, 3580869306, 2531553906, 2531553906, 1600795957, 1600795957, 3184946027, 3184946027, 33027830, 33027830, 50462977, 50462977, 3533459022, 3533459022, 1857934291, 1857934291, 3717614411, 3717614411, 334231800, 334231800, 4238890068, 4238890068, 700968686, 700968686, 3381544775, 3381544775, 2652734339, 2652734339, 724703513, 724703513, 2148108681, 2148108681, 3413881008, 3413881008, 775166490, 775166490, 2484176261, 2484176261, 3000790638, 3000790638, 3056442267, 3056442267, 49674231, 49674231, 2002398509, 2002398509, 2534638724, 2534638724, 3954334041, 3954334041, 2119445034, 2119445034, 1940642008, 1940642008, 1715741218, 1715741218, 936633572, 936633572, 
3963727277, 3963727277}, {522272287, 522272287, 202643468, 202643468, 2578459033, 2578459033, 2867154858, 2867154858, 827548209, 827548209, 1289001036, 1289001036, 695947817, 695947817, 3715932637, 3715932637, 1222152264, 1222152264, 3032970164, 3032970164, 2751356323, 2751356323, 3082974647, 3082974647, 2039214713, 2039214713, 219617805, 219617805, 944271416, 944271416, 2999812018, 2999812018, 1256100938, 1256100938, 3049550261, 3049550261, 152769033, 152769033, 994932283, 994932283, 2443721105, 2443721105, 339486740, 339486740, 2850048425, 2850048425, 611076132, 611076132, 3277757891, 3277757891, 3762019296, 3762019296, 2391345038, 2391345038, 2679776671, 2679776671, 356461077, 356461077, 1655096418, 1655096418, 3427763148, 3427763148, 3344078279, 3344078279, 2323976074, 2323976074, 911895606, 911895606, 1973414517, 1973414517, 4195741690, 4195741690, 1773916777, 1773916777, 1706019429, 1706019429, 3393556426, 3393556426, 2883997099, 2883997099, 2191335298, 2191335298, 3979133421, 3979133421, 3699352540, 3699352540, 3928346602, 3928346602, 2717407649, 2717407649, 2274497927, 2274497927, 101321734, 101321734, 2543990167, 2543990167, 1061256767, 1061256767, 1374921297, 1374921297, 236067854, 236067854, 3066132406, 3066132406, 3168756668, 3168756668, 2106063485, 2106063485, 1823791212, 1823791212, 928607799, 928607799, 4061003762, 4061003762, 3477767631, 3477767631, 252780047, 252780047, 3444343245, 3444343245, 169743370, 169743370, 4212583931, 4212583931, 4012557807, 4012557807, 560153121, 560153121, 3243809217, 3243809217, 4279952895, 4279952895, 3359869896, 3359869896, 1408607827, 1408607827, 1671808611, 1671808611, 1137018435, 1137018435, 961245753, 961245753, 1790891114, 1790891114, 3778599393, 3778599393, 2477669779, 2477669779, 135794696, 135794696, 405286936, 405286936, 2833468328, 2833468328, 2901361580, 2901361580, 2157648768, 2157648768, 3496721360, 3496721360, 3530407890, 3530407890, 810573872, 810573872, 1339137615, 1339137615, 2022240376, 2022240376, 
288563729, 288563729, 3460925390, 3460925390, 67897348, 67897348, 2646352285, 2646352285, 795958831, 795958831, 1525593178, 1525593178, 1491644504, 1491644504, 3878868455, 3878868455, 978220090, 978220090, 505560094, 505560094, 522272287, 522272287, 2800834470, 2800834470, 3260915650, 3260915650, 186455563, 186455563, 3134549946, 3134549946, 1922491506, 1922491506, 895445557, 895445557, 1807603307, 1807603307, 4127324150, 4127324150, 16974337, 16974337, 1322425422, 1322425422, 3547250131, 3547250131, 1272813131, 1272813131, 4162055160, 4162055160, 1425844308, 1425844308, 3995715566, 3995715566, 1204391495, 1204391495, 2208177539, 2208177539, 422261273, 422261273, 2306869641, 2306869641, 2966125488, 2966125488, 439235610, 439235610, 2241073541, 2241073541, 1857215598, 1857215598, 2612407707, 2612407707, 4144166391, 4144166391, 762796589, 762796589, 2224493444, 2224493444, 1508618841, 1508618841, 712922154, 712922154, 3631459288, 3631459288, 577127458, 577127458, 3828863972, 3828863972, 2917941677, 2917941677}, {522133822, 522133822, 202118168, 202118168, 2576986153, 2576986153, 2863326543, 2863326543, 825316194, 825316194, 1280103576, 1280103576, 690584402, 690584402, 3722280097, 3722280097, 1212733584, 1212733584, 3031746419, 3031746419, 2745433693, 2745433693, 3082273397, 3082273397, 2038008818, 2038008818, 218961690, 218961690, 943212656, 943212656, 2998062463, 2998062463, 1246420628, 1246420628, 3048588401, 3048588401, 151591698, 151591698, 993742198, 993742198, 2442242105, 2442242105, 336870440, 336870440, 2846482505, 2846482505, 606366792, 606366792, 3284360861, 3284360861, 3772791771, 3772791771, 2391705863, 2391705863, 2678045221, 2678045221, 353713962, 353713962, 1650632388, 1650632388, 3435941763, 3435941763, 3351728789, 3351728789, 2324333839, 2324333839, 909531756, 909531756, 1970642922, 1970642922, 4210693615, 4210693615, 1768537042, 1768537042, 1701162954, 1701162954, 3402253711, 3402253711, 2880169549, 2880169549, 2189597983, 2189597983, 3991743681, 
3991743681, 3705438115, 3705438115, 3941213647, 3941213647, 2711746649, 2711746649, 2273808917, 2273808917, 101059084, 101059084, 2543297077, 2543297077, 1061110142, 1061110142, 1364325282, 1364325282, 235803164, 235803164, 3065430391, 3065430391, 3166494563, 3166494563, 2105378810, 2105378810, 1819063512, 1819063512, 926374254, 926374254, 4075949567, 4075949567, 3486468741, 3486468741, 252645662, 252645662, 3452783745, 3452783745, 168435220, 168435220, 4227536621, 4227536621, 4025428677, 4025428677, 555836226, 555836226, 3250673817, 3250673817, 4294908645, 4294908645, 3368567691, 3368567691, 1398011302, 1398011302, 1667474886, 1667474886, 1128514950, 1128514950, 960056178, 960056178, 1785380564, 1785380564, 3789633753, 3789633753, 2475929149, 2475929149, 134748176, 134748176, 404236336, 404236336, 2829640523, 2829640523, 2897014595, 2897014595, 2155911963, 2155911963, 3503319995, 3503319995, 3537006015, 3537006015, 808472672, 808472672, 1330631070, 1330631070, 2021165296, 2021165296, 286339874, 286339874, 3469625735, 3469625735, 67374088, 67374088, 2644360225, 2644360225, 791638366, 791638366, 1515908788, 1515908788, 1482221744, 1482221744, 3890688725, 3890688725, 976899700, 976899700, 505291324, 505291324, 522133822, 522133822, 2795958615, 2795958615, 3267517855, 3267517855, 185277718, 185277718, 3132806511, 3132806511, 1920112356, 1920112356, 892690282, 892690282, 1802223062, 1802223062, 4143317495, 4143317495, 16843522, 16843522, 1313788572, 1313788572, 3553849021, 3553849021, 1263263126, 1263263126, 4177007595, 4177007595, 1414855848, 1414855848, 4008585671, 4008585671, 1195886990, 1195886990, 2206440989, 2206440989, 421079858, 421079858, 2307489801, 2307489801, 2964376443, 2964376443, 437923380, 437923380, 2240123921, 2240123921, 1852748508, 1852748508, 2610673197, 2610673197, 4160160501, 4160160501, 757954394, 757954394, 2223281939, 2223281939, 1499065266, 1499065266, 707427924, 707427924, 3638064043, 3638064043, 572679748, 572679748, 3840161747, 3840161747, 
2913856577, 2913856577}},
{{1202630377, 3765465232, 3362022572, 899127202, 1101901292, 3825007647, 1445669757, 3218264685, 4092916743, 1470539505, 840505643, 739644986, 798661301, 260737669, 2142417613, 168101135, 1336584933, 100860677, 394692241, 1503764984, 2883115123, 3866325909, 3050821474, 4034427016, 3454790438, 1949973070, 874125870, 1841019862, 2218934982, 4026202645, 4068047243, 907746093, 2924959737, 2244988746, 1235855840, 1479289972, 4269768577, 1916352843, 3184776046, 999329963, 4160157185, 3967186586, 2454276571, 1033081774, 3688947771, 2588757463, 2983581028, 3630984372, 3722699582, 504303377, 672404540, 1202630377, 831886756, 2252555205, 2521517021, 1714631509, 3597364157, 2286175436, 3899946140, 268961816, 126783113, 3126681063, 941366308, 4202528135, 2108928974, 3992714006, 302582043, 1748251740, 235341577, 2345191491, 3354324521, 3429263018, 1874508501, 3622233649, 1604494077, 1135389935, 3294782118, 3160301282, 1403299063, 470683154, 1176707941, 3320835882, 1907733956, 227249030, 3496503480, 3858759450, 2387036105, 2857719295, 369822493, 3892248089, 3387549984, 461406363, 2211236943, 26054028, 2319795663, 2723238387, 1647391059, 773265209, 1681011286, 3488279077, 1412049534, 865375399, 1639824860, 571543859, 2816401017, 3588745010, 293963156, 932615841, 664706745, 3530123707, 1974974402, 4000806809, 2715671932, 67240454, 2311702848, 528646813, 1512910199, 4135287693, 4101667470, 563977660, 3791519004, 1580150641, 4126668546, 1546530418, 1537253627, 1303096294, 2420656344, 2479146071, 1202630377, 3765465232, 3362022572, 899127202, 1101901292, 3825007647, 1445669757, 3218264685, 4092916743, 1470539505, 840505643, 739644986, 798661301, 260737669, 2142417613, 168101135, 1336584933, 100860677, 394692241, 1503764984, 2883115123, 3866325909, 3050821474, 4034427016, 3454790438, 1949973070, 874125870, 1841019862, 2218934982, 4026202645, 4068047243, 907746093, 2924959737, 2244988746, 1235855840, 1479289972, 4269768577, 1916352843, 3184776046, 999329963, 4160157185, 3967186586, 
2454276571, 1033081774, 3688947771, 2588757463, 2983581028, 3630984372, 3722699582, 504303377, 672404540, 1202630377, 831886756, 2252555205, 2521517021, 1714631509, 3597364157, 2286175436, 3899946140, 268961816, 126783113, 3126681063, 941366308, 4202528135, 2108928974, 3992714006, 302582043, 1748251740, 235341577, 2345191491, 3354324521, 3429263018, 1874508501, 3622233649, 1604494077, 1135389935, 3294782118, 3160301282, 1403299063, 470683154, 1176707941, 3320835882, 1907733956, 227249030, 3496503480, 3858759450, 2387036105, 2857719295, 369822493, 3892248089, 3387549984, 461406363, 2211236943, 26054028, 2319795663, 2723238387, 1647391059, 773265209, 1681011286, 3488279077, 1412049534, 865375399, 1639824860, 571543859, 2816401017, 3588745010, 293963156, 932615841, 664706745, 3530123707, 1974974402, 4000806809, 2715671932, 67240454, 2311702848, 528646813, 1512910199, 4135287693, 4101667470, 563977660, 3791519004, 1580150641, 4126668546, 1546530418, 1537253627, 1303096294, 2420656344, 2479146071}, {3913789102, 2430627952, 2898814052, 2721421207, 3963727277, 535035132, 2102799147, 1841287890, 133428468, 4049053350, 724703513, 975967766, 3039795866, 2232388234, 3447698098, 252314885, 3847203498, 84280067, 2434238086, 4166623649, 1940642008, 2514908019, 1656084439, 2297460856, 651029483, 1316239930, 775166490, 3597515707, 3330556482, 368048890, 2347923833, 758520603, 4188952407, 1250283471, 3762923945, 1951935532, 2180939647, 1265776953, 1857934291, 2872807568, 33027830, 2599188086, 3683797321, 2923271059, 1004265696, 3617213773, 1689376213, 3034082412, 1054729187, 287182607, 1009259540, 3913789102, 2754712981, 3313910595, 3717614411, 1432761139, 3184946027, 3431482436, 2632479860, 403703816, 2298973838, 3887750493, 607656988, 2281340285, 3464344499, 384695291, 454166793, 1550332980, 151914247, 1133234376, 700968686, 2865522278, 3580869306, 836232934, 4250903202, 4014189740, 2797888098, 3803995742, 4149453988, 303828494, 1699095331, 717615087, 3295786421, 2249034635, 
3100665960, 451280895, 3381544775, 4289353045, 487983883, 434634494, 550103529, 2602270848, 1334037708, 2348912013, 3481945413, 4087501137, 1398944049, 959321879, 1449407026, 634383082, 2119445034, 2805175444, 3697393085, 857870609, 2041044702, 852879335, 2484176261, 2704774806, 3106381470, 3151128937, 3262494647, 2582542199, 2090982877, 100925954, 1082771913, 2636087938, 2002398509, 2381740923, 2398386810, 3156319645, 484572669, 1901997871, 49674231, 1918643758, 4217086112, 3863849899, 3633334344, 1469301956, 3913789102, 2430627952, 2898814052, 2721421207, 3963727277, 535035132, 2102799147, 1841287890, 133428468, 4049053350, 724703513, 975967766, 3039795866, 2232388234, 3447698098, 252314885, 3847203498, 84280067, 2434238086, 4166623649, 1940642008, 2514908019, 1656084439, 2297460856, 651029483, 1316239930, 775166490, 3597515707, 3330556482, 368048890, 2347923833, 758520603, 4188952407, 1250283471, 3762923945, 1951935532, 2180939647, 1265776953, 1857934291, 2872807568, 33027830, 2599188086, 3683797321, 2923271059, 1004265696, 3617213773, 1689376213, 3034082412, 1054729187, 287182607, 1009259540, 3913789102, 2754712981, 3313910595, 3717614411, 1432761139, 3184946027, 3431482436, 2632479860, 403703816, 2298973838, 3887750493, 607656988, 2281340285, 3464344499, 384695291, 454166793, 1550332980, 151914247, 1133234376, 700968686, 2865522278, 3580869306, 836232934, 4250903202, 4014189740, 2797888098, 3803995742, 4149453988, 303828494, 1699095331, 717615087, 3295786421, 2249034635, 3100665960, 451280895, 3381544775, 4289353045, 487983883, 434634494, 550103529, 2602270848, 1334037708, 2348912013, 3481945413, 4087501137, 1398944049, 959321879, 1449407026, 634383082, 2119445034, 2805175444, 3697393085, 857870609, 2041044702, 852879335, 2484176261, 2704774806, 3106381470, 3151128937, 3262494647, 2582542199, 2090982877, 100925954, 1082771913, 2636087938, 2002398509, 2381740923, 2398386810, 3156319645, 484572669, 1901997871, 49674231, 1918643758, 4217086112, 3863849899, 
3633334344, 1469301956}, {2934523822, 1888542832, 1689045092, 2543990167, 2917941677, 4229948412, 729634347, 3530407890, 4094161908, 2800834470, 422261273, 372911126, 2595565466, 2323976074, 2999812018, 84871685, 2867154858, 50660867, 2257655686, 2717407649, 3631459288, 1939203699, 3613570519, 2022240376, 3945188843, 978220090, 439235610, 3151392187, 1120306242, 4195741690, 2039214713, 455947803, 1475980887, 3477767631, 2850048425, 745822252, 2139225727, 961245753, 3547250131, 2427141008, 4127324150, 1989864566, 1239126601, 2477669779, 3762019296, 1305975373, 3580146133, 1823791212, 3812548067, 252780047, 339486740, 2934523822, 2510565781, 1137018435, 1272813131, 861234739, 1807603307, 1154254916, 1956440180, 135794696, 2391345038, 1575467613, 472135708, 2106063485, 3016654259, 4212583931, 152769033, 878471220, 118033927, 3359869896, 3995715566, 1722469478, 3134549946, 3862026214, 2734514082, 2901361580, 1655096418, 1591917662, 2767672228, 236067854, 593839651, 4012557807, 3049550261, 2340818315, 1756942440, 4279952895, 1204391495, 1442818645, 186455563, 4263110654, 3911240169, 2157648768, 3427763148, 2374762893, 1171229253, 1374921297, 827548209, 389623319, 844522546, 3928346602, 712922154, 2493985684, 3185336765, 288563729, 3732514782, 3878868455, 2241073541, 2527147926, 2662934430, 1773916777, 3082974647, 2006576759, 3715932637, 33948674, 3376449993, 2191335298, 762796589, 2072901243, 2056189050, 2646352285, 4246528509, 795958831, 4144166391, 779246638, 2700827552, 2883997099, 1222152264, 3294073796, 2934523822, 1888542832, 1689045092, 2543990167, 2917941677, 4229948412, 729634347, 3530407890, 4094161908, 2800834470, 422261273, 372911126, 2595565466, 2323976074, 2999812018, 84871685, 2867154858, 50660867, 2257655686, 2717407649, 3631459288, 1939203699, 3613570519, 2022240376, 3945188843, 978220090, 439235610, 3151392187, 1120306242, 4195741690, 2039214713, 455947803, 1475980887, 3477767631, 2850048425, 745822252, 2139225727, 961245753, 3547250131, 2427141008, 
4127324150, 1989864566, 1239126601, 2477669779, 3762019296, 1305975373, 3580146133, 1823791212, 3812548067, 252780047, 339486740, 2934523822, 2510565781, 1137018435, 1272813131, 861234739, 1807603307, 1154254916, 1956440180, 135794696, 2391345038, 1575467613, 472135708, 2106063485, 3016654259, 4212583931, 152769033, 878471220, 118033927, 3359869896, 3995715566, 1722469478, 3134549946, 3862026214, 2734514082, 2901361580, 1655096418, 1591917662, 2767672228, 236067854, 593839651, 4012557807, 3049550261, 2340818315, 1756942440, 4279952895, 1204391495, 1442818645, 186455563, 4263110654, 3911240169, 2157648768, 3427763148, 2374762893, 1171229253, 1374921297, 827548209, 389623319, 844522546, 3928346602, 712922154, 2493985684, 3185336765, 288563729, 3732514782, 3878868455, 2241073541, 2527147926, 2662934430, 1773916777, 3082974647, 2006576759, 3715932637, 33948674, 3376449993, 2191335298, 762796589, 2072901243, 2056189050, 2646352285, 4246528509, 795958831, 4144166391, 779246638, 2700827552, 2883997099, 1222152264, 3294073796}, {2930698567, 1886425312, 1684319432, 2543297077, 2913856577, 4244381667, 724270422, 3537006015, 4109633523, 2795958615, 421079858, 370555436, 2593830191, 2324333839, 2998062463, 84217610, 2863326543, 50529542, 2256965911, 2711746649, 3638064043, 1936954854, 3621216949, 2021165296, 3958056653, 976899700, 437923380, 3149649517, 1111672452, 4210693615, 2038008818, 454765878, 1465383342, 3486468741, 2846482505, 741110872, 2139062782, 960056178, 3553849021, 2425400123, 4143317495, 1987484396, 1229577106, 2475929149, 3772791771, 1296947098, 3587531953, 1819063512, 3823320797, 252645662, 336870440, 2930698567, 2509612081, 1128514950, 1263263126, 859002214, 1802223062, 1145359496, 1953799400, 134748176, 2391705863, 1566435258, 471606328, 2105378810, 3014905469, 4227536621, 151591698, 875846760, 117901582, 3368567691, 4008585671, 1718004428, 3132806511, 3873845719, 2728590687, 2897014595, 1650632388, 1583276732, 2762274643, 235803164, 589522246, 4025428677, 
3048588401, 2341176845, 1751693520, 4294908645, 1195886990, 1431699370, 185277718, 4278065639, 3924369609, 2155911963, 3435941763, 2374863873, 1162203018, 1364325282, 825316194, 387397934, 842159716, 3941213647, 707427924, 2492770099, 3183336545, 286339874, 3739122087, 3890688725, 2240123921, 2526454071, 2661202215, 1768537042, 3082273397, 2004326894, 3722280097, 33687044, 3385409673, 2189597983, 757954394, 2071694838, 2054852340, 2644360225, 4261223649, 791638366, 4160160501, 774795868, 2694904667, 2880169549, 1212733584, 3301201811, 2930698567, 1886425312, 1684319432, 2543297077, 2913856577, 4244381667, 724270422, 3537006015, 4109633523, 2795958615, 421079858, 370555436, 2593830191, 2324333839, 2998062463, 84217610, 2863326543, 50529542, 2256965911, 2711746649, 3638064043, 1936954854, 3621216949, 2021165296, 3958056653, 976899700, 437923380, 3149649517, 1111672452, 4210693615, 2038008818, 454765878, 1465383342, 3486468741, 2846482505, 741110872, 2139062782, 960056178, 3553849021, 2425400123, 4143317495, 1987484396, 1229577106, 2475929149, 3772791771, 1296947098, 3587531953, 1819063512, 3823320797, 252645662, 336870440, 2930698567, 2509612081, 1128514950, 1263263126, 859002214, 1802223062, 1145359496, 1953799400, 134748176, 2391705863, 1566435258, 471606328, 2105378810, 3014905469, 4227536621, 151591698, 875846760, 117901582, 3368567691, 4008585671, 1718004428, 3132806511, 3873845719, 2728590687, 2897014595, 1650632388, 1583276732, 2762274643, 235803164, 589522246, 4025428677, 3048588401, 2341176845, 1751693520, 4294908645, 1195886990, 1431699370, 185277718, 4278065639, 3924369609, 2155911963, 3435941763, 2374863873, 1162203018, 1364325282, 825316194, 387397934, 842159716, 3941213647, 707427924, 2492770099, 3183336545, 286339874, 3739122087, 3890688725, 2240123921, 2526454071, 2661202215, 1768537042, 3082273397, 2004326894, 3722280097, 33687044, 3385409673, 2189597983, 757954394, 2071694838, 2054852340, 2644360225, 4261223649, 791638366, 4160160501, 774795868, 
2694904667, 2880169549, 1212733584, 3301201811}}
// [[[end]]]
};
// Search the precomputed XOR tables for all index tuples (a, b, c, d) such
// that table[ti][0][a] ^ table[ti][1][b] ^ table[ti][2][c] ^ table[ti][3][d]
// equals the requested 32-bit target, and print each match as four
// space-separated indices. Tables 1 and 2 are searched over a reduced index
// space; the cog-generated printf blocks expand each hit to the full set of
// equivalent index combinations (presumably because those tables repeat
// values across the skipped low/high bit — verify against table generation).
int main(int argc, char** argv) {
  // args: [table#] [parameter], prints space-separated indices into the table
  if (argc < 3) {
    fprintf(stderr, "usage: %s <table#> <target>\n", argv[0]);
    return 1;
  }
  int ti = atoi(argv[1]);
  // atoll so values up to UINT32_MAX parse before the intended 32-bit truncation.
  uint32_t target = atoll(argv[2]);
  if (ti == 0) {
    // Table 0: exhaustive 256^4 search, parallelized over the outer index.
#pragma omp parallel for schedule(static, 64)
    for (int a = 0; a < 256; a++) {
      for (int b = 0; b < 256; b++) {
        for (int c = 0; c < 256; c++) {
          for (int d = 0; d < 256; d++) {
            uint32_t res = table[0][0][a] ^ table[0][1][b] ^ table[0][2][c] ^ table[0][3][d];
            if (res == target) {
              printf("%d %d %d %d\n", a, b, c, d);
              //return 0;
            }
          }
        }
      }
    }
  } else if (ti == 1) {
    // Table 1: search only even indices; each hit is expanded to all 16
    // combinations of the low bits by the generated printf block below.
#pragma omp parallel for schedule(static)
    for (int a = 0; a < 256; a += 2) {
      for (int b = 0; b < 256; b += 2) {
        for (int c = 0; c < 256; c += 2) {
          for (int d = 0; d < 256; d += 2) {
            uint32_t res = table[1][0][a] ^ table[1][1][b] ^ table[1][2][c] ^ table[1][3][d];
            if (res == target) {
              // [[[cog
              // import cog, itertools
              // for a,b,c,d in itertools.product([0, 1], repeat=4):
              //     cog.outl(f'printf("%d %d %d %d\\n", a|{a}, b|{b}, c|{c}, d|{d});')
              // ]]]
              printf("%d %d %d %d\n", a|0, b|0, c|0, d|0);
              printf("%d %d %d %d\n", a|0, b|0, c|0, d|1);
              printf("%d %d %d %d\n", a|0, b|0, c|1, d|0);
              printf("%d %d %d %d\n", a|0, b|0, c|1, d|1);
              printf("%d %d %d %d\n", a|0, b|1, c|0, d|0);
              printf("%d %d %d %d\n", a|0, b|1, c|0, d|1);
              printf("%d %d %d %d\n", a|0, b|1, c|1, d|0);
              printf("%d %d %d %d\n", a|0, b|1, c|1, d|1);
              printf("%d %d %d %d\n", a|1, b|0, c|0, d|0);
              printf("%d %d %d %d\n", a|1, b|0, c|0, d|1);
              printf("%d %d %d %d\n", a|1, b|0, c|1, d|0);
              printf("%d %d %d %d\n", a|1, b|0, c|1, d|1);
              printf("%d %d %d %d\n", a|1, b|1, c|0, d|0);
              printf("%d %d %d %d\n", a|1, b|1, c|0, d|1);
              printf("%d %d %d %d\n", a|1, b|1, c|1, d|0);
              printf("%d %d %d %d\n", a|1, b|1, c|1, d|1);
              /// [[[end]]]
              //return 0;
            }
          }
        }
      }
    }
  } else if (ti == 2) {
    // Table 2: search only the low 128 indices; each hit is expanded to all
    // 16 combinations of the high bits by the generated printf block below.
#pragma omp parallel for schedule(static)
    for (int a = 0; a < 128; a++) {
      for (int b = 0; b < 128; b++) {
        for (int c = 0; c < 128; c++) {
          for (int d = 0; d < 128; d++) {
            uint32_t res = table[2][0][a] ^ table[2][1][b] ^ table[2][2][c] ^ table[2][3][d];
            if (res == target) {
              // [[[cog
              // import cog, itertools
              // for a,b,c,d in itertools.product([0, 128], repeat=4):
              //     cog.outl(f'printf("%d %d %d %d\\n", a|{a}, b|{b}, c|{c}, d|{d});')
              // ]]]
              printf("%d %d %d %d\n", a|0, b|0, c|0, d|0);
              printf("%d %d %d %d\n", a|0, b|0, c|0, d|128);
              printf("%d %d %d %d\n", a|0, b|0, c|128, d|0);
              printf("%d %d %d %d\n", a|0, b|0, c|128, d|128);
              printf("%d %d %d %d\n", a|0, b|128, c|0, d|0);
              printf("%d %d %d %d\n", a|0, b|128, c|0, d|128);
              printf("%d %d %d %d\n", a|0, b|128, c|128, d|0);
              printf("%d %d %d %d\n", a|0, b|128, c|128, d|128);
              printf("%d %d %d %d\n", a|128, b|0, c|0, d|0);
              printf("%d %d %d %d\n", a|128, b|0, c|0, d|128);
              printf("%d %d %d %d\n", a|128, b|0, c|128, d|0);
              printf("%d %d %d %d\n", a|128, b|0, c|128, d|128);
              printf("%d %d %d %d\n", a|128, b|128, c|0, d|0);
              printf("%d %d %d %d\n", a|128, b|128, c|0, d|128);
              printf("%d %d %d %d\n", a|128, b|128, c|128, d|0);
              printf("%d %d %d %d\n", a|128, b|128, c|128, d|128);
              /// [[[end]]]
              //return 0;
            }
          }
        }
      }
    }
  }
  return 0;
}
|
d2d_memcpy.c | // RUN: %libomptarget-compile-aarch64-unknown-linux-gnu && env OMP_MAX_ACTIVE_LEVELS=2 %libomptarget-run-aarch64-unknown-linux-gnu | %fcheck-aarch64-unknown-linux-gnu
// RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu && env OMP_MAX_ACTIVE_LEVELS=2 %libomptarget-run-powerpc64-ibm-linux-gnu | %fcheck-powerpc64-ibm-linux-gnu
// RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu && env OMP_MAX_ACTIVE_LEVELS=2 %libomptarget-run-powerpc64le-ibm-linux-gnu | %fcheck-powerpc64le-ibm-linux-gnu
// RUN: %libomptarget-compile-x86_64-pc-linux-gnu && env OMP_MAX_ACTIVE_LEVELS=2 %libomptarget-run-x86_64-pc-linux-gnu | %fcheck-x86_64-pc-linux-gnu -allow-empty
// RUN: %libomptarget-compile-nvptx64-nvidia-cuda && env OMP_MAX_ACTIVE_LEVELS=2 %libomptarget-run-nvptx64-nvidia-cuda | %fcheck-nvptx64-nvidia-cuda -allow-empty
#include <assert.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
const int magic_num = 7;
// Regression test for device-to-device omp_target_memcpy: fill a buffer on
// the first device, copy it to the last device, read it back through a
// mapped host buffer, and verify every element.
int main(int argc, char *argv[]) {
  const int N = 128;
  const int num_devices = omp_get_num_devices();

  // Without an offload device there is nothing to exercise.
  if (num_devices == 0) {
    printf("PASS\n");
    return 0;
  }

  const int src_device = 0;
  const int dst_device = num_devices - 1;
  const int num_bytes = N * sizeof(int);

  int *src = omp_target_alloc(num_bytes, src_device);
  int *dst = omp_target_alloc(num_bytes, dst_device);
  assert(src && "src_ptr is NULL");
  assert(dst && "dst_ptr is NULL");

  // Initialize the source buffer directly on the source device.
#pragma omp target teams distribute parallel for device(src_device) \
    is_device_ptr(src)
  for (int i = 0; i < N; ++i) {
    src[i] = magic_num;
  }

  // Device-to-device copy under test.
  const int err =
      omp_target_memcpy(dst, src, num_bytes, 0, 0, dst_device, src_device);
  assert(err == 0 && "error in omp_target_memcpy");

  int *host_buf = malloc(num_bytes);
  assert(host_buf && "failed to allocate host buffer");

  // Read the destination buffer back, adding magic_num once more so the
  // expected host value is 2 * magic_num.
#pragma omp target teams distribute parallel for device(dst_device) \
    map(from: host_buf[0:N]) is_device_ptr(dst)
  for (int i = 0; i < N; ++i) {
    host_buf[i] = dst[i] + magic_num;
  }

  for (int i = 0; i < N; ++i)
    assert(host_buf[i] == 2 * magic_num);

  printf("PASS\n");

  // Free host and device memory
  free(host_buf);
  omp_target_free(src, src_device);
  omp_target_free(dst, dst_device);

  return 0;
}
// CHECK: PASS
|
bml_normalize_ellsort_typed.c | #include "../../macros.h"
#include "../../typed.h"
#include "../bml_allocate.h"
#include "../bml_normalize.h"
#include "../bml_parallel.h"
#include "../bml_types.h"
#include "bml_add_ellsort.h"
#include "bml_allocate_ellsort.h"
#include "bml_normalize_ellsort.h"
#include "bml_scale_ellsort.h"
#include "bml_types_ellsort.h"
#include <complex.h>
#include <float.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/* Normalize ellsort matrix given Gershgorin bounds.
*
* \ingroup normalize_group
*
* \param A The matrix
* \param mineval Calculated min value
* \param maxeval Calculated max value
*/
/* Normalize an ellsort matrix given its Gershgorin bounds:
 * A <- (maxeval/(maxeval-mineval)) * I - A/(maxeval-mineval).
 *
 * \param A The matrix (modified in place)
 * \param mineval Lower Gershgorin bound
 * \param maxeval Upper Gershgorin bound
 */
void TYPED_FUNC(
    bml_normalize_ellsort) (
    bml_matrix_ellsort_t * A,
    double mineval,
    double maxeval)
{
    double spread = maxeval - mineval;
    double identity_shift = maxeval / spread;
    double keep_threshold = 0.0;
    REAL_T scale_factor = (REAL_T) - 1.0 / spread;

    // First scale A by -1/spread in place, then shift by identity_shift * I
    // (keeping every element, since the threshold is zero).
    bml_scale_inplace_ellsort(&scale_factor, A);
    bml_add_identity_ellsort(A, identity_shift, keep_threshold);
}
/** Calculate Gershgorin bounds for an ellsort matrix.
*
* \ingroup normalize_group
*
* \param A The matrix
* \param nrows Number of rows to use
* returns mineval Calculated min value
* returns maxeval Calculated max value
*/
/* Compute Gershgorin eigenvalue bounds for an ellsort matrix.
 *
 * Returns a 2-element heap array: eval[0] = min bound, eval[1] = max bound.
 * Caller owns the returned memory (allocated with bml_allocate_memory).
 */
void *TYPED_FUNC(
    bml_gershgorin_ellsort) (
    bml_matrix_ellsort_t * A)
{
    REAL_T radius, absham, dvalue;

    double emin = DBL_MAX;
    // Fix: start from the most negative representable double. DBL_MIN is
    // the smallest *positive* normal value (~2.2e-308), which would wrongly
    // clamp emax near zero whenever all Gershgorin bounds are negative.
    double emax = -DBL_MAX;
    double *eval = bml_allocate_memory(sizeof(double) * 2);

    int N = A->N;
    int M = A->M;
    int *A_nnz = (int *) A->nnz;
    int *A_index = (int *) A->index;
    int *A_localRowMin = A->domain->localRowMin;
    int *A_localRowMax = A->domain->localRowMax;
    int myRank = bml_getMyRank();

    // Per-row Gershgorin radius and diagonal value. NOTE(review): these are
    // VLAs of size N on the stack — may overflow for very large N; confirm
    // acceptable row counts with the callers.
    REAL_T rad[N];
    REAL_T dval[N];

    REAL_T *A_value = (REAL_T *) A->value;

    // First pass (parallel): for each local row, accumulate the off-diagonal
    // absolute sum (radius) and record the diagonal entry.
#pragma omp parallel for \
    shared(N, M, A_nnz, A_index, A_value) \
    shared(A_localRowMin, A_localRowMax, myRank) \
    shared(rad, dval) \
    private(absham, radius, dvalue) \
    reduction(max:emax) \
    reduction(min:emin)
    //for (int i = 0; i < N; i++)
    for (int i = A_localRowMin[myRank]; i < A_localRowMax[myRank]; i++)
    {
        radius = 0.0;
        dvalue = 0.0;
        for (int j = 0; j < A_nnz[i]; j++)
        {
            if (i == A_index[ROWMAJOR(i, j, N, M)])
                dvalue = A_value[ROWMAJOR(i, j, N, M)];
            else
            {
                absham = ABS(A_value[ROWMAJOR(i, j, N, M)]);
                radius += (double) absham;
            }
        }
        dval[i] = dvalue;
        rad[i] = radius;
    }

    // Second pass (serial): reduce the per-row bounds d_i +/- r_i into the
    // global min/max eigenvalue estimates.
    //for (int i = 0; i < N; i++)
    for (int i = A_localRowMin[myRank]; i < A_localRowMax[myRank]; i++)
    {
        if (REAL_PART(dval[i] + rad[i]) > emax)
            emax = REAL_PART(dval[i] + rad[i]);
        if (REAL_PART(dval[i] - rad[i]) < emin)
            emin = REAL_PART(dval[i] - rad[i]);
    }
    //printf("%d: emin = %e emax = %e\n", myRank, emin, emax);

#ifdef DO_MPI
    // For distributed matrices, combine bounds across ranks.
    if (bml_getNRanks() > 1 && A->distribution_mode == distributed)
    {
        bml_minRealReduce(&emin);
        bml_maxRealReduce(&emax);
    }
#endif

    eval[0] = emin;
    eval[1] = emax;

    return eval;
}
/** Calculate Gershgorin bounds for a partial ellsort matrix.
*
* \ingroup normalize_group
*
* \param A The matrix
* \param nrows Number of rows to use
* returns mineval Calculated min value
* returns maxeval Calculated max value
*/
/* Compute Gershgorin eigenvalue bounds using only the first nrows rows of an
 * ellsort matrix.
 *
 * Returns a 2-element heap array: eval[0] = min bound, eval[1] = max bound.
 * Caller owns the returned memory (allocated with bml_allocate_memory).
 */
void *TYPED_FUNC(
    bml_gershgorin_partial_ellsort) (
    bml_matrix_ellsort_t * A,
    int nrows)
{
    REAL_T radius, absham, dvalue;

    double emin = DBL_MAX;
    // Fix: start from the most negative representable double. DBL_MIN is
    // the smallest *positive* normal value (~2.2e-308), which would wrongly
    // clamp emax near zero whenever all Gershgorin bounds are negative.
    double emax = -DBL_MAX;
    double *eval = bml_allocate_memory(sizeof(double) * 2);

    int N = A->N;
    int M = A->M;
    int *A_nnz = (int *) A->nnz;
    int *A_index = (int *) A->index;

    // Per-row Gershgorin radius and diagonal value (VLAs of size N).
    REAL_T rad[N];
    REAL_T dval[N];

    REAL_T *A_value = (REAL_T *) A->value;

    // First pass (parallel): accumulate each row's off-diagonal absolute sum
    // and record its diagonal entry.
#pragma omp parallel for \
    shared(N, M, A_nnz, A_index, A_value) \
    shared(rad, dval) \
    private(absham, radius, dvalue) \
    reduction(max:emax) \
    reduction(min:emin)
    for (int i = 0; i < nrows; i++)
    {
        radius = 0.0;
        dvalue = 0.0;
        for (int j = 0; j < A_nnz[i]; j++)
        {
            if (i == A_index[ROWMAJOR(i, j, N, M)])
                dvalue = A_value[ROWMAJOR(i, j, N, M)];
            else
            {
                absham = ABS(A_value[ROWMAJOR(i, j, N, M)]);
                radius += (double) absham;
            }
        }
        dval[i] = dvalue;
        rad[i] = radius;
    }

    // Second pass (serial): reduce the per-row bounds d_i +/- r_i.
    for (int i = 0; i < nrows; i++)
    {
        if (REAL_PART(dval[i] + rad[i]) > emax)
            emax = REAL_PART(dval[i] + rad[i]);
        if (REAL_PART(dval[i] - rad[i]) < emin)
            emin = REAL_PART(dval[i] - rad[i]);
    }

    eval[0] = emin;
    eval[1] = emax;

    return eval;
}
|
GB_unop__identity_uint16_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint16_bool)
// op(A') function: GB (_unop_tran__identity_uint16_bool)
// C type: uint16_t
// A type: bool
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint16_t z = (uint16_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
bool aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint16_t z = (uint16_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity unary operator with typecast: Cx [p] = (uint16_t) Ax [p]
// for every entry — all anz entries when Ab is NULL (full/sparse case), or
// only the entries flagged in the bitmap Ab otherwise.
GrB_Info GB (_unop_apply__identity_uint16_bool)
(
    uint16_t *Cx,               // Cx and Ax may be aliased
    const bool *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // A single loop covers both cases: when Ab is NULL every position holds
    // a live entry; otherwise Ab [p] selects which positions to convert.
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;
        bool aij = Ax [p] ;
        uint16_t z = (uint16_t) aij ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose A, casting each bool entry to uint16_t via the identity operator.
// The whole algorithm lives in the shared template GB_unop_transpose.c, which
// is specialized by the GB_* macros defined earlier in this file.
GrB_Info GB (_unop_tran__identity_uint16_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-thread workspaces for the transpose
    const int64_t *restrict A_slice,    // how A is partitioned across threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    // Operator compiled out via GxB_NO_* flags: fall back to the generic path.
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
im2col_dnnlowp.h | #pragma once
#ifdef _OPENMP
#include <omp.h>
#endif
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
#include "caffe2/utils/math/utils.h"
namespace caffe2 {
namespace math {
// Unfold an NCHW image into column form for convolution-as-GEMM: each output
// location gets a copy of its kernel_h x kernel_w x channels receptive field.
// Out-of-image positions are filled with zero_point (the quantized zero for
// low-precision types). Two specialized fast paths precede the general case.
template <typename T>
static void Im2ColNCHW(
    const int channels,
    const int height,
    const int width,
    const int kernel_h,
    const int kernel_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int pad_b,
    const int pad_r,
    const int stride_h,
    const int stride_w,
    const T* data_im,
    T* data_col,
    CPUContext* /*context*/,
    const T& zero_point = 0) {
  // Spatial size of the convolution output, accounting for padding/dilation.
  const int output_h =
      (height + pad_b + pad_t - (dilation_h * (kernel_h - 1) + 1)) / stride_h +
      1;
  const int output_w =
      (width + pad_l + pad_r - (dilation_w * (kernel_w - 1) + 1)) / stride_w +
      1;

  // Fast path for zero padding and no dilation
  // From Torch, THNN_(unfolded_copy)
  if (dilation_h == 1 && dilation_w == 1 && pad_l == 0 && pad_r == 0 &&
      pad_t == 0 && pad_b == 0) {
    // k enumerates (channel, kernel row, kernel col) triples.
    for (auto k = 0; k < channels * kernel_h * kernel_w; k++) {
      const auto nip = k / (kernel_h * kernel_w);
      const auto rest = k % (kernel_h * kernel_w);
      const auto kh = rest / kernel_w;
      const auto kw = rest % kernel_w;
      auto* dst = data_col + nip * (kernel_h * kernel_w * output_h * output_w) +
          kh * (kernel_w * output_h * output_w) + kw * (output_h * output_w);
      const auto* src = data_im + nip * (height * width);
      for (auto y = 0; y < output_h; y++) {
        const auto iy = y * stride_h + kh;
        const auto ix = kw;
        if (stride_w == 1) {
          // Contiguous row: copy the whole output row at once.
          memcpy(
              dst + (y * output_w),
              src + (iy * width + ix),
              sizeof(T) * output_w);
        } else {
          // Strided row: copy element by element.
          for (auto x = 0; x < output_w; x++) {
            memcpy(
                dst + (y * output_w + x),
                src + (iy * width + ix + x * stride_w),
                sizeof(T));
          }
        }
      }
    }
    return;
  }

  // Fast path for equal padding
  if (pad_l == pad_r && pad_t == pad_b) {
    // From Intel, https://github.com/BVLC/caffe/pull/3536
    const int pad_h = pad_t;
    const int pad_w = pad_l;
    const int channel_size = height * width;
    // data_col is written strictly sequentially; data_im advances one
    // channel plane per outer iteration.
    for (int channel = channels; channel--; data_im += channel_size) {
      for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) {
        for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
          int input_row = -pad_h + kernel_row * dilation_h;
          for (int output_rows = output_h; output_rows; output_rows--) {
            if (!utils::IsAGeZeroAndALtB(input_row, height)) {
              // Entire row falls in the padding region.
              for (int output_cols = output_w; output_cols; output_cols--) {
                *(data_col++) = zero_point;
              }
            } else {
              int input_col = -pad_w + kernel_col * dilation_w;
              for (int output_col = output_w; output_col; output_col--) {
                if (utils::IsAGeZeroAndALtB(input_col, width)) {
                  *(data_col++) = data_im[input_row * width + input_col];
                } else {
                  *(data_col++) = zero_point;
                }
                input_col += stride_w;
              }
            }
            input_row += stride_h;
          }
        }
      }
    }
    return;
  }

  // Baseline
  // General case: compute source coordinates per output element.
  const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
  const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
  int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
  int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
  int channels_col = channels * kernel_h * kernel_w;
  for (int c = 0; c < channels_col; ++c) {
    // Decompose the column-channel index into (image channel, kh, kw).
    int w_offset = c % kernel_w;
    int h_offset = (c / kernel_w) % kernel_h;
    int c_im = c / kernel_h / kernel_w;
    for (int h = 0; h < height_col; ++h) {
      for (int w = 0; w < width_col; ++w) {
        int h_pad = h * stride_h - pad_t + h_offset * dilation_h;
        int w_pad = w * stride_w - pad_l + w_offset * dilation_w;
        if (h_pad >= 0 && h_pad < height && w_pad >= 0 && w_pad < width)
          data_col[(c * height_col + h) * width_col + w] =
              data_im[(c_im * height + h_pad) * width + w_pad];
        else
          data_col[(c * height_col + h) * width_col + w] = zero_point;
      }
    }
  }
}
// N-dimensional im2col for NCHW-style layout: lowers an N-spatial-dim image
// into column form, filling padding positions with zero_point.
template <typename T>
static void Im2ColNdNCHW(
    const int N,
    const int /* img_size*/,
    const int col_size,
    const int* img_shape,     // [channels, spatial dims...] (N+1 entries used)
    const int* col_shape,     // column-buffer shape; col_shape[0] is the outer size
    const int* kernel_shape,  // N kernel extents
    const int* stride,
    const int* dilation,
    const int* pad,
    const T* X_data,
    T* Y_data,
    CPUContext* /* context */,
    const T& zero_point = 0) {
  const int outer_size = col_shape[0];
  const int inner_size = col_size / outer_size;
  const int kernel_size = std::accumulate(
      kernel_shape, kernel_shape + N, 1, std::multiplies<int>());
  // d_offset: per-axis kernel offset for the current outer index.
  // d_iter: per-axis output coordinate, advanced odometer-style below.
  std::vector<int> d_offset(N, 0);
  std::vector<int> d_iter(N, 0);
  for (int i = 0; i < outer_size; ++i) {
    // Loop over spatial axes in reverse order to compute a per-axis offset.
    int offset = i;
    for (int d_i = N - 1; d_i >= 0; --d_i) {
      d_offset[d_i] = offset % kernel_shape[d_i];
      offset /= kernel_shape[d_i];
    }
    for (int j = 0; j < inner_size; ++j) {
      // Loop over spatial axes in forward order to compute the indices in the
      // image and column, and whether the index lies in the padding.
      const int col_index = i * inner_size + j;
      int img_index = i / kernel_size; // start from the channel index
      bool is_padding = false;
      for (int d_i = 0; d_i < N; ++d_i) {
        const int d_img = d_iter[d_i] * stride[d_i] - pad[d_i] +
            d_offset[d_i] * dilation[d_i];
        is_padding |= d_img < 0 || d_img >= img_shape[d_i + 1];
        img_index = img_index * img_shape[d_i + 1] + d_img;
      }
      Y_data[col_index] = is_padding ? zero_point : X_data[img_index];
      // Advance the output coordinate (rightmost axis fastest).
      utils::IncreaseIndexInDims(N, col_shape + 1, d_iter.data());
    }
  }
}
/**
* The layout of the result is N H W G R S C/G.
* Note that groups are pulled out to an outer dimension so that we can use
* GEMMs efficiently.
*/
/**
 * The layout of the result is N H W G R S C/G.
 * Note that groups are pulled out to an outer dimension so that we can use
 * GEMMs efficiently.
 *
 * NHWC im2col with grouped channels; padding positions are filled with
 * zero_point. Parallelized over output rows (unless already inside an OpenMP
 * parallel region).
 */
template <typename T>
static void Im2ColNHWC(
    const int channels,
    const int height,
    const int width,
    const int kernel_h,
    const int kernel_w,
    const int dilation_h,
    const int dilation_w,
    const int pad_t,
    const int pad_l,
    const int pad_b,
    const int pad_r,
    const int stride_h,
    const int stride_w,
    const T* data_im,
    T* data_col,
    CPUContext* /*context*/,
    const int groups,
    const T& zero_point) {
  // Effective (dilated) kernel extents and output spatial size.
  const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
  const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
  int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
  int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;

#ifdef _OPENMP
#pragma omp parallel for if (!omp_in_parallel())
#endif
  for (int h = 0; h < height_col; ++h) {
    int h_pad = -pad_t + h * stride_h;
    // Start of this output row's block in data_col; advanced per pixel below.
    T* data_col_temp =
        data_col + h * width_col * kernel_h * kernel_w * channels;
    int w_pad = -pad_l;
    for (int w = 0; w < width_col; ++w) {
      int r = 0; // kernel row index
      for (int ih = h_pad; ih < h_pad + dkernel_h; ih += dilation_h, ++r) {
        int s = 0; // kernel column index
        for (int iw = w_pad; iw < w_pad + dkernel_w; iw += dilation_w, ++s) {
          if (ih >= 0 && ih < height && iw >= 0 && iw < width) {
            // Copy channels/groups contiguous values per group; the group
            // index becomes the outermost kernel dimension (G R S C/G).
            for (int g = 0; g < groups; ++g) {
              memcpy(
                  data_col_temp +
                      ((g * kernel_h + r) * kernel_w + s) * (channels / groups),
                  data_im + (ih * width + iw) * channels +
                      g * (channels / groups),
                  sizeof(T) * (channels / groups));
            }
          } else {
            // This should be simply padded with zero.
            for (int g = 0; g < groups; ++g) {
              for (int i = 0; i < channels / groups; ++i) {
                data_col_temp
                    [(((g * kernel_h + r) * kernel_w) + s) *
                         (channels / groups) +
                     i] = zero_point;
              }
            }
          }
        } // for each iw
      } // for each ih
      data_col_temp += kernel_h * kernel_w * channels;
      w_pad += stride_w;
    } // for each output pixel
  } // for each image row
}
/**
* The layout of the result is N T H W G Q R S C/G.
* Note that groups are pulled out to an outer dimension so that we can use
* GEMMs efficiently.
*/
/**
 * The layout of the result is N T H W G Q R S C/G.
 * Note that groups are pulled out to an outer dimension so that we can use
 * GEMMs efficiently.
 *
 * 3D (temporal + spatial) NHWC im2col with grouped channels; out-of-volume
 * positions are filled with zero_point. Parallelized over output frames
 * (unless already inside an OpenMP parallel region).
 */
template <typename T>
static void Im2Col3DNHWC(
    const int channels,
    const int num_frames,
    const int height,
    const int width,
    const int kernel_t,
    const int kernel_h,
    const int kernel_w,
    const int dilation_t,
    const int dilation_h,
    const int dilation_w,
    const int pad_p, // previous frame
    const int pad_t, // top
    const int pad_l, // left
    const int pad_n, // next frame
    const int pad_b, // bottom
    const int pad_r, // right
    const int stride_t,
    const int stride_h,
    const int stride_w,
    const T* data_im,
    T* data_col,
    CPUContext* /*context*/,
    const int groups,
    const T& zero_point) {
  // Effective (dilated) kernel extents and output sizes per axis.
  const int dkernel_t = dilation_t * (kernel_t - 1) + 1;
  const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
  const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
  int frame_col = (num_frames + pad_p + pad_n - dkernel_t) / stride_t + 1;
  int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
  int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;

#ifdef _OPENMP
#pragma omp parallel for if (!omp_in_parallel())
#endif
  for (int t = 0; t < frame_col; ++t) {
    int t_pad = -pad_p + t * stride_t;
    for (int h = 0; h < height_col; ++h) {
      int h_pad = -pad_t + h * stride_h;
      // Start of this (frame, row) block in data_col; advanced per pixel.
      T* data_col_temp = data_col +
          (t * height_col + h) * width_col * kernel_t * kernel_h * kernel_w *
              channels;
      for (int w = 0; w < width_col; ++w) {
        int w_pad = -pad_l + w * stride_w;
        int q = 0; // kernel frame index
        for (int it = t_pad; it < t_pad + dkernel_t; it += dilation_t, ++q) {
          int r = 0; // kernel row index
          for (int ih = h_pad; ih < h_pad + dkernel_h; ih += dilation_h, ++r) {
            int s = 0; // kernel column index
            for (int iw = w_pad; iw < w_pad + dkernel_w;
                 iw += dilation_w, ++s) {
              if (it >= 0 && it < num_frames && ih >= 0 && ih < height &&
                  iw >= 0 && iw < width) {
                // Copy channels/groups contiguous values per group; the
                // group index is the outermost kernel dimension (G Q R S C/G).
                for (int g = 0; g < groups; ++g) {
                  memcpy(
                      data_col_temp +
                          (((g * kernel_t + q) * kernel_h + r) * kernel_w + s) *
                              (channels / groups),
                      data_im + ((it * height + ih) * width + iw) * channels +
                          g * (channels / groups),
                      sizeof(T) * (channels / groups));
                }
              } else {
                // This should be simply padded with zero.
                for (int g = 0; g < groups; ++g) {
                  for (int i = 0; i < channels / groups; ++i) {
                    data_col_temp
                        [((((g * kernel_t + q) * kernel_h + r) * kernel_w) +
                          s) *
                             (channels / groups) +
                         i] = zero_point;
                  }
                }
              }
            } // for each iw
          } // for each ih
        } // for each it
        data_col_temp += kernel_t * kernel_h * kernel_w * channels;
      } // for each output pixel
    } // for each image row
  } // for each frame
}
} // namespace math
} // namespace caffe2
|
effective_diff_coeffs.c | /*
This source file is part of the Geophysical Fluids Modeling Framework (GAME), which is released under the MIT license.
Github repository: https://github.com/OpenNWP/GAME
*/
/*
In this file, diffusion coefficients, including Eddy viscosities, are computed.
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "../game_types.h"
#include "../spatial_operators/spatial_operators.h"
#include "../thermodynamics/thermodynamics.h"
int tke_update(Irreversible_quantities *, double, State *, Diagnostics *, Grid *);
double ver_hor_viscosity(double, double, double);
int hori_div_viscosity(State *state, Irreversible_quantities *irrev, Grid *grid, Diagnostics *diagnostics, Config *config)
{
    /*
    This function computes the effective diffusion coefficient (molecular + turbulent) acting on horizontal divergent movements.
    */
    #pragma omp parallel for
    for (int ji = 0; ji < NO_OF_SCALARS; ++ji)
    {
        // molecular component
        double mol_coeff = calc_diffusion_coeff(diagnostics -> temperature_gas[ji], state -> rho[NO_OF_CONDENSED_CONSTITUENTS*NO_OF_SCALARS + ji]);
        irrev -> molecular_diffusion_coeff[ji] = mol_coeff;
        // turbulent component (the divergence is approximately one order of magnitude smaller than the vorticity, that is where the prefactor of 6 comes from)
        // 4.0/3 is a result of the stress tensor
        double eff_coeff = mol_coeff
        + config -> diff_h_smag_div*grid -> mean_velocity_area*fabs(4.0/3*diagnostics -> wind_divv[ji]);
        // maximum (stability constraint)
        if (eff_coeff > irrev -> max_diff_h_coeff_turb)
        {
            eff_coeff = irrev -> max_diff_h_coeff_turb;
        }
        // multiplying by the mass density of the gas phase
        irrev -> viscosity_div[ji] = density_gas(state, ji)*eff_coeff;
    }
    return 0;
}
int hori_curl_viscosity_rhombi(State *state, Irreversible_quantities *irrev, Grid *grid, Diagnostics *diagnostics, Config *config)
{
    /*
    This function computes the effective diffusion coefficient (molecular + turbulent) acting on horizontal curl movements on rhombi.
    Always returns 0.
    */
    double molecular_viscosity;
    int scalar_index_from, scalar_index_to, vector_index;
    #pragma omp parallel for private(molecular_viscosity, scalar_index_from, scalar_index_to, vector_index)
    for (int h_index = 0; h_index < NO_OF_VECTORS_H; ++h_index)
    {
        for (int layer_index = 0; layer_index < NO_OF_LAYERS; ++layer_index)
        {
            // index of this horizontal vector point within the full vector field
            vector_index = NO_OF_SCALARS_H + layer_index*NO_OF_VECTORS_PER_LAYER + h_index;
            // preliminary result: Smagorinsky-type turbulent component from the relative vorticity
            irrev -> viscosity_curl_rhombi[vector_index] = config -> diff_h_smag_rot*grid -> mean_velocity_area
            *fabs(diagnostics -> rel_vort[NO_OF_VECTORS_H + 2*layer_index*NO_OF_VECTORS_H + h_index]);
            // calculating and adding the molecular viscosity
            // (temperature and density averaged between the two adjacent cells of this edge)
            scalar_index_from = layer_index*NO_OF_SCALARS_H + grid -> from_index[h_index];
            scalar_index_to = layer_index*NO_OF_SCALARS_H + grid -> to_index[h_index];
            molecular_viscosity = calc_diffusion_coeff(
            0.5*(diagnostics -> temperature_gas[scalar_index_from] + diagnostics -> temperature_gas[scalar_index_to]),
            0.5*(state -> rho[NO_OF_CONDENSED_CONSTITUENTS*NO_OF_SCALARS + scalar_index_from] + state -> rho[NO_OF_CONDENSED_CONSTITUENTS*NO_OF_SCALARS + scalar_index_to]));
            irrev -> viscosity_curl_rhombi[vector_index] += molecular_viscosity;
            // maximum (stability constraint)
            if (irrev -> viscosity_curl_rhombi[vector_index] > irrev -> max_diff_h_coeff_turb)
            {
                irrev -> viscosity_curl_rhombi[vector_index] = irrev -> max_diff_h_coeff_turb;
            }
            // multiplying by the mass density of the gas phase (edge value = mean of the two cells)
            irrev -> viscosity_curl_rhombi[vector_index] = 0.5*(density_gas(state, scalar_index_from) + density_gas(state, scalar_index_to))*irrev -> viscosity_curl_rhombi[vector_index];
        }
    }
    // averaging the curl diffusion coefficient from edges to cells
    edges_to_cells(irrev -> viscosity_curl_rhombi, irrev -> viscosity_curl, grid);
    return 0;
}
int hori_curl_viscosity_triangles(State *state, Irreversible_quantities *irrev, Grid *grid, Dualgrid *dualgrid, Diagnostics *diagnostics, Config *config)
{
    /*
    This function computes the effective diffusion coefficient (molecular + turbulent) acting on horizontal curl movements on triangles.
    Always returns 0.
    */
    int layer_index, h_index, rho_base_index, temp_base_index;
    double molecular_viscosity, density_value;
    #pragma omp parallel for private(molecular_viscosity, layer_index, h_index, density_value, rho_base_index, temp_base_index)
    for (int i = 0; i < NO_OF_DUAL_V_VECTORS; ++i)
    {
        // decompose the flat index into (layer, horizontal triangle) indices
        layer_index = i/NO_OF_DUAL_SCALARS_H;
        h_index = i - layer_index*NO_OF_DUAL_SCALARS_H;
        // preliminary result: Smagorinsky-type turbulent component from the triangle vorticity
        irrev -> viscosity_curl_triangles[i] = config -> diff_h_smag_rot*grid -> mean_velocity_area
        *fabs(diagnostics -> rel_vort_on_triangles[layer_index*NO_OF_DUAL_SCALARS_H + h_index]);
        rho_base_index = NO_OF_CONDENSED_CONSTITUENTS*NO_OF_SCALARS + layer_index*NO_OF_SCALARS_H;
        // calculating and adding the molecular viscosity
        // (density averaged over the six cells adjacent to the triangle's three edges)
        density_value =
        1.0/6*(
        state -> rho[rho_base_index + grid -> from_index[dualgrid -> vorticity_indices_triangles[3*h_index + 0]]]
        + state -> rho[rho_base_index + grid -> to_index[dualgrid -> vorticity_indices_triangles[3*h_index + 0]]]
        + state -> rho[rho_base_index + grid -> from_index[dualgrid -> vorticity_indices_triangles[3*h_index + 1]]]
        + state -> rho[rho_base_index + grid -> to_index[dualgrid -> vorticity_indices_triangles[3*h_index + 1]]]
        + state -> rho[rho_base_index + grid -> from_index[dualgrid -> vorticity_indices_triangles[3*h_index + 2]]]
        + state -> rho[rho_base_index + grid -> to_index[dualgrid -> vorticity_indices_triangles[3*h_index + 2]]]);
        temp_base_index = layer_index*NO_OF_SCALARS_H ;
        // temperature averaged over the same six adjacent cells
        molecular_viscosity = calc_diffusion_coeff(
        1.0/6*(
        diagnostics -> temperature_gas[temp_base_index + grid -> from_index[dualgrid -> vorticity_indices_triangles[3*h_index + 0]]]
        + diagnostics -> temperature_gas[temp_base_index + grid -> to_index[dualgrid -> vorticity_indices_triangles[3*h_index + 0]]]
        + diagnostics -> temperature_gas[temp_base_index + grid -> from_index[dualgrid -> vorticity_indices_triangles[3*h_index + 1]]]
        + diagnostics -> temperature_gas[temp_base_index + grid -> to_index[dualgrid -> vorticity_indices_triangles[3*h_index + 1]]]
        + diagnostics -> temperature_gas[temp_base_index + grid -> from_index[dualgrid -> vorticity_indices_triangles[3*h_index + 2]]]
        + diagnostics -> temperature_gas[temp_base_index + grid -> to_index[dualgrid -> vorticity_indices_triangles[3*h_index + 2]]]),
        density_value);
        irrev -> viscosity_curl_triangles[i] += molecular_viscosity;
        // maximum (stability constraint)
        if (irrev -> viscosity_curl_triangles[i] > irrev -> max_diff_h_coeff_turb)
        {
            irrev -> viscosity_curl_triangles[i] = irrev -> max_diff_h_coeff_turb;
        }
        // multiplying by the mass density of the gas phase
        irrev -> viscosity_curl_triangles[i] = density_value*irrev -> viscosity_curl_triangles[i];
    }
    return 0;
}
int vert_hor_mom_viscosity(State *state, Irreversible_quantities *irrev, Diagnostics *diagnostics, Config *config, Grid *grid, double delta_t)
{
    /*
    This function computes the effective viscosity (eddy + molecular viscosity) for the vertical diffusion of horizontal velocity.
    This quantity is located at the half level edges.
    To obey the symmetry of the stress tensor, the same coefficient must be used for the horizontal diffusion of vertical velocity.
    Fix: the local variable "molecuar_viscosity" was misspelled; it is renamed to molecular_viscosity
    for consistency with the naming used in the other viscosity functions of this file.
    */
    // stability limit of the vertical diffusion coefficient, derived from the vertical grid spacing of the lowest layer
    double max_diff_v_coeff_turb = 0.125*pow(
    grid -> z_vector[NO_OF_VECTORS - NO_OF_VECTORS_PER_LAYER - NO_OF_SCALARS_H] - grid -> z_vector[NO_OF_VECTORS - NO_OF_SCALARS_H]
    , 2)/delta_t;
    int layer_index, h_index, scalar_base_index;
    double mom_diff_coeff, molecular_viscosity, delta_z;
    // updating the TKE
    tke_update(irrev, delta_t, state, diagnostics, grid);
    // loop over horizontal vector points at half levels
    #pragma omp parallel for private(layer_index, h_index, mom_diff_coeff, molecular_viscosity, scalar_base_index, delta_z)
    for (int i = 0; i < NO_OF_H_VECTORS - NO_OF_VECTORS_H; ++i)
    {
        layer_index = i/NO_OF_VECTORS_H;
        h_index = i - layer_index*NO_OF_VECTORS_H;
        scalar_base_index = layer_index*NO_OF_SCALARS_H;
        // the turbulent component: average of the TKE-based viscosity at the four neighbouring scalar points
        delta_z = grid -> z_vector[NO_OF_SCALARS_H + layer_index*NO_OF_VECTORS_PER_LAYER + h_index]
        - grid -> z_vector[NO_OF_SCALARS_H + (layer_index + 1)*NO_OF_VECTORS_PER_LAYER + h_index];
        mom_diff_coeff = 0.25*(ver_hor_viscosity(irrev -> tke[scalar_base_index + grid -> from_index[h_index]], delta_z, irrev -> mixing_length)
        + ver_hor_viscosity(irrev -> tke[scalar_base_index + grid -> to_index[h_index]], delta_z, irrev -> mixing_length)
        + ver_hor_viscosity(irrev -> tke[(layer_index + 1)*NO_OF_SCALARS_H + grid -> from_index[h_index]], delta_z, irrev -> mixing_length)
        + ver_hor_viscosity(irrev -> tke[(layer_index + 1)*NO_OF_SCALARS_H + grid -> to_index[h_index]], delta_z, irrev -> mixing_length));
        // computing and adding the molecular viscosity
        // the scalar variables need to be averaged to the vector points at half levels
        molecular_viscosity = calc_diffusion_coeff(0.25*(diagnostics -> temperature_gas[scalar_base_index + grid -> from_index[h_index]]
        + diagnostics -> temperature_gas[scalar_base_index + grid -> to_index[h_index]]
        + diagnostics -> temperature_gas[(layer_index + 1)*NO_OF_SCALARS_H + grid -> from_index[h_index]]
        + diagnostics -> temperature_gas[(layer_index + 1)*NO_OF_SCALARS_H + grid -> to_index[h_index]]),
        0.25*(state -> rho[NO_OF_CONDENSED_CONSTITUENTS*NO_OF_SCALARS + scalar_base_index + grid -> from_index[h_index]]
        + state -> rho[NO_OF_CONDENSED_CONSTITUENTS*NO_OF_SCALARS + scalar_base_index + grid -> to_index[h_index]]
        + state -> rho[NO_OF_CONDENSED_CONSTITUENTS*NO_OF_SCALARS + (layer_index + 1)*NO_OF_SCALARS_H + grid -> from_index[h_index]]
        + state -> rho[NO_OF_CONDENSED_CONSTITUENTS*NO_OF_SCALARS + (layer_index + 1)*NO_OF_SCALARS_H + grid -> to_index[h_index]]));
        mom_diff_coeff += molecular_viscosity;
        // obeying the stability limit
        if (mom_diff_coeff > max_diff_v_coeff_turb)
        {
            mom_diff_coeff = max_diff_v_coeff_turb;
        }
        // multiplying by the density (averaged to the half level edge)
        irrev -> vert_hor_viscosity[i + NO_OF_VECTORS_H] =
        0.25*(density_gas(state, scalar_base_index + grid -> from_index[h_index])
        + density_gas(state, scalar_base_index + grid -> to_index[h_index])
        + density_gas(state, (layer_index + 1)*NO_OF_SCALARS_H + grid -> from_index[h_index])
        + density_gas(state, (layer_index + 1)*NO_OF_SCALARS_H + grid -> to_index[h_index]))
        *mom_diff_coeff;
    }
    // for now, we set the vertical diffusion coefficient at the TOA equal to the vertical diffusion coefficient in the layer below
    #pragma omp parallel for
    for (int i = 0; i < NO_OF_VECTORS_H; ++i)
    {
        irrev -> vert_hor_viscosity[i] = irrev -> vert_hor_viscosity[i + NO_OF_VECTORS_H];
    }
    // for now, we set the vertical diffusion coefficient at the surface equal to the vertical diffusion coefficient in the layer above
    #pragma omp parallel for
    for (int i = NO_OF_H_VECTORS; i < NO_OF_H_VECTORS + NO_OF_VECTORS_H; ++i)
    {
        irrev -> vert_hor_viscosity[i] = irrev -> vert_hor_viscosity[i - NO_OF_VECTORS_H];
    }
    return 0;
}
int vert_w_viscosity(State *state, Grid *grid, Diagnostics *diagnostics, Irreversible_quantities *irrev, double delta_t)
{
    /*
    This function multiplies scalar_field_placeholder (containing dw/dz) by the diffusion coefficient acting on w because of w.
    */
    // the maximum vertical diffusion coefficient (stability constraint from the lowest layer's vertical spacing)
    double max_diff_v_coeff_turb = 0.125*pow(
    grid -> z_vector[NO_OF_VECTORS - NO_OF_VECTORS_PER_LAYER - NO_OF_SCALARS_H] - grid -> z_vector[NO_OF_VECTORS - NO_OF_SCALARS_H]
    , 2)/delta_t;
    int layer_index, h_index;
    double diff_coeff, eff_mixing_length;
    #pragma omp parallel for private(layer_index, h_index, diff_coeff, eff_mixing_length)
    for (int i = 0; i < NO_OF_SCALARS; ++i)
    {
        layer_index = i/NO_OF_SCALARS_H;
        h_index = i - layer_index*NO_OF_SCALARS_H;
        // the mixing length is capped by the local layer thickness
        eff_mixing_length = fmin(irrev -> mixing_length,
        grid -> z_vector[h_index + layer_index*NO_OF_VECTORS_PER_LAYER] - grid -> z_vector[h_index + (layer_index + 1)*NO_OF_VECTORS_PER_LAYER]);
        // molecular viscosity plus the turbulent component ~ l^2 * |dw/dz|
        diff_coeff = irrev -> molecular_diffusion_coeff[i]
        + pow(eff_mixing_length, 2)*fabs(diagnostics -> scalar_field_placeholder[i]);
        // stability criterion
        if (diff_coeff > max_diff_v_coeff_turb)
        {
            diff_coeff = max_diff_v_coeff_turb;
        }
        // scalar_field_placeholder now carries rho * K * dw/dz
        diagnostics -> scalar_field_placeholder[i] = density_gas(state, i)*diff_coeff*diagnostics -> scalar_field_placeholder[i];
    }
    return 0;
}
int calc_temp_diffusion_coeffs(State *state, Config *config, Irreversible_quantities *irrev, Diagnostics *diagnostics, double delta_t, Grid *grid)
{
    /*
    This function computes the viscous temperature diffusion coefficient (including eddies).
    */
    // If momentum diffusion is switched off, the eddy viscosities and the TKE have not been computed yet, so do it here.
    if (config -> momentum_diff_h == 0)
    {
        hori_div_viscosity(state, irrev, grid, diagnostics, config);
        hori_curl_viscosity_rhombi(state, irrev, grid, diagnostics, config);
        tke_update(irrev, delta_t, state, diagnostics, grid);
        // the molecular viscosity
        #pragma omp parallel for
        for (int i = 0; i < NO_OF_SCALARS; ++i)
        {
            irrev -> molecular_diffusion_coeff[i] = calc_diffusion_coeff(diagnostics -> temperature_gas[i],
            state -> rho[NO_OF_CONDENSED_CONSTITUENTS*NO_OF_SCALARS + i]);
        }
    }
    int layer_index, h_index;
    double c_v_gas, delta_z;
    #pragma omp parallel for private(layer_index, h_index, c_v_gas, delta_z)
    for (int i = 0; i < NO_OF_SCALARS; ++i)
    {
        layer_index = i/NO_OF_SCALARS_H;
        h_index = i - layer_index*NO_OF_SCALARS_H;
        // the specific heat capacity at constant volume of the gas phase
        c_v_gas = spec_heat_cap_diagnostics_v(state, i, config);
        // the local layer thickness
        delta_z = grid -> z_vector[h_index + layer_index*NO_OF_VECTORS_PER_LAYER] - grid -> z_vector[h_index + (layer_index + 1)*NO_OF_VECTORS_PER_LAYER];
        // horizontal coefficient: molecular part plus the turbulent divergence and curl components
        irrev -> scalar_diffusion_coeff_numerical_h[i]
        = c_v_gas*(density_gas(state, i)*irrev -> molecular_diffusion_coeff[i]
        + irrev -> viscosity_div[i] + irrev -> viscosity_curl[i]);
        // vertical coefficient: molecular part plus the TKE-based vertical eddy viscosity
        irrev -> scalar_diffusion_coeff_numerical_v[i]
        = density_gas(state, i)*c_v_gas*(irrev -> molecular_diffusion_coeff[i]
        + ver_hor_viscosity(irrev -> tke[i], delta_z, irrev -> mixing_length));
    }
    return 0;
}
int calc_mass_diffusion_coeffs(State *state, Config *config, Irreversible_quantities *irrev, Diagnostics *diagnostics, double delta_t, Grid *grid)
{
    /*
    This function computes the viscous tracer diffusion coefficient (including eddies).
    */
    // If neither momentum nor temperature diffusion has run, the eddy viscosities and the TKE still need to be computed.
    if (config -> momentum_diff_h == 0 && config -> temperature_diff_h == 0)
    {
        hori_div_viscosity(state, irrev, grid, diagnostics, config);
        hori_curl_viscosity_rhombi(state, irrev, grid, diagnostics, config);
        tke_update(irrev, delta_t, state, diagnostics, grid);
        // the molecular viscosity
        #pragma omp parallel for
        for (int i = 0; i < NO_OF_SCALARS; ++i)
        {
            irrev -> molecular_diffusion_coeff[i] = calc_diffusion_coeff(diagnostics -> temperature_gas[i],
            state -> rho[NO_OF_CONDENSED_CONSTITUENTS*NO_OF_SCALARS + i]);
        }
    }
    int layer_index, h_index;
    double delta_z;
    #pragma omp parallel for private(layer_index, h_index, delta_z)
    for (int i = 0; i < NO_OF_SCALARS; ++i)
    {
        layer_index = i/NO_OF_SCALARS_H;
        h_index = i - layer_index*NO_OF_SCALARS_H;
        // the local layer thickness
        delta_z = grid -> z_vector[h_index + layer_index*NO_OF_VECTORS_PER_LAYER] - grid -> z_vector[h_index + (layer_index + 1)*NO_OF_VECTORS_PER_LAYER];
        // horizontal coefficient: molecular part plus the kinematic turbulent components
        irrev -> scalar_diffusion_coeff_numerical_h[i]
        = irrev -> molecular_diffusion_coeff[i]
        + (irrev -> viscosity_div[i] + irrev -> viscosity_curl[i])
        /density_gas(state, i);
        // vertical coefficient: molecular part plus the TKE-based vertical eddy viscosity
        irrev -> scalar_diffusion_coeff_numerical_v[i]
        = irrev -> molecular_diffusion_coeff[i]
        + ver_hor_viscosity(irrev -> tke[i], delta_z,
        irrev -> mixing_length);
    }
    return 0;
}
int tke_update(Irreversible_quantities *irrev, double delta_t, State *state, Diagnostics *diagnostics, Grid *grid)
{
    /*
    This function updates the specific turbulent kinetic energy (TKE), unit: J/kg.
    */
    // the ratio of global unresolved to resolved kinetic energy
    double tke_ke_ratio = 0.1*pow(4, 5 - RES_ID);
    // the e-folding time of the TKE approximation
    double tke_approx_time = 10800*pow(4, 5 - RES_ID);
    // computing the advection of TKE (result lands in scalar_field_placeholder)
    grad(irrev -> tke, diagnostics -> vector_field_placeholder, grid);
    inner_product(diagnostics -> vector_field_placeholder, state -> wind, diagnostics -> scalar_field_placeholder, grid);
    double boundary_layer_height = 1000.0;
    double roughness_length_factor = 1.0/0.08;
    int layer_index, h_index;
    double decay_constant, production_rate, z_agl;
    #pragma omp parallel for private(layer_index, h_index, decay_constant, production_rate, z_agl)
    for (int i = 0; i < NO_OF_SCALARS; ++i)
    {
        layer_index = i/NO_OF_SCALARS_H;
        h_index = i - layer_index*NO_OF_SCALARS_H;
        // decay constant of the TKE due to molecular dissipation
        decay_constant = 8*pow(M_PI, 2)/grid -> mean_velocity_area*(irrev -> viscosity_div[i] + irrev -> viscosity_curl[i])/density_gas(state, i);
        // height of this grid point above the surface
        z_agl = grid -> z_scalar[i] - grid -> z_vector[NO_OF_VECTORS - NO_OF_SCALARS_H + h_index];
        // turbulence production is only active inside the boundary layer
        production_rate = 0;
        if (z_agl <= boundary_layer_height)
        {
            production_rate =
            // factor taking into account the roughness of the surface
            roughness_length_factor*grid -> roughness_length[h_index]
            // height-dependent factor, decreasing linearly to zero at the top of the boundary layer
            *(boundary_layer_height - z_agl)/boundary_layer_height
            // relaxation towards the assumed unresolved-to-resolved energy ratio
            *(tke_ke_ratio*0.5*diagnostics -> v_squared[i] - irrev -> tke[i])/tke_approx_time;
            // restricting the production rate to positive values
            production_rate = fmax(0, production_rate);
        }
        // prognostic equation for the TKE
        irrev -> tke[i] += delta_t*(
        // advection
        - diagnostics -> scalar_field_placeholder[i]
        // production through dissipation of resolved energy
        + irrev -> heating_diss[i]/density_gas(state, i)
        // decay through molecular dissipation
        - decay_constant*irrev -> tke[i]
        // production through turbulence generation in the boundary layer
        + production_rate);
        // clipping negative values which might occur through advection
        if (irrev -> tke[i] < 0)
        {
            irrev -> tke[i] = 0;
        }
    }
    return 0;
}
double ver_hor_viscosity(double tke, double delta_z, double mixing_length)
{
/*
This function returns the vertical kinematic Eddy viscosity as a function of the specific TKE.
*/
double prop_constant = 0.03*fmin(delta_z, mixing_length); // unit: m
double result = prop_constant*pow(2*tke, 0.5);
return result;
}
|
omp_picalc_monte_carlo.c | #include <omp.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
// number of random sample points thrown into the unit square
const unsigned int NUM_POINTS = 100000000;
// circle radius; a length, so it must be a floating-point type
// (was declared unsigned int but initialized with the double literal 1.0)
const double RADIUS = 1.0;
// Returns true when the point (x, y) lies strictly inside the circle of the
// given radius centered at the origin.
// BUG FIX: the original compared the SQUARED distance x*x + y*y against the
// plain radius, which is only correct for radius == 1; compare against
// radius*radius so the predicate is correct for any radius.
bool is_inside_circle(double x, double y, double radius) {
    return (x * x + y * y) < radius * radius;
}
// Give every OpenMP thread its own RNG seed in seeds[]: the wall-clock time
// supplies the high bits and (thread id + 1) the low bits, so the seeds
// differ across threads even though time(NULL) is the same for all of them.
void seedThreads(unsigned int* seeds) {
    #pragma omp parallel
    {
        int tid = omp_get_thread_num();
        unsigned int base = (unsigned) time(NULL);
        seeds[tid] = (base & 0xFFFFFFF0) | (tid + 1);
        printf("thread %d has seed %u\n", tid, seeds[tid]);
    }
}
// Monte-Carlo estimate of pi: throw NUM_POINTS uniform points into the unit
// square and count how many land inside the quarter circle; the hit ratio
// approaches pi/4.
int main (int argc, char *argv[]) {
    // BUG FIX: the original sized the seed array with omp_get_num_threads(),
    // which returns 1 outside of a parallel region, so seedThreads() wrote out
    // of bounds for every thread but the first. omp_get_max_threads() returns
    // the team size the parallel regions below will actually use.
    unsigned int seeds[omp_get_max_threads()];
    seedThreads(seeds);
    unsigned int correct_points = 0;
    double start = omp_get_wtime();
    // each thread draws from its private seed; the hit counter is a reduction
    #pragma omp parallel for shared(seeds) reduction(+:correct_points)
    for (size_t i = 0; i < NUM_POINTS; ++i) {
        int tid = omp_get_thread_num();
        double x = ((double)rand_r(&seeds[tid]) / (double)RAND_MAX);
        double y = ((double)rand_r(&seeds[tid]) / (double)RAND_MAX);
        correct_points += is_inside_circle(x, y, RADIUS);
    }
    double end = omp_get_wtime();
    printf("pi = %f (time: %f s, points: %u)\n",
           (double)(4.0 * correct_points / NUM_POINTS), end - start, NUM_POINTS);
    return 0;
}
Square.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/Square.c"
#else
// Element-wise square: output[i] = input[i]^2, resizing output to match input.
void THNN_(Square_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output)
{
  THTensor_(resizeAs)(output, input);

  if (input->nDimension == 1 || !THTensor_(isContiguous)(input) || !THTensor_(isContiguous)(output))
  {
    // non-contiguous (or 1-d) case: use the generic strided iterator
    TH_TENSOR_APPLY2(real, output, real, input,
      *output_data = (*input_data) * (*input_data);
    );
  }
  else
  {
    real *output_data = THTensor_(data)(output);
    real *input_data = THTensor_(data)(input);
    int64_t i;
    // hoist the element count: the original called THTensor_(nElement) in the
    // loop condition on every iteration, which is redundant work and not a
    // canonical OpenMP loop bound
    int64_t n = THTensor_(nElement)(input);
#pragma omp parallel for private(i)
    for (i = 0; i < n; i++)
      output_data[i] = input_data[i]*input_data[i];
  }
}
// Backward of the element-wise square: gradInput[i] = 2 * gradOutput[i] * input[i]
// (d/dx x^2 = 2x).
void THNN_(Square_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput)
{
  THNN_CHECK_SHAPE(input, gradOutput);
  THTensor_(resizeAs)(gradInput, input);

  if (input->nDimension == 1 ||
      !THTensor_(isContiguous)(input) ||
      !THTensor_(isContiguous)(gradOutput) ||
      !THTensor_(isContiguous)(gradInput))
  {
    // non-contiguous (or 1-d) case: use the generic strided iterator
    TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input,
      *gradInput_data = 2.0 * (*gradOutput_data) * (*input_data);
    );
  }
  else
  {
    real *gradOutput_data = THTensor_(data)(gradOutput);
    real *gradInput_data = THTensor_(data)(gradInput);
    real *input_data = THTensor_(data)(input);
    int64_t i;
    // hoist the element count: the original called THTensor_(nElement) in the
    // loop condition on every iteration, which is redundant work and not a
    // canonical OpenMP loop bound
    int64_t n = THTensor_(nElement)(gradInput);
#pragma omp parallel for private(i)
    for (i = 0; i < n; i++)
      gradInput_data[i] = 2.0 * gradOutput_data[i] * input_data[i];
  }
}
#endif
|
LAGraph_Sort3.c | //------------------------------------------------------------------------------
// LAGraph_Sort3: sort a 3-by-n list of integers, using A[0:2][ ] as the key
//------------------------------------------------------------------------------
// LAGraph, (c) 2021 by The LAGraph Contributors, All Rights Reserved.
// SPDX-License-Identifier: BSD-2-Clause
// Contributed by Tim Davis, Texas A&M University.
//------------------------------------------------------------------------------
// A parallel mergesort of an array of 3-by-n integers. Each key
// consists of three integers.
#define LAGraph_FREE_ALL LAGraph_Free ((void **) &W) ;
#include "LG_internal.h"
//------------------------------------------------------------------------------
// prototype only needed for LAGraph_Sort3
//------------------------------------------------------------------------------
void LG_msort_3b_create_merge_tasks
(
// output:
int64_t *LG_RESTRICT L_task, // L_task [t0...t0+ntasks-1] computed
int64_t *LG_RESTRICT L_len, // L_len [t0...t0+ntasks-1] computed
int64_t *LG_RESTRICT R_task, // R_task [t0...t0+ntasks-1] computed
int64_t *LG_RESTRICT R_len, // R_len [t0...t0+ntasks-1] computed
int64_t *LG_RESTRICT S_task, // S_task [t0...t0+ntasks-1] computed
// input:
const int t0, // first task tid to create
const int ntasks, // # of tasks to create
const int64_t pS_start, // merge into S [pS_start...]
const int64_t *LG_RESTRICT L_0, // Left = L [pL_start...pL_end-1]
const int64_t *LG_RESTRICT L_1,
const int64_t *LG_RESTRICT L_2,
const int64_t pL_start,
const int64_t pL_end,
const int64_t *LG_RESTRICT R_0, // Right = R [pR_start...pR_end-1]
const int64_t *LG_RESTRICT R_1,
const int64_t *LG_RESTRICT R_2,
const int64_t pR_start,
const int64_t pR_end
) ;
//------------------------------------------------------------------------------
// LG_msort_3b_binary_search: binary search for the pivot
//------------------------------------------------------------------------------
// The Pivot value is Y [pivot], and a binary search for the Pivot is made in
// the array X [p_pstart...p_end-1], which is sorted in non-decreasing order on
// input. The return value is pleft, where
//
// X [p_start ... pleft-1] <= Pivot and
// X [pleft ... p_end-1] >= Pivot holds.
//
// pleft is returned in the range p_start to p_end. If pleft is p_start, then
// the Pivot is smaller than all entries in X [p_start...p_end-1], and the left
// list X [p_start...pleft-1] is empty. If pleft is p_end, then the Pivot is
// larger than all entries in X [p_start...p_end-1], and the right list X
// [pleft...p_end-1] is empty.
static int64_t LG_msort_3b_binary_search // return pleft
(
    const int64_t *LG_RESTRICT Y_0,     // Pivot is Y [pivot]
    const int64_t *LG_RESTRICT Y_1,
    const int64_t *LG_RESTRICT Y_2,
    const int64_t pivot,
    const int64_t *LG_RESTRICT X_0,     // search in X [p_start..p_end_-1]
    const int64_t *LG_RESTRICT X_1,
    const int64_t *LG_RESTRICT X_2,
    const int64_t p_start,
    const int64_t p_end
)
{

    //--------------------------------------------------------------------------
    // find where the Pivot appears in X
    //--------------------------------------------------------------------------

    // standard binary search of X [p_start...p_end-1] for the Pivot Y [pivot]
    int64_t pleft = p_start ;
    int64_t pright = p_end - 1 ;
    while (pleft < pright)
    {
        int64_t pmid = (pleft + pright) >> 1 ;
        if (LG_lt_3 (X_0, X_1, X_2, pmid, Y_0, Y_1, Y_2, pivot))
        {
            // X [pmid] < Pivot: discard the left half, including pmid
            pleft = pmid + 1 ;
        }
        else
        {
            // X [pmid] >= Pivot: discard the right half, keeping pmid
            pright = pmid ;
        }
    }

    // the binary search has narrowed down to a single item,
    // or it has found the list is empty
    ASSERT (pleft == pright || pleft == pright + 1) ;

    // If found is true then X [pleft == pright] == Pivot (any one of the
    // entries equal to the Pivot, if duplicates appear). If found is false
    // then X [p_start ... pleft-1] < Pivot and X [pleft+1 ... p_end-1] > Pivot
    // hold, and X [pleft] may be either < or > Pivot.
    bool found = (pleft == pright) && LG_eq_3 (X_0, X_1, X_2, pleft,
        Y_0, Y_1, Y_2, pivot) ;

    // when the Pivot was not found and X [pleft] < Pivot, the split point
    // lies one position past pleft (pright would not need to move)
    if (!found && (pleft == pright) && LG_lt_3 (X_0, X_1, X_2, pleft,
        Y_0, Y_1, Y_2, pivot))
    {
        pleft++ ;
    }

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    // On return, whether or not the Pivot was found, and whether or not X has
    // duplicates:
    //      X [p_start ... pleft-1] <= Pivot and
    //      X [pleft ... p_end-1] >= Pivot hold,
    // and the two inequalities are strict when X has no duplicates and the
    // Pivot is absent.
    return (pleft) ;
}
//------------------------------------------------------------------------------
// LG_msort_3b_create_merge_tasks
//------------------------------------------------------------------------------
// Recursively constructs ntasks tasks to merge two arrays, Left and Right,
// into Sresult, where Left is L [pL_start...pL_end-1], Right is R
// [pR_start...pR_end-1], and Sresult is S [pS_start...pS_start+total_work-1],
// and where total_work is the total size of Left and Right.
//
// Task tid will merge L [L_task [tid] ... L_task [tid] + L_len [tid] - 1] and
// R [R_task [tid] ... R_task [tid] + R_len [tid] -1] into the merged output
// array S [S_task [tid] ... ]. The task tids created are t0 to
// t0+ntasks-1.
// Recursively partition the merge of Left and Right into ntasks independent
// tasks. The binary search on the pivot guarantees every element assigned to
// the first group of tasks is <= every element assigned to the second group,
// so the tasks can run concurrently and their outputs concatenate correctly.
void LG_msort_3b_create_merge_tasks
(
// output:
int64_t *LG_RESTRICT L_task, // L_task [t0...t0+ntasks-1] computed
int64_t *LG_RESTRICT L_len, // L_len [t0...t0+ntasks-1] computed
int64_t *LG_RESTRICT R_task, // R_task [t0...t0+ntasks-1] computed
int64_t *LG_RESTRICT R_len, // R_len [t0...t0+ntasks-1] computed
int64_t *LG_RESTRICT S_task, // S_task [t0...t0+ntasks-1] computed
// input:
const int t0, // first task tid to create
const int ntasks, // # of tasks to create
const int64_t pS_start, // merge into S [pS_start...]
const int64_t *LG_RESTRICT L_0, // Left = L [pL_start...pL_end-1]
const int64_t *LG_RESTRICT L_1,
const int64_t *LG_RESTRICT L_2,
const int64_t pL_start,
const int64_t pL_end,
const int64_t *LG_RESTRICT R_0, // Right = R [pR_start...pR_end-1]
const int64_t *LG_RESTRICT R_1,
const int64_t *LG_RESTRICT R_2,
const int64_t pR_start,
const int64_t pR_end
)
{
//--------------------------------------------------------------------------
// get problem size
//--------------------------------------------------------------------------
int64_t nleft = pL_end - pL_start ; // size of Left array
int64_t nright = pR_end - pR_start ; // size of Right array
int64_t total_work = nleft + nright ; // total work to do
ASSERT (ntasks >= 1) ;
ASSERT (total_work > 0) ;
//--------------------------------------------------------------------------
// create the tasks
//--------------------------------------------------------------------------
if (ntasks == 1)
{
//----------------------------------------------------------------------
// a single task will merge all of Left and Right into Sresult
//----------------------------------------------------------------------
// base case of the recursion: record the full Left/Right/S ranges
L_task [t0] = pL_start ; L_len [t0] = nleft ;
R_task [t0] = pR_start ; R_len [t0] = nright ;
S_task [t0] = pS_start ;
}
else
{
//----------------------------------------------------------------------
// partition the Left and Right arrays for multiple merge tasks
//----------------------------------------------------------------------
// always split the LARGER of the two lists in half, then binary-search
// for the matching split point in the smaller list, so the recursion
// depth stays bounded even for very unbalanced inputs
int64_t pleft, pright ;
if (nleft >= nright)
{
// split Left in half, and search for its pivot in Right
pleft = (pL_end + pL_start) >> 1 ;
pright = LG_msort_3b_binary_search (
L_0, L_1, L_2, pleft,
R_0, R_1, R_2, pR_start, pR_end) ;
}
else
{
// split Right in half, and search for its pivot in Left
pright = (pR_end + pR_start) >> 1 ;
pleft = LG_msort_3b_binary_search (
R_0, R_1, R_2, pright,
L_0, L_1, L_2, pL_start, pL_end) ;
}
//----------------------------------------------------------------------
// partition the tasks according to the work of each partition
//----------------------------------------------------------------------
// work0 is the total work in the first partition
int64_t work0 = (pleft - pL_start) + (pright - pR_start) ;
// assign tasks to the two partitions proportionally to their work
int ntasks0 = (int) round ((double) ntasks *
(((double) work0) / ((double) total_work))) ;
// ensure at least one task is assigned to each partition
ntasks0 = LAGraph_MAX (ntasks0, 1) ;
ntasks0 = LAGraph_MIN (ntasks0, ntasks-1) ;
int ntasks1 = ntasks - ntasks0 ;
//----------------------------------------------------------------------
// assign ntasks0 to the first half
//----------------------------------------------------------------------
// ntasks0 tasks merge L [pL_start...pleft-1] and R [pR_start..pright-1]
// into the result S [pS_start...work0-1].
LG_msort_3b_create_merge_tasks (
L_task, L_len, R_task, R_len, S_task, t0, ntasks0, pS_start,
L_0, L_1, L_2, pL_start, pleft,
R_0, R_1, R_2, pR_start, pright) ;
//----------------------------------------------------------------------
// assign ntasks1 to the second half
//----------------------------------------------------------------------
// ntasks1 tasks merge L [pleft...pL_end-1] and R [pright...pR_end-1]
// into the result S [pS_start+work0...pS_start+total_work].
int t1 = t0 + ntasks0 ; // first task id of the second set of tasks
int64_t pS_start1 = pS_start + work0 ; // 2nd set starts here in S
LG_msort_3b_create_merge_tasks (
L_task, L_len, R_task, R_len, S_task, t1, ntasks1, pS_start1,
L_0, L_1, L_2, pleft, pL_end,
R_0, R_1, R_2, pright, pR_end) ;
}
}
//------------------------------------------------------------------------------
// LG_msort_3b_merge: merge two sorted lists via a single thread
//------------------------------------------------------------------------------
// merge Left [0..nleft-1] and Right [0..nright-1] into S [0..nleft+nright-1] */
// merge the sorted lists Left [0..nleft-1] and Right [0..nright-1] into
// S [0..nleft+nright-1], sequentially, preserving the sort order of the
// three-integer keys
static void LG_msort_3b_merge
(
    int64_t *LG_RESTRICT S_0,              // output of length nleft + nright
    int64_t *LG_RESTRICT S_1,
    int64_t *LG_RESTRICT S_2,
    const int64_t *LG_RESTRICT Left_0,     // left input of length nleft
    const int64_t *LG_RESTRICT Left_1,
    const int64_t *LG_RESTRICT Left_2,
    const int64_t nleft,
    const int64_t *LG_RESTRICT Right_0,    // right input of length nright
    const int64_t *LG_RESTRICT Right_1,
    const int64_t *LG_RESTRICT Right_2,
    const int64_t nright
)
{
    int64_t p = 0, pleft = 0, pright = 0 ;

    // take the smaller head element of Left and Right while both lists
    // still have entries
    while (pleft < nleft && pright < nright)
    {
        if (LG_lt_3 (Left_0, Left_1, Left_2, pleft,
                     Right_0, Right_1, Right_2, pright))
        {
            // S [p] = Left [pleft++]
            S_0 [p] = Left_0 [pleft] ;
            S_1 [p] = Left_1 [pleft] ;
            S_2 [p] = Left_2 [pleft] ;
            pleft++ ;
        }
        else
        {
            // S [p] = Right [pright++]
            S_0 [p] = Right_0 [pright] ;
            S_1 [p] = Right_1 [pright] ;
            S_2 [p] = Right_2 [pright] ;
            pright++ ;
        }
        p++ ;
    }

    // one of the inputs is exhausted; bulk-copy the tail of the other into S
    if (pleft < nleft)
    {
        int64_t rest = nleft - pleft ;
        memcpy (S_0 + p, Left_0 + pleft, rest * sizeof (int64_t)) ;
        memcpy (S_1 + p, Left_1 + pleft, rest * sizeof (int64_t)) ;
        memcpy (S_2 + p, Left_2 + pleft, rest * sizeof (int64_t)) ;
    }
    else if (pright < nright)
    {
        int64_t rest = nright - pright ;
        memcpy (S_0 + p, Right_0 + pright, rest * sizeof (int64_t)) ;
        memcpy (S_1 + p, Right_1 + pright, rest * sizeof (int64_t)) ;
        memcpy (S_2 + p, Right_2 + pright, rest * sizeof (int64_t)) ;
    }
}
//------------------------------------------------------------------------------
// LAGraph_Sort3: parallel mergesort
//------------------------------------------------------------------------------
// Parallel mergesort of the 3-by-n array (A_0, A_1, A_2), sorted on the
// three-integer keys (A_0[k], A_1[k], A_2[k]). Small inputs fall back to a
// sequential quicksort; larger inputs are sorted leaf-wise in parallel and
// then merged in an even number of passes, ping-ponging between A and the
// workspace W so the final result always lands back in A.
// Returns 0 on success, -1 on invalid input or out of memory (msg is set).
int LAGraph_Sort3 // sort array A of size 3-by-n, using 3 keys (A [0:2][])
(
int64_t *LG_RESTRICT A_0, // size n array
int64_t *LG_RESTRICT A_1, // size n array
int64_t *LG_RESTRICT A_2, // size n array
const int64_t n,
int nthreads, // # of threads to use
char *msg
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
LG_CLEAR_MSG ;
int64_t *LG_RESTRICT W = NULL ;
LG_CHECK (A_0 == NULL, -1, "A_0 is NULL") ;
LG_CHECK (A_1 == NULL, -1, "A_1 is NULL") ;
LG_CHECK (A_2 == NULL, -1, "A_2 is NULL") ;
//--------------------------------------------------------------------------
// handle small problems with a single thread
//--------------------------------------------------------------------------
if (nthreads <= 1 || n <= LG_BASECASE)
{
// sequential quicksort
LG_qsort_3 (A_0, A_1, A_2, n) ;
return (0) ;
}
//--------------------------------------------------------------------------
// determine # of tasks
//--------------------------------------------------------------------------
// determine the number of levels to create, which must always be an
// even number. The # of levels is chosen to ensure that the # of leaves
// of the task tree is between 4*nthreads and 16*nthreads.
// 2 to 4 threads: 4 levels, 16 qsort leaves
// 5 to 16 threads: 6 levels, 64 qsort leaves
// 17 to 64 threads: 8 levels, 256 qsort leaves
// 65 to 256 threads: 10 levels, 1024 qsort leaves
// 256 to 1024 threads: 12 levels, 4096 qsort leaves
// ...
int k = (int) (2 + 2 * ceil (log2 ((double) nthreads) / 2)) ;
int ntasks = 1 << k ;
//--------------------------------------------------------------------------
// allocate workspace
//--------------------------------------------------------------------------
// one workspace block holds: W_0/W_1/W_2 (3*n, the merge ping-pong buffer),
// five per-task arrays of size ntasks, and Slice of size ntasks+1
W = LAGraph_Malloc (3*n + 6*ntasks + 1, sizeof (int64_t)) ;
LG_CHECK (W == NULL, -1, "out of memory") ;
int64_t *T = W ;
int64_t *LG_RESTRICT W_0 = T ; T += n ;
int64_t *LG_RESTRICT W_1 = T ; T += n ;
int64_t *LG_RESTRICT W_2 = T ; T += n ;
int64_t *LG_RESTRICT L_task = T ; T += ntasks ;
int64_t *LG_RESTRICT L_len = T ; T += ntasks ;
int64_t *LG_RESTRICT R_task = T ; T += ntasks ;
int64_t *LG_RESTRICT R_len = T ; T += ntasks ;
int64_t *LG_RESTRICT S_task = T ; T += ntasks ;
int64_t *LG_RESTRICT Slice = T ; T += (ntasks+1) ;
//--------------------------------------------------------------------------
// partition and sort the leaves
//--------------------------------------------------------------------------
// Slice [tid] .. Slice [tid+1]-1 is the leaf range of task tid
LG_eslice (Slice, n, ntasks) ;
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
int64_t leaf = Slice [tid] ;
int64_t leafsize = Slice [tid+1] - leaf ;
LG_qsort_3 (A_0 + leaf, A_1 + leaf, A_2 + leaf, leafsize) ;
}
//--------------------------------------------------------------------------
// merge each level
//--------------------------------------------------------------------------
// nt doubles per pass: each pass merges pairs of runs of nt leaves each
int nt = 1 ;
for ( ; k >= 2 ; k -= 2)
{
//----------------------------------------------------------------------
// merge level k into level k-1, from A into W
//----------------------------------------------------------------------
// this could be done in parallel if ntasks was large
for (int tid = 0 ; tid < ntasks ; tid += 2*nt)
{
// create 2*nt tasks to merge two A sublists into one W sublist
LG_msort_3b_create_merge_tasks (
L_task, L_len, R_task, R_len, S_task, tid, 2*nt, Slice [tid],
A_0, A_1, A_2, Slice [tid], Slice [tid+nt],
A_0, A_1, A_2, Slice [tid+nt], Slice [tid+2*nt]) ;
}
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
// merge A [pL...pL+nL-1] and A [pR...pR+nR-1] into W [pS..]
int64_t pL = L_task [tid], nL = L_len [tid] ;
int64_t pR = R_task [tid], nR = R_len [tid] ;
int64_t pS = S_task [tid] ;
LG_msort_3b_merge (
W_0 + pS, W_1 + pS, W_2 + pS,
A_0 + pL, A_1 + pL, A_2 + pL, nL,
A_0 + pR, A_1 + pR, A_2 + pR, nR) ;
}
nt = 2*nt ;
//----------------------------------------------------------------------
// merge level k-1 into level k-2, from W into A
//----------------------------------------------------------------------
// this could be done in parallel if ntasks was large
for (int tid = 0 ; tid < ntasks ; tid += 2*nt)
{
// create 2*nt tasks to merge two W sublists into one A sublist
LG_msort_3b_create_merge_tasks (
L_task, L_len, R_task, R_len, S_task, tid, 2*nt, Slice [tid],
W_0, W_1, W_2, Slice [tid], Slice [tid+nt],
W_0, W_1, W_2, Slice [tid+nt], Slice [tid+2*nt]) ;
}
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
// merge W [pL...pL+nL-1] and W [pR...pR+nR-1] into A [pS..]
int64_t pL = L_task [tid], nL = L_len [tid] ;
int64_t pR = R_task [tid], nR = R_len [tid] ;
int64_t pS = S_task [tid] ;
LG_msort_3b_merge (
A_0 + pS, A_1 + pS, A_2 + pS,
W_0 + pL, W_1 + pL, W_2 + pL, nL,
W_0 + pR, W_1 + pR, W_2 + pR, nR) ;
}
nt = 2*nt ;
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
LAGraph_FREE_ALL ;
return (0) ;
}
|
simd_metadata.c | // RUN: %clang_cc1 -fopenmp -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86
// RUN: %clang_cc1 -fopenmp -triple x86_64-unknown-unknown -target-feature +avx -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86-AVX
// RUN: %clang_cc1 -fopenmp -triple x86_64-unknown-unknown -target-feature +avx512f -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86-AVX512
// RUN: %clang_cc1 -fopenmp -triple i386-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86
// RUN: %clang_cc1 -fopenmp -triple i386-unknown-unknown -target-feature +avx -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86-AVX
// RUN: %clang_cc1 -fopenmp -triple i386-unknown-unknown -target-feature +avx512f -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86-AVX512
// RUN: %clang_cc1 -fopenmp -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=PPC
// RUN: %clang_cc1 -fopenmp -triple powerpc64-unknown-unknown -target-abi elfv1-qpx -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=PPC-QPX
// RUN: %clang_cc1 -fopenmp-simd -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86
// RUN: %clang_cc1 -fopenmp-simd -triple x86_64-unknown-unknown -target-feature +avx -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86-AVX
// RUN: %clang_cc1 -fopenmp-simd -triple x86_64-unknown-unknown -target-feature +avx512f -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86-AVX512
// RUN: %clang_cc1 -fopenmp-simd -triple i386-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86
// RUN: %clang_cc1 -fopenmp-simd -triple i386-unknown-unknown -target-feature +avx -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86-AVX
// RUN: %clang_cc1 -fopenmp-simd -triple i386-unknown-unknown -target-feature +avx512f -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86-AVX512
// RUN: %clang_cc1 -fopenmp-simd -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=PPC
// RUN: %clang_cc1 -fopenmp-simd -triple powerpc64-unknown-unknown -target-abi elfv1-qpx -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=PPC-QPX
// h1: three 'omp simd' regions over the same loop body; each region's aligned()
// clauses must lower to llvm.assume alignment checks (c: explicit 32 bytes;
// a and b: the target's natural SIMD alignment, hence per-arch check prefixes).
void h1(float *c, float *a, double b[], int size)
{
// CHECK-LABEL: define void @h1
int t = 0;
// Region 1: safelen(16) sets vectorize.width metadata and suppresses access-group tags.
#pragma omp simd safelen(16) linear(t) aligned(c:32) aligned(a,b)
// CHECK: [[C_PTRINT:%.+]] = ptrtoint
// CHECK-NEXT: [[C_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[C_PTRINT]], 31
// CHECK-NEXT: [[C_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[C_MASKEDPTR]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[C_MASKCOND]])
// CHECK: [[A_PTRINT:%.+]] = ptrtoint
// X86-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
// X86-AVX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 31
// X86-AVX512-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 63
// PPC-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
// PPC-QPX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
// CHECK-NEXT: [[A_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[A_MASKEDPTR]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[A_MASKCOND]])
// CHECK: [[B_PTRINT:%.+]] = ptrtoint
// X86-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15
// X86-AVX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31
// X86-AVX512-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 63
// PPC-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15
// PPC-QPX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31
// CHECK-NEXT: [[B_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[B_MASKEDPTR]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[B_MASKCOND]])
for (int i = 0; i < size; ++i) {
c[i] = a[i] * a[i] + b[i] * b[t];
++t;
}
// do not emit llvm.access.group metadata due to usage of safelen clause.
// CHECK-NOT: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.access.group {{![0-9]+}}
// Region 2: same as region 1 but simdlen(8) overrides the vectorize width.
#pragma omp simd safelen(16) linear(t) aligned(c:32) aligned(a,b) simdlen(8)
// CHECK: [[C_PTRINT:%.+]] = ptrtoint
// CHECK-NEXT: [[C_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[C_PTRINT]], 31
// CHECK-NEXT: [[C_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[C_MASKEDPTR]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[C_MASKCOND]])
// CHECK: [[A_PTRINT:%.+]] = ptrtoint
// X86-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
// X86-AVX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 31
// X86-AVX512-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 63
// PPC-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
// PPC-QPX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
// CHECK-NEXT: [[A_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[A_MASKEDPTR]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[A_MASKCOND]])
// CHECK: [[B_PTRINT:%.+]] = ptrtoint
// X86-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15
// X86-AVX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31
// X86-AVX512-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 63
// PPC-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15
// PPC-QPX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31
// CHECK-NEXT: [[B_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[B_MASKEDPTR]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[B_MASKCOND]])
for (int i = 0; i < size; ++i) {
c[i] = a[i] * a[i] + b[i] * b[t];
++t;
}
// do not emit llvm.access.group metadata due to usage of safelen clause.
// CHECK-NOT: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.access.group {{![0-9]+}}
// Region 3: no safelen, so stores are expected to carry llvm.access.group metadata.
#pragma omp simd linear(t) aligned(c:32) aligned(a,b) simdlen(8)
// CHECK: [[C_PTRINT:%.+]] = ptrtoint
// CHECK-NEXT: [[C_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[C_PTRINT]], 31
// CHECK-NEXT: [[C_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[C_MASKEDPTR]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[C_MASKCOND]])
// CHECK: [[A_PTRINT:%.+]] = ptrtoint
// X86-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
// X86-AVX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 31
// X86-AVX512-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 63
// PPC-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
// PPC-QPX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
// CHECK-NEXT: [[A_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[A_MASKEDPTR]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[A_MASKCOND]])
// CHECK: [[B_PTRINT:%.+]] = ptrtoint
// X86-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15
// X86-AVX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31
// X86-AVX512-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 63
// PPC-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15
// PPC-QPX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31
// CHECK-NEXT: [[B_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[B_MASKEDPTR]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[B_MASKCOND]])
for (int i = 0; i < size; ++i) {
c[i] = a[i] * a[i] + b[i] * b[t];
++t;
// CHECK: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.access.group ![[ACCESS_GROUP_7:[0-9]+]]
}
}
// h2: bare 'omp simd' with a linear step; without safelen the stores carry
// llvm.access.group metadata and the loop header gets parallel_accesses metadata.
void h2(float *c, float *a, float *b, int size)
{
// CHECK-LABEL: define void @h2
int t = 0;
#pragma omp simd linear(t)
for (int i = 0; i < size; ++i) {
c[i] = a[i] * a[i] + b[i] * b[t];
++t;
// CHECK: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.access.group ![[ACCESS_GROUP_10:[0-9]+]]
}
// CHECK: br label %{{.+}}, !llvm.loop [[LOOP_H2_HEADER:![0-9]+]]
}
// h3: 'omp simd' on the outer loop only; the inner loop's store is still tagged
// with the outer loop's access group.
void h3(float *c, float *a, float *b, int size)
{
// CHECK-LABEL: define void @h3
#pragma omp simd
for (int i = 0; i < size; ++i) {
for (int j = 0; j < size; ++j) {
c[j*i] = a[i] * b[j];
}
// CHECK: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.access.group ![[ACCESS_GROUP_13:[0-9]+]]
}
// CHECK: br label %{{.+}}, !llvm.loop [[LOOP_H3_HEADER:![0-9]+]]
}
// Metadata for h1:
// CHECK: [[LOOP_H1_HEADER:![0-9]+]] = distinct !{[[LOOP_H1_HEADER]], [[LOOP_WIDTH_16:![0-9]+]], [[LOOP_VEC_ENABLE:![0-9]+]]}
// CHECK: [[LOOP_WIDTH_16]] = !{!"llvm.loop.vectorize.width", i32 16}
// CHECK: [[LOOP_VEC_ENABLE]] = !{!"llvm.loop.vectorize.enable", i1 true}
// CHECK: [[LOOP_H1_HEADER:![0-9]+]] = distinct !{[[LOOP_H1_HEADER]], [[LOOP_WIDTH_8:![0-9]+]], [[LOOP_VEC_ENABLE]]}
// CHECK: [[LOOP_WIDTH_8]] = !{!"llvm.loop.vectorize.width", i32 8}
// CHECK: ![[ACCESS_GROUP_7]] = distinct !{}
// CHECK: [[LOOP_H1_HEADER:![0-9]+]] = distinct !{[[LOOP_H1_HEADER]], [[LOOP_WIDTH_8]], [[LOOP_VEC_ENABLE]], ![[PARALLEL_ACCESSES_9:[0-9]+]]}
// CHECK: ![[PARALLEL_ACCESSES_9]] = !{!"llvm.loop.parallel_accesses", ![[ACCESS_GROUP_7]]}
//
// Metadata for h2:
// CHECK: ![[ACCESS_GROUP_10]] = distinct !{}
// CHECK: [[LOOP_H2_HEADER]] = distinct !{[[LOOP_H2_HEADER]], [[LOOP_VEC_ENABLE]], ![[PARALLEL_ACCESSES_12:[0-9]+]]}
// CHECK: ![[PARALLEL_ACCESSES_12]] = !{!"llvm.loop.parallel_accesses", ![[ACCESS_GROUP_10]]}
//
// Metadata for h3:
// CHECK: ![[ACCESS_GROUP_13]] = distinct !{}
// CHECK: [[LOOP_H3_HEADER]] = distinct !{[[LOOP_H3_HEADER]], [[LOOP_VEC_ENABLE]], ![[PARALLEL_ACCESSES_15:[0-9]+]]}
// CHECK: ![[PARALLEL_ACCESSES_15]] = !{!"llvm.loop.parallel_accesses", ![[ACCESS_GROUP_13]]}
//
|
program_schedule_runtime.c | #include <stdio.h>
#include <omp.h>
#include <stdlib.h>
#include <unistd.h>
/* Demonstrates schedule(runtime): the loop schedule is taken at run time
 * from the OMP_SCHEDULE environment variable. Usage: <prog> <thread_count> <n>.
 * Prints which thread executed each iteration. */
int main(int argc, char* argv[]) {
    /* Fix: the original dereferenced argv[1]/argv[2] without checking argc,
     * crashing when arguments were missing. */
    if (argc < 3) {
        fprintf(stderr, "usage: %s <thread_count> <n>\n", argv[0]);
        return 1;
    }
    int thread_count = strtol(argv[1], NULL, 10);
    int n = strtol(argv[2], NULL, 10);
    if (thread_count < 1)
        thread_count = 1; /* num_threads() requires a positive value */
#pragma omp parallel for num_threads(thread_count) schedule(runtime)
    for (int i = 0; i < n; i++) {
        printf("i=%d, thread_id=%d\n", i, omp_get_thread_num());
    }
    return 0;
} |
ParticleBConds3DSoa.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
// Amrita Mathuriya, amrita.mathuriya@intel.com, Intel Corp.
//
// File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////
// -*- C++ -*-
#ifndef QMCPLUSPLUS_PARTICLE_BCONDS_3D_SOA_H
#define QMCPLUSPLUS_PARTICLE_BCONDS_3D_SOA_H
#include <config.h>
#include <algorithm>
#include "Lattice/CrystalLattice.h"
namespace qmcplusplus
{
/** specialization for an open 3D
*/
template<class T>
struct DTD_BConds<T, 3, SUPERCELL_OPEN + SOA_OFFSET>
{
/** constructor: doing nothing */
inline DTD_BConds(const CrystalLattice<T, 3>& lat) {}
template<typename PT, typename RSOA, typename DISPLSOA>
void computeDistances(const PT& pos,
const RSOA& R0,
T* restrict temp_r,
DISPLSOA& temp_dr,
int first,
int last,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0.data(0);
const T* restrict py = R0.data(1);
const T* restrict pz = R0.data(2);
T* restrict dx = temp_dr.data(0);
T* restrict dy = temp_dr.data(1);
T* restrict dz = temp_dr.data(2);
#pragma omp simd aligned(temp_r, px, py, pz, dx, dy, dz)
for (int iat = first; iat < last; ++iat)
{
dx[iat] = px[iat] - x0;
dy[iat] = py[iat] - y0;
dz[iat] = pz[iat] - z0;
temp_r[iat] = std::sqrt(dx[iat] * dx[iat] + dy[iat] * dy[iat] + dz[iat] * dz[iat]);
}
}
void computeDistancesOffload(const T pos[3],
const T* restrict R0,
T* restrict temp_r,
T* restrict temp_dr,
int padded_size,
int iat,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0;
const T* restrict py = R0 + padded_size;
const T* restrict pz = R0 + padded_size * 2;
T* restrict dx = temp_dr;
T* restrict dy = temp_dr + padded_size;
T* restrict dz = temp_dr + padded_size * 2;
dx[iat] = px[iat] - x0;
dy[iat] = py[iat] - y0;
dz[iat] = pz[iat] - z0;
temp_r[iat] = std::sqrt(dx[iat] * dx[iat] + dy[iat] * dy[iat] + dz[iat] * dz[iat]);
}
};
/** specialization for a periodic 3D, orthorombic cell
*/
template<class T>
struct DTD_BConds<T, 3, PPPO + SOA_OFFSET>
{
// Axis lengths (L*) and inverse lengths (Linv*) of the orthorhombic cell,
// cached from the lattice; r2max caches lat.CellRadiusSq.
// dummy is an unused filler member -- purpose not evident here.
T Linv0, L0, Linv1, L1, Linv2, L2, r2max, dummy;
inline DTD_BConds(const CrystalLattice<T, 3>& lat)
: Linv0(lat.OneOverLength[0]),
L0(lat.Length[0]),
Linv1(lat.OneOverLength[1]),
L1(lat.Length[1]),
Linv2(lat.OneOverLength[2]),
L2(lat.Length[2]),
r2max(lat.CellRadiusSq),
dummy(T())
{}
// Distances/displacements from pos to particles [first, last), applying the
// minimum-image convention independently on each axis. temp_r gets distances,
// temp_dr the displacement components (SoA). flip_ind is unused here.
template<typename PT, typename RSOA, typename DISPLSOA>
void computeDistances(const PT& pos,
const RSOA& R0,
T* restrict temp_r,
DISPLSOA& temp_dr,
int first,
int last,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0.data(0);
const T* restrict py = R0.data(1);
const T* restrict pz = R0.data(2);
T* restrict dx = temp_dr.data(0);
T* restrict dy = temp_dr.data(1);
T* restrict dz = temp_dr.data(2);
#pragma omp simd aligned(temp_r, px, py, pz, dx, dy, dz)
for (int iat = first; iat < last; ++iat)
{
// fractional offset per axis; round() wraps it into [-0.5, 0.5]
const T x = (px[iat] - x0) * Linv0;
const T y = (py[iat] - y0) * Linv1;
const T z = (pz[iat] - z0) * Linv2;
dx[iat] = L0 * (x - round(x));
dy[iat] = L1 * (y - round(y));
dz[iat] = L2 * (z - round(z));
temp_r[iat] = std::sqrt(dx[iat] * dx[iat] + dy[iat] * dy[iat] + dz[iat] * dz[iat]);
}
}
// Single-particle variant for offload: R0/temp_dr are flat arrays whose
// x/y/z planes are padded_size elements apart.
void computeDistancesOffload(const T pos[3],
const T* restrict R0,
T* restrict temp_r,
T* restrict temp_dr,
int padded_size,
int iat,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0;
const T* restrict py = R0 + padded_size;
const T* restrict pz = R0 + padded_size * 2;
T* restrict dx = temp_dr;
T* restrict dy = temp_dr + padded_size;
T* restrict dz = temp_dr + padded_size * 2;
const T x = (px[iat] - x0) * Linv0;
const T y = (py[iat] - y0) * Linv1;
const T z = (pz[iat] - z0) * Linv2;
dx[iat] = L0 * (x - round(x));
dy[iat] = L1 * (y - round(y));
dz[iat] = L2 * (z - round(z));
temp_r[iat] = std::sqrt(dx[iat] * dx[iat] + dy[iat] * dy[iat] + dz[iat] * dz[iat]);
}
};
/** specialization for a periodic 3D general cell with wigner-seitz==simulation cell
*
* Skip image cells.
*/
template<class T>
struct DTD_BConds<T, 3, PPPS + SOA_OFFSET>
{
// r**: lattice-vector matrix elements (from lat.R, unit -> Cartesian)
// g**: reciprocal matrix elements (from lat.G, Cartesian -> unit)
T r00, r10, r20, r01, r11, r21, r02, r12, r22;
T g00, g10, g20, g01, g11, g21, g02, g12, g22;
DTD_BConds(const CrystalLattice<T, 3>& lat)
: r00(lat.R(0)),
r10(lat.R(3)),
r20(lat.R(6)),
r01(lat.R(1)),
r11(lat.R(4)),
r21(lat.R(7)),
r02(lat.R(2)),
r12(lat.R(5)),
r22(lat.R(8)),
g00(lat.G(0)),
g10(lat.G(3)),
g20(lat.G(6)),
g01(lat.G(1)),
g11(lat.G(4)),
g21(lat.G(7)),
g02(lat.G(2)),
g12(lat.G(5)),
g22(lat.G(8))
{}
// Distances/displacements from pos to particles [first, last):
// convert to fractional coordinates, wrap with round(), convert back.
// No image-cell search is done (valid when Wigner-Seitz == simulation cell).
template<typename PT, typename RSOA, typename DISPLSOA>
void computeDistances(const PT& pos,
const RSOA& R0,
T* restrict temp_r,
DISPLSOA& temp_dr,
int first,
int last,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0.data(0);
const T* restrict py = R0.data(1);
const T* restrict pz = R0.data(2);
T* restrict dx = temp_dr.data(0);
T* restrict dy = temp_dr.data(1);
T* restrict dz = temp_dr.data(2);
#pragma omp simd aligned(temp_r, px, py, pz, dx, dy, dz)
for (int iat = first; iat < last; ++iat)
{
T displ_0 = px[iat] - x0;
T displ_1 = py[iat] - y0;
T displ_2 = pz[iat] - z0;
// cart2unit: project displacement onto fractional coordinates
T ar_0 = displ_0 * g00 + displ_1 * g10 + displ_2 * g20;
T ar_1 = displ_0 * g01 + displ_1 * g11 + displ_2 * g21;
T ar_2 = displ_0 * g02 + displ_1 * g12 + displ_2 * g22;
//put them in the box
ar_0 -= round(ar_0);
ar_1 -= round(ar_1);
ar_2 -= round(ar_2);
//unit2cart
dx[iat] = ar_0 * r00 + ar_1 * r10 + ar_2 * r20;
dy[iat] = ar_0 * r01 + ar_1 * r11 + ar_2 * r21;
dz[iat] = ar_0 * r02 + ar_1 * r12 + ar_2 * r22;
temp_r[iat] = std::sqrt(dx[iat] * dx[iat] + dy[iat] * dy[iat] + dz[iat] * dz[iat]);
}
}
// Single-particle variant for offload: R0/temp_dr are flat arrays whose
// x/y/z planes are padded_size elements apart.
void computeDistancesOffload(const T pos[3],
const T* restrict R0,
T* restrict temp_r,
T* restrict temp_dr,
int padded_size,
int iat,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0;
const T* restrict py = R0 + padded_size;
const T* restrict pz = R0 + padded_size * 2;
T* restrict dx = temp_dr;
T* restrict dy = temp_dr + padded_size;
T* restrict dz = temp_dr + padded_size * 2;
T displ_0 = px[iat] - x0;
T displ_1 = py[iat] - y0;
T displ_2 = pz[iat] - z0;
T ar_0 = displ_0 * g00 + displ_1 * g10 + displ_2 * g20;
T ar_1 = displ_0 * g01 + displ_1 * g11 + displ_2 * g21;
T ar_2 = displ_0 * g02 + displ_1 * g12 + displ_2 * g22;
//put them in the box
ar_0 -= round(ar_0);
ar_1 -= round(ar_1);
ar_2 -= round(ar_2);
//unit2cart
dx[iat] = ar_0 * r00 + ar_1 * r10 + ar_2 * r20;
dy[iat] = ar_0 * r01 + ar_1 * r11 + ar_2 * r21;
dz[iat] = ar_0 * r02 + ar_1 * r12 + ar_2 * r22;
temp_r[iat] = std::sqrt(dx[iat] * dx[iat] + dy[iat] * dy[iat] + dz[iat] * dz[iat]);
}
};
/** specialization for a periodic 3D general cell
*
* Wigner-Seitz cell radius > simulation cell radius
* Need to check image cells
*/
template<class T>
struct DTD_BConds<T, 3, PPPG + SOA_OFFSET>
{
// g**: Cartesian -> fractional matrix (inverse of the reduced basis)
// r**: reduced-basis lattice vectors (unit -> Cartesian)
T g00, g10, g20, g01, g11, g21, g02, g12, g22;
T r00, r10, r20, r01, r11, r21, r02, r12, r22;
// Negated corner translations of the home parallelepiped (8 per component):
// candidate shifts checked to find the nearest periodic image.
TinyVector<TinyVector<T, 8>, 3> corners;
DTD_BConds(const CrystalLattice<T, 3>& lat)
{
// Work with a reduced (most compact) basis of the same lattice.
TinyVector<TinyVector<T, 3>, 3> rb;
rb[0] = lat.a(0);
rb[1] = lat.a(1);
rb[2] = lat.a(2);
find_reduced_basis(rb);
r00 = rb[0][0];
r10 = rb[1][0];
r20 = rb[2][0];
r01 = rb[0][1];
r11 = rb[1][1];
r21 = rb[2][1];
r02 = rb[0][2];
r12 = rb[1][2];
r22 = rb[2][2];
// Invert the reduced basis to get the Cartesian -> fractional transform.
Tensor<T, 3> rbt;
for (int i = 0; i < 3; ++i)
for (int j = 0; j < 3; ++j)
rbt(i, j) = rb[i][j];
Tensor<T, 3> g = inverse(rbt);
g00 = g(0);
g10 = g(3);
g20 = g(6);
g01 = g(1);
g11 = g(4);
g21 = g(7);
g02 = g(2);
g12 = g(5);
g22 = g(8);
constexpr T minusone(-1);
constexpr T zero(0);
// Tabulate the negatives of all sums of basis vectors (the 8 cell corners).
for (int idim = 0; idim < 3; idim++)
{
auto& corners_dim = corners[idim];
corners_dim[0] = zero;
corners_dim[1] = minusone * (rb[0][idim]);
corners_dim[2] = minusone * (rb[1][idim]);
corners_dim[3] = minusone * (rb[2][idim]);
corners_dim[4] = minusone * (rb[0][idim] + rb[1][idim]);
corners_dim[5] = minusone * (rb[0][idim] + rb[2][idim]);
corners_dim[6] = minusone * (rb[1][idim] + rb[2][idim]);
corners_dim[7] = minusone * (rb[0][idim] + rb[1][idim] + rb[2][idim]);
}
}
// Distances/displacements from pos to particles [first, last).
// The displacement sign is flipped for iat >= flip_ind before imaging and
// flipped back on output, so both halves of a distance table share one code path.
template<typename PT, typename RSOA, typename DISPLSOA>
void computeDistances(const PT& pos,
const RSOA& R0,
T* restrict temp_r,
DISPLSOA& temp_dr,
int first,
int last,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0.data(0);
const T* restrict py = R0.data(1);
const T* restrict pz = R0.data(2);
T* restrict dx = temp_dr.data(0);
T* restrict dy = temp_dr.data(1);
T* restrict dz = temp_dr.data(2);
const auto& cellx = corners[0];
const auto& celly = corners[1];
const auto& cellz = corners[2];
constexpr T minusone(-1);
constexpr T one(1);
#pragma omp simd aligned(temp_r, px, py, pz, dx, dy, dz)
for (int iat = first; iat < last; ++iat)
{
const T flip = iat < flip_ind ? one : minusone;
const T displ_0 = (px[iat] - x0) * flip;
const T displ_1 = (py[iat] - y0) * flip;
const T displ_2 = (pz[iat] - z0) * flip;
// -floor(frac) shifts the fractional coordinates into [0, 1)
const T ar_0 = -std::floor(displ_0 * g00 + displ_1 * g10 + displ_2 * g20);
const T ar_1 = -std::floor(displ_0 * g01 + displ_1 * g11 + displ_2 * g21);
const T ar_2 = -std::floor(displ_0 * g02 + displ_1 * g12 + displ_2 * g22);
const T delx = displ_0 + ar_0 * r00 + ar_1 * r10 + ar_2 * r20;
const T dely = displ_1 + ar_0 * r01 + ar_1 * r11 + ar_2 * r21;
const T delz = displ_2 + ar_0 * r02 + ar_1 * r12 + ar_2 * r22;
T rmin = delx * delx + dely * dely + delz * delz;
int ic = 0;
// Try the 8 corner translations; keep the shortest image (branchless selects).
#pragma unroll(7)
for (int c = 1; c < 8; ++c)
{
const T x = delx + cellx[c];
const T y = dely + celly[c];
const T z = delz + cellz[c];
const T r2 = x * x + y * y + z * z;
ic = (r2 < rmin) ? c : ic;
rmin = (r2 < rmin) ? r2 : rmin;
}
temp_r[iat] = std::sqrt(rmin);
// undo the sign flip on the stored displacement
dx[iat] = flip * (delx + cellx[ic]);
dy[iat] = flip * (dely + celly[ic]);
dz[iat] = flip * (delz + cellz[ic]);
}
}
// Single-particle variant for offload: same algorithm as above;
// R0/temp_dr are flat arrays with x/y/z planes padded_size elements apart.
void computeDistancesOffload(const T pos[3],
const T* restrict R0,
T* restrict temp_r,
T* restrict temp_dr,
int padded_size,
int iat,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0;
const T* restrict py = R0 + padded_size;
const T* restrict pz = R0 + padded_size * 2;
T* restrict dx = temp_dr;
T* restrict dy = temp_dr + padded_size;
T* restrict dz = temp_dr + padded_size * 2;
const auto& cellx = corners[0];
const auto& celly = corners[1];
const auto& cellz = corners[2];
constexpr T minusone(-1);
constexpr T one(1);
const T flip = iat < flip_ind ? one : minusone;
const T displ_0 = (px[iat] - x0) * flip;
const T displ_1 = (py[iat] - y0) * flip;
const T displ_2 = (pz[iat] - z0) * flip;
const T ar_0 = -std::floor(displ_0 * g00 + displ_1 * g10 + displ_2 * g20);
const T ar_1 = -std::floor(displ_0 * g01 + displ_1 * g11 + displ_2 * g21);
const T ar_2 = -std::floor(displ_0 * g02 + displ_1 * g12 + displ_2 * g22);
const T delx = displ_0 + ar_0 * r00 + ar_1 * r10 + ar_2 * r20;
const T dely = displ_1 + ar_0 * r01 + ar_1 * r11 + ar_2 * r21;
const T delz = displ_2 + ar_0 * r02 + ar_1 * r12 + ar_2 * r22;
T rmin = delx * delx + dely * dely + delz * delz;
int ic = 0;
#pragma unroll(7)
for (int c = 1; c < 8; ++c)
{
const T x = delx + cellx[c];
const T y = dely + celly[c];
const T z = delz + cellz[c];
const T r2 = x * x + y * y + z * z;
ic = (r2 < rmin) ? c : ic;
rmin = (r2 < rmin) ? r2 : rmin;
}
temp_r[iat] = std::sqrt(rmin);
dx[iat] = flip * (delx + cellx[ic]);
dy[iat] = flip * (dely + celly[ic]);
dz[iat] = flip * (delz + cellz[ic]);
}
};
/** specialization for a slab, general cell
*/
template<class T>
struct DTD_BConds<T, 3, PPNG + SOA_OFFSET>
{
// In-plane (x/y) 2x2 blocks of the reciprocal (g**) and lattice (r**) matrices;
// z is treated as open.
T g00, g10, g01, g11;
T r00, r10, r01, r11;
TinyVector<TinyVector<T, 3>, 3> rb;
// Negated in-plane corner translations (4 candidates per component).
TinyVector<TinyVector<T, 4>, 2> corners;
DTD_BConds(const CrystalLattice<T, 3>& lat)
{
rb[0] = lat.a(0);
rb[1] = lat.a(1);
rb[2] = lat.a(2); //rb[2]=0.0;
r00 = rb[0][0];
r10 = rb[1][0];
r01 = rb[0][1];
r11 = rb[1][1];
g00 = lat.G(0);
g10 = lat.G(3);
g01 = lat.G(1);
g11 = lat.G(4);
T minusone = -1.0;
// Tabulate the negatives of the in-plane basis-vector sums (4 cell corners).
for (int idim = 0; idim < 2; idim++)
{
auto& corners_dim = corners[idim];
corners_dim[0] = T(0);
corners_dim[1] = minusone * (rb[0][idim]);
corners_dim[2] = minusone * (rb[1][idim]);
corners_dim[3] = minusone * (rb[0][idim] + rb[1][idim]);
}
}
// Distances/displacements from pos to particles [first, last): periodic imaging
// in the x/y plane with a 4-corner nearest-image search; z is a plain difference.
// The sign-flip trick (see flip below) mirrors the PPPG specialization.
template<typename PT, typename RSOA, typename DISPLSOA>
void computeDistances(const PT& pos,
const RSOA& R0,
T* restrict temp_r,
DISPLSOA& temp_dr,
int first,
int last,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0.data(0);
const T* restrict py = R0.data(1);
const T* restrict pz = R0.data(2);
T* restrict dx = temp_dr.data(0);
T* restrict dy = temp_dr.data(1);
T* restrict dz = temp_dr.data(2);
const auto& cellx = corners[0];
const auto& celly = corners[1];
constexpr T minusone(-1);
constexpr T one(1);
#pragma omp simd aligned(temp_r, px, py, pz, dx, dy, dz)
for (int iat = first; iat < last; ++iat)
{
const T flip = iat < flip_ind ? one : minusone;
const T displ_0 = (px[iat] - x0) * flip;
const T displ_1 = (py[iat] - y0) * flip;
const T delz = pz[iat] - z0;
// -floor(frac) shifts the in-plane fractional coordinates into [0, 1)
const T ar_0 = -std::floor(displ_0 * g00 + displ_1 * g10);
const T ar_1 = -std::floor(displ_0 * g01 + displ_1 * g11);
const T delx = displ_0 + ar_0 * r00 + ar_1 * r10;
const T dely = displ_1 + ar_0 * r01 + ar_1 * r11;
T rmin = delx * delx + dely * dely;
int ic = 0;
// Try the 4 in-plane corner translations; keep the shortest image.
#pragma unroll(3)
for (int c = 1; c < 4; ++c)
{
const T x = delx + cellx[c];
const T y = dely + celly[c];
const T r2 = x * x + y * y;
ic = (r2 < rmin) ? c : ic;
rmin = (r2 < rmin) ? r2 : rmin;
}
temp_r[iat] = std::sqrt(rmin + delz * delz);
dx[iat] = flip * (delx + cellx[ic]);
dy[iat] = flip * (dely + celly[ic]);
dz[iat] = delz;
}
}
// Single-particle variant for offload: same algorithm; R0/temp_dr are flat
// arrays with x/y/z planes padded_size elements apart.
void computeDistancesOffload(const T pos[3],
const T* restrict R0,
T* restrict temp_r,
T* restrict temp_dr,
int padded_size,
int iat,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0;
const T* restrict py = R0 + padded_size;
const T* restrict pz = R0 + padded_size * 2;
T* restrict dx = temp_dr;
T* restrict dy = temp_dr + padded_size;
T* restrict dz = temp_dr + padded_size * 2;
const auto& cellx = corners[0];
const auto& celly = corners[1];
constexpr T minusone(-1);
constexpr T one(1);
const T flip = iat < flip_ind ? one : minusone;
const T displ_0 = (px[iat] - x0) * flip;
const T displ_1 = (py[iat] - y0) * flip;
const T delz = pz[iat] - z0;
const T ar_0 = -std::floor(displ_0 * g00 + displ_1 * g10);
const T ar_1 = -std::floor(displ_0 * g01 + displ_1 * g11);
const T delx = displ_0 + ar_0 * r00 + ar_1 * r10;
const T dely = displ_1 + ar_0 * r01 + ar_1 * r11;
T rmin = delx * delx + dely * dely;
int ic = 0;
#pragma unroll(3)
for (int c = 1; c < 4; ++c)
{
const T x = delx + cellx[c];
const T y = dely + celly[c];
const T r2 = x * x + y * y;
ic = (r2 < rmin) ? c : ic;
rmin = (r2 < rmin) ? r2 : rmin;
}
temp_r[iat] = std::sqrt(rmin + delz * delz);
dx[iat] = flip * (delx + cellx[ic]);
dy[iat] = flip * (dely + celly[ic]);
dz[iat] = delz;
}
};
/** specialization for a slab, orthorombic cell
*/
template<class T>
struct DTD_BConds<T, 3, PPNO + SOA_OFFSET>
{
T Linv0, L0, Linv1, L1;
inline DTD_BConds(const CrystalLattice<T, 3>& lat)
: Linv0(lat.OneOverLength[0]), L0(lat.Length[0]), Linv1(lat.OneOverLength[1]), L1(lat.Length[1])
{}
template<typename PT, typename RSOA, typename DISPLSOA>
void computeDistances(const PT& pos,
const RSOA& R0,
T* restrict temp_r,
DISPLSOA& temp_dr,
int first,
int last,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0.data(0);
const T* restrict py = R0.data(1);
const T* restrict pz = R0.data(2);
T* restrict dx = temp_dr.data(0);
T* restrict dy = temp_dr.data(1);
T* restrict dz = temp_dr.data(2);
#pragma omp simd aligned(temp_r, px, py, pz, dx, dy, dz)
for (int iat = first; iat < last; ++iat)
{
T x = (px[iat] - x0) * Linv0;
dx[iat] = L0 * (x - round(x));
T y = (py[iat] - y0) * Linv1;
dy[iat] = L1 * (y - round(y));
dz[iat] = pz[iat] - z0;
temp_r[iat] = std::sqrt(dx[iat] * dx[iat] + dy[iat] * dy[iat] + dz[iat] * dz[iat]);
}
}
void computeDistancesOffload(const T pos[3],
const T* restrict R0,
T* restrict temp_r,
T* restrict temp_dr,
int padded_size,
int iat,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0;
const T* restrict py = R0 + padded_size;
const T* restrict pz = R0 + padded_size * 2;
T* restrict dx = temp_dr;
T* restrict dy = temp_dr + padded_size;
T* restrict dz = temp_dr + padded_size * 2;
T x = (px[iat] - x0) * Linv0;
dx[iat] = L0 * (x - round(x));
T y = (py[iat] - y0) * Linv1;
dy[iat] = L1 * (y - round(y));
dz[iat] = pz[iat] - z0;
temp_r[iat] = std::sqrt(dx[iat] * dx[iat] + dy[iat] * dy[iat] + dz[iat] * dz[iat]);
}
};
/** specialization for a slab, general cell
*/
template<class T>
struct DTD_BConds<T, 3, PPNS + SOA_OFFSET>
{
// In-plane 2x2 blocks: r** from lat.R (unit -> Cartesian),
// g** from lat.G (Cartesian -> unit); z is open.
T r00, r10, r01, r11;
T g00, g10, g01, g11;
DTD_BConds(const CrystalLattice<T, 3>& lat)
: r00(lat.R(0)),
r10(lat.R(3)),
r01(lat.R(1)),
r11(lat.R(4)),
g00(lat.G(0)),
g10(lat.G(3)),
g01(lat.G(1)),
g11(lat.G(4))
{}
// Distances/displacements from pos to particles [first, last): wrap the
// in-plane fractional coordinates with round(), leave z unwrapped.
// No image-cell search is performed (skip-image variant).
template<typename PT, typename RSOA, typename DISPLSOA>
void computeDistances(const PT& pos,
const RSOA& R0,
T* restrict temp_r,
DISPLSOA& temp_dr,
int first,
int last,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0.data(0);
const T* restrict py = R0.data(1);
const T* restrict pz = R0.data(2);
T* restrict dx = temp_dr.data(0);
T* restrict dy = temp_dr.data(1);
T* restrict dz = temp_dr.data(2);
#pragma omp simd aligned(temp_r, px, py, pz, dx, dy, dz)
for (int iat = first; iat < last; ++iat)
{
T displ_0 = px[iat] - x0;
T displ_1 = py[iat] - y0;
// cart2unit for the periodic plane
T ar_0 = displ_0 * g00 + displ_1 * g10;
T ar_1 = displ_0 * g01 + displ_1 * g11;
//put them in the box
ar_0 -= round(ar_0);
ar_1 -= round(ar_1);
//unit2cart
dx[iat] = ar_0 * r00 + ar_1 * r10;
dy[iat] = ar_0 * r01 + ar_1 * r11;
dz[iat] = pz[iat] - z0;
temp_r[iat] = std::sqrt(dx[iat] * dx[iat] + dy[iat] * dy[iat] + dz[iat] * dz[iat]);
}
}
// Single-particle variant for offload: R0/temp_dr are flat arrays whose
// x/y/z planes are padded_size elements apart.
void computeDistancesOffload(const T pos[3],
const T* restrict R0,
T* restrict temp_r,
T* restrict temp_dr,
int padded_size,
int iat,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0;
const T* restrict py = R0 + padded_size;
const T* restrict pz = R0 + padded_size * 2;
T* restrict dx = temp_dr;
T* restrict dy = temp_dr + padded_size;
T* restrict dz = temp_dr + padded_size * 2;
T displ_0 = px[iat] - x0;
T displ_1 = py[iat] - y0;
T ar_0 = displ_0 * g00 + displ_1 * g10;
T ar_1 = displ_0 * g01 + displ_1 * g11;
//put them in the box
ar_0 -= round(ar_0);
ar_1 -= round(ar_1);
//unit2cart
dx[iat] = ar_0 * r00 + ar_1 * r10;
dy[iat] = ar_0 * r01 + ar_1 * r11;
dz[iat] = pz[iat] - z0;
temp_r[iat] = std::sqrt(dx[iat] * dx[iat] + dy[iat] * dy[iat] + dz[iat] * dz[iat]);
}
};
/** specialization for a wire
*/
template<class T>
struct DTD_BConds<T, 3, SUPERCELL_WIRE + SOA_OFFSET>
{
T Linv0, L0;
inline DTD_BConds(const CrystalLattice<T, 3>& lat) : Linv0(lat.OneOverLength[0]), L0(lat.Length[0]) {}
template<typename PT, typename RSOA, typename DISPLSOA>
void computeDistances(const PT& pos,
const RSOA& R0,
T* restrict temp_r,
DISPLSOA& temp_dr,
int first,
int last,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0.data(0);
const T* restrict py = R0.data(1);
const T* restrict pz = R0.data(2);
T* restrict dx = temp_dr.data(0);
T* restrict dy = temp_dr.data(1);
T* restrict dz = temp_dr.data(2);
#pragma omp simd aligned(temp_r, px, py, pz, dx, dy, dz)
for (int iat = first; iat < last; ++iat)
{
T x = (px[iat] - x0) * Linv0;
dx[iat] = L0 * (x - round(x));
dy[iat] = py[iat] - y0;
dz[iat] = pz[iat] - z0;
temp_r[iat] = std::sqrt(dx[iat] * dx[iat] + dy[iat] * dy[iat] + dz[iat] * dz[iat]);
}
}
void computeDistancesOffload(const T pos[3],
const T* restrict R0,
T* restrict temp_r,
T* restrict temp_dr,
int padded_size,
int iat,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0;
const T* restrict py = R0 + padded_size;
const T* restrict pz = R0 + padded_size * 2;
T* restrict dx = temp_dr;
T* restrict dy = temp_dr + padded_size;
T* restrict dz = temp_dr + padded_size * 2;
T x = (px[iat] - x0) * Linv0;
dx[iat] = L0 * (x - round(x));
dy[iat] = py[iat] - y0;
dz[iat] = pz[iat] - z0;
temp_r[iat] = std::sqrt(dx[iat] * dx[iat] + dy[iat] * dy[iat] + dz[iat] * dz[iat]);
}
};
/** specialization for a periodic 3D general cell
*
* Slow method and not used unless one needs to check if faster methods fail
*/
template<class T>
struct DTD_BConds<T, 3, PPPX + SOA_OFFSET>
{
  // r**: lattice-vector matrix (lat.R); g**: reciprocal matrix (lat.G)
  T r00, r10, r20, r01, r11, r21, r02, r12, r22;
  T g00, g10, g20, g01, g11, g21, g02, g12, g22;
  T r2max; // squared simulation-cell radius (lat.CellRadiusSq)
  // Cartesian offsets of the 26 neighboring image cells (home cell excluded),
  // one TinyVector per Cartesian component.
  TinyVector<TinyVector<T, 26>, 3> nextcells;

  DTD_BConds(const CrystalLattice<T, 3>& lat)
      : r00(lat.R(0)),
        r10(lat.R(3)),
        r20(lat.R(6)),
        r01(lat.R(1)),
        r11(lat.R(4)),
        r21(lat.R(7)),
        r02(lat.R(2)),
        r12(lat.R(5)),
        r22(lat.R(8)),
        g00(lat.G(0)),
        g10(lat.G(3)),
        g20(lat.G(6)),
        g01(lat.G(1)),
        g11(lat.G(4)),
        g21(lat.G(7)),
        g02(lat.G(2)),
        g12(lat.G(5)),
        g22(lat.G(8)),
        r2max(lat.CellRadiusSq)
  {
    // Mutable references: the image-cell tables are filled below
    // (was `const auto&`, which wrote through a const reference).
    auto& cellx = nextcells[0];
    auto& celly = nextcells[1];
    auto& cellz = nextcells[2];
    int ic = 0;
    for (int i = -1; i <= 1; ++i)
      for (int j = -1; j <= 1; ++j)
        for (int k = -1; k <= 1; ++k)
        {
          // Fix: condition was `i == 0 && j == 0 && j == 0` (duplicated j),
          // which skipped the (0,0,+-1) neighbor cells and left the last two
          // table entries uninitialized. Only the home cell must be excluded.
          if (i == 0 && j == 0 && k == 0)
            continue; //exclude zero
          cellx[ic] = i * r00 + j * r10 + k * r20;
          celly[ic] = i * r01 + j * r11 + k * r21;
          cellz[ic] = i * r02 + j * r12 + k * r22;
          ++ic;
        }
  }

  /// Not implemented: this slow checking-only specialization aborts if used.
  template<typename PT, typename RSOA, typename DISPLSOA>
  void computeDistances(const PT& pos,
                        const RSOA& R0,
                        T* restrict temp_r,
                        DISPLSOA& temp_dr,
                        int first,
                        int last,
                        int flip_ind = 0)
  {
    APP_ABORT("DTD_BConds<T,3,PPPX> not implemented");
  }

  /// Intentionally a no-op: offload path not implemented for PPPX.
  void computeDistancesOffload(const T pos[3],
                               const T* restrict R0,
                               T* restrict temp_r,
                               T* restrict temp_dr,
                               int padded_size,
                               int iat,
                               int flip_ind = 0)
  {
    //APP_ABORT("DTD_BConds<T, 3, PPPX + SOA_OFFSET>::computeDistancesOffload not implemented");
  }
};
/** specialization for a slab, general cell
*/
template<class T>
struct DTD_BConds<T, 3, PPNX + SOA_OFFSET>
{
  // In-plane 2x2 blocks: r** from lat.R, g** from lat.G; z is open.
  T r00, r10, r01, r11;
  T g00, g10, g01, g11;
  T r2max; // squared simulation-cell radius (lat.CellRadiusSq)
  // Cartesian offsets of the 8 in-plane neighbor cells (home cell excluded);
  // the z component is always zero for a slab.
  TinyVector<TinyVector<T, 8>, 3> nextcells;

  DTD_BConds(const CrystalLattice<T, 3>& lat)
      : r00(lat.R(0)),
        r10(lat.R(3)),
        r01(lat.R(1)),
        r11(lat.R(4)),
        g00(lat.G(0)),
        g10(lat.G(3)),
        g01(lat.G(1)),
        g11(lat.G(4)),
        r2max(lat.CellRadiusSq)
  {
    // Fix: mutable references are required -- the tables are written below
    // (was `const auto&`, which wrote through a const reference).
    auto& cellx = nextcells[0];
    auto& celly = nextcells[1];
    auto& cellz = nextcells[2];
    int ic = 0;
    for (int i = -1; i <= 1; ++i)
      for (int j = -1; j <= 1; ++j)
      {
        if (i == 0 && j == 0)
          continue; //exclude zero
        cellx[ic] = i * r00 + j * r10;
        celly[ic] = i * r01 + j * r11;
        cellz[ic] = T();
        ++ic;
      }
  }

  /// Not implemented: this slow checking-only specialization aborts if used.
  template<typename PT, typename RSOA, typename DISPLSOA>
  void computeDistances(const PT& pos,
                        const RSOA& R0,
                        T* restrict temp_r,
                        DISPLSOA& temp_dr,
                        int first,
                        int last,
                        int flip_ind = 0)
  {
    APP_ABORT("DTD_BConds<T,3,PPNX> not implemented");
  }

  /// Intentionally a no-op: offload path not implemented for PPNX.
  void computeDistancesOffload(const T pos[3],
                               const T* restrict R0,
                               T* restrict temp_r,
                               T* restrict temp_dr,
                               int padded_size,
                               int iat,
                               int flip_ind = 0)
  {
    //APP_ABORT("DTD_BConds<T, 3, PPNX + SOA_OFFSET>::computeDistancesOffload not implemented");
  }
};
} // namespace qmcplusplus
#endif // OHMMS_PARTICLE_BCONDS_3D_H
|
ZQ_FaceIDPrecisionEvaluation.h | #ifndef _ZQ_FACEID_PRECISION_EVALUATION_H_
#define _ZQ_FACEID_PRECISION_EVALUATION_H_
#pragma once
#include "ZQ_FaceRecognizer.h"
#include "ZQ_FaceFeature.h"
#include "ZQ_MathBase.h"
#include "ZQ_MergeSort.h"
#include <opencv2\opencv.hpp>
#include <vector>
#include <stdlib.h>
#include <string>
#include <omp.h>
namespace ZQ
{
class ZQ_FaceIDPrecisionEvaluation
{
// One row of the LFW pair protocol: two face images plus their extracted
// features. `flag` is +1 when both images show the same person, -1 otherwise.
class EvaluationPair
{
public:
  std::string fileL;    // full path of the left image
  std::string nameL;    // person name of the left image
  int idL;              // image number of the left image (used in <name>_%04d.jpg)
  std::string fileR;    // full path of the right image
  std::string nameR;    // person name of the right image
  int idR;              // image number of the right image
  int flag; //-1 or 1
  ZQ_FaceFeature featL; // feature extracted from the left image
  ZQ_FaceFeature featR; // feature extracted from the right image
  bool valid;           // false when loading/extraction failed for either image
};
// A single face sample: person name, image id, and its feature vector.
// Ordered case-insensitively by name, with the numeric id as tie-breaker,
// so that duplicates become adjacent after sorting.
class EvaluationSingle
{
public:
  std::string name;
  int id;
  ZQ_FaceFeature feat;

  // Deep-copy assignment: the feature owns its buffer, so CopyData() is used.
  EvaluationSingle& operator = (const EvaluationSingle& other)
  {
    name = other.name;
    id = other.id;
    feat.CopyData(other.feat);
    return *this;
  }

  // Strict weak ordering: case-insensitive name first, then id.
  bool operator < (const EvaluationSingle& other) const
  {
    const int cmp = _strcmpi(name.c_str(), other.name.c_str());
    return (cmp != 0) ? (cmp < 0) : (id < other.id);
  }

  bool operator > (const EvaluationSingle& other) const
  {
    const int cmp = _strcmpi(name.c_str(), other.name.c_str());
    return (cmp != 0) ? (cmp > 0) : (id > other.id);
  }

  // Equality: same person (case-insensitive) and same image id.
  bool operator == (const EvaluationSingle& other) const
  {
    return _strcmpi(name.c_str(), other.name.c_str()) == 0 && id == other.id;
  }

  // True when both samples belong to the same person (name match only).
  bool SameName(const EvaluationSingle& other) const
  {
    return _strcmpi(name.c_str(), other.name.c_str()) == 0;
  }
};
public:
// Runs the standard LFW verification benchmark:
//  1) parse the pair-protocol list file,
//  2) extract (optionally flip-augmented) features for every image, using one
//     recognizer instance per worker thread,
//  3) drop failed pairs and L2-normalize the remaining features,
//  4) print 10-fold accuracy and FAR/TAR statistics.
// recognizers: one instance per thread; all must share the same feature
//   dimension (recognizers[0] is queried for it).
// Returns false when no recognizer is supplied or the list cannot be parsed.
static bool EvaluationOnLFW(std::vector<ZQ_FaceRecognizer*>& recognizers, const std::string& list_file, const std::string& folder, bool use_flip)
{
    int recognizer_num = recognizers.size();
    if (recognizer_num == 0)
        return false;
    // Leave one core free; never use more threads than recognizer instances.
    int real_num_threads = __max(1, __min(recognizer_num, omp_get_num_procs() - 1));
    int feat_dim = recognizers[0]->GetFeatDim();
    int real_dim = use_flip ? (feat_dim * 2) : feat_dim;
    printf("feat_dim = %d, real_dim = %d\n", feat_dim, real_dim);
    std::vector<std::vector<EvaluationPair> > pairs;
    if (!_parse_lfw_list(list_file, folder, pairs))
    {
        printf("failed to parse list file %s\n", list_file.c_str());
        // BUG FIX: was `return EXIT_FAILURE;` which converts to `true` and
        // reported success on a parse failure.
        return false;
    }
    printf("parse list file %s done!\n", list_file.c_str());
    int part_num = pairs.size();
    // Flatten (fold, pair-index) so the work can be scheduled as one loop.
    std::vector<std::pair<int, int> > pair_list;
    for (int i = 0; i < part_num; i++)
    {
        for (int j = 0; j < pairs[i].size(); j++)
        {
            pair_list.push_back(std::make_pair(i, j));
        }
    }
    double t1 = omp_get_wtime();
    if (real_num_threads == 1)
    {
        // Single-threaded path: always uses recognizers[0].
        int handled_num = 0;
        for (int nn = 0; nn < pair_list.size(); nn++)
        {
            handled_num++;
            if (handled_num % 100 == 0)
                printf("%d handled\n", handled_num);
            int i = pair_list[nn].first;
            int j = pair_list[nn].second;
            pairs[i][j].featL.ChangeSize(real_dim);
            pairs[i][j].featR.ChangeSize(real_dim);
            cv::Mat imgL = cv::imread(pairs[i][j].fileL);
            if (imgL.empty())
            {
                printf("failed to load image %s\n", pairs[i][j].fileL.c_str());
                pairs[i][j].valid = false;
                continue;
            }
            cv::Mat imgR = cv::imread(pairs[i][j].fileR);
            if (imgR.empty())
            {
                printf("failed to load image %s\n", pairs[i][j].fileR.c_str());
                pairs[i][j].valid = false;
                continue;
            }
            if (!recognizers[0]->ExtractFeature(imgL.data, imgL.step[0], ZQ_PixelFormat::ZQ_PIXEL_FMT_BGR, pairs[i][j].featL.pData, true))
            {
                printf("failed to extract feature for image %s\n", pairs[i][j].fileL.c_str());
                pairs[i][j].valid = false;
                continue;
            }
            if (!recognizers[0]->ExtractFeature(imgR.data, imgR.step[0], ZQ_PixelFormat::ZQ_PIXEL_FMT_BGR, pairs[i][j].featR.pData, true))
            {
                printf("failed to extract feature for image %s\n", pairs[i][j].fileR.c_str());
                pairs[i][j].valid = false;
                continue;
            }
            if (use_flip)
            {
                // Features of the horizontally flipped images fill the second
                // half of the (doubled) feature vector.
                cv::flip(imgL, imgL, 1);
                cv::flip(imgR, imgR, 1);
                if (!recognizers[0]->ExtractFeature(imgL.data, imgL.step[0], ZQ_PixelFormat::ZQ_PIXEL_FMT_BGR, pairs[i][j].featL.pData+feat_dim, true))
                {
                    printf("failed to extract feature for image %s\n", pairs[i][j].fileL.c_str());
                    pairs[i][j].valid = false;
                    continue;
                }
                if (!recognizers[0]->ExtractFeature(imgR.data, imgR.step[0], ZQ_PixelFormat::ZQ_PIXEL_FMT_BGR, pairs[i][j].featR.pData+feat_dim, true))
                {
                    printf("failed to extract feature for image %s\n", pairs[i][j].fileR.c_str());
                    pairs[i][j].valid = false;
                    continue;
                }
            }
            pairs[i][j].valid = true;
        }
    }
    else
    {
        // Parallel path: recognizers[omp_get_thread_num()] gives each thread
        // its own instance; progress counter and printf are serialized.
        int handled_num = 0;
#pragma omp parallel for schedule(dynamic, 10) num_threads(real_num_threads)
        for (int nn = 0; nn < pair_list.size(); nn++)
        {
#pragma omp critical
            {
                handled_num++;
                if (handled_num % 100 == 0)
                {
                    printf("%d handled\n", handled_num);
                }
            }
            int thread_id = omp_get_thread_num();
            int i = pair_list[nn].first;
            int j = pair_list[nn].second;
            pairs[i][j].featL.ChangeSize(real_dim);
            pairs[i][j].featR.ChangeSize(real_dim);
            cv::Mat imgL = cv::imread(pairs[i][j].fileL);
            if (imgL.empty())
            {
#pragma omp critical
                {
                    printf("failed to load image %s\n", pairs[i][j].fileL.c_str());
                }
                pairs[i][j].valid = false;
                continue;
            }
            cv::Mat imgR = cv::imread(pairs[i][j].fileR);
            if (imgR.empty())
            {
#pragma omp critical
                {
                    printf("failed to load image %s\n", pairs[i][j].fileR.c_str());
                }
                pairs[i][j].valid = false;
                continue;
            }
            if (!recognizers[thread_id]->ExtractFeature(imgL.data, imgL.step[0], ZQ_PixelFormat::ZQ_PIXEL_FMT_BGR, pairs[i][j].featL.pData, true))
            {
#pragma omp critical
                {
                    printf("failed to extract feature for image %s\n", pairs[i][j].fileL.c_str());
                }
                pairs[i][j].valid = false;
                continue;
            }
            if (!recognizers[thread_id]->ExtractFeature(imgR.data, imgR.step[0], ZQ_PixelFormat::ZQ_PIXEL_FMT_BGR, pairs[i][j].featR.pData, true))
            {
#pragma omp critical
                {
                    printf("failed to extract feature for image %s\n", pairs[i][j].fileR.c_str());
                }
                pairs[i][j].valid = false;
                continue;
            }
            if (use_flip)
            {
                cv::flip(imgL, imgL, 1);
                cv::flip(imgR, imgR, 1);
                if (!recognizers[thread_id]->ExtractFeature(imgL.data, imgL.step[0], ZQ_PixelFormat::ZQ_PIXEL_FMT_BGR, pairs[i][j].featL.pData + feat_dim, true))
                {
#pragma omp critical
                    {
                        printf("failed to extract feature for image %s\n", pairs[i][j].fileL.c_str());
                    }
                    pairs[i][j].valid = false;
                    continue;
                }
                if (!recognizers[thread_id]->ExtractFeature(imgR.data, imgR.step[0], ZQ_PixelFormat::ZQ_PIXEL_FMT_BGR, pairs[i][j].featR.pData + feat_dim, true))
                {
#pragma omp critical
                    {
                        printf("failed to extract feature for image %s\n", pairs[i][j].fileR.c_str());
                    }
                    pairs[i][j].valid = false;
                    continue;
                }
            }
            pairs[i][j].valid = true;
        }
    }
    printf("extract feature done!");
    double t2 = omp_get_wtime();
    printf("extract features cost: %.3f secs\n", t2 - t1);
    // Drop pairs whose images/features failed; L2-normalize the rest.
    int erased_num = 0;
    for (int i = 0; i < part_num; i++)
    {
        for (int j = pairs[i].size() - 1; j >= 0; j--)
        {
            if (!pairs[i][j].valid)
            {
                pairs[i].erase(pairs[i].begin() + j);
                erased_num++;
            }
            else
            {
                ZQ_MathBase::Normalize(real_dim, pairs[i][j].featL.pData);
                ZQ_MathBase::Normalize(real_dim, pairs[i][j].featR.pData);
            }
        }
    }
    printf("%d pairs haved been erased\n", erased_num);
    // Collect one sample per image side for the all-vs-all FAR/TAR sweep.
    std::vector<EvaluationSingle> singles;
    for (int i = 0; i < part_num; i++)
    {
        for (int j = 0; j < pairs[i].size(); j++)
        {
            EvaluationSingle cur_single;
            cur_single.name = pairs[i][j].nameL;
            cur_single.id = pairs[i][j].idL;
            cur_single.feat.CopyData(pairs[i][j].featL);
            singles.push_back(cur_single);
            cur_single.name = pairs[i][j].nameR;
            cur_single.id = pairs[i][j].idR;
            cur_single.feat.CopyData(pairs[i][j].featR);
            singles.push_back(cur_single);
        }
    }
    // Prints per-fold and average accuracy (return value intentionally unused).
    _compute_accuracy(pairs);
    _compute_far_tar(singles, real_num_threads);
    return true;
}
private:
// Leave-one-fold-out accuracy over the LFW folds: for each fold i, the mean
// feature and the decision threshold are fitted on all other folds, then
// applied to fold i. Prints per-fold and average accuracy.
// NOTE(review): the returned value is the SUM of the per-fold accuracies, not
// the printed average; the only caller currently ignores it — confirm before
// relying on the return value.
static float _compute_accuracy(const std::vector<std::vector<EvaluationPair> >& pairs)
{
    int part_num = pairs.size();
    std::vector<float> ACCs(part_num);
    float ACC = 0;
    for (int i = 0; i < part_num; i++)
    {
        // Validation set = every fold except fold i.
        std::vector<EvaluationPair> val_pairs;
        for (int j = 0; j < part_num; j++)
        {
            if (j != i)
                val_pairs.insert(val_pairs.end(), pairs[j].begin(), pairs[j].end());
        }
        // Mean feature of the validation folds, used to center the scores.
        ZQ_FaceFeature mu;
        _compute_mu(val_pairs, mu);
        std::vector<double> val_scores, test_scores;
        _compute_scores(val_pairs, mu, val_scores);
        _compute_scores(pairs[i], mu, test_scores);
        // Threshold that maximizes accuracy on the validation folds.
        double threshold = _get_threshold(val_pairs, val_scores, 10000);
        ACCs[i] = _get_accuracy(pairs[i], test_scores, threshold);
        ACC += ACCs[i];
        printf("%d\t%2.2f%% (threshold = %f)\n", i, ACCs[i] * 100, threshold);
        /*const static int BUF_LEN = 50;
        char file[BUF_LEN];
        sprintf_s(file, BUF_LEN, "%d_mu.txt", i);
        FILE* out = 0;
        fopen_s(&out, file, "w");
        for (int k = 0; k < mu.length; k++)
        fprintf(out, "%12.6f\n", mu.pData[k]);
        fclose(out);
        sprintf_s(file, BUF_LEN, "%d_validscores.txt", i);
        fopen_s(&out, file, "w");
        for (int k = 0; k < val_scores.size(); k++)
        fprintf(out, "%12.6f\n", val_scores[k]);
        fclose(out);
        sprintf_s(file, BUF_LEN, "%d_testscores.txt", i);
        fopen_s(&out, file, "w");
        for (int k = 0; k < test_scores.size(); k++)
        fprintf(out, "%12.6f\n", test_scores[k]);
        fclose(out);*/
    }
    printf("----------------\n");
    printf("AVE\t%2.2f%%\n", ACC / part_num * 100);
    return ACC;
}
// Parses the LFW pairs protocol file.
// Format: first line "<part_num> <half_pair_num>"; then, for each part,
// 2*half_pair_num lines that are either
//   "<name>\t<id1>\t<id2>"             (same person, flag = 1) or
//   "<nameL>\t<idL>\t<nameR>\t<idR>"   (different people, flag = -1).
// Image paths are built as <folder>\<name>\<name>_%04d.jpg.
// Returns false when the file cannot be opened or is truncated/malformed.
static bool _parse_lfw_list(const std::string& list_file, const std::string& folder, std::vector<std::vector<EvaluationPair> >& pairs)
{
    FILE* in = 0;
    if (0 != fopen_s(&in, list_file.c_str(), "r"))
        return false;
    int part_num, half_pair_num;
    const static int BUF_LEN = 200;
    char line[BUF_LEN];
    // BUG FIX: the header fgets/sscanf_s results were never checked, so an
    // empty or malformed file produced garbage counts.
    if (fgets(line, BUF_LEN, in) == 0 ||
        sscanf_s(line, "%d%d", &part_num, &half_pair_num) != 2 ||
        part_num < 0 || half_pair_num < 0)
    {
        fclose(in);
        return false;
    }
    pairs.resize(part_num);
    std::vector<std::string> strings;
    for (int i = 0; i < part_num; i++)
    {
        for (int j = 0; j < 2 * half_pair_num; j++)
        {
            // BUG FIX: was `fgets(line, 199, in)` with no result check; a
            // truncated file silently re-parsed the previous line.
            if (fgets(line, BUF_LEN, in) == 0)
            {
                fclose(in);
                return false;
            }
            int len = strlen(line);
            // Guard len > 0: an empty read would index line[-1].
            if (len > 0 && line[len - 1] == '\n')
                line[--len] = '\0';
            std::string input = line;
            _split_string(input, std::string("\t"), strings);
            if (strings.size() == 3)
            {
                // Positive pair: two images of the same person.
                EvaluationPair cur_pair;
                cur_pair.nameL = strings[0];
                cur_pair.nameR = strings[0];
                cur_pair.idL = atoi(strings[1].c_str());
                cur_pair.idR = atoi(strings[2].c_str());
                char num2str[BUF_LEN];
                sprintf_s(num2str, BUF_LEN, "_%04i.jpg", atoi(strings[1].c_str()));
                cur_pair.fileL = folder + "\\" + strings[0] + "\\" + strings[0] + std::string(num2str);
                sprintf_s(num2str, BUF_LEN, "_%04i.jpg", atoi(strings[2].c_str()));
                cur_pair.fileR = folder + "\\" + strings[0] + "\\" + strings[0] + std::string(num2str);
                cur_pair.flag = 1;
                pairs[i].push_back(cur_pair);
            }
            else if (strings.size() == 4)
            {
                // Negative pair: images of two different people.
                EvaluationPair cur_pair;
                cur_pair.nameL = strings[0];
                cur_pair.nameR = strings[2];
                cur_pair.idL = atoi(strings[1].c_str());
                cur_pair.idR = atoi(strings[3].c_str());
                char num2str[BUF_LEN];
                sprintf_s(num2str, BUF_LEN, "_%04i.jpg", atoi(strings[1].c_str()));
                cur_pair.fileL = folder + "\\" + strings[0] + "\\" + strings[0] + std::string(num2str);
                sprintf_s(num2str, BUF_LEN, "_%04i.jpg", atoi(strings[3].c_str()));
                cur_pair.fileR = folder + "\\" + strings[2] + "\\" + strings[2] + std::string(num2str);
                cur_pair.flag = -1;
                pairs[i].push_back(cur_pair);
            }
        }
    }
    fclose(in);
    return true;
}
// Mean feature vector over BOTH sides of every validation pair.
// Returns false when the pair list is empty.
static bool _compute_mu(const std::vector<EvaluationPair>& val_pairs, ZQ_FaceFeature& mu)
{
    const int pair_num = val_pairs.size();
    if (pair_num == 0)
        return false;
    const int feat_dim = val_pairs[0].featL.length;
    mu.ChangeSize(feat_dim);
    // Accumulate in double to limit rounding error before averaging.
    std::vector<double> acc(feat_dim, 0.0);
    for (int i = 0; i < pair_num; i++)
    {
        const auto* pL = val_pairs[i].featL.pData;
        const auto* pR = val_pairs[i].featR.pData;
        for (int d = 0; d < feat_dim; d++)
        {
            acc[d] += pL[d];
            acc[d] += pR[d];
        }
    }
    for (int d = 0; d < feat_dim; d++)
    {
        mu.pData[d] = acc[d] / (2 * val_pairs.size());
    }
    return true;
}
// Cosine similarity of mean-subtracted, re-normalized feature pairs; one
// score per pair, written into `scores`. Returns false when `pairs` is empty.
static bool _compute_scores(const std::vector<EvaluationPair>& pairs, const ZQ_FaceFeature& mu, std::vector<double>& scores)
{
    const int num = pairs.size();
    if (num == 0)
        return false;
    scores.resize(num);
    const int feat_dim = mu.length;
    // Scratch buffers reused across pairs.
    std::vector<double> diffL(feat_dim), diffR(feat_dim);
    for (int i = 0; i < num; i++)
    {
        // Subtract the validation-set mean and accumulate squared norms.
        double normL = 0, normR = 0;
        for (int d = 0; d < feat_dim; d++)
        {
            diffL[d] = pairs[i].featL.pData[d] - mu.pData[d];
            diffR[d] = pairs[i].featR.pData[d] - mu.pData[d];
            normL += diffL[d] * diffL[d];
            normR += diffR[d] * diffR[d];
        }
        normL = sqrt(normL);
        normR = sqrt(normR);
        // Re-normalize each side unless it is the zero vector.
        if (normL != 0)
        {
            for (int d = 0; d < feat_dim; d++)
                diffL[d] /= normL;
        }
        if (normR != 0)
        {
            for (int d = 0; d < feat_dim; d++)
                diffR[d] /= normR;
        }
        double dot = 0;
        for (int d = 0; d < feat_dim; d++)
            dot += diffL[d] * diffR[d];
        scores[i] = dot;
    }
    return true;
}
// Picks the decision threshold in [-1, 1] (sampled at 2*thrNum+1 points) that
// maximizes accuracy on the validation pairs; thresholds tying for the best
// accuracy are averaged.
static float _get_threshold(const std::vector<EvaluationPair>& pairs, const std::vector<double>& scores, int thrNum)
{
    const int sample_num = 2 * thrNum + 1;
    std::vector<double> thresholds(sample_num), acc_table(sample_num);
    for (int k = 0; k < sample_num; k++)
    {
        thresholds[k] = (double)k / thrNum - 1;
        acc_table[k] = _get_accuracy(pairs, scores, thresholds[k]);
    }
    double best_acc = acc_table[0];
    for (int k = 1; k < sample_num; k++)
        best_acc = __max(best_acc, acc_table[k]);
    // Average every threshold that ties for the best accuracy
    // (exact floating-point equality is intentional here).
    double thr_sum = 0;
    int tie_num = 0;
    for (int k = 0; k < sample_num; k++)
    {
        if (acc_table[k] == best_acc)
        {
            thr_sum += thresholds[k];
            tie_num++;
        }
    }
    return thr_sum / tie_num;
}
static float _get_accuracy(const std::vector<EvaluationPair>& pairs, const std::vector<double>& scores, double threshold)
{
if (pairs.size() == 0 || pairs.size() != scores.size())
return 0;
double sum = 0;
for (int i = 0; i < pairs.size(); i++)
{
if (pairs[i].flag > 0 && scores[i] > threshold || pairs[i].flag < 0 && scores[i] < threshold)
sum++;
}
return sum / pairs.size();
}
// Splits `s` on any character of `delim`, clearing and filling `ret`.
// Empty tokens between consecutive delimiters are preserved (matching the
// original behavior the list parser relies on).
static void _split_string(const std::string& s, const std::string& delim, std::vector< std::string >& ret)
{
    ret.clear();
    size_t last = 0;
    size_t index = s.find_first_of(delim, last);
    while (index != std::string::npos)
    {
        ret.push_back(s.substr(last, index - last));
        last = index + 1;
        index = s.find_first_of(delim, last);
    }
    // BUG FIX: the old trailing check `index - last > 0` compared against
    // npos and was effectively always true, appending a spurious empty token
    // when the input was empty or ended with a delimiter.
    if (last < s.length())
    {
        ret.push_back(s.substr(last));
    }
}
// All-vs-all verification sweep: dedupes the samples, scores every unordered
// pair, sorts the scores descending, and prints the TAR (true-accept rate)
// reached when the FAR (false-accept rate) first exceeds 1e-6/1e-5/1e-4/1e-3.
static void _compute_far_tar(std::vector<EvaluationSingle>& singles, int real_num_threads)
{
    printf("compute far tar begin\n");
    // Sort by (name, id) so exact duplicates become adjacent, then drop them.
    ZQ_MergeSort::MergeSort(&singles[0], singles.size(), true);
    int removed_num = 0;
    for (int i = singles.size() - 2; i >= 0; i--)
    {
        if (singles[i] == singles[i + 1])
        {
            singles.erase(singles.begin() + i + 1);
            removed_num++;
        }
    }
    int image_num = singles.size();
    printf("%d removed, remain %d\n", removed_num, image_num);
    // Enumerate all unordered pairs (i < j) and mark same-person pairs.
    int all_num = image_num*(image_num - 1)/2;
    std::vector<float> all_scores(all_num);
    std::vector<int> all_idx_i(all_num), all_idx_j(all_num);
    std::vector<bool> all_flag(all_num);      // true when same person
    std::vector<int> sort_indices(all_num);   // permutation tracked by the sort
    int idx = 0;
    int same_num = 0;
    for (int i = 0; i < image_num; i++)
    {
        for (int j = i + 1; j < image_num; j++)
        {
            all_idx_i[idx] = i;
            all_idx_j[idx] = j;
            bool is_same = singles[i].SameName(singles[j]);
            all_flag[idx] = is_same;
            if (is_same)
                same_num++;
            sort_indices[idx] = idx;
            idx++;
        }
    }
    int notsame_num = all_num - same_num;
    printf("all_num = %d, same_num = %d, notsame_num = %d\n", all_num, same_num, notsame_num);
    double t1 = omp_get_wtime();
    int dim = singles[0].feat.length;
    // Features are already L2-normalized upstream, so the dot product is the
    // similarity score.
    if (real_num_threads == 1)
    {
        for (int n = 0; n < all_num; n++)
        {
            int i = all_idx_i[n];
            int j = all_idx_j[n];
            all_scores[n] = ZQ_MathBase::DotProduct(dim, singles[i].feat.pData, singles[j].feat.pData);
        }
    }
    else
    {
        // One contiguous chunk per thread.
        int chunk_size = (all_num + real_num_threads - 1) / real_num_threads;
#pragma omp parallel for schedule(static, chunk_size) num_threads(real_num_threads)
        for (int n = 0; n < all_num; n++)
        {
            int i = all_idx_i[n];
            int j = all_idx_j[n];
            all_scores[n] = ZQ_MathBase::DotProduct(dim, singles[i].feat.pData, singles[j].feat.pData);
        }
    }
    double t2 = omp_get_wtime();
    printf("compute all scores cost: %.3f secs\n", t2 - t1);
    // Sort scores descending; sort_indices follows the permutation so flags
    // can still be looked up per pair.
    ZQ_MergeSort::MergeSort(&all_scores[0], &sort_indices[0], all_num, false);
    double t3 = omp_get_wtime();
    printf("sort all scores cost: %.3f secs\n", t3 - t2);
    // Absolute false-accept counts corresponding to the target FAR levels.
    const int stage_num = 4;
    double far_num[stage_num] =
    {
        1e-6 * notsame_num,
        1e-5 * notsame_num,
        1e-4 * notsame_num,
        1e-3 * notsame_num
    };
    int cur_far_num = 0;
    int cur_tar_num = 0;
    int cur_stage = 0;
    // Walk the scores from highest to lowest, reporting at each FAR stage.
    for (int i = 0; i < all_num; i++)
    {
        if (cur_stage >= stage_num)
            break;
        int sort_id = sort_indices[i];
        if (all_flag[sort_id])
        {
            cur_tar_num++;
        }
        else
        {
            cur_far_num++;
        }
        if (cur_far_num > far_num[cur_stage])
        {
            printf("thresh = %.5f far = %15e, tar = %15f\n", all_scores[i],
                (double)cur_far_num / notsame_num, (double)cur_tar_num / same_num);
            cur_stage++;
        }
    }
}
};
}
#endif
|
DRB014-outofbounds-orig-yes.c | /*
Copyright (C) 1991-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>.
*/
/*
This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it.
*/
/*
glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default.
*/
/*
wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
synchronized with ISOIEC 10646:2017, fifth edition, plus
the following additions from Amendment 1 to the fifth edition:
- 56 emoji characters
- 285 hentaigana
- 3 additional Zanabazar Square characters
*/
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
The outmost loop is parallelized.
But the inner level loop has out of bound access for b[i][j] when j equals to 0.
This will cause a memory access of a previous row's last element.
For example, an array of 4x4:
j=0 1 2 3
i=0 x x x x
1 x x x x
2 x x x x
3 x x x x
outer loop: i=2,
inner loop: j=0
array element accessed b[i][j-1] becomes b[2][-1], which in turn is b[1][3]
due to linearized row-major storage of the 2-D array.
This causes loop-carried data dependence between i=2 and i=1.
Data race pair: b[i][j]@75 vs. b[i][j-1]@75.
*/
#include <stdio.h>
#include <stdlib.h>
/* Driver of the DataRaceBench DRB014 benchmark (the defect is intentional;
   see the file header).
   NOTE(review): the header comment describes an out-of-bounds read of
   b[i][j-1] with the OUTER loop parallelized, but this (Cetus-transformed)
   code reads b[i-1][j] and parallelizes the INNER j loop — confirm against
   the original benchmark before using it as a race oracle. */
int main(int argc, char * argv[])
{
int i, j;
int n = 100, m = 100;
/* VLA on the stack; row 0 is never written below, so it is read
   uninitialized when i == 1. */
double b[n][m];
int _ret_val_0;
#pragma cetus private(i, j)
#pragma loop name main#0
for (i=1; i<n; i ++ )
{
#pragma cetus private(j)
#pragma loop name main#0#0
#pragma cetus parallel
#pragma omp parallel for private(j)
for (j=0; j<m; j ++ )
{
/* copy the previous row into the current one */
b[i][j]=b[i-1][j];
}
}
printf("b[50][50]=%f\n", b[50][50]);
_ret_val_0=0;
return _ret_val_0;
}
|
composite.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP OOO SSSSS IIIII TTTTT EEEEE %
% C O O MM MM P P O O SS I T E %
% C O O M M M PPPP O O SSS I T EEE %
% C O O M M P O O SS I T E %
% CCCC OOO M M P OOO SSSSS IIIII T EEEEE %
% %
% %
% MagickCore Image Composite Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resample.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p o s i t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompositeImage() returns the second image composited onto the first
% at the specified offset, using the specified composite method.
%
% The format of the CompositeImage method is:
%
% MagickBooleanType CompositeImage(Image *image,
% const Image *source_image,const CompositeOperator compose,
% const MagickBooleanType clip_to_self,const ssize_t x_offset,
% const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the canvas image, modified by he composition
%
% o source_image: the source image.
%
% o compose: This operator affects how the composite is applied to
% the image. The operators and how they are utilized are listed here
% http://www.w3.org/TR/SVG12/#compositing.
%
% o clip_to_self: set to MagickTrue to limit composition to area composed.
%
% o x_offset: the column offset of the composited image.
%
% o y_offset: the row offset of the composited image.
%
% Extra Controls from Image meta-data in 'image' (artifacts)
%
% o "compose:args"
% A string containing extra numerical arguments for specific compose
% methods, generally expressed as a 'geometry' or a comma separated list
% of numbers.
%
% Compose methods needing such arguments include "BlendCompositeOp" and
% "DisplaceCompositeOp".
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
Composition based on the SVG specification:
A Composition is defined by...
Color Function : f(Sc,Dc) where Sc and Dc are the normizalized colors
Blending areas : X = 1 for area of overlap, ie: f(Sc,Dc)
Y = 1 for source preserved
Z = 1 for canvas preserved
Conversion to transparency (then optimized)
Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa)
Da' = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa)
Where...
Sca = Sc*Sa normalized Source color divided by Source alpha
Dca = Dc*Da normalized Dest color divided by Dest alpha
Dc' = Dca'/Da' the desired color value for this channel.
Da' appears in the following formula as 'gamma', the resulting alpha value.
Most functions use a blending mode of over (X=1,Y=1,Z=1) this results in
the following optimizations...
gamma = Sa+Da-Sa*Da;
gamma = 1 - QuantiumScale*alpha * QuantiumScale*beta;
opacity = QuantiumScale*alpha*beta; // over blend, optimized 1-Gamma
The above SVG definitions also dictate that Mathematical Composition
methods should use an 'Over' blending mode for the Alpha Channel.
It however was not applied for composition modes of 'Plus', 'Minus',
the modulus versions of 'Add' and 'Subtract'.
Mathematical operator changes to be applied from IM v6.7...
1) Modulus modes 'Add' and 'Subtract' are obsoleted and renamed
'ModulusAdd' and 'ModulusSubtract' for clarity.
2) All mathematical compositions work as per the SVG specification
with regard to blending. This now includes 'ModulusAdd' and
'ModulusSubtract'.
3) When the special channel flag 'sync' (syncronize channel updates)
is turned off (enabled by default) then mathematical compositions are
only performed on the channels specified, and are applied
independantally of each other. In other words the mathematics is
performed as 'pure' mathematical operations, rather than as image
operations.
*/
static void HCLComposite(const MagickRealType hue,const MagickRealType chroma,
  const MagickRealType luma,MagickRealType *red,MagickRealType *green,
  MagickRealType *blue)
{
  MagickRealType
    b,
    c,
    g,
    h,
    m,
    r,
    x;

  /*
    Convert HCL to RGB colorspace: choose the hue sextant to distribute
    chroma between the two leading channels, then shift all channels by the
    luma-matching offset m.
  */
  assert(red != (MagickRealType *) NULL);
  assert(green != (MagickRealType *) NULL);
  assert(blue != (MagickRealType *) NULL);
  h=6.0*hue;
  c=chroma;
  x=c*(1.0-fabs(fmod(h,2.0)-1.0));
  r=0.0;
  g=0.0;
  b=0.0;
  if ((0.0 <= h) && (h < 6.0))
    switch ((int) h)
    {
      case 0:
      {
        r=c;
        g=x;
        break;
      }
      case 1:
      {
        r=x;
        g=c;
        break;
      }
      case 2:
      {
        g=c;
        b=x;
        break;
      }
      case 3:
      {
        g=x;
        b=c;
        break;
      }
      case 4:
      {
        r=x;
        b=c;
        break;
      }
      case 5:
      {
        r=c;
        b=x;
        break;
      }
    }
  m=luma-(0.298839*r+0.586811*g+0.114350*b);
  *red=QuantumRange*(r+m);
  *green=QuantumRange*(g+m);
  *blue=QuantumRange*(b+m);
}
static void CompositeHCL(const MagickRealType red,const MagickRealType green,
  const MagickRealType blue,MagickRealType *hue,MagickRealType *chroma,
  MagickRealType *luma)
{
  MagickRealType
    b,
    g,
    peak,
    r,
    sector,
    span;

  /*
    Convert RGB to HCL colorspace: chroma is the channel spread, hue the
    sextant of the dominant channel, luma a Rec. 601-style weighted sum.
  */
  assert(hue != (MagickRealType *) NULL);
  assert(chroma != (MagickRealType *) NULL);
  assert(luma != (MagickRealType *) NULL);
  r=red;
  g=green;
  b=blue;
  peak=MagickMax(r,MagickMax(g,b));
  span=peak-(MagickRealType) MagickMin(r,MagickMin(g,b));
  sector=0.0;
  if (span != 0)
    {
      if (red == peak)
        sector=fmod((g-b)/span+6.0,6.0);
      else
        if (green == peak)
          sector=((b-r)/span)+2.0;
        else
          if (blue == peak)
            sector=((r-g)/span)+4.0;
    }
  *hue=(sector/6.0);
  *chroma=QuantumScale*span;
  *luma=QuantumScale*(0.298839*r+0.586811*g+0.114350*b);
}
/*
  Fast path for the Porter-Duff "over" operator: source_image is composited
  onto image at (x_offset,y_offset). When clip_to_self is MagickTrue only the
  overlap area is touched; otherwise the source is sampled (and repeated) over
  the whole canvas. The "compose:clamp" image artifact selects ClampPixel()
  versus ClampToQuantum() for the channel results.
*/
static MagickBooleanType CompositeOverImage(Image *image,
  const Image *source_image,const MagickBooleanType clip_to_self,
  const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
{
#define CompositeImageTag "Composite/Image"

  CacheView
    *image_view,
    *source_view;

  const char
    *value;

  MagickBooleanType
    clamp,
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Composite image.
  */
  status=MagickTrue;
  progress=0;
  clamp=MagickTrue;
  value=GetImageArtifact(image,"compose:clamp");
  if (value != (const char *) NULL)
    clamp=IsStringTrue(value);
  source_view=AcquireVirtualCacheView(source_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(source_image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *pixels;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    size_t
      channels;

    if (status == MagickFalse)
      continue;
    /* Skip rows entirely outside the overlay when clipping to self. */
    if (clip_to_self != MagickFalse)
      {
        if (y < y_offset)
          continue;
        if ((y-y_offset) >= (ssize_t) source_image->rows)
          continue;
      }
    /*
      If pixels is NULL, y is outside overlay region.
    */
    pixels=(Quantum *) NULL;
    p=(Quantum *) NULL;
    if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows))
      {
        p=GetCacheViewVirtualPixels(source_view,0,y-y_offset,
          source_image->columns,1,exception);
        if (p == (const Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        pixels=p;
        /* For a negative x offset, start partway into the source row. */
        if (x_offset < 0)
          p-=x_offset*GetPixelChannels(source_image);
      }
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        Da,
        Dc,
        Dca,
        gamma,
        Sa,
        Sc,
        Sca;

      register ssize_t
        i;

      if (clip_to_self != MagickFalse)
        {
          if (x < x_offset)
            {
              q+=GetPixelChannels(image);
              continue;
            }
          if ((x-x_offset) >= (ssize_t) source_image->columns)
            break;
        }
      if ((pixels == (Quantum *) NULL) || (x < x_offset) ||
          ((x-x_offset) >= (ssize_t) source_image->columns))
        {
          Quantum
            source[MaxPixelChannels];

          /*
            Virtual composite:
              Sc: source color.
              Dc: canvas color.
          */
          if (GetPixelReadMask(image,q) == 0)
            {
              q+=GetPixelChannels(image);
              continue;
            }
          (void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,
            source,exception);
          /* Copy only channels both images define. */
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel=GetPixelChannelChannel(image,i);
            PixelTrait traits=GetPixelChannelTraits(image,channel);
            PixelTrait source_traits=GetPixelChannelTraits(source_image,
              channel);
            if ((traits == UndefinedPixelTrait) ||
                (source_traits == UndefinedPixelTrait))
              continue;
            q[i]=source[channel];
          }
          q+=GetPixelChannels(image);
          continue;
        }
      /*
        Authentic composite:
          Sa: normalized source alpha.
          Da: normalized canvas alpha.
      */
      if (GetPixelReadMask(source_image,p) == 0)
        {
          /* Masked-out source pixel: advance p (wrapping back to the row
             start so the source repeats horizontally) and skip q. */
          p+=GetPixelChannels(source_image);
          channels=GetPixelChannels(source_image);
          if (p >= (pixels+channels*source_image->columns))
            p=pixels;
          q+=GetPixelChannels(image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(source_image,p);
      Da=QuantumScale*GetPixelAlpha(image,q);
      /* gamma is the composite alpha Sa+Da-Sa*Da; its reciprocal converts
         premultiplied color back to straight color. */
      gamma=Sa+Da-Sa*Da;
      gamma=PerceptibleReciprocal(gamma);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait source_traits=GetPixelChannelTraits(source_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (source_traits == UndefinedPixelTrait))
          continue;
        if ((traits & CopyPixelTrait) != 0)
          {
            /*
              Copy channel.
            */
            q[i]=GetPixelChannel(source_image,channel,p);
            continue;
          }
        if (channel == AlphaPixelChannel)
          {
            /*
              Set alpha channel.
            */
            q[i]=clamp != MagickFalse ?
              ClampPixel(QuantumRange*(Sa+Da-Sa*Da)) :
              ClampToQuantum(QuantumRange*(Sa+Da-Sa*Da));
            continue;
          }
        /*
          Sc: source color.
          Dc: canvas color.
        */
        Sc=(MagickRealType) GetPixelChannel(source_image,channel,p);
        Dc=(MagickRealType) q[i];
        Sca=QuantumScale*Sa*Sc;
        Dca=QuantumScale*Da*Dc;
        q[i]=clamp != MagickFalse ?
          ClampPixel(gamma*QuantumRange*(Sca+Dca*(1.0-Sa))) :
          ClampToQuantum(gamma*QuantumRange*(Sca+Dca*(1.0-Sa)));
      }
      p+=GetPixelChannels(source_image);
      channels=GetPixelChannels(source_image);
      if (p >= (pixels+channels*source_image->columns))
        p=pixels;
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_CompositeImage)
#endif
        proceed=SetImageProgress(image,CompositeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
MagickExport MagickBooleanType CompositeImage(Image *image,
const Image *composite,const CompositeOperator compose,
const MagickBooleanType clip_to_self,const ssize_t x_offset,
const ssize_t y_offset,ExceptionInfo *exception)
{
#define CompositeImageTag "Composite/Image"
CacheView
*source_view,
*image_view;
const char
*value;
GeometryInfo
geometry_info;
Image
*canvas_image,
*source_image;
MagickBooleanType
clamp,
status;
MagickOffsetType
progress;
MagickRealType
amount,
canvas_dissolve,
midpoint,
percent_luma,
percent_chroma,
source_dissolve,
threshold;
MagickStatusType
flags;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(composite!= (Image *) NULL);
assert(composite->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
source_image=CloneImage(composite,0,0,MagickTrue,exception);
if (source_image == (const Image *) NULL)
return(MagickFalse);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) SetImageColorspace(image,sRGBColorspace,exception);
(void) SetImageColorspace(source_image,image->colorspace,exception);
if ((image->alpha_trait != UndefinedPixelTrait) &&
(source_image->alpha_trait == UndefinedPixelTrait))
(void) SetImageAlphaChannel(source_image,SetAlphaChannel,exception);
if ((compose == OverCompositeOp) || (compose == SrcOverCompositeOp))
{
status=CompositeOverImage(image,source_image,clip_to_self,x_offset,
y_offset,exception);
source_image=DestroyImage(source_image);
return(status);
}
amount=0.5;
canvas_image=(Image *) NULL;
canvas_dissolve=1.0;
clamp=MagickTrue;
value=GetImageArtifact(image,"compose:clamp");
if (value != (const char *) NULL)
clamp=IsStringTrue(value);
SetGeometryInfo(&geometry_info);
percent_luma=100.0;
percent_chroma=100.0;
source_dissolve=1.0;
threshold=0.05f;
switch (compose)
{
case CopyCompositeOp:
{
if ((x_offset < 0) || (y_offset < 0))
break;
if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns)
break;
if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows)
break;
status=MagickTrue;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(source_image,image,source_image->rows,1)
#endif
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*p;
register Quantum
*q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset,
source_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) source_image->columns; x++)
{
register ssize_t
i;
if (GetPixelReadMask(source_image,p) == 0)
{
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(source_image); i++)
{
PixelChannel channel=GetPixelChannelChannel(source_image,i);
PixelTrait source_traits=GetPixelChannelTraits(source_image,
channel);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if ((traits == UndefinedPixelTrait) ||
(source_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(image,channel,p[i],q);
}
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_CompositeImage)
#endif
proceed=SetImageProgress(image,CompositeImageTag,
(MagickOffsetType) y,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
source_image=DestroyImage(source_image);
return(status);
}
case IntensityCompositeOp:
{
if ((x_offset < 0) || (y_offset < 0))
break;
if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns)
break;
if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows)
break;
status=MagickTrue;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(source_image,image,source_image->rows,1)
#endif
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*p;
register Quantum
*q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset,
source_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) source_image->columns; x++)
{
if (GetPixelReadMask(source_image,p) == 0)
{
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
continue;
}
SetPixelAlpha(image,clamp != MagickFalse ?
ClampPixel(GetPixelIntensity(source_image,p)) :
ClampToQuantum(GetPixelIntensity(source_image,p)),q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_CompositeImage)
#endif
proceed=SetImageProgress(image,CompositeImageTag,
(MagickOffsetType) y,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
source_image=DestroyImage(source_image);
return(status);
}
case CopyAlphaCompositeOp:
case ChangeMaskCompositeOp:
{
/*
Modify canvas outside the overlaid region and require an alpha
channel to exist, to add transparency.
*/
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
break;
}
case BlurCompositeOp:
{
CacheView
*canvas_view;
MagickRealType
angle_range,
angle_start,
height,
width;
PixelInfo
pixel;
ResampleFilter
*resample_filter;
SegmentInfo
blur;
/*
Blur Image by resampling.
Blur Image dictated by an overlay gradient map: X = red_channel;
Y = green_channel; compose:args = x_scale[,y_scale[,angle]].
*/
canvas_image=CloneImage(image,image->columns,image->rows,MagickTrue,
exception);
if (canvas_image == (Image *) NULL)
{
source_image=DestroyImage(source_image);
return(MagickFalse);
}
/*
Gather the maximum blur sigma values from user.
*/
flags=NoValue;
value=GetImageArtifact(image,"compose:args");
if (value != (const char *) NULL)
flags=ParseGeometry(value,&geometry_info);
if ((flags & WidthValue) == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"InvalidSetting","'%s' '%s'","compose:args",value);
source_image=DestroyImage(source_image);
canvas_image=DestroyImage(canvas_image);
return(MagickFalse);
}
/*
Users input sigma now needs to be converted to the EWA ellipse size.
The filter defaults to a sigma of 0.5 so to make this match the
users input the ellipse size needs to be doubled.
*/
width=height=geometry_info.rho*2.0;
if ((flags & HeightValue) != 0 )
height=geometry_info.sigma*2.0;
/*
Default the unrotated ellipse width and height axis vectors.
*/
blur.x1=width;
blur.x2=0.0;
blur.y1=0.0;
blur.y2=height;
/* rotate vectors if a rotation angle is given */
if ((flags & XValue) != 0 )
{
MagickRealType
angle;
angle=DegreesToRadians(geometry_info.xi);
blur.x1=width*cos(angle);
blur.x2=width*sin(angle);
blur.y1=(-height*sin(angle));
blur.y2=height*cos(angle);
}
/* Otherwise lets set a angle range and calculate in the loop */
angle_start=0.0;
angle_range=0.0;
if ((flags & YValue) != 0 )
{
angle_start=DegreesToRadians(geometry_info.xi);
angle_range=DegreesToRadians(geometry_info.psi)-angle_start;
}
/*
Set up a gaussian cylindrical filter for EWA Bluring.
As the minimum ellipse radius of support*1.0 the EWA algorithm
can only produce a minimum blur of 0.5 for Gaussian (support=2.0)
This means that even 'No Blur' will be still a little blurry!
The solution (as well as the problem of preventing any user
expert filter settings, is to set our own user settings, then
restore them afterwards.
*/
resample_filter=AcquireResampleFilter(image,exception);
SetResampleFilter(resample_filter,GaussianFilter);
/* do the variable blurring of each pixel in image */
GetPixelInfo(image,&pixel);
source_view=AcquireVirtualCacheView(source_image,exception);
canvas_view=AcquireAuthenticCacheView(canvas_image,exception);
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows))
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) source_image->columns; x++)
{
if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns))
{
p+=GetPixelChannels(source_image);
continue;
}
if (fabs((double) angle_range) > MagickEpsilon)
{
MagickRealType
angle;
angle=angle_start+angle_range*QuantumScale*
GetPixelBlue(source_image,p);
blur.x1=width*cos(angle);
blur.x2=width*sin(angle);
blur.y1=(-height*sin(angle));
blur.y2=height*cos(angle);
}
#if 0
if ( x == 10 && y == 60 ) {
(void) fprintf(stderr, "blur.x=%lf,%lf, blur.y=%lf,%lf\n",blur.x1,
blur.x2,blur.y1, blur.y2);
(void) fprintf(stderr, "scaled by=%lf,%lf\n",QuantumScale*
GetPixelRed(p),QuantumScale*GetPixelGreen(p));
#endif
ScaleResampleFilter(resample_filter,
blur.x1*QuantumScale*GetPixelRed(source_image,p),
blur.y1*QuantumScale*GetPixelGreen(source_image,p),
blur.x2*QuantumScale*GetPixelRed(source_image,p),
blur.y2*QuantumScale*GetPixelGreen(source_image,p) );
(void) ResamplePixelColor(resample_filter,(double) x_offset+x,
(double) y_offset+y,&pixel,exception);
SetPixelViaPixelInfo(canvas_image,&pixel,q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(canvas_image);
}
sync=SyncCacheViewAuthenticPixels(canvas_view,exception);
if (sync == MagickFalse)
break;
}
resample_filter=DestroyResampleFilter(resample_filter);
source_view=DestroyCacheView(source_view);
canvas_view=DestroyCacheView(canvas_view);
source_image=DestroyImage(source_image);
source_image=canvas_image;
break;
}
case DisplaceCompositeOp:
case DistortCompositeOp:
{
CacheView
*canvas_view;
MagickRealType
horizontal_scale,
vertical_scale;
PixelInfo
pixel;
PointInfo
center,
offset;
/*
Displace/Distort based on overlay gradient map:
X = red_channel; Y = green_channel;
compose:args = x_scale[,y_scale[,center.x,center.y]]
*/
canvas_image=CloneImage(image,image->columns,image->rows,MagickTrue,
exception);
if (canvas_image == (Image *) NULL)
{
source_image=DestroyImage(source_image);
return(MagickFalse);
}
SetGeometryInfo(&geometry_info);
flags=NoValue;
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
flags=ParseGeometry(value,&geometry_info);
if ((flags & (WidthValue | HeightValue)) == 0 )
{
if ((flags & AspectValue) == 0)
{
horizontal_scale=(MagickRealType) (source_image->columns-1)/2.0;
vertical_scale=(MagickRealType) (source_image->rows-1)/2.0;
}
else
{
horizontal_scale=(MagickRealType) (image->columns-1)/2.0;
vertical_scale=(MagickRealType) (image->rows-1)/2.0;
}
}
else
{
horizontal_scale=geometry_info.rho;
vertical_scale=geometry_info.sigma;
if ((flags & PercentValue) != 0)
{
if ((flags & AspectValue) == 0)
{
horizontal_scale*=(source_image->columns-1)/200.0;
vertical_scale*=(source_image->rows-1)/200.0;
}
else
{
horizontal_scale*=(image->columns-1)/200.0;
vertical_scale*=(image->rows-1)/200.0;
}
}
if ((flags & HeightValue) == 0)
vertical_scale=horizontal_scale;
}
/*
Determine fixed center point for absolute distortion map
Absolute distort ==
Displace offset relative to a fixed absolute point
Select that point according to +X+Y user inputs.
default = center of overlay image
arg flag '!' = locations/percentage relative to background image
*/
center.x=(MagickRealType) x_offset;
center.y=(MagickRealType) y_offset;
if (compose == DistortCompositeOp)
{
if ((flags & XValue) == 0)
if ((flags & AspectValue) != 0)
center.x=(MagickRealType) ((image->columns-1)/2.0);
else
center.x=(MagickRealType) (x_offset+(source_image->columns-1)/
2.0);
else
if ((flags & AspectValue) != 0)
center.x=geometry_info.xi;
else
center.x=(MagickRealType) (x_offset+geometry_info.xi);
if ((flags & YValue) == 0)
if ((flags & AspectValue) != 0)
center.y=(MagickRealType) ((image->rows-1)/2.0);
else
center.y=(MagickRealType) (y_offset+(source_image->rows-1)/2.0);
else
if ((flags & AspectValue) != 0)
center.y=geometry_info.psi;
else
center.y=(MagickRealType) (y_offset+geometry_info.psi);
}
/*
Shift the pixel offset point as defined by the provided,
displacement/distortion map. -- Like a lens...
*/
GetPixelInfo(image,&pixel);
image_view=AcquireVirtualCacheView(image,exception);
source_view=AcquireVirtualCacheView(source_image,exception);
canvas_view=AcquireAuthenticCacheView(canvas_image,exception);
for (y=0; y < (ssize_t) source_image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows))
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) source_image->columns; x++)
{
if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns))
{
p+=GetPixelChannels(source_image);
continue;
}
/*
Displace the offset.
*/
offset.x=(double) (horizontal_scale*(GetPixelRed(source_image,p)-
(((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType)
QuantumRange+1.0)/2.0)+center.x+((compose == DisplaceCompositeOp) ?
x : 0);
offset.y=(double) (vertical_scale*(GetPixelGreen(source_image,p)-
(((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType)
QuantumRange+1.0)/2.0)+center.y+((compose == DisplaceCompositeOp) ?
y : 0);
(void) InterpolatePixelInfo(image,image_view,
UndefinedInterpolatePixel,(double) offset.x,(double) offset.y,
&pixel,exception);
/*
Mask with the 'invalid pixel mask' in alpha channel.
*/
pixel.alpha=(MagickRealType) QuantumRange*(QuantumScale*pixel.alpha)*
(QuantumScale*GetPixelAlpha(source_image,p));
SetPixelViaPixelInfo(canvas_image,&pixel,q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(canvas_image);
}
sync=SyncCacheViewAuthenticPixels(canvas_view,exception);
if (sync == MagickFalse)
break;
}
canvas_view=DestroyCacheView(canvas_view);
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
source_image=DestroyImage(source_image);
source_image=canvas_image;
break;
}
case DissolveCompositeOp:
{
/*
Geometry arguments to dissolve factors.
*/
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
source_dissolve=geometry_info.rho/100.0;
canvas_dissolve=1.0;
if ((source_dissolve-MagickEpsilon) < 0.0)
source_dissolve=0.0;
if ((source_dissolve+MagickEpsilon) > 1.0)
{
canvas_dissolve=2.0-source_dissolve;
source_dissolve=1.0;
}
if ((flags & SigmaValue) != 0)
canvas_dissolve=geometry_info.sigma/100.0;
if ((canvas_dissolve-MagickEpsilon) < 0.0)
canvas_dissolve=0.0;
}
break;
}
case BlendCompositeOp:
{
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
source_dissolve=geometry_info.rho/100.0;
canvas_dissolve=1.0-source_dissolve;
if ((flags & SigmaValue) != 0)
canvas_dissolve=geometry_info.sigma/100.0;
}
break;
}
case MathematicsCompositeOp:
{
/*
Just collect the values from "compose:args", setting.
Unused values are set to zero automagically.
Arguments are normally a comma separated list, so this probably should
be changed to some 'general comma list' parser, (with a minimum
number of values)
*/
SetGeometryInfo(&geometry_info);
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
(void) ParseGeometry(value,&geometry_info);
break;
}
case ModulateCompositeOp:
{
/*
Determine the luma and chroma scale.
*/
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
percent_luma=geometry_info.rho;
if ((flags & SigmaValue) != 0)
percent_chroma=geometry_info.sigma;
}
break;
}
case ThresholdCompositeOp:
{
/*
Determine the amount and threshold.
*/
value=GetImageArtifact(image,"compose:args");
if (value != (char *) NULL)
{
flags=ParseGeometry(value,&geometry_info);
amount=geometry_info.rho;
threshold=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
threshold=0.05f;
}
threshold*=QuantumRange;
break;
}
default:
break;
}
/*
Composite image.
*/
status=MagickTrue;
progress=0;
midpoint=((MagickRealType) QuantumRange+1.0)/2;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(source_image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*pixels;
MagickRealType
blue,
chroma,
green,
hue,
luma,
red;
PixelInfo
canvas_pixel,
source_pixel;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
if (clip_to_self != MagickFalse)
{
if (y < y_offset)
continue;
if ((y-y_offset) >= (ssize_t) source_image->rows)
continue;
}
/*
If pixels is NULL, y is outside overlay region.
*/
pixels=(Quantum *) NULL;
p=(Quantum *) NULL;
if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows))
{
p=GetCacheViewVirtualPixels(source_view,0,y-y_offset,
source_image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixels=p;
if (x_offset < 0)
p-=x_offset*GetPixelChannels(source_image);
}
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
hue=0.0;
chroma=0.0;
luma=0.0;
GetPixelInfo(image,&canvas_pixel);
GetPixelInfo(source_image,&source_pixel);
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma;
MagickRealType
alpha,
Da,
Dc,
Dca,
Sa,
Sc,
Sca;
register ssize_t
i;
size_t
channels;
if (clip_to_self != MagickFalse)
{
if (x < x_offset)
{
q+=GetPixelChannels(image);
continue;
}
if ((x-x_offset) >= (ssize_t) source_image->columns)
break;
}
if ((pixels == (Quantum *) NULL) || (x < x_offset) ||
((x-x_offset) >= (ssize_t) source_image->columns))
{
Quantum
source[MaxPixelChannels];
/*
Virtual composite:
Sc: source color.
Dc: canvas color.
*/
(void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source,
exception);
if (GetPixelReadMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
MagickRealType
pixel;
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
PixelTrait source_traits=GetPixelChannelTraits(source_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(source_traits == UndefinedPixelTrait))
continue;
switch (compose)
{
case AlphaCompositeOp:
case ChangeMaskCompositeOp:
case CopyAlphaCompositeOp:
case DstAtopCompositeOp:
case DstInCompositeOp:
case InCompositeOp:
case OutCompositeOp:
case SrcInCompositeOp:
case SrcOutCompositeOp:
{
if (channel == AlphaPixelChannel)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=(MagickRealType) q[i];
break;
}
case ClearCompositeOp:
case CopyCompositeOp:
case ReplaceCompositeOp:
case SrcCompositeOp:
{
if (channel == AlphaPixelChannel)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=0.0;
break;
}
case BlendCompositeOp:
case DissolveCompositeOp:
{
if (channel == AlphaPixelChannel)
pixel=canvas_dissolve*GetPixelAlpha(source_image,source);
else
pixel=(MagickRealType) source[channel];
break;
}
default:
{
pixel=(MagickRealType) source[channel];
break;
}
}
q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
ClampToQuantum(pixel);
}
q+=GetPixelChannels(image);
continue;
}
/*
Authentic composite:
Sa: normalized source alpha.
Da: normalized canvas alpha.
*/
Sa=QuantumScale*GetPixelAlpha(source_image,p);
Da=QuantumScale*GetPixelAlpha(image,q);
switch (compose)
{
case BumpmapCompositeOp:
{
alpha=GetPixelIntensity(source_image,p)*Sa;
break;
}
case ColorBurnCompositeOp:
case ColorDodgeCompositeOp:
case DarkenCompositeOp:
case DifferenceCompositeOp:
case DivideDstCompositeOp:
case DivideSrcCompositeOp:
case ExclusionCompositeOp:
case HardLightCompositeOp:
case HardMixCompositeOp:
case LinearBurnCompositeOp:
case LinearDodgeCompositeOp:
case LinearLightCompositeOp:
case LightenCompositeOp:
case MathematicsCompositeOp:
case MinusDstCompositeOp:
case MinusSrcCompositeOp:
case ModulusAddCompositeOp:
case ModulusSubtractCompositeOp:
case MultiplyCompositeOp:
case OverlayCompositeOp:
case PegtopLightCompositeOp:
case PinLightCompositeOp:
case ScreenCompositeOp:
case SoftLightCompositeOp:
case VividLightCompositeOp:
{
alpha=RoundToUnity(Sa+Da-Sa*Da);
break;
}
case DstAtopCompositeOp:
case DstInCompositeOp:
case InCompositeOp:
case SrcInCompositeOp:
{
alpha=Sa*Da;
break;
}
case DissolveCompositeOp:
{
alpha=source_dissolve*Sa*(-canvas_dissolve*Da)+source_dissolve*Sa+
canvas_dissolve*Da;
break;
}
case DstOverCompositeOp:
case OverCompositeOp:
case SrcOverCompositeOp:
{
alpha=Sa+Da-Sa*Da;
break;
}
case DstOutCompositeOp:
{
alpha=Da*(1.0-Sa);
break;
}
case OutCompositeOp:
case SrcOutCompositeOp:
{
alpha=Sa*(1.0-Da);
break;
}
case BlendCompositeOp:
case PlusCompositeOp:
{
alpha=RoundToUnity(source_dissolve*Sa+canvas_dissolve*Da);
break;
}
case XorCompositeOp:
{
alpha=Sa+Da-2.0*Sa*Da;
break;
}
default:
{
alpha=1.0;
break;
}
}
if (GetPixelReadMask(image,q) == 0)
{
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
continue;
}
switch (compose)
{
case ColorizeCompositeOp:
case HueCompositeOp:
case LuminizeCompositeOp:
case ModulateCompositeOp:
case SaturateCompositeOp:
{
GetPixelInfoPixel(source_image,p,&source_pixel);
GetPixelInfoPixel(image,q,&canvas_pixel);
break;
}
default:
break;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
MagickRealType
pixel,
sans;
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
PixelTrait source_traits=GetPixelChannelTraits(source_image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((source_traits == UndefinedPixelTrait) &&
(((compose != CopyAlphaCompositeOp) &&
(compose != ChangeMaskCompositeOp)) ||
(channel != AlphaPixelChannel)))
continue;
/*
Sc: source color.
Dc: canvas color.
*/
Sc=(MagickRealType) GetPixelChannel(source_image,channel,p);
Dc=(MagickRealType) q[i];
if ((traits & CopyPixelTrait) != 0)
{
/*
Copy channel.
*/
q[i]=Sc;
continue;
}
if (channel == AlphaPixelChannel)
{
/*
Set alpha channel.
*/
switch (compose)
{
case AlphaCompositeOp:
{
pixel=QuantumRange*Sa;
break;
}
case AtopCompositeOp:
case CopyBlackCompositeOp:
case CopyBlueCompositeOp:
case CopyCyanCompositeOp:
case CopyGreenCompositeOp:
case CopyMagentaCompositeOp:
case CopyRedCompositeOp:
case CopyYellowCompositeOp:
case SrcAtopCompositeOp:
case DstCompositeOp:
case NoCompositeOp:
{
pixel=QuantumRange*Da;
break;
}
case ChangeMaskCompositeOp:
{
MagickBooleanType
equivalent;
if (Da < 0.5)
{
pixel=(MagickRealType) TransparentAlpha;
break;
}
equivalent=IsFuzzyEquivalencePixel(source_image,p,image,q);
if (equivalent != MagickFalse)
pixel=(MagickRealType) TransparentAlpha;
else
pixel=(MagickRealType) OpaqueAlpha;
break;
}
case ClearCompositeOp:
{
pixel=(MagickRealType) TransparentAlpha;
break;
}
case ColorizeCompositeOp:
case HueCompositeOp:
case LuminizeCompositeOp:
case SaturateCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=QuantumRange*Da;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=QuantumRange*Sa;
break;
}
if (Sa < Da)
{
pixel=QuantumRange*Da;
break;
}
pixel=QuantumRange*Sa;
break;
}
case CopyAlphaCompositeOp:
{
if ((source_traits & BlendPixelTrait) == 0)
pixel=GetPixelIntensity(source_image,p);
else
pixel=QuantumRange*Sa;
break;
}
case CopyCompositeOp:
case DisplaceCompositeOp:
case DistortCompositeOp:
case DstAtopCompositeOp:
case ReplaceCompositeOp:
case SrcCompositeOp:
{
pixel=QuantumRange*Sa;
break;
}
case DarkenIntensityCompositeOp:
{
pixel=Sa*GetPixelIntensity(source_image,p) <
Da*GetPixelIntensity(image,q) ? Sa : Da;
break;
}
case LightenIntensityCompositeOp:
{
pixel=Sa*GetPixelIntensity(source_image,p) >
Da*GetPixelIntensity(image,q) ? Sa : Da;
break;
}
case ModulateCompositeOp:
{
pixel=QuantumRange*Da;
break;
}
default:
{
pixel=QuantumRange*alpha;
break;
}
}
q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
ClampToQuantum(pixel);
continue;
}
/*
Porter-Duff compositions:
Sca: source normalized color multiplied by alpha.
Dca: normalized canvas color multiplied by alpha.
*/
Sca=QuantumScale*Sa*Sc;
Dca=QuantumScale*Da*Dc;
switch (compose)
{
case DarkenCompositeOp:
case LightenCompositeOp:
case ModulusSubtractCompositeOp:
{
gamma=PerceptibleReciprocal(1.0-alpha);
break;
}
default:
{
gamma=PerceptibleReciprocal(alpha);
break;
}
}
pixel=Dc;
switch (compose)
{
case AlphaCompositeOp:
{
pixel=QuantumRange*Sa;
break;
}
case AtopCompositeOp:
case SrcAtopCompositeOp:
{
pixel=QuantumRange*(Sca*Da+Dca*(1.0-Sa));
break;
}
case BlendCompositeOp:
{
pixel=gamma*(source_dissolve*Sa*Sc+canvas_dissolve*Da*Dc);
break;
}
case BlurCompositeOp:
case CopyCompositeOp:
case ReplaceCompositeOp:
case SrcCompositeOp:
{
pixel=QuantumRange*Sca;
break;
}
case DisplaceCompositeOp:
case DistortCompositeOp:
{
pixel=Sc;
break;
}
case BumpmapCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
pixel=QuantumScale*GetPixelIntensity(source_image,p)*Dc;
break;
}
case ChangeMaskCompositeOp:
{
pixel=Dc;
break;
}
case ClearCompositeOp:
{
pixel=0.0;
break;
}
case ColorBurnCompositeOp:
{
if ((Sca == 0.0) && (Dca == Da))
{
pixel=QuantumRange*gamma*(Sa*Da+Dca*(1.0-Sa));
break;
}
if (Sca == 0.0)
{
pixel=QuantumRange*gamma*(Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Sa*Da-Sa*Da*MagickMin(1.0,(1.0-Dca/Da)*Sa/
Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case ColorDodgeCompositeOp:
{
if ((Sca*Da+Dca*Sa) >= Sa*Da)
pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
else
pixel=QuantumRange*gamma*(Dca*Sa*Sa/(Sa-Sca)+Sca*(1.0-Da)+Dca*
(1.0-Sa));
break;
}
case ColorizeCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&sans,&sans,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&hue,&chroma,&sans);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case CopyAlphaCompositeOp:
{
pixel=Dc;
break;
}
case CopyBlackCompositeOp:
{
if (channel == BlackPixelChannel)
pixel=(MagickRealType) (QuantumRange-
GetPixelBlack(source_image,p));
break;
}
case CopyBlueCompositeOp:
case CopyYellowCompositeOp:
{
if (channel == BluePixelChannel)
pixel=(MagickRealType) GetPixelBlue(source_image,p);
break;
}
case CopyGreenCompositeOp:
case CopyMagentaCompositeOp:
{
if (channel == GreenPixelChannel)
pixel=(MagickRealType) GetPixelGreen(source_image,p);
break;
}
case CopyRedCompositeOp:
case CopyCyanCompositeOp:
{
if (channel == RedPixelChannel)
pixel=(MagickRealType) GetPixelRed(source_image,p);
break;
}
case DarkenCompositeOp:
{
/*
Darken is equivalent to a 'Minimum' method
OR a greyscale version of a binary 'Or'
OR the 'Intersection' of pixel sets.
*/
if ((Sca*Da) < (Dca*Sa))
{
pixel=QuantumRange*(Sca+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*(Dca+Sca*(1.0-Da));
break;
}
case DarkenIntensityCompositeOp:
{
pixel=Sa*GetPixelIntensity(source_image,p) <
Da*GetPixelIntensity(image,q) ? Sc : Dc;
break;
}
case DifferenceCompositeOp:
{
pixel=QuantumRange*gamma*(Sca+Dca-2.0*MagickMin(Sca*Da,Dca*Sa));
break;
}
case DissolveCompositeOp:
{
pixel=gamma*(source_dissolve*Sa*Sc-source_dissolve*Sa*
canvas_dissolve*Da*Dc+canvas_dissolve*Da*Dc);
break;
}
case DivideDstCompositeOp:
{
if ((fabs((double) Sca) < MagickEpsilon) &&
(fabs((double) Dca) < MagickEpsilon))
{
pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
if (fabs((double) Dca) < MagickEpsilon)
{
pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Sca*Da*Da/Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case DivideSrcCompositeOp:
{
if ((fabs((double) Dca) < MagickEpsilon) &&
(fabs((double) Sca) < MagickEpsilon))
{
pixel=QuantumRange*gamma*(Dca*(1.0-Sa)+Sca*(1.0-Da));
break;
}
if (fabs((double) Sca) < MagickEpsilon)
{
pixel=QuantumRange*gamma*(Da*Sa+Dca*(1.0-Sa)+Sca*(1.0-Da));
break;
}
pixel=QuantumRange*gamma*(Dca*Sa*Sa/Sca+Dca*(1.0-Sa)+Sca*(1.0-Da));
break;
}
case DstAtopCompositeOp:
{
pixel=QuantumRange*(Dca*Sa+Sca*(1.0-Da));
break;
}
case DstCompositeOp:
case NoCompositeOp:
{
pixel=QuantumRange*Dca;
break;
}
case DstInCompositeOp:
{
pixel=QuantumRange*(Dca*Sa);
break;
}
case DstOutCompositeOp:
{
pixel=QuantumRange*(Dca*(1.0-Sa));
break;
}
case DstOverCompositeOp:
{
pixel=QuantumRange*gamma*(Dca+Sca*(1.0-Da));
break;
}
case ExclusionCompositeOp:
{
pixel=QuantumRange*gamma*(Sca*Da+Dca*Sa-2.0*Sca*Dca+Sca*(1.0-Da)+
Dca*(1.0-Sa));
break;
}
case HardLightCompositeOp:
{
if ((2.0*Sca) < Sa)
{
pixel=QuantumRange*gamma*(2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-
Sa));
break;
}
pixel=QuantumRange*gamma*(Sa*Da-2.0*(Da-Dca)*(Sa-Sca)+Sca*(1.0-Da)+
Dca*(1.0-Sa));
break;
}
case HardMixCompositeOp:
{
pixel=gamma*(((Sca+Dca) < 1.0) ? 0.0 : QuantumRange);
break;
}
case HueCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&hue,&sans,&sans);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case InCompositeOp:
case SrcInCompositeOp:
{
pixel=QuantumRange*(Sca*Da);
break;
}
case LinearBurnCompositeOp:
{
/*
LinearBurn: as defined by Abode Photoshop, according to
http://www.simplefilter.de/en/basics/mixmods.html is:
f(Sc,Dc) = Sc + Dc - 1
*/
pixel=QuantumRange*gamma*(Sca+Dca-Sa*Da);
break;
}
case LinearDodgeCompositeOp:
{
pixel=gamma*(Sa*Sc+Da*Dc);
break;
}
case LinearLightCompositeOp:
{
/*
LinearLight: as defined by Abode Photoshop, according to
http://www.simplefilter.de/en/basics/mixmods.html is:
f(Sc,Dc) = Dc + 2*Sc - 1
*/
pixel=QuantumRange*gamma*((Sca-Sa)*Da+Sca+Dca);
break;
}
case LightenCompositeOp:
{
if ((Sca*Da) > (Dca*Sa))
{
pixel=QuantumRange*(Sca+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*(Dca+Sca*(1.0-Da));
break;
}
case LightenIntensityCompositeOp:
{
/*
Lighten is equivalent to a 'Maximum' method
OR a greyscale version of a binary 'And'
OR the 'Union' of pixel sets.
*/
pixel=Sa*GetPixelIntensity(source_image,p) >
Da*GetPixelIntensity(image,q) ? Sc : Dc;
break;
}
case LuminizeCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&sans,&sans,&luma);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case MathematicsCompositeOp:
{
/*
'Mathematics' a free form user control mathematical composition
is defined as...
f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D
Where the arguments A,B,C,D are (currently) passed to composite
as a command separated 'geometry' string in "compose:args" image
artifact.
A = a->rho, B = a->sigma, C = a->xi, D = a->psi
Applying the SVG transparency formula (see above), we get...
Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa)
Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) +
Dca*(1.0-Sa)
*/
pixel=QuantumRange*gamma*(geometry_info.rho*Sca*Dca+
geometry_info.sigma*Sca*Da+geometry_info.xi*Dca*Sa+
geometry_info.psi*Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case MinusDstCompositeOp:
{
pixel=gamma*(Sa*Sc+Da*Dc-2.0*Da*Dc*Sa);
break;
}
case MinusSrcCompositeOp:
{
/*
Minus source from canvas.
f(Sc,Dc) = Sc - Dc
*/
pixel=gamma*(Da*Dc+Sa*Sc-2.0*Sa*Sc*Da);
break;
}
case ModulateCompositeOp:
{
ssize_t
offset;
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
offset=(ssize_t) (GetPixelIntensity(source_image,p)-midpoint);
if (offset == 0)
{
pixel=Dc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
luma+=(0.01*percent_luma*offset)/midpoint;
chroma*=0.01*percent_chroma;
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case ModulusAddCompositeOp:
{
pixel=Sc+Dc;
while (pixel > QuantumRange)
pixel-=QuantumRange;
while (pixel < 0.0)
pixel+=QuantumRange;
pixel=(Sa*Da*pixel+Sa*Sc*(1.0-Da)+Da*Dc*(1.0-Sa));
break;
}
case ModulusSubtractCompositeOp:
{
pixel=Sc-Dc;
while (pixel > QuantumRange)
pixel-=QuantumRange;
while (pixel < 0.0)
pixel+=QuantumRange;
pixel=(Sa*Da*pixel+Sa*Sc*(1.0-Da)+Da*Dc*(1.0-Sa));
break;
}
case MultiplyCompositeOp:
{
pixel=QuantumRange*gamma*(Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case OutCompositeOp:
case SrcOutCompositeOp:
{
pixel=QuantumRange*(Sca*(1.0-Da));
break;
}
case OverCompositeOp:
case SrcOverCompositeOp:
{
pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa));
break;
}
case OverlayCompositeOp:
{
if ((2.0*Dca) < Da)
{
pixel=QuantumRange*gamma*(2.0*Dca*Sca+Dca*(1.0-Sa)+Sca*(1.0-
Da));
break;
}
pixel=QuantumRange*gamma*(Da*Sa-2.0*(Sa-Sca)*(Da-Dca)+Dca*(1.0-Sa)+
Sca*(1.0-Da));
break;
}
case PegtopLightCompositeOp:
{
/*
PegTop: A Soft-Light alternative: A continuous version of the
Softlight function, producing very similar results.
f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc
http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm.
*/
if (fabs((double) Da) < MagickEpsilon)
{
pixel=QuantumRange*gamma*(Sca);
break;
}
pixel=QuantumRange*gamma*(Dca*Dca*(Sa-2.0*Sca)/Da+Sca*(2.0*Dca+1.0-
Da)+Dca*(1.0-Sa));
break;
}
case PinLightCompositeOp:
{
/*
PinLight: A Photoshop 7 composition method
http://www.simplefilter.de/en/basics/mixmods.html
f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ? 2*Sc : Dc
*/
if ((Dca*Sa) < (Da*(2.0*Sca-Sa)))
{
pixel=QuantumRange*gamma*(Sca*(Da+1.0)-Sa*Da+Dca*(1.0-Sa));
break;
}
if ((Dca*Sa) > (2.0*Sca*Da))
{
pixel=QuantumRange*gamma*(Sca*Da+Sca+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca);
break;
}
case PlusCompositeOp:
{
pixel=QuantumRange*(Sca+Dca);
break;
}
case SaturateCompositeOp:
{
if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon)
{
pixel=Dc;
break;
}
if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon)
{
pixel=Sc;
break;
}
CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue,
&hue,&chroma,&luma);
CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue,
&sans,&chroma,&sans);
HCLComposite(hue,chroma,luma,&red,&green,&blue);
switch (channel)
{
case RedPixelChannel: pixel=red; break;
case GreenPixelChannel: pixel=green; break;
case BluePixelChannel: pixel=blue; break;
default: pixel=Dc; break;
}
break;
}
case ScreenCompositeOp:
{
/*
Screen: a negated multiply:
f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc)
*/
pixel=QuantumRange*gamma*(Sca+Dca-Sca*Dca);
break;
}
case SoftLightCompositeOp:
{
if ((2.0*Sca) < Sa)
{
pixel=QuantumRange*gamma*(Dca*(Sa+(2.0*Sca-Sa)*(1.0-(Dca/Da)))+
Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
if (((2.0*Sca) > Sa) && ((4.0*Dca) <= Da))
{
pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(4.0*(Dca/Da)*
(4.0*(Dca/Da)+1.0)*((Dca/Da)-1.0)+7.0*(Dca/Da))+Sca*(1.0-Da)+
Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(pow((Dca/Da),0.5)-
(Dca/Da))+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
case ThresholdCompositeOp:
{
MagickRealType
delta;
delta=Sc-Dc;
if ((MagickRealType) fabs((double) (2.0*delta)) < threshold)
{
pixel=gamma*Dc;
break;
}
pixel=gamma*(Dc+delta*amount);
break;
}
case VividLightCompositeOp:
{
/*
VividLight: A Photoshop 7 composition method. See
http://www.simplefilter.de/en/basics/mixmods.html.
f(Sc,Dc) = (2*Sc < 1) ? 1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc))
*/
if ((fabs((double) Sa) < MagickEpsilon) ||
(fabs((double) (Sca-Sa)) < MagickEpsilon))
{
pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
if ((2.0*Sca) <= Sa)
{
pixel=QuantumRange*gamma*(Sa*(Da+Sa*(Dca-Da)/(2.0*Sca))+Sca*
(1.0-Da)+Dca*(1.0-Sa));
break;
}
pixel=QuantumRange*gamma*(Dca*Sa*Sa/(2.0*(Sa-Sca))+Sca*(1.0-Da)+Dca*
(1.0-Sa));
break;
}
case XorCompositeOp:
{
pixel=QuantumRange*(Sca*(1.0-Da)+Dca*(1.0-Sa));
break;
}
default:
{
pixel=Sc;
break;
}
}
q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel);
}
p+=GetPixelChannels(source_image);
channels=GetPixelChannels(source_image);
if (p >= (pixels+channels*source_image->columns))
p=pixels;
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_CompositeImage)
#endif
proceed=SetImageProgress(image,CompositeImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
if (canvas_image != (Image * ) NULL)
canvas_image=DestroyImage(canvas_image);
else
source_image=DestroyImage(source_image);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T e x t u r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TextureImage() repeatedly tiles the texture image across and down the image
% canvas.
%
% The format of the TextureImage method is:
%
% MagickBooleanType TextureImage(Image *image,const Image *texture,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o texture: This image is the texture to layer on the background.
%
*/
MagickExport MagickBooleanType TextureImage(Image *image,const Image *texture,
ExceptionInfo *exception)
{
#define TextureImageTag "Texture/Image"
CacheView
*image_view,
*texture_view;
Image
*texture_image;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (texture == (const Image *) NULL)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
/* Work on a private clone so the caller's texture is never modified. */
texture_image=CloneImage(texture,0,0,MagickTrue,exception);
if (texture_image == (const Image *) NULL)
return(MagickFalse);
(void) TransformImageColorspace(texture_image,image->colorspace,exception);
(void) SetImageVirtualPixelMethod(texture_image,TileVirtualPixelMethod,
exception);
status=MagickTrue;
/*
General path: any compose mode other than a plain opaque copy must go
through CompositeImage() so blend semantics and alpha are honored.
*/
if ((image->compose != CopyCompositeOp) &&
((image->compose != OverCompositeOp) ||
(image->alpha_trait != UndefinedPixelTrait) ||
(texture_image->alpha_trait != UndefinedPixelTrait)))
{
/*
Tile texture onto the image background.
*/
for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) texture_image->rows)
{
register ssize_t
x;
if (status == MagickFalse)
continue;
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
{
MagickBooleanType
thread_status;
/* Composite one tile at (x,y), honoring the texture's tile offset. */
thread_status=CompositeImage(image,texture_image,image->compose,
MagickTrue,x+texture_image->tile_offset.x,y+
texture_image->tile_offset.y,exception);
if (thread_status == MagickFalse)
{
status=thread_status;
break;
}
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
(void) SetImageProgress(image,TextureImageTag,(MagickOffsetType)
image->rows,image->rows);
texture_image=DestroyImage(texture_image);
return(status);
}
/*
Tile texture onto the image background (optimized): straight pixel copy,
one image row at a time, parallelized across rows.
*/
status=MagickTrue;
texture_view=AcquireVirtualCacheView(texture_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(texture_image,image,1,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*p,
*pixels;
register ssize_t
x;
register Quantum
*q;
size_t
width;
if (status == MagickFalse)
continue;
/* Source row wraps vertically via the modulo on the texture rows. */
pixels=GetCacheViewVirtualPixels(texture_view,texture_image->tile_offset.x,
(y+texture_image->tile_offset.y) % texture_image->rows,
texture_image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if ((pixels == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
{
register ssize_t
j;
p=pixels;
/* Clip the final tile so we never write past the image row. */
width=texture_image->columns;
if ((x+(ssize_t) width) > (ssize_t) image->columns)
width=image->columns-x;
for (j=0; j < (ssize_t) width; j++)
{
register ssize_t
i;
if (GetPixelReadMask(image,q) == 0)
{
p+=GetPixelChannels(texture_image);
q+=GetPixelChannels(image);
continue;
}
/* Copy only channels defined in both source and destination. */
for (i=0; i < (ssize_t) GetPixelChannels(texture_image); i++)
{
PixelChannel channel=GetPixelChannelChannel(texture_image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
PixelTrait texture_traits=GetPixelChannelTraits(texture_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(texture_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(image,channel,p[i],q);
}
p+=GetPixelChannels(texture_image);
q+=GetPixelChannels(image);
}
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
texture_view=DestroyCacheView(texture_view);
image_view=DestroyCacheView(image_view);
texture_image=DestroyImage(texture_image);
return(status);
}
|
fm_loss.h | /**
* Copyright (c) 2015 by Contributors
*/
#ifndef DIFACTO_LOSS_FM_LOSS_H_
#define DIFACTO_LOSS_FM_LOSS_H_
#include <vector>
#include <cmath>
#include "difacto/base.h"
#include "dmlc/data.h"
#include "dmlc/io.h"
#include "difacto/loss.h"
#include "common/spmv.h"
#include "common/spmm.h"
#include "./logit_loss.h"
namespace difacto {
/**
* \brief parameters for FM loss
*/
struct FMLossParam : public dmlc::Parameter<FMLossParam> {
/**
* \brief the embedding dimension of the second-order factors V
*
* A value of 0 disables the factor term entirely: FMLoss then degenerates
* to a plain linear (logistic) model.
*/
int V_dim;
DMLC_DECLARE_PARAMETER(FMLossParam) {
// Upper bound 10000 guards against accidental huge embeddings.
DMLC_DECLARE_FIELD(V_dim).set_range(0, 10000);
}
};
/**
* \brief the factorization machine loss
* :math:`f(x) = \langle w, x \rangle + \frac{1}{2} \left( \|V x\|_2^2 - \sum_{i=1}^d x_i^2 \|V_i\|_2^2 \right)`
*/
class FMLoss : public Loss {
public:
FMLoss() {}
virtual ~FMLoss() {}
// Parse kwargs into param_; unknown keys are returned to the caller.
KWArgs Init(const KWArgs& kwargs) override {
return param_.InitAllowUnknown(kwargs);
}
/**
* \brief perform prediction
*
* pred = X * w + .5 * sum((X*V).^2 - (X.*X)*(V.*V), 2);
*
* where
* - sum(A, 2) : sum the rows of A
* - .* : element-wise times
*
* @param data the data
* @param param input parameters
* - param[0], real_t vector, the weights
* - param[1], int vector, the w positions
* - param[2], int vector, the V positions
* @param pred predict output, should be pre-allocated
*/
void Predict(const dmlc::RowBlock<unsigned>& data,
const std::vector<SArray<char>>& param,
SArray<real_t>* pred) override {
CHECK_EQ(param.size(), 3);
Predict(data,
SArray<real_t>(param[0]),
SArray<int>(param[1]),
SArray<int>(param[2]),
pred);
}
// Typed implementation. Also fills the caches XV_ and XX_, which CalcGrad
// reuses; CalcGrad must therefore be called after Predict on the same data.
void Predict(const dmlc::RowBlock<unsigned>& data,
const SArray<real_t>& weights,
const SArray<int>& w_pos,
const SArray<int>& V_pos,
SArray<real_t>* pred) {
// pred = X * w (first-order/linear term)
SArray<real_t> w = weights;
SpMV::Times(data, w, pred, nthreads_, w_pos, {});
int V_dim = param_.V_dim;
if (V_dim == 0) return;
SArray<real_t> V = weights;
// XV_ = X*V (cached for CalcGrad)
XV_.clear();
XV_.resize(data.size * V_dim, 0);
SpMM::Times(data, V, V_dim, &XV_, nthreads_, V_pos);
// XX = X.*X (squared feature values; cached in XX_ for CalcGrad)
auto XX = data;
if (XX.value) {
XX_.clear();
XX_.CopyFrom(XX.value+XX.offset[0], XX.offset[XX.size] - XX.offset[0]);
for (auto& v : XX_) v *= v;
XX.value = XX_.data();
}
// VV = V.*V (element-wise square of the factor weights)
SArray<real_t> VV(V.size());
#pragma omp parallel for num_threads(nthreads_)
for (size_t i = 0; i < V_pos.size(); ++i) {
int p = V_pos[i];
if (p < 0) continue;  // feature has no embedding slot
for (int j = 0; j < V_dim; ++j) VV[p+j] = V[p+j] * V[p+j];
}
// XXVV = XX*VV
SArray<real_t> XXVV(XV_.size());
SpMM::Times(XX, VV, V_dim, &XXVV, nthreads_, V_pos);
// pred += .5 * sum((X*V).^2 - XXVV, 2) (second-order term)
#pragma omp parallel for num_threads(nthreads_)
for (size_t i = 0; i < pred->size(); ++i) {
real_t* t = XV_.data() + i * V_dim;
real_t* tt = XXVV.data() + i * V_dim;
real_t s = 0;
for (int j = 0; j < V_dim; ++j) s += t[j] * t[j] - tt[j];
(*pred)[i] += .5 * s;
}
// projection: clamp to [-20, 20] to keep exp() in CalcGrad well-behaved
for (auto& p : *pred) p = p > 20 ? 20 : (p < -20 ? -20 : p);
}
/*!
* \brief compute the gradients
*
* p = - y ./ (1 + exp (y .* pred));
* grad_w = X' * p;
* grad_u = X' * diag(p) * X * V - diag((X.*X)'*p) * V
*
* @param data the data
* @param param input parameters
* - param[0], real_t vector, the weights
* - param[1], int vector, the w positions
* - param[2], int vector, the V positions
* - param[3], real_t vector, the predict output
* @param grad the results
*/
void CalcGrad(const dmlc::RowBlock<unsigned>& data,
const std::vector<SArray<char>>& param,
SArray<real_t>* grad) override {
CHECK_EQ(param.size(), 4);
CalcGrad(data,
SArray<real_t>(param[0]),
SArray<int>(param[1]),
SArray<int>(param[2]),
SArray<real_t>(param[3]),
grad);
}
// Typed implementation. Consumes the XV_ / XX_ caches filled by Predict.
void CalcGrad(const dmlc::RowBlock<unsigned>& data,
const SArray<real_t>& weights,
const SArray<int>& w_pos,
const SArray<int>& V_pos,
const SArray<real_t>& pred,
SArray<real_t>* grad) {
// p = -y ./ (1 + exp(y .* pred)), the logistic-loss residual per example
SArray<real_t> p; p.CopyFrom(pred);
CHECK_EQ(p.size(), data.size);
#pragma omp parallel for num_threads(nthreads_)
for (size_t i = 0; i < p.size(); ++i) {
real_t y = data.label[i] > 0 ? 1 : -1;  // labels mapped to {-1, +1}
p[i] = - y / (1 + std::exp(y * p[i]));
}
// grad_w = X' * p
SpMV::TransTimes(data, p, grad, nthreads_, {}, w_pos);
// grad_u = X' * diag(p) * X * V - diag((X.*X)'*p) * V
int V_dim = param_.V_dim;
if (V_dim == 0) return;
SArray<real_t> V = weights;
// XXp = (X.*X)'*p, reusing the squared values cached by Predict
auto XX = data;
if (XX.value) {
CHECK_EQ(XX_.size(), XX.offset[XX.size] - XX.offset[0]);
XX.value = XX_.data();
}
SArray<real_t> XXp(V_pos.size());
SpMV::TransTimes(XX, p, &XXp, nthreads_);
// grad_u -= diag(XXp) * V
// NOTE: the loop-local int p shadows the residual array p above.
#pragma omp parallel for num_threads(nthreads_)
for (size_t i = 0; i < V_pos.size(); ++i) {
int p = V_pos[i];
if (p < 0) continue;
for (int j = 0; j < V_dim; ++j) {
(*grad)[p+j] -= V[p+j] * XXp[i];
}
}
// XV_ = diag(p) * X * V (scales the cached X*V rows in place)
CHECK_EQ(XV_.size(), data.size * V_dim);
#pragma omp parallel for num_threads(nthreads_)
for (size_t i = 0; i < p.size(); ++i) {
for (int j = 0; j < V_dim; ++j) XV_[i*V_dim+j] *= p[i];
}
// grad_u += X' * diag(p) * X * V
SpMM::TransTimes(data, XV_, V_dim, grad, nthreads_, {}, V_pos);
}
private:
// X*V cache, written by Predict and consumed (and clobbered) by CalcGrad.
SArray<real_t> XV_;
// element-wise squared feature values, cached by Predict for CalcGrad.
SArray<dmlc::real_t> XX_;
FMLossParam param_;
};
} // namespace difacto
#endif // DIFACTO_LOSS_FM_LOSS_H_
|
GB_unaryop__minv_int64_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_int64_int64
// op(A') function: GB_tran__minv_int64_int64
// C type: int64_t
// A type: int64_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 64)
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 64) ;
// casting
#define GB_CASTING(z, x) \
int64_t z = (int64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = minv (cast (Ax)): apply the int64 modular-inverse unary op to a
// dense array of anz entries, parallelized with a static OpenMP schedule.
GrB_Info GB_unop__minv_int64_int64
(
int64_t *restrict Cx,
const int64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// operator compiled out by GxB_NO_MINV / GxB_NO_INT64; generic case is used
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// Cx [p] = GB_IMINV_SIGNED ((int64_t) Ax [p], 64)
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv (cast (A')): transpose A, typecast, and apply the unary operator.
// The loop body lives in the shared template GB_unaryop_transpose.c, which
// is specialized here via the GB_* macros defined above.
GrB_Info GB_tran__minv_int64_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
// operator compiled out; caller falls back to the generic implementation
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
formal-actual-args-1.c | #include <assert.h>
struct Cube
{
int x;
int y;
int z;
};
#pragma omp declare target
/* Map a few selected short values to fixed return codes, -1 otherwise.
   Compiled for the target device (inside "declare target"); exercises
   char->short argument promotion from the caller.  The original had an
   unreachable `break` after each `return`, removed here.  */
int
foo (short a)
{
  switch (a)
    {
    case 1:
      return 11;
    case 33:
      return 333;
    case 55:
      return 55;
    default:
      return -1;
    }
}
/* Overwrite the parameter through a pointer alias, then return the sum of
   the direct and aliased reads.  Both reads see the stored 100, so the
   result is always 200 regardless of the argument; the test exercises
   address-taken arguments in target regions.  */
int
bar (int a)
{
  int *ptr = &a;
  *ptr = 100;
  return a + *ptr;
}
struct Cube
baz (struct Cube c)
{
c.x = 11;
return c;
}
#pragma omp end declare target
#define s 100
/* Driver: three formal/actual argument-passing tests on the target device. */
int
main (int argc)
{
  /* Test 1: argument types: char to short. */
  int array[s];
#pragma omp target map(tofrom : array[ : s])
  {
    for (char i = 0; i < s; i++)
      array[i] = foo (i);
  }
  for (int i = 0; i < s; i++)
    assert (array[i] == foo (i));

  /* Test 2: argument address is taken. */
  int v = 2;
#pragma omp target map(tofrom : v)
  v = bar (v);
  assert (v == 200);

  /* Test 3: passing a structure as a function argument.
     The input cube c is read on the device so it must be mapped TO it,
     and the result r is produced there so it must be mapped FROM it.
     The original clauses were inverted (map(to : r) map(from : c)),
     which under real offloading leaves r unset on the host and c unset
     on the device. */
  struct Cube r;
  struct Cube c = {.x = 1, .y = 2, .z = 3};
#pragma omp target map(from : r) map(to : c)
  r = baz (c);
  assert (r.x == 11);
  assert (r.y == c.y);
  assert (r.z == c.z);
}
|
text_parser.h | /*!
* Copyright (c) 2015 by Contributors
* \file text_parser.h
* \brief iterator parser to parse text format
* \author Tianqi Chen
*/
#ifndef DMLC_DATA_TEXT_PARSER_H_
#define DMLC_DATA_TEXT_PARSER_H_
#include <dmlc/data.h>
#include <dmlc/omp.h>
#include <vector>
#include <cstring>
#include <algorithm>
#include "./row_block.h"
#include "./parser.h"
namespace DMLC {
namespace data {
/*!
* \brief Text parser that parses the input lines
* and returns rows in input data
*/
template <typename IndexType>
class TextParserBase : public ParserImpl<IndexType> {
public:
// \param source input split providing raw text chunks (ownership taken)
// \param nthread upper bound on parser threads requested by the caller
explicit TextParserBase(InputSplit *source,
int nthread)
: bytes_read_(0), source_(source) {
int maxthread;
// Every thread writes the same value, so the unsynchronized write is
// benign; the parallel region only serves to initialize the runtime.
#pragma omp parallel
{
maxthread = std::max(omp_get_num_procs() / 2 - 4, 1);
}
// Cap the requested thread count by the machine-derived maximum.
nthread_ = std::min(maxthread, nthread);
}
virtual ~TextParserBase() {
delete source_;  // constructor took ownership of the split
}
virtual void BeforeFirst(void) {
source_->BeforeFirst();
}
virtual size_t BytesRead(void) const {
return bytes_read_;
}
virtual bool ParseNext(std::vector<RowBlockContainer<IndexType> > *data) {
return FillData(data);
}
protected:
/*!
* \brief parse data into out
* \param begin beginning of buffer
* \param end end of buffer
*/
virtual void ParseBlock(char *begin,
char *end,
RowBlockContainer<IndexType> *out) = 0;
/*!
* \brief read in next several blocks of data
* \param data vector of data to be returned
* \return true if the data is loaded, false if reach end
*/
inline bool FillData(std::vector<RowBlockContainer<IndexType> > *data);
/*!
* \brief start from bptr, go backward and find first endof line
* \param bptr end position to go backward
* \param begin the beginning position of buffer
* \return position of first endof line going backward, or begin if none
*/
inline char* BackFindEndLine(char *bptr,
char *begin) {
for (; bptr != begin; --bptr) {
if (*bptr == '\n' || *bptr == '\r') return bptr;
}
return begin;
}
private:
// configured thread cap (see constructor)
int nthread_;
// number of bytes read so far, reported via BytesRead()
size_t bytes_read_;
// source split that provides the data; owned by this object
InputSplit *source_;
};
// implementation
// Pull the next chunk from the input split and parse it in parallel:
// the chunk is cut into nthread byte ranges, each range is snapped back
// to the previous line boundary, and each thread parses its slice into
// its own RowBlockContainer so no locking is needed.
template <typename IndexType>
inline bool TextParserBase<IndexType>::
FillData(std::vector<RowBlockContainer<IndexType> > *data) {
InputSplit::Blob chunk;
if (!source_->NextChunk(&chunk)) return false;
// NOTE(review): this uses the OpenMP default instead of the nthread_
// cap computed in the constructor, so the constructor's nthread argument
// appears to be ignored here -- confirm whether that is intentional.
const int nthread = omp_get_max_threads();
// reserve space for data
data->resize(nthread);
bytes_read_ += chunk.size;
CHECK_NE(chunk.size, 0U);
char *head = reinterpret_cast<char*>(chunk.dptr);
#pragma omp parallel num_threads(nthread)
{
// threadid
int tid = omp_get_thread_num();
// even byte split of the chunk; boundaries snapped to line ends below
size_t nstep = (chunk.size + nthread - 1) / nthread;
size_t sbegin = std::min(tid * nstep, chunk.size);
size_t send = std::min((tid + 1) * nstep, chunk.size);
char *pbegin = BackFindEndLine(head + sbegin, head);
char *pend;
if (tid + 1 == nthread) {
pend = head + send;  // last thread takes the chunk tail as-is
} else {
pend = BackFindEndLine(head + send, head);
}
ParseBlock(pbegin, pend, &(*data)[tid]);
}
this->data_ptr_ = 0;
return true;
}
} // namespace data
} // namespace DMLC
#endif // DMLC_DATA_TEXT_PARSER_H_
|
syrk_teams.c | /**
* syrk.c: This file was adapted from PolyBench/GPU 1.0 test suite
* to run on GPU with OpenMP 4.0 pragmas and OpenCL driver.
*
* http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*
* Contacts: Marcio M Pereira <mpereira@ic.unicamp.br>
* Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br>
* Luís Felipe Mattos <ra107822@students.ic.unicamp.br>
*/
#include <assert.h>
#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#include "../../common/polybenchUtilFuncts.h"
// define the error threshold for the results "not matching"
#define ERROR_THRESHOLD 0.05
#define GPU 1
/* Problem size */
#define N 1024
#define M 1024
/* Declared constant values for alpha and beta */
/* (same as values in PolyBench 2.0) */
#define alpha 12435
#define beta 4546
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
/* Seed the input matrix A with i*j/N and give C (CPU result buffer) and
   D (GPU result buffer) identical (i*j+2)/N contents so both kernels
   start from the same data. */
void init_arrays(DATA_TYPE *A, DATA_TYPE *C, DATA_TYPE *D) {
  for (int row = 0; row < N; row++) {
    for (int col = 0; col < M; col++) {
      A[row * M + col] = ((DATA_TYPE)row * col) / N;
      DATA_TYPE seed = ((DATA_TYPE)row * col + 2) / N;
      C[row * M + col] = seed;
      D[row * M + col] = seed;
    }
  }
}
/* Count the elements where the CPU result C and GPU result D differ by
   more than ERROR_THRESHOLD percent, and report the total. */
void compareResults(DATA_TYPE *C, DATA_TYPE *D) {
  int mismatches = 0;
  for (int i = 0; i < N; i++)
    for (int j = 0; j < M; j++)
      if (percentDiff(C[i * M + j], D[i * M + j]) > ERROR_THRESHOLD)
        mismatches++;
  printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
         "Percent: %d\n",
         ERROR_THRESHOLD, mismatches);
}
/* Reference CPU implementation of SYRK: C = beta*C + alpha*A*A^T for an
   N x M row-major C (leading dimension M).
   Fix: the accumulation loop indexed C with i*N+j while every other loop
   (including the GPU kernel) uses i*M+j; identical while N == M, but a
   latent out-of-stride bug for N != M. */
void syrk(DATA_TYPE *A, DATA_TYPE *C) {
  /* Scale the destination by beta. */
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < M; j++) {
      C[i * M + j] *= beta;
    }
  }
  /* Accumulate alpha * A * A^T. */
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < M; j++) {
      for (int k = 0; k < M; k++) {
        C[i * M + j] += alpha * A[i * M + k] * A[j * M + k];
      }
    }
  }
}
/* Offloaded SYRK: D = beta*D + alpha*A*A^T, computed on the device.
   A is read-only on the device (map to); D carries data both ways. */
void syrkGPU(DATA_TYPE *A, DATA_TYPE *D) {
#pragma omp target device(GPU) map(to : A[:N * M]) map(tofrom : D[:N * M])
{
#pragma omp teams
{
/* First pass: scale D by beta, one (i,j) element per iteration. */
#pragma omp distribute parallel for collapse(2)
for (int i = 0; i < N; i++) {
for (int j = 0; j < M; j++) {
D[i * M + j] *= beta;
}
}
/* Second pass: accumulate alpha * A * A^T into D. */
#pragma omp distribute parallel for collapse(2)
for (int i = 0; i < N; i++) {
for (int j = 0; j < M; j++) {
for (int k = 0; k < M; k++) {
D[i * M + j] += alpha * A[i * M + k] * A[j * M + k];
}
}
}
}
}
}
int main() {
double t_start, t_end, t_start_GPU, t_end_GPU;
DATA_TYPE *A;
DATA_TYPE *C;
DATA_TYPE *D;
A = (DATA_TYPE *)malloc(N * M * sizeof(DATA_TYPE));
C = (DATA_TYPE *)malloc(N * M * sizeof(DATA_TYPE));
D = (DATA_TYPE *)malloc(N * M * sizeof(DATA_TYPE));
fprintf(stdout, "<< Symmetric rank-k operations >>\n");
init_arrays(A, C, D);
t_start_GPU = rtclock();
syrkGPU(A, D);
t_end_GPU = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end_GPU - t_start_GPU);
t_start = rtclock();
syrk(A, C);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
compareResults(C, D);
free(A);
free(C);
free(D);
return 0;
}
|
NETLM_fmt_plug.c | /*
* NETLM_fmt.c -- LM Challenge/Response
*
* Written by JoMo-Kun <jmk at foofus.net> in 2007
* and placed in the public domain.
*
* Performance and OMP fixes by magnum 2011
*
* This algorithm is designed for performing brute-force cracking of the LM
* challenge/response pairs exchanged during network-based authentication
* attempts [1]. The captured challenge/response pairs from these attempts
* should be stored using the L0phtCrack 2.0 LC format, specifically:
* username:unused:unused:lm response:ntlm response:challenge. For example:
*
* CORP\Administrator:::25B2B477CE101D83648BB087CE7A1C217F51C7FC64C0EBB1::
* C8BD0C1630A9ECF7A95F494A8F0B2CB4A3F25B1225514304:1122334455667788
*
* It should be noted that a LM authentication response is not same as a LM
* password hash, which can be extracted using tools such as FgDump [2]. LM
* responses can be gathered via normal network capture or via tools which
* perform layer 2 attacks, such as Ettercap [3] and Cain [4]. The responses can
* also be harvested using a modified Samba service [5] in conjunction with
* some trickery to convince the user to connect to it. I leave what that
* trickery may actually be as an exercise for the reader (HINT: Karma, NMB
* broadcasts, IE, Outlook, social engineering, ...).
*
* [1] http://davenport.sourceforge.net/ntlm.html#theLmResponse
* [2] http://www.foofus.net/~fizzgig/fgdump/
* [3] http://ettercap.sourceforge.net/
* [4] http://www.oxid.it/cain.html
* [5] http://www.foofus.net/jmk/smbchallenge.html
*
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_NETLM;
#elif FMT_REGISTERS_H
john_register_one(&fmt_NETLM);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#ifdef __MIC__
#ifndef OMP_SCALE
#define OMP_SCALE 1024
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 131072 // core i7 no HT
#endif
#endif // __MIC__
#endif
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "memory.h"
#include "unicode.h"
#include <openssl/des.h>
#include "memdbg.h"
#ifndef uchar
#define uchar unsigned char
#endif
#define FORMAT_LABEL "netlm"
#define FORMAT_NAME "LM C/R"
#define FORMAT_TAG "$NETLM$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "DES 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 14
#define PARTIAL_BINARY_SIZE 8
#define BINARY_SIZE 24
#define BINARY_ALIGN 4
#define SALT_SIZE 8
#define SALT_ALIGN 4
#define CIPHERTEXT_LENGTH 48
#define TOTAL_LENGTH 8 + 2 * SALT_SIZE + CIPHERTEXT_LENGTH
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static struct fmt_tests tests[] = {
{"", "G3RG3P00!", {"User", "", "", "6E1EC36D3417CE9E09A4424309F116C4C991948DAEB4ADAD", "ntlm-hash", "1122334455667788"} },
{"$NETLM$1122334455667788$16A7FDFE0CA109B937BFFB041F0E5B2D8B94A97D3FCA1A18", "hiyagerge"},
{"$NETLM$1122334455667788$B3A1B87DBBD4DF3CFA296198DD390C2F4E2E93C5C07B1D8B", "MEDUSAFGDUMP12"},
{"$NETLM$1122334455667788$0836F085B124F33895875FB1951905DD2F85252CC731BB25", "cory21"},
{"$NETLM$1122334455667788$6E1EC36D3417CE9E09A4424309F116C4C991948DAEB4ADAD", "G3RG3P00!"},
{"", "HIYAGERGE", {"User", "", "", "16A7FDFE0CA109B937BFFB041F0E5B2D8B94A97D3FCA1A18", "ntlm-hash", "1122334455667788"} },
{"", "MEDUSAFGDUMP12", {"User", "", "", "B3A1B87DBBD4DF3CFA296198DD390C2F4E2E93C5C07B1D8B", "ntlm-hash", "1122334455667788"} },
{"", "CORY21", {"User", "", "", "0836F085B124F33895875FB1951905DD2F85252CC731BB25", "ntlm-hash", "1122334455667788"} },
// repeat in exactly the same format that is used in john.pot (lower case hex)
{"$NETLM$1122334455667788$0836f085b124f33895875fb1951905dd2f85252cc731bb25", "CORY21"},
{NULL}
};
static uchar (*saved_key)[21];
static uchar (*saved_plain)[PLAINTEXT_LENGTH + 1];
static uchar (*output)[PARTIAL_BINARY_SIZE];
static uchar *challenge;
/* Allocate the per-candidate buffers (DES key material, plaintext copies,
 * partial-binary outputs), scaling max_keys_per_crypt by the OpenMP thread
 * count times OMP_SCALE so each thread gets a large batch. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
saved_plain = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_plain));
output = mem_calloc(self->params.max_keys_per_crypt, sizeof(*output));
}
/* Release the per-candidate buffers allocated in init().  The frees are
 * order-independent; done in allocation order. */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(saved_plain);
	MEM_FREE(output);
}
/* Accept only well-formed "$NETLM$<16 hex challenge>$<48 hex response>"
 * hashes; reject anything that belongs to the NTLM-ESS format. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
		return 0;
	if (strlen(ciphertext) < TOTAL_LENGTH)
		return 0;
	if (ciphertext[23] != '$')
		return 0;
	/* A response whose second half is all zeroes is NTLM ESS C/R,
	   handled by a different format. */
	if (strncmp(&ciphertext[24 + 2 * SALT_SIZE],
	            "00000000000000000000000000000000", 32) == 0)
		return 0;
	/* The response must be exactly CIPHERTEXT_LENGTH hex digits,
	   immediately followed by the terminating NUL. */
	p = &ciphertext[24];
	while (atoi16[ARCH_INDEX(*p)] != 0x7F)
		p++;
	return (*p == 0 && p - ciphertext - 24 == CIPHERTEXT_LENGTH) ? 1 : 0;
}
/* Build a canonical "$NETLM$challenge$response" string from raw LC-format
 * fields (user:unused:unused:LM resp:NTLM resp:challenge), or return the
 * original field untouched when the input is not an LM C/R.
 * NOTE(review): the variable names are misleading -- per the header-comment
 * field layout, srv_challenge holds the LM *response* (field 3) and
 * cli_challenge holds the *challenge* (field 5); verify against callers. */
static char *prepare(char *split_fields[10], struct fmt_main *self)
{
char *cp;
char *srv_challenge = split_fields[3];
char *nethashv2 = split_fields[4];
char *cli_challenge = split_fields[5];
/* Already canonical -- pass through unchanged. */
if (!strncmp(split_fields[1], FORMAT_TAG, FORMAT_TAG_LEN))
return split_fields[1];
if (!srv_challenge || !nethashv2 || !cli_challenge)
return split_fields[1];
if (strlen(srv_challenge) != CIPHERTEXT_LENGTH)
return split_fields[1];
// if LMresp == NTresp then it's NTLM-only, not LM
if (!strncmp(srv_challenge, nethashv2, 48))
return split_fields[1];
// this string suggests we have an improperly formatted NTLMv2
if (strlen(nethashv2) > 31) {
if (!strncmp(&nethashv2[32], "0101000000000000", 16))
return split_fields[1];
}
/* 7 = strlen(FORMAT_TAG); +1 for '$', +1 for NUL. */
cp = mem_alloc(7+strlen(srv_challenge)+1+strlen(cli_challenge)+1);
sprintf(cp, "%s%s$%s", FORMAT_TAG, cli_challenge, srv_challenge);
if (valid(cp,self)) {
/* Return a persistent copy; the temporary is freed either way. */
char *cp2 = str_alloc_copy(cp);
MEM_FREE(cp);
return cp2;
}
MEM_FREE(cp);
return split_fields[1];
}
/*
 * Canonicalize a ciphertext: copy the first TOTAL_LENGTH characters and
 * lower-case everything after the "$NETLM$" tag, matching the lower-case
 * hex form stored in john.pot.
 */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[TOTAL_LENGTH + 1];

	memcpy(out, ciphertext, TOTAL_LENGTH);
	out[TOTAL_LENGTH] = 0;
	strlwr(&out[FORMAT_TAG_LEN]); /* Exclude: $NETLM$ */
	return out;
}
/*
 * Decode the 48 hex digits of the LM response (which start at offset 24,
 * past the tag, challenge and '$') into BINARY_SIZE bytes.
 * Returns a static buffer, overwritten on each call.
 */
static void *get_binary(char *ciphertext)
{
	static uchar *binary;
	int i;

	if (!binary) binary = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);

	ciphertext+=24;  /* skip "$NETLM$", 16-hex challenge and '$' */
	for (i=0; i<BINARY_SIZE; i++)
	{
		binary[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])])<<4;
		binary[i] |= (atoi16[ARCH_INDEX(ciphertext[i*2+1])]);
	}
	return binary;
}
/*
 * Expand a 7-byte (56-bit) key fragment into the 8-byte DES key layout:
 * each output byte carries 7 consecutive key bits in its high bits.
 * NOTE(review): the low (parity) bit of each byte is whatever shifts in
 * (0 for key[7]) — assumes DES_set_key does not reject bad parity here.
 */
inline static void setup_des_key(unsigned char key_56[], DES_key_schedule *ks)
{
	DES_cblock key;

	key[0] = key_56[0];
	key[1] = (key_56[0] << 7) | (key_56[1] >> 1);
	key[2] = (key_56[1] << 6) | (key_56[2] >> 2);
	key[3] = (key_56[2] << 5) | (key_56[3] >> 3);
	key[4] = (key_56[3] << 4) | (key_56[4] >> 4);
	key[5] = (key_56[4] << 3) | (key_56[5] >> 5);
	key[6] = (key_56[5] << 2) | (key_56[6] >> 6);
	key[7] = (key_56[6] << 1);

	DES_set_key(&key, ks);
}
/*
 * For each candidate, DES-encrypt the challenge with the first 7 bytes
 * of the LM hash — only the first 8-byte block of the 24-byte response.
 * That partial binary is enough for cmp_all/cmp_one; cmp_exact computes
 * the full response on demand.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	DES_key_schedule ks;
	int i = 0;

#ifdef _OPENMP
#pragma omp parallel for default(none) private(i, ks) shared(count, output, challenge, saved_key)
#endif
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	/* Without OpenMP and with a single-key build, the loop collapses to
	   the bare block below acting on index 0. */
	for (i = 0; i < count; i++)
#endif
	{
		/* Just do a partial binary, the first DES operation */
		setup_des_key(saved_key[i], &ks);
		DES_ecb_encrypt((DES_cblock*)challenge, (DES_cblock*)output[i],
		                &ks, DES_ENCRYPT);
	}
	return count;
}
/* Return 1 if any computed partial binary matches the target binary. */
static int cmp_all(void *binary, int count)
{
	int index = 0;

#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (index=0; index<count; index++)
#endif
		if (!memcmp(output[index], binary, PARTIAL_BINARY_SIZE))
			return 1;
	return 0;
}
/* Does candidate `index` match on the partial (first DES block) binary? */
static int cmp_one(void *binary, int index)
{
	return memcmp(output[index], binary, PARTIAL_BINARY_SIZE) == 0;
}
/*
 * Full verification: build the complete 24-byte LM response and compare
 * it with the binary decoded from `source`.
 */
static int cmp_exact(char *source, int index)
{
	DES_key_schedule ks;
	uchar binary[BINARY_SIZE];

	/* NULL-pad 16-byte LM hash to 21-bytes (we postponed it until now) */
	memset(&saved_key[index][16], 0, 5);

	/* Split padded LM hash into three 7-byte thirds
	   DES-encrypt challenge using each third as a key
	   Concatenate three 8-byte resulting values to form 24-byte LM response */
	setup_des_key(saved_key[index], &ks);
	DES_ecb_encrypt((DES_cblock*)challenge, (DES_cblock*)binary, &ks, DES_ENCRYPT);
	setup_des_key(&saved_key[index][7], &ks);
	DES_ecb_encrypt((DES_cblock*)challenge, (DES_cblock*)&binary[8], &ks, DES_ENCRYPT);
	setup_des_key(&saved_key[index][14], &ks);
	DES_ecb_encrypt((DES_cblock*)challenge, (DES_cblock*)&binary[16], &ks, DES_ENCRYPT);

	return (!memcmp(binary, get_binary(source), BINARY_SIZE));
}
/*
 * Decode the 2*SALT_SIZE hex chars right after "$NETLM$" into the
 * SALT_SIZE-byte challenge. Returns a static buffer.
 */
static void *get_salt(char *ciphertext)
{
	static unsigned char *binary_salt;
	int i;

	if (!binary_salt) binary_salt = mem_alloc_tiny(SALT_SIZE, MEM_ALIGN_WORD);

	ciphertext += FORMAT_TAG_LEN;
	for (i = 0; i < SALT_SIZE; ++i)
		binary_salt[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])] << 4) + atoi16[ARCH_INDEX(ciphertext[i*2+1])];
	return (void*)binary_salt;
}
/* Install the challenge decoded by get_salt() for the next crypt_all(). */
static void set_salt(void *salt)
{
	challenge = salt;
}
/*
 * Store a candidate password and precompute its 16-byte LM hash:
 * upper-case the key, then DES-encrypt the constant "KGS!@#$%" (the
 * magic[] bytes) with each 7-byte half of the password as key.
 */
static void netlm_set_key(char *key, int index)
{
	const unsigned char magic[] = {0x4b, 0x47, 0x53, 0x21, 0x40, 0x23, 0x24, 0x25};
	DES_key_schedule ks;

	/* Truncate to buffer size and guarantee NUL termination. */
	strncpy((char *)saved_plain[index], key, sizeof(saved_plain[index]));
	saved_plain[index][sizeof(saved_plain[index])-1] = 0;

	/* Upper-case password */
	enc_strupper((char*)saved_plain[index]);

	/* Generate 16-byte LM hash */
	setup_des_key(saved_plain[index], &ks);
	DES_ecb_encrypt((DES_cblock*)magic, (DES_cblock*)saved_key[index], &ks, DES_ENCRYPT);
	setup_des_key(&saved_plain[index][7], &ks);
	DES_ecb_encrypt((DES_cblock*)magic, (DES_cblock*)&saved_key[index][8], &ks, DES_ENCRYPT);

	/* NULL-padding the 16-byte LM hash to 21-bytes is done in cmp_exact */
}
/* Return the stored candidate (already upper-cased by netlm_set_key). */
static char *get_key(int index)
{
	return (char*)saved_plain[index];
}
/* Bucket salts by the first 32 bits of the challenge. */
static int salt_hash(void *salt)
{
	return *(uint32_t *)salt & (SALT_HASH_SIZE - 1);
}
/* Hash-table lookups: first 32 bits of the partial binary, masked to
 * the standard PH_MASK_* bucket sizes. */
static int get_hash_0(int index)
{
	return *(uint32_t *)output[index] & PH_MASK_0;
}

static int get_hash_1(int index)
{
	return *(uint32_t *)output[index] & PH_MASK_1;
}

static int get_hash_2(int index)
{
	return *(uint32_t *)output[index] & PH_MASK_2;
}

static int get_hash_3(int index)
{
	return *(uint32_t *)output[index] & PH_MASK_3;
}

static int get_hash_4(int index)
{
	return *(uint32_t *)output[index] & PH_MASK_4;
}

static int get_hash_5(int index)
{
	return *(uint32_t *)output[index] & PH_MASK_5;
}

static int get_hash_6(int index)
{
	return *(uint32_t *)output[index] & PH_MASK_6;
}
/* Registration record: format parameters and the method table driven by
 * the john core (functions defined above). */
struct fmt_main fmt_NETLM = {
	{ /* params */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_8_BIT | FMT_TRUNC | FMT_SPLIT_UNIFIES_CASE | FMT_OMP | FMT_OMP_BAD,
		{ NULL },
		{ FORMAT_TAG },
		tests
	}, { /* methods */
		init,
		done,
		fmt_default_reset,
		prepare,
		valid,
		split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		netlm_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
sa.c | #include "common.h"
/*
 * Undo the last 1g/2g-opt move on the edge list: write the saved
 * endpoint pairs (restored_edge) back into the lines recorded in
 * restored_line. kind_opt (D_1G_OPT or D_2G_OPT) determines how many
 * lines were touched (groups * kind_opt).
 */
static void restore_edge(const int groups, const int kind_opt, int* restrict edge, int* restrict restored_line, const int* restrict restored_edge)
{
	if(kind_opt != D_1G_OPT && kind_opt != D_2G_OPT)
		ERROR("Wrong kind_opt: %d\n", kind_opt);

#pragma omp parallel for
	for(int i=0;i<groups*kind_opt;i++){
		edge[restored_line[i]*2  ] = restored_edge[i*2  ];
		edge[restored_line[i]*2+1] = restored_edge[i*2+1];
	}
}
/*
 * Undo the last in-place adjacency patch: write the saved values back
 * into adj at the recorded (y, x) slots. kind_opt*groups*2 entries were
 * saved by exchange_edge_2opt / edge_1g_opt.
 */
static void restore_adj(const int degree, const int groups, int* restrict adj, const int kind_opt, int* restrict restored_adj_value,
                        int* restrict restored_adj_idx_y, int* restrict restored_adj_idx_x)
{
	if(kind_opt != D_1G_OPT && kind_opt != D_2G_OPT)
		ERROR("Wrong kind_opt: %d\n", kind_opt);

#pragma omp parallel for
	for(int i=0;i<kind_opt*groups*2;i++){
		int y = restored_adj_idx_y[i];
		int x = restored_adj_idx_x[i];
		adj[y*degree+x] = restored_adj_value[i];
	}
}
/* Parallel element-wise copy of n ints (edge lists passed as flat arrays). */
static void copy_edge(int *restrict dst, const int *restrict src, const int n)
{
#pragma omp parallel for
	for(int i=0;i<n;i++)
		dst[i] = src[i];
}
/*
 * Uniform deviate in the open interval (0, 1): shift random()'s range
 * [0, RAND_MAX] by one and divide by RAND_MAX + 2 so neither endpoint
 * can occur.
 */
static double uniform_rand()
{
	const double shifted = (double)random() + 1.0;
	const double span    = (double)RAND_MAX + 2.0;
	return shifted / span;
}
/* Column header matching the layout printed by print_results(). */
static void print_result_header()
{
	PRINT_R0("   Times\t    Temp\tCur. ASPL GAP\t\tBest ASPL GAP\t\t");
	PRINT_R0("Cur. Dia. GAP\t\tBest Dia. GAP\tAccept Rate\n");
}
/*
 * One progress line: iteration, temperature, current/best ASPL and
 * diameter with their gaps to the theoretical lows, and the acceptance
 * rate since the last report ("-" on the first line, when no moves have
 * been counted yet).
 */
static void print_results(const long long num, const double temp, const double current_ASPL,
                          const double best_ASPL, const double low_ASPL, const int current_diam,
                          const int best_diam, const int low_diam, const long long accepts, const long long rejects)
{
	PRINT_R0("%8lld\t%f\t", num, temp);
	PRINT_R0("%f ( %f )\t%f ( %f )\t%d ( %d )\t\t\t%d ( %d )\t\t",
	         current_ASPL, current_ASPL-low_ASPL, best_ASPL, best_ASPL-low_ASPL,
	         current_diam, current_diam-low_diam, best_diam, best_diam-low_diam);
	if(num != 0)
		PRINT_R0("%.4f ( %lld / %lld )\n", (double)accepts/(accepts+rejects), accepts, (accepts+rejects));
	else
		PRINT_R0("-\n");
}
/*
 * Build the adjacency table adj[v][*] from the undirected edge list:
 * each edge contributes both directions. The adj row bound implies
 * every vertex has at most `degree` incident edges; rows are filled in
 * edge order via the per-vertex cursor count[].
 */
void create_adj(const int nodes, const int lines, const int degree,
                const int edge[lines][2], int adj[nodes][degree])
{
	int count[nodes];  /* next free slot per adjacency row (C99 VLA) */
	for(int i=0;i<nodes;i++)
		count[i] = 0;

	for(int i=0;i<lines;i++){
		int n1 = edge[i][0];
		int n2 = edge[i][1];
		adj[n1][count[n1]++] = n2;
		adj[n2][count[n2]++] = n1;
	}
}
#define CENTER_VERTEX -1
/*
 * Ring distance between a and b on the base cycle (nodes minus any
 * added center vertices). Returns CENTER_VERTEX when either endpoint is
 * one of the added centers (indices at the top of the range).
 */
int distance(int nodes, const int a, const int b, const int added_centers)
{
	if(a >= nodes-added_centers || b >= nodes-added_centers) return CENTER_VERTEX;
	int v = MAX(a, b) - MIN(a, b);
	if(added_centers) nodes -= added_centers;
	/* shorter way around the ring; the 2.0 keeps the comparison exact
	   for odd ring sizes */
	return (v < nodes/2.0)? v : nodes-v;
}
/*
 * Consistency checks on the group-symmetric edge list and (when adj is
 * non-NULL) on the incrementally-maintained adjacency array versus one
 * rebuilt from scratch. Prints a diagnostic for every violation and
 * returns false if any was found. `ii` is the caller's iteration
 * counter, echoed in the messages for debugging.
 */
bool check(const int nodes, const int based_nodes, const int lines, const int degree, const int groups,
           int edge[lines][2], const int added_centers, int* adj, const int ii)
{
	/* NOTE(review): `flag` is written from inside omp parallel loops
	   without synchronization; every writer stores false, so the final
	   value is still correct. */
	bool flag = true;
	int based_lines = lines/groups;

	/* Check 1: every group copy of a base edge has the same ring distance. */
#pragma omp parallel for
	for(int i=0;i<based_lines;i++){
		for(int j=1;j<groups;j++){
			int k = j * based_lines + i;
			if(distance(nodes, edge[i][0], edge[i][1], added_centers) != distance(nodes, edge[k][0], edge[k][1], added_centers)){
				PRINT_R0("check 1: %d\n", ii);
				PRINT_R0("edge[%d][0] = %d : edge[%d][1] = %d d=%d\n", i, edge[i][0], i, edge[i][1], distance(nodes, edge[i][0], edge[i][1], added_centers));
				PRINT_R0("edge[%d][0] = %d : edge[%d][1] = %d d=%d\n", k, edge[k][0], k, edge[k][1], distance(nodes, edge[k][0], edge[k][1], added_centers));
				flag = false;
			}
		}
	}

	/* Check 2: every group copy has the same orientation (order()). */
#pragma omp parallel for
	for(int i=0;i<based_lines;i++){
		for(int j=1;j<groups;j++){
			int k = j * based_lines + i;
			if(order(nodes, edge[i][0], edge[i][1], added_centers) != order(nodes, edge[k][0], edge[k][1], added_centers)){
				PRINT_R0("check 2 : %d\n", ii);
				PRINT_R0("edge[%d][0] = %d : edge[%d][1] = %d %d\n", i, edge[i][0], i, edge[i][1], order(nodes, edge[i][0], edge[i][1], added_centers));
				PRINT_R0("edge[%d][0] = %d : edge[%d][1] = %d %d\n", k, edge[k][0], k, edge[k][1], order(nodes, edge[k][0], edge[k][1], added_centers));
				flag = false;
			}
		}
	}

	/* Check 4: consecutive group copies (non-MIDDLE edges) differ by
	   exactly based_nodes in both endpoints, modulo the ring size. */
#pragma omp parallel for
	for(int i=0;i<based_lines;i++){
		if(order(nodes, edge[i][0], edge[i][1], added_centers) != MIDDLE)
			for(int j=1;j<groups;j++){
				int k = j * based_lines + i;
				int tmp0 = edge[k][0] - edge[k-based_lines][0];
				int tmp1 = edge[k][1] - edge[k-based_lines][1];
				if(added_centers){
					tmp0 = (tmp0 < 0)? tmp0+nodes-added_centers : tmp0;
					tmp1 = (tmp1 < 0)? tmp1+nodes-added_centers : tmp1;
				}
				else{
					tmp0 = (tmp0 < 0)? tmp0 + nodes : tmp0;
					tmp1 = (tmp1 < 0)? tmp1 + nodes : tmp1;
				}
				if(tmp0 != based_nodes || tmp1 != based_nodes){
					PRINT_R0("check 4: %d\n", ii);
					PRINT_R0("The different group relationship\n");
					PRINT_R0("edge[%d][0]-edge[%d][0] = %d - %d = %d != %d\n", k, k-based_lines, edge[k][0], edge[k-based_lines][0], tmp0, based_nodes);
					PRINT_R0("edge[%d][1]-edge[%d][1] = %d - %d = %d != %d\n", k, k-based_lines, edge[k][1], edge[k-based_lines][1], tmp1, based_nodes);
					flag = false;
				}
			}
	}

	if(adj != NULL){
		/* Rebuild adjacency from the edge list and compare with adj. */
		int *tmp_adj = malloc(sizeof(int)*nodes*degree);
		create_adj(nodes, lines, degree, (const int (*)[2])edge, (int (*)[degree])tmp_adj);

		/* Error 5: per-vertex neighbor sums must match. */
		for(int i=0;i<nodes;i++){
			int sum[2] = {0,0};
			for(int j=0;j<degree;j++){
				sum[0] += *(adj + i * degree + j);
				sum[1] += *(tmp_adj + i * degree + j);
			}
			if(sum[0] != sum[1]){
				PRINT_R0("[ii=%d] Error 5 %d %d\n", ii, sum[0], sum[1]);
				for(int j=0;j<degree;j++)
					PRINT_R0("%d ", *(adj + i * degree + j));
				PRINT_R0("\n");
				for(int j=0;j<degree;j++)
					PRINT_R0("%d ", *(tmp_adj + i * degree + j));
				PRINT_R0("\n");
				flag = false;
				break;
			}
		}

		/* Error 6: every neighbor in adj must appear in tmp_adj. */
		for(int i=0;i<nodes;i++){
			for(int j=0;j<degree;j++){
				int tmp = *(adj + i * degree + j);
				int k;
				for(k=0;k<degree;k++)
					if(tmp == *(tmp_adj + i * degree + k))
						break;
				if(k == degree){
					PRINT_R0("[ii=%d] Error 6\n", ii);
					flag = false;
					break;
				}
			}
		}

		/* Error 7: ... and vice versa. */
		for(int i=0;i<nodes;i++){
			for(int j=0;j<degree;j++){
				int tmp = *(tmp_adj + i * degree + j);
				int k;
				for(k=0;k<degree;k++)
					if(tmp == *(adj + i * degree + k))
						break;
				if(k == degree){
					PRINT_R0("[ii=%d] Error 7\n", ii);
					flag = false;
					break;
				}
			}
		}

		/* No duplicated neighbor within an adjacency row (simple graph). */
		for(int i=0;i<nodes;i++){
			for(int j=0;j<degree;j++){
				int tmp = *(adj + i * degree + j);
				for(int k=j+1;k<degree;k++)
					if(tmp == *(adj + i * degree + k)){
						flag = false;
						break;
					}
			}
		}
		free(tmp_adj);
	}
	return flag;
}
bool has_duplicated_vertex(const int e00, const int e01, const int e10, const int e11)
{
return (e00 == e10 || e01 == e11 || e00 == e11 || e01 == e10);
}
/*
 * Apply one random 2-opt rewiring to the edge list, replicated across
 * all `groups` symmetric copies. Falls back to a 1-group move (via
 * edge_1g_opt, which sets *kind_opt itself) when the two picked lines
 * lie in the same group orbit, or when a diameter-length edge with an
 * even group count is involved. When is_simple_graph, adj is patched
 * incrementally and full undo state is recorded in the restored_*
 * arrays for restore_edge()/restore_adj().
 */
void exchange_edge_2opt(const int nodes, const int lines, const int groups, const int degree,
                        const int based_nodes, int edge[lines][2], const int added_centers,
                        int* restrict adj, int *kind_opt, int* restrict restored_edge, int* restrict restored_line,
                        int* restrict restored_adj_value, int* restrict restored_adj_idx_y,
                        int* restrict restored_adj_idx_x, const bool is_simple_graph, const int ii)
{
	int tmp_line[groups*2], tmp_edge[groups*2][2], r;
	int based_lines = lines / groups;

	while(1){
		while(1){
			/* Pick two distinct lines ... */
			while(1){
				tmp_line[0] = getRandom(lines);
				tmp_line[1] = getRandom(lines);
				if(tmp_line[0] != tmp_line[1]) break;
			}
			/* ... that do not share a vertex. */
			if(has_duplicated_vertex(edge[tmp_line[0]][0], edge[tmp_line[0]][1], edge[tmp_line[1]][0], edge[tmp_line[1]][1])){
				continue;
			}
			else if((tmp_line[0] - tmp_line[1]) % based_lines == 0){
				/* Same orbit: a 2g move would collide with its own copy;
				   try a 1-group opt instead. */
				if(edge_1g_opt(edge, nodes, lines, degree, based_nodes, based_lines, groups, tmp_line[0], added_centers,
				               adj, kind_opt, restored_edge, restored_line, restored_adj_value, restored_adj_idx_y,
				               restored_adj_idx_x, is_simple_graph, ii))
					return;
				else
					continue;
			}
			else break;
		}

		/* Half-ring (diameter-length) edges with an even group count are
		   also routed through the 1g-opt path. */
		bool flag0 = (distance(nodes, edge[tmp_line[0]][0], edge[tmp_line[0]][1], added_centers) == (nodes-added_centers)/2);
		bool flag1 = (distance(nodes, edge[tmp_line[1]][0], edge[tmp_line[1]][1], added_centers) == (nodes-added_centers)/2);
		bool diameter_flag = ((flag0 || flag1) && groups%2 == 0);

		if(diameter_flag){
			if(edge_1g_opt(edge, nodes, lines, degree, based_nodes, based_lines, groups, tmp_line[0], added_centers,
			               adj, kind_opt, restored_edge, restored_line, restored_adj_value, restored_adj_idx_y,
			               restored_adj_idx_x, is_simple_graph, ii))
				return;
			else
				continue;
		}

		// 2g-opt
		/* Collect the group copies of both picked lines (wrap at lines). */
		for(int i=1;i<groups;i++){
			int tmp0 = tmp_line[0] + based_lines * i;
			int tmp1 = tmp_line[1] + based_lines * i;
			tmp_line[0+2*i] = (tmp0 >= lines)? tmp0 - lines : tmp0;
			tmp_line[1+2*i] = (tmp1 >= lines)? tmp1 - lines : tmp1;
		}
		for(int i=0;i<groups*2;i++)
			for(int j=0;j<2;j++)
				tmp_edge[i][j] = edge[tmp_line[i]][j];

		/* Choose one of the two possible 2-opt reconnections at random. */
		r = getRandom(2);
		if(r == 0){
			for(int i=0;i<groups;i++)
				swap(&tmp_edge[i*2][1], &tmp_edge[i*2+1][1]);
		}
		else{
			for(int i=0;i<groups;i++)
				swap(&tmp_edge[i*2][1], &tmp_edge[i*2+1][0]);
		}

		/* Reject self-loops and duplicate edges; retry otherwise. */
		assert(check_loop(groups*2, tmp_edge));
		if(!check_duplicate_tmp_edge(2, groups, tmp_edge))
			continue;
		else if(!check_duplicate_current_edge(lines, groups*2, tmp_line, edge, tmp_edge, groups, 2, false))
			continue;
		else
			break;
	} // end while

	/* Normalize the stored orientation of each new edge. */
	for(int i=0;i<groups*2;i++)
		if(order(nodes, tmp_edge[i][0], tmp_edge[i][1], added_centers) == RIGHT)
			swap(&tmp_edge[i][0], &tmp_edge[i][1]); // RIGHT -> LEFT

	if(is_simple_graph){
		// Change a part of adj.
		int y0[groups], y1[groups], y2[groups], y3[groups];
		int x0[groups], x1[groups], x2[groups], x3[groups];
#pragma omp parallel for
		for(int i=0;i<groups;i++){
			/* Locate the four adjacency slots holding the two old edges. */
			y0[i] = edge[tmp_line[i*2  ]][0];
			y1[i] = edge[tmp_line[i*2  ]][1];
			y2[i] = edge[tmp_line[i*2+1]][0];
			y3[i] = edge[tmp_line[i*2+1]][1];
			for(x0[i]=0;x0[i]<degree;x0[i]++)
				if(adj[y0[i]*degree+x0[i]] == y1[i])
					break;
			for(x1[i]=0;x1[i]<degree;x1[i]++)
				if(adj[y1[i]*degree+x1[i]] == y0[i])
					break;
			for(x2[i]=0;x2[i]<degree;x2[i]++)
				if(adj[y2[i]*degree+x2[i]] == y3[i])
					break;
			for(x3[i]=0;x3[i]<degree;x3[i]++)
				if(adj[y3[i]*degree+x3[i]] == y2[i])
					break;
			if(x0[i] == degree || x1[i] == degree || x2[i] == degree || x3[i] == degree)
				ERROR("%d : %d %d %d %d\n", ii, x0[i], x1[i], x2[i], x3[i]);

			/* Record undo state for restore_adj()/restore_edge(). */
			restored_adj_idx_y[i*4  ] = y0[i];
			restored_adj_idx_x[i*4  ] = x0[i];
			restored_adj_idx_y[i*4+1] = y1[i];
			restored_adj_idx_x[i*4+1] = x1[i];
			restored_adj_idx_y[i*4+2] = y2[i];
			restored_adj_idx_x[i*4+2] = x2[i];
			restored_adj_idx_y[i*4+3] = y3[i];
			restored_adj_idx_x[i*4+3] = x3[i];
			restored_adj_value[i*4  ] = adj[y0[i]*degree+x0[i]];
			restored_adj_value[i*4+1] = adj[y1[i]*degree+x1[i]];
			restored_adj_value[i*4+2] = adj[y2[i]*degree+x2[i]];
			restored_adj_value[i*4+3] = adj[y3[i]*degree+x3[i]];
			//
			restored_line[i*2  ] = tmp_line[i*2  ];
			restored_line[i*2+1] = tmp_line[i*2+1];
			restored_edge[i*4  ] = edge[tmp_line[i*2  ]][0];
			restored_edge[i*4+1] = edge[tmp_line[i*2  ]][1];
			restored_edge[i*4+2] = edge[tmp_line[i*2+1]][0];
			restored_edge[i*4+3] = edge[tmp_line[i*2+1]][1];
		}
		/* Rewire the four slots according to the chosen reconnection r. */
#pragma omp parallel for
		for(int i=0;i<groups;i++){
			if(r==0){
				adj[y0[i]*degree+x0[i]] = y3[i]; adj[y1[i]*degree+x1[i]] = y2[i];
				adj[y2[i]*degree+x2[i]] = y1[i]; adj[y3[i]*degree+x3[i]] = y0[i];
			}
			else{
				adj[y0[i]*degree+x0[i]] = y2[i]; adj[y1[i]*degree+x1[i]] = y3[i];
				adj[y2[i]*degree+x2[i]] = y0[i]; adj[y3[i]*degree+x3[i]] = y1[i];
			}
		}
	}

	/* Commit the new endpoints to the edge list. */
#pragma omp parallel for
	for(int i=0;i<groups;i++){
		edge[tmp_line[i*2  ]][0] = tmp_edge[i*2  ][0];
		edge[tmp_line[i*2+1]][0] = tmp_edge[i*2+1][0];
		edge[tmp_line[i*2  ]][1] = tmp_edge[i*2  ][1];
		edge[tmp_line[i*2+1]][1] = tmp_edge[i*2+1][1];
	}
	*kind_opt = D_2G_OPT;
}
/*
 * Simulated-annealing acceptance rule. Diameter dominates: improvements
 * are always accepted, regressions always rejected. At equal diameter,
 * non-worsening ASPL is accepted; otherwise Metropolis exp(diff/temp)
 * decides (disabled when hill climbing). Updates the accept/reject
 * counters; accepts after the first SKIP_ACCEPTS iterations also bump
 * *total_accepts, and detect_temp_flag records the worst uphill energy.
 */
static bool accept(const int new_diam, const int current_diam, const double new_ASPL, const double current_ASPL,
                   const double temp, const int nodes, const int groups,
                   const bool hill_climbing_flag, const bool detect_temp_flag, const long long i,
                   double *max_diff_energy, long long *total_accepts, long long *accepts, long long *rejects)
{
	if(new_diam < current_diam){
		*accepts += 1;
		if(i > SKIP_ACCEPTS) *total_accepts +=1;
		return true;
	}
	else if(new_diam > current_diam){
		*rejects += 1;
		return false;
	}
	else{ // new_diam == current_diam
		if(new_ASPL <= current_ASPL){
			*accepts += 1;
			if(i > SKIP_ACCEPTS) *total_accepts +=1;
			return true;
		}
		else if(hill_climbing_flag){ // Only accept when ASPL <= current_ASPL.
			*rejects += 1;
			return false;
		}

		/* Energy difference rescaled to total path-length units (per group);
		   negative for an uphill (worsening) move. */
		double diff = ((current_ASPL-new_ASPL)*nodes*(nodes-1))/groups;

		if(detect_temp_flag)
			*max_diff_energy = MAX(*max_diff_energy, -1.0 * diff);

		if(exp(diff/temp) > uniform_rand()){
			*accepts += 1;
			if(i > SKIP_ACCEPTS) *total_accepts +=1;
			return true;
		}
		else{
			*rejects += 1;
			return false;
		}
	}
}
/*
 * Simulated-annealing main loop. Repeatedly mutates a working copy of
 * `edge` with 2-opt moves, evaluates diameter/ASPL, and accepts or
 * rolls back via accept(). The best solution found is written back to
 * edge/diam/ASPL on exit. Returns the number of iterations executed
 * (may stop early once best_ASPL reaches low_ASPL).
 */
long long sa(const int nodes, const int lines, const int degree, const int groups, double temp,
             const long long ncalcs, const double cooling_rate, const int low_diam, const double low_ASPL,
             const bool hill_climbing_flag, const bool detect_temp_flag, double *max_diff_energy,
             int edge[lines][2], int *diam, double *ASPL, const int cooling_cycle, const int added_centers,
             const int based_nodes, long long *total_accepts, const bool is_simple_graph, const int algo)
{
	long long ii, accepts = 0, rejects = 0;
	int (*best_edge)[2] = malloc(sizeof(int)*lines*2); // best_edge[lines][2]
	int (*tmp_edge)[2] = malloc(sizeof(int)*lines*2); // tmp_edge[lines][2]
	int (*tmp_edge_nsg)[2] = malloc(sizeof(int)*lines*2); // tmp_edge_nsg[lines][2] /* nsg = not simple graph */
	/* Undo buffers filled by exchange_edge_2opt for simple graphs. */
	int restored_adj_value[groups*4], restored_adj_idx_y[groups*4], restored_adj_idx_x[groups*4], kind_opt;
	int restored_edge[groups*4], restored_line[groups*2];
	bool restore_flag = false;

	copy_edge((int *)best_edge, (int *)edge, lines*2);
	copy_edge((int *)tmp_edge, (int *)edge, lines*2);

	// Create adj matrix
	int *adj = malloc(sizeof(int)*nodes*degree); // int adj[nodes][degree];
	create_adj(nodes, lines, degree, (const int (*)[2])tmp_edge, (int (*)[degree])adj);
	evaluation(nodes, based_nodes, groups, lines, degree, adj, diam, ASPL, added_centers, algo);

	double current_ASPL = *ASPL;
	double best_ASPL = *ASPL;
	int current_diam = *diam;
	int best_diam = *diam;

	int print_interval = (ncalcs/NUM_OF_PROGRESS == 0)? 1 : ncalcs/NUM_OF_PROGRESS;
	if(rank == 0 && !detect_temp_flag)
		print_result_header();

	for(ii=0;ii<ncalcs;ii++){
		double tmp_ASPL;
		int tmp_diam;
		if(ii % print_interval == 0 && !detect_temp_flag){
			print_results(ii, temp, current_ASPL, best_ASPL, low_ASPL,
			              current_diam, best_diam, low_diam, accepts, rejects);
			accepts = 0;
			rejects = 0;
		}

		/* Propose mutations until evaluation() succeeds (graph usable). */
		while(1){
			if(is_simple_graph){
				/* Undo the previously rejected/failed move in place. */
				if(restore_flag){
					restore_adj(degree, groups, adj, kind_opt, restored_adj_value, restored_adj_idx_y, restored_adj_idx_x);
					restore_edge(groups, kind_opt, (int *)tmp_edge, restored_line, restored_edge);
				}
			}
			else{
				/* Non-simple graphs have no incremental undo: keep a copy. */
				copy_edge((int *)tmp_edge_nsg, (int *)tmp_edge, lines*2);
			}
			exchange_edge_2opt(nodes, lines, groups, degree, based_nodes, tmp_edge, added_centers,
			                   adj, &kind_opt, restored_edge, restored_line, restored_adj_value,
			                   restored_adj_idx_y, restored_adj_idx_x, is_simple_graph, (int)ii);
			if(!is_simple_graph)
				create_adj(nodes, lines, degree, (const int (*)[2])tmp_edge, (int (*)[degree])adj);
			assert(check(nodes, based_nodes, lines, degree, groups, tmp_edge, added_centers, adj, (int)ii));
			if(evaluation(nodes, based_nodes, groups, lines, degree, adj, &tmp_diam, &tmp_ASPL, added_centers, algo))
				break;
			else{
				if(is_simple_graph)
					restore_flag = true;
				else
					copy_edge((int *)tmp_edge, (int *)tmp_edge_nsg, lines*2);
			}
		}

		if(!accept(tmp_diam, current_diam, tmp_ASPL, current_ASPL, temp, nodes, groups, hill_climbing_flag,
		           detect_temp_flag, ii, max_diff_energy, total_accepts, &accepts, &rejects)){
			/* Rejected: schedule/perform rollback. */
			if(is_simple_graph)
				restore_flag = true;
			else
				copy_edge((int *)tmp_edge, (int *)tmp_edge_nsg, lines*2);
		}
		else{
			if(is_simple_graph) restore_flag = false;
			current_ASPL = tmp_ASPL;
			current_diam = tmp_diam;
			if((best_diam > current_diam) || (best_diam == current_diam && best_ASPL > current_ASPL)){
				copy_edge((int *)best_edge, (int *)tmp_edge, lines*2);
				best_ASPL = current_ASPL;
				best_diam = current_diam;
			}
			/* Early exit once the ASPL lower bound is attained. */
			if(best_diam == current_diam && best_ASPL == low_ASPL){
				if(!detect_temp_flag){
					print_results(ii, temp, current_ASPL, best_ASPL, low_ASPL,
					              current_diam, best_diam, low_diam, accepts, rejects);
					PRINT_R0("---\nFound optimum solution.\n");
				}
				break;
			}
		}
		if((ii+1)%cooling_cycle == 0)
			temp *= cooling_rate;
	}

	*ASPL = best_ASPL;
	*diam = best_diam;
	copy_edge((int *)edge, (int *)best_edge, lines*2);
	free(adj);
	free(best_edge);
	free(tmp_edge);
	free(tmp_edge_nsg);
	return ii;
}
#define ESTIMATED_TIMES 5
/*
 * Run ESTIMATED_TIMES full mutate+check+evaluate cycles on a scratch
 * copy of the edge list and return the mean wall time of one cycle.
 */
double estimate_elapse_time(const int nodes, const int based_nodes, const int lines, const int degree,
                            const int groups, int edge[lines][2], const int added_centers,
                            const bool is_simple_graph, const int algo)
{
	int diam;    // Not use
	double ASPL; // Not use
	int *adj = malloc(sizeof(int)*nodes*degree); // int adj[nodes][degree];
	int (*tmp_edge)[2] = malloc(sizeof(int)*lines*2); // int tmp_edge[lines][2];
	int kind_opt;
	int restored_adj_value[groups*4], restored_adj_idx_y[groups*4], restored_adj_idx_x[groups*4];
	int restored_edge[groups*4], restored_line[groups*2];

	copy_edge((int *)tmp_edge, (int *)edge, lines*2);
	create_adj(nodes, lines, degree, (const int (*)[2])tmp_edge, (int (*)[degree])adj);

	timer_start(TIMER_ESTIMATED);
	for(int i=0;i<ESTIMATED_TIMES;i++){
		exchange_edge_2opt(nodes, lines, groups, degree, based_nodes, tmp_edge, added_centers, adj,
		                   &kind_opt, restored_edge, restored_line, restored_adj_value,
		                   restored_adj_idx_y, restored_adj_idx_x, is_simple_graph, (int)i);
		if(!is_simple_graph)
			create_adj(nodes, lines, degree, (const int (*)[2])tmp_edge, (int (*)[degree])adj);
		assert(check(nodes, based_nodes, lines, degree, groups, tmp_edge, added_centers, adj, (int)i));
		evaluation(nodes, based_nodes, groups, lines, degree, adj, &diam, &ASPL, added_centers, algo);
	}
	timer_stop(TIMER_ESTIMATED);

	free(tmp_edge);
	free(adj);
	return timer_read(TIMER_ESTIMATED)/ESTIMATED_TIMES;
}
// This function is mainly useful when the number of groups is 1.
/*
 * Validate the user-supplied edge list before optimization: abort if
 * the graph is disconnected, and terminate early (END) if it already
 * attains the ASPL lower bound.
 */
void check_current_edge(const int nodes, const int degree, const int lines, const int groups, const int based_nodes,
                        int edge[lines][2], const double low_ASPL, const int added_centers, const int algo)
{
	int diam; // Not use
	double ASPL;
	int (*adj)[degree] = malloc(sizeof(int)*nodes*degree); // int adj[nodes][degree];

	create_adj(nodes, lines, degree, (const int (*)[2])edge, adj);
	if(! evaluation(nodes, based_nodes, groups, lines, degree, (int *)adj, &diam, &ASPL, added_centers, algo))
		ERROR("The input file has a node which is never reached by another node.\n");

	if(ASPL == low_ASPL)
		END("The input file has already optimum solution.\n");

	free(adj);
}
|
PoW.c | /* Copyright 2016-2018 The Ulord Core Foundation */
#include "PoW.h"
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>
// #include <omp.h>
#include "my_time.h"
#include "common.h"
#include "my_rand48_r.h"
#include "oneWayFunction.h"
/* Cached sizeof(uint8_t) (== 1); used with shifts below to build byte
 * counts, e.g. sizeof_uint8_t << 5 == 32. */
const uint8_t sizeof_uint8_t = sizeof(uint8_t);

//#define SSE_VERSION  /* enable the vectorized (vrand48/vseed48) paths */
/*
 * Step 1: Initialize working memory.
 *
 * Fills Maddr (WORK_MEMORY_SIZE bytes, 32 bytes per iteration) with a
 * pseudo-random stream derived from the one-way hash of `input`. Every
 * 128th iteration re-hashes the running state `a` through one of 16
 * one-way functions and reseeds four 48-bit PRNG streams from the
 * digest; the other iterations emit rotated PRNG output and XOR it
 * back into the state.
 */
void initWorkMemory(uint8_t *input, uint32_t inputLen, uint8_t *Maddr, const uint32_t K) {
	uint32_t i, j;
	uint8_t a[OUTPUT_LEN], b[OUTPUT_LEN];

	funcInfor[0].func(input, inputLen, a);

	uint64_t randSeed[4] = {0, 0, 0, 0};
#ifndef SSE_VERSION
	struct my_rand48_data randBuffer[4];
#else
	struct vrand48_data randBuffer[2];
#endif

	const uint32_t iterNum = WORK_MEMORY_SIZE >> 5;  /* 32 bytes per step */
	for (i = 0; i < iterNum; ++i) {
		// K = 128
		//if (i % K) {
		/* NOTE(review): the test below hard-codes K=128 via (i & 0x7F);
		 * the K parameter is otherwise unused — confirm callers always
		 * pass 128. */
		if (i & 0x7F) {
#ifndef SSE_VERSION
			uint64_t num = 0;
			for (j = 0; j < 4; ++j) {
				my_rand64_r(&randBuffer[j], &num);
				//memcpy(b + (j << 3), (uint8_t *)&num, sizeof(uint8_t) << 3);
				memcpy(b + (j << 3), (uint8_t *)&num, sizeof_uint8_t << 3);
			}
#else
			vrand64(b, randBuffer);
#endif
			uint8_t shift_num;
			uint8_t result[OUTPUT_LEN];
			reduce_bit((uint8_t *)&i, 4, (uint8_t *)&shift_num, 8);
			rrs(b, OUTPUT_LEN, result, shift_num);  /* presumably rotate-right by shift_num */
			//memcpy(Maddr + (i << 5), result, OUTPUT_LEN*sizeof(uint8_t));
			memcpy(Maddr + (i << 5), result, sizeof_uint8_t << 5);
			for (j = 0; j < 32; ++j) {
				a[j] ^= result[j];
			}
		} else {
			/* Reseed path: t in [0,15] selects a one-way function. */
			uint8_t t = 0, shift_num = 0;
			reduce_bit(a, 32, (uint8_t *)&t, 8);
			t = (t & 0x0f) ^ (t >> 4);
			reduce_bit((uint8_t *)&i, 4, (uint8_t *)&shift_num, 8);
			uint8_t a_rrs[INPUT_LEN];
			rrs(a, OUTPUT_LEN, a_rrs, shift_num);
			funcInfor[t].func(a_rrs, 32, a);
			/* Derive four 48-bit seeds from the fresh digest. */
			reduce_bit(a, 8, (uint8_t *)&randSeed[0], 48);
			reduce_bit(a + 8, 8, (uint8_t *)&randSeed[1], 48);
			reduce_bit(a + 16, 8, (uint8_t *)&randSeed[2], 48);
			reduce_bit(a + 24, 8, (uint8_t *)&randSeed[3], 48);
#ifndef SSE_VERSION
			my_seed48_r(randSeed[0], &randBuffer[0]);
			my_seed48_r(randSeed[1], &randBuffer[1]);
			my_seed48_r(randSeed[2], &randBuffer[2]);
			my_seed48_r(randSeed[3], &randBuffer[3]);
#else
			vseed48(randSeed , &randBuffer[0]);
			vseed48(randSeed + 2, &randBuffer[1]);
#endif
			//memcpy(Maddr + (i << 5), a, 32*sizeof(uint8_t));
			memcpy(Maddr + (i << 5), a, sizeof_uint8_t << 5);
		}
	}
}
/*
 * Step 2: Modify the working memory contents.
 *
 * Performs C rounds of L*64 data-dependent byte swaps over Maddr. Each
 * round seeds a rand48 stream from the state, swaps pseudo-random byte
 * pairs (whitened with a state byte), folds the swapped bytes into a
 * 64-byte buffer, re-hashes it with a state-selected one-way function,
 * and XORs the digest into `result` (OUTPUT_LEN bytes).
 */
void modifyWorkMemory(uint8_t *Maddr, const uint32_t L, const uint32_t C,
                      uint8_t *result) {
	uint32_t i, j;
	uint8_t a[OUTPUT_LEN], b[64];

	/* Seed state from the last 32 bytes of the working memory. */
	funcInfor[0].func(Maddr + WORK_MEMORY_SIZE - 32, 32, a);
	//memcpy(result, a, OUTPUT_LEN*sizeof(uint8_t));
	memcpy(result, a, sizeof_uint8_t << 5);

	uint64_t r = 0;
	reduce_bit(a, 32, (uint8_t *)&r, 64);

	const uint32_t iterNum = L << 6;  /* L * 64 swaps per round */
	for (i = 0; i < C; ++i) {
		uint64_t randSeed = 0;
		reduce_bit(a, 32, (uint8_t *)&randSeed, 48);
		struct my_rand48_data randBuffer;
		my_seed48_r(randSeed, &randBuffer);

		uint8_t t1, t2, s;
		uint64_t randNum = 0, base = 0;
		for (j = 0; j < iterNum; ++j) {
			my_rand48_r(&randBuffer, &randNum);
			base = randNum + r;
			uint64_t offset = 0;
			reduce_bit((uint8_t *)&r, 8, (uint8_t *)&offset, 8);
			offset = (offset << 8) + 1;
			//#define WORK_MEMORY_SIZE (1024*1024) 0x100000
			//uint64_t addr1 = (base + WORK_MEMORY_SIZE - offset) % WORK_MEMORY_SIZE;
			uint64_t addr1 = (base + WORK_MEMORY_SIZE - offset) & 0xFFFFF;  /* mod 1 MiB via mask */
			//uint64_t addr2 = (base + offset) % WORK_MEMORY_SIZE;
			uint64_t addr2 = (base + offset) & 0xFFFFF;
			t1 = Maddr[addr1];
			t2 = Maddr[addr2];
			s = a[j & 0x1f];
			/* Swap the pair, whitened by a byte of the state. */
			Maddr[addr1] = t2 ^ s;
			Maddr[addr2] = t1 ^ s;
			b[j & 0x3f] = t1 ^ t2;
			r = r + s + t1 + t2;
		}
		uint8_t t = 0;
		reduce_bit((uint8_t *)&r, 8, (uint8_t *)&t, 8);
		t = (t & 0x0f) ^ (t >> 4);  /* one-way function index in [0,15] */
		reduce_bit(b, 64, a, 256);
		uint8_t shift_num = 0;
		uint64_t ir = r + i;
		reduce_bit((uint8_t *)&ir, 8, (uint8_t *)&shift_num, 8);
		uint8_t a_rrs[INPUT_LEN];
		rrs(a, OUTPUT_LEN, a_rrs, shift_num);
		funcInfor[t].func(a_rrs, 32, a);
		for (j = 0; j < OUTPUT_LEN; ++j) {
			result[j] ^= a[j];
		}
	}
}
/*
 * Step 3: Calculate the final result.
 *
 * Starting from digest `c`, walks the working memory in 32-byte rows,
 * XOR-folding a data-dependent number of rows (d, derived from D bits
 * of the current result) per step, then rotating and re-hashing through
 * a state-selected one-way function. Finishes with funcInfor[0] when
 * row num (the last full row index) is reached.
 */
void calculateFinalResult(uint8_t *Maddr, uint8_t *c, const uint32_t D, uint8_t *result) {
	uint32_t i = 0, j = 0, k = 0;
	//memcpy(result, c, OUTPUT_LEN*sizeof(uint8_t));
	memcpy(result, c, sizeof_uint8_t << 5);
	const uint32_t num = (WORK_MEMORY_SIZE >> 5) - 1;
	uint32_t it = 0;
	uint8_t result_rrs[OUTPUT_LEN];
	while(1) {
		uint8_t t = 0, shift_num = 0;
		uint32_t d = 0;
		reduce_bit(result, 32, (uint8_t *)&t, 8);
		t = (t & 0x0f) ^ (t >> 4);  /* one-way function index in [0,15] */
		reduce_bit(result, 32, (uint8_t *)&d, D);  /* D-bit row count, then +1 */
		++d;
		for (j = 0; j < d; ++j) {
			uint32_t index = i << 5;
			for (k = 0; k < 32; ++k) {
				result[k] ^= Maddr[index + k];
			}
			++i;
			if (i == num) {
				/* Last row: final rotate + fixed hash, and we are done. */
				it = i + t;
				reduce_bit((uint8_t *)&it, 4, (uint8_t *)&shift_num, 8);
				rrs(result, OUTPUT_LEN, result_rrs, shift_num);
				funcInfor[0].func(result_rrs, 32, result);
				return;
			}
		}
		it = t + i;
		reduce_bit((uint8_t *)&it, 4, (uint8_t *)&shift_num, 8);
		rrs(result, OUTPUT_LEN, result_rrs, shift_num);
		funcInfor[t].func(result_rrs, 32, result);
	}
}
/*
* Correctness & Performance test for Proof of work
*/
/*
void testPowFunction(uint8_t *mess, uint32_t messLen, const int64_t iterNum) {
int64_t j;
uint32_t inputLen = messLen;
uint8_t input[INPUT_LEN], output[OUTPUT_LEN];
memset(input, 0, INPUT_LEN*sizeof(uint8_t));
memcpy(input, mess, messLen*sizeof(char));
// Init all one-way function
initOneWayFunction();
uint8_t *Maddr = (uint8_t *)malloc(64 * WORK_MEMORY_SIZE*sizeof(uint8_t));
assert(NULL != Maddr);
memset(Maddr, 0, 64 * WORK_MEMORY_SIZE*sizeof(uint8_t));
printf("****************************** Correctness test (PoW function) ******************************\n");
printf("Test message: %s\n", mess);
powFunction(input, inputLen, Maddr, output);
view_data_u8("PoW", output, OUTPUT_LEN);
printf("*********************************************************************************************\n");
printf("*************************************************** Performance test (PoW function) ***************************************************\n");
uint8_t *result = (uint8_t *)malloc(iterNum * OUTPUT_LEN * sizeof(uint8_t));
assert(NULL != result);
memset(result, 0, iterNum * OUTPUT_LEN * sizeof(uint8_t));
uint32_t threadNumArr[] = {1, 4, 8, 12, 16, 20, 24, 32, 48, 64};
uint32_t threadNumTypes = sizeof(threadNumArr) / sizeof(uint32_t);
printf(" %-18s", "Algorithm");
for (uint32_t ix = 0; ix < threadNumTypes; ++ix)
printf("%12d", threadNumArr[ix]);
printf("\n");
printf("00 %-18s\t", "PoW");
for (uint32_t ix = 0; ix < threadNumTypes; ++ix) {
omp_set_num_threads(threadNumArr[ix]);
double startTime = get_wall_time();
if (threadNumArr[ix] == 1) {
for (j = 0; j < iterNum; ++j) {
powFunction(input, inputLen, Maddr, result + j * OUTPUT_LEN);
}
} else {
#pragma omp parallel for firstprivate(input), private(j) shared(result)
for (j = 0; j < iterNum; ++j) {
powFunction(input, inputLen, Maddr + omp_get_thread_num() * WORK_MEMORY_SIZE, result + j * OUTPUT_LEN);
}
}
double endTime = get_wall_time();
double costTime = endTime - startTime;
printf("%5.0f bps ", iterNum / costTime); fflush(stdout);
// Check result
for (j = 0; j < iterNum; j += 1) {
if (memcmp(output, result + j * OUTPUT_LEN, OUTPUT_LEN)) {
printf("Thread num: %d, j: %ld\n", threadNumArr[ix], j);
view_data_u8("output", output, OUTPUT_LEN);
view_data_u8("result", result + j * OUTPUT_LEN, OUTPUT_LEN);
abort();
}
}
}
printf("\n");
printf("***************************************************************************************************************************************\n");
if (NULL != result) {
free(result);
result = NULL;
}
if (NULL != Maddr) {
free(Maddr);
Maddr = NULL;
}
}
*/
#define OUTPUT_BUFFER_SIZE (32 * 1024UL * 1024UL)
#define MAX_TEST_INPUT_LEN 140
#define MAX_OUT_FILE_NAME_LEN 25
const char testInputCase[][MAX_TEST_INPUT_LEN] = {
"",
"HelloWorld",
"0123456789"
};
/*
 * Generate OUTPUT_BUFFER_SIZE bytes of chained PoW output per test
 * vector and dump each stream to "<outFileName>-<ix>.txt" for NIST
 * statistical testing. Each iteration feeds the previous 32-byte digest
 * back in as the next input.
 */
void powNistTest(const char *outFileName) {
	const uint64_t iterNum = 1024UL * 1024UL;
	// const uint64_t iterNum = 1024UL;

	uint8_t *outputBuffer = (uint8_t *)malloc(OUTPUT_BUFFER_SIZE * sizeof(uint8_t));
	assert(NULL != outputBuffer);
	memset(outputBuffer, 0, OUTPUT_BUFFER_SIZE * sizeof(uint8_t));

	uint8_t *Maddr = (uint8_t *)malloc(WORK_MEMORY_SIZE*sizeof(uint8_t));
	assert(NULL != Maddr);
	memset(Maddr, 0, WORK_MEMORY_SIZE*sizeof(uint8_t));

	initOneWayFunction();

	uint32_t testInputCaseNum = sizeof(testInputCase) / sizeof(const char [MAX_TEST_INPUT_LEN]);
	for (uint32_t testCaseIx = 0; testCaseIx < testInputCaseNum; ++testCaseIx) {
		char curOutFileName[MAX_OUT_FILE_NAME_LEN] = "";
		/* snprintf (was sprintf): curOutFileName is only
		 * MAX_OUT_FILE_NAME_LEN bytes and outFileName is caller-supplied. */
		snprintf(curOutFileName, sizeof(curOutFileName), "%s-%u.txt", outFileName, testCaseIx);

		FILE *fp = NULL;
		if (NULL != (fp = fopen(curOutFileName, "wb"))) {
			const uint32_t testInputCaseLen = strlen((char *)testInputCase[testCaseIx]);
			uint8_t input[MAX_TEST_INPUT_LEN];
			memset(input, 0, MAX_TEST_INPUT_LEN*sizeof(uint8_t));
			memcpy(input, testInputCase[testCaseIx], testInputCaseLen*sizeof(uint8_t));

			double startTime = get_wall_time();
			powFunction(input, testInputCaseLen, Maddr, outputBuffer);
			for (uint64_t i = 1, j = 0; i < iterNum; ++i) {
				/* BUG FIX: was OUTPUT_LEN * sizeof(uint32_t), which copied
				 * 4*OUTPUT_LEN bytes and read past the end of outputBuffer
				 * on the final iterations. Only OUTPUT_LEN digest bytes are
				 * consumed below (inputLen == OUTPUT_LEN), so the produced
				 * stream is unchanged. */
				memcpy(input, outputBuffer + j, OUTPUT_LEN * sizeof(uint8_t));
				j += OUTPUT_LEN;
				powFunction(input, OUTPUT_LEN, Maddr, outputBuffer + j);
			}
			double endTime = get_wall_time();
			double costTime = endTime - startTime;
			fprintf(stdout, "TestCaseIx: %d, Input: %s, IterNum: %lu, Time: %4.2f, Performance: %5.2f bps\n", testCaseIx,
			        testInputCase[testCaseIx], iterNum, costTime, ((double)(iterNum * OUTPUT_LEN)) / costTime); fflush(stdout);
			fwrite(outputBuffer, sizeof(uint8_t), OUTPUT_BUFFER_SIZE / sizeof(uint8_t), fp);
			fclose(fp);
		} else {
			fprintf(stderr, "Error: Open %s failed!\n", curOutFileName);
			abort();
		}
	}

	if (NULL != outputBuffer) {
		free(outputBuffer);
		outputBuffer = NULL;
	}
	if (NULL != Maddr) {
		free(Maddr);
		Maddr = NULL;
	}
}
|
convolution_5x5.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// 5x5 convolution, stride 1, for float32 data, with ARM NEON fast paths
// (inline assembly for both aarch64 and armv7, scalar fallback otherwise).
// bottom_blob: input feature map (w x h x inch); top_blob: pre-sized output
// (outw x outh x outch); _kernel: outch*inch*25 weights laid out row-major
// per (output, input) channel pair; _bias: optional per-output-channel bias.
static void conv5x5s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    // output channels are independent — parallelize across them
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;
        out.fill(bias0);

        // accumulate every input channel's contribution into `out`
        for (int q = 0; q < inch; q++)
        {
            float* outptr = out;
            float* outptr2 = outptr + outw; // second output row (rows done in pairs)

            const float* img0 = bottom_blob.channel(q);

            // 5x5 kernel for output channel p, input channel q
            const float* kernel0 = kernel + p * inch * 25 + q * 25;

            // six consecutive input rows feed two stride-1 output rows
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w * 2;
            const float* r3 = img0 + w * 3;
            const float* r4 = img0 + w * 4;
            const float* r5 = img0 + w * 5;

            // kernel rows 0..4
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 5;
            const float* k2 = kernel0 + 10;
            const float* k3 = kernel0 + 15;
            const float* k4 = kernel0 + 20;

#if __ARM_NEON
            // all 25 weights preloaded into vector registers, 4 lanes at a time
            // (the 25th weight is broadcast on its own)
            float32x4_t _k0123 = vld1q_f32(kernel0);
            float32x4_t _k4567 = vld1q_f32(kernel0 + 4);
            float32x4_t _k891011 = vld1q_f32(kernel0 + 8);
            float32x4_t _k12131415 = vld1q_f32(kernel0 + 12);
            float32x4_t _k16171819 = vld1q_f32(kernel0 + 16);
            float32x4_t _k20212223 = vld1q_f32(kernel0 + 20);
            float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]);
#endif // __ARM_NEON

            int i = 0;

            // main loop: produce two output rows per iteration
            for (; i + 1 < outh; i += 2)
            {
#if __ARM_NEON
                int nn = outw >> 2;              // vectorized groups of 4 output pixels
                int remain = outw - (nn << 2);   // leftover pixels handled scalar/NEON-horizontal
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                if (nn > 0)
                {
                    asm volatile(
                        // v11 = rx1 / rx3
                        // v12 = rx2
                        // v13 v14 = intermediate sum register
                        "prfm       pldl1keep, [%1, #128]       \n"
                        "ld1        {v7.4s}, [%1]               \n" // v7 = out

                        "0:                                     \n"

                        "prfm       pldl1keep, [%2, #128]       \n"
                        "ld1        {v8.4s}, [%2]               \n" // v8 = out2

                        // r1
                        "prfm       pldl1keep, [%4, #256]       \n"
                        "ld1        {v9.4s, v10.4s}, [%4]       \n" // v9 v10 = r10 r14
                        "add        %4, %4, #16                 \n"

                        "ext        v11.16b, v9.16b, v10.16b, #4 \n" //r11
                        "fmul       v13.4s, v9.4s, %19.s[1]     \n"
                        "fmla       v8.4s, v9.4s, %18.s[0]      \n"

                        "ext        v12.16b, v9.16b, v10.16b, #8 \n" //r12
                        "fmla       v7.4s, v11.4s, %19.s[2]     \n"
                        "fmul       v14.4s, v11.4s, %18.s[1]    \n"

                        "ext        v11.16b, v9.16b, v10.16b, #12 \n" //r13
                        "fmla       v13.4s, v12.4s, %19.s[3]    \n"
                        "fmla       v8.4s, v12.4s, %18.s[2]     \n"

                        "fmla       v7.4s, v11.4s, %20.s[0]     \n"
                        "fmla       v14.4s, v11.4s, %18.s[3]    \n"

                        "prfm       pldl1keep, [%5, #256]       \n"

                        "fmla       v13.4s, v10.4s, %20.s[1]    \n"
                        "fmla       v8.4s, v10.4s, %19.s[0]     \n"

                        // r2
                        "ld1        {v9.4s, v10.4s}, [%5]       \n" // v9 v10 = r20 r24
                        "add        %5, %5, #16                 \n"

                        "ext        v11.16b, v9.16b, v10.16b, #4 \n" //r21
                        "fmla       v7.4s, v9.4s, %20.s[2]      \n"
                        "fmla       v14.4s, v9.4s, %19.s[1]     \n"

                        "ext        v12.16b, v9.16b, v10.16b, #8 \n" //r22
                        "fmla       v13.4s, v11.4s, %20.s[3]    \n"
                        "fmla       v8.4s, v11.4s, %19.s[2]     \n"

                        "ext        v11.16b, v9.16b, v10.16b, #12 \n" //r23
                        "fmla       v7.4s, v12.4s, %21.s[0]     \n"
                        "fmla       v14.4s, v12.4s, %19.s[3]    \n"

                        "fmla       v13.4s, v11.4s, %21.s[1]    \n"
                        "fmla       v8.4s, v11.4s, %20.s[0]     \n"

                        "prfm       pldl1keep, [%6, #256]       \n"

                        "fmla       v7.4s, v10.4s, %21.s[2]     \n"
                        "fmla       v14.4s, v10.4s, %20.s[1]    \n"

                        // r3
                        "ld1        {v9.4s, v10.4s}, [%6]       \n" // v9 v10 = r30 r34
                        "add        %6, %6, #16                 \n"

                        "ext        v11.16b, v9.16b, v10.16b, #4 \n" //r31
                        "fmla       v13.4s, v9.4s, %21.s[3]     \n"
                        "fmla       v8.4s, v9.4s, %20.s[2]      \n"

                        "ext        v12.16b, v9.16b, v10.16b, #8 \n" //r32
                        "fmla       v7.4s, v11.4s, %22.s[0]     \n"
                        "fmla       v14.4s, v11.4s, %20.s[3]    \n"

                        "ext        v11.16b, v9.16b, v10.16b, #12 \n" //r33
                        "fmla       v13.4s, v12.4s, %22.s[1]    \n"
                        "fmla       v8.4s, v12.4s, %21.s[0]     \n"

                        "fmla       v7.4s, v11.4s, %22.s[2]     \n"
                        "fmla       v14.4s, v11.4s, %21.s[1]    \n"

                        "prfm       pldl1keep, [%7, #256]       \n"

                        "fmla       v13.4s, v10.4s, %22.s[3]    \n"
                        "fmla       v8.4s, v10.4s, %21.s[2]     \n"

                        // r4
                        "ld1        {v9.4s, v10.4s}, [%7]       \n" // v9 v10 = r40 r44
                        "add        %7, %7, #16                 \n"

                        "ext        v11.16b, v9.16b, v10.16b, #4 \n" //r41
                        "fmla       v7.4s, v9.4s, %23.s[0]      \n"
                        "fmla       v14.4s, v9.4s, %21.s[3]     \n"

                        "ext        v12.16b, v9.16b, v10.16b, #8 \n" //r41
                        "fmla       v13.4s, v11.4s, %23.s[1]    \n"
                        "fmla       v8.4s, v11.4s, %22.s[0]     \n"

                        "ext        v11.16b, v9.16b, v10.16b, #12 \n" //r41
                        "fmla       v7.4s, v12.4s, %23.s[2]     \n"
                        "fmla       v14.4s, v12.4s, %22.s[1]    \n"

                        "fmla       v13.4s, v11.4s, %23.s[3]    \n"
                        "fmla       v8.4s, v11.4s, %22.s[2]     \n"

                        "prfm       pldl1keep, [%3, #256]       \n"

                        "fmla       v7.4s, v10.4s, %24.s[0]     \n"
                        "fmla       v14.4s, v10.4s, %22.s[3]    \n"

                        // r0 and r5
                        "ld1        {v9.4s, v10.4s}, [%3]       \n" // v9 v10 = r00 r04
                        "add        %3, %3, #16                 \n"

                        "ext        v11.16b, v9.16b, v10.16b, #4 \n" //r01
                        "fmla       v13.4s, v11.4s, %18.s[1]    \n"

                        "ext        v12.16b, v9.16b, v10.16b, #8 \n" //r02
                        "fmla       v7.4s, v12.4s, %18.s[2]     \n"

                        "ext        v11.16b, v9.16b, v10.16b, #12 \n" //r03

                        "prfm       pldl1keep, [%8, #256]       \n"

                        "fmla       v13.4s, v11.4s, %18.s[3]    \n"

                        // r5
                        "ld1        {v11.4s, v12.4s}, [%8]      \n" // v11 v12 = r50 r54
                        "add        %8, %8, #16                 \n"

                        "fmla       v8.4s, v11.4s, %23.s[0]     \n"
                        "fmla       v14.4s, v12.4s, %24.s[0]    \n"

                        "fmla       v7.4s, v9.4s, %18.s[0]      \n"
                        "fmla       v13.4s, v10.4s, %19.s[0]    \n"

                        "ext        v9.16b, v11.16b, v12.16b, #4 \n" //r51
                        "ext        v10.16b, v11.16b, v12.16b, #8 \n" //r52

                        "fmla       v14.4s, v9.4s, %23.s[1]     \n"

                        "ext        v9.16b, v11.16b, v12.16b, #12 \n" //r53

                        "fmla       v8.4s, v10.4s, %23.s[2]     \n"

                        "fmla       v14.4s, v9.4s, %23.s[3]     \n"

                        "fadd       v7.4s, v7.4s, v13.4s        \n"

                        "st1        {v7.4s}, [%1], #16          \n"

                        "fadd       v8.4s, v8.4s, v14.4s        \n"

                        "prfm       pldl1keep, [%1, #128]       \n"
                        "ld1        {v7.4s}, [%1]               \n" // v7 = out

                        "st1        {v8.4s}, [%2], #16          \n"

                        "subs       %w0, %w0, #1                \n"
                        "bne        0b                          \n"

                        : "=r"(nn),      // %0
                        "=r"(outptr),  // %1
                        "=r"(outptr2), // %2
                        "=r"(r0),      // %3
                        "=r"(r1),      // %4
                        "=r"(r2),      // %5
                        "=r"(r3),      // %6
                        "=r"(r4),      // %7
                        "=r"(r5)       // %8
                        : "0"(nn),
                        "1"(outptr),
                        "2"(outptr2),
                        "3"(r0),
                        "4"(r1),
                        "5"(r2),
                        "6"(r3),
                        "7"(r4),
                        "8"(r5),
                        "w"(_k0123),     // %18
                        "w"(_k4567),     // %19
                        "w"(_k891011),   // %20
                        "w"(_k12131415), // %21
                        "w"(_k16171819), // %22
                        "w"(_k20212223), // %23
                        "w"(_k24242424)  // %24
                        : "cc", "memory", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15");
                }
#else
                if (nn > 0)
                {
                    asm volatile(
                        //                     "veor       q13, q13            \n"
                        //                     "veor       q14, q14            \n"

                        "pld        [%1, #128]          \n"
                        "vld1.f32   {d14-d15}, [%1]     \n" // q7 = out

                        "0:                             \n"

                        // q11 = rx1 / rx3
                        // q12 = rx2
                        // q13 q14 = intermediate sum register

                        "pld        [%2, #128]          \n"
                        "vld1.f32   {d16-d17}, [%2]     \n" // q8 = out2

                        "pld        [%4, #256]          \n"

                        // r1
                        "vld1.f32   {d18-d21}, [%4]     \n" // q9 q10 = r10 r14
                        "add        %4, #16             \n"

                        "vext.32    q11, q9, q10, #1    \n" // r11
                        "vmul.f32   q13, q9, %e19[1]    \n"
                        "vmla.f32   q8, q9, %e18[0]     \n"

                        "vext.32    q12, q9, q10, #2    \n" // r12
                        "vmla.f32   q7, q11, %f19[0]    \n"
                        "vmul.f32   q14, q11, %e18[1]   \n"

                        "vext.32    q11, q9, q10, #3    \n" // r13
                        "vmla.f32   q13, q12, %f19[1]   \n"
                        "vmla.f32   q8, q12, %f18[0]    \n"

                        "vmla.f32   q7, q11, %e20[0]    \n"
                        "vmla.f32   q14, q11, %f18[1]   \n"

                        "pld        [%5, #256]          \n"

                        "vmla.f32   q13, q10, %e20[1]   \n"
                        "vmla.f32   q8, q10, %e19[0]    \n"

                        // r2
                        "vld1.f32   {d18-d21}, [%5]     \n" // q9 q10 = r20 r24
                        "add        %5, #16             \n"

                        "vext.32    q11, q9, q10, #1    \n" // r21
                        "vmla.f32   q7, q9, %f20[0]     \n"
                        "vmla.f32   q14, q9, %e19[1]    \n"

                        "vext.32    q12, q9, q10, #2    \n" // r22
                        "vmla.f32   q13, q11, %f20[1]   \n"
                        "vmla.f32   q8, q11, %f19[0]    \n"

                        "vext.32    q11, q9, q10, #3    \n" // r23
                        "vmla.f32   q7, q12, %e21[0]    \n"
                        "vmla.f32   q14, q12, %f19[1]   \n"

                        "vmla.f32   q13, q11, %e21[1]   \n"
                        "vmla.f32   q8, q11, %e20[0]    \n"

                        "pld        [%6, #256]          \n"

                        "vmla.f32   q7, q10, %f21[0]    \n"
                        "vmla.f32   q14, q10, %e20[1]   \n"

                        // r3
                        "vld1.f32   {d18-d21}, [%6]     \n" // q9 q10 = r30 r34
                        "add        %6, #16             \n"

                        "vext.32    q11, q9, q10, #1    \n" // r31
                        "vmla.f32   q13, q9, %f21[1]    \n"
                        "vmla.f32   q8, q9, %f20[0]     \n"

                        "vext.32    q12, q9, q10, #2    \n" // r32
                        "vmla.f32   q7, q11, %e22[0]    \n"
                        "vmla.f32   q14, q11, %f20[1]   \n"

                        "vext.32    q11, q9, q10, #3    \n" // r33
                        "vmla.f32   q13, q12, %e22[1]   \n"
                        "vmla.f32   q8, q12, %e21[0]    \n"

                        "vmla.f32   q7, q11, %f22[0]    \n"
                        "vmla.f32   q14, q11, %e21[1]   \n"

                        "pld        [%7, #256]          \n"

                        "vmla.f32   q13, q10, %f22[1]   \n"
                        "vmla.f32   q8, q10, %f21[0]    \n"

                        // r4
                        "vld1.f32   {d18-d21}, [%7]     \n" // q9 q10 = r40 r44
                        "add        %7, #16             \n"

                        "vext.32    q11, q9, q10, #1    \n" // r41
                        "vmla.f32   q7, q9, %e23[0]     \n"
                        "vmla.f32   q14, q9, %f21[1]    \n"

                        "vext.32    q12, q9, q10, #2    \n" // r42
                        "vmla.f32   q13, q11, %e23[1]   \n"
                        "vmla.f32   q8, q11, %e22[0]    \n"

                        "vext.32    q11, q9, q10, #3    \n" // r43
                        "vmla.f32   q7, q12, %f23[0]    \n"
                        "vmla.f32   q14, q12, %e22[1]   \n"

                        "vmla.f32   q13, q11, %f23[1]   \n"
                        "vmla.f32   q8, q11, %f22[0]    \n"

                        "pld        [%3, #256]          \n"

                        "vmla.f32   q7, q10, %e24[0]    \n"
                        "vmla.f32   q14, q10, %f22[1]   \n"

                        // r0 and r5
                        "vld1.f32   {d18-d21}, [%3]     \n" // q9 q10 = r00 r04
                        "add        %3, #16             \n"

                        "vext.32    q11, q9, q10, #1    \n" // r01
                        "vmla.f32   q13, q11, %e18[1]   \n"

                        "vext.32    q12, q9, q10, #2    \n" // r02
                        "vmla.f32   q7, q12, %f18[0]    \n"

                        "vext.32    q11, q9, q10, #3    \n" // r03

                        "pld        [%8, #256]          \n"

                        "vmla.f32   q13, q11, %f18[1]   \n"

                        // r5
                        "vld1.f32   {d22-d25}, [%8]     \n" // q11 q12 = r50 r54
                        "add        %8, #16             \n"

                        "vmla.f32   q8, q11, %e23[0]    \n"
                        "vmla.f32   q14, q12, %e24[0]   \n"

                        "vmla.f32   q7, q9, %e18[0]     \n"
                        "vmla.f32   q13, q10, %e19[0]   \n"

                        "vext.32    q9, q11, q12, #1    \n" // r51
                        "vext.32    q10, q11, q12, #2   \n" // r52

                        "vmla.f32   q14, q9, %e23[1]    \n"

                        "vext.32    q9, q11, q12, #3    \n" // r53

                        "vmla.f32   q8, q10, %f23[0]    \n"

                        "vmla.f32   q14, q9, %f23[1]    \n"

                        "vadd.f32   q7, q7, q13         \n"

                        //                     "veor       q13, q13            \n"

                        "vst1.f32   {d14-d15}, [%1]!    \n"

                        "vadd.f32   q8, q8, q14         \n"

                        "pld        [%1, #128]          \n"
                        "vld1.f32   {d14-d15}, [%1]     \n" // q7 = out

                        //                     "veor       q14, q14            \n"

                        "vst1.f32   {d16-d17}, [%2]!    \n"

                        "subs       %0, #1              \n"
                        "bne        0b                  \n"

                        : "=r"(nn),      // %0
                        "=r"(outptr),  // %1
                        "=r"(outptr2), // %2
                        "=r"(r0),      // %3
                        "=r"(r1),      // %4
                        "=r"(r2),      // %5
                        "=r"(r3),      // %6
                        "=r"(r4),      // %7
                        "=r"(r5)       // %8
                        : "0"(nn),
                        "1"(outptr),
                        "2"(outptr2),
                        "3"(r0),
                        "4"(r1),
                        "5"(r2),
                        "6"(r3),
                        "7"(r4),
                        "8"(r5),
                        "w"(_k0123),     // %18
                        "w"(_k4567),     // %19
                        "w"(_k891011),   // %20
                        "w"(_k12131415), // %21
                        "w"(_k16171819), // %22
                        "w"(_k20212223), // %23
                        "w"(_k24242424)  // %24
                        : "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
                }
#endif // __aarch64__
#endif // __ARM_NEON

                // tail pixels of the row pair (fewer than 4 remaining)
                for (; remain > 0; remain--)
                {
                    float sum = 0;   // output row i
                    float sum2 = 0;  // output row i+1
#if __ARM_NEON
                    // dot-product the first 4 columns of each kernel row with
                    // vectors; the 5th column is gathered separately below
                    float32x4_t _r1 = vld1q_f32(r1);
                    float32x4_t _k1 = vld1q_f32(k1);
                    float32x4_t _sum = vmulq_f32(_r1, _k1);
                    float32x4_t _sum2 = vmulq_f32(_r1, _k0123);

                    float32x4_t _r2 = vld1q_f32(r2);
                    float32x4_t _k2 = vld1q_f32(k2);
                    _sum = vmlaq_f32(_sum, _r2, _k2);
                    _sum2 = vmlaq_f32(_sum2, _r2, _k1);

                    float32x4_t _r3 = vld1q_f32(r3);
                    float32x4_t _k3 = vld1q_f32(k3);
                    _sum = vmlaq_f32(_sum, _r3, _k3);
                    _sum2 = vmlaq_f32(_sum2, _r3, _k2);

                    float32x4_t _r4 = vld1q_f32(r4);
                    _sum = vmlaq_f32(_sum, _r4, _k20212223);
                    _sum2 = vmlaq_f32(_sum2, _r4, _k3);

                    float32x4_t _r0 = vld1q_f32(r0);
                    _sum = vmlaq_f32(_sum, _r0, _k0123);

                    float32x4_t _r5 = vld1q_f32(r5);
                    _sum2 = vmlaq_f32(_sum2, _r5, _k20212223);

                    // 5th kernel column (k0[4]..k3[4]) and matching pixels
                    float32x4_t _k_t4;
                    _k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0);
                    _k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1);
                    _k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2);
                    _k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3);

                    float32x4_t _r_t4;

                    _r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0);
                    _r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1);
                    _r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2);
                    _r_t4 = vsetq_lane_f32(r3[4], _r_t4, 3);
                    _sum = vmlaq_f32(_sum, _r_t4, _k_t4);

                    sum = r4[4] * k4[4];

                    // shift lanes down one row for the second output row
                    _r_t4 = vextq_f32(_r_t4, _r_t4, 1);
                    _r_t4 = vsetq_lane_f32(r4[4], _r_t4, 3);
                    _sum2 = vmlaq_f32(_sum2, _r_t4, _k_t4);

                    sum2 = r5[4] * k4[4];

                    // horizontal reduction of both accumulators at once
                    float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
                    float32x2_t _ss2 = vadd_f32(vget_low_f32(_sum2), vget_high_f32(_sum2));
                    float32x2_t _ss_ss2 = vpadd_f32(_ss, _ss2);

                    sum += vget_lane_f32(_ss_ss2, 0);
                    sum2 += vget_lane_f32(_ss_ss2, 1);
#else
                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r0[3] * k0[3];
                    sum += r0[4] * k0[4];

                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r1[3] * k1[3];
                    sum += r1[4] * k1[4];

                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];
                    sum += r2[3] * k2[3];
                    sum += r2[4] * k2[4];

                    sum += r3[0] * k3[0];
                    sum += r3[1] * k3[1];
                    sum += r3[2] * k3[2];
                    sum += r3[3] * k3[3];
                    sum += r3[4] * k3[4];

                    sum += r4[0] * k4[0];
                    sum += r4[1] * k4[1];
                    sum += r4[2] * k4[2];
                    sum += r4[3] * k4[3];
                    sum += r4[4] * k4[4];

                    // second output row: same kernel, input rows shifted down by one
                    sum2 += r1[0] * k0[0];
                    sum2 += r1[1] * k0[1];
                    sum2 += r1[2] * k0[2];
                    sum2 += r1[3] * k0[3];
                    sum2 += r1[4] * k0[4];

                    sum2 += r2[0] * k1[0];
                    sum2 += r2[1] * k1[1];
                    sum2 += r2[2] * k1[2];
                    sum2 += r2[3] * k1[3];
                    sum2 += r2[4] * k1[4];

                    sum2 += r3[0] * k2[0];
                    sum2 += r3[1] * k2[1];
                    sum2 += r3[2] * k2[2];
                    sum2 += r3[3] * k2[3];
                    sum2 += r3[4] * k2[4];

                    sum2 += r4[0] * k3[0];
                    sum2 += r4[1] * k3[1];
                    sum2 += r4[2] * k3[2];
                    sum2 += r4[3] * k3[3];
                    sum2 += r4[4] * k3[4];

                    sum2 += r5[0] * k4[0];
                    sum2 += r5[1] * k4[1];
                    sum2 += r5[2] * k4[2];
                    sum2 += r5[3] * k4[3];
                    sum2 += r5[4] * k4[4];
#endif // __ARM_NEON
                    *outptr += sum;
                    *outptr2 += sum2;

                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    r4++;
                    r5++;
                    outptr++;
                    outptr2++;
                }

                // advance input rows: 4 (kernel overhang) + w (skip the row
                // already consumed as the pair's second row)
                r0 += 4 + w;
                r1 += 4 + w;
                r2 += 4 + w;
                r3 += 4 + w;
                r4 += 4 + w;
                r5 += 4 + w;

                outptr += outw;
                outptr2 += outw;
            }

            // leftover single output row (odd outh)
            for (; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 2;
                int remain = outw - (nn << 2);
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                if (nn > 0)
                {
                    asm volatile(
                        "prfm       pldl1keep, [%1, #128]          \n"

                        "prfm       pldl1keep, [%2, #256]          \n"
                        "ld1        {v8.4s, v9.4s}, [%2]           \n" // _r00 = vld1q_f32(r0+j);
                        "add        %2, %2, #16                    \n"

                        "0:                                        \n"

                        "ld1        {v7.4s}, [%1]                  \n" // _sum = vld1q_f32(outptr+j);

                        "ext        v10.16b, v8.16b, v9.16b, #4    \n" //_r01
                        "ext        v11.16b, v8.16b, v9.16b, #8    \n" //_r02
                        "ext        v12.16b, v8.16b, v9.16b, #12   \n" //_r03

                        "fmla       v7.4s, v8.4s, %14.s[0]         \n"
                        "fmul       v13.4s, v10.4s, %14.s[1]       \n"

                        "prfm       pldl1keep, [%3, #256]          \n"

                        "fmul       v14.4s, v11.4s, %14.s[2]       \n"
                        "fmul       v15.4s, v12.4s, %14.s[3]       \n"
                        "fmla       v7.4s, v9.4s, %15.s[0]         \n"

                        "ld1        {v8.4s, v9.4s}, [%3]           \n"
                        "add        %3, %3, #16                    \n"

                        "ext        v10.16b, v8.16b, v9.16b, #4    \n" //_r11
                        "ext        v11.16b, v8.16b, v9.16b, #8    \n" //_r12
                        "ext        v12.16b, v8.16b, v9.16b, #12   \n" //_r13

                        "fmla       v7.4s, v8.4s, %15.s[1]         \n"
                        "fmla       v13.4s, v10.4s, %15.s[2]       \n"

                        "prfm       pldl1keep, [%4, #256]          \n"

                        "fmla       v14.4s, v11.4s, %15.s[3]       \n"
                        "fmla       v15.4s, v12.4s, %16.s[0]       \n"
                        "fmla       v7.4s, v9.4s, %16.s[1]         \n"

                        "ld1        {v8.4s, v9.4s}, [%4]           \n"
                        "add        %4, %4, #16                    \n"

                        "ext        v10.16b, v8.16b, v9.16b, #4    \n" //_r21
                        "ext        v11.16b, v8.16b, v9.16b, #8    \n" //_r22
                        "ext        v12.16b, v8.16b, v9.16b, #12   \n" //_r23

                        "fmla       v7.4s, v8.4s, %16.s[2]         \n"
                        "fmla       v13.4s, v10.4s, %16.s[3]       \n"

                        "prfm       pldl1keep, [%5, #256]          \n"

                        "fmla       v14.4s, v11.4s, %17.s[0]       \n"
                        "fmla       v15.4s, v12.4s, %17.s[1]       \n"
                        "fmla       v7.4s, v9.4s, %17.s[2]         \n"

                        "ld1        {v8.4s, v9.4s}, [%5]           \n"
                        "add        %5, %5, #16                    \n"

                        "ext        v10.16b, v8.16b, v9.16b, #4    \n" //_r31
                        "ext        v11.16b, v8.16b, v9.16b, #8    \n" //_r32
                        "ext        v12.16b, v8.16b, v9.16b, #12   \n" //_r33

                        "fmla       v7.4s, v8.4s, %17.s[3]         \n"
                        "fmla       v13.4s, v10.4s, %18.s[0]       \n"

                        "prfm       pldl1keep, [%6, #256]          \n"

                        "fmla       v14.4s, v11.4s, %18.s[1]       \n"
                        "fmla       v15.4s, v12.4s, %18.s[2]       \n"
                        "fmla       v7.4s, v9.4s, %18.s[3]         \n"

                        "ld1        {v8.4s, v9.4s}, [%6]           \n"
                        "add        %6, %6, #16                    \n"

                        "ext        v10.16b, v8.16b, v9.16b, #4    \n" //_r41
                        "ext        v11.16b, v8.16b, v9.16b, #8    \n" //_r42
                        "ext        v12.16b, v8.16b, v9.16b, #12   \n" //_r43

                        "fmla       v7.4s, v8.4s, %19.s[0]         \n"
                        "fmla       v13.4s, v10.4s, %19.s[1]       \n"
                        "fmla       v14.4s, v11.4s, %19.s[2]       \n"
                        "fmla       v15.4s, v12.4s, %19.s[3]       \n"
                        "fmla       v7.4s, v9.4s, %20.s[0]         \n"

                        "fadd       v14.4s, v14.4s, v15.4s         \n"
                        "fadd       v7.4s, v7.4s, v13.4s           \n"

                        "prfm       pldl1keep, [%2, #256]          \n"

                        "fadd       v7.4s, v7.4s, v14.4s           \n"

                        "ld1        {v8.4s, v9.4s}, [%2]           \n"
                        "add        %2, %2, #16                    \n"

                        "st1        {v7.4s}, [%1], #16             \n"

                        "prfm       pldl1keep, [%1, #128]          \n"

                        "subs       %w0, %w0, #1                   \n"
                        "bne        0b                             \n"

                        "sub        %2, %2, #16                    \n"
                        : "=r"(nn),     // %0
                        "=r"(outptr), // %1
                        "=r"(r0),     // %2
                        "=r"(r1),     // %3
                        "=r"(r2),     // %4
                        "=r"(r3),     // %5
                        "=r"(r4)      // %6
                        : "0"(nn),
                        "1"(outptr),
                        "2"(r0),
                        "3"(r1),
                        "4"(r2),
                        "5"(r3),
                        "6"(r4),
                        "w"(_k0123),     // %14
                        "w"(_k4567),     // %15
                        "w"(_k891011),   // %16
                        "w"(_k12131415), // %17
                        "w"(_k16171819), // %18
                        "w"(_k20212223), // %19
                        "w"(_k24242424)  // %20
                        : "cc", "memory", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15");
                }
#else
                if (nn > 0)
                {
                    asm volatile(
                        //                     "veor       q15, q15            \n"// _sum3 = 0;

                        "pld        [%1, #128]          \n"

                        "pld        [%2, #256]          \n"
                        "vld1.f32   {d16-d19}, [%2]     \n" // _r00 = vld1q_f32(r0+j);
                        "add        %2, #16             \n"

                        "0:                             \n"

                        "vld1.f32   {d14-d15}, [%1]     \n" // _sum = vld1q_f32(outptr+j);

                        //                     "veor       q13, q13            \n"// _sum2 = 0;
                        //                     "veor       q14, q14            \n"// _sum3 = 0;

                        "vext.32    q10, q8, q9, #1     \n" // _r01
                        "vext.32    q11, q8, q9, #2     \n" // _r02
                        "vext.32    q12, q8, q9, #3     \n" // _r03

                        "vmla.f32   q7, q8, %e14[0]     \n"
                        "vmul.f32   q13, q10, %e14[1]   \n"

                        "pld        [%3, #256]          \n"

                        "vmul.f32   q14, q11, %f14[0]   \n"
                        "vmul.f32   q15, q12, %f14[1]   \n"
                        "vmla.f32   q7, q9, %e15[0]     \n"

                        "vld1.f32   {d16-d19}, [%3]     \n"
                        "add        %3, #16             \n"

                        "vext.32    q10, q8, q9, #1     \n"
                        "vext.32    q11, q8, q9, #2     \n"
                        "vext.32    q12, q8, q9, #3     \n"

                        "vmla.f32   q7, q8, %e15[1]     \n"
                        "vmla.f32   q13, q10, %f15[0]   \n"

                        "pld        [%4, #256]          \n"

                        "vmla.f32   q14, q11, %f15[1]   \n"
                        "vmla.f32   q15, q12, %e16[0]   \n"
                        "vmla.f32   q7, q9, %e16[1]     \n"

                        "vld1.f32   {d16-d19}, [%4]     \n"
                        "add        %4, #16             \n"

                        "vext.32    q10, q8, q9, #1     \n"
                        "vext.32    q11, q8, q9, #2     \n"
                        "vext.32    q12, q8, q9, #3     \n"

                        "vmla.f32   q7, q8, %f16[0]     \n"
                        "vmla.f32   q13, q10, %f16[1]   \n"

                        "pld        [%5, #256]          \n"

                        "vmla.f32   q14, q11, %e17[0]   \n"
                        "vmla.f32   q15, q12, %e17[1]   \n"
                        "vmla.f32   q7, q9, %f17[0]     \n"

                        "vld1.f32   {d16-d19}, [%5]     \n"
                        "add        %5, #16             \n"

                        "vext.32    q10, q8, q9, #1     \n"
                        "vext.32    q11, q8, q9, #2     \n"
                        "vext.32    q12, q8, q9, #3     \n"

                        "vmla.f32   q7, q8, %f17[1]     \n"
                        "vmla.f32   q13, q10, %e18[0]   \n"

                        "pld        [%6, #256]          \n"

                        "vmla.f32   q14, q11, %e18[1]   \n"
                        "vmla.f32   q15, q12, %f18[0]   \n"
                        "vmla.f32   q7, q9, %f18[1]     \n"

                        "vld1.f32   {d16-d19}, [%6]     \n"
                        "add        %6, #16             \n"

                        "vext.32    q10, q8, q9, #1     \n"
                        "vext.32    q11, q8, q9, #2     \n"
                        "vext.32    q12, q8, q9, #3     \n"

                        "vmla.f32   q7, q8, %e19[0]     \n"
                        "vmla.f32   q13, q10, %e19[1]   \n"
                        "vmla.f32   q14, q11, %f19[0]   \n"
                        "vmla.f32   q15, q12, %f19[1]   \n"
                        "vmla.f32   q7, q9, %e20[0]     \n"

                        "vadd.f32   q14, q14, q15       \n"
                        "vadd.f32   q7, q7, q13         \n"

                        //                     "veor       q15, q15            \n"// _sum3 = 0;

                        "pld        [%2, #256]          \n"

                        "vadd.f32   q7, q7, q14         \n"

                        "vld1.f32   {d16-d19}, [%2]     \n" // _r00 = vld1q_f32(r0+j);
                        "add        %2, #16             \n"

                        "vst1.f32   {d14-d15}, [%1]!    \n"

                        "pld        [%1, #128]          \n"

                        "subs       %0, #1              \n"
                        "bne        0b                  \n"

                        "sub        %2, #16             \n"
                        : "=r"(nn),     // %0
                        "=r"(outptr), // %1
                        "=r"(r0),     // %2
                        "=r"(r1),     // %3
                        "=r"(r2),     // %4
                        "=r"(r3),     // %5
                        "=r"(r4)      // %6
                        : "0"(nn),
                        "1"(outptr),
                        "2"(r0),
                        "3"(r1),
                        "4"(r2),
                        "5"(r3),
                        "6"(r4),
                        "w"(_k0123),     // %14
                        "w"(_k4567),     // %15
                        "w"(_k891011),   // %16
                        "w"(_k12131415), // %17
                        "w"(_k16171819), // %18
                        "w"(_k20212223), // %19
                        "w"(_k24242424)  // %20
                        : "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
                }
#endif // __aarch64__
#endif // __ARM_NEON

                // tail pixels of the single row
                for (; remain > 0; remain--)
                {
                    float sum = 0;
#if __ARM_NEON
                    float32x4_t _r0 = vld1q_f32(r0);
                    float32x4_t _sum = vmulq_f32(_r0, _k0123);

                    float32x4_t _r1 = vld1q_f32(r1);
                    _sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1));

                    float32x4_t _r2 = vld1q_f32(r2);
                    _sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2));

                    float32x4_t _r3 = vld1q_f32(r3);
                    _sum = vmlaq_f32(_sum, _r3, vld1q_f32(k3));

                    float32x4_t _r4 = vld1q_f32(r4);
                    _sum = vmlaq_f32(_sum, _r4, _k20212223);

                    // gather the 5th column of rows 0..3 into one vector
                    float32x4_t _k_t4;
                    _k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0);
                    _k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1);
                    _k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2);
                    _k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3);

                    float32x4_t _r_t4;

                    _r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0);
                    _r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1);
                    _r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2);
                    _r_t4 = vsetq_lane_f32(r3[4], _r_t4, 3);
                    _sum = vmlaq_f32(_sum, _r_t4, _k_t4);

                    sum = r4[4] * k4[4];

                    float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
                    _ss = vpadd_f32(_ss, _ss);

                    sum += vget_lane_f32(_ss, 0);
#else
                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r0[3] * k0[3];
                    sum += r0[4] * k0[4];

                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r1[3] * k1[3];
                    sum += r1[4] * k1[4];

                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];
                    sum += r2[3] * k2[3];
                    sum += r2[4] * k2[4];

                    sum += r3[0] * k3[0];
                    sum += r3[1] * k3[1];
                    sum += r3[2] * k3[2];
                    sum += r3[3] * k3[3];
                    sum += r3[4] * k3[4];

                    sum += r4[0] * k4[0];
                    sum += r4[1] * k4[1];
                    sum += r4[2] * k4[2];
                    sum += r4[3] * k4[3];
                    sum += r4[4] * k4[4];
#endif
                    *outptr += sum;

                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    r4++;
                    outptr++;
                }

                // advance past the 4-pixel kernel overhang to the next row
                r0 += 4;
                r1 += 4;
                r2 += 4;
                r3 += 4;
                r4 += 4;
            }
        }
    }
}
// 5x5 convolution, stride 2, for float32 data, with ARM NEON fast paths
// (inline assembly for aarch64 and armv7, scalar fallback otherwise).
// Same data layout as conv5x5s1_neon; one output row is produced per
// iteration since stride-2 rows do not share input rows the same way.
static void conv5x5s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pointer advance from the end of one output row to the start of the
    // next: a row consumes 2*outw input pixels, then one full row is skipped
    const int tailstep = w - 2 * outw + w;

    const float* kernel = _kernel;
    const float* bias = _bias;

    // output channels are independent — parallelize across them
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;
        out.fill(bias0);

        // accumulate every input channel's contribution into `out`
        for (int q = 0; q < inch; q++)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);

            // 5x5 kernel for output channel p, input channel q
            const float* kernel0 = kernel + p * inch * 25 + q * 25;

            // five input rows feed one output row
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w * 2;
            const float* r3 = img0 + w * 3;
            const float* r4 = img0 + w * 4;

            // kernel rows 0..4
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 5;
            const float* k2 = kernel0 + 10;
            const float* k3 = kernel0 + 15;
            const float* k4 = kernel0 + 20;

#if __ARM_NEON
            // all 25 weights preloaded into vector registers
            float32x4_t _k0123 = vld1q_f32(kernel0);
            float32x4_t _k4567 = vld1q_f32(kernel0 + 4);
            float32x4_t _k891011 = vld1q_f32(kernel0 + 8);
            float32x4_t _k12131415 = vld1q_f32(kernel0 + 12);
            float32x4_t _k16171819 = vld1q_f32(kernel0 + 16);
            float32x4_t _k20212223 = vld1q_f32(kernel0 + 20);
            float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]);
#endif // __ARM_NEON

            for (int i = 0; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 2;              // vectorized groups of 4 output pixels
                int remain = outw - (nn << 2);   // leftover handled below
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                if (nn > 0)
                {
                    asm volatile(
                        // ld2 de-interleaves even/odd pixels for stride-2 access
                        "prfm       pldl1keep, [%2, #256]       \n"
                        "ld2        {v8.4s, v9.4s}, [%2], #32   \n" // v8 = 0 2 4 6  q9 = 1 3 5 7
                        "prfm       pldl1keep, [%2, #256]       \n"
                        "ld2        {v10.4s, v11.4s}, [%2]      \n" // v10 = 8 10 12 14  v11 = 9 11 13 15

                        "prfm       pldl1keep, [%1, #128]       \n"

                        "0:                                     \n"

                        "ld1        {v7.4s}, [%1]               \n" // v7 = outptr

                        "ext        v12.16b, v8.16b, v10.16b, #4 \n" // v12 = 2 4 6 8
                        "ext        v11.16b, v9.16b, v11.16b, #4 \n" // v11 = 3 5 7 9
                        "ext        v10.16b, v8.16b, v10.16b, #8 \n" // v10 = 4 6 8 10

                        "fmla       v7.4s, v8.4s, %14.s[0]      \n"
                        "fmul       v13.4s, v9.4s, %14.s[1]     \n"

                        "prfm       pldl1keep, [%3, #256]       \n"

                        "fmul       v14.4s, v12.4s, %14.s[2]    \n"
                        "fmul       v15.4s, v11.4s, %14.s[3]    \n"
                        "fmla       v7.4s, v10.4s, %15.s[0]     \n"

                        "ld2        {v8.4s, v9.4s}, [%3], #32   \n"
                        "prfm       pldl1keep, [%3, #256]       \n"
                        "ld2        {v10.4s, v11.4s}, [%3]      \n"

                        "ext        v12.16b, v8.16b, v10.16b, #4 \n"
                        "ext        v11.16b, v9.16b, v11.16b, #4 \n"
                        "ext        v10.16b, v8.16b, v10.16b, #8 \n"

                        "fmla       v7.4s, v8.4s, %15.s[1]      \n"
                        "fmla       v13.4s, v9.4s, %15.s[2]     \n"

                        "prfm       pldl1keep, [%4, #256]       \n"

                        "fmla       v14.4s, v12.4s, %15.s[3]    \n"
                        "fmla       v15.4s, v11.4s, %16.s[0]    \n"
                        "fmla       v7.4s, v10.4s, %16.s[1]     \n"

                        "ld2        {v8.4s, v9.4s}, [%4], #32   \n"
                        "prfm       pldl1keep, [%4, #256]       \n"
                        "ld2        {v10.4s, v11.4s}, [%4]      \n"

                        "ext        v12.16b, v8.16b, v10.16b, #4 \n"
                        "ext        v11.16b, v9.16b, v11.16b, #4 \n"
                        "ext        v10.16b, v8.16b, v10.16b, #8 \n"

                        "fmla       v7.4s, v8.4s, %16.s[2]      \n"
                        "fmla       v13.4s, v9.4s, %16.s[3]     \n"

                        "prfm       pldl1keep, [%5, #256]       \n"

                        "fmla       v14.4s, v12.4s, %17.s[0]    \n"
                        "fmla       v15.4s, v11.4s, %17.s[1]    \n"
                        "fmla       v7.4s, v10.4s, %17.s[2]     \n"

                        "ld2        {v8.4s, v9.4s}, [%5], #32   \n"
                        "prfm       pldl1keep, [%5, #256]       \n"
                        "ld2        {v10.4s, v11.4s}, [%5]      \n"

                        "ext        v12.16b, v8.16b, v10.16b, #4 \n"
                        "ext        v11.16b, v9.16b, v11.16b, #4 \n"
                        "ext        v10.16b, v8.16b, v10.16b, #8 \n"

                        "fmla       v7.4s, v8.4s, %17.s[3]      \n"
                        "fmla       v13.4s, v9.4s, %18.s[0]     \n"

                        "prfm       pldl1keep, [%6, #256]       \n"

                        "fmla       v14.4s, v12.4s, %18.s[1]    \n"
                        "fmla       v15.4s, v11.4s, %18.s[2]    \n"
                        "fmla       v7.4s, v10.4s, %18.s[3]     \n"

                        "ld2        {v8.4s, v9.4s}, [%6], #32   \n"
                        "prfm       pldl1keep, [%6, #256]       \n"
                        "ld2        {v10.4s, v11.4s}, [%6]      \n"

                        "ext        v12.16b, v8.16b, v10.16b, #4 \n"
                        "ext        v11.16b, v9.16b, v11.16b, #4 \n"
                        "ext        v10.16b, v8.16b, v10.16b, #8 \n"

                        "fmla       v7.4s, v8.4s, %19.s[0]      \n"
                        "fmla       v13.4s, v9.4s, %19.s[1]     \n"
                        "fmla       v14.4s, v12.4s, %19.s[2]    \n"
                        "fmla       v15.4s, v11.4s, %19.s[3]    \n"
                        "fmla       v7.4s, v10.4s, %20.s[0]     \n"

                        "prfm       pldl1keep, [%2, #256]       \n"
                        "ld2        {v8.4s, v9.4s}, [%2], #32   \n"

                        "fadd       v14.4s, v14.4s, v15.4s      \n"
                        "fadd       v7.4s, v7.4s, v13.4s        \n"

                        "prfm       pldl1keep, [%2, #256]       \n"

                        "fadd       v7.4s, v7.4s, v14.4s        \n"

                        "ld2        {v10.4s, v11.4s}, [%2]      \n"

                        "st1        {v7.4s}, [%1], #16          \n"

                        "prfm       pldl1keep, [%1, #128]       \n"

                        "subs       %w0, %w0, #1                \n"
                        "bne        0b                          \n"

                        "sub        %2, %2, #32                 \n"
                        : "=r"(nn),     // %0
                        "=r"(outptr), // %1
                        "=r"(r0),     // %2
                        "=r"(r1),     // %3
                        "=r"(r2),     // %4
                        "=r"(r3),     // %5
                        "=r"(r4)      // %6
                        : "0"(nn),
                        "1"(outptr),
                        "2"(r0),
                        "3"(r1),
                        "4"(r2),
                        "5"(r3),
                        "6"(r4),
                        "w"(_k0123),     // %14
                        "w"(_k4567),     // %15
                        "w"(_k891011),   // %16
                        "w"(_k12131415), // %17
                        "w"(_k16171819), // %18
                        "w"(_k20212223), // %19
                        "w"(_k24242424)  // %20
                        : "cc", "memory", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15");
                }
#else
                if (nn > 0)
                {
                    asm volatile(
                        //                     "veor       q15, q15            \n"// _sump3 = 0;
                        //                     "veor       q13, q13            \n"// _sump2 = 0;
                        //                     "veor       q14, q14            \n"// _sump3 = 0;

                        "pld        [%2, #256]          \n"
                        "vld2.f32   {d16-d19}, [%2]!    \n" // q8 = 0 2 4 6  q9 = 1 3 5 7
                        "pld        [%2, #256]          \n"
                        "vld2.f32   {d20-d23}, [%2]     \n" // q10 = 8 10 12 14  q11 = 9 11 13 15

                        "pld        [%1, #128]          \n"

                        "0:                             \n"

                        "vld1.f32   {d14-d15}, [%1]     \n" // q7 = outptr

                        "vext.32    q12, q8, q10, #1    \n" // q12 = 2 4 6 8
                        "vext.32    q11, q9, q11, #1    \n" // q11 = 3 5 7 9
                        "vext.32    q10, q8, q10, #2    \n" // q10 = 4 6 8 10

                        "vmla.f32   q7, q8, %e14[0]     \n"
                        "vmul.f32   q13, q9, %e14[1]    \n"

                        "pld        [%3, #256]          \n"

                        "vmul.f32   q14, q12, %f14[0]   \n"
                        "vmul.f32   q15, q11, %f14[1]   \n"
                        "vmla.f32   q7, q10, %e15[0]    \n"

                        "vld2.f32   {d16-d19}, [%3]!    \n"
                        "pld        [%3, #256]          \n"
                        "vld2.f32   {d20-d23}, [%3]     \n"

                        "vext.32    q12, q8, q10, #1    \n"
                        "vext.32    q11, q9, q11, #1    \n"
                        "vext.32    q10, q8, q10, #2    \n"

                        "vmla.f32   q7, q8, %e15[1]     \n"
                        "vmla.f32   q13, q9, %f15[0]    \n"

                        "pld        [%4, #256]          \n"

                        "vmla.f32   q14, q12, %f15[1]   \n"
                        "vmla.f32   q15, q11, %e16[0]   \n"
                        "vmla.f32   q7, q10, %e16[1]    \n"

                        "vld2.f32   {d16-d19}, [%4]!    \n"
                        "pld        [%4, #256]          \n"
                        "vld2.f32   {d20-d23}, [%4]     \n"

                        "vext.32    q12, q8, q10, #1    \n"
                        "vext.32    q11, q9, q11, #1    \n"
                        "vext.32    q10, q8, q10, #2    \n"

                        "vmla.f32   q7, q8, %f16[0]     \n"
                        "vmla.f32   q13, q9, %f16[1]    \n"

                        "pld        [%5, #256]          \n"

                        "vmla.f32   q14, q12, %e17[0]   \n"
                        "vmla.f32   q15, q11, %e17[1]   \n"
                        "vmla.f32   q7, q10, %f17[0]    \n"

                        "vld2.f32   {d16-d19}, [%5]!    \n"
                        "pld        [%5, #256]          \n"
                        "vld2.f32   {d20-d23}, [%5]     \n"

                        "vext.32    q12, q8, q10, #1    \n"
                        "vext.32    q11, q9, q11, #1    \n"
                        "vext.32    q10, q8, q10, #2    \n"

                        "vmla.f32   q7, q8, %f17[1]     \n"
                        "vmla.f32   q13, q9, %e18[0]    \n"

                        "pld        [%6, #256]          \n"

                        "vmla.f32   q14, q12, %e18[1]   \n"
                        "vmla.f32   q15, q11, %f18[0]   \n"
                        "vmla.f32   q7, q10, %f18[1]    \n"

                        "vld2.f32   {d16-d19}, [%6]!    \n"
                        "pld        [%6, #256]          \n"
                        "vld2.f32   {d20-d23}, [%6]     \n"

                        "vext.32    q12, q8, q10, #1    \n"
                        "vext.32    q11, q9, q11, #1    \n"
                        "vext.32    q10, q8, q10, #2    \n"

                        "vmla.f32   q7, q8, %e19[0]     \n"
                        "vmla.f32   q13, q9, %e19[1]    \n"
                        "vmla.f32   q14, q12, %f19[0]   \n"
                        "vmla.f32   q15, q11, %f19[1]   \n"
                        "vmla.f32   q7, q10, %e20[0]    \n"

                        "pld        [%2, #256]          \n"
                        "vld2.f32   {d16-d19}, [%2]!    \n" // q8 = 0 2 4 6  q9 = 1 3 5 7

                        "vadd.f32   q14, q14, q15       \n"
                        "vadd.f32   q7, q7, q13         \n"

                        //                     "veor       q15, q15            \n"// _sump3 = 0;
                        //                     "veor       q13, q13            \n"// _sump2 = 0;

                        "pld        [%2, #256]          \n"

                        "vadd.f32   q7, q7, q14         \n"

                        "vld2.f32   {d20-d23}, [%2]     \n" // q10 = 8 10 12 14  q11 = 9 11 13 15

                        //                     "veor       q14, q14            \n"// _sump3 = 0;

                        "vst1.f32   {d14-d15}, [%1]!    \n"

                        "pld        [%1, #128]          \n"

                        "subs       %0, #1              \n"
                        "bne        0b                  \n"

                        "sub        %2, #32             \n"
                        : "=r"(nn),     // %0
                        "=r"(outptr), // %1
                        "=r"(r0),     // %2
                        "=r"(r1),     // %3
                        "=r"(r2),     // %4
                        "=r"(r3),     // %5
                        "=r"(r4)      // %6
                        : "0"(nn),
                        "1"(outptr),
                        "2"(r0),
                        "3"(r1),
                        "4"(r2),
                        "5"(r3),
                        "6"(r4),
                        "w"(_k0123),     // %14
                        "w"(_k4567),     // %15
                        "w"(_k891011),   // %16
                        "w"(_k12131415), // %17
                        "w"(_k16171819), // %18
                        "w"(_k20212223), // %19
                        "w"(_k24242424)  // %20
                        : "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
                }
#endif // __aarch64__
#endif // __ARM_NEON

                // tail pixels (fewer than 4 remaining)
                for (; remain > 0; remain--)
                {
                    float sum = 0;
#if __ARM_NEON
                    // vector part covers columns 0..3; column 4 of every
                    // kernel row is accumulated into the scalar `sum`
                    float32x4_t _r0 = vld1q_f32(r0);
                    float32x4_t _sum = vmulq_f32(_r0, _k0123);

                    float32x4_t _r1 = vld1q_f32(r1);
                    _sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1));

                    float32x4_t _r2 = vld1q_f32(r2);
                    _sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2));

                    float32x4_t _r3 = vld1q_f32(r3);
                    _sum = vmlaq_f32(_sum, _r3, vld1q_f32(k3));

                    float32x4_t _r4 = vld1q_f32(r4);
                    _sum = vmlaq_f32(_sum, _r4, _k20212223);

                    sum += r0[4] * k0[4];
                    sum += r1[4] * k1[4];
                    sum += r2[4] * k2[4];
                    sum += r3[4] * k3[4];
                    sum += r4[4] * k4[4];

                    float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
                    _ss = vpadd_f32(_ss, _ss);

                    sum += vget_lane_f32(_ss, 0);
#else
                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r0[3] * k0[3];
                    sum += r0[4] * k0[4];

                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r1[3] * k1[3];
                    sum += r1[4] * k1[4];

                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];
                    sum += r2[3] * k2[3];
                    sum += r2[4] * k2[4];

                    sum += r3[0] * k3[0];
                    sum += r3[1] * k3[1];
                    sum += r3[2] * k3[2];
                    sum += r3[3] * k3[3];
                    sum += r3[4] * k3[4];

                    sum += r4[0] * k4[0];
                    sum += r4[1] * k4[1];
                    sum += r4[2] * k4[2];
                    sum += r4[3] * k4[3];
                    sum += r4[4] * k4[4];
#endif
                    *outptr += sum;

                    // stride 2: input pointers advance two pixels per output
                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    r3 += 2;
                    r4 += 2;
                    outptr++;
                }

                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
                r3 += tailstep;
                r4 += tailstep;
            }
        }
    }
}
|
prior_box_op.h | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once

#include <algorithm>
#include <cmath>
#include <vector>

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/transform.h"
namespace paddle {
namespace operators {
constexpr int kPriorBoxFLOAT = 1;
constexpr int kPriorBoxDOUBLE = 2;
// Expand the raw aspect-ratio list into the effective list used for prior
// boxes: always starts with 1.0, keeps only ratios not already present
// (within epsilon), and, when `flip` is set, also adds each new ratio's
// reciprocal.
//
// input_aspect_ratior:  raw aspect ratios (may contain near-duplicates).
// flip:                 if true, append 1/ar for every newly added ar.
// output_aspect_ratior: cleared and filled with the deduplicated result.
inline void ExpandAspectRatios(const std::vector<float>& input_aspect_ratior,
                               bool flip,
                               std::vector<float>* output_aspect_ratior) {
  constexpr float epsilon = 1e-6;
  output_aspect_ratior->clear();
  output_aspect_ratior->push_back(1.0f);
  for (float ar : input_aspect_ratior) {
    // Skip ratios that already appear in the output (epsilon tolerance);
    // std::any_of replaces the original hand-rolled scan.
    const bool already_exist =
        std::any_of(output_aspect_ratior->begin(), output_aspect_ratior->end(),
                    [&](float existing) { return std::fabs(ar - existing) < epsilon; });
    if (!already_exist) {
      output_aspect_ratior->push_back(ar);
      if (flip) {
        output_aspect_ratior->push_back(1.0f / ar);
      }
    }
  }
}
// CPU kernel that generates prior (anchor) boxes -- SSD-style, judging by the
// attribute set -- for every cell of the input feature map, plus the
// per-coordinate variances that box decoders consume. T is the registered
// kernel element type; K is the arithmetic/storage type written to outputs.
template <typename T, typename K>
class PriorBoxOpKernel : public framework::OpKernel<T> {
public:
// Fills "Boxes" with normalized (xmin, ymin, xmax, ymax) priors, emitted in
// (feature row, feature col, prior, 4) order, and "Variances" with one copy
// of the `variances` attribute per generated box.
void Compute(const framework::ExecutionContext& ctx) const override {
auto* input = ctx.Input<paddle::framework::Tensor>("Input");
auto* image = ctx.Input<paddle::framework::Tensor>("Image");
auto* boxes = ctx.Output<paddle::framework::Tensor>("Boxes");
auto* vars = ctx.Output<paddle::framework::Tensor>("Variances");
auto min_sizes = ctx.Attr<std::vector<float>>("min_sizes");
auto max_sizes = ctx.Attr<std::vector<float>>("max_sizes");
auto input_aspect_ratio = ctx.Attr<std::vector<float>>("aspect_ratios");
auto variances = ctx.Attr<std::vector<float>>("variances");
auto flip = ctx.Attr<bool>("flip");
auto clip = ctx.Attr<bool>("clip");
auto min_max_aspect_ratios_order =
ctx.Attr<bool>("min_max_aspect_ratios_order");
// Deduplicate the requested ratios and optionally add their reciprocals.
std::vector<float> aspect_ratios;
ExpandAspectRatios(input_aspect_ratio, flip, &aspect_ratios);
K step_w = static_cast<K>(ctx.Attr<float>("step_w"));
K step_h = static_cast<K>(ctx.Attr<float>("step_h"));
K offset = static_cast<K>(ctx.Attr<float>("offset"));
// dims()[3] is treated as width and dims()[2] as height for both tensors,
// i.e. an NCHW layout is assumed -- TODO confirm against the op definition.
auto img_width = image->dims()[3];
auto img_height = image->dims()[2];
auto feature_width = input->dims()[3];
auto feature_height = input->dims()[2];
// A zero step attribute means "derive the stride from the image-size /
// feature-map-size ratio".
K step_width, step_height;
if (step_w == 0 || step_h == 0) {
step_width = static_cast<K>(img_width) / feature_width;
step_height = static_cast<K>(img_height) / feature_height;
} else {
step_width = step_w;
step_height = step_h;
}
// One prior per (aspect ratio, min size) pair, plus one extra square prior
// per max size when max sizes are supplied.
int num_priors = aspect_ratios.size() * min_sizes.size();
if (max_sizes.size() > 0) {
num_priors += max_sizes.size();
}
boxes->mutable_data<K>(ctx.GetPlace());
vars->mutable_data<K>(ctx.GetPlace());
// b_t walks the Boxes buffer; every emitted prior appends exactly 4 values.
K* b_t = boxes->data<K>();
for (int h = 0; h < feature_height; ++h) {
for (int w = 0; w < feature_width; ++w) {
// Center of the current feature-map cell, in image pixel coordinates.
K center_x = (w + offset) * step_width;
K center_y = (h + offset) * step_height;
K box_width, box_height;
for (size_t s = 0; s < min_sizes.size(); ++s) {
auto min_size = min_sizes[s];
// The flag only changes the ORDER in which the same priors are
// emitted (min, max, then ratios vs. ratios first, max last);
// consumers that index priors positionally depend on this order.
if (min_max_aspect_ratios_order) {
// Square prior of side min_size (half-extent stored).
box_width = box_height = min_size / 2.;
b_t[0] = (center_x - box_width) / img_width;
b_t[1] = (center_y - box_height) / img_height;
b_t[2] = (center_x + box_width) / img_width;
b_t[3] = (center_y + box_height) / img_height;
b_t += 4;
if (max_sizes.size() > 0) {
auto max_size = max_sizes[s];
// square prior with size sqrt(minSize * maxSize)
box_width = box_height = sqrt(min_size * max_size) / 2.;
b_t[0] = (center_x - box_width) / img_width;
b_t[1] = (center_y - box_height) / img_height;
b_t[2] = (center_x + box_width) / img_width;
b_t[3] = (center_y + box_height) / img_height;
b_t += 4;
}
// priors with different aspect ratios
for (size_t r = 0; r < aspect_ratios.size(); ++r) {
float ar = aspect_ratios[r];
// Ratio 1 is skipped here: the min-size square above already
// covers it.
if (fabs(ar - 1.) < 1e-6) {
continue;
}
box_width = min_size * sqrt(ar) / 2.;
box_height = min_size / sqrt(ar) / 2.;
b_t[0] = (center_x - box_width) / img_width;
b_t[1] = (center_y - box_height) / img_height;
b_t[2] = (center_x + box_width) / img_width;
b_t[3] = (center_y + box_height) / img_height;
b_t += 4;
}
} else {
// priors with different aspect ratios
for (size_t r = 0; r < aspect_ratios.size(); ++r) {
float ar = aspect_ratios[r];
box_width = min_size * sqrt(ar) / 2.;
box_height = min_size / sqrt(ar) / 2.;
b_t[0] = (center_x - box_width) / img_width;
b_t[1] = (center_y - box_height) / img_height;
b_t[2] = (center_x + box_width) / img_width;
b_t[3] = (center_y + box_height) / img_height;
b_t += 4;
}
if (max_sizes.size() > 0) {
auto max_size = max_sizes[s];
// square prior with size sqrt(minSize * maxSize)
box_width = box_height = sqrt(min_size * max_size) / 2.;
b_t[0] = (center_x - box_width) / img_width;
b_t[1] = (center_y - box_height) / img_height;
b_t[2] = (center_x + box_width) / img_width;
b_t[3] = (center_y + box_height) / img_height;
b_t += 4;
}
}
}
}
}
// Clamp every coordinate into [0, 1] when requested.
if (clip) {
K* dt = boxes->data<K>();
std::transform(dt, dt + boxes->numel(), dt, [](K v) -> K {
return std::min<K>(std::max<K>(v, 0.), 1.);
});
}
// Stage the variance vector as a 1 x n tensor.
// NOTE(review): var_t/var_et are not read after this fill -- the broadcast
// below copies straight from `variances` -- possibly vestigial; confirm.
framework::Tensor var_t;
var_t.mutable_data<K>(
framework::make_ddim({1, static_cast<int>(variances.size())}),
ctx.GetPlace());
auto var_et = framework::EigenTensor<K, 2>::From(var_t);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
for (size_t i = 0; i < variances.size(); ++i) {
var_et(0, i) = variances[i];
}
// Broadcast one copy of the variance vector per generated box: temporarily
// view Variances as [box_num, n_variances], fill it, then restore dims.
int box_num = feature_height * feature_width * num_priors;
auto var_dim = vars->dims();
vars->Resize({box_num, static_cast<int>(variances.size())});
auto e_vars = framework::EigenMatrix<K, Eigen::RowMajor>::From(*vars);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(2)
#endif
for (int i = 0; i < box_num; ++i) {
for (size_t j = 0; j < variances.size(); ++j) {
e_vars(i, j) = variances[j];
}
}
vars->Resize(var_dim);
}
};  // class PriorBoxOpKernel
} // namespace operators
} // namespace paddle
|
sections1.c | #include <omp.h>
#include <stdio.h>
/*
 * OpenMP sections demo: section_count enters each section privately
 * initialized from its value before the construct (firstprivate, so 0) and,
 * after the construct, keeps the value assigned by the lexically last
 * section (lastprivate). Each section therefore prints "section_count 1".
 */
int main( )
{
    int section_count = 0;

    /* Combined parallel-sections construct: equivalent to the split
       `parallel` + `sections` form with the same data-sharing clauses. */
    #pragma omp parallel sections firstprivate(section_count) lastprivate(section_count)
    {
        #pragma omp section
        {
            ++section_count;
            printf( "section_count %d\n", section_count );
        }
        #pragma omp section
        {
            ++section_count;
            printf( "section_count %d\n", section_count );
        }
    }
    return 0;
}
|
grid_basis.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "cint.h"
#include "config.h"
#include "gto/grid_ao_drv.h"
#include "np_helper/np_helper.h"
#define MAX_THREADS 256
/*
 * Builds the AO screening table used by the numerical-integration drivers:
 * on return, non0table[ib*nbas + bas_id] is 1 when shell bas_id may be
 * non-negligible on at least one point of grid block ib (blocks of BLKSIZE
 * points), and 0 when every primitive of the shell is screened out there.
 *
 * coords is stored transposed: coords[d*ngrids + i] = component d of point i.
 * atm/bas/env follow the libcint record conventions (slot macros in cint.h).
 */
void VXCnr_ao_screen(unsigned char *non0table, double *coords, int ngrids,
int *atm, int natm, int *bas, int nbas, double *env)
{
const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE;
int i, j;
int np, nc, atm_id;
size_t bas_id, ib;
double rr, arr, maxc;
double logcoeff[NPRIMAX];
double dr[3];
double *p_exp, *pcoeff, *ratm;
for (bas_id = 0; bas_id < nbas; bas_id++) {
/* Fields of the current shell record (bas is advanced at loop end). */
np = bas[NPRIM_OF];
nc = bas[NCTR_OF ];
p_exp = env + bas[PTR_EXP];
pcoeff = env + bas[PTR_COEFF];
atm_id = bas[ATOM_OF];
ratm = env + atm[atm_id*ATM_SLOTS+PTR_COORD];
/* logcoeff[j] = log of the largest |contraction coefficient| using
 * primitive j, so the magnitude test below can stay in log space. */
for (j = 0; j < np; j++) {
maxc = 0;
for (i = 0; i < nc; i++) {
maxc = MAX(maxc, fabs(pcoeff[i*np+j]));
}
logcoeff[j] = log(maxc);
}
for (ib = 0; ib < nblk; ib++) {
for (i = ib*BLKSIZE; i < MIN(ngrids, (ib+1)*BLKSIZE); i++) {
/* Squared distance from grid point i to the shell's atom. */
dr[0] = coords[0*ngrids+i] - ratm[0];
dr[1] = coords[1*ngrids+i] - ratm[1];
dr[2] = coords[2*ngrids+i] - ratm[2];
rr = dr[0]*dr[0] + dr[1]*dr[1] + dr[2]*dr[2];
for (j = 0; j < np; j++) {
arr = p_exp[j] * rr;
/* exp(-arr) * maxc not yet negligible (EXPCUTOFF threshold
 * from grid_ao_drv.h): one surviving primitive marks the
 * whole block, so jump straight to the next block. */
if (arr-logcoeff[j] < EXPCUTOFF) {
non0table[ib*nbas+bas_id] = 1;
goto next_blk;
}
}
}
/* Every point/primitive combination screened out for this block. */
non0table[ib*nbas+bas_id] = 0;
next_blk:;
}
bas += BAS_SLOTS;
}
}
// 1k grids per block
#define GRIDS_BLOCK 512
/*
 * Computes per-atom partition weights on a molecular integration grid
 * (Becke-style fuzzy-cell partitioning, judging by the iterated smoothing
 * polynomial below -- confirm against the Python caller). On return,
 * out[i*Ngrids + g] holds the UNNORMALIZED weight of atom i at grid point g
 * (a product of pairwise cell functions); normalization over atoms is the
 * caller's job.
 *
 * coords is transposed (coords[d*Ngrids + g]); atm_coords is natm x 3.
 * radii_table, when non-NULL, supplies pairwise atomic-size adjustment
 * parameters. NOTE(review): malloc return values are not checked here.
 */
void VXCgen_grid(double *out, double *coords, double *atm_coords,
double *radii_table, int natm, int ngrids)
{
const size_t Ngrids = ngrids;
int i, j;
double dx, dy, dz;
/* Lower triangle (i > j) caches 1 / |R_i - R_j|. */
double *atom_dist = malloc(sizeof(double) * natm*natm);
for (i = 0; i < natm; i++) {
for (j = 0; j < i; j++) {
dx = atm_coords[i*3+0] - atm_coords[j*3+0];
dy = atm_coords[i*3+1] - atm_coords[j*3+1];
dz = atm_coords[i*3+2] - atm_coords[j*3+2];
atom_dist[i*natm+j] = 1 / sqrt(dx*dx + dy*dy + dz*dz);
}
}
#pragma omp parallel private(i, j, dx, dy, dz)
{
/* Per-thread scratch: point->atom distances, running weight products,
 * and the pairwise switching function for one block of grid points. */
double *grid_dist = malloc(sizeof(double) * natm*GRIDS_BLOCK);
double *buf = malloc(sizeof(double) * natm*GRIDS_BLOCK);
double *g = malloc(sizeof(double) * GRIDS_BLOCK);
size_t ig0, n, ngs;
double fac;
#pragma omp for nowait schedule(static)
for (ig0 = 0; ig0 < Ngrids; ig0 += GRIDS_BLOCK) {
ngs = MIN(Ngrids-ig0, GRIDS_BLOCK);
/* Distance of every point in the block to every atom; the product
 * accumulator starts at 1. */
for (i = 0; i < natm; i++) {
for (n = 0; n < ngs; n++) {
dx = coords[0*Ngrids+ig0+n] - atm_coords[i*3+0];
dy = coords[1*Ngrids+ig0+n] - atm_coords[i*3+1];
dz = coords[2*Ngrids+ig0+n] - atm_coords[i*3+2];
grid_dist[i*GRIDS_BLOCK+n] = sqrt(dx*dx + dy*dy + dz*dz);
buf[i*GRIDS_BLOCK+n] = 1;
} }
for (i = 0; i < natm; i++) {
for (j = 0; j < i; j++) {
/* g = the confocal elliptical coordinate of the point w.r.t. the
 * atom pair (difference of distances scaled by cached 1/R_ij). */
fac = atom_dist[i*natm+j];
for (n = 0; n < ngs; n++) {
g[n] = (grid_dist[i*GRIDS_BLOCK+n] -
grid_dist[j*GRIDS_BLOCK+n]) * fac;
}
/* Optional atomic-size adjustment: g += a_ij * (1 - g^2). */
if (radii_table != NULL) {
fac = radii_table[i*natm+j];
for (n = 0; n < ngs; n++) {
g[n] += fac * (1 - g[n]*g[n]);
}
}
/* Three iterations of the smoothing polynomial
 * p(u) = (3u - u^3)/2 ... */
for (n = 0; n < ngs; n++) {
g[n] = (3 - g[n]*g[n]) * g[n] * .5;
}
for (n = 0; n < ngs; n++) {
g[n] = (3 - g[n]*g[n]) * g[n] * .5;
}
/* ... with the final value halved so the complementary pair of
 * cell functions below reads (1 -+ p)/2 = .5 -+ g. */
for (n = 0; n < ngs; n++) {
g[n] = (3 - g[n]*g[n]) * g[n] * .5;
g[n] *= .5;
}
for (n = 0; n < ngs; n++) {
buf[i*GRIDS_BLOCK+n] *= .5 - g[n];
buf[j*GRIDS_BLOCK+n] *= .5 + g[n];
}
} }
/* Write this block's weights back; out is atom-major. */
for (i = 0; i < natm; i++) {
for (n = 0; n < ngs; n++) {
out[i*Ngrids+ig0+n] = buf[i*GRIDS_BLOCK+n];
}
}
}
free(g);
free(buf);
free(grid_dist);
}
free(atom_dist);
}
|
cyclic_small_systems.h | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
* Tridiagonal solvers.
* Host code for cyclic reduction (CR).
*
* NVIDIA, Nikolai Sakharnykh, 2009
*/
#ifndef _CYCLIC_SMALL_SYSTEMS_
#define _CYCLIC_SMALL_SYSTEMS_
#include <omp.h>
#include "tridiagonal.h"
#include "cyclic_kernels.cpp"
// Human-readable kernel names, indexed by the `id` argument of
// cyclic_small_systems() below.
const char *cyclicKernelNames[] = {
"cyclic_small_systems_kernel", // original version
"cyclic_branch_free_kernel", // optimized branch-free version
};
// Solves `num_systems` independent tridiagonal systems of size `system_size`
// with cyclic reduction, offloaded through OpenMP target; returns the average
// kernel time in seconds over BENCH_ITERATIONS runs.
//   a/b/c : sub-/main-/super-diagonals, d : right-hand sides,
//   x     : solutions (all num_systems * system_size elements);
//   id    : kernel variant (0 = original, otherwise branch-free).
// Inputs are mapped to the device once for the whole benchmark and x is
// mapped back when the target data region ends.
double cyclic_small_systems(float *a, float *b, float *c, float *d, float *x,
int system_size, int num_systems, int id = 0)
{
shrLog(" %s\n", cyclicKernelNames[id]);
const unsigned int mem_size = num_systems * system_size;
double sum_time;
#pragma omp target data map(to: a[0:mem_size], \
b[0:mem_size], \
c[0:mem_size], \
d[0:mem_size]) \
map(from: x[0:mem_size])
{
size_t szTeams;
size_t szThreads;
// Cyclic reduction halves the active equations each step.
int iterations = my_log2 (system_size/2);
// set execution parameters: one team per system, system_size/2 threads.
szThreads = system_size / 2;
szTeams = num_systems;
// Warm-up launch, deliberately BEFORE the timer reset below so device
// initialization / data transfer cost is excluded from the measurement.
if (id == 0)
cyclic_small_systems_kernel(
a, b, c, d, x, system_size, num_systems, iterations, szTeams, szThreads);
else
cyclic_branch_free_kernel(
a, b, c, d, x, system_size, num_systems, iterations, szTeams, szThreads);
shrLog(" looping %i times..\n", BENCH_ITERATIONS);
// run computations on GPUs in parallel; shrDeltaT(0) resets timer 0 here
// and reads the elapsed time after the loop.
shrDeltaT(0);
for (int iCycles = 0; iCycles < BENCH_ITERATIONS; iCycles++)
{
if (id == 0)
cyclic_small_systems_kernel(
a, b, c, d, x, system_size, num_systems, iterations, szTeams, szThreads);
else
cyclic_branch_free_kernel(
a, b, c, d, x, system_size, num_systems, iterations, szTeams, szThreads);
}
sum_time = shrDeltaT(0);
}
// Average per-iteration time in seconds.
double time = sum_time / BENCH_ITERATIONS;
return time;
}
#endif
|
studentspar.c | // Rodar com a flag -D DEBUG para ativar o modo debug
// #define DEBUG
/*
Este programa calcula as seguintes estatisticas
das notas dos alunos nas escolas das diferentes
cidades do Brasil:
Menor, Maior, Media, Mediana e Desvio Padrao (DP)
*/
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
#include <omp.h>
#define NOTA_MAXIMA 101
#define NTHREADS 8
// User-defined OpenMP reduction payload used to locate maximum averages while
// also carrying the indices where they occur
// (mr = region, mc = city, mcr = the city's region).
struct Compare {
    double val;
    int mr;
    int mc;
    int mcr;
};
// "maximum" reduction: keep whichever operand holds the larger val.
// FIX: the original omitted the initializer clause, so each thread's private
// copy (omp_priv) was default-initialized -- indeterminate in C -- and threads
// could compare real averages against garbage. Seeding the private copy from
// the original variable (omp_orig) makes the reduction well-defined.
#pragma omp declare reduction(maximum : struct Compare : \
    omp_out = omp_in.val > omp_out.val ? omp_in : omp_out) \
    initializer(omp_priv = omp_orig)
/*
Program input:
R = number of distinct regions.
C = number of distinct cities per region.
A = number of students per city.
SEED = seed used to generate the random grades.
*/
int R, C, A, SEED;
/*
Raw data (helpers for the jagged allocation):
alunos = grades of each student k.
cidades = grades of each student k of each city j.
regioes = grades of each student k of each city j
of each region i.
*/
int *alunos;
int **cidades;
int ***regioes;
/*
Grade-counting vectors (histograms) used by the
counting sort and by all the statistics
computed below.
*/
int ***contagem_cidades;
int **contagem_regioes;
int *contagem_pais;
/*
Per-city statistics, indexed by
city j of region i:
*/
int **menor;
int **maior;
double **mediana;
double **media;
double **DP;
/*
Per-region statistics, indexed by
region i:
*/
int *menor_regiao;
int *maior_regiao;
double *mediana_regiao;
double *media_regiao;
double *DP_regiao;
/*
Country-wide statistics (all regions together)
*/
int menor_brasil;
int maior_brasil;
double mediana_brasil;
double media_brasil;
double DP_brasil;
/*
Index of the best region, and of the best
city together with its region:
*/
int melhor_regiao;
int melhor_cidade_regiao;
int melhor_cidade;
/*
 * Debug-mode data loader: fixes R=3, C=4, A=6 (SEED=7, unused for data) and
 * fills the grade matrix with the constant values from the assignment spec
 * instead of random grades, so results can be checked by hand.
 */
void debug() {
    static const int defaults[3][4][6] = { { // Regiao 0
        {30, 40, 20, 80, 85, 10},
        {10, 20, 30, 40, 50, 60},
        {60, 50, 40, 30, 20, 10},
        {70, 55, 35, 80, 95, 27}
    },
    { // Regiao 1
        {35, 45, 25, 85, 90, 15},
        {15, 25, 35, 45, 55, 65},
        {65, 55, 45, 35, 25, 15},
        {75, 60, 40, 85, 100, 32}
    },
    { // Regiao 2
        {20, 30, 10, 70, 75, 0},
        {0, 10, 20, 30, 40, 50},
        {50, 40, 30, 20, 10, 0},
        {60, 45, 25, 70, 85, 17}
    } };
    int reg, cid;

    R = 3;
    C = 4;
    A = 6;
    SEED = 7;

    /* Allocate the jagged R x C x A matrix and copy the fixed grades in as
       each city's row is created. */
    regioes = (int ***) malloc(sizeof(int **) * R);
    for (reg = 0; reg < R; reg++) {
        cidades = (int **) malloc(sizeof(int *) * C);
        for (cid = 0; cid < C; cid++) {
            alunos = (int *) malloc(sizeof(int) * A);
            memcpy(alunos, defaults[reg][cid], sizeof(int) * A);
            cidades[cid] = alunos;
        }
        regioes[reg] = cidades;
    }
}
/*
 * Dumps the whole grade matrix to ./matriz_gerada_par.csv: one CSV line per
 * city (A comma-separated grades), cities of all regions in order. Exits
 * with status 1 if the file cannot be opened.
 */
void escrever_matriz() {
    int reg, cid, al;
    FILE *saida = fopen("./matriz_gerada_par.csv", "w");

    if (saida == NULL) {
        exit(1);
    }
    for (reg = 0; reg < R; reg++) {
        for (cid = 0; cid < C; cid++) {
            for (al = 0; al < A; al++) {
                /* Comma before every grade except the first on the line. */
                if (al > 0) {
                    fprintf(saida, ",");
                }
                fprintf(saida, "%d", regioes[reg][cid][al]);
            }
            fprintf(saida, "\n");
        }
    }
    fclose(saida);
}
/*
 * Histogram step of a counting sort: after the call, contagem[g] holds how
 * many of the A students in `vetor` scored grade g. The actual reordering
 * pass of the counting sort was removed long ago (only the counts are ever
 * used by the statistics), so just the tally remains.
 */
void counting_sort(int *vetor, int *contagem) {
    int idx;

    for (idx = 0; idx < A; idx++) {
        contagem[vetor[idx]]++;
    }
}
/*
 * Lowest grade with at least one occurrence in the histogram, or -1 when
 * the histogram is empty.
 */
int calcula_menor(int *contagem) {
    int nota = 0;

    while (nota < NOTA_MAXIMA) {
        if (contagem[nota] > 0) {
            return nota;
        }
        nota++;
    }
    return -1;
}
/*
 * Highest grade with at least one occurrence in the histogram, or -1 when
 * the histogram is empty.
 */
int calcula_maior(int *contagem) {
    int nota = NOTA_MAXIMA;

    while (nota-- > 0) {
        if (contagem[nota] > 0) {
            return nota;
        }
    }
    return -1;
}
/*
 * Arithmetic mean of the n grades encoded in the histogram:
 * sum(grade * count[grade]) / n.
 */
double calcula_media(int *contagem, int n) {
    unsigned int total = 0;
    int nota;

    for (nota = 0; nota < NOTA_MAXIMA; nota++) {
        total += contagem[nota] * nota;
    }
    return (1.0 * total) / n;
}
/*
 * Median of the n grades encoded in the histogram `contagem`.
 * Walks the buckets accumulating ranks (soma holds integer counts in a
 * double, so the == / > comparisons below are exact). For odd n, or when
 * the middle rank falls strictly inside one bucket, the median is that
 * grade; for even n whose lower middle rank ends exactly at bucket i, it is
 * the average of grade i and the next non-empty grade. Returns -1.0 when
 * the histogram holds fewer than pos entries (e.g. empty input).
 */
double calcula_mediana(int *contagem, int n) {
int i, ret;
/* pos = 1-based rank of the lower middle element. */
int pos = (n + 1) / 2;
double soma = 0;
for (i = 0; i < NOTA_MAXIMA; i++) {
soma += contagem[i];
if (soma >= pos) {
/* Past pos, or exactly at pos with odd n: grade i alone is the median. */
if ((soma > pos) || ((soma == pos) && (n % 2))) {
return i;
} else { // soma == pos: even n, lower middle ends exactly at grade i
ret = i * 1.0;
/* Advance to the next non-empty bucket = the upper middle grade. */
while (soma == pos) {
soma += contagem[++i];
}
return (ret + i) * 1.0 / 2;
}
}
}
return -1.0;
}
/*
 * Approximate sqrt(n): computes rsqrt(1/n) using the "fast inverse square
 * root" bit trick with one Newton-Raphson refinement step (relative error
 * on the order of 0.2%).
 * Adapted from:
 * https://cs.uwaterloo.ca/~m32rober/rsqrt.pdf
 *
 * FIX: the original reinterpreted the double through a cast pointer
 * (*(unsigned long long *)&y), which violates strict aliasing and is
 * undefined behavior; memcpy expresses the same bit reinterpretation
 * legally and compiles to the same code.
 */
double fsqrt(double n) {
    unsigned long long bits;
    double x, y;

    n = 1.0 / n;                 /* rsqrt(1/n) == sqrt(n) */
    x = n * 0.5;
    y = n;
    memcpy(&bits, &y, sizeof bits);           /* grab the bit pattern */
    bits = 0x5fe6eb50c7b537a9 - (bits >> 1);  /* magic-constant initial guess */
    memcpy(&y, &bits, sizeof y);
    y = y * (1.5 - (x * y * y));              /* one Newton iteration */
    return y;
}
/*
 * Population standard deviation of the n grades in the histogram:
 * sqrt( sum(count[g] * (g - mean)^2) / n ), using the approximate fsqrt()
 * (fast inverse square root trick) instead of libm's sqrt.
 */
double calcula_desvio_padrao(int *contagem, int n) {
    double soma_notas = 0;
    double soma_quadrados = 0;
    double media, variancia, dp;
    int nota;

    for (nota = 0; nota < NOTA_MAXIMA; nota++) {
        soma_notas += contagem[nota] * nota;
    }
    media = soma_notas / n;

    for (nota = 0; nota < NOTA_MAXIMA; nota++) {
        soma_quadrados += ((nota - media) * (nota - media)) * contagem[nota];
    }
    variancia = soma_quadrados / n;

    /* fsqrt is approximate; fold any stray negative sign away. */
    dp = fsqrt(variancia);
    return (dp < 0) ? -dp : dp;
}
/*
 * Driver: reads the input (or loads the fixed debug matrix), builds per-city,
 * per-region and country-wide grade histograms in parallel, derives all the
 * requested statistics from the histograms, finds the best region and city
 * by average grade, prints everything and frees all memory.
 *
 * Fixes relative to the original version:
 *  - the region-histogram accumulation used collapse(3), letting several
 *    threads update the same contagem_regioes[i][k] cell at once (data race);
 *  - the country-histogram accumulation used collapse(2), racing on
 *    contagem_pais[k] the same way;
 *  - scanf's return value was ignored.
 */
int main(int argc, char *argv[]) {
    double start_time, time;  /* wall-clock markers (I/O excluded) */
    int i, j, k;

    /* Fix the worker count for every parallel region below. */
    omp_set_num_threads(NTHREADS);

#ifndef DEBUG
    /* R regions, C cities per region, A students per city, RNG seed. */
    if (scanf("%d %d %d %d", &R, &C, &A, &SEED) != 4) {
        fprintf(stderr, "Erro: entrada invalida\n");
        return 1;
    }
    srand(SEED);
    /* Allocate the jagged R x C x A matrix and fill it with grades 0..100. */
    regioes = (int ***) malloc(sizeof(int **) * R);
    for (i = 0; i < R; i++) {
        cidades = (int **) malloc(sizeof(int *) * C);
        for (j = 0; j < C; j++) {
            alunos = (int *) malloc(sizeof(int) * A);
            for (k = 0; k < A; k++) {
                alunos[k] = rand() % 101;
            }
            cidades[j] = alunos;
        }
        regioes[i] = cidades;
    }
#else
    /* Debug mode (matrix fixed by the assignment spec). */
    debug();
#endif

    /* Counting vectors: one NOTA_MAXIMA-bucket histogram per city, per
       region and for the whole country (calloc zeroes every bucket). */
    contagem_regioes = (int **) malloc(sizeof(int *) * R);
    contagem_pais = (int *) calloc(NOTA_MAXIMA, sizeof(int));
    contagem_cidades = (int ***) malloc(sizeof(int **) * R);
    for (i = 0; i < R; i++) {
        contagem_cidades[i] = (int **) malloc(sizeof(int *) * C);
        for (j = 0; j < C; j++) {
            contagem_cidades[i][j] = (int *) calloc(NOTA_MAXIMA, sizeof(int));
        }
        contagem_regioes[i] = (int *) calloc(NOTA_MAXIMA, sizeof(int));
    }

    escrever_matriz();

    /* Result arrays: per city ... */
    menor = (int **) malloc(sizeof(int *) * R);
    maior = (int **) malloc(sizeof(int *) * R);
    mediana = (double **) malloc(sizeof(double *) * R);
    media = (double **) malloc(sizeof(double *) * R);
    DP = (double **) malloc(sizeof(double *) * R);
    for (i = 0; i < R; i++) {
        menor[i] = (int *) malloc(sizeof(int) * C);
        maior[i] = (int *) malloc(sizeof(int) * C);
        mediana[i] = (double *) malloc(sizeof(double) * C);
        media[i] = (double *) malloc(sizeof(double) * C);
        DP[i] = (double *) malloc(sizeof(double) * C);
    }
    /* ... and per region. */
    menor_regiao = (int *) malloc(sizeof(int) * R);
    maior_regiao = (int *) malloc(sizeof(int) * R);
    mediana_regiao = (double *) malloc(sizeof(double) * R);
    media_regiao = (double *) malloc(sizeof(double) * R);
    DP_regiao = (double *) malloc(sizeof(double) * R);

    /* Histogram generation and statistics -- timed from here on. */
    start_time = omp_get_wtime();

    /* One independent histogram per city. Static schedule because every
       iteration costs the same; collapse(2) because R alone may be smaller
       than the thread count. */
    #pragma omp parallel for schedule(static) collapse(2)
    for (i = 0; i < R; i++) {
        for (j = 0; j < C; j++) {
            counting_sort(regioes[i][j], contagem_cidades[i][j]);
        }
    }

    /* Region histogram = sum of its city histograms.
       FIX: the original collapse(3) also split the j loop across threads, so
       two threads could execute `contagem_regioes[i][k] +=` on the same cell
       concurrently (data race). Parallelize over the independent (i, k)
       cells and accumulate j sequentially; j must now be explicitly private
       because it is no longer a collapsed loop index. */
    #pragma omp parallel for schedule(static) collapse(2) private(j)
    for (i = 0; i < R; i++) {
        for (k = 0; k < NOTA_MAXIMA; k++) {
            for (j = 0; j < C; j++) {
                contagem_regioes[i][k] += contagem_cidades[i][j][k];
            }
        }
    }

    /* Country histogram = sum of the region histograms.
       FIX: the original collapse(2) also split the i loop, racing on
       contagem_pais[k]. Each bucket k is independent, so parallelize over k
       and accumulate i sequentially (i made explicitly private). */
    #pragma omp parallel for schedule(static) private(i)
    for (k = 0; k < NOTA_MAXIMA; k++) {
        for (i = 0; i < R; i++) {
            contagem_pais[k] += contagem_regioes[i][k];
        }
    }

    /* Per-city statistics: every (i, j) cell is independent. */
    #pragma omp parallel for schedule(static) collapse(2)
    for (i = 0; i < R; i++) {
        for (j = 0; j < C; j++) {
            menor[i][j] = calcula_menor(contagem_cidades[i][j]);
            maior[i][j] = calcula_maior(contagem_cidades[i][j]);
            mediana[i][j] = calcula_mediana(contagem_cidades[i][j], A);
            media[i][j] = calcula_media(contagem_cidades[i][j], A);
            DP[i][j] = calcula_desvio_padrao(contagem_cidades[i][j], A);
        }
    }

    /* Per-region statistics. */
    #pragma omp parallel for schedule(static)
    for (i = 0; i < R; i++) {
        menor_regiao[i] = calcula_menor(contagem_regioes[i]);
        maior_regiao[i] = calcula_maior(contagem_regioes[i]);
        mediana_regiao[i] = calcula_mediana(contagem_regioes[i], C * A);
        media_regiao[i] = calcula_media(contagem_regioes[i], C * A);
        DP_regiao[i] = calcula_desvio_padrao(contagem_regioes[i], C * A);
    }

    /* Country-wide statistics: the histograms are only NOTA_MAXIMA buckets
       long, so running this sequentially beats spawning a parallel region
       (a `sections` version was tried and discarded as slower). */
    menor_brasil = calcula_menor(contagem_pais);
    maior_brasil = calcula_maior(contagem_pais);
    mediana_brasil = calcula_mediana(contagem_pais, R * C * A);
    media_brasil = calcula_media(contagem_pais, R * C * A);
    DP_brasil = calcula_desvio_padrao(contagem_pais, R * C * A);

    /* Best region: the user-defined `maximum` reduction carries the index
       of the largest average alongside the value itself. */
    struct Compare max_val;
    max_val.val = media_regiao[0];
    max_val.mr = 0;
    #pragma omp parallel for reduction(maximum:max_val)
    for (i = 1; i < R; i++) {
        if (media_regiao[i] > max_val.val) {
            max_val.val = media_regiao[i];
            max_val.mr = i;
        }
    }
    melhor_regiao = max_val.mr;

    /* Best city (and the region it belongs to), same reduction. */
    max_val.val = media[0][0];
    max_val.mc = 0;
    max_val.mcr = 0;
    #pragma omp parallel for collapse(2) reduction(maximum:max_val)
    for (i = 0; i < R; i++) {
        for (j = 0; j < C; j++) {
            if (media[i][j] > max_val.val) {
                max_val.val = media[i][j];
                max_val.mc = j;
                max_val.mcr = i;
            }
        }
    }
    melhor_cidade = max_val.mc;
    melhor_cidade_regiao = max_val.mcr;

    /* Stop the clock before the (excluded) output phase. */
    time = omp_get_wtime() - start_time;

    /* Output: cities, regions, country, winners, elapsed time. */
    for (i = 0; i < R; i++) {
        for (j = 0; j < C; j++) {
            printf("Reg %d - Cid %d: "
                   "menor: %d, "
                   "maior: %d, "
                   "mediana: %.2lf, "
                   "media: %.2lf e "
                   "DP: %.2lf\n",
                   i, j,
                   menor[i][j],
                   maior[i][j],
                   mediana[i][j],
                   media[i][j],
                   DP[i][j]);
        }
        printf("\n");
    }
    for (i = 0; i < R; i++) {
        printf("Reg %d: "
               "menor: %d, "
               "maior: %d, "
               "mediana: %.2lf, "
               "media: %.2lf e "
               "DP: %.2lf\n",
               i,
               menor_regiao[i],
               maior_regiao[i],
               mediana_regiao[i],
               media_regiao[i],
               DP_regiao[i]);
    }
    printf("\n");
    printf("Brasil: "
           "menor: %d, "
           "maior: %d, "
           "mediana: %.2lf, "
           "media: %.2lf e "
           "DP: %.2lf\n\n",
           menor_brasil,
           maior_brasil,
           mediana_brasil,
           media_brasil,
           DP_brasil);
    printf("Melhor regiao: Regiao %d\n"
           "Melhor cidade: Regiao %d, "
           "Cidade %d\n",
           melhor_regiao,
           melhor_cidade_regiao,
           melhor_cidade);
    printf("Tempo de resposta sem considerar E/S, em segundos: %.3lf\n", time);

    /* Teardown. */
    for (i = 0; i < R; i++) {
        free(menor[i]);
        free(maior[i]);
        free(mediana[i]);
        free(media[i]);
        free(DP[i]);
        free(contagem_regioes[i]);
        for (j = 0; j < C; j++) {
            free(regioes[i][j]);
            free(contagem_cidades[i][j]);
        }
        free(regioes[i]);
        free(contagem_cidades[i]);
    }
    free(contagem_cidades);
    free(contagem_regioes);
    free(contagem_pais);
    free(regioes);
    free(menor);
    free(maior);
    free(mediana);
    free(media);
    free(DP);
    free(menor_regiao);
    free(maior_regiao);
    free(mediana_regiao);
    free(media_regiao);
    free(DP_regiao);
    return 0;
}
|
ast-dump-openmp-distribute-simd.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) { // baseline: one canonical loop under the bare directive. NOTE: do not reflow this file -- the CHECK lines below match exact line:col positions from -ast-dump (end-of-line comments are safe; new lines are not)
#pragma omp distribute simd
for (int i = 0; i < x; i++)
;
}
void test_two(int x, int y) { // nested loops without collapse: only the outer loop is associated with the directive
#pragma omp distribute simd
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_three(int x, int y) { // collapse(1): explicitly the default, same loop shape as test_two
#pragma omp distribute simd collapse(1)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_four(int x, int y) { // collapse(2): both loops form the collapsed iteration space
#pragma omp distribute simd collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_five(int x, int y, int z) { // collapse(2) plus an extra inner loop that stays outside the collapsed nest
#pragma omp distribute simd collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
for (int i = 0; i < z; i++)
;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-distribute-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPDistributeSimdDirective {{.*}} <line:4:1, col:28>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1>
// CHECK-NEXT: | `-OMPDistributeSimdDirective {{.*}} <line:10:1, col:28>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1>
// CHECK-NEXT: | `-OMPDistributeSimdDirective {{.*}} <line:17:1, col:40>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:29, col:39>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:38> 'int'
// CHECK-NEXT: | | |-value: Int 1
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:38> 'int' 1
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1>
// CHECK-NEXT: | `-OMPDistributeSimdDirective {{.*}} <line:24:1, col:40>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:29, col:39>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:38> 'int'
// CHECK-NEXT: | | |-value: Int 2
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:38> 'int' 2
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1>
// CHECK-NEXT: `-OMPDistributeSimdDirective {{.*}} <line:31:1, col:40>
// CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:29, col:39>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:38> 'int'
// CHECK-NEXT: | |-value: Int 2
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:38> 'int' 2
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute-simd.c:31:1) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
dft_ground_state.h | // Copyright (c) 2013-2014 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
// and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/** \file dft_ground_state.h
*
* \brief Contains definition and partial implementation of sirius::DFT_ground_state class.
*/
#ifndef __DFT_GROUND_STATE_H__
#define __DFT_GROUND_STATE_H__
#include "potential.h"
#include "density.h"
#include "k_point_set.h"
#include "force.h"
#include "json.hpp"
#include "Geometry/Forces_PS.h"
using json = nlohmann::json;
namespace sirius
{
/// Driver of the density-functional-theory ground state calculation.
class DFT_ground_state
{
    private:

        /// Simulation context.
        Simulation_context& ctx_;

        /// Alias of the unit cell taken from the simulation context.
        Unit_cell& unit_cell_;

        /// Effective potential (and magnetic field) of the system.
        Potential& potential_;

        /// Charge density (and magnetization) of the system.
        Density& density_;

        /// Set of k-points used to generate the density.
        K_point_set& kset_;

        /// Band (eigen-)solver.
        Band band_;

        /// Calculator of the pseudopotential force contributions.
        std::unique_ptr<Forces_PS> forces_;

        /// If non-zero, density and potential are symmetrized inside the SCF loop.
        int use_symmetry_;

        /// Ion-ion Ewald energy; computed once in the constructor (pseudopotential case only).
        double ewald_energy_{0};

        /// Compute the Ewald energy of the ionic lattice (definition below the class).
        double ewald_energy();

    public:

        DFT_ground_state(Simulation_context& ctx__,
                         Potential& potential__,
                         Density& density__,
                         K_point_set& kset__,
                         int use_symmetry__)
            : ctx_(ctx__)
            , unit_cell_(ctx__.unit_cell())
            , potential_(potential__)
            , density_(density__)
            , kset_(kset__)
            , band_(ctx_)
            , use_symmetry_(use_symmetry__)
        {
            /* the explicit ion-ion term is needed only for pseudopotential runs; in the
               full-potential case it is contained in the electrostatic energy */
            if (!ctx_.full_potential()) {
                ewald_energy_ = ewald_energy();
            }
            forces_ = std::unique_ptr<Forces_PS>(new Forces_PS(ctx_, density_, potential_, kset_));
        }

        /// Compute and return the total forces as a (3 x num_atoms) array.
        mdarray<double, 2> forces();

        /// Compute the total forces and store them in \p inout_forces; also prints them on rank 0.
        void forces(mdarray<double, 2>& inout_forces);

        /// Run the self-consistency loop.
        /** \return index of the iteration at which convergence was reached, or -1. */
        int find(double potential_tol, double energy_tol, int num_dft_iter, bool write_state);

        /// Print charges, magnetic moments and the energy contributions.
        void print_info();

        /// Return nucleus energy in the electrostatic field.
        /** Compute energy of nucleus in the electrostatic potential generated by the total (electrons + nuclei)
         *  charge density. Diverging self-interaction term z*z/|r=0| is excluded. */
        double energy_enuc() const
        {
            double enuc{0};
            if (ctx_.full_potential()) {
                /* sum over the local (distributed) set of atoms, then allreduce */
                for (int ialoc = 0; ialoc < unit_cell_.spl_num_atoms().local_size(); ialoc++) {
                    int ia = unit_cell_.spl_num_atoms(ialoc);
                    int zn = unit_cell_.atom(ia).zn();
                    enuc -= 0.5 * zn * potential_.vh_el(ia) * y00;
                    printf("madelung from SIRIUS %16.8f \n", potential_.vh_el(ia) * y00);
                }
                ctx_.comm().allreduce(&enuc, 1);
            }
            return enuc;
        }

        /// Return eigen-value sum of core states.
        double core_eval_sum() const
        {
            double sum{0};
            for (int ic = 0; ic < unit_cell_.num_atom_symmetry_classes(); ic++) {
                sum += unit_cell_.atom_symmetry_class(ic).core_eval_sum() *
                       unit_cell_.atom_symmetry_class(ic).num_atoms();
            }
            return sum;
        }

        /// Hartree energy <rho|V^{H}>.
        double energy_vha()
        {
            return potential_.energy_vha();
        }

        /// Exchange-correlation potential contribution <rho|V^{XC}>.
        double energy_vxc()
        {
            return density_.rho()->inner(potential_.xc_potential());
        }

        /// Exchange-correlation energy <rho|E^{XC}> (including NLCC core density if present).
        double energy_exc()
        {
            double exc = density_.rho()->inner(potential_.xc_energy_density());
            if (!ctx_.full_potential()) {
                exc += density_.rho_pseudo_core()->inner(potential_.xc_energy_density());
            }
            return exc;
        }

        /// Magnetic contribution <mag|B^{XC}>.
        double energy_bxc()
        {
            double ebxc{0};
            for (int j = 0; j < ctx_.num_mag_dims(); j++) {
                ebxc += density_.magnetization(j)->inner(potential_.effective_magnetic_field(j));
            }
            return ebxc;
        }

        /// Effective potential contribution <rho|V^{eff}>.
        double energy_veff()
        {
            //return energy_vha() + energy_vxc();
            return density_.rho()->inner(potential_.effective_potential());
        }

        /// Local pseudopotential contribution <rho|V^{loc}>.
        double energy_vloc()
        {
            return density_.rho()->inner(&potential_.local_potential());
        }

        /// Full eigen-value sum (core + valence)
        double eval_sum()
        {
            return (core_eval_sum() + kset_.valence_eval_sum());
        }

        /// Kinetic energy
        /** Obtained by subtracting the potential terms from the eigen-value sum. */
        double energy_kin()
        {
            return (eval_sum() - energy_veff() - energy_bxc());
        }

        /// Ion-ion Ewald energy (precomputed in the constructor).
        double energy_ewald() const
        {
            return ewald_energy_;
        }

        /// Total energy of the electronic subsystem.
        /** From the definition of the density functional we have:
         *
         *  \f[
         *      E[\rho] = T[\rho] + E^{H}[\rho] + E^{XC}[\rho] + E^{ext}[\rho]
         *  \f]
         *  where \f$ T[\rho] \f$ is the kinetic energy, \f$ E^{H}[\rho] \f$ - electrostatic energy of
         *  electron-electron density interaction, \f$ E^{XC}[\rho] \f$ - exchange-correlation energy
         *  and \f$ E^{ext}[\rho] \f$ - energy in the external field of nuclei.
         *
         *  Electrostatic and external field energies are grouped in the following way:
         *  \f[
         *      \frac{1}{2} \int \int \frac{\rho({\bf r})\rho({\bf r'}) d{\bf r} d{\bf r'}}{|{\bf r} - {\bf r'}|} +
         *      \int \rho({\bf r}) V^{nuc}({\bf r}) d{\bf r} = \frac{1}{2} \int V^{H}({\bf r})\rho({\bf r})d{\bf r} +
         *      \frac{1}{2} \int \rho({\bf r}) V^{nuc}({\bf r}) d{\bf r}
         *  \f]
         *  Here \f$ V^{H}({\bf r}) \f$ is the total (electron + nuclei) electrostatic potential returned by the
         *  poisson solver. Next we transform the remaining term:
         *  \f[
         *      \frac{1}{2} \int \rho({\bf r}) V^{nuc}({\bf r}) d{\bf r} =
         *      \frac{1}{2} \int \int \frac{\rho({\bf r})\rho^{nuc}({\bf r'}) d{\bf r} d{\bf r'}}{|{\bf r} - {\bf r'}|} =
         *      \frac{1}{2} \int V^{H,el}({\bf r}) \rho^{nuc}({\bf r}) d{\bf r}
         *  \f]
         */
        double total_energy()
        {
            double tot_en{0};

            switch (ctx_.esm_type()) {
                case electronic_structure_method_t::full_potential_lapwlo:
                case electronic_structure_method_t::full_potential_pwlo: {
                    tot_en = (energy_kin() + energy_exc() + 0.5 * energy_vha() + energy_enuc());
                    break;
                }

                case electronic_structure_method_t::pseudopotential: {
                    tot_en = (kset_.valence_eval_sum() - energy_veff() + energy_vloc() - potential_.PAW_one_elec_energy()) +
                             0.5 * energy_vha() + energy_exc() + potential_.PAW_total_energy() + ewald_energy_;
                    break;
                }
            }

            return tot_en;
        }

        /// Symmetrize a scalar function and (optionally) the three magnetization components.
        void symmetrize(Periodic_function<double>* f__,
                        Periodic_function<double>* gz__,
                        Periodic_function<double>* gx__,
                        Periodic_function<double>* gy__)
        {
            PROFILE("sirius::DFT_ground_state::symmetrize");

            auto& comm = ctx_.comm();

            /* symmetrize PW components */
            unit_cell_.symmetry().symmetrize_function(&f__->f_pw(0), ctx_.gvec(), comm);
            switch (ctx_.num_mag_dims()) {
                case 1: {
                    /* collinear case: only the z-component exists */
                    unit_cell_.symmetry().symmetrize_vector_function(&gz__->f_pw(0), ctx_.gvec(), comm);
                    break;
                }
                case 3: {
                    unit_cell_.symmetry().symmetrize_vector_function(&gx__->f_pw(0), &gy__->f_pw(0), &gz__->f_pw(0),
                                                                     ctx_.gvec(), comm);
                    break;
                }
            }

            if (ctx_.full_potential()) {
                /* symmetrize MT components */
                unit_cell_.symmetry().symmetrize_function(f__->f_mt(), comm);
                switch (ctx_.num_mag_dims()) {
                    case 1: {
                        unit_cell_.symmetry().symmetrize_vector_function(gz__->f_mt(), comm);
                        break;
                    }
                    case 3: {
                        unit_cell_.symmetry().symmetrize_vector_function(gx__->f_mt(), gy__->f_mt(), gz__->f_mt(), comm);
                        break;
                    }
                }
            }
        }

        /// Access the band solver.
        Band const& band() const
        {
            return band_;
        }

        /// Serialize the run parameters and the current energies to a JSON dictionary.
        json serialize()
        {
            json dict;

            dict["mpi_grid"] = ctx_.mpi_grid_dims();

            std::vector<int> fftgrid(3);
            for (int i = 0; i < 3; i++) {
                fftgrid[i] = ctx_.fft().grid().size(i);
            }
            dict["fft_grid"] = fftgrid;
            if (!ctx_.full_potential()) {
                for (int i = 0; i < 3; i++) {
                    fftgrid[i] = ctx_.fft_coarse().grid().size(i);
                }
                dict["fft_coarse_grid"] = fftgrid;
            }
            dict["num_fv_states"] = ctx_.num_fv_states();
            dict["num_bands"] = ctx_.num_bands();
            dict["aw_cutoff"] = ctx_.aw_cutoff();
            dict["pw_cutoff"] = ctx_.pw_cutoff();
            dict["omega"] = ctx_.unit_cell().omega();
            dict["chemical_formula"] = ctx_.unit_cell().chemical_formula();
            dict["num_atoms"] = ctx_.unit_cell().num_atoms();
            dict["energy"] = json::object();
            dict["energy"]["total"] = total_energy();
            dict["energy"]["enuc"] = energy_enuc();
            dict["energy"]["core_eval_sum"] = core_eval_sum();
            dict["energy"]["vha"] = energy_vha();
            dict["energy"]["vxc"] = energy_vxc();
            dict["energy"]["exc"] = energy_exc();
            dict["energy"]["bxc"] = energy_bxc();
            dict["energy"]["veff"] = energy_veff();
            dict["energy"]["eval_sum"] = eval_sum();
            dict["energy"]["kin"] = energy_kin();
            dict["energy"]["ewald"] = energy_ewald();
            dict["efermi"] = kset_.energy_fermi();
            dict["band_gap"] = kset_.band_gap();
            dict["core_leakage"] = density_.core_leakage();

            /* return the local by value: NRVO/implicit move applies;
               an explicit std::move here would inhibit copy elision */
            return dict;
        }
};
/// Compute the ion-ion Ewald energy.
/** Standard Ewald splitting: a reciprocal-space (G-vector) sum plus a real-space
 *  (nearest-neighbour) sum, with the Gaussian self-interaction removed in between.
 *  The G-sum runs over this rank's local G-vectors and is MPI-allreduced afterwards. */
inline double DFT_ground_state::ewald_energy()
{
    PROFILE("sirius::DFT_ground_state::ewald_energy");

    /* Gaussian splitting parameter between the real- and reciprocal-space sums */
    double alpha = 1.5;

    double ewald_g = 0;

    #pragma omp parallel
    {
        /* per-thread partial sum, merged under the critical section below */
        double ewald_g_pt = 0;

        #pragma omp for
        for (int igloc = 0; igloc < ctx_.gvec_count(); igloc++) {
            /* global G-vector index of this local G-vector */
            int ig = ctx_.gvec_offset() + igloc;

            double g2 = std::pow(ctx_.gvec().shell_len(ctx_.gvec().shell(ig)), 2);

            /* structure factor weighted by the nuclear charges */
            double_complex rho(0, 0);

            for (int ia = 0; ia < unit_cell_.num_atoms(); ia++) {
                rho += ctx_.gvec_phase_factor(ig, ia) * static_cast<double>(unit_cell_.atom(ia).zn());
            }

            if (ig) {
                ewald_g_pt += std::pow(std::abs(rho), 2) * std::exp(-g2 / 4 / alpha) / g2;
            } else {
                /* G = 0 term */
                ewald_g_pt -= std::pow(unit_cell_.num_electrons(), 2) / alpha / 4; // constant term in QE comments
            }

            /* reduced G-vector set stores only half the sphere: add the -G partner explicitly */
            if (ctx_.gvec().reduced() && ig) {
                rho = 0;
                for (int ia = 0; ia < unit_cell_.num_atoms(); ia++) {
                    rho += std::conj(ctx_.gvec_phase_factor(ig, ia)) * static_cast<double>(unit_cell_.atom(ia).zn());
                }
                ewald_g_pt += std::pow(std::abs(rho), 2) * std::exp(-g2 / 4 / alpha) / g2;
            }
        }

        #pragma omp critical
        ewald_g += ewald_g_pt;
    }
    /* G-vectors are distributed over ranks: collect the full sum */
    ctx_.comm().allreduce(&ewald_g, 1);
    ewald_g *= (twopi / unit_cell_.omega());

    /* remove self-interaction */
    for (int ia = 0; ia < unit_cell_.num_atoms(); ia++) {
        ewald_g -= std::sqrt(alpha / pi) * std::pow(unit_cell_.atom(ia).zn(), 2);
    }

    /* real-space sum over nearest neighbours; i starts at 1 to skip the atom itself */
    double ewald_r = 0;
    #pragma omp parallel
    {
        double ewald_r_pt = 0;

        #pragma omp for
        for (int ia = 0; ia < unit_cell_.num_atoms(); ia++) {
            for (int i = 1; i < unit_cell_.num_nearest_neighbours(ia); i++) {
                int ja = unit_cell_.nearest_neighbour(i, ia).atom_id;
                double d = unit_cell_.nearest_neighbour(i, ia).distance;

                ewald_r_pt += 0.5 * unit_cell_.atom(ia).zn() * unit_cell_.atom(ja).zn() *
                              gsl_sf_erfc(std::sqrt(alpha) * d) / d;
            }
        }

        #pragma omp critical
        ewald_r += ewald_r_pt;
    }

    return (ewald_g + ewald_r);
}
/// Compute the total forces and store them in \p inout_forces.
/** Accumulates all pseudopotential force contributions, sums them into the output
 *  array, and (on MPI rank 0 only) prints the total and each individual contribution. */
inline void DFT_ground_state::forces(mdarray<double, 2>& inout_forces)
{
    PROFILE("sirius::DFT_ground_state::forces");

    forces_->calc_forces_contributions();
    forces_->sum_forces(inout_forces);

    if (ctx_.comm().rank() != 0) {
        return;
    }

    /* helper: dump one (3 x num_atoms) force array, one line per atom */
    auto dump_table = [this](mdarray<double, 2> const& f) {
        for (int ia = 0; ia < unit_cell_.num_atoms(); ia++) {
            printf("Atom %4i force = %15.7f %15.7f %15.7f \n",
                   unit_cell_.atom(ia).type_id(), f(0, ia), f(1, ia), f(2, ia));
        }
    };

    std::cout << "===== Total Forces in Ha/bohr =====" << std::endl;
    dump_table(inout_forces);

    std::cout << "===== Forces: ultrasoft contribution from Qij =====" << std::endl;
    dump_table(forces_->ultrasoft_forces());

    std::cout << "===== Forces: non-local contribution from Beta-projectors =====" << std::endl;
    dump_table(forces_->nonlocal_forces());

    std::cout << "===== Forces: local contribution from local potential=====" << std::endl;
    dump_table(forces_->local_forces());

    std::cout << "===== Forces: nlcc contribution from core density=====" << std::endl;
    dump_table(forces_->nlcc_forces());

    std::cout << "===== Forces: Ewald forces from ions =====" << std::endl;
    dump_table(forces_->ewald_forces());
}
/// Compute and return the total forces as a (3 x num_atoms) array.
/** Thin wrapper around the in-place overload. */
inline mdarray<double, 2> DFT_ground_state::forces()
{
    PROFILE("sirius::DFT_ground_state::forces");

    mdarray<double, 2> tot_forces(3, unit_cell_.num_atoms());
    forces(tot_forces);

    /* return the local by value: NRVO/implicit move applies;
       `return std::move(tot_forces)` would inhibit copy elision */
    return tot_forces;
}
/// Self-consistency (SCF) loop.
/** Each iteration: solve the band problem, find occupancies, generate and mix the
 *  density, rebuild the potential, and compare the new total energy / mixer RMS
 *  against the tolerances.
 *  \param potential_tol  convergence threshold for the mixer RMS
 *  \param energy_tol     convergence threshold for the total-energy change
 *  \param num_dft_iter   maximum number of iterations
 *  \param write_state    if true, save potential and density to the storage file at the end
 *  \return index of the iteration at which convergence was reached, or -1 if not converged */
inline int DFT_ground_state::find(double potential_tol, double energy_tol, int num_dft_iter, bool write_state)
{
    PROFILE("sirius::DFT_ground_state::scf_loop");

    double eold{0}, rms{0};

    /* full-potential runs mix the potential; pseudopotential runs mix the density */
    if (ctx_.full_potential()) {
        potential_.mixer_init();
    } else {
        density_.mixer_init();
    }

    int result{-1};

    //    tbb::task_scheduler_init tbb_init(omp_get_num_threads());

    for (int iter = 0; iter < num_dft_iter; iter++) {
        sddk::timer t1("sirius::DFT_ground_state::scf_loop|iteration");

        /* find new wave-functions */
        band_.solve_for_kset(kset_, potential_, true);
        /* find band occupancies */
        kset_.find_band_occupancies();
        /* generate new density from the occupied wave-functions */
        density_.generate(kset_);
        /* symmetrize density and magnetization */
        if (use_symmetry_) {
            symmetrize(density_.rho(), density_.magnetization(0), density_.magnetization(1),
                       density_.magnetization(2));
        }
        /* set new tolerance of iterative solver */
        if (!ctx_.full_potential()) {
            rms = density_.mix();
            /* heuristic: tighten the eigensolver tolerance as the density residual shrinks,
               but never below 1e-12 */
            double tol = std::max(1e-12, 0.1 * density_.dr2() / ctx_.unit_cell().num_valence_electrons());
            if (ctx_.comm().rank() == 0) {
                printf("dr2: %18.10f, tol: %18.10f\n", density_.dr2(), tol);
            }
            /* the tolerance may only decrease during the SCF cycle */
            ctx_.set_iterative_solver_tolerance(std::min(ctx_.iterative_solver_tolerance(), tol));
        }
        if (!ctx_.full_potential()) {
            density_.generate_paw_loc_density();
        }
        /* transform density to realspace after mixing and symmetrization */
        density_.fft_transform(1);
        /* check number of electrons */
        density_.check_num_electrons();

        //== if (ctx_.num_mag_dims())
        //== {
        //==     for (int ia = 0; ia < unit_cell_.num_atoms(); ia++)
        //==     {
        //==         vector3d<double> mag(0, 0, 0);
        //==         for (int j0 = 0; j0 < ctx_.fft().grid().size(0); j0++)
        //==         {
        //==             for (int j1 = 0; j1 < ctx_.fft().grid().size(1); j1++)
        //==             {
        //==                 for (int j2 = 0; j2 < ctx_.fft().local_size_z(); j2++)
        //==                 {
        //==                     /* get real space fractional coordinate */
        //==                     auto v0 = vector3d<double>(double(j0) / ctx_.fft().grid().size(0),
        //==                                                double(j1) / ctx_.fft().grid().size(1),
        //==                                                double(ctx_.fft().offset_z() + j2) / ctx_.fft().grid().size(2));
        //==                     /* index of real space point */
        //==                     int ir = ctx_.fft().grid().index_by_coord(j0, j1, j2);
        //==                     for (int t0 = -1; t0 <= 1; t0++)
        //==                     {
        //==                         for (int t1 = -1; t1 <= 1; t1++)
        //==                         {
        //==                             for (int t2 = -1; t2 <= 1; t2++)
        //==                             {
        //==                                 vector3d<double> v1 = v0 - (unit_cell_.atom(ia).position() + vector3d<double>(t0, t1, t2));
        //==                                 auto r = unit_cell_.get_cartesian_coordinates(v1);
        //==                                 auto a = r.length();
        //==                                 if (a <= 2.0)
        //==                                 {
        //==                                     mag[2] += density_.magnetization(0)->f_rg(ir);
        //==                                 }
        //==                             }
        //==                         }
        //==                     }
        //==                 }
        //==             }
        //==         }
        //==         for (int x: {0, 1, 2}) mag[x] *= (unit_cell_.omega() / ctx_.fft().size());
        //==         printf("atom: %i, mag: %f %f %f\n", ia, mag[0], mag[1], mag[2]);
        //==     }
        //== }

        /* compute new potential */
        potential_.generate(density_);
        /* symmetrize potential and effective magnetic field */
        if (use_symmetry_) {
            symmetrize(potential_.effective_potential(), potential_.effective_magnetic_field(0),
                       potential_.effective_magnetic_field(1), potential_.effective_magnetic_field(2));
        }
        /* transform potential to real space after symmetrization */
        potential_.fft_transform(1);
        /* compute new total energy for a new density */
        double etot = total_energy();

        /* full-potential: potential mixing happens here, after the new potential is built */
        if (ctx_.full_potential()) {
            rms = potential_.mix();
            double tol = std::max(1e-12, rms);
            if (ctx_.comm().rank() == 0) {
                printf("tol: %18.10f\n", tol);
            }
            ctx_.set_iterative_solver_tolerance(std::min(ctx_.iterative_solver_tolerance(), tol));
        }

        /* write some information */
        print_info();

        if (ctx_.comm().rank() == 0) {
            printf("iteration : %3i, RMS %18.12f, energy difference : %12.6f\n", iter, rms, etot - eold);
        }

        /* converged when both the energy change and the mixer RMS are below tolerance */
        if (std::abs(eold - etot) < energy_tol && rms < potential_tol) {
            result = iter;
            break;
        }

        eold = etot;
    }

    if (write_state) {
        ctx_.create_storage_file();
        potential_.save();
        density_.save();
    }

    //    tbb_init.terminate();

    return result;
}
/// Print charges, magnetic moments and all energy contributions.
/** All collective quantities are gathered first; the actual printing happens on MPI
 *  rank 0 only. The exact output format is relied upon by users/scripts — do not change it. */
inline void DFT_ground_state::print_info()
{
    /* gather all energy terms (these are collective calls — every rank must execute them) */
    double evalsum1 = kset_.valence_eval_sum();
    double evalsum2 = core_eval_sum();
    double ekin = energy_kin();
    double evxc = energy_vxc();
    double eexc = energy_exc();
    double ebxc = energy_bxc();
    double evha = energy_vha();
    double etot = total_energy();
    double gap = kset_.band_gap() * ha2ev;
    double ef = kset_.energy_fermi();
    double core_leak = density_.core_leakage();
    double enuc = energy_enuc();

    /* one-electron contribution (eband + deband in QE terminology) */
    double one_elec_en = evalsum1 - (evxc + evha);

    if (ctx_.esm_type() == electronic_structure_method_t::pseudopotential) {
        one_elec_en -= potential_.PAW_one_elec_energy();
    }

    /* integrate charge and magnetization (muffin-tin + interstitial parts) */
    std::vector<double> mt_charge;
    double it_charge;
    double total_charge = density_.rho()->integrate(mt_charge, it_charge);

    /* only the first num_mag_dims() entries below are filled */
    double total_mag[3];
    std::vector<double> mt_mag[3];
    double it_mag[3];
    for (int j = 0; j < ctx_.num_mag_dims(); j++) {
        total_mag[j] = density_.magnetization(j)->integrate(mt_mag[j], it_mag[j]);
    }

    if (ctx_.comm().rank() == 0) {
        if (ctx_.full_potential()) {
            double total_core_leakage = 0.0;
            printf("\n");
            printf("Charges and magnetic moments\n");
            for (int i = 0; i < 80; i++) printf("-");
            printf("\n");
            printf("atom      charge    core leakage");
            if (ctx_.num_mag_dims()) printf("              moment                |moment|");
            printf("\n");
            for (int i = 0; i < 80; i++) printf("-");
            printf("\n");

            for (int ia = 0; ia < unit_cell_.num_atoms(); ia++) {
                double core_leakage = unit_cell_.atom(ia).symmetry_class().core_leakage();
                total_core_leakage += core_leakage;
                printf("%4i  %10.6f  %10.8e", ia, mt_charge[ia], core_leakage);
                if (ctx_.num_mag_dims()) {
                    /* index 0 is always the z-component; x,y exist only for non-collinear runs */
                    vector3d<double> v;
                    v[2] = mt_mag[0][ia];
                    if (ctx_.num_mag_dims() == 3) {
                        v[0] = mt_mag[1][ia];
                        v[1] = mt_mag[2][ia];
                    }
                    printf("  [%8.4f, %8.4f, %8.4f]  %10.6f", v[0], v[1], v[2], v.length());
                }
                printf("\n");
            }

            printf("\n");
            printf("interstitial charge   : %10.6f\n", it_charge);
            if (ctx_.num_mag_dims()) {
                vector3d<double> v;
                v[2] = it_mag[0];
                if (ctx_.num_mag_dims() == 3) {
                    v[0] = it_mag[1];
                    v[1] = it_mag[2];
                }
                printf("interstitial moment   : [%8.4f, %8.4f, %8.4f], magnitude : %10.6f\n",
                       v[0], v[1], v[2], v.length());
            }

            printf("\n");
            printf("total charge          : %10.6f\n", total_charge);
            printf("total core leakage    : %10.8e\n", total_core_leakage);
            if (ctx_.num_mag_dims()) {
                vector3d<double> v;
                v[2] = total_mag[0];
                if (ctx_.num_mag_dims() == 3) {
                    v[0] = total_mag[1];
                    v[1] = total_mag[2];
                }
                printf("total moment          : [%8.4f, %8.4f, %8.4f], magnitude : %10.6f\n",
                       v[0], v[1], v[2], v.length());
            }
        }

        printf("\n");
        printf("Energy\n");
        for (int i = 0; i < 80; i++) printf("-");
        printf("\n");

        printf("valence_eval_sum          : %18.8f\n", evalsum1);
        if (ctx_.full_potential()) {
            printf("core_eval_sum             : %18.8f\n", evalsum2);
            printf("kinetic energy            : %18.8f\n", ekin);
            printf("enuc                      : %18.8f\n", enuc);
        }
        printf("<rho|V^{XC}>              : %18.8f\n", evxc);
        printf("<rho|E^{XC}>              : %18.8f\n", eexc);
        printf("<mag|B^{XC}>              : %18.8f\n", ebxc);
        printf("<rho|V^{H}>               : %18.8f\n", evha);
        if (!ctx_.full_potential()) {
            printf("one-electron contribution : %18.8f (Ha), %18.8f (Ry)\n", one_elec_en, one_elec_en * 2); // eband + deband in QE
            printf("hartree contribution      : %18.8f\n", 0.5 * evha);
            printf("xc contribution           : %18.8f\n", eexc);
            printf("ewald contribution        : %18.8f\n", ewald_energy_);
            printf("PAW contribution          : %18.8f\n", potential_.PAW_total_energy());
        }
        printf("Total energy              : %18.8f (Ha), %18.8f (Ry)\n", etot, etot * 2);

        printf("\n");
        printf("band gap (eV) : %18.8f\n", gap);
        printf("Efermi        : %18.8f\n", ef);
        printf("\n");
        if (ctx_.full_potential()) {
            printf("core leakage : %18.8f\n", core_leak);
        }
    }
}
} // namespace
#endif // __DFT_GROUND_STATE_H__
/** \page DFT Spin-polarized DFT
* \section section1 Preliminary notes
*
 * \note Here and below the symbol \f$ {\boldsymbol \sigma} \f$ is reserved for the vector of Pauli matrices. Spin components
* are labeled with \f$ \alpha \f$ or \f$ \beta\f$.
*
* Wave-function of spin-1/2 particle is a two-component spinor:
* \f[
* {\bf \varphi}({\bf r})=\left( \begin{array}{c} \varphi_1({\bf r}) \\ \varphi_2({\bf r}) \end{array} \right)
* \f]
* Operator of spin:
* \f[
* {\bf \hat S}=\frac{\hbar}{2}{\bf \sigma},
* \f]
* Pauli matrices:
* \f[
* \sigma_x=\left( \begin{array}{cc}
* 0 & 1 \\
* 1 & 0 \\ \end{array} \right) \,
* \sigma_y=\left( \begin{array}{cc}
* 0 & -i \\
* i & 0 \\ \end{array} \right) \,
* \sigma_z=\left( \begin{array}{cc}
* 1 & 0 \\
* 0 & -1 \\ \end{array} \right)
* \f]
*
* \section section2 Density and magnetization
* Density is defined as:
* \f[
* \rho({\bf r}) = \sum_{j}^{occ} \Psi_{j}^{*}({\bf r}){\bf I} \Psi_{j}({\bf r}) =
* \sum_{j}^{occ} \psi_{j}^{\uparrow *} \psi_{j}^{\uparrow} + \psi_{j}^{\downarrow *} \psi_{j}^{\downarrow}
* \f]
* Magnetization is defined as:
* \f[
* {\bf m}({\bf r}) = \sum_{j}^{occ} \Psi_{j}^{*}({\bf r}) {\boldsymbol \sigma} \Psi_{j}({\bf r})
* \f]
* \f[
* m_x({\bf r}) = \sum_{j}^{occ} \psi_{j}^{\uparrow *} \psi_{j}^{\downarrow} + \psi_{j}^{\downarrow *} \psi_{j}^{\uparrow}
* \f]
* \f[
* m_y({\bf r}) = \sum_{j}^{occ} -i \psi_{j}^{\uparrow *} \psi_{j}^{\downarrow} + i \psi_{j}^{\downarrow *} \psi_{j}^{\uparrow}
* \f]
* \f[
* m_z({\bf r}) = \sum_{j}^{occ} \psi_{j}^{\uparrow *} \psi_{j}^{\uparrow} - \psi_{j}^{\downarrow *} \psi_{j}^{\downarrow}
* \f]
* Density matrix is defined as:
* \f[
* {\boldsymbol \rho}({\bf r}) = \frac{1}{2} \Big( {\bf I}\rho({\bf r}) + {\boldsymbol \sigma} {\bf m}({\bf r})\Big) =
* \frac{1}{2} \sum_{j}^{occ} \left( \begin{array}{cc} \psi_{j}^{\uparrow *} \psi_{j}^{\uparrow} &
* \psi_{j}^{\downarrow *} \psi_{j}^{\uparrow} \\
* \psi_{j}^{\uparrow *} \psi_{j}^{\downarrow} &
* \psi_{j}^{\downarrow *} \psi_{j}^{\downarrow} \end{array} \right)
* \f]
* Pay attention to the order of spin indices in the \f$ 2 \times 2 \f$ density matrix:
* \f[
* \rho_{\alpha \beta}({\bf r}) = \frac{1}{2} \sum_{j}^{occ} \psi_{j}^{\beta *}({\bf r})\psi_{j}^{\alpha}({\bf r})
* \f]
*/
/**
\page stress Stress tensor
\section section1 Preliminary notes
Derivative of the G-vector in Cartesian coordinates over the lattice vector components:
\f[
\frac{\partial G_{\beta}}{\partial a_{\mu\nu}} + ({\bf a}^{-1})_{\nu \beta} G_{\mu} = 0
\f]
Mathematica proof script:
\verbatim
A = Table[Subscript[a, i, j], {i, 1, 3}, {j, 1, 3}];
invA = Inverse[A];
B = 2*Pi*Transpose[Inverse[A]];
G = Table[Subscript[g, i], {i, 1, 3}];
gvec = B.G;
Do[
Print[FullSimplify[
D[gvec[[beta]], Subscript[a, mu, nu]] + invA[[nu]][[beta]]*gvec[[mu]]]],
{beta, 1, 3}, {mu, 1, 3}, {nu, 1,3}]
\endverbatim
Another relation:
\f[
\frac{\partial}{\partial a_{\mu \nu}} \frac{1}{\sqrt{\Omega}} + \frac{1}{2} \frac{1}{\sqrt{\Omega}} ({\bf a}^{-1})_{\nu \mu} = 0
\f]
Mathematica proof script:
\verbatim
A = Table[Subscript[a, i, j], {i, 1, 3}, {j, 1, 3}];
invA = Inverse[A];
Do[
Print[FullSimplify[
D[1/Sqrt[Det[A]], Subscript[a, mu, nu]] + (1/2)*(1/Sqrt[Det[A]]) * invA[[nu]][[mu]]
]
],
{mu, 1, 3}, {nu, 1, 3}]
\endverbatim
Derivative of the G-vector real spherical harmonics over the lattice vector components:
\f[
\frac{\partial R_{\ell m}(\theta, \phi)}{\partial a_{\mu \nu}} =
\frac{\partial R_{\ell m}(\theta, \phi)}{\partial \theta} \frac{\partial \theta} {\partial a_{\mu \nu}} +
\frac{\partial R_{\ell m}(\theta, \phi)}{\partial \phi} \frac{\partial \phi} {\partial a_{\mu \nu}}
\f]
Derivatives of the \f$ R_{\ell m} \f$ with respect to the \f$ \theta,\, \phi\f$ angles can be tabulated up to a given \f$ \ell_{max} \f$.
The derivatives of angles are computed as following:
\f[
\frac{\partial \theta} {\partial a_{\mu \nu}} = \sum_{\beta=1}^{3} \frac{\partial \theta}{\partial G_{\beta}} \frac{\partial G_{\beta}} {\partial a_{\mu \nu}}
\f]
\f[
\frac{\partial \phi} {\partial a_{\mu \nu}} = \sum_{\beta=1}^{3} \frac{\partial \phi}{\partial G_{\beta}} \frac{\partial G_{\beta}} {\partial a_{\mu \nu}}
\f]
where
\f[
\frac{\partial \theta}{\partial G_{x}} = \frac{\cos(\phi) \cos(\theta)}{G} \\
\frac{\partial \theta}{\partial G_{y}} = \frac{\cos(\theta) \sin(\phi)}{G} \\
\frac{\partial \theta}{\partial G_{z}} = -\frac{\sin(\theta)}{G}
\f]
and
\f[
\frac{\partial \phi}{\partial G_{x}} = -\frac{\sin(\phi)}{\sin(\theta) G} \\
\frac{\partial \phi}{\partial G_{y}} = \frac{\cos(\phi)}{\sin(\theta) G} \\
\frac{\partial \phi}{\partial G_{z}} = 0
\f]
The derivative of \f$ \phi \f$ has discontinuities at \f$ \theta = 0, \theta=\pi \f$. This, however, is not a problem, because
multiplication by the derivative of \f$ R_{\ell m} \f$ removes it. The following functions have to be hardcoded:
\f[
\frac{\partial R_{\ell m}(\theta, \phi)}{\partial \theta} \\
\frac{\partial R_{\ell m}(\theta, \phi)}{\partial \phi} \frac{1}{\sin(\theta)}
\f]
Derivatives of the spherical Bessel functions are computed in the same fashion:
\f[
\frac{\partial j_{\ell}(Gx)}{\partial a_{\mu \nu}} =
\frac{\partial j_{\ell}(Gx)}{\partial G} \frac{\partial G} {\partial a_{\mu \nu}} =
\frac{\partial j_{\ell}(Gx)}{\partial G} \sum_{\beta=1}^{3}\frac{\partial G}{\partial G_{\beta}} \frac{\partial G_{\beta}} {\partial a_{\mu \nu}}
\f]
The derivatives of \f$ G \f$ are:
\f[
\frac{\partial G}{\partial G_{x}} = \sin(\theta)\cos(\phi) \\
\frac{\partial G}{\partial G_{y}} = \sin(\theta)\sin(\phi) \\
\frac{\partial G}{\partial G_{z}} = \cos(\theta)
\f]
Let's write the full expression for the derivative of beta-projector matrix elements with respect to lattice vector
components:
\f[
\frac{\partial \langle {\bf G+k}|\beta_{\ell m} \rangle} {\partial a_{\mu \nu}} =
\frac{\partial} {\partial a_{\mu \nu}} \frac{4\pi}{\sqrt{\Omega}}(-i)^{\ell} R_{\ell m}(\theta_{G+k}, \phi_{G+k}) \int \beta_{\ell}(r) j_{\ell}(Gr) r^2 dr =\\
\frac{4\pi}{\sqrt{\Omega}} (-i)^{\ell} \Bigg[ \int \beta_{\ell}(r) j_{\ell}(Gr) r^2 dr
\Big( \frac{\partial R_{\ell m}(\theta, \phi)}{\partial a_{\mu \nu}} - \frac{1}{2} R_{\ell m}(\theta, \phi) ({\bf a}^{-1})_{\nu \mu} \Big) +
R_{\ell m}(\theta, \phi) \int \beta_{\ell}(r) \frac{\partial j_{\ell}(Gr)}{\partial a_{\mu \nu}} r^2 dr \Bigg]
\f]
*/
|
GB_unaryop__minv_int8_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_int8_int32
// op(A') function: GB_tran__minv_int8_int32
// C type: int8_t
// A type: int32_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 8)
// GB_ATYPE: type of the entries of the input matrix A
#define GB_ATYPE \
    int32_t

// GB_CTYPE: type of the entries of the output matrix C
#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

// GB_CX(p): the p-th entry of the output array Cx
#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IMINV_SIGNED (x, 8) ;

// casting
#define GB_CASTING(z, aij) \
    int8_t z = (int8_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_INT8 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = GB_IMINV_SIGNED ((int8_t) Ax [p], 8): cast each int32 entry of A
// to int8 and apply the MINV (multiplicative inverse) unary operator, in
// parallel over the entries.  Auto-generated kernel; do not edit by hand.
GrB_Info GB_unop__minv_int8_int32
(
    int8_t *Cx,       // Cx and Ax may be aliased
    int32_t *Ax,
    int64_t anz,      // number of entries to process
    int nthreads      // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out; caller falls back to
    // the generic (non-specialized) apply kernel
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // Cx [p] = op (cast (Ax [p])), via the GB_* macros defined above
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv ((int8_t) A'): transpose A, typecast int32 -> int8, and apply the
// MINV unary operator.  The actual work is performed by the shared template
// GB_unaryop_transpose.c, specialized through the GB_* macros defined above.
// Auto-generated kernel; do not edit by hand.
GrB_Info GB_tran__minv_int8_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,    // per-slice workspace consumed by the template
    GBI_single_iterator Iter,           // iterator state over A (see template)
    const int64_t *GB_RESTRICT A_slice, // slice boundaries for parallel work
    int naslice                         // number of slices of A
)
{
    #if GB_DISABLE
    // compiled out; caller uses the generic transpose instead
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
visual-effects.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF X X %
% F X X %
% FFF X %
% F X X %
% F X X %
% %
% %
% MagickCore Image Special Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/layer.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/transform.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/visual-effects.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d d N o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AddNoiseImage() adds random noise to the image.
%
% The format of the AddNoiseImage method is:
%
% Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
% const double attenuate,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o noise_type: The type of noise: Uniform, Gaussian, Multiplicative,
% Impulse, Laplacian, or Poisson.
%
% o attenuate: attenuate the random distribution.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
  const double attenuate,ExceptionInfo *exception)
{
#define AddNoiseImageTag "AddNoise/Image"

  CacheView
    *image_view,
    *noise_view;

  Image
    *noise_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Initialize noise image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Try the OpenCL-accelerated path first; fall through to the CPU
    implementation when it returns NULL.
  */
  noise_image=AccelerateAddNoiseImage(image,noise_type,attenuate,exception);
  if (noise_image != (Image *) NULL)
    return(noise_image);
#endif
  noise_image=CloneImage(image,0,0,MagickTrue,exception);
  if (noise_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse)
    {
      noise_image=DestroyImage(noise_image);
      return((Image *) NULL);
    }
  /*
    Add noise in each row.
  */
  status=MagickTrue;
  progress=0;
  /* one RandomInfo per possible OpenMP thread, indexed by thread id below */
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireVirtualCacheView(image,exception);
  noise_view=AcquireAuthenticCacheView(noise_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  /*
    NOTE(review): rows run in parallel only when key == ~0UL -- presumably
    parallelism is suppressed when a fixed random seed is in effect so seeded
    output stays reproducible; confirm against GetRandomSecretKey().
  */
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,noise_image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    /* a prior row failed: skip remaining work but keep the loop well-formed */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait noise_traits=GetPixelChannelTraits(noise_image,channel);

        if ((traits == UndefinedPixelTrait) ||
            (noise_traits == UndefinedPixelTrait))
          continue;
        if ((noise_traits & CopyPixelTrait) != 0)
          {
            /* channel not selected for noise: copy it through unchanged */
            SetPixelChannel(noise_image,channel,p[i],q);
            continue;
          }
        /* perturb the channel with the requested noise distribution */
        SetPixelChannel(noise_image,channel,ClampToQuantum(
          GenerateDifferentialNoise(random_info[id],p[i],noise_type,attenuate)),
          q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(noise_image);
    }
    sync=SyncCacheViewAuthenticPixels(noise_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,AddNoiseImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  noise_view=DestroyCacheView(noise_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    noise_image=DestroyImage(noise_image);
  return(noise_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l u e S h i f t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlueShiftImage() mutes the colors of the image to simulate a scene at
% nighttime in the moonlight.
%
% The format of the BlueShiftImage method is:
%
% Image *BlueShiftImage(const Image *image,const double factor,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o factor: the shift factor.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BlueShiftImage(const Image *image,const double factor,
  ExceptionInfo *exception)
{
#define BlueShiftImageTag "BlueShift/Image"

  CacheView
    *image_view,
    *shift_view;

  Image
    *shift_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate blue shift image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  shift_image=CloneImage(image,0,0,MagickTrue,exception);
  if (shift_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(shift_image,DirectClass,exception) == MagickFalse)
    {
      shift_image=DestroyImage(shift_image);
      return((Image *) NULL);
    }
  /*
    Blue-shift DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  shift_view=AcquireAuthenticCacheView(shift_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,shift_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    PixelInfo
      pixel;

    Quantum
      quantum;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(shift_view,0,y,shift_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        First pass: blend each channel 50/50 with factor times the *minimum*
        of the RGB channels.
      */
      quantum=GetPixelRed(image,p);
      if (GetPixelGreen(image,p) < quantum)
        quantum=GetPixelGreen(image,p);
      if (GetPixelBlue(image,p) < quantum)
        quantum=GetPixelBlue(image,p);
      pixel.red=0.5*(GetPixelRed(image,p)+factor*quantum);
      pixel.green=0.5*(GetPixelGreen(image,p)+factor*quantum);
      pixel.blue=0.5*(GetPixelBlue(image,p)+factor*quantum);
      /*
        Second pass: blend the result 50/50 with factor times the *maximum*
        of the RGB channels, muting the colors toward a moonlit look.
      */
      quantum=GetPixelRed(image,p);
      if (GetPixelGreen(image,p) > quantum)
        quantum=GetPixelGreen(image,p);
      if (GetPixelBlue(image,p) > quantum)
        quantum=GetPixelBlue(image,p);
      pixel.red=0.5*(pixel.red+factor*quantum);
      pixel.green=0.5*(pixel.green+factor*quantum);
      pixel.blue=0.5*(pixel.blue+factor*quantum);
      SetPixelRed(shift_image,ClampToQuantum(pixel.red),q);
      SetPixelGreen(shift_image,ClampToQuantum(pixel.green),q);
      SetPixelBlue(shift_image,ClampToQuantum(pixel.blue),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(shift_image);
    }
    sync=SyncCacheViewAuthenticPixels(shift_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,BlueShiftImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  shift_view=DestroyCacheView(shift_view);
  if (status == MagickFalse)
    shift_image=DestroyImage(shift_image);
  return(shift_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a r c o a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CharcoalImage() creates a new image that is a copy of an existing one with
% the edge highlighted. It allocates the memory necessary for the new Image
% structure and returns a pointer to the new image.
%
% The format of the CharcoalImage method is:
%
% Image *CharcoalImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CharcoalImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *charcoal,
    *edges;

  MagickBooleanType
    ok;

  /*
    Charcoal drawing effect: edge-detect, clamp, blur, then normalize,
    negate, and grayscale the result.  Returns a new image, or NULL on
    failure (errors are reported through `exception`).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Stage 1: highlight edges on an opaque working copy.
  */
  edges=EdgeImage(image,radius,exception);
  if (edges == (Image *) NULL)
    return((Image *) NULL);
  edges->alpha_trait=UndefinedPixelTrait;
  /*
    Stage 2: clamp the edge image, then soften it with a Gaussian blur.
  */
  charcoal=(Image *) NULL;
  if (ClampImage(edges,exception) != MagickFalse)
    charcoal=BlurImage(edges,radius,sigma,exception);
  edges=DestroyImage(edges);
  if (charcoal == (Image *) NULL)
    return((Image *) NULL);
  /*
    Stage 3: normalize contrast, invert (dark strokes on light paper), and
    convert to grayscale using the source image's intensity method.
  */
  ok=NormalizeImage(charcoal,exception);
  if (ok != MagickFalse)
    ok=NegateImage(charcoal,MagickFalse,exception);
  if (ok != MagickFalse)
    ok=GrayscaleImage(charcoal,image->intensity,exception);
  if (ok == MagickFalse)
    charcoal=DestroyImage(charcoal);
  return(charcoal);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorizeImage() blends the fill color with each pixel in the image.
% A percentage blend is specified with opacity. Control the application
% of different color components by specifying a different percentage for
% each component (e.g. 90/100/10 is 90% red, 100% green, and 10% blue).
%
% The format of the ColorizeImage method is:
%
% Image *ColorizeImage(const Image *image,const char *blend,
% const PixelInfo *colorize,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o blend: A character string indicating the level of blending as a
% percentage.
%
% o colorize: A color value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ColorizeImage(const Image *image,const char *blend,
  const PixelInfo *colorize,ExceptionInfo *exception)
{
#define ColorizeImageTag "Colorize/Image"
/* linear blend: blend_percentage% of `colorize`, the rest from `pixel` */
#define Colorize(pixel,blend_percentage,colorize) \
  (((pixel)*(100.0-(blend_percentage))+(colorize)*(blend_percentage))/100.0)

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  Image
    *colorize_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickStatusType
    flags;

  PixelInfo
    blend_percentage;

  ssize_t
    y;

  /*
    Allocate colorized image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  colorize_image=CloneImage(image,0,0,MagickTrue,exception);
  if (colorize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(colorize_image,DirectClass,exception) == MagickFalse)
    {
      colorize_image=DestroyImage(colorize_image);
      return((Image *) NULL);
    }
  if ((IsGrayColorspace(colorize_image->colorspace) != MagickFalse) ||
      (IsPixelInfoGray(colorize) != MagickFalse))
    (void) SetImageColorspace(colorize_image,sRGBColorspace,exception);
  if ((colorize_image->alpha_trait == UndefinedPixelTrait) &&
      (colorize->alpha_trait != UndefinedPixelTrait))
    (void) SetImageAlpha(colorize_image,OpaqueAlpha,exception);
  /* no blend string: return the (possibly colorspace-adjusted) clone */
  if (blend == (const char *) NULL)
    return(colorize_image);
  /*
    Parse the blend geometry into per-channel blend percentages:
    rho -> red (and the default for all channels), sigma -> green,
    xi -> blue, psi -> alpha (or black for CMYK, with chi -> alpha).
  */
  GetPixelInfo(colorize_image,&blend_percentage);
  flags=ParseGeometry(blend,&geometry_info);
  blend_percentage.red=geometry_info.rho;
  blend_percentage.green=geometry_info.rho;
  blend_percentage.blue=geometry_info.rho;
  blend_percentage.black=geometry_info.rho;
  blend_percentage.alpha=(MagickRealType) TransparentAlpha;
  if ((flags & SigmaValue) != 0)
    blend_percentage.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    blend_percentage.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    blend_percentage.alpha=geometry_info.psi;
  if (blend_percentage.colorspace == CMYKColorspace)
    {
      if ((flags & PsiValue) != 0)
        blend_percentage.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        blend_percentage.alpha=geometry_info.chi;
    }
  /*
    Colorize DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  /* note: despite the name, this view iterates the *output* image in place */
  image_view=AcquireVirtualCacheView(colorize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(colorize_image,colorize_image,colorize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) colorize_image->rows; y++)
  {
    MagickBooleanType
      sync;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,colorize_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) colorize_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(colorize_image); i++)
      {
        PixelTrait traits = GetPixelChannelTraits(colorize_image,
          (PixelChannel) i);

        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & CopyPixelTrait) != 0)
          continue;
        /* blend this channel toward the fill color by its percentage */
        SetPixelChannel(colorize_image,(PixelChannel) i,ClampToQuantum(
          Colorize(q[i],GetPixelInfoChannel(&blend_percentage,(PixelChannel) i),
          GetPixelInfoChannel(colorize,(PixelChannel) i))),q);
      }
      q+=GetPixelChannels(colorize_image);
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ColorizeImageTag,progress,
          colorize_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    colorize_image=DestroyImage(colorize_image);
  return(colorize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r M a t r i x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorMatrixImage() applies color transformation to an image. This method
% permits saturation changes, hue rotation, luminance to alpha, and various
% other effects. Although variable-sized transformation matrices can be used,
% typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
% (or RGBA with offsets). The matrix is similar to those used by Adobe Flash
% except offsets are in column 6 rather than 5 (in support of CMYKA images)
% and offsets are normalized (divide Flash offset by 255).
%
% The format of the ColorMatrixImage method is:
%
% Image *ColorMatrixImage(const Image *image,
% const KernelInfo *color_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_matrix: the color matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* FUTURE: modify to make use of a MagickMatrix Multiply function
   That should be provided in "matrix.c"
   (ASIDE: actually distorts should do this too but currently doesn't)
*/
MagickExport Image *ColorMatrixImage(const Image *image,
  const KernelInfo *color_matrix,ExceptionInfo *exception)
{
#define ColorMatrixImageTag "ColorMatrix/Image"

  CacheView
    *color_view,
    *image_view;

  double
    ColorMatrix[6][6] =    /* identity by default: unspecified rows/cols are a no-op */
    {
      { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 1.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 1.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 1.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 }
    };

  Image
    *color_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    u,
    v,
    y;

  /*
    Map given color_matrix, into a 6x6 matrix RGBKA and a constant
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  i=0;
  /* copy the (possibly smaller or larger) user kernel into the 6x6 matrix;
     entries beyond 6x6 are consumed but ignored */
  for (v=0; v < (ssize_t) color_matrix->height; v++)
    for (u=0; u < (ssize_t) color_matrix->width; u++)
    {
      if ((v < 6) && (u < 6))
        ColorMatrix[v][u]=color_matrix->values[i];
      i++;
    }
  /*
    Initialize color image.
  */
  color_image=CloneImage(image,0,0,MagickTrue,exception);
  if (color_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(color_image,DirectClass,exception) == MagickFalse)
    {
      color_image=DestroyImage(color_image);
      return((Image *) NULL);
    }
  if (image->debug != MagickFalse)
    {
      char
        format[MagickPathExtent],
        *message;

      /* log the effective 6x6 matrix, one row per line */
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  ColorMatrix image with color matrix:");
      message=AcquireString("");
      for (v=0; v < 6; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < 6; u++)
        {
          (void) FormatLocaleString(format,MagickPathExtent,"%+f ",
            ColorMatrix[v][u]);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  /*
    Apply the ColorMatrix to image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  color_view=AcquireAuthenticCacheView(color_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,color_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(color_view,0,y,color_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        v;

      size_t
        height;

      GetPixelInfoPixel(image,p,&pixel);
      height=color_matrix->height > 6 ? 6UL : color_matrix->height;
      for (v=0; v < (ssize_t) height; v++)
      {
        double
          sum;

        /*
          Row v of the matrix weights (R,G,B[,K][,A]) plus a constant
          offset (column 5, scaled by QuantumRange); black and alpha terms
          apply only when the image actually has those channels.
        */
        sum=ColorMatrix[v][0]*GetPixelRed(image,p)+ColorMatrix[v][1]*
          GetPixelGreen(image,p)+ColorMatrix[v][2]*GetPixelBlue(image,p);
        if (image->colorspace == CMYKColorspace)
          sum+=ColorMatrix[v][3]*GetPixelBlack(image,p);
        if (image->alpha_trait != UndefinedPixelTrait)
          sum+=ColorMatrix[v][4]*GetPixelAlpha(image,p);
        sum+=QuantumRange*ColorMatrix[v][5];
        /* row index selects the destination channel */
        switch (v)
        {
          case 0: pixel.red=sum; break;
          case 1: pixel.green=sum; break;
          case 2: pixel.blue=sum; break;
          case 3: pixel.black=sum; break;
          case 4: pixel.alpha=sum; break;
          default: break;
        }
      }
      SetPixelViaPixelInfo(color_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(color_image);
    }
    if (SyncCacheViewAuthenticPixels(color_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ColorMatrixImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  color_view=DestroyCacheView(color_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    color_image=DestroyImage(color_image);
  return(color_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I m p l o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ImplodeImage() creates a new image that is a copy of an existing
% one with the image pixels "implode" by the specified percentage. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ImplodeImage method is:
%
% Image *ImplodeImage(const Image *image,const double amount,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o implode_image: Method ImplodeImage returns a pointer to the image
% after it is implode. A null image is returned if there is a memory
% shortage.
%
% o image: the image.
%
% o amount: Define the extent of the implosion.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ImplodeImage(const Image *image,const double amount,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ImplodeImageTag "Implode/Image"

  CacheView
    *canvas_view,
    *implode_view,
    *interpolate_view;

  double
    radius;

  Image
    *canvas_image,
    *implode_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize implode image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas_image=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  if ((canvas_image->alpha_trait == UndefinedPixelTrait) &&
      (canvas_image->background_color.alpha != OpaqueAlpha))
    (void) SetImageAlphaChannel(canvas_image,OpaqueAlphaChannel,exception);
  implode_image=CloneImage(canvas_image,0,0,MagickTrue,exception);
  if (implode_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(implode_image,DirectClass,exception) == MagickFalse)
    {
      canvas_image=DestroyImage(canvas_image);
      implode_image=DestroyImage(implode_image);
      return((Image *) NULL);
    }
  /*
    Compute scaling factor.  The scale maps the image onto a circle of
    `radius` (half the shorter dimension) centered on the image.
  */
  scale.x=1.0;
  scale.y=1.0;
  center.x=0.5*canvas_image->columns;
  center.y=0.5*canvas_image->rows;
  radius=center.x;
  if (canvas_image->columns > canvas_image->rows)
    scale.y=(double) canvas_image->columns/(double) canvas_image->rows;
  else
    if (canvas_image->columns < canvas_image->rows)
      {
        scale.x=(double) canvas_image->rows/(double) canvas_image->columns;
        radius=center.y;
      }
  /*
    Implode image.
  */
  status=MagickTrue;
  progress=0;
  /* two views on the same canvas: one for row reads, one for interpolation */
  canvas_view=AcquireVirtualCacheView(canvas_image,exception);
  interpolate_view=AcquireVirtualCacheView(canvas_image,exception);
  implode_view=AcquireAuthenticCacheView(implode_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(canvas_image,implode_image,canvas_image->rows,1)
#endif
  for (y=0; y < (ssize_t) canvas_image->rows; y++)
  {
    double
      distance;

    PointInfo
      delta;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(implode_view,0,y,implode_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    delta.y=scale.y*(double) (y-center.y);
    for (x=0; x < (ssize_t) canvas_image->columns; x++)
    {
      register ssize_t
        i;

      /*
        Determine if the pixel is within an ellipse.
      */
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance >= (radius*radius))
        /* outside the implosion circle: copy source channels unchanged */
        for (i=0; i < (ssize_t) GetPixelChannels(canvas_image); i++)
        {
          PixelChannel channel = GetPixelChannelChannel(canvas_image,i);
          PixelTrait traits = GetPixelChannelTraits(canvas_image,channel);
          PixelTrait implode_traits = GetPixelChannelTraits(implode_image,
            channel);

          if ((traits == UndefinedPixelTrait) ||
              (implode_traits == UndefinedPixelTrait))
            continue;
          SetPixelChannel(implode_image,channel,p[i],q);
        }
      else
        {
          double
            factor;

          /*
            Implode the pixel: pull it toward (amount > 0) or push it away
            from (amount < 0) the center, sampling the source at the
            displaced coordinate.
          */
          factor=1.0;
          if (distance > 0.0)
            factor=pow(sin(MagickPI*sqrt((double) distance)/radius/2),-amount);
          status=InterpolatePixelChannels(canvas_image,interpolate_view,
            implode_image,method,(double) (factor*delta.x/scale.x+center.x),
            (double) (factor*delta.y/scale.y+center.y),q,exception);
          if (status == MagickFalse)
            break;
        }
      p+=GetPixelChannels(canvas_image);
      q+=GetPixelChannels(implode_image);
    }
    if (SyncCacheViewAuthenticPixels(implode_view,exception) == MagickFalse)
      status=MagickFalse;
    if (canvas_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(canvas_image,ImplodeImageTag,progress,
          canvas_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  implode_view=DestroyCacheView(implode_view);
  interpolate_view=DestroyCacheView(interpolate_view);
  canvas_view=DestroyCacheView(canvas_view);
  canvas_image=DestroyImage(canvas_image);
  if (status == MagickFalse)
    implode_image=DestroyImage(implode_image);
  return(implode_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The MorphImages() method requires a minimum of two images. The first
% image is transformed into the second by a number of intervening images
% as specified by frames.
%
% The format of the MorphImage method is:
%
% Image *MorphImages(const Image *image,const size_t number_frames,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_frames: Define the number of in-between image to generate.
% The more in-between frames, the smoother the morph.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphImages(const Image *image,const size_t number_frames,
  ExceptionInfo *exception)
{
#define MorphImageTag "Morph/Image"

  double
    alpha,
    beta;

  Image
    *morph_image,
    *morph_images;

  MagickBooleanType
    status;

  MagickOffsetType
    scene;

  register const Image
    *next;

  register ssize_t
    n;

  ssize_t
    y;

  /*
    Clone first frame in sequence.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  morph_images=CloneImage(image,0,0,MagickTrue,exception);
  if (morph_images == (Image *) NULL)
    return((Image *) NULL);
  status=MagickTrue;  /* was previously read uninitialized in the single-image
    path below when a progress callback returned MagickFalse */
  if (GetNextImageInList(image) == (Image *) NULL)
    {
      /*
        Morph single image: emit number_frames-1 additional identical clones.
      */
      for (n=1; n < (ssize_t) number_frames; n++)
      {
        morph_image=CloneImage(image,0,0,MagickTrue,exception);
        if (morph_image == (Image *) NULL)
          {
            morph_images=DestroyImageList(morph_images);
            return((Image *) NULL);
          }
        AppendImageToList(&morph_images,morph_image);
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,MorphImageTag,(MagickOffsetType) n,
              number_frames);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      return(GetFirstImageInList(morph_images));
    }
  /*
    Morph image sequence: for each adjacent pair, generate number_frames
    in-between images by resizing both endpoints to an interpolated geometry
    and alpha-blending them.
  */
  status=MagickTrue;
  scene=0;
  next=image;
  for ( ; GetNextImageInList(next) != (Image *) NULL; next=GetNextImageInList(next))
  {
    for (n=0; n < (ssize_t) number_frames; n++)
    {
      CacheView
        *image_view,
        *morph_view;

      /*
        beta walks from ~0 to ~1 across the in-between frames; alpha is its
        complement (weight of the earlier frame).
      */
      beta=(double) (n+1.0)/(double) (number_frames+1.0);
      alpha=1.0-beta;
      morph_image=ResizeImage(next,(size_t) (alpha*next->columns+beta*
        GetNextImageInList(next)->columns+0.5),(size_t) (alpha*next->rows+beta*
        GetNextImageInList(next)->rows+0.5),next->filter,exception);
      if (morph_image == (Image *) NULL)
        {
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      status=SetImageStorageClass(morph_image,DirectClass,exception);
      if (status == MagickFalse)
        {
          morph_image=DestroyImage(morph_image);
          morph_images=DestroyImageList(morph_images);  /* fix: previously
            leaked the accumulated morph list on this error path */
          return((Image *) NULL);
        }
      AppendImageToList(&morph_images,morph_image);
      morph_images=GetLastImageInList(morph_images);
      /*
        Resize the later frame to the in-between geometry; it becomes the
        blend source (read through image_view) for the appended frame.
      */
      morph_image=ResizeImage(GetNextImageInList(next),morph_images->columns,
        morph_images->rows,GetNextImageInList(next)->filter,exception);
      if (morph_image == (Image *) NULL)
        {
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      image_view=AcquireVirtualCacheView(morph_image,exception);
      morph_view=AcquireAuthenticCacheView(morph_images,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(morph_image,morph_image,morph_image->rows,1)
#endif
      for (y=0; y < (ssize_t) morph_images->rows; y++)
      {
        MagickBooleanType
          sync;

        register const Quantum
          *magick_restrict p;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,morph_image->columns,1,
          exception);
        q=GetCacheViewAuthenticPixels(morph_view,0,y,morph_images->columns,1,
          exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) morph_images->columns; x++)
        {
          register ssize_t
            i;

          /*
            NOTE(review): q addresses morph_images' pixel buffer but is
            written via morph_image's channel map; the maps coincide here
            because both derive from the same source pair — confirm before
            generalizing.
          */
          for (i=0; i < (ssize_t) GetPixelChannels(morph_image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(morph_image,i);
            PixelTrait traits = GetPixelChannelTraits(morph_image,channel);
            PixelTrait morph_traits=GetPixelChannelTraits(morph_images,channel);
            if ((traits == UndefinedPixelTrait) ||
                (morph_traits == UndefinedPixelTrait))
              continue;
            if ((morph_traits & CopyPixelTrait) != 0)
              {
                SetPixelChannel(morph_image,channel,p[i],q);
                continue;
              }
            /*
              Blend: alpha weights the already-stored earlier frame, beta
              the resized later frame.
            */
            SetPixelChannel(morph_image,channel,ClampToQuantum(alpha*
              GetPixelChannel(morph_images,channel,q)+beta*p[i]),q);
          }
          p+=GetPixelChannels(morph_image);
          q+=GetPixelChannels(morph_images);
        }
        sync=SyncCacheViewAuthenticPixels(morph_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      morph_view=DestroyCacheView(morph_view);
      image_view=DestroyCacheView(image_view);
      morph_image=DestroyImage(morph_image);
    }
    if (n < (ssize_t) number_frames)
      break;
    /*
      Clone last frame in sequence.
    */
    morph_image=CloneImage(GetNextImageInList(next),0,0,MagickTrue,exception);
    if (morph_image == (Image *) NULL)
      {
        morph_images=DestroyImageList(morph_images);
        return((Image *) NULL);
      }
    AppendImageToList(&morph_images,morph_image);
    morph_images=GetLastImageInList(morph_images);
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,MorphImageTag,scene,
          GetImageListLength(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
    scene++;
  }
  /*
    If the pair loop exited early (progress abort), the sequence is
    incomplete; discard it.
  */
  if (GetNextImageInList(next) != (Image *) NULL)
    {
      morph_images=DestroyImageList(morph_images);
      return((Image *) NULL);
    }
  return(GetFirstImageInList(morph_images));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P l a s m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PlasmaImage() initializes an image with plasma fractal values. The image
% must be initialized with a base color and the random number generator
% seeded before this method is called.
%
% The format of the PlasmaImage method is:
%
% MagickBooleanType PlasmaImage(Image *image,const SegmentInfo *segment,
% size_t attenuate,size_t depth,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o segment: Define the region to apply plasma fractals values.
%
% o attenuate: Define the plasma attenuation factor.
%
% o depth: Limit the plasma recursion depth.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Perturb a pixel value by a uniform random amount in [-noise/2,+noise/2)
  and clamp the result to the representable quantum range.
*/
static inline Quantum PlasmaPixel(RandomInfo *magick_restrict random_info,
  const double pixel,const double noise)
{
  double
    value;

  value=pixel+noise*GetPseudoRandomValue(random_info)-noise/2.0;
  return(ClampToQuantum((MagickRealType) value));
}
/*
  PlasmaImageProxy() recursively renders plasma-fractal noise into the
  rectangle described by `segment`.  While depth > 0 it subdivides the
  segment into four quadrants and recurses; at depth 0 it seeds the segment's
  left/right/bottom/top edge midpoints and center with randomized averages of
  the corner pixels read through u_view/v_view, writing through image_view.
  `attenuate` grows with recursion depth, shrinking the noise amplitude.
  Returns MagickTrue when the segment is degenerate or the seeded area is
  still "large" (>= 3 pixels per side, i.e. more passes are needed);
  otherwise MagickFalse.
*/
static MagickBooleanType PlasmaImageProxy(Image *image,CacheView *image_view,
  CacheView *u_view,CacheView *v_view,RandomInfo *magick_restrict random_info,
  const SegmentInfo *magick_restrict segment,size_t attenuate,size_t depth,
  ExceptionInfo *exception)
{
  double
    plasma;

  MagickStatusType
    status;

  register const Quantum
    *magick_restrict u,
    *magick_restrict v;

  register Quantum
    *magick_restrict q;

  register ssize_t
    i;

  ssize_t
    x,
    x_mid,
    y,
    y_mid;

  /*
    Degenerate (zero-area) segment: nothing to do.
  */
  if ((fabs(segment->x2-segment->x1) < MagickEpsilon) &&
      (fabs(segment->y2-segment->y1) < MagickEpsilon))
    return(MagickTrue);
  if (depth != 0)
    {
      SegmentInfo
        local_info;

      /*
        Divide the area into quadrants and recurse.
      */
      depth--;
      attenuate++;
      x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5);
      y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5);
      /* upper-left quadrant */
      local_info=(*segment);
      local_info.x2=(double) x_mid;
      local_info.y2=(double) y_mid;
      status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      /* lower-left quadrant */
      local_info=(*segment);
      local_info.y1=(double) y_mid;
      local_info.x2=(double) x_mid;
      status&=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      /* upper-right quadrant */
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y2=(double) y_mid;
      status&=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      /* lower-right quadrant */
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y1=(double) y_mid;
      status&=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      /* all four quadrants must have succeeded */
      return(status == 0 ? MagickFalse : MagickTrue);
    }
  x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5);
  y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5);
  /*
    Segment has collapsed to a single point: nothing left to seed.
  */
  if ((fabs(segment->x1-x_mid) < MagickEpsilon) &&
      (fabs(segment->x2-x_mid) < MagickEpsilon) &&
      (fabs(segment->y1-y_mid) < MagickEpsilon) &&
      (fabs(segment->y2-y_mid) < MagickEpsilon))
    return(MagickFalse);
  /*
    Average pixels and apply plasma.  Noise amplitude shrinks as attenuate
    grows with recursion depth.
  */
  status=MagickTrue;
  plasma=(double) QuantumRange/(2.0*attenuate);
  if ((fabs(segment->x1-x_mid) >= MagickEpsilon) ||
      (fabs(segment->x2-x_mid) >= MagickEpsilon))
    {
      /*
        Left pixel: midpoint of the left edge, from the segment's left-side
        corners.
      */
      x=(ssize_t) ceil(segment->x1-0.5);
      u=GetCacheViewVirtualPixels(u_view,x,(ssize_t) ceil(segment->y1-0.5),1,1,
        exception);
      v=GetCacheViewVirtualPixels(v_view,x,(ssize_t) ceil(segment->y2-0.5),1,1,
        exception);
      q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
      /*
        NOTE(review): this branch returns MagickTrue on NULL pixels while the
        right-pixel branch below returns MagickFalse — looks inconsistent;
        confirm the intended abort semantics before changing.
      */
      if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
          (q == (Quantum *) NULL))
        return(MagickTrue);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
      }
      status=SyncCacheViewAuthenticPixels(image_view,exception);
      if (fabs(segment->x1-segment->x2) >= MagickEpsilon)
        {
          /*
            Right pixel: midpoint of the right edge.
          */
          x=(ssize_t) ceil(segment->x2-0.5);
          u=GetCacheViewVirtualPixels(u_view,x,(ssize_t) ceil(segment->y1-0.5),
            1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,x,(ssize_t) ceil(segment->y2-0.5),
            1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickFalse);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
          }
          status=SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((fabs(segment->y1-y_mid) >= MagickEpsilon) ||
      (fabs(segment->y2-y_mid) >= MagickEpsilon))
    {
      /*
        NOTE(review): the second operand mixes an x test (x1 vs x_mid) with a
        y test (y2 vs y_mid) — possibly intended to be x2 vs x_mid; left
        unchanged pending confirmation.
      */
      if ((fabs(segment->x1-x_mid) >= MagickEpsilon) ||
          (fabs(segment->y2-y_mid) >= MagickEpsilon))
        {
          /*
            Bottom pixel: midpoint of the bottom edge.
          */
          y=(ssize_t) ceil(segment->y2-0.5);
          u=GetCacheViewVirtualPixels(u_view,(ssize_t) ceil(segment->x1-0.5),y,
            1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,(ssize_t) ceil(segment->x2-0.5),y,
            1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
          }
          status=SyncCacheViewAuthenticPixels(image_view,exception);
        }
      if (fabs(segment->y1-segment->y2) >= MagickEpsilon)
        {
          /*
            Top pixel: midpoint of the top edge.
          */
          y=(ssize_t) ceil(segment->y1-0.5);
          u=GetCacheViewVirtualPixels(u_view,(ssize_t) ceil(segment->x1-0.5),y,
            1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,(ssize_t) ceil(segment->x2-0.5),y,
            1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
          }
          status=SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((fabs(segment->x1-segment->x2) >= MagickEpsilon) ||
      (fabs(segment->y1-segment->y2) >= MagickEpsilon))
    {
      /*
        Middle pixel: blend the two opposite corners.
      */
      x=(ssize_t) ceil(segment->x1-0.5);
      y=(ssize_t) ceil(segment->y1-0.5);
      u=GetCacheViewVirtualPixels(u_view,x,y,1,1,exception);
      x=(ssize_t) ceil(segment->x2-0.5);
      y=(ssize_t) ceil(segment->y2-0.5);
      v=GetCacheViewVirtualPixels(v_view,x,y,1,1,exception);
      q=QueueCacheViewAuthenticPixels(image_view,x_mid,y_mid,1,1,exception);
      if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
          (q == (Quantum *) NULL))
        return(MagickTrue);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
      }
      status=SyncCacheViewAuthenticPixels(image_view,exception);
    }
  /*
    Report whether the area is now small enough to stop iterating.
  */
  if ((fabs(segment->x2-segment->x1) < 3.0) &&
      (fabs(segment->y2-segment->y1) < 3.0))
    return(status == 0 ? MagickFalse : MagickTrue);
  return(MagickFalse);
}
/*
  PlasmaImage() renders plasma-fractal noise over `segment` of `image` by
  delegating to the recursive PlasmaImageProxy() with freshly acquired cache
  views and a private random generator.  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType PlasmaImage(Image *image,
  const SegmentInfo *segment,size_t attenuate,size_t depth,
  ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *u_view,
    *v_view;

  MagickBooleanType
    status;

  RandomInfo
    *random_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* plasma writes per-pixel values, so the image must be DirectClass */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  image_view=AcquireAuthenticCacheView(image,exception);
  u_view=AcquireVirtualCacheView(image,exception);
  v_view=AcquireVirtualCacheView(image,exception);
  random_info=AcquireRandomInfo();
  status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,segment,
    attenuate,depth,exception);
  random_info=DestroyRandomInfo(random_info);
  v_view=DestroyCacheView(v_view);
  u_view=DestroyCacheView(u_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l a r o i d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolaroidImage() simulates a Polaroid picture.
%
% The format of the PolaroidImage method is:
%
% Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
% const char *caption,const double angle,
%      const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o caption: the Polaroid caption.
%
% o angle: Apply the effect along this angle.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
  const char *caption,const double angle,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
  Image
    *bend_image,
    *caption_image,
    *flop_image,
    *picture_image,
    *polaroid_image,
    *rotate_image,
    *trim_image;

  size_t
    height;

  ssize_t
    quantum;

  /*
    Simulate a Polaroid picture: add a border (with optional caption), bend
    it with a wave, add a shadow, then rotate and trim.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* border thickness: 1/25th of the longest side, at least 10 pixels */
  quantum=(ssize_t) MagickMax(MagickMax((double) image->columns,(double)
    image->rows)/25.0,10.0);
  height=image->rows+2*quantum;
  caption_image=(Image *) NULL;
  if (caption != (const char *) NULL)
    {
      char
        *text;

      /*
        Generate caption image.
      */
      caption_image=CloneImage(image,image->columns,1,MagickTrue,exception);
      if (caption_image == (Image *) NULL)
        return((Image *) NULL);
      text=InterpretImageProperties((ImageInfo *) NULL,(Image *) image,caption,
        exception);
      if (text != (char *) NULL)
        {
          char
            geometry[MagickPathExtent];

          DrawInfo
            *annotate_info;

          MagickBooleanType
            status;

          ssize_t
            count;

          TypeMetric
            metrics;

          annotate_info=CloneDrawInfo((const ImageInfo *) NULL,draw_info);
          (void) CloneString(&annotate_info->text,text);
          /* word-wrap the caption and measure it */
          count=FormatMagickCaption(caption_image,annotate_info,MagickTrue,
            &metrics,&text,exception);
          status=SetImageExtent(caption_image,image->columns,(size_t)
            ((count+1)*(metrics.ascent-metrics.descent)+0.5),exception);
          if (status == MagickFalse)
            caption_image=DestroyImage(caption_image);
          else
            {
              caption_image->background_color=image->border_color;
              (void) SetImageBackgroundColor(caption_image,exception);
              (void) CloneString(&annotate_info->text,text);
              (void) FormatLocaleString(geometry,MagickPathExtent,"+0+%.20g",
                metrics.ascent);
              if (annotate_info->gravity == UndefinedGravity)
                (void) CloneString(&annotate_info->geometry,geometry);
                /* fix: CloneString copies its source, so the previous
                   AcquireString(geometry) allocation leaked */
              (void) AnnotateImage(caption_image,annotate_info,exception);
              height+=caption_image->rows;
            }
          annotate_info=DestroyDrawInfo(annotate_info);
          text=DestroyString(text);
        }
    }
  /*
    Compose the picture: image (plus caption) centered on a border-colored
    canvas.
  */
  picture_image=CloneImage(image,image->columns+2*quantum,height,MagickTrue,
    exception);
  if (picture_image == (Image *) NULL)
    {
      if (caption_image != (Image *) NULL)
        caption_image=DestroyImage(caption_image);
      return((Image *) NULL);
    }
  picture_image->background_color=image->border_color;
  (void) SetImageBackgroundColor(picture_image,exception);
  (void) CompositeImage(picture_image,image,OverCompositeOp,MagickTrue,quantum,
    quantum,exception);
  if (caption_image != (Image *) NULL)
    {
      (void) CompositeImage(picture_image,caption_image,OverCompositeOp,
        MagickTrue,quantum,(ssize_t) (image->rows+3*quantum/2),exception);
      caption_image=DestroyImage(caption_image);
    }
  (void) QueryColorCompliance("none",AllCompliance,
    &picture_image->background_color,exception);
  (void) SetImageAlphaChannel(picture_image,OpaqueAlphaChannel,exception);
  /*
    Bend the picture: rotate 90, wave along the (now vertical) edge, rotate
    back.
  */
  rotate_image=RotateImage(picture_image,90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  bend_image=WaveImage(picture_image,0.01*picture_image->rows,2.0*
    picture_image->columns,method,exception);
  picture_image=DestroyImage(picture_image);
  if (bend_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=bend_image;
  rotate_image=RotateImage(picture_image,-90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  picture_image->background_color=image->background_color;
  /*
    Cast a soft shadow behind the picture and composite the picture over it.
  */
  polaroid_image=ShadowImage(picture_image,80.0,2.0,quantum/3,quantum/3,
    exception);
  if (polaroid_image == (Image *) NULL)
    {
      /* DestroyImage() returns NULL, so this returns NULL to the caller */
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  flop_image=FlopImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (flop_image == (Image *) NULL)
    {
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  polaroid_image=flop_image;
  (void) CompositeImage(polaroid_image,picture_image,OverCompositeOp,
    MagickTrue,(ssize_t) (-0.01*picture_image->columns/2.0),0L,exception);
  picture_image=DestroyImage(picture_image);
  (void) QueryColorCompliance("none",AllCompliance,
    &polaroid_image->background_color,exception);
  /*
    Final tilt and trim away the transparent margins.
  */
  rotate_image=RotateImage(polaroid_image,angle,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=rotate_image;
  trim_image=TrimImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (trim_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=trim_image;
  return(polaroid_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p i a T o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SepiaToneImage() applies a special effect to the image, similar to the
% effect achieved in a photo darkroom by sepia toning. Threshold ranges from
% 0 to QuantumRange and is a measure of the extent of the sepia toning. A
% threshold of 80% is a good starting point for a reasonable tone.
%
% The format of the SepiaToneImage method is:
%
% Image *SepiaToneImage(const Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: the tone threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SepiaToneImage(const Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define SepiaToneImageTag "SepiaTone/Image"

  CacheView
    *image_view,
    *sepia_view;

  Image
    *sepia_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize sepia-toned image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  sepia_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sepia_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(sepia_image,DirectClass,exception) == MagickFalse)
    {
      sepia_image=DestroyImage(sepia_image);
      return((Image *) NULL);
    }
  /*
    Tone each row of the image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  sepia_view=AcquireAuthenticCacheView(sepia_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,sepia_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(sepia_view,0,y,sepia_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        intensity,
        tone;

      intensity=GetPixelIntensity(image,p);
      /* push bright pixels toward warm highlights, darken blue */
      tone=intensity > threshold ? (double) QuantumRange : intensity+
        (double) QuantumRange-threshold;
      SetPixelRed(sepia_image,ClampToQuantum(tone),q);
      tone=intensity > (7.0*threshold/6.0) ? (double) QuantumRange :
        intensity+(double) QuantumRange-7.0*threshold/6.0;
      SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
      tone=intensity < (threshold/6.0) ? 0 : intensity-threshold/6.0;
      SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
      /*
        Floor green/blue at threshold/7; q addresses sepia_image's pixels,
        so read it back through sepia_image's channel map (previously
        interpreted through `image`, a wrong-object use that only worked
        because the clone shares the same map).
      */
      tone=threshold/7.0;
      if ((double) GetPixelGreen(sepia_image,q) < tone)
        SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
      if ((double) GetPixelBlue(sepia_image,q) < tone)
        SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
      SetPixelAlpha(sepia_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(sepia_image);
    }
    if (SyncCacheViewAuthenticPixels(sepia_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SepiaToneImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sepia_view=DestroyCacheView(sepia_view);
  image_view=DestroyCacheView(image_view);
  (void) NormalizeImage(sepia_image,exception);
  (void) ContrastImage(sepia_image,MagickTrue,exception);
  if (status == MagickFalse)
    sepia_image=DestroyImage(sepia_image);
  return(sepia_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a d o w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShadowImage() simulates a shadow from the specified image and returns it.
%
% The format of the ShadowImage method is:
%
% Image *ShadowImage(const Image *image,const double alpha,
% const double sigma,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o alpha: percentage transparency.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x_offset: the shadow x-offset.
%
% o y_offset: the shadow y-offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ShadowImage() builds the shadow as: clone the image, surround it with a
  transparent border sized from sigma, flatten every pixel to the background
  color (keeping a scaled alpha), then Gaussian-blur only the alpha channel.
  The order is load-bearing: the channel mask must be narrowed to alpha
  before BlurImage() and restored afterward, and the page offsets are
  adjusted at the end so the shadow lines up under the original.
*/
MagickExport Image *ShadowImage(const Image *image,const double alpha,
  const double sigma,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define ShadowImageTag "Shadow/Image"

  CacheView
    *image_view;

  ChannelType
    channel_mask;

  Image
    *border_image,
    *clone_image,
    *shadow_image;

  MagickBooleanType
    status;

  PixelInfo
    background_color;

  RectangleInfo
    border_info;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(clone_image,sRGBColorspace,exception);
  (void) SetImageVirtualPixelMethod(clone_image,EdgeVirtualPixelMethod,
    exception);
  /* 2*sigma of transparent border gives the blur room to spread */
  border_info.width=(size_t) floor(2.0*sigma+0.5);
  border_info.height=(size_t) floor(2.0*sigma+0.5);
  border_info.x=0;
  border_info.y=0;
  (void) QueryColorCompliance("none",AllCompliance,&clone_image->border_color,
    exception);
  clone_image->alpha_trait=BlendPixelTrait;
  border_image=BorderImage(clone_image,&border_info,OverCompositeOp,exception);
  clone_image=DestroyImage(clone_image);
  if (border_image == (Image *) NULL)
    return((Image *) NULL);
  if (border_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(border_image,OpaqueAlphaChannel,exception);
  /*
    Shadow image: overwrite every pixel with the background color, carrying
    the source alpha scaled by the requested percentage.
  */
  status=MagickTrue;
  background_color=border_image->background_color;
  background_color.alpha_trait=BlendPixelTrait;
  image_view=AcquireAuthenticCacheView(border_image,exception);
  for (y=0; y < (ssize_t) border_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,border_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) border_image->columns; x++)
    {
      if (border_image->alpha_trait != UndefinedPixelTrait)
        background_color.alpha=GetPixelAlpha(border_image,q)*alpha/100.0;
      SetPixelViaPixelInfo(border_image,&background_color,q);
      q+=GetPixelChannels(border_image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    {
      border_image=DestroyImage(border_image);
      return((Image *) NULL);
    }
  /* blur only the alpha channel, then restore the original mask */
  channel_mask=SetImageChannelMask(border_image,AlphaChannel);
  shadow_image=BlurImage(border_image,0.0,sigma,exception);
  border_image=DestroyImage(border_image);
  if (shadow_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetPixelChannelMask(shadow_image,channel_mask);
  if (shadow_image->page.width == 0)
    shadow_image->page.width=shadow_image->columns;
  if (shadow_image->page.height == 0)
    shadow_image->page.height=shadow_image->rows;
  /* shift the page so the shadow sits offset beneath the original */
  shadow_image->page.width+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.height+=y_offset-(ssize_t) border_info.height;
  shadow_image->page.x+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.y+=y_offset-(ssize_t) border_info.height;
  return(shadow_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S k e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SketchImage() simulates a pencil sketch. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma). For
% reasonable results, radius should be larger than sigma. Use a radius of 0
% and SketchImage() selects a suitable radius for you. Angle gives the angle
% of the sketch.
%
% The format of the SketchImage method is:
%
% Image *SketchImage(const Image *image,const double radius,
% const double sigma,const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the
% center pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o angle: apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SketchImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
  CacheView
    *random_view;

  Image
    *blend_image,
    *blur_image,
    *dodge_image,
    *random_image,
    *sketch_image;

  MagickBooleanType
    status;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Sketch image: fill a double-sized clone with random noise, motion-blur
    it along `angle`, edge-detect, then color-dodge the result over the
    original and soften with a translucent blend of the original.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  random_image=CloneImage(image,image->columns << 1,image->rows << 1,
    MagickTrue,exception);
  if (random_image == (Image *) NULL)
    return((Image *) NULL);
  status=MagickTrue;
  random_info=AcquireRandomInfoThreadSet();
  random_view=AcquireAuthenticCacheView(random_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* only parallelize when the generator is not seeded for reproducibility */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(random_image,random_image,random_image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) random_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(random_view,0,y,random_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) random_image->columns; x++)
    {
      double
        value;

      register ssize_t
        i;

      value=GetPseudoRandomValue(random_info[id]);
      /*
        Iterate and resolve channels against random_image — the buffer q
        actually belongs to (previously resolved via `image`, which only
        worked because the clone shares the same channel map).
      */
      for (i=0; i < (ssize_t) GetPixelChannels(random_image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(random_image,i);
        PixelTrait traits = GetPixelChannelTraits(random_image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=ClampToQuantum(QuantumRange*value);
      }
      q+=GetPixelChannels(random_image);
    }
    if (SyncCacheViewAuthenticPixels(random_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  random_view=DestroyCacheView(random_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    {
      /* DestroyImage() returns NULL, so this returns NULL to the caller */
      random_image=DestroyImage(random_image);
      return(random_image);
    }
  blur_image=MotionBlurImage(random_image,radius,sigma,angle,exception);
  random_image=DestroyImage(random_image);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  dodge_image=EdgeImage(blur_image,radius,exception);
  blur_image=DestroyImage(blur_image);
  if (dodge_image == (Image *) NULL)
    return((Image *) NULL);
  status=ClampImage(dodge_image,exception);
  if (status != MagickFalse)
    status=NormalizeImage(dodge_image,exception);
  if (status != MagickFalse)
    status=NegateImage(dodge_image,MagickFalse,exception);
  if (status != MagickFalse)
    status=TransformImage(&dodge_image,(char *) NULL,"50%",exception);
  sketch_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sketch_image == (Image *) NULL)
    {
      dodge_image=DestroyImage(dodge_image);
      return((Image *) NULL);
    }
  (void) CompositeImage(sketch_image,dodge_image,ColorDodgeCompositeOp,
    MagickTrue,0,0,exception);
  dodge_image=DestroyImage(dodge_image);
  blend_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blend_image == (Image *) NULL)
    {
      sketch_image=DestroyImage(sketch_image);
      return((Image *) NULL);
    }
  if (blend_image->alpha_trait != BlendPixelTrait)
    (void) SetImageAlpha(blend_image,TransparentAlpha,exception);
  /* 20% original / 80% sketch blend */
  (void) SetImageArtifact(blend_image,"compose:args","20x80");
  (void) CompositeImage(sketch_image,blend_image,BlendCompositeOp,MagickTrue,
    0,0,exception);
  blend_image=DestroyImage(blend_image);
  return(sketch_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S o l a r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SolarizeImage() applies a special effect to the image, similar to the effect
% achieved in a photo darkroom by selectively exposing areas of photo
% sensitive paper to light. Threshold ranges from 0 to QuantumRange and is a
% measure of the extent of the solarization.
%
% The format of the SolarizeImage method is:
%
% MagickBooleanType SolarizeImage(Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: Define the extent of the solarization.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SolarizeImage(Image *image,
  const double threshold,ExceptionInfo *exception)
{
#define SolarizeImageTag "Solarize/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Solarization negates channel values above the threshold; promote pure
    gray images to sRGB first so the per-channel inversion below is
    meaningful.
  */
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      /*
        Solarize colormap.  For palette images it suffices to invert the
        over-threshold colormap entries and resync the pixel cache.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((double) image->colormap[i].red > threshold)
          image->colormap[i].red=QuantumRange-image->colormap[i].red;
        if ((double) image->colormap[i].green > threshold)
          image->colormap[i].green=QuantumRange-image->colormap[i].green;
        if ((double) image->colormap[i].blue > threshold)
          image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
      }
      return(SyncImage(image,exception));
    }
  /*
    Solarize image.  DirectClass pixels are updated in place, one row per
    loop iteration, with rows distributed across OpenMP threads.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    /* another thread already failed; skip remaining rows quickly */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        /* only channels flagged as updatable participate (e.g. not alpha) */
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if ((double) q[i] > threshold)
          q[i]=QuantumRange-q[i];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SolarizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e g a n o I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SteganoImage() hides a digital watermark within the image.  Recover
% the hidden watermark later to prove the authenticity of an image.
% Offset defines the start position within the image to hide the watermark.
%
% The format of the SteganoImage method is:
%
% Image *SteganoImage(const Image *image,Image *watermark,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o watermark: the watermark image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SteganoImage(const Image *image,const Image *watermark,
  ExceptionInfo *exception)
{
#define GetBit(alpha,i) ((((size_t) (alpha) >> (size_t) (i)) & 0x01) != 0)
#define SetBit(alpha,i,set) (Quantum) ((set) != 0 ? (size_t) (alpha) \
  | (one << (size_t) (i)) : (size_t) (alpha) & ~(one << (size_t) (i)))
#define SteganoImageTag "Stegano/Image"

  CacheView
    *stegano_view,
    *watermark_view;

  Image
    *stegano_image;

  int
    c;

  MagickBooleanType
    status;

  PixelInfo
    pixel;

  register Quantum
    *q;

  register ssize_t
    x;

  size_t
    depth,
    one;

  ssize_t
    i,
    j,
    k,
    y;

  /*
    Initialize steganographic image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(watermark != (const Image *) NULL);
  assert(watermark->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  one=1UL;
  stegano_image=CloneImage(image,0,0,MagickTrue,exception);
  if (stegano_image == (Image *) NULL)
    return((Image *) NULL);
  stegano_image->depth=MAGICKCORE_QUANTUM_DEPTH;
  if (SetImageStorageClass(stegano_image,DirectClass,exception) == MagickFalse)
    {
      stegano_image=DestroyImage(stegano_image);
      return((Image *) NULL);
    }
  /*
    Hide watermark in low-order bits of image.  Each bit plane i of the
    watermark intensity (most significant first) is written into bit plane j
    of the carrier, one bit per pixel, cycling c through the red, green and
    blue channels.  k is the linear pixel cursor, seeded from the image
    offset so the watermark start position is adjustable.
  */
  c=0;
  i=0;
  j=0;
  depth=stegano_image->depth;
  k=stegano_image->offset;
  status=MagickTrue;
  watermark_view=AcquireVirtualCacheView(watermark,exception);
  stegano_view=AcquireAuthenticCacheView(stegano_image,exception);
  for (i=(ssize_t) depth-1; (i >= 0) && (j < (ssize_t) depth); i--)
  {
    for (y=0; (y < (ssize_t) watermark->rows) && (j < (ssize_t) depth); y++)
    {
      for (x=0; (x < (ssize_t) watermark->columns) && (j < (ssize_t) depth); x++)
      {
        ssize_t
          offset;

        (void) GetOneCacheViewVirtualPixelInfo(watermark_view,x,y,&pixel,
          exception);
        offset=k/(ssize_t) stegano_image->columns;
        /* cursor ran past the last carrier row: no room for more bits */
        if (offset >= (ssize_t) stegano_image->rows)
          break;
        q=GetCacheViewAuthenticPixels(stegano_view,k % (ssize_t)
          stegano_image->columns,k/(ssize_t) stegano_image->columns,1,1,
          exception);
        if (q == (Quantum *) NULL)
          break;
        switch (c)
        {
          case 0:
          {
            SetPixelRed(stegano_image,SetBit(GetPixelRed(stegano_image,q),j,
              GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
          case 1:
          {
            SetPixelGreen(stegano_image,SetBit(GetPixelGreen(stegano_image,q),j,
              GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
          case 2:
          {
            SetPixelBlue(stegano_image,SetBit(GetPixelBlue(stegano_image,q),j,
              GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
        }
        if (SyncCacheViewAuthenticPixels(stegano_view,exception) == MagickFalse)
          break;
        c++;
        if (c == 3)
          c=0;
        k++;
        /*
          Wrap the pixel cursor at the total pixel count (columns*rows) —
          previously columns*columns, which mis-wrapped non-square images.
        */
        if (k == (ssize_t) (stegano_image->columns*stegano_image->rows))
          k=0;
        /* a full lap back to the start means this bit plane is exhausted */
        if (k == stegano_image->offset)
          j++;
      }
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,SteganoImageTag,(MagickOffsetType)
          (depth-i),depth);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  stegano_view=DestroyCacheView(stegano_view);
  watermark_view=DestroyCacheView(watermark_view);
  if (status == MagickFalse)
    stegano_image=DestroyImage(stegano_image);
  return(stegano_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e r e o A n a g l y p h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StereoAnaglyphImage() combines two images and produces a single image that
% is the composite of a left and right image of a stereo pair. Special
% red-green stereo glasses are required to view this effect.
%
% The format of the StereoAnaglyphImage method is:
%
% Image *StereoImage(const Image *left_image,const Image *right_image,
% ExceptionInfo *exception)
% Image *StereoAnaglyphImage(const Image *left_image,
% const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o left_image: the left image.
%
% o right_image: the right image.
%
% o exception: return any errors or warnings in this structure.
%
% o x_offset: amount, in pixels, by which the left image is offset to the
% right of the right image.
%
% o y_offset: amount, in pixels, by which the left image is offset to the
% bottom of the right image.
%
%
*/
MagickExport Image *StereoImage(const Image *left_image,
  const Image *right_image,ExceptionInfo *exception)
{
  Image
    *anaglyph_image;

  /*
    Convenience entry point: a stereo anaglyph with no horizontal or
    vertical displacement between the left and right frames.
  */
  anaglyph_image=StereoAnaglyphImage(left_image,right_image,0,0,exception);
  return(anaglyph_image);
}
MagickExport Image *StereoAnaglyphImage(const Image *left_image,
  const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define StereoImageTag "Stereo/Image"

  const Image
    *image;

  Image
    *stereo_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(left_image != (const Image *) NULL);
  assert(left_image->signature == MagickCoreSignature);
  if (left_image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      left_image->filename);
  assert(right_image != (const Image *) NULL);
  assert(right_image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=left_image;
  if ((left_image->columns != right_image->columns) ||
      (left_image->rows != right_image->rows))
    ThrowImageException(ImageError,"LeftAndRightImageSizesDiffer");
  /*
    Initialize stereo image attributes.
  */
  stereo_image=CloneImage(left_image,left_image->columns,left_image->rows,
    MagickTrue,exception);
  if (stereo_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(stereo_image,DirectClass,exception) == MagickFalse)
    {
      stereo_image=DestroyImage(stereo_image);
      return((Image *) NULL);
    }
  (void) SetImageColorspace(stereo_image,sRGBColorspace,exception);
  /*
    Copy left image to red channel and right image to blue channel.  The
    left image is sampled with the requested displacement (negated so a
    positive offset shifts it to the right/bottom); green and blue both
    come from the right image, alpha is the average of the two.
  */
  status=MagickTrue;
  for (y=0; y < (ssize_t) stereo_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    register Quantum
      *magick_restrict r;

    p=GetVirtualPixels(left_image,-x_offset,y-y_offset,image->columns,1,
      exception);
    q=GetVirtualPixels(right_image,0,y,right_image->columns,1,exception);
    r=QueueAuthenticPixels(stereo_image,0,y,stereo_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL) ||
        (r == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) stereo_image->columns; x++)
    {
      SetPixelRed(stereo_image,GetPixelRed(left_image,p),r);
      SetPixelGreen(stereo_image,GetPixelGreen(right_image,q),r);
      SetPixelBlue(stereo_image,GetPixelBlue(right_image,q),r);
      if ((GetPixelAlphaTraits(stereo_image) & CopyPixelTrait) != 0)
        SetPixelAlpha(stereo_image,(GetPixelAlpha(left_image,p)+
          GetPixelAlpha(right_image,q))/2,r);
      p+=GetPixelChannels(left_image);
      q+=GetPixelChannels(right_image);
      r+=GetPixelChannels(stereo_image);
    }
    if (SyncAuthenticPixels(stereo_image,exception) == MagickFalse)
      break;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,StereoImageTag,(MagickOffsetType) y,
          stereo_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  if (status == MagickFalse)
    stereo_image=DestroyImage(stereo_image);
  return(stereo_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S w i r l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SwirlImage() swirls the pixels about the center of the image, where
% degrees indicates the sweep of the arc through which each pixel is moved.
% You get a more dramatic effect as the degrees move from 1 to 360.
%
% The format of the SwirlImage method is:
%
% Image *SwirlImage(const Image *image,double degrees,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o degrees: Define the tightness of the swirling effect.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SwirlImage(const Image *image,double degrees,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define SwirlImageTag "Swirl/Image"

  CacheView
    *canvas_view,
    *interpolate_view,
    *swirl_view;

  double
    radius;

  Image
    *canvas_image,
    *swirl_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize swirl image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas_image=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  swirl_image=CloneImage(canvas_image,0,0,MagickTrue,exception);
  if (swirl_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(swirl_image,DirectClass,exception) == MagickFalse)
    {
      canvas_image=DestroyImage(canvas_image);
      swirl_image=DestroyImage(swirl_image);
      return((Image *) NULL);
    }
  if (swirl_image->background_color.alpha_trait != UndefinedPixelTrait)
    (void) SetImageAlphaChannel(swirl_image,OnAlphaChannel,exception);
  /*
    Compute scaling factor.  The non-square image is mapped onto a circle:
    the shorter axis is stretched so the swirl region is an ellipse
    inscribed in the image bounds.
  */
  center.x=(double) canvas_image->columns/2.0;
  center.y=(double) canvas_image->rows/2.0;
  radius=MagickMax(center.x,center.y);
  scale.x=1.0;
  scale.y=1.0;
  if (canvas_image->columns > canvas_image->rows)
    scale.y=(double) canvas_image->columns/(double) canvas_image->rows;
  else
    if (canvas_image->columns < canvas_image->rows)
      scale.x=(double) canvas_image->rows/(double) canvas_image->columns;
  degrees=(double) DegreesToRadians(degrees);
  /*
    Swirl image.  Each destination pixel inside the ellipse is rotated
    about the center by degrees*factor^2, where factor falls from 1 at the
    center to 0 at the radius, then sampled from the source by
    interpolation.
  */
  status=MagickTrue;
  progress=0;
  canvas_view=AcquireVirtualCacheView(canvas_image,exception);
  interpolate_view=AcquireVirtualCacheView(image,exception);
  swirl_view=AcquireAuthenticCacheView(swirl_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(canvas_image,swirl_image,canvas_image->rows,1)
#endif
  for (y=0; y < (ssize_t) canvas_image->rows; y++)
  {
    double
      distance;

    PointInfo
      delta;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(swirl_view,0,y,swirl_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    delta.y=scale.y*(double) (y-center.y);
    for (x=0; x < (ssize_t) canvas_image->columns; x++)
    {
      /*
        Determine if the pixel is within an ellipse.
      */
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance >= (radius*radius))
        {
          register ssize_t
            i;

          /* outside the swirl ellipse: copy the pixel through unchanged */
          for (i=0; i < (ssize_t) GetPixelChannels(canvas_image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(canvas_image,i);
            PixelTrait traits = GetPixelChannelTraits(canvas_image,channel);
            PixelTrait swirl_traits = GetPixelChannelTraits(swirl_image,
              channel);
            if ((traits == UndefinedPixelTrait) ||
                (swirl_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(swirl_image,channel,p[i],q);
          }
        }
      else
        {
          double
            cosine,
            factor,
            sine;

          /*
            Swirl the pixel: the rotation angle decays quadratically with
            the normalized distance from the center.
          */
          factor=1.0-sqrt((double) distance)/radius;
          sine=sin((double) (degrees*factor*factor));
          cosine=cos((double) (degrees*factor*factor));
          status=InterpolatePixelChannels(canvas_image,interpolate_view,
            swirl_image,method,((cosine*delta.x-sine*delta.y)/scale.x+center.x),
            (double) ((sine*delta.x+cosine*delta.y)/scale.y+center.y),q,
            exception);
          if (status == MagickFalse)
            break;
        }
      p+=GetPixelChannels(canvas_image);
      q+=GetPixelChannels(swirl_image);
    }
    if (SyncCacheViewAuthenticPixels(swirl_view,exception) == MagickFalse)
      status=MagickFalse;
    if (canvas_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(canvas_image,SwirlImageTag,progress,
          canvas_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  swirl_view=DestroyCacheView(swirl_view);
  interpolate_view=DestroyCacheView(interpolate_view);
  canvas_view=DestroyCacheView(canvas_view);
  canvas_image=DestroyImage(canvas_image);
  if (status == MagickFalse)
    swirl_image=DestroyImage(swirl_image);
  return(swirl_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TintImage() applies a color vector to each pixel in the image. The length
% of the vector is 0 for black and white and at its maximum for the midtones.
% The vector weighting function is f(x)=(1-(4.0*((x-0.5)*(x-0.5))))
%
% The format of the TintImage method is:
%
% Image *TintImage(const Image *image,const char *blend,
% const PixelInfo *tint,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o blend: A color value used for tinting.
%
% o tint: A color value used for tinting.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TintImage(const Image *image,const char *blend,
  const PixelInfo *tint,ExceptionInfo *exception)
{
#define TintImageTag "Tint/Image"

  CacheView
    *image_view,
    *tint_view;

  double
    intensity;

  GeometryInfo
    geometry_info;

  Image
    *tint_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    color_vector;

  MagickStatusType
    flags;

  ssize_t
    y;

  /*
    Allocate tint image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  tint_image=CloneImage(image,0,0,MagickTrue,exception);
  if (tint_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(tint_image,DirectClass,exception) == MagickFalse)
    {
      tint_image=DestroyImage(tint_image);
      return((Image *) NULL);
    }
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsPixelInfoGray(tint) == MagickFalse))
    (void) SetImageColorspace(tint_image,sRGBColorspace,exception);
  /* no blend geometry: the clone is returned untinted */
  if (blend == (const char *) NULL)
    return(tint_image);
  /*
    Determine RGB values of the color.  The blend string is a geometry
    "r[xg[xb[xa]]]" of per-channel percentages; a single value applies to
    every channel.
  */
  GetPixelInfo(image,&color_vector);
  flags=ParseGeometry(blend,&geometry_info);
  color_vector.red=geometry_info.rho;
  color_vector.green=geometry_info.rho;
  color_vector.blue=geometry_info.rho;
  color_vector.alpha=(MagickRealType) OpaqueAlpha;
  if ((flags & SigmaValue) != 0)
    color_vector.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    color_vector.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    color_vector.alpha=geometry_info.psi;
  if (image->colorspace == CMYKColorspace)
    {
      /* CMYK shifts the slots: psi is black, chi is alpha */
      color_vector.black=geometry_info.rho;
      if ((flags & PsiValue) != 0)
        color_vector.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        color_vector.alpha=geometry_info.chi;
    }
  /*
    Convert the percentages into a signed per-channel color vector,
    centered by subtracting the tint color's intensity.
  */
  intensity=(double) GetPixelInfoIntensity((const Image *) NULL,tint);
  color_vector.red=(double) (color_vector.red*tint->red/100.0-intensity);
  color_vector.green=(double) (color_vector.green*tint->green/100.0-intensity);
  color_vector.blue=(double) (color_vector.blue*tint->blue/100.0-intensity);
  color_vector.black=(double) (color_vector.black*tint->black/100.0-intensity);
  color_vector.alpha=(double) (color_vector.alpha*tint->alpha/100.0-intensity);
  /*
    Tint image.  Each channel is offset by the color vector weighted by
    f(x)=1-4*(x-1/2)^2, which is zero at black and white and maximal at the
    midtones; alpha is copied through unchanged.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  tint_view=AcquireAuthenticCacheView(tint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,tint_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(tint_view,0,y,tint_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelInfo
        pixel;

      double
        weight;

      GetPixelInfo(image,&pixel);
      weight=QuantumScale*GetPixelRed(image,p)-0.5;
      pixel.red=(MagickRealType) GetPixelRed(image,p)+color_vector.red*
        (1.0-(4.0*(weight*weight)));
      weight=QuantumScale*GetPixelGreen(image,p)-0.5;
      pixel.green=(MagickRealType) GetPixelGreen(image,p)+color_vector.green*
        (1.0-(4.0*(weight*weight)));
      weight=QuantumScale*GetPixelBlue(image,p)-0.5;
      pixel.blue=(MagickRealType) GetPixelBlue(image,p)+color_vector.blue*
        (1.0-(4.0*(weight*weight)));
      weight=QuantumScale*GetPixelBlack(image,p)-0.5;
      pixel.black=(MagickRealType) GetPixelBlack(image,p)+color_vector.black*
        (1.0-(4.0*(weight*weight)));
      pixel.alpha=(MagickRealType) GetPixelAlpha(image,p);
      SetPixelViaPixelInfo(tint_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(tint_image);
    }
    if (SyncCacheViewAuthenticPixels(tint_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,TintImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  tint_view=DestroyCacheView(tint_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    tint_image=DestroyImage(tint_image);
  return(tint_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% V i g n e t t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% VignetteImage() softens the edges of the image in vignette style.
%
% The format of the VignetteImage method is:
%
% Image *VignetteImage(const Image *image,const double radius,
% const double sigma,const ssize_t x,const ssize_t y,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x, y: Define the x and y ellipse offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *VignetteImage(const Image *image,const double radius,
  const double sigma,const ssize_t x,const ssize_t y,ExceptionInfo *exception)
{
  char
    ellipse[MagickPathExtent];

  DrawInfo
    *draw_info;

  Image
    *canvas,
    *blur_image,
    *oval_image,
    *vignette_image;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(canvas,DirectClass,exception) == MagickFalse)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  /* the mask is applied through the alpha channel, so enable blending */
  canvas->alpha_trait=BlendPixelTrait;
  /*
    Build the mask: a white ellipse (shrunk by the x/y offsets) on a black
    field, then Gaussian-blurred to soften the vignette edge.
  */
  oval_image=CloneImage(canvas,canvas->columns,canvas->rows,MagickTrue,
    exception);
  if (oval_image == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  (void) QueryColorCompliance("#000000",AllCompliance,
    &oval_image->background_color,exception);
  (void) SetImageBackgroundColor(oval_image,exception);
  draw_info=CloneDrawInfo((const ImageInfo *) NULL,(const DrawInfo *) NULL);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->stroke,
    exception);
  (void) FormatLocaleString(ellipse,MagickPathExtent,"ellipse %g,%g,%g,%g,"
    "0.0,360.0",image->columns/2.0,image->rows/2.0,image->columns/2.0-x,
    image->rows/2.0-y);
  draw_info->primitive=AcquireString(ellipse);
  (void) DrawImage(oval_image,draw_info,exception);
  draw_info=DestroyDrawInfo(draw_info);
  blur_image=BlurImage(oval_image,radius,sigma,exception);
  oval_image=DestroyImage(oval_image);
  if (blur_image == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  /*
    Composite the blurred mask intensity into the canvas alpha channel,
    then flatten against the background to produce the vignette.
  */
  blur_image->alpha_trait=UndefinedPixelTrait;
  (void) CompositeImage(canvas,blur_image,IntensityCompositeOp,MagickTrue,
    0,0,exception);
  blur_image=DestroyImage(blur_image);
  vignette_image=MergeImageLayers(canvas,FlattenLayer,exception);
  canvas=DestroyImage(canvas);
  /* restore the caller's colorspace, which flattening may have changed */
  if (vignette_image != (Image *) NULL)
    (void) TransformImageColorspace(vignette_image,image->colorspace,exception);
  return(vignette_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveImage() creates a "ripple" effect in the image by shifting the pixels
% vertically along a sine wave whose amplitude and wavelength is specified
% by the given parameters.
%
% The format of the WaveImage method is:
%
% Image *WaveImage(const Image *image,const double amplitude,
% const double wave_length,const PixelInterpolateMethod method,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o amplitude, wave_length: Define the amplitude and wave length of the
% sine wave.
%
% o interpolate: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *WaveImage(const Image *image,const double amplitude,
  const double wave_length,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
#define WaveImageTag "Wave/Image"

  CacheView
    *canvas_image_view,
    *wave_view;

  float
    *sine_map;

  Image
    *canvas_image,
    *wave_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Initialize wave image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas_image=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  if ((canvas_image->alpha_trait == UndefinedPixelTrait) &&
      (canvas_image->background_color.alpha != OpaqueAlpha))
    (void) SetImageAlpha(canvas_image,OpaqueAlpha,exception);
  /*
    The result is taller by 2*|amplitude| so the shifted rows never fall
    outside the destination.
  */
  wave_image=CloneImage(canvas_image,canvas_image->columns,(size_t)
    (canvas_image->rows+2.0*fabs(amplitude)),MagickTrue,exception);
  if (wave_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(wave_image,DirectClass,exception) == MagickFalse)
    {
      canvas_image=DestroyImage(canvas_image);
      wave_image=DestroyImage(wave_image);
      return((Image *) NULL);
    }
  /*
    Allocate sine map: one precomputed vertical displacement per column,
    biased by |amplitude| so every entry is non-negative.
  */
  sine_map=(float *) AcquireQuantumMemory((size_t) wave_image->columns,
    sizeof(*sine_map));
  if (sine_map == (float *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      wave_image=DestroyImage(wave_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  for (i=0; i < (ssize_t) wave_image->columns; i++)
    sine_map[i]=(float) fabs(amplitude)+amplitude*sin((double)
      ((2.0*MagickPI*i)/wave_length));
  /*
    Wave image.  Pixels sampled beyond the source rows resolve to the
    background color via the virtual pixel method set below.
  */
  status=MagickTrue;
  progress=0;
  canvas_image_view=AcquireVirtualCacheView(canvas_image,exception);
  wave_view=AcquireAuthenticCacheView(wave_image,exception);
  (void) SetCacheViewVirtualPixelMethod(canvas_image_view,
    BackgroundVirtualPixelMethod);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(canvas_image,wave_image,wave_image->rows,1)
#endif
  for (y=0; y < (ssize_t) wave_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(canvas_image_view,0,y,canvas_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(wave_view,0,y,wave_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) wave_image->columns; x++)
    {
      /* sample the source at (x, y - sine_map[x]) by interpolation */
      status=InterpolatePixelChannels(canvas_image,canvas_image_view,
        wave_image,method,(double) x,(double) (y-sine_map[x]),q,exception);
      if (status == MagickFalse)
        break;
      p+=GetPixelChannels(canvas_image);
      q+=GetPixelChannels(wave_image);
    }
    if (SyncCacheViewAuthenticPixels(wave_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(canvas_image,WaveImageTag,progress,
          canvas_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  wave_view=DestroyCacheView(wave_view);
  canvas_image_view=DestroyCacheView(canvas_image_view);
  canvas_image=DestroyImage(canvas_image);
  sine_map=(float *) RelinquishMagickMemory(sine_map);
  if (status == MagickFalse)
    wave_image=DestroyImage(wave_image);
  return(wave_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e l e t D e n o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveletDenoiseImage() removes noise from the image using a wavelet
% transform.  The wavelet transform is a fast hierarchical scheme for
% processing an image using a set of consecutive low-pass and high-pass
% filters, followed by a decimation.  This results in a decomposition into
% different scales which can be regarded as different "frequency bands",
% determined by the mother wavelet.  Adapted from dcraw.c by David Coffin.
%
% The format of the WaveletDenoiseImage method is:
%
% Image *WaveletDenoiseImage(const Image *image,const double threshold,
% const double softness,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: set the threshold for smoothing.
%
% o softness: attenuate the smoothing threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void HatTransform(const float *magick_restrict pixels,
  const size_t stride,const size_t extent,const size_t scale,float *kernel)
{
  /*
    One level of the a-trous "hat" smoothing filter: for every tap n,
    kernel[n] = 0.25*(2*pixels[n] + pixels[n-scale] + pixels[n+scale]),
    with out-of-range neighbors reflected back into the run.  Samples are
    spaced `stride` floats apart so rows and columns share this routine.
  */
  const float
    *magick_restrict ahead,
    *magick_restrict center,
    *magick_restrict mirror;

  register ssize_t
    n;

  /*
    Leading edge: the left neighbor falls off the run, so mirror it.
  */
  center=pixels;
  mirror=pixels+scale*stride;
  ahead=pixels+scale*stride;
  for (n=0; n < (ssize_t) scale; n++)
  {
    kernel[n]=0.25f*(*center+(*center)+(*mirror)+(*ahead));
    center+=stride;
    mirror-=stride;
    ahead+=stride;
  }
  /*
    Interior: both neighbors are in range.
  */
  for ( ; n < (ssize_t) (extent-scale); n++)
  {
    kernel[n]=0.25f*(2.0f*(*center)+center[-(ssize_t) (scale*stride)]+
      center[scale*stride]);
    center+=stride;
  }
  /*
    Trailing edge: the right neighbor falls off the run, so mirror it.
  */
  mirror=center-scale*stride;
  ahead=pixels+stride*(extent-2);
  for ( ; n < (ssize_t) extent; n++)
  {
    kernel[n]=0.25f*(*center+(*center)+(*mirror)+(*ahead));
    center+=stride;
    mirror+=stride;
    ahead-=stride;
  }
}
MagickExport Image *WaveletDenoiseImage(const Image *image,
  const double threshold,const double softness,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *noise_view;

  float
    *kernel,
    *pixels;

  Image
    *noise_image;

  MagickBooleanType
    status;

  MagickSizeType
    number_pixels;

  MemoryInfo
    *pixels_info;

  ssize_t
    channel;

  /*
    Per-level scaling of the user threshold applied to the detail
    coefficients of each of the 5 decomposition levels.
  */
  static const float
    noise_levels[] = { 0.8002f, 0.2735f, 0.1202f, 0.0585f, 0.0291f, 0.0152f,
      0.0080f, 0.0044f };

  /*
    Initialize noise image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Prefer the accelerated (OpenCL) path; fall through to the CPU
    implementation when it declines or fails.
  */
  noise_image=AccelerateWaveletDenoiseImage(image,threshold,exception);
  if (noise_image != (Image *) NULL)
    return(noise_image);
#endif
  noise_image=CloneImage(image,0,0,MagickTrue,exception);
  if (noise_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse)
    {
      noise_image=DestroyImage(noise_image);
      return((Image *) NULL);
    }
  if (AcquireMagickResource(WidthResource,4*image->columns) == MagickFalse)
    {
      /*
        Fix: the cloned noise_image was previously leaked on this error
        path; destroy it before throwing.
      */
      noise_image=DestroyImage(noise_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    'pixels' holds three image-sized float planes: plane 0 is the working
    signal, planes 1 and 2 alternate as scratch for each level's smooth
    approximation.  'kernel' provides one scanline-sized buffer per OpenMP
    thread.
  */
  pixels_info=AcquireVirtualMemory(3*image->columns,image->rows*
    sizeof(*pixels));
  kernel=(float *) AcquireQuantumMemory(MagickMax(image->rows,image->columns)+1,
    GetOpenMPMaximumThreads()*sizeof(*kernel));
  if ((pixels_info == (MemoryInfo *) NULL) || (kernel == (float *) NULL))
    {
      if (kernel != (float *) NULL)
        kernel=(float *) RelinquishMagickMemory(kernel);
      if (pixels_info != (MemoryInfo *) NULL)
        pixels_info=RelinquishVirtualMemory(pixels_info);
      /*
        Fix: the cloned noise_image was previously leaked on this error
        path; destroy it before throwing.
      */
      noise_image=DestroyImage(noise_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(float *) GetVirtualMemoryBlob(pixels_info);
  status=MagickTrue;
  number_pixels=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireAuthenticCacheView(image,exception);
  noise_view=AcquireAuthenticCacheView(noise_image,exception);
  /*
    Denoise each of the R, G, B channels independently; all other channels
    are passed through untouched (they were copied by CloneImage above).
  */
  for (channel=0; channel < (ssize_t) GetPixelChannels(image); channel++)
  {
    register ssize_t
      i;

    size_t
      high_pass,
      low_pass;

    ssize_t
      level,
      y;

    PixelChannel
      pixel_channel;

    PixelTrait
      traits;

    if (status == MagickFalse)
      continue;
    traits=GetPixelChannelTraits(image,(PixelChannel) channel);
    if (traits == UndefinedPixelTrait)
      continue;
    pixel_channel=GetPixelChannelChannel(image,channel);
    if ((pixel_channel != RedPixelChannel) &&
        (pixel_channel != GreenPixelChannel) &&
        (pixel_channel != BluePixelChannel))
      continue;
    /*
      Copy channel from image to wavelet pixel array.
    */
    i=0;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const Quantum
        *magick_restrict p;

      ssize_t
        x;

      p=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        pixels[i++]=(float) p[channel];
        p+=GetPixelChannels(image);
      }
    }
    /*
      Low pass filter outputs are called approximation kernel & high pass
      filters are referred to as detail kernel. The detail kernel
      have high values in the noisy parts of the signal.
    */
    high_pass=0;
    for (level=0; level < 5; level++)
    {
      double
        magnitude;

      ssize_t
        x,
        y;

      /*
        Alternate between scratch planes 1 and 2 for the smooth result of
        this level; high_pass still points at the previous level's plane.
      */
      low_pass=(size_t) (number_pixels*((level & 0x01)+1));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,1) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        register float
          *magick_restrict p,
          *magick_restrict q;

        register ssize_t
          x;

        /*
          Horizontal hat transform of each row into this thread's kernel
          buffer, then copy the smoothed row into the low-pass plane.
        */
        p=kernel+id*image->columns;
        q=pixels+y*image->columns;
        HatTransform(q+high_pass,1,image->columns,(size_t) (1UL << level),p);
        q+=low_pass;
        for (x=0; x < (ssize_t) image->columns; x++)
          *q++=(*p++);
      }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,1) \
        magick_number_threads(image,image,image->columns,1)
#endif
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        const int
          id = GetOpenMPThreadId();

        register float
          *magick_restrict p,
          *magick_restrict q;

        register ssize_t
          y;

        /*
          Vertical hat transform of each column of the low-pass plane,
          written back in place (stride = row width).
        */
        p=kernel+id*image->rows;
        q=pixels+x+low_pass;
        HatTransform(q,image->columns,image->rows,(size_t) (1UL << level),p);
        for (y=0; y < (ssize_t) image->rows; y++)
        {
          *q=(*p++);
          q+=image->columns;
        }
      }
      /*
        To threshold, each coefficient is compared to a threshold value and
        attenuated / shrunk by some factor.
      */
      magnitude=threshold*noise_levels[level];
      for (i=0; i < (ssize_t) number_pixels; ++i)
      {
        /*
          Detail coefficient = previous plane minus its smooth version;
          soft-threshold it, then accumulate it back onto plane 0 (except
          at level 0, where high_pass==0 is plane 0 itself).
        */
        pixels[high_pass+i]-=pixels[low_pass+i];
        if (pixels[high_pass+i] < -magnitude)
          pixels[high_pass+i]+=magnitude-softness*magnitude;
        else
          if (pixels[high_pass+i] > magnitude)
            pixels[high_pass+i]-=magnitude-softness*magnitude;
          else
            pixels[high_pass+i]*=softness;
        if (high_pass != 0)
          pixels[i]+=pixels[high_pass+i];
      }
      high_pass=low_pass;
    }
    /*
      Reconstruct image from the thresholded wavelet kernel.
    */
    i=0;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      register Quantum
        *magick_restrict q;

      register ssize_t
        x;

      ssize_t
        offset;

      q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
        exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      offset=GetPixelChannelOffset(noise_image,pixel_channel);
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        MagickRealType
          pixel;

        /*
          Sum of the thresholded details (plane 0) and the final smooth
          approximation (last low-pass plane).
        */
        pixel=(MagickRealType) pixels[i]+pixels[low_pass+i];
        q[offset]=ClampToQuantum(pixel);
        i++;
        q+=GetPixelChannels(noise_image);
      }
      sync=SyncCacheViewAuthenticPixels(noise_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,AddNoiseImageTag,(MagickOffsetType)
          channel,GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  noise_view=DestroyCacheView(noise_view);
  image_view=DestroyCacheView(image_view);
  kernel=(float *) RelinquishMagickMemory(kernel);
  pixels_info=RelinquishVirtualMemory(pixels_info);
  if (status == MagickFalse)
    noise_image=DestroyImage(noise_image);
  return(noise_image);
}
|
omp-demo.c | #include <omp.h>
/* number of iterations distributed by each taskloop below */
#define LOOPS 2
int main(void)
{
  /*
    Demo: generating a taskloop from inside a parallel region.
    Fix: the task-generating block is wrapped in '#pragma omp single' so
    that only ONE thread of the team creates the task; previously every
    thread of the 3-thread team generated its own task, executing the
    taskloop's iteration space three times.  The loop variable is declared
    in the loop header so it is unambiguously private to the construct.
  */
#pragma omp parallel num_threads(3)
  {
#pragma omp single
    {
#pragma omp task
      {
#pragma omp taskloop
        for (int i = 0; i < LOOPS; i++)
        {
        }
      }
    }
  }
#pragma omp parallel num_threads(3)
  {
#pragma omp single
    {
#pragma omp task
      {
#pragma omp taskloop
        for (int i = 0; i < LOOPS; i++)
        {
        }
      }
    }
  }
  return 0;
}
|
init_state_setter.c | /*
This source file is part of the Geophysical Fluids Modeling Framework (GAME), which is released under the MIT license.
Github repository: https://github.com/OpenNWP/GAME
*/
/*
In this file, the initial state of the simulation is read in from a netcdf file.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <netcdf.h>
#include <atmostracers.h>
#include "../game_types.h"
#include "../game_constants.h"
#include "../spatial_operators/spatial_operators.h"
#include "../thermodynamics/thermodynamics.h"
#include "../../grid_generator/src/standard.h"
/* Aborts the run with a readable netcdf error message whenever an nc_* call fails. */
#define NCERR(e) {printf("Error: %s\n", nc_strerror(e)); exit(2);}
/* forward declaration; the function is defined at the bottom of this file */
int set_soil_temp(Grid *, Soil *, State *, double [], char []);
int set_ideal_init(State *state, Grid* grid, Dualgrid* dualgrid, Soil *soil, Diagnostics *diagnostics, Forcings *forcings, int ideal_input_id, char grid_file[])
{
    /*
    This function sets the initial state of the model atmosphere for idealized test cases.
    ideal_input_id selects the test case: 0 = standard atmosphere (no wind),
    1 = dry Ullrich (baroclinic wave) test, 2 = moist Ullrich test.
    grid_file is the netcdf grid file from which the edge coordinates are read.
    Returns 0 on success; any netcdf failure aborts the program via NCERR.
    */
    double *pressure = malloc(NO_OF_SCALARS*sizeof(double));
    double *temperature = malloc(NO_OF_SCALARS*sizeof(double));
    // calloc: the water vapour density defaults to zero (dry test cases never touch it)
    double *water_vapour_density = calloc(NO_OF_SCALARS, sizeof(double));
    double z_height;
    double lat, lon, u, v, pressure_value, specific_humidity, total_density;
    // dummy argument
    double dummy_0 = 0.0;
    double dummy_1 = 0.0;
    double dummy_2 = 0.0;
    double dummy_3 = 0.0;
    double dummy_4 = 0.0;
    double dummy_5 = 0.0;
    double dummy_6 = 0.0;
    int layer_index, h_index;
    // constant in/out flags passed by reference to the Fortran-style test routine
    int zero = 0;
    int one = 1;
    double one_double = 1;
    // 3D scalar fields determined here, apart from density
    #pragma omp parallel for private(layer_index, h_index, lat, lon, z_height, total_density, specific_humidity)
    for (int i = 0; i < NO_OF_SCALARS; ++i)
    {
        // decompose the linear scalar index into layer and horizontal cell
        layer_index = i/NO_OF_SCALARS_H;
        h_index = i - layer_index*NO_OF_SCALARS_H;
        lat = grid -> latitude_scalar[h_index];
        lon = grid -> longitude_scalar[h_index];
        z_height = grid -> z_scalar[i];
        // standard atmosphere
        if (ideal_input_id == 0)
        {
            temperature[i] = standard_temp(z_height);
            pressure[i] = standard_pres(z_height);
        }
        // dry Ullrich test
        if (ideal_input_id == 1)
        {
            baroclinic_wave_test(&one, &zero, &one, &one_double, &lon, &lat, &pressure[i], &z_height, &one, &dummy_0, &dummy_1, &temperature[i],
            &dummy_2, &dummy_3, &dummy_4, &dummy_5, &dummy_6);
        }
        // moist Ullrich test
        if (ideal_input_id == 2)
        {
            // NOTE(review): this call requests total density and specific humidity in the
            // trailing arguments (the argument list differs from the dry branch) -- confirm
            // against the baroclinic_wave_test interface
            baroclinic_wave_test(&one, &one, &one, &one_double, &lon, &lat, &pressure[i], &z_height, &one, &dummy_0, &dummy_1, &temperature[i],
            &dummy_2, &dummy_4, &dummy_5, &total_density, &specific_humidity);
            water_vapour_density[i] = total_density*specific_humidity;
        }
    }
    // horizontal wind fields are determind here
    // reading the grid properties which are not part of the struct grid
    double *latitude_vector = malloc(NO_OF_VECTORS_H*sizeof(double));
    double *longitude_vector = malloc(NO_OF_VECTORS_H*sizeof(double));
    int ncid_grid, retval, latitude_vector_id, longitude_vector_id;
    if ((retval = nc_open(grid_file, NC_NOWRITE, &ncid_grid)))
        NCERR(retval);
    if ((retval = nc_inq_varid(ncid_grid, "latitude_vector", &latitude_vector_id)))
        NCERR(retval);
    if ((retval = nc_inq_varid(ncid_grid, "longitude_vector", &longitude_vector_id)))
        NCERR(retval);
    if ((retval = nc_get_var_double(ncid_grid, latitude_vector_id, &latitude_vector[0])))
        NCERR(retval);
    if ((retval = nc_get_var_double(ncid_grid, longitude_vector_id, &longitude_vector[0])))
        NCERR(retval);
    if ((retval = nc_close(ncid_grid)))
        NCERR(retval);
    #pragma omp parallel for private(lat, lon, z_height, u, v, dummy_0, dummy_1, dummy_2, dummy_3, dummy_4, dummy_5, dummy_6)
    for (int i = 0; i < NO_OF_LAYERS; ++i)
    {
        for (int j = 0; j < NO_OF_VECTORS_H; ++j)
        {
            lat = latitude_vector[j];
            lon = longitude_vector[j];
            z_height = grid -> z_vector[NO_OF_SCALARS_H + j + i*NO_OF_VECTORS_PER_LAYER];
            // standard atmosphere: no wind
            if (ideal_input_id == 0)
            {
                state -> wind[NO_OF_SCALARS_H + i*NO_OF_VECTORS_PER_LAYER + j] = 0;
            }
            // dry Ullrich test
            if (ideal_input_id == 1)
            {
                baroclinic_wave_test(&one, &zero, &one, &one_double, &lon, &lat, &dummy_0, &z_height, &one, &u, &v, &dummy_1, &dummy_2, &dummy_3, &dummy_4, &dummy_5, &dummy_6);
                // projecting the zonal/meridional wind (u, v) onto the edge normal
                state -> wind[NO_OF_SCALARS_H + i*NO_OF_VECTORS_PER_LAYER + j] = u*cos(grid -> direction[j]) + v*sin(grid -> direction[j]);
            }
            // moist Ullrich test
            if (ideal_input_id == 2)
            {
                baroclinic_wave_test(&one, &one, &one, &one_double, &lon, &lat, &dummy_0, &z_height, &one, &u, &v, &dummy_1, &dummy_2, &dummy_3, &dummy_4, &dummy_5, &dummy_6);
                state -> wind[NO_OF_SCALARS_H + i*NO_OF_VECTORS_PER_LAYER + j] = u*cos(grid -> direction[j]) + v*sin(grid -> direction[j]);
            }
        }
    }
    free(latitude_vector);
    free(longitude_vector);
    // setting the vertical wind field equal to zero
    #pragma omp parallel for
    for (int i = 0; i < NO_OF_LEVELS; ++i)
    {
        for (int j = 0; j < NO_OF_SCALARS_H; ++j)
        {
            state -> wind[i*NO_OF_VECTORS_PER_LAYER + j] = 0;
        }
    }
    // this is the density which has not yet been hydrostatically balanced
    for (int i = 0; i < NO_OF_SCALARS; ++i)
    {
        // ideal gas law with the dry-air gas constant (constituent 0)
        diagnostics -> scalar_field_placeholder[i] = pressure[i]/(specific_gas_constants(0)*temperature[i]);
    }
    scalar_times_vector(diagnostics -> scalar_field_placeholder, state -> wind, diagnostics -> flux_density, grid);
    // Now, the potential vorticity is evaluated.
    calc_pot_vort(state -> wind, diagnostics -> scalar_field_placeholder, diagnostics, grid, dualgrid);
    // Now, the generalized Coriolis term is evaluated.
    vorticity_flux(diagnostics -> flux_density, diagnostics -> pot_vort, forcings -> pot_vort_tend, grid, dualgrid);
    // Kinetic energy is prepared for the gradient term of the Lamb transformation.
    inner_product(state -> wind, state -> wind, diagnostics -> v_squared, grid);
    // density is determined out of the hydrostatic equation
    int scalar_index;
    double b, c;
    // theta_pert and exner_pert are a misuse of name here, they contain the full values here
    #pragma omp parallel for private(scalar_index, b, c, pressure_value)
    for (int h_index = 0; h_index < NO_OF_SCALARS_H; ++h_index)
    {
        // integrating from bottom to top
        for (int layer_index = NO_OF_LAYERS - 1; layer_index >= 0; --layer_index)
        {
            scalar_index = layer_index*NO_OF_SCALARS_H + h_index;
            // lowest layer
            if (layer_index == NO_OF_LAYERS - 1)
            {
                // the Exner pressure follows directly from the prescribed pressure
                pressure_value = pressure[scalar_index];
                state -> exner_pert[scalar_index] = pow(pressure_value/P_0, specific_gas_constants(0)/spec_heat_capacities_p_gas(0));
            }
            // other layers
            else
            {
                // solving a quadratic equation for the Exner pressure
                b = -0.5*state -> exner_pert[scalar_index + NO_OF_SCALARS_H]/temperature[scalar_index + NO_OF_SCALARS_H]
                *(temperature[scalar_index] - temperature[scalar_index + NO_OF_SCALARS_H]
                + 2/spec_heat_capacities_p_gas(0)*(grid -> gravity_potential[scalar_index] - grid -> gravity_potential[scalar_index + NO_OF_SCALARS_H]
                + 0.5*diagnostics -> v_squared[scalar_index] - 0.5*diagnostics -> v_squared[scalar_index + NO_OF_SCALARS_H]
                - (grid -> z_scalar[scalar_index] - grid -> z_scalar[scalar_index + NO_OF_SCALARS_H])*forcings -> pot_vort_tend[h_index + (layer_index + 1)*NO_OF_VECTORS_PER_LAYER]));
                c = pow(state -> exner_pert[scalar_index + NO_OF_SCALARS_H], 2)*temperature[scalar_index]/temperature[scalar_index + NO_OF_SCALARS_H];
                // positive root of x^2 - 2bx - c = 0 rewritten as b + sqrt(b^2 + c)
                state -> exner_pert[scalar_index] = b + pow((pow(b, 2) + c), 0.5);
            }
            // this is the full potential temperature here
            state -> theta_pert[scalar_index] = temperature[scalar_index]/state -> exner_pert[scalar_index];
            // scalar_field_placeholder is the dry air density here
            diagnostics -> scalar_field_placeholder[scalar_index] = P_0*pow(state -> exner_pert[scalar_index],
            spec_heat_capacities_p_gas(0)/specific_gas_constants(0))/(specific_gas_constants(0)*temperature[scalar_index]);
            // setting rhotheta according to its definitions
            state -> rhotheta[scalar_index] = diagnostics -> scalar_field_placeholder[scalar_index]*state -> theta_pert[scalar_index];
        }
    }
    free(pressure);
    // substracting the background state
    #pragma omp parallel for
    for (int i = 0; i < NO_OF_SCALARS; ++i)
    {
        state -> exner_pert[i] = state -> exner_pert[i] - grid -> exner_bg[i];
        state -> theta_pert[i] = state -> theta_pert[i] - grid -> theta_bg[i];
    }
    // per-constituent temperatures: one block per condensed constituent plus one for the gas phase
    double *temperatures = malloc((NO_OF_CONDENSED_CONSTITUENTS + 1)*NO_OF_SCALARS*sizeof(double));
    #pragma omp parallel for
    for (int i = 0; i < NO_OF_SCALARS; ++i)
    {
        for (int j = 0; j < NO_OF_CONDENSED_CONSTITUENTS; ++j)
        {
            // condensed densities are zero in all test states
            state -> rho[j*NO_OF_SCALARS + i] = 0;
            // a local LTE is assumed in all test states
            temperatures[j*NO_OF_SCALARS + i] = temperature[i];
        }
        // the dry air density
        state -> rho[NO_OF_CONDENSED_CONSTITUENTS*NO_OF_SCALARS + i] = diagnostics -> scalar_field_placeholder[i];
        // water vapour density
        if (NO_OF_CONDENSED_CONSTITUENTS == 4)
        {
            state -> rho[(NO_OF_CONDENSED_CONSTITUENTS + 1)*NO_OF_SCALARS + i] = water_vapour_density[i];
        }
        // gas temperature
        temperatures[NO_OF_CONDENSED_CONSTITUENTS*NO_OF_SCALARS + i] = temperature[i];
    }
    free(temperature);
    free(water_vapour_density);
    // determining the temperature densities of the condensates
    #pragma omp parallel for
    for (int i = 0; i < NO_OF_CONDENSED_CONSTITUENTS*NO_OF_SCALARS; ++i)
    {
        state -> condensed_density_temperatures[i] = state -> rho[i]*temperatures[i];
    }
    // setting the soil temperature; the empty filename means: no SST available
    set_soil_temp(grid, soil, state, temperatures, "");
    free(temperatures);
    // returning 0 indicating success
    return 0;
}
int read_init_data(char init_state_file[], State *state, Grid* grid, Soil *soil)
{
    /*
    This function reads the initial state of the model atmosphere from a netcdf file.
    It reads densities, temperatures and wind, then derives rhotheta as well as the
    potential temperature and Exner pressure perturbations from the background state.
    Returns 0 on success; any netcdf failure aborts the program via NCERR.
    */
    // per-constituent temperatures: one block per condensed constituent plus one for the gas phase
    double *temperatures = malloc((NO_OF_CONDENSED_CONSTITUENTS + 1)*NO_OF_SCALARS*sizeof(double));
    int retval, ncid;
    if ((retval = nc_open(init_state_file, NC_NOWRITE, &ncid)))
        NCERR(retval);
    int densities_id, temperatures_id, wind_id;
    if ((retval = nc_inq_varid(ncid, "densities", &densities_id)))
        NCERR(retval);
    if ((retval = nc_inq_varid(ncid, "temperatures", &temperatures_id)))
        NCERR(retval);
    if ((retval = nc_inq_varid(ncid, "wind", &wind_id)))
        NCERR(retval);
    if ((retval = nc_get_var_double(ncid, densities_id, &state -> rho[0])))
        NCERR(retval);
    if ((retval = nc_get_var_double(ncid, temperatures_id, &temperatures[0])))
        NCERR(retval);
    if ((retval = nc_get_var_double(ncid, wind_id, &state -> wind[0])))
        NCERR(retval);
    if ((retval = nc_close(ncid)))
        NCERR(retval);
    // resricting the maximum relative humidity to 100 %
    if (NO_OF_CONDENSED_CONSTITUENTS == 4)
    {
        #pragma omp parallel for
        for (int i = 0; i < NO_OF_SCALARS; ++i)
        {
            // oversaturated cells are rescaled so that the relative humidity becomes exactly 1
            if (rel_humidity(state -> rho[(NO_OF_CONDENSED_CONSTITUENTS + 1)*NO_OF_SCALARS + i], temperatures[NO_OF_CONDENSED_CONSTITUENTS*NO_OF_SCALARS + i]) > 1)
            {
                state -> rho[(NO_OF_CONDENSED_CONSTITUENTS + 1)*NO_OF_SCALARS + i] = state -> rho[(NO_OF_CONDENSED_CONSTITUENTS + 1)*NO_OF_SCALARS + i]
                /rel_humidity(state -> rho[(NO_OF_CONDENSED_CONSTITUENTS + 1)*NO_OF_SCALARS + i], temperatures[NO_OF_CONDENSED_CONSTITUENTS*NO_OF_SCALARS + i]);
            }
        }
    }
    // determining the temperature densities of the condensates
    #pragma omp parallel for
    for (int i = 0; i < NO_OF_CONDENSED_CONSTITUENTS*NO_OF_SCALARS; ++i)
    {
        state -> condensed_density_temperatures[i] = state -> rho[i]*temperatures[i];
    }
    // diagnostic thermodynamical quantities
    double pressure, pot_temp;
    #pragma omp parallel for private(pressure, pot_temp)
    for (int i = 0; i < NO_OF_SCALARS; ++i)
    {
        // ideal gas law and Poisson equation for the dry air (constituent 0)
        pressure = state -> rho[NO_OF_CONDENSED_CONSTITUENTS*NO_OF_SCALARS + i]*specific_gas_constants(0)*temperatures[NO_OF_CONDENSED_CONSTITUENTS*NO_OF_SCALARS + i];
        pot_temp = temperatures[NO_OF_CONDENSED_CONSTITUENTS*NO_OF_SCALARS + i]*pow(P_0/pressure, specific_gas_constants(0)/spec_heat_capacities_p_gas(0));
        state -> rhotheta[i] = state -> rho[NO_OF_CONDENSED_CONSTITUENTS*NO_OF_SCALARS + i]*pot_temp;
        // calculating the potential temperature perturbation
        state -> theta_pert[i] = pot_temp - grid -> theta_bg[i];
        // calculating the Exner pressure perturbation
        state -> exner_pert[i] = temperatures[NO_OF_CONDENSED_CONSTITUENTS*NO_OF_SCALARS + i]/(grid -> theta_bg[i] + state -> theta_pert[i]) - grid -> exner_bg[i];
    }
    // checks
    // checking for negative densities
    # pragma omp parallel for
    for (int i = 0; i < NO_OF_CONSTITUENTS*NO_OF_SCALARS; ++i)
    {
        if (state -> rho[i] < 0)
        {
            printf("Negative density found.\n");
            printf("Aborting.\n");
            exit(1);
        }
    }
    // setting the soil temperature
    set_soil_temp(grid, soil, state, temperatures, init_state_file);
    free(temperatures);
    // returning 0 indicating success
    return 0;
}
int set_soil_temp(Grid *grid, Soil *soil, State *state, double temperatures[], char init_state_file[])
{
    /*
    This function sets the soil temperature.
    When the init file contains an SST field, sea points use it as surface
    temperature; otherwise the lowest-layer gas temperature is used.  Each
    soil column then blends linearly from its surface value towards the
    constant deep-soil temperature of the grid.
    */
    // sea surface temperature, read from the init file when present (important for NWP)
    double *sst = malloc(NO_OF_SCALARS_H*sizeof(double));
    int sst_avail = 0;
    // an empty filename means there is no init file to probe
    if (strlen(init_state_file) != 0)
    {
        int ncid, retval, sst_id;
        if ((retval = nc_open(init_state_file, NC_NOWRITE, &ncid)))
            NCERR(retval);
        // probing the netcdf file for an SST variable
        if (nc_inq_varid(ncid, "sst", &sst_id) == 0)
        {
            sst_avail = 1;
            printf("SST found in initialization file.\n");
        }
        else
        {
            printf("SST not found in initialization file.\n");
        }
        // reading the SST data if the variable exists
        if (sst_avail == 1)
        {
            if ((retval = nc_get_var_double(ncid, sst_id, &sst[0])))
                NCERR(retval);
        }
        // the netcdf file is no longer needed
        if ((retval = nc_close(ncid)))
            NCERR(retval);
    }
    // filling in the soil temperature profile column by column
    int layer_cell;
    double depth, surface_temp;
    #pragma omp parallel for private(layer_cell, depth, surface_temp)
    for (int cell = 0; cell < NO_OF_SCALARS_H; ++cell)
    {
        // surface temperature:
        // land points, and sea points without SST data, take the lowest-layer gas temperature
        if (grid -> is_land[cell] == 1 || (grid -> is_land[cell] == 0 && sst_avail == 0))
        {
            surface_temp = temperatures[NO_OF_CONDENSED_CONSTITUENTS*NO_OF_SCALARS + NO_OF_SCALARS - NO_OF_SCALARS_H + cell];
        }
        // sea points with SST data take the SST
        else
        {
            surface_temp = sst[cell];
        }
        // linear blend towards t_const_soil over the soil layers of this column
        for (int layer = 0; layer < NO_OF_SOIL_LAYERS; ++layer)
        {
            layer_cell = cell + layer*NO_OF_SCALARS_H;
            depth = grid -> z_t_const/NO_OF_SOIL_LAYERS*(0.5 + layer);
            soil -> temperature[layer_cell] = surface_temp + (grid -> t_const_soil - surface_temp)*depth/grid -> z_t_const;
        }
    }
    free(sst);
    // returning 0 indicating success
    return 0;
}
|
rowWiseAverageOfMatrix.c | #include <stdio.h>
#include <omp.h>
int main(){
    /*
      Reads an n x n integer matrix from stdin and prints each row's average,
      one row per OpenMP thread iteration.
      Fix: both scanf results are now checked -- previously a non-numeric or
      non-positive dimension fed the VLA declaration 'int a[n][n]' with an
      indeterminate or invalid size (undefined behavior).
    */
    int i, j, n;
    double sum;
    printf("Enter matrix dimension = ");
    if (scanf("%d", &n) != 1 || n <= 0){
        printf("Invalid matrix dimension.\n");
        return 1;
    }
    /* NOTE(review): the VLA lives on the stack; a very large n may overflow it. */
    int a[n][n];
    printf("Enter matrix values\n");
    for (i = 0; i < n; i++){
        for (j = 0; j < n; j++){
            printf("a[%d][%d] = ", i, j);
            if (scanf("%d", &a[i][j]) != 1){
                printf("Invalid matrix value.\n");
                return 1;
            }
        }
    }
    /* Pin the team size to the number of processors (no dynamic adjustment). */
    omp_set_dynamic(0);
    int m = omp_get_num_procs();
    omp_set_num_threads(m);
    /* Rows are independent; i, j and sum must be private per thread. */
    #pragma omp parallel for shared(a) private(i, j, sum)
    for (i = 0; i < n; i++){
        sum = 0.0;
        for (j = 0; j < n; j++){
            sum += a[i][j];
        }
        printf("Row %d => Average = %.2f [thread %d of %d]\n", i, sum / n, omp_get_thread_num(),omp_get_num_threads());
    }
    return 0;
}
u_atomic.h | /**
* Many similar implementations exist. See for example libwsbm
* or the linux kernel include/atomic.h
*
* No copyright claimed on this file.
*
*/
#ifdef __cplusplus
template<class T> class _IncludeInsideExternCNotPortable;
#endif
#ifndef U_ATOMIC_H
#define U_ATOMIC_H
#include <stdbool.h>
#include <stdint.h>
/* Favor OS-provided implementations.
*
* Where no OS-provided implementation is available, fall back to
* locally coded assembly, compiler intrinsic or ultimately a
* mutex-based implementation.
*/
/* Select exactly one atomic backend for this translation unit. */
#if defined(__sun)
#define PIPE_ATOMIC_OS_SOLARIS
#elif defined(_MSC_VER)
#define PIPE_ATOMIC_MSVC_INTRINSIC
#elif defined(__GNUC__)
#define PIPE_ATOMIC_GCC_INTRINSIC
#else
#error "Unsupported platform"
#endif
/* Implementation using GCC-provided synchronization intrinsics
 */
#if defined(PIPE_ATOMIC_GCC_INTRINSIC)

#define PIPE_ATOMIC "GCC Sync Intrinsics"

#if defined(USE_GCC_ATOMIC_BUILTINS)

/* The builtins with explicit memory model are available since GCC 4.7. */
#define p_atomic_set(_v, _i) __atomic_store_n((_v), (_i), __ATOMIC_RELEASE)
#define p_atomic_read(_v) __atomic_load_n((_v), __ATOMIC_ACQUIRE)
#define p_atomic_read_relaxed(_v) __atomic_load_n((_v), __ATOMIC_RELAXED)
#define p_atomic_dec_zero(v) (__atomic_sub_fetch((v), 1, __ATOMIC_ACQ_REL) == 0)
#define p_atomic_inc(v) (void) __atomic_add_fetch((v), 1, __ATOMIC_ACQ_REL)
#define p_atomic_dec(v) (void) __atomic_sub_fetch((v), 1, __ATOMIC_ACQ_REL)
#define p_atomic_add(v, i) (void) __atomic_add_fetch((v), (i), __ATOMIC_ACQ_REL)
#define p_atomic_inc_return(v) __atomic_add_fetch((v), 1, __ATOMIC_ACQ_REL)
#define p_atomic_dec_return(v) __atomic_sub_fetch((v), 1, __ATOMIC_ACQ_REL)
#define p_atomic_add_return(v, i) __atomic_add_fetch((v), (i), __ATOMIC_ACQ_REL)
/* acquire-release exchange; advertised so users can skip a cmpxchg loop */
#define p_atomic_xchg(v, i) __atomic_exchange_n((v), (i), __ATOMIC_ACQ_REL)
#define PIPE_NATIVE_ATOMIC_XCHG

#else

/* Fallback to the legacy __sync builtins (full-barrier semantics);
 * loads/stores are plain accesses here. */
#define p_atomic_set(_v, _i) (*(_v) = (_i))
#define p_atomic_read(_v) (*(_v))
#define p_atomic_read_relaxed(_v) (*(_v))
#define p_atomic_dec_zero(v) (__sync_sub_and_fetch((v), 1) == 0)
#define p_atomic_inc(v) (void) __sync_add_and_fetch((v), 1)
#define p_atomic_dec(v) (void) __sync_sub_and_fetch((v), 1)
#define p_atomic_add(v, i) (void) __sync_add_and_fetch((v), (i))
#define p_atomic_inc_return(v) __sync_add_and_fetch((v), 1)
#define p_atomic_dec_return(v) __sync_sub_and_fetch((v), 1)
#define p_atomic_add_return(v, i) __sync_add_and_fetch((v), (i))

#endif

/* There is no __atomic_* compare and exchange that returns the current value.
 * Also, GCC 5.4 seems unable to optimize a compound statement expression that
 * uses an additional stack variable with __atomic_compare_exchange[_n].
 */
#define p_atomic_cmpxchg(v, old, _new) \
   __sync_val_compare_and_swap((v), (old), (_new))

#endif
/* Unlocked version for single threaded environments, such as some
 * windows kernel modules.
 *
 * These are plain, non-atomic operations -- only safe where no
 * concurrent access can occur.
 */
#if defined(PIPE_ATOMIC_OS_UNLOCKED)

#define PIPE_ATOMIC "Unlocked"

#define p_atomic_set(_v, _i) (*(_v) = (_i))
#define p_atomic_read(_v) (*(_v))
#define p_atomic_read_relaxed(_v) (*(_v))
#define p_atomic_dec_zero(_v) (p_atomic_dec_return(_v) == 0)
#define p_atomic_inc(_v) ((void) p_atomic_inc_return(_v))
#define p_atomic_dec(_v) ((void) p_atomic_dec_return(_v))
#define p_atomic_add(_v, _i) ((void) p_atomic_add_return((_v), (_i)))
#define p_atomic_inc_return(_v) (++(*(_v)))
#define p_atomic_dec_return(_v) (--(*(_v)))
#define p_atomic_add_return(_v, _i) (*(_v) = *(_v) + (_i))
/* non-atomic compare-and-swap emulation: returns the value observed
 * before any replacement, matching the cmpxchg convention */
#define p_atomic_cmpxchg(_v, _old, _new) (*(_v) == (_old) ? (*(_v) = (_new), (_old)) : *(_v))

#endif
#if defined(PIPE_ATOMIC_MSVC_INTRINSIC)

#define PIPE_ATOMIC "MSVC Intrinsics"

/* We use the Windows header's Interlocked*64 functions instead of the
 * _Interlocked*64 intrinsics wherever we can, as support for the latter varies
 * with target CPU, whereas Windows headers take care of all portability
 * issues: using intrinsics where available, falling back to library
 * implementations where not.
 */
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN 1
#endif
#include <windows.h>
#include <intrin.h>
#include <assert.h>

/* MSVC supports decltype keyword, but it's only supported on C++ and doesn't
 * quite work here; and if a C++-only solution is worthwhile, then it would be
 * better to use templates / function overloading, instead of decltype magic.
 * Therefore, we rely on implicit casting to LONGLONG for the functions that
 * return the previous value.
 */
#define p_atomic_set(_v, _i) (*(_v) = (_i))
#define p_atomic_read(_v) (*(_v))
#define p_atomic_read_relaxed(_v) (*(_v))
#define p_atomic_dec_zero(_v) \
   (p_atomic_dec_return(_v) == 0)
#define p_atomic_inc(_v) \
   ((void) p_atomic_inc_return(_v))
/* The sizeof dispatch below is a compile-time constant, so the ternary
 * chain collapses to a single intrinsic call per operand size. */
#define p_atomic_inc_return(_v) (\
   sizeof *(_v) == sizeof(short)   ? _InterlockedIncrement16((short *)  (_v)) : \
   sizeof *(_v) == sizeof(long)    ? _InterlockedIncrement  ((long *)   (_v)) : \
   sizeof *(_v) == sizeof(__int64) ? InterlockedIncrement64 ((__int64 *)(_v)) : \
                                     (assert(!"should not get here"), 0))
#define p_atomic_dec(_v) \
   ((void) p_atomic_dec_return(_v))
#define p_atomic_dec_return(_v) (\
   sizeof *(_v) == sizeof(short)   ? _InterlockedDecrement16((short *)  (_v)) : \
   sizeof *(_v) == sizeof(long)    ? _InterlockedDecrement  ((long *)   (_v)) : \
   sizeof *(_v) == sizeof(__int64) ? InterlockedDecrement64 ((__int64 *)(_v)) : \
                                     (assert(!"should not get here"), 0))
#define p_atomic_add(_v, _i) \
   ((void) p_atomic_add_return((_v), (_i)))
#define p_atomic_add_return(_v, _i) (\
   sizeof *(_v) == sizeof(char)    ? _InterlockedExchangeAdd8 ((char *)   (_v), (_i)) : \
   sizeof *(_v) == sizeof(short)   ? _InterlockedExchangeAdd16((short *)  (_v), (_i)) : \
   sizeof *(_v) == sizeof(long)    ? _InterlockedExchangeAdd  ((long *)   (_v), (_i)) : \
   sizeof *(_v) == sizeof(__int64) ? InterlockedExchangeAdd64((__int64 *)(_v), (_i)) : \
                                     (assert(!"should not get here"), 0))
#define p_atomic_cmpxchg(_v, _old, _new) (\
   sizeof *(_v) == sizeof(char)    ? _InterlockedCompareExchange8 ((char *)   (_v), (char)   (_new), (char)   (_old)) : \
   sizeof *(_v) == sizeof(short)   ? _InterlockedCompareExchange16((short *)  (_v), (short)  (_new), (short)  (_old)) : \
   sizeof *(_v) == sizeof(long)    ? _InterlockedCompareExchange  ((long *)   (_v), (long)   (_new), (long)   (_old)) : \
   sizeof *(_v) == sizeof(__int64) ? InterlockedCompareExchange64 ((__int64 *)(_v), (__int64)(_new), (__int64)(_old)) : \
                                     (assert(!"should not get here"), 0))
//#define p_fetch_and_add(_v, _old, _new) (\
// sizeof *(_v) == sizeof(char) ? _InterlockedCompareExchange8 ((char *) (_v), (char) (_new), (char) (_old)) : \
// sizeof *(_v) == sizeof(short) ? _InterlockedCompareExchange16((short *) (_v), (short) (_new), (short) (_old)) : \
// sizeof *(_v) == sizeof(long) ? _InterlockedCompareExchange ((long *) (_v), (long) (_new), (long) (_old)) : \
// sizeof *(_v) == sizeof(__int64) ? _InterlockedAnd64 ((__int64 *)(_v), (__int64)(_new), (__int64)(_old)) : \
// (assert(!"should not get here"), 0))
/* Atomically adds _i to *_v and returns the value *_v held BEFORE the
 * addition (fetch-and-add).
 * Fix: the previous revision dispatched to _InterlockedAnd*, which performs
 * a bitwise AND, not an addition; InterlockedExchangeAdd* is the correct
 * fetch-and-add primitive (it returns the initial value). */
#define p_fetch_and_add(_v, _i) (\
   sizeof *(_v) == sizeof(char)    ? _InterlockedExchangeAdd8 ((char *)   (_v), (_i)) : \
   sizeof *(_v) == sizeof(short)   ? _InterlockedExchangeAdd16((short *)  (_v), (_i)) : \
   sizeof *(_v) == sizeof(long)    ? _InterlockedExchangeAdd  ((long *)   (_v), (_i)) : \
   sizeof *(_v) == sizeof(__int64) ? InterlockedExchangeAdd64 ((__int64 *)(_v), (_i)) : \
                                     (assert(!"should not get here"), 0))
/* Atomically subtracts _i from *_v and returns the value *_v held BEFORE
 * the subtraction, implemented as a fetch-and-add of the negated operand.
 * Fix: the previous revision (a) used _InterlockedAnd* (bitwise AND, not
 * arithmetic) and (b) used C++-only functional casts such as -char(_i),
 * which do not compile in C. */
#define p_fetch_and_sub(_v, _i) (\
   sizeof *(_v) == sizeof(char)    ? _InterlockedExchangeAdd8 ((char *)   (_v), (char)(-(_i)))    : \
   sizeof *(_v) == sizeof(short)   ? _InterlockedExchangeAdd16((short *)  (_v), (short)(-(_i)))   : \
   sizeof *(_v) == sizeof(long)    ? _InterlockedExchangeAdd  ((long *)   (_v), (long)(-(_i)))    : \
   sizeof *(_v) == sizeof(__int64) ? InterlockedExchangeAdd64 ((__int64 *)(_v), (__int64)(-(_i))) : \
                                     (assert(!"should not get here"), 0))
/** @brief Compare @c *ptr and @c comparand. If equal, let @c
* *ptr=replacement and return @c true, return @c false otherwise.
*
* Implementation is heavily platform-dependent.
* @param ptr Pointer to 32-bit signed integer.
* @param comparand Compare value.
* @param replacement Replacement value.
*/
//inline bool
//compare_and_swap_32(volatile int32* ptr, int32 comparand, int32 replacement)
//{
//#if defined(__ICC) //x86 version
// return _InterlockedCompareExchange((void*)ptr, replacement,
// comparand) == comparand;
//#elif defined(__ECC) //IA-64 version
// return _InterlockedCompareExchange((void*)ptr, replacement,
// comparand) == comparand;
//#elif defined(__ICL) || defined(_MSC_VER)
// return _InterlockedCompareExchange(reinterpret_cast<volatile long*>(ptr),
// replacement, comparand) == comparand;
//#elif defined(__GNUC__)
// return compare_and_swap_64(ptr, comparand, replacement);
//#elif defined(__SUNPRO_CC) && defined(__sparc)
// return atomic_cas_32((volatile unsigned int*)ptr, comparand,
// replacement) == comparand;
//#else
//#pragma message("slow compare_and_swap_32")
// bool res = false;
//#pragma omp critical
// {
// if (*ptr == comparand)
// {
// *ptr = replacement;
// res = true;
// }
// }
// return res;
//#endif
//}
//
///** @brief Compare @c *ptr and @c comparand. If equal, let @c
// * *ptr=replacement and return @c true, return @c false otherwise.
// *
// * Implementation is heavily platform-dependent.
// * @param ptr Pointer to 64-bit signed integer.
// * @param comparand Compare value.
// * @param replacement Replacement value.
// */
//inline bool
//compare_and_swap_64(volatile int64_t* ptr, int64_t comparand, int64_t replacement)
//{
//#if defined(__ICC) && defined(__x86_64) //x86 version
// return cas64<int>(ptr, comparand, replacement) == comparand;
//#elif defined(__ECC) //IA-64 version
// return _InterlockedCompareExchange64((void*)ptr, replacement,
// comparand) == comparand;
//#elif defined(__ICL) || defined(_MSC_VER)
//#ifndef _WIN64
// _GLIBCXX_PARALLEL_ASSERT(false); //not available in this case
// return 0;
//#else
//
// return _InterlockedCompareExchange64(ptr, replacement,
// comparand) == comparand;
//#endif
//
//#elif defined(__GNUC__) && defined(__x86_64)
// return compare_and_swap_64(ptr, comparand, replacement);
//#elif defined(__GNUC__) && defined(__i386) && \
// (defined(__i686) || defined(__pentium4) || defined(__athlon))
// return compare_and_swap_64(ptr, comparand, replacement);
//#elif defined(__SUNPRO_CC) && defined(__sparc)
// return atomic_cas_64((volatile unsigned long long*)ptr,
// comparand, replacement) == comparand;
//#else
//#if defined(__GNUC__) && defined(__i386)
// // XXX -march=native
// //#warning "please compile with -march=i686 or better"
//#endif
//#pragma message("slow compare_and_swap_64")
// bool res = false;
//#pragma omp critical
// {
// if (*ptr == comparand)
// {
// *ptr = replacement;
// res = true;
// }
// }
// return res;
//#endif
// }
#endif
#if defined(PIPE_ATOMIC_OS_SOLARIS)

#define PIPE_ATOMIC "Solaris OS atomic functions"

#include <atomic.h>
#include <assert.h>

/* Size-dispatched wrappers around the Solaris <atomic.h> atomic_* API;
 * the sizeof chain is a compile-time constant and collapses to one call. */
#define p_atomic_set(_v, _i) (*(_v) = (_i))
#define p_atomic_read(_v) (*(_v))

#define p_atomic_dec_zero(v) (\
   sizeof(*v) == sizeof(uint8_t)  ? atomic_dec_8_nv ((uint8_t  *)(v)) == 0 : \
   sizeof(*v) == sizeof(uint16_t) ? atomic_dec_16_nv((uint16_t *)(v)) == 0 : \
   sizeof(*v) == sizeof(uint32_t) ? atomic_dec_32_nv((uint32_t *)(v)) == 0 : \
   sizeof(*v) == sizeof(uint64_t) ? atomic_dec_64_nv((uint64_t *)(v)) == 0 : \
                                    (assert(!"should not get here"), 0))

#define p_atomic_inc(v) (void) (\
   sizeof(*v) == sizeof(uint8_t)  ? atomic_inc_8 ((uint8_t  *)(v)) : \
   sizeof(*v) == sizeof(uint16_t) ? atomic_inc_16((uint16_t *)(v)) : \
   sizeof(*v) == sizeof(uint32_t) ? atomic_inc_32((uint32_t *)(v)) : \
   sizeof(*v) == sizeof(uint64_t) ? atomic_inc_64((uint64_t *)(v)) : \
                                    (assert(!"should not get here"), 0))

/* the *_nv variants return the new value; cast back to the operand type */
#define p_atomic_inc_return(v) (__typeof(*v))( \
   sizeof(*v) == sizeof(uint8_t)  ? atomic_inc_8_nv ((uint8_t  *)(v)) : \
   sizeof(*v) == sizeof(uint16_t) ? atomic_inc_16_nv((uint16_t *)(v)) : \
   sizeof(*v) == sizeof(uint32_t) ? atomic_inc_32_nv((uint32_t *)(v)) : \
   sizeof(*v) == sizeof(uint64_t) ? atomic_inc_64_nv((uint64_t *)(v)) : \
                                    (assert(!"should not get here"), 0))

#define p_atomic_dec(v) (void) ( \
   sizeof(*v) == sizeof(uint8_t)  ? atomic_dec_8 ((uint8_t  *)(v)) : \
   sizeof(*v) == sizeof(uint16_t) ? atomic_dec_16((uint16_t *)(v)) : \
   sizeof(*v) == sizeof(uint32_t) ? atomic_dec_32((uint32_t *)(v)) : \
   sizeof(*v) == sizeof(uint64_t) ? atomic_dec_64((uint64_t *)(v)) : \
                                    (assert(!"should not get here"), 0))

#define p_atomic_dec_return(v) (__typeof(*v))( \
   sizeof(*v) == sizeof(uint8_t)  ? atomic_dec_8_nv ((uint8_t  *)(v)) : \
   sizeof(*v) == sizeof(uint16_t) ? atomic_dec_16_nv((uint16_t *)(v)) : \
   sizeof(*v) == sizeof(uint32_t) ? atomic_dec_32_nv((uint32_t *)(v)) : \
   sizeof(*v) == sizeof(uint64_t) ? atomic_dec_64_nv((uint64_t *)(v)) : \
                                    (assert(!"should not get here"), 0))

#define p_atomic_add(v, i) (void) ( \
   sizeof(*v) == sizeof(uint8_t)  ? atomic_add_8 ((uint8_t  *)(v), (i)) : \
   sizeof(*v) == sizeof(uint16_t) ? atomic_add_16((uint16_t *)(v), (i)) : \
   sizeof(*v) == sizeof(uint32_t) ? atomic_add_32((uint32_t *)(v), (i)) : \
   sizeof(*v) == sizeof(uint64_t) ? atomic_add_64((uint64_t *)(v), (i)) : \
                                    (assert(!"should not get here"), 0))

#define p_atomic_add_return(v, i) (__typeof(*v)) ( \
   sizeof(*v) == sizeof(uint8_t)  ? atomic_add_8_nv ((uint8_t  *)(v), (i)) : \
   sizeof(*v) == sizeof(uint16_t) ? atomic_add_16_nv((uint16_t *)(v), (i)) : \
   sizeof(*v) == sizeof(uint32_t) ? atomic_add_32_nv((uint32_t *)(v), (i)) : \
   sizeof(*v) == sizeof(uint64_t) ? atomic_add_64_nv((uint64_t *)(v), (i)) : \
                                    (assert(!"should not get here"), 0))

/* atomic_cas_* return the value observed before the swap */
#define p_atomic_cmpxchg(v, old, _new) (__typeof(*v))( \
   sizeof(*v) == sizeof(uint8_t)  ? atomic_cas_8 ((uint8_t  *)(v), (uint8_t )(old), (uint8_t )(_new)) : \
   sizeof(*v) == sizeof(uint16_t) ? atomic_cas_16((uint16_t *)(v), (uint16_t)(old), (uint16_t)(_new)) : \
   sizeof(*v) == sizeof(uint32_t) ? atomic_cas_32((uint32_t *)(v), (uint32_t)(old), (uint32_t)(_new)) : \
   sizeof(*v) == sizeof(uint64_t) ? atomic_cas_64((uint64_t *)(v), (uint64_t)(old), (uint64_t)(_new)) : \
                                    (assert(!"should not get here"), 0))

#endif
#ifndef PIPE_ATOMIC
#error "No pipe_atomic implementation selected"
#endif
#ifndef PIPE_NATIVE_ATOMIC_XCHG
static inline uint32_t p_atomic_xchg_32(uint32_t* v, uint32_t i)
{
   /* Atomic exchange emulated with a compare-and-swap retry loop:
      keep re-proposing the most recently observed value until the CAS
      succeeds, then return the value that was displaced by i. */
   uint32_t observed = p_atomic_read(v);
   for (;;) {
      uint32_t displaced = p_atomic_cmpxchg(v, observed, i);
      if (displaced == observed)
         return displaced;
      observed = displaced;
   }
}
static inline uint64_t p_atomic_xchg_64(uint64_t* v, uint64_t i)
{
   /* 64-bit twin of p_atomic_xchg_32: CAS-loop emulation of an atomic
      exchange, returning the previous contents of *v. */
   uint64_t observed = p_atomic_read(v);
   for (;;) {
      uint64_t displaced = p_atomic_cmpxchg(v, observed, i);
      if (displaced == observed)
         return displaced;
      observed = displaced;
   }
}
/* Generic exchange: selects the 32- or 64-bit CAS-loop emulation by
   operand size; any other size is a programming error (assert arm). */
#define p_atomic_xchg(v, i) (__typeof(*(v)))( \
sizeof(*(v)) == sizeof(uint32_t) ? p_atomic_xchg_32((uint32_t *)(v), (uint32_t)(i)) : \
sizeof(*(v)) == sizeof(uint64_t) ? p_atomic_xchg_64((uint64_t *)(v), (uint64_t)(i)) : \
(assert(!"should not get here"), 0))
#endif
#endif /* U_ATOMIC_H */
|
cosapi.h | #ifndef COS_COS_COSAPI_H
#define COS_COS_COSAPI_H
/**
* C Object System
* COS api
*
* Copyright 2006+ Laurent Deniau <laurent.deniau@gmail.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef COS_COS_COS_H
#error "COS: use <cos/cos/cos.h> instead of <cos/cos/cosapi.h>"
#endif
// low-level API
void cos_init(void);
void cos_deinit(void);
double cos_initDuration(void); // in second.
double cos_deinitDuration(void); // in second.
BOOL cos_object_isKindOf(OBJ,const struct Class*);
BOOL cos_object_changeClass(OBJ,const struct Class*);
BOOL cos_object_unsafeChangeClass(OBJ,const struct Class*,const struct Class*);
struct Generic* cos_generic_get(U32);
struct Generic* cos_generic_getWithStr(STR);
struct Class* cos_class_get(U32);
struct Class* cos_class_getWithStr(STR);
struct Class* cos_property_getWithStr(STR);
BOOL cos_class_isSubclassOf (const struct Class*, const struct Class*);
U32 cos_class_readProperties (const struct Class*,U32,const struct Class**,U32);
U32 cos_class_writeProperties(const struct Class*,U32,const struct Class**,U32);
IMP1 cos_method_get1(SEL,U32);
IMP2 cos_method_get2(SEL,U32,U32);
IMP3 cos_method_get3(SEL,U32,U32,U32);
IMP4 cos_method_get4(SEL,U32,U32,U32,U32);
IMP5 cos_method_get5(SEL,U32,U32,U32,U32,U32);
IMP1 cos_method_lookup1(SEL,U32);
IMP2 cos_method_lookup2(SEL,U32,U32);
IMP3 cos_method_lookup3(SEL,U32,U32,U32);
IMP4 cos_method_lookup4(SEL,U32,U32,U32,U32);
IMP5 cos_method_lookup5(SEL,U32,U32,U32,U32,U32);
/* inlined functions (see cos/cos/dispatch.h)
IMP1 cos_method_fastLookup1(SEL,U32);
IMP2 cos_method_fastLookup2(SEL,U32,U32);
IMP3 cos_method_fastLookup3(SEL,U32,U32,U32);
IMP4 cos_method_fastLookup4(SEL,U32,U32,U32,U32);
IMP5 cos_method_fastLookup5(SEL,U32,U32,U32,U32,U32);
BOOL cos_method_understand1(SEL,U32);
BOOL cos_method_understand2(SEL,U32,U32);
BOOL cos_method_understand3(SEL,U32,U32,U32);
BOOL cos_method_understand4(SEL,U32,U32,U32,U32);
BOOL cos_method_understand5(SEL,U32,U32,U32,U32,U32);
*/
char* cos_method_name(const struct Method*,char*,U32);
char* cos_method_call(SEL,OBJ*,char*,U32);
char* cos_method_callName(const struct Method*,OBJ*,char*,U32);
void (*cos_method_trace)(STR,int,BOOL,const struct Method*,OBJ*);
void cos_method_clearCache1(void);
void cos_method_clearCache2(void);
void cos_method_clearCache3(void);
void cos_method_clearCache4(void);
void cos_method_clearCache5(void);
void cos_method_clearCaches(void);
void cos_contract_invariant1(OBJ,STR,int);
void cos_contract_invariant2(OBJ,OBJ,STR,int);
void cos_contract_invariant3(OBJ,OBJ,OBJ,STR,int);
void cos_contract_invariant4(OBJ,OBJ,OBJ,OBJ,STR,int);
void cos_contract_invariant5(OBJ,OBJ,OBJ,OBJ,OBJ,STR,int);
int cos_contract_setLevel (int lvl); // return previous level
void cos_exception_assert (STR,STR,int) __attribute__((__noreturn__));
void cos_exception_errno (int,STR,int) __attribute__((__noreturn__));
void cos_exception_badcast(OBJ,const struct Class*,STR,int)
__attribute__((__noreturn__));
void cos_exception_throw (OBJ,STR,int) __attribute__((__noreturn__));
BOOL cos_exception_catch (OBJ,OBJ);
BOOL cos_exception_uncaught(void);
void cos_exception_initContext(struct cos_exception_context*);
void cos_exception_deinitContext(struct cos_exception_context*);
cos_exception_handler cos_exception_setTerminate(cos_exception_handler);
void cos_functor_overflow(void);
void cos_functor_underflow(void) __attribute__((__noreturn__));
void cos_functor_clearContext(void);
void cos_module_load(STR*); // null terminated array of module names
/* NOTE-INFO: loggers
- prototype: void cos_xxx(STR fmt, ...);
- a '\n' is automatically added to the end
- they can be turned on/off with cos_logmsg_set.
- all these handlers display on cos_logmsg_out [default=stderr]
- to access to cos_logmsg_out, you must include cos/cos/debug.h
*/
/* Leveled logger front-ends: each expands to cos_logmsg() with the
   call site's file and line baked in. */
#define cos_trace(...) \
cos_logmsg(COS_LOGMSG_TRACE,__FILE__,__LINE__,__VA_ARGS__)
#define cos_debug(...) \
cos_logmsg(COS_LOGMSG_DEBUG,__FILE__,__LINE__,__VA_ARGS__)
#define cos_info(...) \
cos_logmsg(COS_LOGMSG_INFO ,__FILE__,__LINE__,__VA_ARGS__)
#define cos_warn( ...) \
cos_logmsg(COS_LOGMSG_WARN ,__FILE__,__LINE__,__VA_ARGS__)
#define cos_error(...) \
cos_logmsg(COS_LOGMSG_ERROR,__FILE__,__LINE__,__VA_ARGS__)
#define cos_abort(...) \
cos_logmsg(COS_LOGMSG_ABORT,__FILE__,__LINE__,__VA_ARGS__)
/* Short-circuit filter: cos_logmsg_ (and the varargs) are evaluated
   only when the current cos_logmsg_level_ threshold admits lvl. */
#define cos_logmsg(lvl,file,line,...) \
((void)(cos_logmsg_level_ <= (lvl) && (cos_logmsg_(lvl,file,line,__VA_ARGS__),0)))
// topic-specific debug
/* DEBUG_<topic> must expand to 0/1; when 0 the whole call folds away
   at compile time. */
#define COS_DEBUG_IF(topic,...) \
((void)(COS_PP_CAT(DEBUG_,topic) && (cos_debug(__VA_ARGS__),0)))
void cos_logmsg_(int,STR,int,STR,...) __attribute__((__format__(__printf__,4,5)));
int cos_logmsg_setLevel(int lvl); // return previous level
/* NOTE-INFO: auto ctor
the following function-like macro is useful to define automatic ctor
for classes deriving from Object (or at equivalent level)
*/
#define cos_object_auto(cls) \
{{ COS_CLS_NAME(cls).Behavior.id, COS_RC_AUTO }}
/***********************************************************
* Implementation
*/
// COS symbols init
void cos_symbol_init(void);
void cos_symbol_register(struct Any**, STR tag);
// next-method
void cos_method_nextClear(void);
void cos_method_nextInit(FCT*,SEL,U32,U32,struct Class* const*);
// 2nd and 3rd levels dispatch
IMP1 cos_method_fastLookup1_(struct cos_method_slot1*restrict*restrict,SEL,U32);
IMP2 cos_method_fastLookup2_(struct cos_method_slot2*restrict*restrict,SEL,U32,U32);
IMP3 cos_method_fastLookup3_(struct cos_method_slot3*restrict*restrict,SEL,U32,U32,U32);
IMP4 cos_method_fastLookup4_(struct cos_method_slot4*restrict*restrict,SEL,U32,U32,U32,U32);
IMP5 cos_method_fastLookup5_(struct cos_method_slot5*restrict*restrict,SEL,U32,U32,U32,U32,U32);
BOOL cos_method_understand1_(struct cos_method_slot1*restrict*restrict,SEL,U32);
BOOL cos_method_understand2_(struct cos_method_slot2*restrict*restrict,SEL,U32,U32);
BOOL cos_method_understand3_(struct cos_method_slot3*restrict*restrict,SEL,U32,U32,U32);
BOOL cos_method_understand4_(struct cos_method_slot4*restrict*restrict,SEL,U32,U32,U32,U32);
BOOL cos_method_understand5_(struct cos_method_slot5*restrict*restrict,SEL,U32,U32,U32,U32,U32);
// logger message level (not thread safe)
extern int cos_logmsg_level_;
/***********************************************************
* Inlined functions
*/
#if defined(_OPENMP) || COS_HAS_TLS || !COS_HAS_POSIX // --------------------
#ifdef _OPENMP
#include <omp.h>
#endif
/*
  Per-thread state accessors, TLS flavour -- compiled in when OpenMP is
  active, the compiler has thread-local storage, or POSIX threads are
  absent (see the #if above).  Each accessor exposes a __thread global;
  under OpenMP the threadprivate pragma additionally gives every OpenMP
  thread its own copy.
  NOTE(review): the COS_UNUSED(...) after each return is intentionally
  unreachable -- it only references the function's own name, presumably
  to silence unused-static-function warnings; confirm against the
  COS_UNUSED definition.
*/
static cos_inline struct cos_functor_context*
cos_functor_context(void)
{
extern __thread struct cos_functor_context cos_functor_context_;
#ifdef _OPENMP
#pragma omp threadprivate(cos_functor_context_)
#endif
return &cos_functor_context_;
COS_UNUSED(cos_functor_context);
}
/* Method-dispatch caches, one per generic arity (1..5 arguments). */
static cos_inline struct cos_method_cache1*
cos_method_cache1(void)
{
extern __thread struct cos_method_cache1 cos_method_cache1_;
#ifdef _OPENMP
#pragma omp threadprivate(cos_method_cache1_)
#endif
return &cos_method_cache1_;
COS_UNUSED(cos_method_cache1);
}
static cos_inline struct cos_method_cache2*
cos_method_cache2(void)
{
extern __thread struct cos_method_cache2 cos_method_cache2_;
#ifdef _OPENMP
#pragma omp threadprivate(cos_method_cache2_)
#endif
return &cos_method_cache2_;
COS_UNUSED(cos_method_cache2);
}
static cos_inline struct cos_method_cache3*
cos_method_cache3(void)
{
extern __thread struct cos_method_cache3 cos_method_cache3_;
#ifdef _OPENMP
#pragma omp threadprivate(cos_method_cache3_)
#endif
return &cos_method_cache3_;
COS_UNUSED(cos_method_cache3);
}
static cos_inline struct cos_method_cache4*
cos_method_cache4(void)
{
extern __thread struct cos_method_cache4 cos_method_cache4_;
#ifdef _OPENMP
#pragma omp threadprivate(cos_method_cache4_)
#endif
return &cos_method_cache4_;
COS_UNUSED(cos_method_cache4);
}
static cos_inline struct cos_method_cache5*
cos_method_cache5(void)
{
extern __thread struct cos_method_cache5 cos_method_cache5_;
#ifdef _OPENMP
#pragma omp threadprivate(cos_method_cache5_)
#endif
return &cos_method_cache5_;
COS_UNUSED(cos_method_cache5);
}
/* Unlike the accessors above, the thread-local variable here is itself
   a pointer, so the pointer VALUE (not its address) is returned. */
static cos_inline struct cos_exception_context*
cos_exception_context(void)
{
extern __thread struct cos_exception_context *cos_exception_cxt_;
#ifdef _OPENMP
#pragma omp threadprivate(cos_exception_cxt_)
#endif
return cos_exception_cxt_;
COS_UNUSED(cos_exception_context);
}
#else // !defined(_OPENMP) && !COS_HAS_TLS && COS_HAS_POSIX -----------------
#if COS_HAS_POSIX
#include <pthread.h>
#endif
struct cos_exception_context* cos_exception_context (void);
struct cos_functor_context* cos_functor_context_init(void);
struct cos_method_cache1* cos_method_cache1_init (void);
struct cos_method_cache2* cos_method_cache2_init (void);
struct cos_method_cache3* cos_method_cache3_init (void);
struct cos_method_cache4* cos_method_cache4_init (void);
struct cos_method_cache5* cos_method_cache5_init (void);
/*
  Per-thread state accessors, pthread flavour (no OpenMP, no compiler
  TLS): each per-thread value hangs off a pthread-specific key.  If the
  key has not been created yet, or no value is bound for the calling
  thread, the corresponding *_init() helper (declared above) is invoked
  to allocate and bind a fresh instance.
*/
static cos_inline struct cos_functor_context*
cos_functor_context(void)
{
struct cos_functor_context *context;
extern int cos_functor_context_key_init;
extern pthread_key_t cos_functor_context_key;
if (! cos_functor_context_key_init ||
!(context = pthread_getspecific(cos_functor_context_key)))
context = cos_functor_context_init();
return context;
COS_UNUSED(cos_functor_context);
}
/* Method-dispatch caches, one per generic arity (1..5 arguments);
   same lazy key/value initialization pattern as above. */
static cos_inline struct cos_method_cache1*
cos_method_cache1(void)
{
struct cos_method_cache1 *cache;
extern int cos_method_cache1_key_init;
extern pthread_key_t cos_method_cache1_key;
if (! cos_method_cache1_key_init ||
!(cache = pthread_getspecific(cos_method_cache1_key)))
cache = cos_method_cache1_init();
return cache;
COS_UNUSED(cos_method_cache1);
}
static cos_inline struct cos_method_cache2*
cos_method_cache2(void)
{
struct cos_method_cache2 *cache;
extern int cos_method_cache2_key_init;
extern pthread_key_t cos_method_cache2_key;
if (! cos_method_cache2_key_init ||
!(cache = pthread_getspecific(cos_method_cache2_key)))
cache = cos_method_cache2_init();
return cache;
COS_UNUSED(cos_method_cache2);
}
static cos_inline struct cos_method_cache3*
cos_method_cache3(void)
{
struct cos_method_cache3 *cache;
extern int cos_method_cache3_key_init;
extern pthread_key_t cos_method_cache3_key;
if (! cos_method_cache3_key_init ||
!(cache = pthread_getspecific(cos_method_cache3_key)))
cache = cos_method_cache3_init();
return cache;
COS_UNUSED(cos_method_cache3);
}
static cos_inline struct cos_method_cache4*
cos_method_cache4(void)
{
struct cos_method_cache4 *cache;
extern int cos_method_cache4_key_init;
extern pthread_key_t cos_method_cache4_key;
if (! cos_method_cache4_key_init ||
!(cache = pthread_getspecific(cos_method_cache4_key)))
cache = cos_method_cache4_init();
return cache;
COS_UNUSED(cos_method_cache4);
}
static cos_inline struct cos_method_cache5*
cos_method_cache5(void)
{
struct cos_method_cache5 *cache;
extern int cos_method_cache5_key_init;
extern pthread_key_t cos_method_cache5_key;
if (! cos_method_cache5_key_init ||
!(cache = pthread_getspecific(cos_method_cache5_key)))
cache = cos_method_cache5_init();
return cache;
COS_UNUSED(cos_method_cache5);
}
#endif // ------------------------------------------------
/*
  Small inline helpers over the common object header (struct Any):
  identity, class-id and reference-count accessors, plus class lookups
  and cast helpers.  Setters use the comma operator so they can both
  mutate the header and return the object in one expression.
  The unreachable COS_UNUSED(...) after each return references the
  function's own name (presumably to silence unused-static-function
  warnings -- see the COS_UNUSED definition).
*/
static cos_inline OBJ
cos_ident(OBJ obj)
{
return obj;
COS_UNUSED(cos_ident);
}
/* The _id field of the object header names its class (cos_class_get). */
static cos_inline U32
cos_object_id(OBJ obj)
{
return ((struct Any*)obj)->_id;
COS_UNUSED(cos_object_id);
}
static cos_inline OBJ
cos_object_setId(OBJ obj, U32 id)
{
return ((struct Any*)obj)->_id = id, obj;
COS_UNUSED(cos_object_setId);
}
/* Copy obj's class id onto res and mark res as auto-managed. */
static cos_inline OBJ
cos_object_setIdAuto(OBJ res, OBJ obj)
{
((struct Any*)res)->_id = ((struct Any*)obj)->_id;
((struct Any*)res)->_rc = COS_RC_AUTO;
return res;
COS_UNUSED(cos_object_setIdAuto);
}
/* Reference-count (_rc) accessors; no atomicity is provided here. */
static cos_inline I32
cos_object_rc(OBJ obj)
{
return ((struct Any*)obj)->_rc;
COS_UNUSED(cos_object_rc);
}
static cos_inline OBJ
cos_object_setRc(OBJ obj, I32 rc)
{
return ((struct Any*)obj)->_rc = rc, obj;
COS_UNUSED(cos_object_setRc);
}
static cos_inline OBJ
cos_object_incRc(OBJ obj)
{
return ((struct Any*)obj)->_rc++, obj;
COS_UNUSED(cos_object_incRc);
}
static cos_inline OBJ
cos_object_decRc(OBJ obj)
{
return ((struct Any*)obj)->_rc--, obj;
COS_UNUSED(cos_object_decRc);
}
/* Class metadata lookups, built on cos_class_get(id). */
static cos_inline struct Class*
cos_object_class(OBJ obj)
{
return cos_class_get(cos_object_id(obj));
COS_UNUSED(cos_object_class);
}
static cos_inline struct Class*
cos_object_superClass(OBJ obj)
{
return cos_object_class(obj)->spr;
COS_UNUSED(cos_object_superClass);
}
static cos_inline STR
cos_object_className(OBJ obj)
{
return cos_object_class(obj)->str;
COS_UNUSED(cos_object_className);
}
static cos_inline U32
cos_class_id(const struct Class *cls)
{
return cls->Behavior.id;
COS_UNUSED(cos_class_id);
}
static cos_inline U32
cos_generic_id(SEL sel)
{
return sel->Behavior.id;
COS_UNUSED(cos_generic_id);
}
/* Exact-class test: compares ids only, no inheritance walk
   (cos_object_isKindOf is the inheritance-aware check). */
static cos_inline BOOL
cos_object_isa(OBJ obj, const struct Class *cls)
{
return cos_object_id(obj) == cos_class_id(cls);
COS_UNUSED(cos_object_isa);
}
/* Exact cast: NULL unless obj is non-null and exactly of class cls. */
static cos_inline void*
cos_object_cast(OBJ obj, const struct Class *cls)
{
return obj && cos_object_isa(obj, cls) ? obj : 0;
COS_UNUSED(cos_object_cast);
}
/* Throwing variant of cos_object_cast: raises a badcast exception
   (noreturn) instead of yielding NULL. */
static cos_inline void*
cos_object_ecast(OBJ obj, const struct Class *cls, STR file, int line)
{
if (obj && cos_object_isa(obj, cls))
return obj;
cos_exception_badcast(obj, cls, file, line);
COS_UNUSED(cos_object_ecast);
}
/* Dynamic cast: exact class OR any subclass (isKindOf), else NULL. */
static cos_inline void*
cos_object_dyncast(OBJ obj, const struct Class *cls)
{
return obj && (cos_object_isa(obj, cls) || cos_object_isKindOf(obj, cls)) ? obj : 0;
COS_UNUSED(cos_object_dyncast);
}
/* Throwing variant of cos_object_dyncast. */
static cos_inline void*
cos_object_edyncast(OBJ obj, const struct Class *cls, STR file, int line)
{
if (obj && (
cos_object_isa (obj, cls) ||
cos_object_isKindOf(obj, cls)))
return obj;
cos_exception_badcast(obj, cls, file, line);
COS_UNUSED(cos_object_edyncast);
}
/* A size of 0 in the generic-argument descriptor marks an OBJ argument;
   non-object arguments carry their byte size explicitly. */
static cos_inline BOOL
cos_arginfo_isObject(struct cos_generic_arginfo *info)
{
return !info->size;
COS_UNUSED(cos_arginfo_isObject);
}
static cos_inline U32
cos_arginfo_size(struct cos_generic_arginfo *info)
{
return info->size ? info->size : sizeof(OBJ);
COS_UNUSED(cos_arginfo_size);
}
/* Check there is room to PUSH n slots on the functor stack; overflow
   is reported via cos_functor_overflow(). */
static cos_inline struct cos_functor_context*
cos_functor_ensure(int n)
{
struct cos_functor_context *cxt = cos_functor_context();
if (cxt->top + n > cxt->end)
cos_functor_overflow();
return cxt;
COS_UNUSED(cos_functor_ensure);
}
/* Check there are at least n slots to POP; underflow is fatal
   (cos_functor_underflow is declared noreturn above). */
static cos_inline struct cos_functor_context*
cos_functor_require(int n)
{
struct cos_functor_context *cxt = cos_functor_context();
if (cxt->stk + n > cxt->top)
cos_functor_underflow();
return cxt;
COS_UNUSED(cos_functor_require);
}
/* Push a protection record for *obj onto the thread's exception stack
   (LIFO: prv links to the previous top).  Returns a copy of the record.
   Field order matters: ptr must be fully initialized before it becomes
   the new stack top. */
static cos_inline struct cos_exception_protect
cos_exception_protect(struct cos_exception_protect *ptr, OBJ const *obj)
{
struct cos_exception_context *cxt = cos_exception_context();
ptr->prv = cxt->stk;
ptr->obj = obj;
cxt->stk = ptr;
return *ptr;
COS_UNUSED(cos_exception_protect);
}
/* Extended protection records: obj is set to the COS_YES sentinel --
   NOTE(review): apparently the tag that distinguishes extended records
   from plain ones when the stack is unwound; confirm in the unwinder. */
static cos_inline struct cos_exception_extendedProtect
cos_exception_objectProtect(struct cos_exception_extendedProtect *ptr,
OBJ const *alt, FCT1 fct)
{
struct cos_exception_context *cxt = cos_exception_context();
ptr->prv = cxt->stk;
ptr->obj = (OBJ*)COS_YES;
ptr->alt = (void *const*)alt;
ptr->fct = (FCTV)fct;
cxt->stk = (void*)ptr;
return *ptr;
COS_UNUSED(cos_exception_objectProtect);
}
static cos_inline struct cos_exception_extendedProtect
cos_exception_extendedProtect(struct cos_exception_extendedProtect *ptr,
void *const *alt, FCTV fct)
{
struct cos_exception_context *cxt = cos_exception_context();
ptr->prv = cxt->stk;
ptr->obj = (OBJ*)COS_YES;
ptr->alt = alt;
ptr->fct = fct;
cxt->stk = (void*)ptr;
return *ptr;
COS_UNUSED(cos_exception_extendedProtect);
}
#endif // COS_COS_COSAPI_H
|
distort.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD IIIII SSSSS TTTTT OOO RRRR TTTTT %
% D D I SS T O O R R T %
% D D I SSS T O O RRRR T %
% D D I SS T O O R R T %
% DDDD IIIII SSSSS T OOO R R T %
% %
% %
% MagickCore Image Distortion Methods %
% %
% Software Design %
% John Cristy %
% Anthony Thyssen %
% June 2007 %
% %
% %
% Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/colorspace-private.h"
#include "magick/composite-private.h"
#include "magick/distort.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/hashmap.h"
#include "magick/image.h"
#include "magick/list.h"
#include "magick/matrix.h"
#include "magick/memory_.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel.h"
#include "magick/pixel-private.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/registry.h"
#include "magick/semaphore.h"
#include "magick/shear.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/transform.h"
/*
Numerous internal routines for image distortions.
*/
static inline double MagickMin(const double x,const double y)
{
  /* Smaller of two doubles (x wins only when strictly less). */
  if (x < y)
    return(x);
  return(y);
}
static inline double MagickMax(const double x,const double y)
{
  /* Larger of two doubles (x wins only when strictly greater). */
  if (x > y)
    return(x);
  return(y);
}
static inline void AffineArgsToCoefficients(double *affine)
{
  /*
    Permute the external affine argument order sx,ry,rx,sy,tx,ty
    (indexes 0..5) into the internal coefficient order.  Only indexes
    1..4 move (0 and 5 stay put):
      new[1]=old[2], new[2]=old[4], new[3]=old[1], new[4]=old[3].
  */
  const double a1=affine[1];
  const double a2=affine[2];
  const double a3=affine[3];
  const double a4=affine[4];
  affine[1]=a2;
  affine[2]=a4;
  affine[3]=a1;
  affine[4]=a3;
}
static inline void CoefficientsToAffineArgs(double *coeff)
{
  /*
    Inverse permutation of AffineArgsToCoefficients: restore the
    external sx,ry,rx,sy,tx,ty ordering.  Indexes 0 and 5 are untouched:
      new[1]=old[3], new[2]=old[1], new[3]=old[4], new[4]=old[2].
  */
  const double c1=coeff[1];
  const double c2=coeff[2];
  const double c3=coeff[3];
  const double c4=coeff[4];
  coeff[1]=c3;
  coeff[2]=c1;
  coeff[3]=c4;
  coeff[4]=c2;
}
static void InvertAffineCoefficients(const double *coeff,double *inverse)
{
  /*
    Closed-form inverse of the six-coefficient affine map
    u = c0*x + c1*y + c2, v = c3*x + c4*y + c5
    (from "Digital Image Warping" by George Wolberg, page 50).
    No guard against a singular matrix: a zero determinant yields
    infinities, as in the original formulation.
  */
  const double det=1.0/(coeff[0]*coeff[4]-coeff[1]*coeff[3]);
  inverse[0]=coeff[4]*det;
  inverse[1]=(-coeff[1])*det;
  inverse[3]=(-coeff[3])*det;
  inverse[4]=coeff[0]*det;
  inverse[2]=(coeff[1]*coeff[5]-coeff[2]*coeff[4])*det;
  inverse[5]=(coeff[2]*coeff[3]-coeff[0]*coeff[5])*det;
}
static void InvertPerspectiveCoefficients(const double *coeff,
  double *inverse)
{
  /*
    Invert the eight-coefficient perspective mapping
    (from "Digital Image Warping" by George Wolberg, page 53).
    As with the affine inverse, a singular forward map is not guarded
    against.
  */
  const double det=1.0/(coeff[0]*coeff[4]-coeff[3]*coeff[1]);
  inverse[0]=(coeff[4]-coeff[7]*coeff[5])*det;
  inverse[1]=(coeff[7]*coeff[2]-coeff[1])*det;
  inverse[2]=(coeff[1]*coeff[5]-coeff[4]*coeff[2])*det;
  inverse[3]=(coeff[6]*coeff[5]-coeff[3])*det;
  inverse[4]=(coeff[0]-coeff[6]*coeff[2])*det;
  inverse[5]=(coeff[3]*coeff[2]-coeff[0]*coeff[5])*det;
  inverse[6]=(coeff[3]*coeff[7]-coeff[6]*coeff[4])*det;
  inverse[7]=(coeff[6]*coeff[1]-coeff[0]*coeff[7])*det;
}
static inline double MagickRound(double x)
{
  /*
    Round to the nearest integer, halves away from zero, by shifting
    half a unit toward the sign of x and truncating.
  */
  const double shifted = (x >= 0.0) ? x+0.5 : x-0.5;
  return((double) ((ssize_t) shifted));
}
/*
* Polynomial Term Defining Functions
*
* Order must either be an integer, or 1.5 to produce
* the 2 number_valuesal polynomial function...
* affine 1 (3) u = c0 + c1*x + c2*y
* bilinear 1.5 (4) u = '' + c3*x*y
* quadratic 2 (6) u = '' + c4*x*x + c5*y*y
* cubic 3 (10) u = '' + c6*x^3 + c7*x*x*y + c8*x*y*y + c9*y^3
* quartic 4 (15) u = '' + c10*x^4 + ... + c14*y^4
* quintic 5 (21) u = '' + c15*x^5 + ... + c20*y^5
* number in parenthesis minimum number of points needed.
* Anything beyond quintic, has not been implemented until
* a more automated way of determining terms is found.
* Note the slight re-ordering of the terms for a quadratic polynomial
* which is to allow the use of a bi-linear (order=1.5) polynomial.
* All the later polynomials are ordered simply from x^N to y^N
*/
static size_t poly_number_terms(double order)
{
  /*
    Return the number of terms for a 2d polynomial of the given order.
    Valid orders are the integers 1..5 plus the special bi-linear order
    1.5 (see the term table above); any other order returns 0.

    BUG FIX: the old test compared the SIGNED difference
    (order-1.5) > MagickEpsilon, so every non-integer order below 1.5
    (e.g. 1.2) slipped through as if it were valid.  Compare the
    absolute distance from 1.5 instead.
  */
  if ( order < 1 || order > 5 ||
       ( order != floor(order) && fabs(order-1.5) > MagickEpsilon) )
    return 0; /* invalid polynomial order */
  /* (order+1)(order+2)/2 terms; e.g. order 2 -> 6, order 1.5 -> 4 */
  return((size_t) floor((order+1)*(order+2)/2));
}
static double poly_basis_fn(ssize_t n, double x, double y)
{
  /*
    Value of 2d polynomial basis term n at (x,y).  Terms are ordered
    exactly as documented above (constant, affine, bilinear, quadratic,
    ... quintic); any n outside 0..20 yields 0 (should never happen).
  */
  static const int power[21][2] = {  /* {x exponent, y exponent} */
    {0,0}, {1,0}, {0,1},                       /* constant, affine  */
    {1,1},                                     /* bilinear          */
    {2,0}, {0,2},                              /* quadratic         */
    {3,0}, {2,1}, {1,2}, {0,3},                /* cubic             */
    {4,0}, {3,1}, {2,2}, {1,3}, {0,4},         /* quartic           */
    {5,0}, {4,1}, {3,2}, {2,3}, {1,4}, {0,5}   /* quintic           */
  };
  double product;
  ssize_t k;

  if ((n < 0) || (n > 20))
    return( 0 );
  product=1.0;
  for (k=0; k < power[n][0]; k++)
    product*=x;
  for (k=0; k < power[n][1]; k++)
    product*=y;
  return( product );
}
static const char *poly_basis_str(ssize_t n)
{
  /*
    Human-readable factor string for polynomial basis term n ("ii"/"jj"
    stand for the x/y inputs); "UNKNOWN" for any n outside 0..20
    (should never happen).
  */
  static const char *terms[21] = {
    "",                                          /* constant  */
    "*ii", "*jj",                                /* affine    */
    "*ii*jj",                                    /* bilinear  */
    "*ii*ii", "*jj*jj",                          /* quadratic */
    "*ii*ii*ii", "*ii*ii*jj", "*ii*jj*jj", "*jj*jj*jj",          /* cubic */
    "*ii*ii*ii*ii", "*ii*ii*ii*jj", "*ii*ii*jj*jj",
    "*ii*jj*jj*jj", "*jj*jj*jj*jj",                              /* quartic */
    "*ii*ii*ii*ii*ii", "*ii*ii*ii*ii*jj", "*ii*ii*ii*jj*jj",
    "*ii*ii*jj*jj*jj", "*ii*jj*jj*jj*jj", "*jj*jj*jj*jj*jj"      /* quintic */
  };

  if ((n < 0) || (n > 20))
    return( "UNKNOWN" );
  return( terms[n] );
}
static double poly_basis_dx(ssize_t n, double x, double y)
{
  /*
    x-derivative term for polynomial basis term n.  Note that, as in
    the original term list, the integer scale factor is omitted:
    the term for d/dx of x^2 is x (not 2x).  Terms with no x factor
    (and any n outside 0..20) yield 0.
  */
  static const int power[21][2] = {  /* {x exponent, y exponent} */
    {0,0}, {1,0}, {0,1},                       /* constant, affine  */
    {1,1},                                     /* bilinear          */
    {2,0}, {0,2},                              /* quadratic         */
    {3,0}, {2,1}, {1,2}, {0,3},                /* cubic             */
    {4,0}, {3,1}, {2,2}, {1,3}, {0,4},         /* quartic           */
    {5,0}, {4,1}, {3,2}, {2,3}, {1,4}, {0,5}   /* quintic           */
  };
  double product;
  ssize_t k;

  if ((n < 0) || (n > 20) || (power[n][0] == 0))
    return( 0.0 );
  product=1.0;
  for (k=1; k < power[n][0]; k++)  /* x exponent reduced by one */
    product*=x;
  for (k=0; k < power[n][1]; k++)
    product*=y;
  return( product );
}
static double poly_basis_dy(ssize_t n, double x, double y)
{
  /*
    y-derivative term for polynomial basis term n (scale factor
    omitted, as in poly_basis_dx).  For cubic and higher terms the
    y-derivative of term n equals the x-derivative of term n-1, a
    consequence of the x^N..y^N term ordering; only the low-order
    terms (re-ordered to accommodate 'bilinear') need explicit cases.
  */
  if (n == 2)
    return( 1.0 );  /* d/dy of y */
  if (n == 3)
    return( x );    /* d/dy of x*y */
  if (n == 5)
    return( y );    /* d/dy of y*y */
  if (n <= 1 || n == 4)
    return( 0.0 );  /* constant, x, and x*x have no y factor */
  return( poly_basis_dx(n-1,x,y) );
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A f f i n e T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AffineTransformImage() transforms an image as dictated by the affine matrix.
% It allocates the memory necessary for the new Image structure and returns
% a pointer to the new image.
%
% The format of the AffineTransformImage method is:
%
% Image *AffineTransformImage(const Image *image,
% AffineMatrix *affine_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o affine_matrix: the affine matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AffineTransformImage(const Image *image,
  const AffineMatrix *affine_matrix,ExceptionInfo *exception)
{
  double
    distort[6];

  Image
    *deskew_image;

  /*
    Affine transform image: repack the AffineMatrix fields into the
    six-argument coefficient layout and delegate to DistortImage()
    with the AffineProjectionDistortion method (MagickTrue = best-fit
    output geometry).
  */
  assert(image != (Image *) NULL);  /* was missing: image is dereferenced next */
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(affine_matrix != (AffineMatrix *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  distort[0]=affine_matrix->sx;
  distort[1]=affine_matrix->rx;
  distort[2]=affine_matrix->ry;
  distort[3]=affine_matrix->sy;
  distort[4]=affine_matrix->tx;
  distort[5]=affine_matrix->ty;
  deskew_image=DistortImage(image,AffineProjectionDistortion,6,distort,
    MagickTrue,exception);
  return(deskew_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e n e r a t e C o e f f i c i e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GenerateCoefficients() takes user provided input arguments and generates
% the coefficients, needed to apply the specific distortion for either
% distorting images (generally using control points) or generating a color
% gradient from sparsely separated color points.
%
% The format of the GenerateCoefficients() method is:
%
% Image *GenerateCoefficients(const Image *image,DistortImageMethod method,
% const size_t number_arguments,const double *arguments,
% size_t number_values, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be distorted.
%
% o method: the method of image distortion/ sparse gradient
%
% o number_arguments: the number of arguments given.
%
% o arguments: the arguments for this distortion method.
%
% o number_values: the style and format of given control points, (caller type)
% 0: 2 dimensional mapping of control points (Distort)
% Format: u,v,x,y where u,v is the 'source' of the
% the color to be plotted, for DistortImage()
% N: Interpolation of control points with N values (usally r,g,b)
% Format: x,y,r,g,b mapping x,y to color values r,g,b
% IN future, variable number of values may be given (1 to N)
%
% o exception: return any errors or warnings in this structure
%
% Note that the returned array of double values must be freed by the
% calling method using RelinquishMagickMemory(). This however may change in
% the future to require a more 'method' specific method.
%
% Because of this this method should not be classed as stable or used
% outside other MagickCore library methods.
*/
static double *GenerateCoefficients(const Image *image,
  DistortImageMethod *method,const size_t number_arguments,
  const double *arguments,size_t number_values,ExceptionInfo *exception)
{
  double
    *coeff;

  register size_t
    i;

  size_t
    number_coeff, /* number of coefficients to return (array size) */
    cp_size,      /* number floating point numbers per control point */
    cp_x,cp_y,    /* the x,y indexes for control point */
    cp_values;    /* index of values for this control point */
    /* number_values   Number of values given per control point */

  if ( number_values == 0 ) {
    /* Image distortion using control points (or other distortion)
       That is generate a mapping so that   x,y->u,v   given  u,v,x,y
    */
    number_values = 2;   /* special case: two values of u,v */
    cp_values = 0;       /* the values i,j are BEFORE the destination CP x,y */
    cp_x = 2;            /* location of x,y in input control values */
    cp_y = 3;
    /* NOTE: cp_values, also used for later 'reverse map distort' tests */
  }
  else {
    cp_x = 0;            /* location of x,y in input control values */
    cp_y = 1;
    cp_values = 2;       /* and the other values are after x,y */
    /* Typically in this case the values are R,G,B color values */
  }
  cp_size = number_values+2;  /* each CP definition involves this many numbers */

  /* If not enough control point pairs are found for specific distortions
     fall back to Affine distortion (allowing 0 to 3 point pairs)
  */
  if ( number_arguments < 4*cp_size &&
       (  *method == BilinearForwardDistortion
       || *method == BilinearReverseDistortion
       || *method == PerspectiveDistortion
       ) )
    *method = AffineDistortion;

  number_coeff=0;
  switch (*method) {
    case AffineDistortion:
    /* also BarycentricColorInterpolate: */
      number_coeff=3*number_values;
      break;
    case PolynomialDistortion:
      /* number of coefficents depend on the given polynomal 'order' */
      /* FIX: was '<= 1 &&' which, via unsigned wrap-around of
         (number_arguments-1) in the modulo, could let number_arguments==0
         slip through and read arguments[0] out of bounds below.  The intent
         is: error when there is no order argument OR the CP count after the
         order does not divide evenly into control points. */
      if ( number_arguments < 1 || (number_arguments-1)%cp_size != 0)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
                   "InvalidArgument","%s : '%s'","Polynomial",
                   "Invalid number of args: order [CPs]...");
        return((double *) NULL);
      }
      i = poly_number_terms(arguments[0]);
      number_coeff = 2 + i*number_values;
      if ( i == 0 ) {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
                   "InvalidArgument","%s : '%s'","Polynomial",
                   "Invalid order, should be interger 1 to 5, or 1.5");
        return((double *) NULL);
      }
      if ( number_arguments < 1+i*cp_size ) {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
               "InvalidArgument", "%s : 'require at least %.20g CPs'",
               "Polynomial", (double) i);
        return((double *) NULL);
      }
      break;
    case BilinearReverseDistortion:
      number_coeff=4*number_values;
      break;
    /*
      The rest are constants as they are only used for image distorts
    */
    case BilinearForwardDistortion:
      number_coeff=10;  /* 2*4 coeff plus 2 constants */
      cp_x = 0;         /* Reverse src/dest coords for forward mapping */
      cp_y = 1;
      cp_values = 2;
      break;
#if 0
    case QuadraterialDistortion:
      number_coeff=19; /* BilinearForward + BilinearReverse */
#endif
      /* NOTE: this 'break' is dead code while the #if 0 above is disabled;
         it becomes the break for QuadraterialDistortion if re-enabled. */
      break;
    case ShepardsDistortion:
      number_coeff=1;  /* not used, but provide some type of return */
      break;
    case ArcDistortion:
      number_coeff=5;
      break;
    case ScaleRotateTranslateDistortion:
    case AffineProjectionDistortion:
    case Plane2CylinderDistortion:
    case Cylinder2PlaneDistortion:
      number_coeff=6;
      break;
    case PolarDistortion:
    case DePolarDistortion:
      number_coeff=8;
      break;
    case PerspectiveDistortion:
    case PerspectiveProjectionDistortion:
      number_coeff=9;
      break;
    case BarrelDistortion:
    case BarrelInverseDistortion:
      number_coeff=10;
      break;
    default:
      assert(! "Unknown Method Given"); /* just fail assertion */
  }

  /* allocate the array of coefficients needed */
  coeff = (double *) AcquireQuantumMemory(number_coeff,sizeof(*coeff));
  if (coeff == (double *) NULL) {
    (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed",
                  "%s", "GenerateCoefficients");
    return((double *) NULL);
  }

  /* zero out coefficients array */
  for (i=0; i < number_coeff; i++)
    coeff[i] = 0.0;

  switch (*method)
  {
    case AffineDistortion:
    {
      /* Affine Distortion
           v =  c0*x + c1*y + c2
         for each 'value' given

         Input Arguments are sets of control points...
         For Distort Images    u,v, x,y  ...
         For Sparse Gradients  x,y, r,g,b  ...
      */
      if ( number_arguments%cp_size != 0 ||
           number_arguments < cp_size ) {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
               "InvalidArgument", "%s : 'require at least %.20g CPs'",
               "Affine", 1.0);
        coeff=(double *) RelinquishMagickMemory(coeff);
        return((double *) NULL);
      }
      /* handle special cases of not enough arguments */
      if ( number_arguments == cp_size ) {
        /* Only 1 CP Set Given */
        if ( cp_values == 0 ) {
          /* image distortion - translate the image */
          coeff[0] = 1.0;
          coeff[2] = arguments[0] - arguments[2];
          coeff[4] = 1.0;
          coeff[5] = arguments[1] - arguments[3];
        }
        else {
          /* sparse gradient - use the values directly */
          for (i=0; i<number_values; i++)
            coeff[i*3+2] = arguments[cp_values+i];
        }
      }
      else {
        /* 2 or more points (usally 3) given.
           Solve a least squares simultaneous equation for coefficients.
        */
        double
          **matrix,
          **vectors,
          terms[3];

        MagickBooleanType
          status;

        /* create matrix, and a fake vectors matrix */
        matrix = AcquireMagickMatrix(3UL,3UL);
        vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
        if (matrix == (double **) NULL || vectors == (double **) NULL)
        {
          matrix  = RelinquishMagickMatrix(matrix, 3UL);
          vectors = (double **) RelinquishMagickMemory(vectors);
          coeff   = (double *) RelinquishMagickMemory(coeff);
          (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed",
                  "%s", "DistortCoefficients");
          return((double *) NULL);
        }
        /* fake a number_values x3 vectors matrix from coefficients array */
        for (i=0; i < number_values; i++)
          vectors[i] = &(coeff[i*3]);
        /* Add given control point pairs for least squares solving */
        for (i=0; i < number_arguments; i+=cp_size) {
          terms[0] = arguments[i+cp_x];  /* x */
          terms[1] = arguments[i+cp_y];  /* y */
          terms[2] = 1;                  /* 1 */
          LeastSquaresAddTerms(matrix,vectors,terms,
                   &(arguments[i+cp_values]),3UL,number_values);
        }
        if ( number_arguments == 2*cp_size ) {
          /* Only two pairs were given, but we need 3 to solve the affine.
             Fake extra coordinates by rotating p1 around p0 by 90 degrees.
               x2 = x0 - (y1-y0)   y2 = y0 + (x1-x0)
           */
          terms[0] = arguments[cp_x]
                   - ( arguments[cp_size+cp_y] - arguments[cp_y] ); /* x2 */
          terms[1] = arguments[cp_y] +
                   + ( arguments[cp_size+cp_x] - arguments[cp_x] ); /* y2 */
          terms[2] = 1;                                             /* 1 */
          if ( cp_values == 0 ) {
            /* Image Distortion - rotate the u,v coordients too */
            double
              uv2[2];
            uv2[0] = arguments[0] - arguments[5] + arguments[1];   /* u2 */
            uv2[1] = arguments[1] + arguments[4] - arguments[0];   /* v2 */
            LeastSquaresAddTerms(matrix,vectors,terms,uv2,3UL,2UL);
          }
          else {
            /* Sparse Gradient - use values of p0 for linear gradient */
            LeastSquaresAddTerms(matrix,vectors,terms,
                  &(arguments[cp_values]),3UL,number_values);
          }
        }
        /* Solve for LeastSquares Coefficients */
        status=GaussJordanElimination(matrix,vectors,3UL,number_values);
        matrix = RelinquishMagickMatrix(matrix, 3UL);
        vectors = (double **) RelinquishMagickMemory(vectors);
        if ( status == MagickFalse ) {
          coeff = (double *) RelinquishMagickMemory(coeff);
          (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
                      "InvalidArgument","%s : 'Unsolvable Matrix'",
                      CommandOptionToMnemonic(MagickDistortOptions, *method) );
          return((double *) NULL);
        }
      }
      return(coeff);
    }
    case AffineProjectionDistortion:
    {
      /*
        Arguments: Affine Matrix (forward mapping)
        Arguments  sx, rx, ry, sy, tx, ty
        Where      u = sx*x + ry*y + tx
                   v = rx*x + sy*y + ty

        Returns coefficients (in there inverse form) ordered as...
               sx ry tx  rx sy ty

        AffineProjection Distortion Notes...
          + Will only work with a 2 number_values for Image Distortion
          + Can not be used for generating a sparse gradient (interpolation)
      */
      double inverse[8];
      if (number_arguments != 6) {
        coeff = (double *) RelinquishMagickMemory(coeff);
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument","%s : 'Needs 6 coeff values'",
              CommandOptionToMnemonic(MagickDistortOptions, *method) );
        return((double *) NULL);
      }
      /* FUTURE: trap test for sx*sy-rx*ry == 0 (determinant = 0, no inverse) */
      for(i=0; i<6UL; i++ )
        inverse[i] = arguments[i];
      AffineArgsToCoefficients(inverse); /* map into coefficents */
      InvertAffineCoefficients(inverse, coeff); /* invert */
      *method = AffineDistortion;

      return(coeff);
    }
    case ScaleRotateTranslateDistortion:
    {
      /* Scale, Rotate and Translate Distortion
         An alternative Affine Distortion
         Argument options, by number of arguments given:
           7: x,y, sx,sy, a, nx,ny
           6: x,y,   s,   a, nx,ny
           5: x,y, sx,sy, a
           4: x,y,   s,   a
           3: x,y,        a
           2:        s,   a
           1:             a
         Where actions are (in order of application)
            x,y     'center' of transforms     (default = image center)
            sx,sy   scale image by this amount (default = 1)
            a       angle of rotation          (argument required)
            nx,ny   move 'center' here         (default = x,y or no movement)
         And convert to affine mapping coefficients

         ScaleRotateTranslate Distortion Notes...
           + Does not use a set of CPs in any normal way
           + Will only work with a 2 number_valuesal Image Distortion
           + Cannot be used for generating a sparse gradient (interpolation)
      */
      double
        cosine, sine,
        x,y,sx,sy,a,nx,ny;

      /* set default center, and default scale */
      x = nx = (double)(image->columns)/2.0 + (double)image->page.x;
      y = ny = (double)(image->rows)/2.0    + (double)image->page.y;
      sx = sy = 1.0;
      switch ( number_arguments ) {
      case 0:
        coeff = (double *) RelinquishMagickMemory(coeff);
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument","%s : 'Needs at least 1 argument'",
              CommandOptionToMnemonic(MagickDistortOptions, *method) );
        return((double *) NULL);
      case 1:
        a = arguments[0];
        break;
      case 2:
        sx = sy = arguments[0];
        a = arguments[1];
        break;
      default:
        x = nx = arguments[0];
        y = ny = arguments[1];
        switch ( number_arguments ) {
        case 3:
          a = arguments[2];
          break;
        case 4:
          sx = sy = arguments[2];
          a = arguments[3];
          break;
        case 5:
          sx = arguments[2];
          sy = arguments[3];
          a = arguments[4];
          break;
        case 6:
          sx = sy = arguments[2];
          a = arguments[3];
          nx = arguments[4];
          ny = arguments[5];
          break;
        case 7:
          sx = arguments[2];
          sy = arguments[3];
          a = arguments[4];
          nx = arguments[5];
          ny = arguments[6];
          break;
        default:
          coeff = (double *) RelinquishMagickMemory(coeff);
          (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
                "InvalidArgument","%s : 'Too Many Arguments (7 or less)'",
                CommandOptionToMnemonic(MagickDistortOptions, *method) );
          return((double *) NULL);
        }
        break;
      }
      /* Trap if sx or sy == 0 -- image is scaled out of existance! */
      if ( fabs(sx) < MagickEpsilon || fabs(sy) < MagickEpsilon ) {
        coeff = (double *) RelinquishMagickMemory(coeff);
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument","%s : 'Zero Scale Given'",
              CommandOptionToMnemonic(MagickDistortOptions, *method) );
        return((double *) NULL);
      }
      /* Save the given arguments as an affine distortion */
      a=DegreesToRadians(a); cosine=cos(a); sine=sin(a);

      *method = AffineDistortion;
      coeff[0]=cosine/sx;
      coeff[1]=sine/sx;
      coeff[2]=x-nx*coeff[0]-ny*coeff[1];
      coeff[3]=(-sine)/sy;
      coeff[4]=cosine/sy;
      coeff[5]=y-nx*coeff[3]-ny*coeff[4];
      return(coeff);
    }
    case PerspectiveDistortion:
    { /*
         Perspective Distortion (a ratio of affine distortions)

                p(x,y)    c0*x + c1*y + c2
            u = ------ = ------------------
                r(x,y)    c6*x + c7*y + 1

                q(x,y)    c3*x + c4*y + c5
            v = ------ = ------------------
                r(x,y)    c6*x + c7*y + 1

           c8 = Sign of 'r', or the denominator affine, for the actual image.
                This determines what part of the distorted image is 'ground'
                side of the horizon, the other part is 'sky' or invalid.
                Valid values are  +1.0  or  -1.0  only.

         Input Arguments are sets of control points...
         For Distort Images    u,v, x,y  ...
         For Sparse Gradients  x,y, r,g,b  ...

         Perspective Distortion Notes...
           + Can be thought of as ratio of  3 affine transformations
           + Not separatable: r() or c6 and c7 are used by both equations
           + All 8 coefficients must be determined simultaniously
           + Will only work with a 2 number_valuesal Image Distortion
           + Can not be used for generating a sparse gradient (interpolation)
           + It is not linear, but is simple to generate an inverse
           + All lines within an image remain lines.
           + but distances between points may vary.
      */
      double
        **matrix,
        *vectors[1],
        terms[8];

      size_t
        cp_u = cp_values,
        cp_v = cp_values+1;

      MagickBooleanType
        status;

      if ( number_arguments%cp_size != 0 ||
           number_arguments < cp_size*4 ) {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument", "%s : 'require at least %.20g CPs'",
              CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
        coeff=(double *) RelinquishMagickMemory(coeff);
        return((double *) NULL);
      }
      /* fake 1x8 vectors matrix directly using the coefficients array */
      vectors[0] = &(coeff[0]);
      /* 8x8 least-squares matrix (zeroed) */
      matrix = AcquireMagickMatrix(8UL,8UL);
      if (matrix == (double **) NULL) {
        coeff = (double *) RelinquishMagickMemory(coeff);  /* FIX: was leaked */
        (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed",
                  "%s", "DistortCoefficients");
        return((double *) NULL);
      }
      /* Add control points for least squares solving */
      /* NOTE(review): steps by 4 (u,v,x,y) rather than cp_size; perspective
         is only valid for 2-value image distortion (see notes above). */
      for (i=0; i < number_arguments; i+=4) {
        terms[0]=arguments[i+cp_x];            /* c0*x */
        terms[1]=arguments[i+cp_y];            /* c1*y */
        terms[2]=1.0;                          /* c2*1 */
        terms[3]=0.0;
        terms[4]=0.0;
        terms[5]=0.0;
        terms[6]=-terms[0]*arguments[i+cp_u];  /* 1/(c6*x) */
        terms[7]=-terms[1]*arguments[i+cp_u];  /* 1/(c7*y) */
        LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_u]),
            8UL,1UL);

        terms[0]=0.0;
        terms[1]=0.0;
        terms[2]=0.0;
        terms[3]=arguments[i+cp_x];           /* c3*x */
        terms[4]=arguments[i+cp_y];           /* c4*y */
        terms[5]=1.0;                         /* c5*1 */
        terms[6]=-terms[3]*arguments[i+cp_v]; /* 1/(c6*x) */
        terms[7]=-terms[4]*arguments[i+cp_v]; /* 1/(c7*y) */
        LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_v]),
            8UL,1UL);
      }
      /* Solve for LeastSquares Coefficients */
      status=GaussJordanElimination(matrix,vectors,8UL,1UL);
      matrix = RelinquishMagickMatrix(matrix, 8UL);
      if ( status == MagickFalse ) {
        coeff = (double *) RelinquishMagickMemory(coeff);
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument","%s : 'Unsolvable Matrix'",
              CommandOptionToMnemonic(MagickDistortOptions, *method) );
        return((double *) NULL);
      }
      /*
        Calculate 9'th coefficient! The ground-sky determination.
        What is sign of the 'ground' in r() denominator affine function?
        Just use any valid image coordinate (first control point) in
        destination for determination of what part of view is 'ground'.
      */
      coeff[8] = coeff[6]*arguments[cp_x]
                      + coeff[7]*arguments[cp_y] + 1.0;
      coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;

      return(coeff);
    }
    case PerspectiveProjectionDistortion:
    {
      /*
        Arguments: Perspective Coefficents (forward mapping)
      */
      if (number_arguments != 8) {
        coeff = (double *) RelinquishMagickMemory(coeff);  /* FIX: was leaked */
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument", "%s : 'Needs 8 coefficient values'",
              CommandOptionToMnemonic(MagickDistortOptions, *method));
        return((double *) NULL);
      }
      /* FUTURE: trap test  c0*c4-c3*c1 == 0  (determinate = 0, no inverse) */
      InvertPerspectiveCoefficients(arguments, coeff);
      /*
        Calculate 9'th coefficient! The ground-sky determination.
        What is sign of the 'ground' in r() denominator affine function?
        Just use any valid image cocodinate in destination for determination.
        For a forward mapped perspective the images 0,0 coord will map to
        c2,c5 in the distorted image, so set the sign of denominator of that.
      */
      coeff[8] = coeff[6]*arguments[2]
                      + coeff[7]*arguments[5] + 1.0;
      coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;
      *method = PerspectiveDistortion;

      return(coeff);
    }
    case BilinearForwardDistortion:
    case BilinearReverseDistortion:
    {
      /* Bilinear Distortion (Forward mapping)
            v = c0*x + c1*y + c2*x*y + c3;
         for each 'value' given

         This is actually a simple polynomial Distortion!  The difference
         however is when we need to reverse the above equation to generate a
         BilinearForwardDistortion (see below).

         Input Arguments are sets of control points...
         For Distort Images    u,v, x,y  ...
         For Sparse Gradients  x,y, r,g,b  ...
      */
      double
        **matrix,
        **vectors,
        terms[4];

      MagickBooleanType
        status;

      /* check the number of arguments */
      if ( number_arguments%cp_size != 0 ||
           number_arguments < cp_size*4 ) {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument", "%s : 'require at least %.20g CPs'",
              CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
        coeff=(double *) RelinquishMagickMemory(coeff);
        return((double *) NULL);
      }
      /* create matrix, and a fake vectors matrix */
      matrix = AcquireMagickMatrix(4UL,4UL);
      vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
      if (matrix == (double **) NULL || vectors == (double **) NULL)
      {
        matrix  = RelinquishMagickMatrix(matrix, 4UL);
        vectors = (double **) RelinquishMagickMemory(vectors);
        coeff   = (double *) RelinquishMagickMemory(coeff);
        (void) ThrowMagickException(exception,GetMagickModule(),
                ResourceLimitError,"MemoryAllocationFailed",
                "%s", "DistortCoefficients");
        return((double *) NULL);
      }
      /* fake a number_values x4 vectors matrix from coefficients array */
      for (i=0; i < number_values; i++)
        vectors[i] = &(coeff[i*4]);
      /* Add given control point pairs for least squares solving */
      for (i=0; i < number_arguments; i+=cp_size) {
        terms[0] = arguments[i+cp_x];   /*  x  */
        terms[1] = arguments[i+cp_y];   /*  y  */
        terms[2] = terms[0]*terms[1];   /* x*y */
        terms[3] = 1;                   /*  1  */
        LeastSquaresAddTerms(matrix,vectors,terms,
             &(arguments[i+cp_values]),4UL,number_values);
      }
      /* Solve for LeastSquares Coefficients */
      status=GaussJordanElimination(matrix,vectors,4UL,number_values);
      matrix = RelinquishMagickMatrix(matrix, 4UL);
      vectors = (double **) RelinquishMagickMemory(vectors);
      if ( status == MagickFalse ) {
        coeff = (double *) RelinquishMagickMemory(coeff);
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument","%s : 'Unsolvable Matrix'",
              CommandOptionToMnemonic(MagickDistortOptions, *method) );
        return((double *) NULL);
      }
      if ( *method == BilinearForwardDistortion ) {
         /* Bilinear Forward Mapped Distortion

         The above least-squares solved for coefficents but in the forward
         direction, due to changes to indexing constants.

            i = c0*x + c1*y + c2*x*y + c3;
            j = c4*x + c5*y + c6*x*y + c7;

         where i,j are in the destination image, NOT the source.

         Reverse Pixel mapping however needs to use reverse of these
         functions.  It required a full page of algbra to work out the
         reversed mapping formula, but resolves down to the following...

            c8 = c0*c5-c1*c4;
            c9 = 2*(c2*c5-c1*c6);   // '2*a' in the quadratic formula

            i = i - c3;   j = j - c7;
            b = c6*i - c2*j + c8;   // So that   a*y^2 + b*y + c == 0
            c = c4*i -  c0*j;       // y = ( -b +- sqrt(bb - 4ac) ) / (2*a)

            r = b*b - c9*(c+c);
            if ( c9 != 0 )
              y = ( -b + sqrt(r) ) / c9;
            else
              y = -c/b;

            x = ( i - c1*y) / ( c1 - c2*y );

         NB: if 'r' is negative there is no solution!
         NB: the sign of the sqrt() should be negative if image becomes
             flipped or flopped, or crosses over itself.
         NB: techniqually coefficient c5 is not needed, anymore,
             but kept for completness.

         See Anthony Thyssen <A.Thyssen@griffith.edu.au>
         or  Fred Weinhaus <fmw@alink.net>  for more details.

         */
         coeff[8] = coeff[0]*coeff[5] - coeff[1]*coeff[4];
         coeff[9] = 2*(coeff[2]*coeff[5] - coeff[1]*coeff[6]);
      }
      return(coeff);
    }
#if 0
    case QuadrilateralDistortion:
    {
      /* Map a Quadrilateral to a unit square using BilinearReverse
         Then map that unit square back to the final Quadrilateral
         using BilinearForward.

         Input Arguments are sets of control points...
         For Distort Images    u,v, x,y  ...
         For Sparse Gradients  x,y, r,g,b  ...

      */
      /* UNDER CONSTRUCTION */
      return(coeff);
    }
#endif
    case PolynomialDistortion:
    {
      /* Polynomial Distortion

         First two coefficents are used to hole global polynomal information
           c0 = Order of the polynimial being created
           c1 = number_of_terms in one polynomial equation

         Rest of the coefficients map to the equations....
            v = c0 + c1*x + c2*y + c3*x*y + c4*x^2 + c5*y^2 + c6*x^3 + ...
         for each 'value' (number_values of them) given.
         As such total coefficients =  2 + number_terms * number_values

         Input Arguments are sets of control points...
         For Distort Images    order  [u,v, x,y] ...
         For Sparse Gradients  order  [x,y, r,g,b] ...

         Polynomial Distortion Notes...
           + UNDER DEVELOPMENT -- Do not expect this to remain as is.
           + Currently polynomial is a reversed mapped distortion.
           + Order 1.5 is fudged to map into a bilinear distortion.
             though it is not the same order as that distortion.
      */
      double
        **matrix,
        **vectors,
        *terms;

      size_t
        nterms;   /* number of polynomial terms per number_values */

      register ssize_t
        j;

      MagickBooleanType
        status;

      /* first two coefficients hold polynomial order information */
      coeff[0] = arguments[0];
      coeff[1] = (double) poly_number_terms(arguments[0]);
      nterms = (size_t) coeff[1];

      /* create matrix, a fake vectors matrix, and least sqs terms */
      matrix = AcquireMagickMatrix(nterms,nterms);
      vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
      terms = (double *) AcquireQuantumMemory(nterms, sizeof(*terms));
      if (matrix  == (double **) NULL ||
          vectors == (double **) NULL ||
          terms   == (double *) NULL )
      {
        matrix  = RelinquishMagickMatrix(matrix, nterms);
        vectors = (double **) RelinquishMagickMemory(vectors);
        terms   = (double *) RelinquishMagickMemory(terms);
        coeff   = (double *) RelinquishMagickMemory(coeff);
        (void) ThrowMagickException(exception,GetMagickModule(),
                ResourceLimitError,"MemoryAllocationFailed",
                "%s", "DistortCoefficients");
        return((double *) NULL);
      }
      /* fake a number_values x3 vectors matrix from coefficients array */
      for (i=0; i < number_values; i++)
        vectors[i] = &(coeff[2+i*nterms]);
      /* Add given control point pairs for least squares solving */
      for (i=1; i < number_arguments; i+=cp_size) { /* NB: start = 1 not 0 */
        for (j=0; j < (ssize_t) nterms; j++)
          terms[j] = poly_basis_fn(j,arguments[i+cp_x],arguments[i+cp_y]);
        LeastSquaresAddTerms(matrix,vectors,terms,
             &(arguments[i+cp_values]),nterms,number_values);
      }
      terms = (double *) RelinquishMagickMemory(terms);
      /* Solve for LeastSquares Coefficients */
      status=GaussJordanElimination(matrix,vectors,nterms,number_values);
      matrix  = RelinquishMagickMatrix(matrix, nterms);
      vectors = (double **) RelinquishMagickMemory(vectors);
      if ( status == MagickFalse ) {
        coeff = (double *) RelinquishMagickMemory(coeff);
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument","%s : 'Unsolvable Matrix'",
              CommandOptionToMnemonic(MagickDistortOptions, *method) );
        return((double *) NULL);
      }
      return(coeff);
    }
    case ArcDistortion:
    {
      /* Arc Distortion
         Args: arc_width  rotate  top_edge_radius  bottom_edge_radius
        All but first argument are optional
           arc_width      The angle over which to arc the image side-to-side
           rotate         Angle to rotate image from vertical center
           top_radius     Set top edge of source image at this radius
           bottom_radius  Set bootom edge to this radius (radial scaling)

        By default, if the radii arguments are nor provided the image radius
        is calculated so the horizontal center-line is fits the given arc
        without scaling.

        The output image size is ALWAYS adjusted to contain the whole image,
        and an offset is given to position image relative to the 0,0 point of
        the origin, allowing users to use relative positioning onto larger
        background (via -flatten).

        The arguments are converted to these coefficients
            c0: angle for center of source image
            c1: angle scale for mapping to source image
            c2: radius for top of source image
            c3: radius scale for mapping source image
            c4: centerline of arc within source image

        Note the coefficients use a center angle, so asymptotic join is
        furthest from both sides of the source image. This also means that
        for arc angles greater than 360 the sides of the image will be
        trimmed equally.

        Arc Distortion Notes...
          + Does not use a set of CPs
          + Will only work with Image Distortion
          + Can not be used for generating a sparse gradient (interpolation)
      */
      if ( number_arguments >= 1 && arguments[0] < MagickEpsilon ) {
        coeff = (double *) RelinquishMagickMemory(coeff);
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument","%s : 'Arc Angle Too Small'",
              CommandOptionToMnemonic(MagickDistortOptions, *method) );
        return((double *) NULL);
      }
      if ( number_arguments >= 3 && arguments[2] < MagickEpsilon ) {
        coeff = (double *) RelinquishMagickMemory(coeff);
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument","%s : 'Outer Radius Too Small'",
              CommandOptionToMnemonic(MagickDistortOptions, *method) );
        return((double *) NULL);
      }
      coeff[0] = -MagickPI2; /* -90, place at top! */
      if ( number_arguments >= 1 )
        coeff[1] = DegreesToRadians(arguments[0]);
      else
        coeff[1] = MagickPI2; /* zero arguments - center is at top */
      if ( number_arguments >= 2 )
        coeff[0] += DegreesToRadians(arguments[1]);
      coeff[0] /= Magick2PI;  /* normalize radians */
      coeff[0] -= MagickRound(coeff[0]);
      coeff[0] *= Magick2PI;  /* de-normalize back to radians */
      coeff[3] = (double)image->rows-1;
      coeff[2] = (double)image->columns/coeff[1] + coeff[3]/2.0;
      if ( number_arguments >= 3 ) {
        if ( number_arguments >= 4 )
          coeff[3] = arguments[2] - arguments[3];
        else
          coeff[3] *= arguments[2]/coeff[2];
        coeff[2] = arguments[2];
      }
      coeff[4] = ((double)image->columns-1.0)/2.0;

      return(coeff);
    }
    case PolarDistortion:
    case DePolarDistortion:
    {
      /* (De)Polar Distortion   (same set of arguments)
         Args:  Rmax, Rmin,  Xcenter,Ycenter,  Afrom,Ato
         DePolar can also have the extra arguments of Width, Height

         Coefficients 0 to 5 is the sanatized version first 6 input args
         Coefficient 6  is the angle to coord ratio  and visa-versa
         Coefficient 7  is the radius to coord ratio and visa-versa

         WARNING: It is possible for  Radius max<min  and/or  Angle from>to
      */
      if ( number_arguments == 3
          || ( number_arguments > 6 && *method == PolarDistortion )
          || number_arguments > 8 ) {
          (void) ThrowMagickException(exception,GetMagickModule(),
            OptionError,"InvalidArgument", "%s : number of arguments",
            CommandOptionToMnemonic(MagickDistortOptions, *method) );
        coeff=(double *) RelinquishMagickMemory(coeff);
        return((double *) NULL);
      }
      /* Rmax -  if 0 calculate appropriate value */
      if ( number_arguments >= 1 )
        coeff[0] = arguments[0];
      else
        coeff[0] = 0.0;
      /* Rmin  - usally 0 */
      coeff[1] = number_arguments >= 2 ? arguments[1] : 0.0;
      /* Center X,Y */
      if ( number_arguments >= 4 ) {
        coeff[2] = arguments[2];
        coeff[3] = arguments[3];
      }
      else { /* center of actual image */
        coeff[2] = (double)(image->columns)/2.0+image->page.x;
        coeff[3] = (double)(image->rows)/2.0+image->page.y;
      }
      /* Angle from,to - about polar center 0 is downward */
      coeff[4] = -MagickPI;
      if ( number_arguments >= 5 )
        coeff[4] = DegreesToRadians(arguments[4]);
      coeff[5] = coeff[4];
      if ( number_arguments >= 6 )
        coeff[5] = DegreesToRadians(arguments[5]);
      if ( fabs(coeff[4]-coeff[5]) < MagickEpsilon )
        coeff[5] += Magick2PI; /* same angle is a full circle */
      /* if radius 0 or negative,  its a special value... */
      if ( coeff[0] < MagickEpsilon ) {
        /* Use closest edge  if radius == 0 */
        if ( fabs(coeff[0]) < MagickEpsilon ) {
          coeff[0]=MagickMin(fabs(coeff[2]-image->page.x),
                             fabs(coeff[3]-image->page.y));
          coeff[0]=MagickMin(coeff[0],
                       fabs(coeff[2]-image->page.x-image->columns));
          coeff[0]=MagickMin(coeff[0],
                       fabs(coeff[3]-image->page.y-image->rows));
        }
        /* furthest diagonal if radius == -1 */
        if ( fabs(-1.0-coeff[0]) < MagickEpsilon ) {
          double rx,ry;
          rx = coeff[2]-image->page.x;
          ry = coeff[3]-image->page.y;
          coeff[0] = rx*rx+ry*ry;
          ry = coeff[3]-image->page.y-image->rows;
          coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
          rx = coeff[2]-image->page.x-image->columns;
          coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
          ry = coeff[3]-image->page.y;
          coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry);
          coeff[0] = sqrt(coeff[0]);
        }
      }
      /* IF Rmax <= 0 or Rmin < 0 OR Rmax < Rmin, THEN error */
      if ( coeff[0] < MagickEpsilon || coeff[1] < -MagickEpsilon
           || (coeff[0]-coeff[1]) < MagickEpsilon ) {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument", "%s : Invalid Radius",
              CommandOptionToMnemonic(MagickDistortOptions, *method) );
        coeff=(double *) RelinquishMagickMemory(coeff);
        return((double *) NULL);
      }
      /* converstion ratios */
      if ( *method == PolarDistortion ) {
        coeff[6]=(double) image->columns/(coeff[5]-coeff[4]);
        coeff[7]=(double) image->rows/(coeff[0]-coeff[1]);
      }
      else { /* *method == DePolarDistortion */
        coeff[6]=(coeff[5]-coeff[4])/image->columns;
        coeff[7]=(coeff[0]-coeff[1])/image->rows;
      }
      return(coeff);
    }
    case Cylinder2PlaneDistortion:
    case Plane2CylinderDistortion:
    {
      /* 3D Cylinder to/from a Tangential Plane

         Projection between a clinder and flat plain from a point on the
         center line of the cylinder.

         The two surfaces coincide in 3D space at the given centers of
         distortion (perpendicular to projection point) on both images.

         Args:  FOV_arc_width
         Coefficents: FOV(radians), Radius, center_x,y, dest_center_x,y

         FOV (Field Of View) the angular field of view of the distortion,
         across the width of the image, in degrees.  The centers are the
         points of least distortion in the input and resulting images.

         These centers are however determined later.

         Coeff 0 is the FOV angle of view of image width in radians
         Coeff 1 is calculated radius of cylinder.
         Coeff 2,3  center of distortion of input image
         Coefficents 4,5  Center of Distortion of dest (determined later)
      */
      if ( arguments[0] < MagickEpsilon || arguments[0] > 160.0 ) {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument", "%s : Invalid FOV Angle",
              CommandOptionToMnemonic(MagickDistortOptions, *method) );
        coeff=(double *) RelinquishMagickMemory(coeff);
        return((double *) NULL);
      }
      coeff[0] = DegreesToRadians(arguments[0]);
      if ( *method == Cylinder2PlaneDistortion )
        /* image is curved around cylinder, so FOV angle (in radians)
         * scales directly to image X coordinate, according to its radius.
         */
        coeff[1] = image->columns/coeff[0];
      else
        /* radius is distance away from an image with this angular FOV */
        coeff[1] = image->columns / ( 2 * tan(coeff[0]/2) );

      coeff[2] = (double)(image->columns)/2.0+image->page.x;
      coeff[3] = (double)(image->rows)/2.0+image->page.y;
      coeff[4] = coeff[2];
      coeff[5] = coeff[3]; /* assuming image size is the same */
      return(coeff);
    }
    case BarrelDistortion:
    case BarrelInverseDistortion:
    {
      /* Barrel Distortion
           Rs=(A*Rd^3 + B*Rd^2 + C*Rd + D)*Rd
         BarrelInv Distortion
           Rs=Rd/(A*Rd^3 + B*Rd^2 + C*Rd + D)

        Where Rd is the normalized radius from corner to middle of image
        Input Arguments are one of the following forms (number of arguments)...
            3:  A,B,C
            4:  A,B,C,D
            5:  A,B,C    X,Y
            6:  A,B,C,D  X,Y
            8:  Ax,Bx,Cx,Dx  Ay,By,Cy,Dy
           10:  Ax,Bx,Cx,Dx  Ay,By,Cy,Dy   X,Y

        Returns 10 coefficent values, which are de-normalized (pixel scale)
          Ax, Bx, Cx, Dx,   Ay, By, Cy, Dy,    Xc, Yc
      */
      /* Radius de-normalization scaling factor */
      double
        rscale = 2.0/MagickMin((double) image->columns,(double) image->rows);

      /* sanity check  number of args must = 3,4,5,6,8,10 or error */
      if ( (number_arguments  < 3) || (number_arguments == 7) ||
           (number_arguments == 9) || (number_arguments > 10) )
        {
          coeff=(double *) RelinquishMagickMemory(coeff);
          (void) ThrowMagickException(exception,GetMagickModule(),
            OptionError,"InvalidArgument", "%s : number of arguments",
            CommandOptionToMnemonic(MagickDistortOptions, *method) );
          return((double *) NULL);
        }
      /* A,B,C,D coefficients */
      coeff[0] = arguments[0];
      coeff[1] = arguments[1];
      coeff[2] = arguments[2];
      if ((number_arguments == 3) || (number_arguments == 5) )
        coeff[3] = 1.0 - coeff[0] - coeff[1] - coeff[2];
      else
        coeff[3] = arguments[3];
      /* de-normalize the coefficients */
      coeff[0] *= pow(rscale,3.0);
      coeff[1] *= rscale*rscale;
      coeff[2] *= rscale;
      /* Y coefficients: as given OR same as X coefficients */
      if ( number_arguments >= 8 ) {
        coeff[4] = arguments[4] * pow(rscale,3.0);
        coeff[5] = arguments[5] * rscale*rscale;
        coeff[6] = arguments[6] * rscale;
        coeff[7] = arguments[7];
      }
      else {
        coeff[4] = coeff[0];
        coeff[5] = coeff[1];
        coeff[6] = coeff[2];
        coeff[7] = coeff[3];
      }
      /* X,Y Center of Distortion (image coodinates) */
      if ( number_arguments == 5 )  {
        coeff[8] = arguments[3];
        coeff[9] = arguments[4];
      }
      else if ( number_arguments == 6 ) {
        coeff[8] = arguments[4];
        coeff[9] = arguments[5];
      }
      else if ( number_arguments == 10 ) {
        coeff[8] = arguments[8];
        coeff[9] = arguments[9];
      }
      else {
        /* center of the image provided (image coodinates) */
        coeff[8] = (double)image->columns/2.0 + image->page.x;
        coeff[9] = (double)image->rows/2.0    + image->page.y;
      }
      return(coeff);
    }
    case ShepardsDistortion:
    {
      /* Shepards Distortion  input arguments are the coefficents!
         Just check the number of arguments is valid!
         Args:  u1,v1, x1,y1, ...
          OR :  u1,v1, r1,g1,c1, ...
      */
      if ( number_arguments%cp_size != 0 ||
           number_arguments < cp_size ) {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument", "%s : 'require at least %.20g CPs'",
              CommandOptionToMnemonic(MagickDistortOptions, *method), 1.0);
        coeff=(double *) RelinquishMagickMemory(coeff);
        return((double *) NULL);
      }
      return(coeff);
    }
    default:
      break;
  }
  /* you should never reach this point */
  assert(! "No Method Handler"); /* just fail assertion */
  return((double *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s t o r t R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DistortResizeImage() resize image using the equivalent but slower image
% distortion operator. The filter is applied using a EWA cylindrical
% resampling. But like resize the final image size is limited to whole pixels
% with no effects by virtual-pixels on the result.
%
%  Note that images containing a transparency channel will be twice as slow to
%  resize as images without transparency.
%
% The format of the DistortResizeImage method is:
%
% Image *AdaptiveResizeImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *DistortResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
#define DistortResizeImageTag "Distort/Image"

  Image
    *resize_image,
    *tmp_image;

  RectangleInfo
    crop_area;

  double
    distort_args[12];

  VirtualPixelMethod
    vp_save;

  /*
    Resize 'image' to columns x rows via an affine DistortImage() call,
    using transparent virtual pixels so edge effects can be cropped away.
    Returns a new image, or NULL on failure (error details in 'exception').
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  /* Do not short-circuit this resize if final image size is unchanged */
  /*
    Affine scaling arguments: map source width/height (args 4,9) onto the
    requested columns/rows (args 6,11); all other coefficients are zero.
  */
  (void) ResetMagickMemory(distort_args,0,12*sizeof(double));
  distort_args[4]=(double) image->columns;
  distort_args[6]=(double) columns;
  distort_args[9]=(double) image->rows;
  distort_args[11]=(double) rows;
  /*
    Save the caller's virtual pixel method BEFORE any override so it can
    be restored on the result.  (Previously the method was forced to
    Transparent on the const input image before being saved, making the
    restore a no-op and modifying the caller's image; the override is
    applied explicitly to each working clone below instead.)
  */
  vp_save=GetImageVirtualPixelMethod(image);
  tmp_image=CloneImage(image,0,0,MagickTrue,exception);
  if ( tmp_image == (Image *) NULL )
    return((Image *) NULL);
  (void) SetImageVirtualPixelMethod(tmp_image,TransparentVirtualPixelMethod);
  if (image->matte == MagickFalse)
    {
      /*
        Image has no transparency channel, so we are free to use one
        temporarily to mark virtual-pixel areas, then strip it again.
      */
      (void) SetImageAlphaChannel(tmp_image,SetAlphaChannel);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if ( resize_image == (Image *) NULL )
        return((Image *) NULL);
      (void) SetImageAlphaChannel(resize_image,DeactivateAlphaChannel);
      InheritException(exception,&image->exception);
    }
  else
    {
      /*
        Image has transparency so handle colors and alpha separately.
        Basically we need to separate Virtual-Pixel alpha in the resized
        image, so only the actual original image's alpha channel is used.
      */
      Image
        *resize_alpha;

      /* distort alpha channel separately */
      (void) SeparateImageChannel(tmp_image,TrueAlphaChannel);
      (void) SetImageAlphaChannel(tmp_image,OpaqueAlphaChannel);
      resize_alpha=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if ( resize_alpha == (Image *) NULL )
        return((Image *) NULL);
      /* distort the actual image containing alpha + VP alpha */
      tmp_image=CloneImage(image,0,0,MagickTrue,exception);
      if ( tmp_image == (Image *) NULL )
        {
          /* was leaked here on this failure path */
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      (void) SetImageVirtualPixelMethod(tmp_image,
        TransparentVirtualPixelMethod);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if ( resize_image == (Image *) NULL)
        {
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      /* replace resize image's alpha with the separately distorted alpha */
      (void) SetImageAlphaChannel(resize_image,DeactivateAlphaChannel);
      (void) SetImageAlphaChannel(resize_alpha,DeactivateAlphaChannel);
      (void) CompositeImage(resize_image,CopyOpacityCompositeOp,resize_alpha,
        0,0);
      InheritException(exception,&resize_image->exception);
      resize_alpha=DestroyImage(resize_alpha);
    }
  /* restore the caller's original virtual pixel method on the result */
  (void) SetImageVirtualPixelMethod(resize_image,vp_save);
  /*
    Clean up the results of the Distortion: crop to the exact requested
    size, discarding any half-pixel edge produced by the distort.
  */
  crop_area.width=columns;
  crop_area.height=rows;
  crop_area.x=0;
  crop_area.y=0;

  tmp_image=resize_image;
  resize_image=CropImage(tmp_image,&crop_area,exception);
  tmp_image=DestroyImage(tmp_image);
  if ( resize_image == (Image *) NULL )
    return((Image *) NULL);
  return(resize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D i s t o r t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DistortImage() distorts an image using various distortion methods, by
% mapping color lookups of the source image to a new destination image
% usually of the same size as the source image, unless 'bestfit' is set to
% true.
%
% If 'bestfit' is enabled, and distortion allows it, the destination image is
% adjusted to ensure the whole source 'image' will just fit within the final
% destination image, which will be sized and offset accordingly. Also in
% many cases the virtual offset of the source image will be taken into
% account in the mapping.
%
% If the '-verbose' control option has been set print to standard error the
% equivalent '-fx' formula with coefficients for the function, if practical.
%
% The format of the DistortImage() method is:
%
% Image *DistortImage(const Image *image,const DistortImageMethod method,
% const size_t number_arguments,const double *arguments,
% MagickBooleanType bestfit, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be distorted.
%
% o method: the method of image distortion.
%
% ArcDistortion always ignores source image offset, and always
% 'bestfit' the destination image with the top left corner offset
% relative to the polar mapping center.
%
% Affine, Perspective, and Bilinear, do least squares fitting of the
% distortion when more than the minimum number of control point pairs
% are provided.
%
% Perspective, and Bilinear, fall back to a Affine distortion when less
% than 4 control point pairs are provided. While Affine distortions
% let you use any number of control point pairs, that is Zero pairs is
% a No-Op (viewport only) distortion, one pair is a translation and
% two pairs of control points do a scale-rotate-translate, without any
% shearing.
%
% o number_arguments: the number of arguments given.
%
% o arguments: an array of floating point arguments for this method.
%
% o bestfit: Attempt to 'bestfit' the size of the resulting image.
% This also forces the resulting image to be a 'layered' virtual
% canvas image. Can be overridden using 'distort:viewport' setting.
%
% o exception: return any errors or warnings in this structure
%
% Extra Controls from Image meta-data (artifacts)...
%
% o "verbose"
% Output to stderr alternatives, internal coefficients, and FX
% equivalents for the distortion operation (if feasible).
% This forms an extra check of the distortion method, and allows users
% access to the internal constants IM calculates for the distortion.
%
% o "distort:viewport"
% Directly set the output image canvas area and offset to use for the
% resulting image, rather than use the original images canvas, or a
% calculated 'bestfit' canvas.
%
% o "distort:scale"
% Scale the size of the output canvas by this amount to provide a
% method of Zooming, and for super-sampling the results.
%
% Other settings that can effect results include
%
% o 'interpolate' For source image lookups (scale enlargements)
%
% o 'filter' Set filter to use for area-resampling (scale shrinking).
% Set to 'point' to turn off and use 'interpolate' lookup
% instead
%
*/
MagickExport Image *DistortImage(const Image *image,DistortImageMethod method,
const size_t number_arguments,const double *arguments,
MagickBooleanType bestfit,ExceptionInfo *exception)
{
#define DistortImageTag "Distort/Image"
double
*coeff,
output_scaling;
Image
*distort_image;
RectangleInfo
geometry; /* geometry of the distorted space viewport */
MagickBooleanType
viewport_given;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
/*
Handle Special Compound Distortions
*/
if ( method == ResizeDistortion )
{
if ( number_arguments != 2 )
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s : '%s'","Resize",
"Invalid number of args: 2 only");
return((Image *) NULL);
}
distort_image=DistortResizeImage(image,(size_t)arguments[0],
(size_t)arguments[1], exception);
return(distort_image);
}
/*
Convert input arguments (usually as control points for reverse mapping)
into mapping coefficients to apply the distortion.
Note that some distortions are mapped to other distortions,
and as such do not require specific code after this point.
*/
coeff = GenerateCoefficients(image, &method, number_arguments,
arguments, 0, exception);
if ( coeff == (double *) NULL )
return((Image *) NULL);
/*
Determine the size and offset for a 'bestfit' destination.
Usally the four corners of the source image is enough.
*/
/* default output image bounds, when no 'bestfit' is requested */
geometry.width=image->columns;
geometry.height=image->rows;
geometry.x=0;
geometry.y=0;
if ( method == ArcDistortion ) {
bestfit = MagickTrue; /* always calculate a 'best fit' viewport */
}
/* Work out the 'best fit', (required for ArcDistortion) */
if ( bestfit ) {
PointInfo
s,d,min,max; /* source, dest coords --mapping--> min, max coords */
MagickBooleanType
fix_bounds = MagickTrue; /* enlarge bounds for VP handling */
s.x=s.y=min.x=max.x=min.y=max.y=0.0; /* keep compiler happy */
/* defines to figure out the bounds of the distorted image */
#define InitalBounds(p) \
{ \
/* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \
min.x = max.x = p.x; \
min.y = max.y = p.y; \
}
#define ExpandBounds(p) \
{ \
/* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \
min.x = MagickMin(min.x,p.x); \
max.x = MagickMax(max.x,p.x); \
min.y = MagickMin(min.y,p.y); \
max.y = MagickMax(max.y,p.y); \
}
switch (method)
{
case AffineDistortion:
{ double inverse[6];
InvertAffineCoefficients(coeff, inverse);
s.x = (double) image->page.x;
s.y = (double) image->page.y;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
InitalBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
s.x = (double) image->page.x;
s.y = (double) image->page.y+image->rows;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y+image->rows;
d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2];
d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5];
ExpandBounds(d);
break;
}
case PerspectiveDistortion:
{ double inverse[8], scale;
InvertPerspectiveCoefficients(coeff, inverse);
s.x = (double) image->page.x;
s.y = (double) image->page.y;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=1.0/( (fabs(scale) <= MagickEpsilon) ? 1.0 : scale );
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
InitalBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=1.0/( (fabs(scale) <= MagickEpsilon) ? 1.0 : scale );
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
s.x = (double) image->page.x;
s.y = (double) image->page.y+image->rows;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=1.0/( (fabs(scale) <= MagickEpsilon) ? 1.0 : scale );
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
s.x = (double) image->page.x+image->columns;
s.y = (double) image->page.y+image->rows;
scale=inverse[6]*s.x+inverse[7]*s.y+1.0;
scale=1.0/( (fabs(scale) <= MagickEpsilon) ? 1.0 : scale );
d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]);
d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]);
ExpandBounds(d);
break;
}
case ArcDistortion:
{ double a, ca, sa;
/* Forward Map Corners */
a = coeff[0]-coeff[1]/2; ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
InitalBounds(d);
d.x = (coeff[2]-coeff[3])*ca;
d.y = (coeff[2]-coeff[3])*sa;
ExpandBounds(d);
a = coeff[0]+coeff[1]/2; ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
ExpandBounds(d);
d.x = (coeff[2]-coeff[3])*ca;
d.y = (coeff[2]-coeff[3])*sa;
ExpandBounds(d);
/* Orthogonal points along top of arc */
for( a=(double) (ceil((double) ((coeff[0]-coeff[1]/2.0)/MagickPI2))*MagickPI2);
a<(coeff[0]+coeff[1]/2.0); a+=MagickPI2 ) {
ca = cos(a); sa = sin(a);
d.x = coeff[2]*ca;
d.y = coeff[2]*sa;
ExpandBounds(d);
}
/*
Convert the angle_to_width and radius_to_height
to appropriate scaling factors, to allow faster processing
in the mapping function.
*/
coeff[1] = (double) (Magick2PI*image->columns/coeff[1]);
coeff[3] = (double)image->rows/coeff[3];
break;
}
case PolarDistortion:
{
if (number_arguments < 2)
coeff[2] = coeff[3] = 0.0;
min.x = coeff[2]-coeff[0];
max.x = coeff[2]+coeff[0];
min.y = coeff[3]-coeff[0];
max.y = coeff[3]+coeff[0];
/* should be about 1.0 if Rmin = 0 */
coeff[7]=(double) geometry.height/(coeff[0]-coeff[1]);
break;
}
case DePolarDistortion:
{
/* direct calculation as it needs to tile correctly
* for reversibility in a DePolar-Polar cycle */
fix_bounds = MagickFalse;
geometry.x = geometry.y = 0;
geometry.height = (size_t) ceil(coeff[0]-coeff[1]);
geometry.width = (size_t)
ceil((coeff[0]-coeff[1])*(coeff[5]-coeff[4])*0.5);
/* correct scaling factors relative to new size */
coeff[6]=(coeff[5]-coeff[4])/geometry.width; /* changed width */
coeff[7]=(coeff[0]-coeff[1])/geometry.height; /* should be about 1.0 */
break;
}
case Cylinder2PlaneDistortion:
{
/* direct calculation so center of distortion is either a pixel
* center, or pixel edge. This allows for reversibility of the
* distortion */
geometry.x = geometry.y = 0;
geometry.width = ceil( 2.0*coeff[1]*tan(coeff[0]/2.0) );
geometry.height = ceil( 2.0*coeff[3]/cos(coeff[0]/2.0) );
/* correct center of distortion relative to new size */
coeff[4] = geometry.width/2.0;
coeff[5] = geometry.height/2.0;
fix_bounds = MagickFalse;
break;
}
case Plane2CylinderDistortion:
{
/* direct calculation center is either pixel center, or pixel edge
* so as to allow reversibility of the image distortion */
geometry.x = geometry.y = 0;
geometry.width = ceil(coeff[0]*coeff[1]); /* FOV * radius */
geometry.height = 2*coeff[3]; /* input image height */
/* correct center of distortion relative to new size */
coeff[4] = geometry.width/2.0;
coeff[5] = geometry.height/2.0;
fix_bounds = MagickFalse;
break;
}
case ShepardsDistortion:
case BilinearForwardDistortion:
case BilinearReverseDistortion:
#if 0
case QuadrilateralDistortion:
#endif
case PolynomialDistortion:
case BarrelDistortion:
case BarrelInverseDistortion:
default:
/* no calculated bestfit available for these distortions */
bestfit = MagickFalse;
fix_bounds = MagickFalse;
break;
}
/* Set the output image geometry to calculated 'bestfit'.
Yes this tends to 'over do' the file image size, ON PURPOSE!
Do not do this for DePolar which needs to be exact for virtual tiling.
*/
if ( fix_bounds ) {
geometry.x = (ssize_t) floor(min.x-0.5);
geometry.y = (ssize_t) floor(min.y-0.5);
geometry.width=(size_t) ceil(max.x-geometry.x+0.5);
geometry.height=(size_t) ceil(max.y-geometry.y+0.5);
}
} /* end bestfit destination image calculations */
/* The user provided a 'viewport' expert option which may
overrides some parts of the current output image geometry.
This also overrides its default 'bestfit' setting.
*/
{ const char *artifact=GetImageArtifact(image,"distort:viewport");
viewport_given = MagickFalse;
if ( artifact != (const char *) NULL ) {
(void) ParseAbsoluteGeometry(artifact,&geometry);
viewport_given = MagickTrue;
}
}
/* Verbose output */
if ( GetImageArtifact(image,"verbose") != (const char *) NULL ) {
register ssize_t
i;
char image_gen[MaxTextExtent];
const char *lookup;
/* Set destination image size and virtual offset */
if ( bestfit || viewport_given ) {
(void) FormatLocaleString(image_gen, MaxTextExtent," -size %.20gx%.20g "
"-page %+.20g%+.20g xc: +insert \\\n",(double) geometry.width,
(double) geometry.height,(double) geometry.x,(double) geometry.y);
lookup="v.p{ xx-v.page.x-.5, yy-v.page.y-.5 }";
}
else {
image_gen[0] = '\0'; /* no destination to generate */
lookup = "p{ xx-page.x-.5, yy-page.y-.5 }"; /* simplify lookup */
}
switch (method) {
case AffineDistortion:
{
double *inverse;
inverse = (double *) AcquireQuantumMemory(6,sizeof(*inverse));
if (inverse == (double *) NULL) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortImages");
return((Image *) NULL);
}
InvertAffineCoefficients(coeff, inverse);
CoefficientsToAffineArgs(inverse);
(void) FormatLocaleFile(stderr, "Affine Projection:\n");
(void) FormatLocaleFile(stderr, " -distort AffineProjection \\\n '");
for (i=0; i < 5; i++)
(void) FormatLocaleFile(stderr, "%lf,", inverse[i]);
(void) FormatLocaleFile(stderr, "%lf'\n", inverse[5]);
inverse = (double *) RelinquishMagickMemory(inverse);
(void) FormatLocaleFile(stderr, "Affine Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx=%+lf*ii %+lf*jj %+lf;\n",
coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " yy=%+lf*ii %+lf*jj %+lf;\n",
coeff[3], coeff[4], coeff[5]);
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
}
case PerspectiveDistortion:
{
double *inverse;
inverse = (double *) AcquireQuantumMemory(8,sizeof(*inverse));
if (inverse == (double *) NULL) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed",
"%s", "DistortCoefficients");
return((Image *) NULL);
}
InvertPerspectiveCoefficients(coeff, inverse);
(void) FormatLocaleFile(stderr, "Perspective Projection:\n");
(void) FormatLocaleFile(stderr, " -distort PerspectiveProjection \\\n '");
for (i=0; i<4; i++)
(void) FormatLocaleFile(stderr, "%lf, ", inverse[i]);
(void) FormatLocaleFile(stderr, "\n ");
for (; i<7; i++)
(void) FormatLocaleFile(stderr, "%lf, ", inverse[i]);
(void) FormatLocaleFile(stderr, "%lf'\n", inverse[7]);
inverse = (double *) RelinquishMagickMemory(inverse);
(void) FormatLocaleFile(stderr, "Perspective Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " rr=%+lf*ii %+lf*jj + 1;\n",
coeff[6], coeff[7]);
(void) FormatLocaleFile(stderr, " xx=(%+lf*ii %+lf*jj %+lf)/rr;\n",
coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " yy=(%+lf*ii %+lf*jj %+lf)/rr;\n",
coeff[3], coeff[4], coeff[5]);
(void) FormatLocaleFile(stderr, " rr%s0 ? %s : blue' \\\n",
coeff[8] < 0 ? "<" : ">", lookup);
break;
}
case BilinearForwardDistortion:
(void) FormatLocaleFile(stderr, "BilinearForward Mapping Equations:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " i = %+lf*x %+lf*y %+lf*x*y %+lf;\n",
coeff[0], coeff[1], coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr, " j = %+lf*x %+lf*y %+lf*x*y %+lf;\n",
coeff[4], coeff[5], coeff[6], coeff[7]);
#if 0
/* for debugging */
(void) FormatLocaleFile(stderr, " c8 = %+lf c9 = 2*a = %+lf;\n",
coeff[8], coeff[9]);
#endif
(void) FormatLocaleFile(stderr, "BilinearForward Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",
0.5-coeff[3], 0.5-coeff[7]);
(void) FormatLocaleFile(stderr, " bb=%lf*ii %+lf*jj %+lf;\n",
coeff[6], -coeff[2], coeff[8]);
/* Handle Special degenerate (non-quadratic) or trapezoidal case */
if ( coeff[9] != 0 ) {
(void) FormatLocaleFile(stderr, " rt=bb*bb %+lf*(%lf*ii%+lf*jj);\n",
-2*coeff[9], coeff[4], -coeff[0]);
(void) FormatLocaleFile(stderr, " yy=( -bb + sqrt(rt) ) / %lf;\n",
coeff[9]);
} else
(void) FormatLocaleFile(stderr, " yy=(%lf*ii%+lf*jj)/bb;\n",
-coeff[4], coeff[0]);
(void) FormatLocaleFile(stderr, " xx=(ii %+lf*yy)/(%lf %+lf*yy);\n",
-coeff[1], coeff[0], coeff[2]);
if ( coeff[9] != 0 )
(void) FormatLocaleFile(stderr, " (rt < 0 ) ? red : %s'\n", lookup);
else
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
case BilinearReverseDistortion:
#if 0
(void) FormatLocaleFile(stderr, "Polynomial Projection Distort:\n");
(void) FormatLocaleFile(stderr, " -distort PolynomialProjection \\\n");
(void) FormatLocaleFile(stderr, " '1.5, %lf, %lf, %lf, %lf,\n",
coeff[3], coeff[0], coeff[1], coeff[2]);
(void) FormatLocaleFile(stderr, " %lf, %lf, %lf, %lf'\n",
coeff[7], coeff[4], coeff[5], coeff[6]);
#endif
(void) FormatLocaleFile(stderr, "BilinearReverse Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",
coeff[0], coeff[1], coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr, " yy=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",
coeff[4], coeff[5], coeff[6], coeff[7]);
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
case PolynomialDistortion:
{
size_t nterms = (size_t) coeff[1];
(void) FormatLocaleFile(stderr, "Polynomial (order %lg, terms %lu), FX Equivelent\n",
coeff[0],(unsigned long) nterms);
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n");
(void) FormatLocaleFile(stderr, " xx =");
for (i=0; i<(ssize_t) nterms; i++) {
if ( i != 0 && i%4 == 0 ) (void) FormatLocaleFile(stderr, "\n ");
(void) FormatLocaleFile(stderr, " %+lf%s", coeff[2+i],
poly_basis_str(i));
}
(void) FormatLocaleFile(stderr, ";\n yy =");
for (i=0; i<(ssize_t) nterms; i++) {
if ( i != 0 && i%4 == 0 ) (void) FormatLocaleFile(stderr, "\n ");
(void) FormatLocaleFile(stderr, " %+lf%s", coeff[2+i+nterms],
poly_basis_str(i));
}
(void) FormatLocaleFile(stderr, ";\n %s' \\\n", lookup);
break;
}
case ArcDistortion:
{
(void) FormatLocaleFile(stderr, "Arc Distort, Internal Coefficients:\n");
for ( i=0; i<5; i++ )
(void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]);
(void) FormatLocaleFile(stderr, "Arc Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x; jj=j+page.y;\n");
(void) FormatLocaleFile(stderr, " xx=(atan2(jj,ii)%+lf)/(2*pi);\n",
-coeff[0]);
(void) FormatLocaleFile(stderr, " xx=xx-round(xx);\n");
(void) FormatLocaleFile(stderr, " xx=xx*%lf %+lf;\n",
coeff[1], coeff[4]);
(void) FormatLocaleFile(stderr, " yy=(%lf - hypot(ii,jj)) * %lf;\n",
coeff[2], coeff[3]);
(void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n");
break;
}
case PolarDistortion:
{
(void) FormatLocaleFile(stderr, "Polar Distort, Internal Coefficents\n");
for ( i=0; i<8; i++ )
(void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]);
(void) FormatLocaleFile(stderr, "Polar Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",
-coeff[2], -coeff[3]);
(void) FormatLocaleFile(stderr, " xx=(atan2(ii,jj)%+lf)/(2*pi);\n",
-(coeff[4]+coeff[5])/2 );
(void) FormatLocaleFile(stderr, " xx=xx-round(xx);\n");
(void) FormatLocaleFile(stderr, " xx=xx*2*pi*%lf + v.w/2;\n",
coeff[6] );
(void) FormatLocaleFile(stderr, " yy=(hypot(ii,jj)%+lf)*%lf;\n",
-coeff[1], coeff[7] );
(void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n");
break;
}
case DePolarDistortion:
{
(void) FormatLocaleFile(stderr, "DePolar Distort, Internal Coefficents\n");
for ( i=0; i<8; i++ )
(void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]);
(void) FormatLocaleFile(stderr, "DePolar Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'aa=(i+.5)*%lf %+lf;\n", coeff[6], -coeff[4] );
(void) FormatLocaleFile(stderr, " rr=(j+.5)*%lf %+lf;\n", coeff[7], +coeff[1] );
(void) FormatLocaleFile(stderr, " xx=rr*sin(aa) %+lf;\n", coeff[2] );
(void) FormatLocaleFile(stderr, " yy=rr*cos(aa) %+lf;\n", coeff[3] );
(void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n");
break;
}
case Cylinder2PlaneDistortion:
{
(void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, Internal Coefficents\n");
(void) FormatLocaleFile(stderr, " cylinder_radius = %+lf\n", coeff[1]);
(void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",
-coeff[4], -coeff[5]);
(void) FormatLocaleFile(stderr, " aa=atan(ii/%+lf);\n", coeff[1] );
(void) FormatLocaleFile(stderr, " xx=%lf*aa%+lf;\n",
coeff[1], coeff[2] );
(void) FormatLocaleFile(stderr, " yy=jj*cos(aa)%+lf;\n", coeff[3] );
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
}
case Plane2CylinderDistortion:
{
(void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, Internal Coefficents\n");
(void) FormatLocaleFile(stderr, " cylinder_radius = %+lf\n", coeff[1]);
(void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, FX Equivelent:\n");
(void) FormatLocaleFile(stderr, "%s", image_gen);
(void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",
-coeff[4], -coeff[5]);
(void) FormatLocaleFile(stderr, " ii=ii/%+lf;\n", coeff[1] );
(void) FormatLocaleFile(stderr, " xx=%lf*tan(ii)%+lf;\n",
coeff[1], coeff[2] );
(void) FormatLocaleFile(stderr, " yy=jj/cos(ii)%+lf;\n",
coeff[3] );
(void) FormatLocaleFile(stderr, " %s' \\\n", lookup);
break;
break;
}
case BarrelDistortion:
case BarrelInverseDistortion:
{ double xc,yc;
/* NOTE: This does the barrel roll in pixel coords not image coords
** The internal distortion must do it in image coordinates,
** so that is what the center coeff (8,9) is given in.
*/
xc = ((double)image->columns-1.0)/2.0 + image->page.x;
yc = ((double)image->rows-1.0)/2.0 + image->page.y;
(void) FormatLocaleFile(stderr, "Barrel%s Distort, FX Equivelent:\n",
method == BarrelDistortion ? "" : "Inv");
(void) FormatLocaleFile(stderr, "%s", image_gen);
if ( fabs(coeff[8]-xc-0.5) < 0.1 && fabs(coeff[9]-yc-0.5) < 0.1 )
(void) FormatLocaleFile(stderr, " -fx 'xc=(w-1)/2; yc=(h-1)/2;\n");
else
(void) FormatLocaleFile(stderr, " -fx 'xc=%lf; yc=%lf;\n",
coeff[8]-0.5, coeff[9]-0.5);
(void) FormatLocaleFile(stderr,
" ii=i-xc; jj=j-yc; rr=hypot(ii,jj);\n");
(void) FormatLocaleFile(stderr, " ii=ii%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n",
method == BarrelDistortion ? "*" : "/",
coeff[0],coeff[1],coeff[2],coeff[3]);
(void) FormatLocaleFile(stderr, " jj=jj%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n",
method == BarrelDistortion ? "*" : "/",
coeff[4],coeff[5],coeff[6],coeff[7]);
(void) FormatLocaleFile(stderr, " v.p{fx*ii+xc,fy*jj+yc}' \\\n");
}
default:
break;
}
}
/* The user provided a 'scale' expert option will scale the
output image size, by the factor given allowing for super-sampling
of the distorted image space. Any scaling factors must naturally
be halved as a result.
*/
{ const char *artifact;
artifact=GetImageArtifact(image,"distort:scale");
output_scaling = 1.0;
if (artifact != (const char *) NULL) {
output_scaling = fabs(StringToDouble(artifact,(char **) NULL));
geometry.width *= output_scaling;
geometry.height *= output_scaling;
geometry.x *= output_scaling;
geometry.y *= output_scaling;
if ( output_scaling < 0.1 ) {
coeff = (double *) RelinquishMagickMemory(coeff);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","%s", "-set option:distort:scale" );
return((Image *) NULL);
}
output_scaling = 1/output_scaling;
}
}
#define ScaleFilter(F,A,B,C,D) \
ScaleResampleFilter( (F), \
output_scaling*(A), output_scaling*(B), \
output_scaling*(C), output_scaling*(D) )
/*
Initialize the distort image attributes.
*/
distort_image=CloneImage(image,geometry.width,geometry.height,MagickTrue,
exception);
if (distort_image == (Image *) NULL)
return((Image *) NULL);
/* if image is ColorMapped - change it to DirectClass */
if (SetImageStorageClass(distort_image,DirectClass) == MagickFalse)
{
InheritException(exception,&distort_image->exception);
distort_image=DestroyImage(distort_image);
return((Image *) NULL);
}
distort_image->page.x=geometry.x;
distort_image->page.y=geometry.y;
if (distort_image->background_color.opacity != OpaqueOpacity)
distort_image->matte=MagickTrue;
{ /* ----- MAIN CODE -----
Sample the source image to each pixel in the distort image.
*/
CacheView
*distort_view;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
zero;
ResampleFilter
**restrict resample_filter;
ssize_t
j;
status=MagickTrue;
progress=0;
GetMagickPixelPacket(distort_image,&zero);
resample_filter=AcquireResampleFilterThreadSet(image,
UndefinedVirtualPixelMethod,MagickFalse,exception);
distort_view=AcquireCacheView(distort_image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
for (j=0; j < (ssize_t) distort_image->rows; j++)
{
const int
id = GetOpenMPThreadId();
double
validity; /* how mathematically valid is this the mapping */
MagickBooleanType
sync;
MagickPixelPacket
pixel, /* pixel color to assign to distorted image */
invalid; /* the color to assign when distort result is invalid */
PointInfo
d,
s; /* transform destination image x,y to source image x,y */
register IndexPacket
*restrict indexes;
register ssize_t
i;
register PixelPacket
*restrict q;
q=QueueCacheViewAuthenticPixels(distort_view,0,j,distort_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(distort_view);
pixel=zero;
/* Define constant scaling vectors for Affine Distortions
Other methods are either variable, or use interpolated lookup
*/
switch (method)
{
case AffineDistortion:
ScaleFilter( resample_filter[id],
coeff[0], coeff[1],
coeff[3], coeff[4] );
break;
default:
break;
}
/* Initialize default pixel validity
* negative: pixel is invalid output 'matte_color'
* 0.0 to 1.0: antialiased, mix with resample output
* 1.0 or greater: use resampled output.
*/
validity = 1.0;
GetMagickPixelPacket(distort_image,&invalid);
SetMagickPixelPacket(distort_image,&distort_image->matte_color,
(IndexPacket *) NULL, &invalid);
if (distort_image->colorspace == CMYKColorspace)
ConvertRGBToCMYK(&invalid); /* what about other color spaces? */
for (i=0; i < (ssize_t) distort_image->columns; i++)
{
/* map pixel coordinate to distortion space coordinate */
d.x = (double) (geometry.x+i+0.5)*output_scaling;
d.y = (double) (geometry.y+j+0.5)*output_scaling;
s = d; /* default is a no-op mapping */
switch (method)
{
case AffineDistortion:
{
s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2];
s.y=coeff[3]*d.x+coeff[4]*d.y+coeff[5];
/* Affine partial derivitives are constant -- set above */
break;
}
case PerspectiveDistortion:
{
double
p,q,r,abs_r,abs_c6,abs_c7,scale;
/* perspective is a ratio of affines */
p=coeff[0]*d.x+coeff[1]*d.y+coeff[2];
q=coeff[3]*d.x+coeff[4]*d.y+coeff[5];
r=coeff[6]*d.x+coeff[7]*d.y+1.0;
/* Pixel Validity -- is it a 'sky' or 'ground' pixel */
validity = (r*coeff[8] < 0.0) ? 0.0 : 1.0;
/* Determine horizon anti-alias blending */
abs_r = fabs(r)*2;
abs_c6 = fabs(coeff[6]);
abs_c7 = fabs(coeff[7]);
if ( abs_c6 > abs_c7 ) {
if ( abs_r < abs_c6*output_scaling )
validity = 0.5 - coeff[8]*r/(coeff[6]*output_scaling);
}
else if ( abs_r < abs_c7*output_scaling )
validity = 0.5 - coeff[8]*r/(coeff[7]*output_scaling);
/* Perspective Sampling Point (if valid) */
if ( validity > 0.0 ) {
/* divide by r affine, for perspective scaling */
scale = 1.0/r;
s.x = p*scale;
s.y = q*scale;
/* Perspective Partial Derivatives or Scaling Vectors */
scale *= scale;
ScaleFilter( resample_filter[id],
(r*coeff[0] - p*coeff[6])*scale,
(r*coeff[1] - p*coeff[7])*scale,
(r*coeff[3] - q*coeff[6])*scale,
(r*coeff[4] - q*coeff[7])*scale );
}
break;
}
case BilinearReverseDistortion:
{
/* Reversed Mapped is just a simple polynomial */
s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2]*d.x*d.y+coeff[3];
s.y=coeff[4]*d.x+coeff[5]*d.y
+coeff[6]*d.x*d.y+coeff[7];
/* Bilinear partial derivitives of scaling vectors */
ScaleFilter( resample_filter[id],
coeff[0] + coeff[2]*d.y,
coeff[1] + coeff[2]*d.x,
coeff[4] + coeff[6]*d.y,
coeff[5] + coeff[6]*d.x );
break;
}
case BilinearForwardDistortion:
{
/* Forward mapped needs reversed polynomial equations
* which unfortunatally requires a square root! */
double b,c;
d.x -= coeff[3]; d.y -= coeff[7];
b = coeff[6]*d.x - coeff[2]*d.y + coeff[8];
c = coeff[4]*d.x - coeff[0]*d.y;
validity = 1.0;
/* Handle Special degenerate (non-quadratic) case
* Currently without horizon anti-alising */
if ( fabs(coeff[9]) < MagickEpsilon )
s.y = -c/b;
else {
c = b*b - 2*coeff[9]*c;
if ( c < 0.0 )
validity = 0.0;
else
s.y = ( -b + sqrt(c) )/coeff[9];
}
if ( validity > 0.0 )
s.x = ( d.x - coeff[1]*s.y) / ( coeff[0] + coeff[2]*s.y );
/* NOTE: the sign of the square root should be -ve for parts
where the source image becomes 'flipped' or 'mirrored'.
FUTURE: Horizon handling
FUTURE: Scaling factors or Deritives (how?)
*/
break;
}
#if 0
case BilinearDistortion:
/* Bilinear mapping of any Quadrilateral to any Quadrilateral */
/* UNDER DEVELOPMENT */
break;
#endif
case PolynomialDistortion:
{
/* multi-ordered polynomial */
register ssize_t
k;
ssize_t
nterms=(ssize_t)coeff[1];
PointInfo
du,dv; /* the du,dv vectors from unit dx,dy -- derivatives */
s.x=s.y=du.x=du.y=dv.x=dv.y=0.0;
for(k=0; k < nterms; k++) {
s.x += poly_basis_fn(k,d.x,d.y)*coeff[2+k];
du.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k];
du.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k];
s.y += poly_basis_fn(k,d.x,d.y)*coeff[2+k+nterms];
dv.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k+nterms];
dv.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k+nterms];
}
ScaleFilter( resample_filter[id], du.x,du.y,dv.x,dv.y );
break;
}
case ArcDistortion:
{
/* what is the angle and radius in the destination image */
s.x = (double) ((atan2(d.y,d.x) - coeff[0])/Magick2PI);
s.x -= MagickRound(s.x); /* angle */
s.y = hypot(d.x,d.y); /* radius */
/* Arc Distortion Partial Scaling Vectors
Are derived by mapping the perpendicular unit vectors
dR and dA*R*2PI rather than trying to map dx and dy
The results is a very simple orthogonal aligned ellipse.
*/
if ( s.y > MagickEpsilon )
ScaleFilter( resample_filter[id],
(double) (coeff[1]/(Magick2PI*s.y)), 0, 0, coeff[3] );
else
ScaleFilter( resample_filter[id],
distort_image->columns*2, 0, 0, coeff[3] );
/* now scale the angle and radius for source image lookup point */
s.x = s.x*coeff[1] + coeff[4] + image->page.x +0.5;
s.y = (coeff[2] - s.y) * coeff[3] + image->page.y;
break;
}
case PolarDistortion:
{ /* 2D Cartesain to Polar View */
d.x -= coeff[2];
d.y -= coeff[3];
s.x = atan2(d.x,d.y) - (coeff[4]+coeff[5])/2;
s.x /= Magick2PI;
s.x -= MagickRound(s.x);
s.x *= Magick2PI; /* angle - relative to centerline */
s.y = hypot(d.x,d.y); /* radius */
/* Polar Scaling vectors are based on mapping dR and dA vectors
This results in very simple orthogonal scaling vectors
*/
if ( s.y > MagickEpsilon )
ScaleFilter( resample_filter[id],
(double) (coeff[6]/(Magick2PI*s.y)), 0, 0, coeff[7] );
else
ScaleFilter( resample_filter[id],
distort_image->columns*2, 0, 0, coeff[7] );
/* now finish mapping radius/angle to source x,y coords */
s.x = s.x*coeff[6] + (double)image->columns/2.0 + image->page.x;
s.y = (s.y-coeff[1])*coeff[7] + image->page.y;
break;
}
case DePolarDistortion:
{ /* @D Polar to Carteasain */
/* ignore all destination virtual offsets */
d.x = ((double)i+0.5)*output_scaling*coeff[6]-coeff[4];
d.y = ((double)j+0.5)*output_scaling*coeff[7]+coeff[1];
s.x = d.y*sin(d.x) + coeff[2];
s.y = d.y*cos(d.x) + coeff[3];
/* derivatives are usless - better to use SuperSampling */
break;
}
case Cylinder2PlaneDistortion:
{ /* 3D Cylinder to Tangential Plane */
double ax, cx;
/* relative to center of distortion */
d.x -= coeff[4]; d.y -= coeff[5];
d.x /= coeff[1]; /* x' = x/r */
ax=atan(d.x); /* aa = atan(x/r) = u/r */
cx=cos(ax); /* cx = cos(atan(x/r)) = 1/sqrt(x^2+u^2) */
s.x = coeff[1]*ax; /* u = r*atan(x/r) */
s.y = d.y*cx; /* v = y*cos(u/r) */
/* derivatives... (see personnal notes) */
ScaleFilter( resample_filter[id],
1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y );
#if 0
if ( i == 0 && j == 0 ) {
fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y);
fprintf(stderr, "phi = %lf\n", (double)(ax * 180.0/MagickPI) );
fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n",
1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y );
fflush(stderr); }
#endif
/* add center of distortion in source */
s.x += coeff[2]; s.y += coeff[3];
break;
}
case Plane2CylinderDistortion:
{ /* 3D Cylinder to Tangential Plane */
/* relative to center of distortion */
d.x -= coeff[4]; d.y -= coeff[5];
/* is pixel valid - horizon of a infinite Virtual-Pixel Plane
* (see Anthony Thyssen's personal note) */
validity = (coeff[1]*MagickPI2 - fabs(d.x))/output_scaling + 0.5;
if ( validity > 0.0 ) {
double cx,tx;
d.x /= coeff[1]; /* x'= x/r */
cx = 1/cos(d.x); /* cx = 1/cos(x/r) */
tx = tan(d.x); /* tx = tan(x/r) */
s.x = coeff[1]*tx; /* u = r * tan(x/r) */
s.y = d.y*cx; /* v = y / cos(x/r) */
/* derivatives... (see Anthony Thyssen's personal notes) */
ScaleFilter( resample_filter[id],
cx*cx, 0.0, s.y*cx/coeff[1], cx );
#if 1
/*if ( i == 0 && j == 0 ) {*/
if ( d.x == 0.5 && d.y == 0.5 ) {
fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y);
fprintf(stderr, "radius = %lf phi = %lf validity = %lf\n",
coeff[1], (double)(d.x * 180.0/MagickPI), validity );
fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n",
cx*cx, 0.0, s.y*cx/coeff[1], cx);
fflush(stderr); }
#endif
}
/* add center of distortion in source */
s.x += coeff[2]; s.y += coeff[3];
break;
}
case BarrelDistortion:
case BarrelInverseDistortion:
{ /* Lens Barrel Distionion Correction */
double r,fx,fy,gx,gy;
/* Radial Polynomial Distortion (de-normalized) */
d.x -= coeff[8];
d.y -= coeff[9];
r = sqrt(d.x*d.x+d.y*d.y);
if ( r > MagickEpsilon ) {
fx = ((coeff[0]*r + coeff[1])*r + coeff[2])*r + coeff[3];
fy = ((coeff[4]*r + coeff[5])*r + coeff[6])*r + coeff[7];
gx = ((3*coeff[0]*r + 2*coeff[1])*r + coeff[2])/r;
gy = ((3*coeff[4]*r + 2*coeff[5])*r + coeff[6])/r;
/* adjust functions and scaling for 'inverse' form */
if ( method == BarrelInverseDistortion ) {
fx = 1/fx; fy = 1/fy;
gx *= -fx*fx; gy *= -fy*fy;
}
/* Set the source pixel to lookup and EWA derivative vectors */
s.x = d.x*fx + coeff[8];
s.y = d.y*fy + coeff[9];
ScaleFilter( resample_filter[id],
gx*d.x*d.x + fx, gx*d.x*d.y,
gy*d.x*d.y, gy*d.y*d.y + fy );
}
else {
/* Special handling to avoid divide by zero when r==0
**
** The source and destination pixels match in this case
** which was set at the top of the loop using s = d;
** otherwise... s.x=coeff[8]; s.y=coeff[9];
*/
if ( method == BarrelDistortion )
ScaleFilter( resample_filter[id],
coeff[3], 0, 0, coeff[7] );
else /* method == BarrelInverseDistortion */
/* FUTURE, trap for D==0 causing division by zero */
ScaleFilter( resample_filter[id],
1.0/coeff[3], 0, 0, 1.0/coeff[7] );
}
break;
}
case ShepardsDistortion:
{ /* Shepards Method, or Inverse Weighted Distance for
displacement around the destination image control points
The input arguments are the coefficents to the function.
This is more of a 'displacement' function rather than an
absolute distortion function.
*/
size_t
i;
double
denominator;
denominator = s.x = s.y = 0;
for(i=0; i<number_arguments; i+=4) {
double weight =
((double)d.x-arguments[i+2])*((double)d.x-arguments[i+2])
+ ((double)d.y-arguments[i+3])*((double)d.y-arguments[i+3]);
if ( weight != 0 )
weight = 1/weight;
else
weight = 1;
s.x += (arguments[ i ]-arguments[i+2])*weight;
s.y += (arguments[i+1]-arguments[i+3])*weight;
denominator += weight;
}
s.x /= denominator;
s.y /= denominator;
s.x += d.x;
s.y += d.y;
/* We can not determine derivatives using shepards method
only color interpolatation, not area-resampling */
break;
}
default:
break; /* use the default no-op given above */
}
/* map virtual canvas location back to real image coordinate */
if ( bestfit && method != ArcDistortion ) {
s.x -= image->page.x;
s.y -= image->page.y;
}
s.x -= 0.5;
s.y -= 0.5;
if ( validity <= 0.0 ) {
/* result of distortion is an invalid pixel - don't resample */
SetPixelPacket(distort_image,&invalid,q,indexes);
}
else {
/* resample the source image to find its correct color */
(void) ResamplePixelColor(resample_filter[id],s.x,s.y,&pixel);
/* if validity between 0.0 and 1.0 mix result with invalid pixel */
if ( validity < 1.0 ) {
/* Do a blend of sample color and invalid pixel */
/* should this be a 'Blend', or an 'Over' compose */
MagickPixelCompositeBlend(&pixel,validity,&invalid,(1.0-validity),
&pixel);
}
SetPixelPacket(distort_image,&pixel,q,indexes);
}
q++;
indexes++;
}
sync=SyncCacheViewAuthenticPixels(distort_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_DistortImage)
#endif
proceed=SetImageProgress(image,DistortImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
distort_view=DestroyCacheView(distort_view);
resample_filter=DestroyResampleFilterThreadSet(resample_filter);
if (status == MagickFalse)
distort_image=DestroyImage(distort_image);
}
/* Arc does not return an offset unless 'bestfit' is in effect
And the user has not provided an overriding 'viewport'.
*/
if ( method == ArcDistortion && !bestfit && !viewport_given ) {
distort_image->page.x = 0;
distort_image->page.y = 0;
}
coeff = (double *) RelinquishMagickMemory(coeff);
return(distort_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateImage() creates a new image that is a rotated copy of an existing
% one. Positive angles rotate counter-clockwise (right-hand rule), while
% negative angles rotate clockwise. Rotated images are usually larger than
% the originals and have 'empty' triangular corners. Empty
% triangles left over from shearing the image are filled with the background
% color defined by member 'background_color' of the image. RotateImage
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the RotateImage method is:
%
% Image *RotateImage(const Image *image,const double degrees,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: Specifies the number of degrees to rotate the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotateImage(const Image *image,const double degrees,
  ExceptionInfo *exception)
{
  Image
    *rotate_image;
  MagickRealType
    angle;
  PointInfo
    shear;
  size_t
    rotations;
  VirtualPixelMethod
    method;
  /*
    Adjust rotation angle.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* Normalize angle into (-45,45] and count the 90-degree quarter turns. */
  angle=degrees;
  while (angle < -45.0)
    angle+=360.0;
  for (rotations=0; angle > 45.0; rotations++)
    angle-=90.0;
  rotations%=4;
  /* Shear factors for the residual rotation; if both are negligible the
     rotation is an exact multiple of 90 degrees and can be done losslessly. */
  shear.x=(-tan((double) DegreesToRadians(angle)/2.0));
  shear.y=sin((double) DegreesToRadians(angle));
  if ((fabs(shear.x) < MagickEpsilon) && (fabs(shear.y) < MagickEpsilon))
    return(IntegralRotateImage(image,rotations,exception));
  /* General angle: rotate via SRT distortion over a background virtual
     pixel method, then restore the caller's virtual pixel setting.
     FIX: argument was mojibake ("°rees"); it must be &degrees. */
  method=SetImageVirtualPixelMethod(image,BackgroundVirtualPixelMethod);
  rotate_image=DistortImage(image,ScaleRotateTranslateDistortion,1,&degrees,
    MagickTrue,exception);
  method=SetImageVirtualPixelMethod(image,method);
  return(rotate_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p a r s e C o l o r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SparseColorImage(), given a set of coordinates, interpolates the colors
% found at those coordinates, across the whole image, using various methods.
%
% The format of the SparseColorImage() method is:
%
% Image *SparseColorImage(const Image *image,const ChannelType channel,
% const SparseColorMethod method,const size_t number_arguments,
% const double *arguments,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be filled in.
%
% o channel: Specify which color values (in RGBKA sequence) are being set.
% This also determines the number of color_values in above.
%
% o method: the method to fill in the gradient between the control points.
%
% The methods used for SparseColor() are often similar to methods
% used for DistortImage(), and even share the same code for determination
% of the function coefficients, though with more dimensions (or resulting
% values).
%
% o number_arguments: the number of arguments given.
%
% o arguments: array of floating point arguments for this method--
% x,y,color_values-- with color_values given as normalized values.
%
% o exception: return any errors or warnings in this structure
%
*/
MagickExport Image *SparseColorImage(const Image *image,
  const ChannelType channel,const SparseColorMethod method,
  const size_t number_arguments,const double *arguments,
  ExceptionInfo *exception)
{
#define SparseColorTag "Distort/SparseColor"
  SparseColorMethod
    sparse_method;
  double
    *coeff;
  Image
    *sparse_image;
  size_t
    number_colors;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* Determine number of color values needed per control point */
  number_colors=0;
  if ( channel & RedChannel ) number_colors++;
  if ( channel & GreenChannel ) number_colors++;
  if ( channel & BlueChannel ) number_colors++;
  if ( channel & IndexChannel ) number_colors++;
  if ( channel & OpacityChannel ) number_colors++;
  /*
    Convert input arguments into mapping coefficients; in this case
    we are mapping (distorting) colors, rather than coordinates.
  */
  { DistortImageMethod
      distort_method;
    distort_method=(DistortImageMethod) method;
    if ( distort_method >= SentinelDistortion )
      distort_method = ShepardsDistortion; /* Pretend to be Shepards */
    coeff = GenerateCoefficients(image, &distort_method, number_arguments,
      arguments, number_colors, exception);
    if ( coeff == (double *) NULL )
      return((Image *) NULL);
    /*
      Note some Distort Methods may fall back to other simpler methods,
      Currently the only fallback of concern is Bilinear to Affine
      (Barycentric), which is also a sparse_color method. This also ensures
      correct two and one color Barycentric handling.
    */
    sparse_method = (SparseColorMethod) distort_method;
    if ( distort_method == ShepardsDistortion )
      sparse_method = method; /* return non-distort methods to normal */
  }
  /* Verbose output: emit "-fx" equivalents of the fitted functions when
     the coefficients are simple enough to express that way. */
  if ( GetImageArtifact(image,"verbose") != (const char *) NULL ) {
    switch (sparse_method) {
      case BarycentricColorInterpolate:
      {
        register ssize_t x=0;
        (void) FormatLocaleFile(stderr, "Barycentric Sparse Color:\n");
        if ( channel & RedChannel )
          (void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ( channel & GreenChannel )
          (void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ( channel & BlueChannel )
          (void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ( channel & IndexChannel )
          (void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ( channel & OpacityChannel )
          (void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf' \\\n",
              coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        break;
      }
      case BilinearColorInterpolate:
      {
        register ssize_t x=0;
        (void) FormatLocaleFile(stderr, "Bilinear Sparse Color\n");
        /* NOTE(review): format strings below open a quote with ' but end
           with ; -- looks like an unbalanced quote in the output; confirm
           against intended -fx syntax before changing. */
        if ( channel & RedChannel )
          (void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if ( channel & GreenChannel )
          (void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if ( channel & BlueChannel )
          (void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if ( channel & IndexChannel )
          (void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        if ( channel & OpacityChannel )
          (void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
              coeff[ x ], coeff[x+1],
              coeff[x+2], coeff[x+3]),x+=4;
        break;
      }
      default:
        /* sparse color method is too complex for FX emulation */
        break;
    }
  }
  /* Generate new image for generated interpolated gradient.
   * ASIDE: Actually we could have just replaced the colors of the original
   * image, but IM Core policy, is if storage class could change then clone
   * the image.
   */
  sparse_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sparse_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(sparse_image,DirectClass) == MagickFalse)
    { /* if image is ColorMapped - change it to DirectClass */
      InheritException(exception,&image->exception);
      sparse_image=DestroyImage(sparse_image);
      return((Image *) NULL);
    }
  { /* ----- MAIN CODE ----- */
    CacheView
      *sparse_view;
    MagickBooleanType
      status;
    MagickOffsetType
      progress;
    ssize_t
      j;
    status=MagickTrue;
    progress=0;
    sparse_view=AcquireCacheView(sparse_image);
    /* Each row is processed independently, so rows may be distributed
       across OpenMP threads; progress/status are the only shared state. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
    for (j=0; j < (ssize_t) sparse_image->rows; j++)
    {
      MagickBooleanType
        sync;
      MagickPixelPacket
        pixel; /* pixel to assign to distorted image */
      register IndexPacket
        *restrict indexes;
      register ssize_t
        i;
      register PixelPacket
        *restrict q;
      q=GetCacheViewAuthenticPixels(sparse_view,0,j,sparse_image->columns,
        1,exception);
      if (q == (PixelPacket *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      indexes=GetCacheViewAuthenticIndexQueue(sparse_view);
      GetMagickPixelPacket(sparse_image,&pixel);
      /* NOTE(review): loop bound uses image->columns while q spans
         sparse_image->columns; sparse_image is an unscaled clone of image
         (CloneImage above) so the two counts match. */
      for (i=0; i < (ssize_t) image->columns; i++)
      {
        SetMagickPixelPacket(image,q,indexes,&pixel);
        /* Interpolate each requested channel at (i,j); channel values are
           computed in normalized form and scaled to QuantumRange below. */
        switch (sparse_method)
        {
          case BarycentricColorInterpolate:
          {
            /* Affine (planar) fit: c0*i + c1*j + c2 per channel. */
            register ssize_t x=0;
            if ( channel & RedChannel )
              pixel.red = coeff[x]*i +coeff[x+1]*j
                  +coeff[x+2], x+=3;
            if ( channel & GreenChannel )
              pixel.green = coeff[x]*i +coeff[x+1]*j
                  +coeff[x+2], x+=3;
            if ( channel & BlueChannel )
              pixel.blue = coeff[x]*i +coeff[x+1]*j
                  +coeff[x+2], x+=3;
            if ( channel & IndexChannel )
              pixel.index = coeff[x]*i +coeff[x+1]*j
                  +coeff[x+2], x+=3;
            if ( channel & OpacityChannel )
              pixel.opacity = coeff[x]*i +coeff[x+1]*j
                  +coeff[x+2], x+=3;
            break;
          }
          case BilinearColorInterpolate:
          {
            /* Bilinear fit: c0*i + c1*j + c2*i*j + c3 per channel. */
            register ssize_t x=0;
            if ( channel & RedChannel )
              pixel.red = coeff[x]*i + coeff[x+1]*j +
                  coeff[x+2]*i*j + coeff[x+3], x+=4;
            if ( channel & GreenChannel )
              pixel.green = coeff[x]*i + coeff[x+1]*j +
                  coeff[x+2]*i*j + coeff[x+3], x+=4;
            if ( channel & BlueChannel )
              pixel.blue = coeff[x]*i + coeff[x+1]*j +
                  coeff[x+2]*i*j + coeff[x+3], x+=4;
            if ( channel & IndexChannel )
              pixel.index = coeff[x]*i + coeff[x+1]*j +
                  coeff[x+2]*i*j + coeff[x+3], x+=4;
            if ( channel & OpacityChannel )
              pixel.opacity = coeff[x]*i + coeff[x+1]*j +
                  coeff[x+2]*i*j + coeff[x+3], x+=4;
            break;
          }
          case InverseColorInterpolate:
          case ShepardsColorInterpolate:
          { /* Inverse (Squared) Distance weights average (IDW) */
            size_t
              k;
            double
              denominator;
            if ( channel & RedChannel ) pixel.red = 0.0;
            if ( channel & GreenChannel ) pixel.green = 0.0;
            if ( channel & BlueChannel ) pixel.blue = 0.0;
            if ( channel & IndexChannel ) pixel.index = 0.0;
            if ( channel & OpacityChannel ) pixel.opacity = 0.0;
            denominator = 0.0;
            /* arguments layout: x, y, then number_colors channel values
               per control point, repeated. */
            for(k=0; k<number_arguments; k+=2+number_colors) {
              register ssize_t x=(ssize_t) k+2;
              /* squared distance from pixel to this control point */
              double weight =
                  ((double)i-arguments[ k ])*((double)i-arguments[ k ])
                + ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
              if ( method == InverseColorInterpolate )
                weight = sqrt(weight); /* inverse, not inverse squared */
              /* clamp so a pixel at a control point does not blow up */
              weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
              if ( channel & RedChannel )
                pixel.red += arguments[x++]*weight;
              if ( channel & GreenChannel )
                pixel.green += arguments[x++]*weight;
              if ( channel & BlueChannel )
                pixel.blue += arguments[x++]*weight;
              if ( channel & IndexChannel )
                pixel.index += arguments[x++]*weight;
              if ( channel & OpacityChannel )
                pixel.opacity += arguments[x++]*weight;
              denominator += weight;
            }
            /* normalize the weighted sums */
            if ( channel & RedChannel ) pixel.red /= denominator;
            if ( channel & GreenChannel ) pixel.green /= denominator;
            if ( channel & BlueChannel ) pixel.blue /= denominator;
            if ( channel & IndexChannel ) pixel.index /= denominator;
            if ( channel & OpacityChannel ) pixel.opacity /= denominator;
            break;
          }
          case VoronoiColorInterpolate:
          default:
          { /* Just use the closest control point you can find! */
            size_t
              k;
            double
              minimum = MagickHuge;
            for(k=0; k<number_arguments; k+=2+number_colors) {
              double distance =
                  ((double)i-arguments[ k ])*((double)i-arguments[ k ])
                + ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
              if ( distance < minimum ) {
                register ssize_t x=(ssize_t) k+2;
                if ( channel & RedChannel ) pixel.red = arguments[x++];
                if ( channel & GreenChannel ) pixel.green = arguments[x++];
                if ( channel & BlueChannel ) pixel.blue = arguments[x++];
                if ( channel & IndexChannel ) pixel.index = arguments[x++];
                if ( channel & OpacityChannel ) pixel.opacity = arguments[x++];
                minimum = distance;
              }
            }
            break;
          }
        }
        /* set the color directly back into the source image */
        if ( channel & RedChannel ) pixel.red *= QuantumRange;
        if ( channel & GreenChannel ) pixel.green *= QuantumRange;
        if ( channel & BlueChannel ) pixel.blue *= QuantumRange;
        if ( channel & IndexChannel ) pixel.index *= QuantumRange;
        if ( channel & OpacityChannel ) pixel.opacity *= QuantumRange;
        SetPixelPacket(sparse_image,&pixel,q,indexes);
        q++;
        indexes++;
      }
      sync=SyncCacheViewAuthenticPixels(sparse_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;
          /* progress counter is shared across threads; serialize updates */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_SparseColorImage)
#endif
          proceed=SetImageProgress(image,SparseColorTag,progress++,image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
    sparse_view=DestroyCacheView(sparse_view);
    if (status == MagickFalse)
      sparse_image=DestroyImage(sparse_image);
  }
  coeff = (double *) RelinquishMagickMemory(coeff);
  return(sparse_image);
}
|
interpolation.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <stdint.h>
#include "../timer.h"
//------------------------------------------------------------------------------------------------------------------------------
// piecewise constant interpolation...
//
// +-------+ +---+---+
// | | | x | x |
// | x | -> +---+---+
// | | | x | x |
// +-------+ +---+---+
//
//------------------------------------------------------------------------------------------------------------------------------
void interpolation_constant(domain_type * domain, int level_f, double prescale_f, int id_f, int id_c){ // id_f = prescale*id_f + I_{2h}^{h}(id_c)
  // Piecewise-constant prolongation: every fine cell (fi,fj,fk) reads the
  // coarse cell (fi/2,fj/2,fk/2) and accumulates it onto prescale_f*fine.
  int level_c = level_f+1;
  uint64_t start_cycles = CycleTime();
  // Choose the threading strategy: parallelize over boxes for small boxes,
  // or within a single box once its i-dimension reaches the threshold.
  int collaborative_box_size = 100000; // i.e. never thread within a box
  #ifdef __COLLABORATIVE_THREADING
  collaborative_box_size = 1 << __COLLABORATIVE_THREADING;
  #endif
  int thread_across_boxes = (domain->subdomains[0].levels[level_f].dim.i <  collaborative_box_size);
  int thread_within_a_box = (domain->subdomains[0].levels[level_f].dim.i >= collaborative_box_size);
  int b;
  #pragma omp parallel for private(b) if(thread_across_boxes)
  for(b=0;b<domain->subdomains_per_rank;b++){
    int fi,fj,fk;
    int ghosts_c = domain->subdomains[b].levels[level_c].ghosts;
    int pencil_c = domain->subdomains[b].levels[level_c].pencil;
    int plane_c  = domain->subdomains[b].levels[level_c].plane;
    int ghosts_f = domain->subdomains[b].levels[level_f].ghosts;
    int pencil_f = domain->subdomains[b].levels[level_f].pencil;
    int plane_f  = domain->subdomains[b].levels[level_f].plane;
    int dim_i_f  = domain->subdomains[b].levels[level_f].dim.i;
    int dim_j_f  = domain->subdomains[b].levels[level_f].dim.j;
    int dim_k_f  = domain->subdomains[b].levels[level_f].dim.k;
    // offset both grids so index 0 is the first non-ghost element
    double * __restrict__ fine   = domain->subdomains[b].levels[level_f].grids[id_f] + ghosts_f*(1+pencil_f+plane_f);
    double * __restrict__ coarse = domain->subdomains[b].levels[level_c].grids[id_c] + ghosts_c*(1+pencil_c+plane_c);
    #pragma omp parallel for private(fk,fj,fi) if(thread_within_a_box) collapse(2)
    for(fk=0;fk<dim_k_f;fk++){
    for(fj=0;fj<dim_j_f;fj++){
    for(fi=0;fi<dim_i_f;fi++){
      int ijk_f =  fi     +  fj    *pencil_f +  fk    *plane_f;
      int ijk_c = (fi>>1) + (fj>>1)*pencil_c + (fk>>1)*plane_c;
      fine[ijk_f] = prescale_f*fine[ijk_f] + coarse[ijk_c];
    }}}
  }
  domain->cycles.interpolation[level_f] += (uint64_t)(CycleTime()-start_cycles);
}
//------------------------------------------------------------------------------------------------------------------------------
// piecewise linear interpolation...
//
//------------------------------------------------------------------------------------------------------------------------------
void interpolation_linear(domain_type * domain, int level_f, double prescale_f, int id_f, int id_c){ // id_f = prescale*id_f + I_{2h}^{h}(id_c)
  // Despite the name, the active stencil below is the 27-point
  // piecewise-QUADRATIC prolongation (see the #warning); the piecewise-linear
  // variants are retained in the commented-out blocks further down.
  int level_c = level_f+1;
  exchange_boundary(domain,level_c,id_c,1,1,1); // linear needs corners/edges in the coarse grid.
  uint64_t _timeStart = CycleTime();
  // Threading strategy: across boxes for small boxes, within a box otherwise.
  int CollaborativeThreadingBoxSize = 100000; // i.e. never
  #ifdef __COLLABORATIVE_THREADING
  CollaborativeThreadingBoxSize = 1 << __COLLABORATIVE_THREADING;
  #endif
  int omp_across_boxes = (domain->subdomains[0].levels[level_f].dim.i < CollaborativeThreadingBoxSize);
  int omp_within_a_box = (domain->subdomains[0].levels[level_f].dim.i >= CollaborativeThreadingBoxSize);
  int box;
  #pragma omp parallel for private(box) if(omp_across_boxes)
  for(box=0;box<domain->subdomains_per_rank;box++){
    int i,j,k;
    int ghosts_c = domain->subdomains[box].levels[level_c].ghosts;
    int pencil_c = domain->subdomains[box].levels[level_c].pencil;
    int plane_c = domain->subdomains[box].levels[level_c].plane;
    // coarse dims are recorded here but only the fine dims bound the loops
    int dim_i_c = domain->subdomains[box].levels[level_c].dim.i;
    int dim_j_c = domain->subdomains[box].levels[level_c].dim.j;
    int dim_k_c = domain->subdomains[box].levels[level_c].dim.k;
    int ghosts_f = domain->subdomains[box].levels[level_f].ghosts;
    int pencil_f = domain->subdomains[box].levels[level_f].pencil;
    int plane_f = domain->subdomains[box].levels[level_f].plane;
    int dim_i_f = domain->subdomains[box].levels[level_f].dim.i;
    int dim_j_f = domain->subdomains[box].levels[level_f].dim.j;
    int dim_k_f = domain->subdomains[box].levels[level_f].dim.k;
    double * __restrict__ grid_f = domain->subdomains[box].levels[level_f].grids[ id_f] + ghosts_f*(1 + pencil_f + plane_f); // [0] is first non-ghost zone element
    double * __restrict__ grid_c = domain->subdomains[box].levels[level_c].grids[ id_c] + ghosts_c*(1 + pencil_c + plane_c);
    // FIX what about dirichlet boundary conditions ???
    #pragma omp parallel for private(k,j,i) if(omp_within_a_box) collapse(2)
    for(k=0;k<dim_k_f;k++){
    for(j=0;j<dim_j_f;j++){
    for(i=0;i<dim_i_f;i++){
      int ijk_f = (i   ) + (j   )*pencil_f + (k   )*plane_f;
      int ijk_c = (i>>1) + (j>>1)*pencil_c + (k>>1)*plane_c;
      // -----------------------------------------------------------------------------------------------------------------------
      // Piecewise Quadratic Interpolation
      // -----------------------------------------------------------------------------------------------------------------------
      // define parabola f(x) = ax^2 + bx + c through three coarse grid cells in i-direction... x=-1, x=0, and x=1
      // interpolate to (-0.25,j,k) and (+0.25,j,k)
      // combine into 3 dimensions
      //
      //   +-------+-------+-------+
      //   |   o   |   o   |   o   |
      //   +-------+-------+-------+
      //               .
      //               .
      //         interpolation
      //               .
      //               .
      //   +---+---+---+---+---+---+
      //   |   |   | x | y |   |   |
      //   +---+---+---+---+---+---+
      //
      #warning using 27pt stencil for piecewise-quadratic interpolation
      // Parabola sampled at +/-0.25 gives weights (0.15625, 0.9375, -0.09375);
      // odd fine cells mirror the weights since they sit on the other side of
      // the coarse cell center.  The 3D stencil is the tensor product of the
      // three 1D weight triples.
      double xm= 0.156250,x0=0.937500,xp=-0.093750;
      double ym= 0.156250,y0=0.937500,yp=-0.093750;
      double zm= 0.156250,z0=0.937500,zp=-0.093750;
      if(i&0x1){xm=-0.093750;x0=0.937500;xp= 0.156250;}
      if(j&0x1){ym=-0.093750;y0=0.937500;yp= 0.156250;}
      if(k&0x1){zm=-0.093750;z0=0.937500;zp= 0.156250;}
      grid_f[ijk_f] =
        prescale_f*grid_f[ijk_f                  ] +
        zm*ym*xm*grid_c[ijk_c-1-pencil_c-plane_c] +
        zm*ym*x0*grid_c[ijk_c  -pencil_c-plane_c] +
        zm*ym*xp*grid_c[ijk_c+1-pencil_c-plane_c] +
        zm*y0*xm*grid_c[ijk_c-1         -plane_c] +
        zm*y0*x0*grid_c[ijk_c           -plane_c] +
        zm*y0*xp*grid_c[ijk_c+1         -plane_c] +
        zm*yp*xm*grid_c[ijk_c-1+pencil_c-plane_c] +
        zm*yp*x0*grid_c[ijk_c  +pencil_c-plane_c] +
        zm*yp*xp*grid_c[ijk_c+1+pencil_c-plane_c] +
        z0*ym*xm*grid_c[ijk_c-1-pencil_c        ] +
        z0*ym*x0*grid_c[ijk_c  -pencil_c        ] +
        z0*ym*xp*grid_c[ijk_c+1-pencil_c        ] +
        z0*y0*xm*grid_c[ijk_c-1                 ] +
        z0*y0*x0*grid_c[ijk_c                   ] +
        z0*y0*xp*grid_c[ijk_c+1                 ] +
        z0*yp*xm*grid_c[ijk_c-1+pencil_c        ] +
        z0*yp*x0*grid_c[ijk_c  +pencil_c        ] +
        z0*yp*xp*grid_c[ijk_c+1+pencil_c        ] +
        zp*ym*xm*grid_c[ijk_c-1-pencil_c+plane_c] +
        zp*ym*x0*grid_c[ijk_c  -pencil_c+plane_c] +
        zp*ym*xp*grid_c[ijk_c+1-pencil_c+plane_c] +
        zp*y0*xm*grid_c[ijk_c-1         +plane_c] +
        zp*y0*x0*grid_c[ijk_c           +plane_c] +
        zp*y0*xp*grid_c[ijk_c+1         +plane_c] +
        zp*yp*xm*grid_c[ijk_c-1+pencil_c+plane_c] +
        zp*yp*x0*grid_c[ijk_c  +pencil_c+plane_c] +
        zp*yp*xp*grid_c[ijk_c+1+pencil_c+plane_c] ;
      /*
      // -----------------------------------------------------------------------------------------------------------------------
      // Piecewise-Linear Interpolation
      // -----------------------------------------------------------------------------------------------------------------------
      // define line f(x) = bx + c through two coarse grid cells in i-direction... x=+/-1 and x=0
      // interpolate to either (-0.25) or (+0.25)
      // combine into 3 dimensions
      //
      //   +-------+-------+
      //   |   o   |   o   |
      //   +-------+-------+
      //           .
      //           .
      //     interpolation
      //           .
      //           .
      //   +---+---+---+---+
      //   |   | x | y |   |
      //   +---+---+---+---+
      //
      #warning using 8pt stencil for piecewise-linear interpolation
      int delta_i=       -1;if(i&0x1)delta_i=       1; // i.e. even points look backwards while odd points look forward
      int delta_j=-pencil_c;if(j&0x1)delta_j=pencil_c;
      int delta_k= -plane_c;if(k&0x1)delta_k= plane_c;
      grid_f[ijk_f] =
        prescale_f*grid_f[ijk_f                        ] +
          0.421875*grid_c[ijk_c                        ] +
          0.140625*grid_c[ijk_c                +delta_k] +
          0.140625*grid_c[ijk_c        +delta_j        ] +
          0.046875*grid_c[ijk_c        +delta_j+delta_k] +
          0.140625*grid_c[ijk_c+delta_i                ] +
          0.046875*grid_c[ijk_c+delta_i        +delta_k] +
          0.046875*grid_c[ijk_c+delta_i+delta_j        ] +
          0.015625*grid_c[ijk_c+delta_i+delta_j+delta_k] ;
      */
      /*
      // -----------------------------------------------------------------------------------------------------------------------
      // Piecewise-Linear Interpolation
      // -----------------------------------------------------------------------------------------------------------------------
      #warning using 7pt stencil for piecewise-linear interpolation... doesn't given 2nd order FMG??
      double coefi = -0.125;if(i&0x1)coefi = 0.125;
      double coefj = -0.125;if(j&0x1)coefj = 0.125;
      double coefk = -0.125;if(k&0x1)coefk = 0.125;
      grid_f[ijk_f] = prescale_f*grid_f[ijk_f         ] +
                                 grid_c[ijk_c         ] +
                          coefi*(grid_c[ijk_c+       1]-grid_c[ijk_c-       1]) +
                          coefj*(grid_c[ijk_c+pencil_c]-grid_c[ijk_c-pencil_c]) +
                          coefk*(grid_c[ijk_c+ plane_c]-grid_c[ijk_c- plane_c]);
      */
    }}}
  }
  domain->cycles.interpolation[level_f] += (uint64_t)(CycleTime()-_timeStart);
}
//------------------------------------------------------------------------------------------------------------------------------
|
Efficient_RANSAC.h | // Copyright (c) 2015 INRIA Sophia-Antipolis (France).
// All rights reserved.
//
// This file is part of CGAL (www.cgal.org).
//
// $URL$
// $Id$
// SPDX-License-Identifier: GPL-3.0-or-later OR LicenseRef-Commercial
//
//
// Author(s) : Sven Oesau, Yannick Verdie, Clément Jamin, Pierre Alliez
//
#ifndef CGAL_SHAPE_DETECTION_EFFICIENT_RANSAC_H
#define CGAL_SHAPE_DETECTION_EFFICIENT_RANSAC_H
#include <CGAL/license/Shape_detection.h>
#include <CGAL/Random.h>
#include <CGAL/Shape_detection/Efficient_RANSAC/Octree.h>
#include <CGAL/Shape_detection/Efficient_RANSAC/Shape_base.h>
#include <CGAL/Shape_detection/Efficient_RANSAC/Plane.h>
// for octree ------------------------------
#include <boost/iterator/filter_iterator.hpp>
#include <CGAL/bounding_box.h>
#include <CGAL/Iterator_range.h>
//----------
#include <vector>
#include <cmath>
#include <limits>
#include <fstream>
#include <sstream>
#include <functional>
// boost --------------
#include <CGAL/boost/iterator/counting_iterator.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/make_shared.hpp>
//---------------------
namespace CGAL {
namespace Shape_detection {
/*!
\ingroup PkgShapeDetectionRANSAC
\brief Shape detection algorithm based on the RANSAC method.
Given a point set in 3D space with unoriented normals, sampled on surfaces,
this class enables to detect subsets of connected points lying on the surface of primitive shapes.
Each input point is assigned to either none or at most one detected primitive
shape. The implementation follows \cgalCite{schnabel2007efficient}.
\tparam Traits must be a model of `EfficientRANSACTraits`.
*/
template<class Traits>
class Efficient_RANSAC {
public:
/// \cond SKIP_IN_MANUAL
struct Filter_unassigned_points {
// Default-constructed filter refers to its own empty dummy vector, so it
// rejects nothing that is in range and passes everything past the end.
Filter_unassigned_points() : m_shape_index(dummy) {}
Filter_unassigned_points(const std::vector<int> &shapeIndex)
: m_shape_index(shapeIndex) {}
// A point index passes the filter when no shape has been assigned to it
// (shape index -1). Indices at or past the end of the table also pass,
// which lets the wrapping counting iterator advance beyond the range
// instead of incrementing forever.
bool operator()(std::size_t x) {
return x >= m_shape_index.size() || m_shape_index[x] == -1;
}
const std::vector<int> &m_shape_index;
std::vector<int> dummy;
};
typedef boost::filter_iterator<Filter_unassigned_points,
boost::counting_iterator<std::size_t, boost::use_default, std::ptrdiff_t> > Point_index_iterator;
///< iterator for indices of points.
/// \endcond
/// \name Types
/// @{
/// \cond SKIP_IN_MANUAL
typedef typename Traits::Input_range::iterator Input_iterator;
typedef typename Traits::FT FT; ///< number type.
typedef typename Traits::Point_3 Point; ///< point type.
typedef typename Traits::Vector_3 Vector; ///< vector type.
/// \endcond
typedef typename Traits::Input_range Input_range;
///< Model of the concept `Range` with random access iterators, providing input points and normals
/// through the following two property maps.
typedef typename Traits::Point_map Point_map;
///< Property map to access the location of an input point.
typedef typename Traits::Normal_map Normal_map;
///< Property map to access the unoriented normal of an input point.
typedef Shape_base<Traits> Shape; ///< Shape type.
typedef Plane<Traits> Plane_shape; ///< %Plane shape type.
#ifdef DOXYGEN_RUNNING
typedef unspecified_type Shape_range;
///< `Iterator_range` with a bidirectional constant iterator type with value type `boost::shared_ptr<Shape>`.
typedef unspecified_type Plane_range;
///< `Iterator_range` with a bidirectional constant iterator type with value type `boost::shared_ptr<Plane_shape>`.
#else
// Iterator range over the detected shapes. Holds a shared_ptr to the
// underlying shape vector so the range remains valid even after the
// detector releases or replaces its own copy of the vector.
struct Shape_range : public Iterator_range<
typename std::vector<boost::shared_ptr<Shape> >::const_iterator> {
typedef Iterator_range<
typename std::vector<boost::shared_ptr<Shape> >::const_iterator> Base;
Shape_range(boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > >
extracted_shapes) : Base(make_range(extracted_shapes->begin(),
extracted_shapes->end())), m_extracted_shapes(extracted_shapes) {}
private:
boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > >
m_extracted_shapes; // keeps a reference to the shape vector
};
// Iterator range over only the detected planes; mirrors Shape_range and
// likewise keeps the vector alive via a shared_ptr member.
struct Plane_range : public Iterator_range<
typename std::vector<boost::shared_ptr<Plane_shape> >::const_iterator> {
typedef Iterator_range<
typename std::vector<boost::shared_ptr<Plane_shape> >::const_iterator> Base;
Plane_range(boost::shared_ptr<std::vector<boost::shared_ptr<Plane_shape> > >
extracted_shapes) : Base(make_range(extracted_shapes->begin(),
extracted_shapes->end())), m_extracted_shapes(extracted_shapes) {}
private:
boost::shared_ptr<std::vector<boost::shared_ptr<Plane_shape> > >
m_extracted_shapes; // keeps a reference to the shape vector
};
#endif
#ifdef DOXYGEN_RUNNING
typedef unspecified_type Point_index_range;
///< `Iterator_range` with a bidirectional iterator with value type `std::size_t`
/// as indices into the input data that has not been assigned to a shape.
/// As this range class has no `size()` method, the method
/// `Efficient_RANSAC::number_of_unassigned_points()` is provided.
#else
typedef Iterator_range<Point_index_iterator>
Point_index_range;
#endif
/// @}
/// \name Parameters
/// @{
/*!
Parameters for the shape detection algorithm. They are explained in detail
in Section \ref Shape_detection_RANSACParameters of the User Manual.
*/
struct Parameters {
// Sentinel defaults (-1, size_t max) mean "derive from the input inside
// detect()": epsilon/cluster_epsilon become 1% of the bounding-box
// diagonal, min_points becomes 1% of the input size (at least 10).
Parameters()
: probability((FT) 0.01), min_points((std::numeric_limits<std::size_t>::max)()), epsilon(-1),
normal_threshold((FT) 0.9), cluster_epsilon(-1) {}
/*!
Probability to control search endurance.
%Default value is 0.01.
A lower probability provides a higher reliability and determinism at the cost
of longer running time due to a higher search endurance.
It must belong to the interval [0, 1].
*/
// NOTE(review): the documented default used to say 0.05 while the
// constructor initializes 0.01; the text now matches the code.
FT probability;
/*!
Minimum number of points in a shape.
%Default value is 1% of total number of input points.
It must belong to the interval [0, +inf).
*/
std::size_t min_points;
/*!
Maximum acceptable Euclidean distance between a point and a shape.
%Default value is 1% of the bounding box diagonal.
It must belong to the interval [0, +inf).
*/
FT epsilon;
/*!
Maximum threshold on the dot product between the estimated
shape's normal and the point's normal, that is the cosine of the angle (cos(25°) = 0.9).
%Default value is 0.9 (around 25 degrees).
It must belong to the interval [0, 1].
*/
FT normal_threshold;
/*!
Maximum acceptable Euclidean distance between points, which are assumed to be neighbors.
%Default value is 1% of the bounding box diagonal.
It must belong to the interval [0, +inf).
*/
FT cluster_epsilon;
};
/// @}
private:
typedef internal::RANSAC_octree<Traits> Direct_octree;
typedef internal::RANSAC_octree<Traits> Indexed_octree;
//--------------------------------------------typedef
// Creates a function pointer for instancing shape instances.
// Factory stub instantiated once per registered shape type; stored as a
// plain function pointer in m_shape_factories. The caller owns the result.
template<class ShapeT>
static Shape *factory() {
return new ShapeT;
}
public:
/// \name Initialization
/// @{
/*!
Constructs an empty shape detection object.
*/
// All octree pointers start null and all counters at zero; the real data
// structures are built lazily by set_input() / preprocess().
Efficient_RANSAC(Traits t = Traits())
: m_traits(t), m_direct_octrees(nullptr), m_global_octree(nullptr), m_num_subsets(0),
m_num_available_points(0), m_num_total_points(0), m_valid_iterators(false) {
}
/*!
Releases all memory allocated by this instance including shapes.
*/
// clear() tears down octrees and registered factories; extracted shapes
// are reference counted (boost::shared_ptr), so shape ranges handed out
// to users stay valid.
~Efficient_RANSAC() {
clear();
}
/*!
Retrieves the traits class.
*/
const Traits &
traits() const {
return m_traits;
}
/*!
Retrieves the point property map.
*/
const Point_map &point_map() const { return m_point_pmap; }
/*!
Retrieves the normal property map.
*/
// NOTE(review): named normal() rather than normal_map(); renaming would
// break existing callers, so the asymmetry with point_map() is kept.
const Normal_map &normal() const { return m_normal_pmap; }
// Raw access to the input iterators captured by set_input().
Input_iterator input_iterator_first() const {
return m_input_iterator_first;
}
Input_iterator input_iterator_beyond() const {
return m_input_iterator_beyond;
}
/*!
Sets the input data. The range must stay valid
until the detection has been performed and the access to the
results is no longer required. The data in the input is reordered by the methods
`detect()` and `preprocess()`. This function first calls `clear()`.
*/
void set_input(
Input_range &input_range,
///< Range of input data.
Point_map point_map = Point_map(),
///< Property map to access the position of an input point.
Normal_map normal_map = Normal_map()
///< Property map to access the normal of an input point.
) {
m_point_pmap = point_map;
m_normal_pmap = normal_map;
m_input_iterator_first = input_range.begin();
m_input_iterator_beyond = input_range.end();
// clear() is a no-op until m_valid_iterators is true, so results from a
// *previous* set_input() are discarded here, then the flag is re-armed.
clear();
m_extracted_shapes =
boost::make_shared<std::vector<boost::shared_ptr<Shape> > >();
m_num_available_points = m_num_total_points = std::distance(
m_input_iterator_first, m_input_iterator_beyond);
m_valid_iterators = true;
}
/*!
Registers the shape type `ShapeType` in the detection engine that must inherit from `Shape_base`.
For example, for registering a plane as detectable shape, you should call
`ransac.add_shape_factory< Shape_detection::Plane<Traits> >();`. Note
that if your call is within a template, you should add the `template`
keyword just before `add_shape_factory`:
`ransac.template add_shape_factory< Shape_detection::Plane<Traits> >();`.
*/
// Registers Shape_type for detection by storing a pointer to the
// corresponding factory<Shape_type>() instantiation.
template<class Shape_type>
void add_shape_factory() {
m_shape_factories.push_back(factory<Shape_type>);
}
/*!
Constructs internal data structures required for the shape detection.
These structures only depend on the input data, i.e. the points and
normal vectors. This method is called by `detect()`, if it was not called
before by the user.
*/
// Builds the search structures: a hierarchy of progressively halved point
// subsets (each with its own octree, used for cheap candidate scoring) plus
// one global octree over all points. Reorders the input range in place.
// Returns false when there is no input.
bool preprocess() {
if (m_num_total_points == 0)
return false;
// Generation of subsets
// Number of subsets grows with log2 of the input size, clamped to >= 2.
m_num_subsets = (std::size_t) (std::max<std::ptrdiff_t>)((std::ptrdiff_t)
std::floor(std::log(double(m_num_total_points)) /
std::log(2.)) - 9, 2);
// SUBSET GENERATION ->
// approach with increasing subset sizes -> replace with octree later on
Input_iterator last = m_input_iterator_beyond - 1;
std::size_t remainingPoints = m_num_total_points;
m_available_octree_sizes.resize(m_num_subsets);
m_direct_octrees = new Direct_octree *[m_num_subsets];
// Work from the largest subset (last index) down to subset 0, which
// keeps whatever points remain at the front of the range.
for (int s = int(m_num_subsets) - 1; s >= 0; --s) {
std::size_t subsetSize = remainingPoints;
std::vector<std::size_t> indices(subsetSize);
if (s) {
subsetSize >>= 1;
// Randomly pick one point out of each consecutive pair.
for (std::size_t i = 0; i < subsetSize; i++) {
std::size_t index = get_default_random()(2);
index = index + (i << 1);
index = (index >= remainingPoints) ? remainingPoints - 1 : index;
indices[i] = index;
}
// move points to the end of the point vector
std::size_t j = subsetSize;
do {
j--;
typename std::iterator_traits<Input_iterator>::value_type
tmp = (*last);
*last = m_input_iterator_first[indices[std::size_t(j)]];
m_input_iterator_first[indices[std::size_t(j)]] = tmp;
last--;
} while (j > 0);
m_direct_octrees[s] = new Direct_octree(
m_traits, last + 1,
last + subsetSize + 1,
m_point_pmap,
remainingPoints - subsetSize);
} else
// Subset 0 takes all points left at the front of the range.
m_direct_octrees[0] = new Direct_octree(
m_traits, m_input_iterator_first,
m_input_iterator_first + (subsetSize),
m_point_pmap,
0);
m_available_octree_sizes[s] = subsetSize;
m_direct_octrees[s]->refine(m_options.cluster_epsilon);
remainingPoints -= subsetSize;
}
m_global_octree = new Indexed_octree(
m_traits, m_input_iterator_first, m_input_iterator_beyond,
m_point_pmap
);
m_global_octree->refine(m_options.cluster_epsilon);
return true;
}
/// @}
/// \name Memory Management
/// @{
/*!
Removes all shape types registered for detection.
*/
// Drops all registered shape factories; detect() will refuse to run until
// at least one shape type is registered again.
void clear_shape_factories() {
m_shape_factories.clear();
}
/*!
Frees memory allocated for the internal search structures but keeps the detected shapes.
It invalidates the range retrieved using `unassigned_points()`.
*/
// Frees the octree search structures while keeping any detected shapes.
// Invalidates ranges previously obtained from unassigned_points().
void clear_octrees() {
// Nothing was ever built without valid input iterators.
if (!m_valid_iterators)
return;
// delete on a null pointer is a no-op, so no guard is needed here.
delete m_global_octree;
m_global_octree = nullptr;
if (m_direct_octrees) {
for (std::size_t s = 0; s < m_num_subsets; ++s)
delete m_direct_octrees[s];
delete[] m_direct_octrees;
m_direct_octrees = nullptr;
}
m_num_subsets = 0;
}
/*!
Calls `clear_octrees()` and removes all detected shapes.
All internal structures are cleaned, including formerly detected shapes.
Thus iterators and ranges retrieved through `shapes()`, `planes()` and `indices_of_unassigned_points()`
are invalidated.
*/
void clear() {
// If there is no data yet, there are no data structures.
if (!m_valid_iterators)
return;
// swap-with-temporary releases the vector's capacity, unlike clear().
std::vector<int>().swap(m_shape_index);
m_extracted_shapes =
boost::make_shared<std::vector<boost::shared_ptr<Shape> > >();
m_num_available_points = m_num_total_points;
clear_octrees();
clear_shape_factories();
}
/// @}
/// \name Detection
/// @{
/*!
Performs the shape detection. Shape types considered during the detection
are those registered using `add_shape_factory()`.
\param options parameters for shape detection
\param callback can be omitted if the algorithm should be run
without any callback. It is called regularly when the algorithm
is running: the current advancement (between 0.0 and 1.0) is
passed as parameter. If it returns `true`, then the algorithm
continues its execution normally; if it returns `false`, the
algorithm is stopped. Note that this interruption may leave the
class in an invalid state.
\return `true` if shape types have been registered and
input data has been set. Otherwise, `false` is returned.
*/
// Runs the RANSAC search loop: repeatedly draws minimal samples, fits every
// registered shape type, scores candidates on progressively larger subsets,
// and extracts the best candidate whenever the stopping probability allows.
// Returns false when no shape types/points are set or the callback aborts.
// BUGFIX: the null check on best_candidate now precedes its first
// dereference (previously best_candidate->m_score was read first).
bool detect(const Parameters &options = Parameters(),
const std::function<bool(double)> &callback
= std::function<bool(double)>()) {
m_options = options;
// No shape types for detection or no points provided, exit
if (m_shape_factories.size() == 0 ||
(m_input_iterator_beyond - m_input_iterator_first) == 0)
return false;
if (m_num_subsets == 0 || m_global_octree == nullptr) {
if (!preprocess())
return false;
}
if (callback && !callback(0.))
return false;
// Reset data structures possibly used by former search
m_extracted_shapes =
boost::make_shared<std::vector<boost::shared_ptr<Shape> > >();
m_num_available_points = m_num_total_points;
for (std::size_t i = 0; i < m_num_subsets; i++) {
m_available_octree_sizes[i] = m_direct_octrees[i]->size();
}
// Use bounding box diagonal as reference for default values
Bbox_3 bbox = m_global_octree->boundingBox();
FT bbox_diagonal = (FT) CGAL::sqrt(
(bbox.xmax() - bbox.xmin()) * (bbox.xmax() - bbox.xmin())
+ (bbox.ymax() - bbox.ymin()) * (bbox.ymax() - bbox.ymin())
+ (bbox.zmax() - bbox.zmin()) * (bbox.zmax() - bbox.zmin()));
// Epsilon or cluster_epsilon have been set by the user?
// If not, derive from bounding box diagonal
m_options.epsilon = (m_options.epsilon < 0)
? bbox_diagonal * (FT) 0.01 : m_options.epsilon;
m_options.cluster_epsilon = (m_options.cluster_epsilon < 0)
? bbox_diagonal * (FT) 0.01 : m_options.cluster_epsilon;
// Minimum number of points: default to 1% of the input, never below 10.
m_options.min_points =
(m_options.min_points == (std::numeric_limits<std::size_t>::max)()) ?
(std::size_t)((FT)0.01 * m_num_available_points) :
m_options.min_points;
m_options.min_points = (m_options.min_points < 10) ? 10 : m_options.min_points;
// Initializing the shape index
m_shape_index.assign(m_num_available_points, -1);
if (m_options.min_points > m_num_available_points)
return true;
// List of all randomly drawn candidates
// with the minimum number of points
std::vector<Shape *> candidates;
// Identifying minimum number of samples over all registered shape types
m_required_samples = 0;
for (std::size_t i = 0; i < m_shape_factories.size(); i++) {
Shape *tmp = (Shape *) m_shape_factories[i]();
m_required_samples = (std::max<std::size_t>)(m_required_samples, tmp->minimum_sample_size());
delete tmp;
}
std::size_t first_sample; // first sample for RANSAC
FT best_expected = 0;
// number of points that have been assigned to a shape
std::size_t num_invalid = 0;
std::size_t generated_candidates = 0;
std::size_t failed_candidates = 0;
std::size_t limit_failed_candidates = (std::max)(std::size_t(10000),
std::size_t(m_input_iterator_beyond
- m_input_iterator_first)
/ std::size_t(100));
bool force_exit = false;
bool keep_searching = true;
do { // main loop
best_expected = 0;
if (keep_searching)
do { // candidate generation
// Search (remaining_points / min_points) shapes (max 200 per iteration, min 1)
std::size_t search_number
= (std::min)(std::size_t(200),
(std::max)(std::size_t((m_num_available_points - num_invalid) / double(m_options.min_points)),
std::size_t(1)));
for (std::size_t nb = 0; nb < search_number; ++ nb)
{
// Generate candidates
//1. pick a point p1 randomly among available points
std::set<std::size_t> indices;
bool done = false;
do {
do
first_sample = get_default_random()(
static_cast<unsigned int>(m_num_available_points));
while (m_shape_index[first_sample] != -1);
done = drawSamplesFromCellContainingPoint
(m_global_octree,
get(m_point_pmap,
*(m_input_iterator_first + first_sample)),
select_random_octree_level(),
indices,
m_shape_index,
m_required_samples);
if (callback && !callback(num_invalid / double(m_num_total_points)))
return false;
} while (m_shape_index[first_sample] != -1 || !done);
generated_candidates++;
//add candidate for each type of primitives
for(typename std::vector<Shape *(*)()>::iterator it =
m_shape_factories.begin(); it != m_shape_factories.end(); it++) {
if (callback && !callback(num_invalid / double(m_num_total_points)))
return false;
Shape *p = (Shape *) (*it)();
//compute the primitive and says if the candidate is valid
p->compute(indices,
m_input_iterator_first,
m_traits,
m_point_pmap,
m_normal_pmap,
m_options.epsilon,
m_options.normal_threshold);
if (p->is_valid()) {
improve_bound(p, m_num_available_points - num_invalid, 1, 500);
//evaluate the candidate
if(p->max_bound() >= m_options.min_points && p->score() > 0) {
if (best_expected < p->expected_value())
best_expected = p->expected_value();
candidates.push_back(p);
}
else {
failed_candidates++;
delete p;
}
}
else {
failed_candidates++;
delete p;
}
}
}
if (failed_candidates >= limit_failed_candidates)
{
force_exit = true;
}
keep_searching = (stop_probability(m_options.min_points,
m_num_available_points - num_invalid,
generated_candidates, m_global_octree->maxLevel())
> m_options.probability);
} while (!force_exit
&& stop_probability((std::size_t) best_expected,
m_num_available_points - num_invalid,
generated_candidates,
m_global_octree->maxLevel())
> m_options.probability
&& keep_searching);
// end of generate candidate
if (force_exit) {
break;
}
if (candidates.empty())
continue;
// Now get the best candidate in the current set of all candidates
// Note that the function sorts the candidates:
// the best candidate is always the last element of the vector
Shape *best_candidate =
get_best_candidate(candidates, m_num_available_points - num_invalid);
if (callback && !callback(num_invalid / double(m_num_total_points)))
return false;
// Guard before any dereference of best_candidate.
if (!best_candidate)
continue;
// If search is done and the best candidate is too small, we are done.
if (!keep_searching && best_candidate->m_score < m_options.min_points)
break;
best_candidate->m_indices.clear();
// Re-score against the full point set with a widened band (3 epsilon).
best_candidate->m_score =
score(m_global_octree,
best_candidate,
m_shape_index,
FT(3) * m_options.epsilon,
m_options.normal_threshold);
best_expected = static_cast<FT>(best_candidate->m_score);
best_candidate->connected_component(best_candidate->m_indices,
m_options.cluster_epsilon);
if (callback && !callback(num_invalid / double(m_num_total_points)))
return false;
// check score against min_points and clear out candidates if too low
if (best_candidate->indices_of_assigned_points().size() <
m_options.min_points) {
if (!(best_candidate->indices_of_assigned_points().empty()))
for (std::size_t i = 0; i < candidates.size() - 1; i++) {
if (best_candidate->is_same(candidates[i])) {
delete candidates[i];
candidates[i] = nullptr;
}
}
candidates.back() = nullptr;
delete best_candidate;
best_candidate = nullptr;
if (callback && !callback(num_invalid / double(m_num_total_points)))
return false;
// Trimming candidates list: compact the vector over the nullptr holes.
std::size_t empty = 0, occupied = 0;
while (empty < candidates.size()) {
while (empty < candidates.size() && candidates[empty]) empty++;
if (empty >= candidates.size())
break;
if (occupied < empty)
occupied = empty + 1;
while (occupied < candidates.size() && !candidates[occupied])
occupied++;
if (occupied >= candidates.size())
break;
candidates[empty] = candidates[occupied];
candidates[occupied] = nullptr;
empty++;
occupied++;
}
candidates.resize(empty);
if (callback && !callback(num_invalid / double(m_num_total_points)))
return false;
} else if (stop_probability((std::size_t) best_candidate->expected_value(),
(m_num_available_points - num_invalid),
generated_candidates,
m_global_octree->maxLevel())
<= m_options.probability) {
// Remove candidate from list
candidates.back() = nullptr;
//1. add best candidate to final result.
m_extracted_shapes->push_back(
boost::shared_ptr<Shape>(best_candidate));
if (callback && !callback(num_invalid / double(m_num_total_points)))
return false;
//2. remove the points
const std::vector<std::size_t> &indices_points_best_candidate =
best_candidate->indices_of_assigned_points();
// update generated candidates to reflect removal of points
generated_candidates = std::size_t(std::pow(1.f - (indices_points_best_candidate.size() /
float(m_num_available_points - num_invalid)), 3.f)
* generated_candidates);
//2.3 Remove the points from the subtrees
for (std::size_t i = 0; i < indices_points_best_candidate.size(); i++) {
m_shape_index[indices_points_best_candidate.at(i)] =
int(m_extracted_shapes->size()) - 1;
num_invalid++;
for (std::size_t j = 0; j < m_num_subsets; j++) {
if (m_direct_octrees[j]) {
std::size_t offset = m_direct_octrees[j]->offset();
if (offset <= indices_points_best_candidate.at(i) &&
(indices_points_best_candidate.at(i) - offset)
< m_direct_octrees[j]->size()) {
m_available_octree_sizes[j]--;
}
}
}
}
failed_candidates = 0;
best_expected = 0;
if (callback && !callback(num_invalid / double(m_num_total_points)))
return false;
// Prefix sums of the remaining subset sizes, used by compute_bound.
std::vector<std::size_t> subset_sizes(m_num_subsets);
subset_sizes[0] = m_available_octree_sizes[0];
for (std::size_t i = 1; i < m_num_subsets; i++) {
subset_sizes[i] = subset_sizes[i - 1] + m_available_octree_sizes[i];
}
//3. Remove points from candidates common with extracted primitive
//#pragma omp parallel for
best_expected = 0;
for (std::size_t i = 0; i < candidates.size() - 1; i++) {
if (candidates[i]) {
candidates[i]->update_points(m_shape_index);
candidates[i]->compute_bound(
subset_sizes[candidates[i]->m_nb_subset_used - 1],
m_num_available_points - num_invalid);
if (candidates[i]->max_bound() < m_options.min_points) {
delete candidates[i];
candidates[i] = nullptr;
} else {
best_expected = (candidates[i]->expected_value() > best_expected) ?
candidates[i]->expected_value() : best_expected;
}
}
}
if (callback && !callback(num_invalid / double(m_num_total_points)))
return false;
// Compact the candidate vector from both ends over the nullptr holes.
std::size_t start = 0, end = candidates.size() - 1;
while (start < end) {
while (candidates[start] && start < end) start++;
while (!candidates[end] && start < end) end--;
if (!candidates[start] && candidates[end] && start < end) {
candidates[start] = candidates[end];
candidates[end] = nullptr;
start++;
end--;
}
}
if (candidates[end]) end++;
candidates.resize(end);
} else if (!keep_searching)
++generated_candidates;
if (callback && !callback(num_invalid / double(m_num_total_points)))
return false;
keep_searching = (stop_probability(m_options.min_points,
m_num_available_points - num_invalid,
generated_candidates,
m_global_octree->maxLevel())
> m_options.probability);
} while ((keep_searching
&& FT(m_num_available_points - num_invalid) >= m_options.min_points)
|| best_expected >= m_options.min_points);
// Clean up remaining candidates.
for (std::size_t i = 0; i < candidates.size(); i++)
delete candidates[i];
candidates.resize(0);
m_num_available_points -= num_invalid;
return true;
}
/// @}
/// \name Access
/// @{
/*!
Returns an `Iterator_range` with a bidirectional iterator with value type
`boost::shared_ptr<Shape>` over the detected shapes in the order of detection.
Depending on the chosen probability
for the detection, the shapes are ordered with decreasing size.
*/
// The returned range shares ownership of the shape vector, so it outlives
// subsequent clear()/set_input() calls on this detector.
Shape_range shapes() const {
return Shape_range(m_extracted_shapes);
}
/*!
Returns an `Iterator_range` with a bidirectional iterator with
value type `boost::shared_ptr<Plane_shape>` over only the
detected planes in the order of detection. Depending on the
chosen probability for the detection, the planes are ordered
with decreasing size.
*/
Plane_range planes() const {
boost::shared_ptr<std::vector<boost::shared_ptr<Plane_shape> > > planes
= boost::make_shared<std::vector<boost::shared_ptr<Plane_shape> > >();
// Downcast each extracted shape; non-planes yield a null pointer and
// are skipped, preserving the order of detection.
for (std::size_t i = 0; i < m_extracted_shapes->size(); ++i) {
boost::shared_ptr<Plane_shape> pshape
= boost::dynamic_pointer_cast<Plane_shape>((*m_extracted_shapes)[i]);
// Ignore all shapes other than plane
if (pshape != boost::shared_ptr<Plane_shape>())
planes->push_back(pshape);
}
return Plane_range(planes);
}
/*!
Number of points not assigned to a shape.
*/
// detect() subtracts the assigned points from m_num_available_points, so
// after detection this is exactly the count of unassigned points.
std::size_t number_of_unassigned_points() const {
return m_num_available_points;
}
/*!
Returns an `Iterator_range` with a bidirectional iterator with value type `std::size_t`
as indices into the input data that has not been assigned to a shape.
*/
Point_index_range indices_of_unassigned_points() {
// Filter a counting range [0, size) down to indices whose shape index
// is still -1 (see Filter_unassigned_points).
Filter_unassigned_points fup(m_shape_index);
Point_index_iterator p1 =
boost::make_filter_iterator<Filter_unassigned_points>(
fup,
boost::counting_iterator<std::size_t, boost::use_default, std::ptrdiff_t>(0),
boost::counting_iterator<std::size_t, boost::use_default, std::ptrdiff_t>(m_shape_index.size()));
return make_range(p1, Point_index_iterator(p1.end()));
}
/// @}
private:
// Draws a uniformly random octree level in [0, maxLevel] used to choose
// the cell size from which minimal sample sets are drawn.
int select_random_octree_level() {
auto upper_bound = static_cast<unsigned int>(m_global_octree->maxLevel() + 1);
return (int) get_default_random()(upper_bound);
}
// Sorts candidates by their score upper bound and keeps refining bounds
// until the best candidate's lower bound no longer overlaps the upper
// bounds of the others, i.e. the winner is unambiguous. Sorts the vector;
// the best candidate is always returned as candidates.back().
Shape *get_best_candidate(std::vector<Shape *> &candidates,
const std::size_t num_available_points) {
if (candidates.size() == 1)
return candidates.back();
int index_worse_candidate = 0;
bool improved = true;
while (index_worse_candidate < (int) candidates.size() - 1 && improved) {
improved = false;
typename Shape::Compare_by_max_bound comp;
// Only the not-yet-ruled-out tail [index_worse_candidate, end) is sorted.
std::sort(candidates.begin() + index_worse_candidate,
candidates.end(),
comp);
//refine the best one
improve_bound(candidates.back(),
num_available_points, m_num_subsets,
m_options.min_points);
int position_stop;
//Take all those intersecting the best one, check for equal ones
for (position_stop = int(candidates.size()) - 1;
position_stop > index_worse_candidate;
position_stop--) {
if (candidates.back()->min_bound() >
candidates.at(position_stop)->max_bound())
break;//the intervals do not overlaps anymore
if (candidates.at(position_stop)->max_bound()
<= m_options.min_points)
break; //the following candidate doesn't have enough points!
//if we reach this point, there is an overlap
// between best one and position_stop
//so request refining bound on position_stop
improved |= improve_bound(candidates.at(position_stop),
num_available_points,
m_num_subsets,
m_options.min_points);
//test again after refined
if (candidates.back()->min_bound() >
candidates.at(position_stop)->max_bound())
break;//the intervals do not overlaps anymore
}
index_worse_candidate = position_stop;
}
return candidates.back();
}
// Tightens a candidate's score bounds by evaluating it on additional point
// subsets (at least min_points further samples, up to max_subset subsets).
// Returns false when no further subset is available for this candidate.
bool improve_bound(Shape *candidate,
std::size_t num_available_points,
std::size_t max_subset,
std::size_t min_points) {
if (candidate->m_nb_subset_used >= max_subset)
return false;
if (candidate->m_nb_subset_used >= m_num_subsets)
return false;
// Clamp (defensive; unreachable after the early return above).
candidate->m_nb_subset_used =
(candidate->m_nb_subset_used >= m_num_subsets) ?
m_num_subsets - 1 : candidate->m_nb_subset_used;
//what it does is add another subset and recompute lower and upper bound
//the next subset to include is provided by m_nb_subset_used
std::size_t num_points_evaluated = 0;
for (std::size_t i = 0; i < candidate->m_nb_subset_used; i++)
num_points_evaluated += m_available_octree_sizes[i];
// need score of new subset as well as sum of
// the score of the previous considered subset
std::size_t new_score = 0;
std::size_t new_sampled_points = 0;
do {
new_score =
score(m_direct_octrees[candidate->m_nb_subset_used],
candidate,
m_shape_index,
m_options.epsilon,
m_options.normal_threshold);
candidate->m_score += new_score;
num_points_evaluated +=
m_available_octree_sizes[candidate->m_nb_subset_used];
new_sampled_points +=
m_available_octree_sizes[candidate->m_nb_subset_used];
candidate->m_nb_subset_used++;
} while (new_sampled_points < min_points &&
candidate->m_nb_subset_used < m_num_subsets);
// NOTE(review): the accumulated m_score above is immediately replaced by
// the size of m_indices here; score() already appends the hits to
// m_indices, so this looks intentional, but the += appears redundant —
// confirm against Shape_base::cost_function before changing.
candidate->m_score = candidate->m_indices.size();
candidate->compute_bound(num_points_evaluated, num_available_points);
return true;
}
// Probability that a shape of size largest_candidate was missed after
// drawing num_candidates samples (Schnabel et al. 2007, Eq. for P(n,|m|)):
// (1 - |m| / (N * depth * 2^(k-1)))^n, clamped to at most 1.
inline FT stop_probability(std::size_t largest_candidate, std::size_t num_pts, std::size_t num_candidates, std::size_t octree_depth) const {
return (std::min<FT>)(std::pow(FT(1) - FT(largest_candidate)
/ (FT(num_pts) * FT(octree_depth+1)
* FT(1 << (m_required_samples - 1))),
int(num_candidates)), FT(1));
}
// Scores a candidate against the points stored in the given octree by
// traversing only cells whose bounding sphere can intersect the shape's
// epsilon band, then feeding the unassigned points of each reached leaf to
// the shape's cost function. Returns the number of points accumulated in
// candidate->m_indices.
// Fix: the original had a redundant `if (!cell.is_leaf())` nested inside
// the else branch of `if (cell.is_leaf())`; the dead check is removed.
template<class Octree>
std::size_t score(const Octree *octree,
Shape *candidate,
std::vector<int> &shapeIndex,
FT epsilon,
FT normal_threshold) {
typedef typename Octree::Node Cell;
std::stack<Cell> stack;
stack.push(octree->root());
while (!stack.empty()) {
Cell cell = stack.top();
stack.pop();
// Prune: skip cells farther from the shape than the cell diagonal
// plus epsilon (conservative bound on any contained point).
FT width = octree->width() / (1 << (cell.depth()));
FT diag = CGAL::sqrt(FT(3) * width * width) + epsilon;
FT dist = candidate->squared_distance(octree->barycenter(cell));
if (dist > (diag * diag))
continue;
// differ between full or partial overlap?
// if full overlap further traversal of this branch is not necessary
if (cell.is_leaf()) {
// Gather the still-unassigned points of this leaf and let the
// candidate accumulate matching ones into m_indices.
std::vector<std::size_t> indices;
indices.reserve(cell.size());
for (std::size_t i = 0; i < cell.size(); i++) {
if (shapeIndex[octree->index(cell, i)] == -1) {
indices.push_back(octree->index(cell, i));
}
}
candidate->cost_function(epsilon,
normal_threshold,
indices);
} else {
// Recurse into the non-empty children.
for (std::size_t i = 0; i < 8; i++) {
if (!cell[i].empty())
stack.push(cell[i]);
}
}
}
return candidate->m_indices.size();
}
// Descends from the octree root towards the child whose octant contains p,
// stopping at the requested level. Returns a null node when the tree has no
// (non-empty) node containing p at that level.
template<class Octree>
const typename Octree::Node node_containing_point(const Octree *octree, const Point &p, std::size_t level) {
// Find the node containing the point
typename Octree::Node cur = octree->root();
while (!cur.is_null() && cur.depth() < level) {
// Determine the coordinate of the child
// (one bit per axis: 1 if p lies above the cell barycenter)
std::bitset<3> coordinate;
coordinate[0] = octree->barycenter(cur).x() <= p.x();
coordinate[1] = octree->barycenter(cur).y() <= p.y();
coordinate[2] = octree->barycenter(cur).z() <= p.z();
// If cur is a leaf node, its child is null
if (cur.is_leaf())
return typename Octree::Node();
// Otherwise, return the correct child of cur
cur = cur[coordinate.to_ulong()];
// If that child is empty, return null
if (cur.empty())
return typename Octree::Node();
}
return cur;
}
// Draws requiredSamples distinct unassigned point indices from the octree
// cell at the given level that contains p, inserting them into `indices`.
// Returns false when no such cell exists or it holds too few unassigned
// points (which also guards the rejection-sampling loop against spinning
// forever).
// Fix: the random slot variable was named `p`, shadowing the Point
// parameter `p`; it is renamed to `slot`.
template<class Octree>
bool drawSamplesFromCellContainingPoint(const Octree *octree,
const Point &p,
std::size_t level,
std::set<std::size_t> &indices,
const std::vector<int> &shapeIndex,
std::size_t requiredSamples) {
typedef typename Octree::Node Cell;
const Cell cur = node_containing_point(octree, p, level);
// Stop if the node we need doesn't exist
if (cur.is_null())
return false;
// Count point indices that map to -1 in the shape index
std::size_t enough = 0;
for (auto j : cur) {
if (shapeIndex[j] == -1)
enough++;
if (enough >= requiredSamples)
break;
}
// Make sure we found enough samples
if (enough < requiredSamples)
return false;
// Rejection-sample cell slots until enough distinct unassigned indices
// have been collected (std::set deduplicates).
do {
std::size_t slot = CGAL::get_default_random().
uniform_int<std::size_t>(0, cur.size() - 1);
std::size_t j = octree->index(cur, slot);
if (shapeIndex[j] == -1)
indices.insert(j);
} while (indices.size() < requiredSamples);
return true;
}
private:
// User-supplied detection parameters.
Parameters m_options;
// Traits class.
Traits m_traits;
// Octrees build on input data for quick shape evaluation and
// sample selection within an octree cell.
// m_direct_octrees is an array of pointers — presumably one octree per
// subset (see m_num_subsets); verify ownership/deallocation elsewhere.
Direct_octree **m_direct_octrees;
Indexed_octree *m_global_octree;
// Point count available in each per-subset octree.
std::vector<std::size_t> m_available_octree_sizes;
std::size_t m_num_subsets;
// maps index into points to assigned extracted primitive
// (-1 means the point is not yet assigned to any shape).
std::vector<int> m_shape_index;
std::size_t m_num_available_points;
std::size_t m_num_total_points;
// Number of samples needed to instantiate a candidate shape.
std::size_t m_required_samples;
//give the index of the subset of point i
std::vector<int> m_index_subsets;
// Shapes extracted so far; shared_ptr so results can outlive this object.
boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > > m_extracted_shapes;
// Factory callbacks used to instantiate registered shape types.
std::vector<Shape *(*)()> m_shape_factories;
// iterators of input data
bool m_valid_iterators;
Input_iterator m_input_iterator_first, m_input_iterator_beyond;
Point_map m_point_pmap;
Normal_map m_normal_pmap;
};
}
}
#endif // CGAL_SHAPE_DETECTION_EFFICIENT_RANSAC_H
|
hierarchical_sne_inl.h | /*
*
* Copyright (c) 2014, Nicola Pezzotti (Delft University of Technology)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the Delft University of Technology.
* 4. Neither the name of the Delft University of Technology nor the names of
* its contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY NICOLA PEZZOTTI ''AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL NICOLA PEZZOTTI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
*/
#ifndef HIERARCHICAL_SNE_INL
#define HIERARCHICAL_SNE_INL
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "hdi/dimensionality_reduction/hierarchical_sne.h"
#include "hdi/utils/math_utils.h"
#include "hdi/utils/log_helper_functions.h"
#include "hdi/utils/scoped_timers.h"
#include <random>
#include <chrono>
#include <unordered_set>
#include <unordered_map>
#include <numeric>
#include "hdi/utils/memory_utils.h"
#include "hdi/data/map_mem_eff.h"
#include "hdi/data/map_helpers.h"
#include "hdi/data/io.h"
#include "hdi/utils/log_progress.h"
#ifdef HNSWLIB_FOUND
#ifdef _MSC_VER
#if(_MSC_VER >= 1900)
#include "hnswlib/hnswlib.h"
#include "hnswlib/space_l2.h"
#define HNSWLIB_SUPPORTED
#endif //(_MSC_VER >= 1900)
#else // _MSC_VER
#if (__cplusplus >=201103)
#include "hnswlib/hnswlib.h"
#include "hnswlib/space_l2.h"
#define HNSWLIB_SUPPORTED
#endif //(__cplusplus >=201103)
#endif // _MSC_VER
#endif //HNSWLIB_FOUND
#ifdef __USE_ANNOY__
#ifndef WIN32
#define isnan std::isnan
#endif
#include "annoylib.h"
#include "kissrandom.h"
#ifndef WIN32
#undef isnan
#endif
#endif // __USE_ANNOY__
#pragma warning( push )
#pragma warning( disable : 4267)
#pragma warning( push )
#pragma warning( disable : 4291)
#pragma warning( push )
#pragma warning( disable : 4996)
#pragma warning( push )
#pragma warning( disable : 4018)
#pragma warning( push )
#pragma warning( disable : 4244)
#pragma warning( pop )
#pragma warning( pop )
#pragma warning( pop )
#pragma warning( pop )
#pragma warning( pop )
namespace hdi {
namespace dr {
/////////////////////////////////////////////////////////////////////////
template <typename scalar_type, typename sparse_scalar_matrix_type>
HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::Parameters::Parameters() :
  // RNG seed; presumably -1 means "non-deterministic" — confirm in seed().
  _seed(-1),
  // k for the AKNN graph; perplexity = k/3 (see computeNeighborhoodGraph).
  _num_neighbors(30),
  _aknn_num_trees(4),
  _aknn_num_checks(1024),
  _aknn_algorithm(hdi::dr::KNN_ANNOY),
  _aknn_metric(hdi::dr::KNN_METRIC_EUCLIDEAN),
  _aknn_algorithmP1(16), // default parameter for HNSW
  _aknn_algorithmP2(200), // default parameter for HNSW
  // true: landmark selection via MCMC stationary distribution (addScale).
  _monte_carlo_sampling(true),
  _mcmcs_num_walks(10),
  _mcmcs_landmark_thresh(1.5),
  _mcmcs_walk_length(10),
  // Alternative cutoff: keep the top percentage of visited points instead
  // of the data-driven threshold.
  _hard_cut_off(false),
  _hard_cut_off_percentage(0.1f),
  // Used by selectLandmarks (non-MCMC path): size ratio between scales.
  _rs_reduction_factor_per_layer{ static_cast<scalar_type>(.1) },
  _rs_outliers_removal_jumps(10),
  _num_walks_per_landmark(100),
  _transition_matrix_prune_thresh(1.5),
  _out_of_core_computation(false)
{}
/////////////////////////////////////////////////////////////////////////
template <typename scalar_type, typename sparse_scalar_matrix_type>
HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::Statistics::Statistics() :
  // Every statistic starts at -1, the "not measured" sentinel checked by log().
  _total_time(-1),
  _init_knn_time(-1),
  _init_probabilities_time(-1),
  _init_fmc_time(-1),
  _mcmc_sampling_time(-1),
  _landmarks_selection_time(-1),
  _landmarks_selection_num_walks(-1),
  _aoi_time(-1),
  _fmc_time(-1),
  _aoi_num_walks(-1),
  _aoi_sparsity(-1),
  _fmc_sparsity(-1),
  _fmc_effective_sparsity(-1)
{}
template <typename scalar_type, typename sparse_scalar_matrix_type>
void HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::Statistics::reset() {
  // Return every statistic to -1, the "not measured" sentinel that log()
  // uses to decide whether a value should be printed.
  _total_time = _init_knn_time = _init_probabilities_time = -1;
  _init_fmc_time = _mcmc_sampling_time = -1;
  _landmarks_selection_time = -1;
  _landmarks_selection_num_walks = -1;
  _aoi_time = _fmc_time = -1;
  _aoi_num_walks = -1;
  _aoi_sparsity = _fmc_sparsity = _fmc_effective_sparsity = -1;
}
template <typename scalar_type, typename sparse_scalar_matrix_type>
void HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::Statistics::log(utils::AbstractLog* logger)const {
  // Pretty-prints every statistic that was actually measured; -1 marks a
  // value that was never set (see reset()) and is skipped.
  // NOTE(review): the label "FMC computation time" is used for both
  // _init_fmc_time and _fmc_time, which makes those two log lines ambiguous.
  utils::secureLog(logger, "\n--------------- Hierarchical-SNE Statistics ------------------");
  utils::secureLogValue(logger, "Total time", _total_time);
  if (_init_knn_time != -1) { utils::secureLogValue(logger, "\tAKNN graph computation time", _init_knn_time, true, 2); }
  if (_init_probabilities_time != -1) { utils::secureLogValue(logger, "\tTransition probabilities computation time", _init_probabilities_time, true, 1); }
  if (_init_fmc_time != -1) { utils::secureLogValue(logger, "\tFMC computation time", _init_fmc_time, true, 3); }
  if (_mcmc_sampling_time != -1) { utils::secureLogValue(logger, "\tMarkov Chain Monte Carlo sampling time", _mcmc_sampling_time, true, 1); }
  if (_landmarks_selection_time != -1) { utils::secureLogValue(logger, "\tLandmark selection time", _landmarks_selection_time, true, 2); }
  if (_landmarks_selection_num_walks != -1) { utils::secureLogValue(logger, "\tLndks Slct #walks", _landmarks_selection_num_walks, true, 3); }
  if (_aoi_time != -1) { utils::secureLogValue(logger, "\tArea of Influence computation time", _aoi_time, true, 1); }
  if (_fmc_time != -1) { utils::secureLogValue(logger, "\tFMC computation time", _fmc_time, true, 3); }
  if (_aoi_num_walks != -1) { utils::secureLogValue(logger, "\tAoI #walks", _aoi_num_walks, true, 4); }
  if (_aoi_sparsity != -1) { utils::secureLogValue(logger, "\tIs sparsity (%)", _aoi_sparsity * 100, true, 3); }
  if (_fmc_sparsity != -1) { utils::secureLogValue(logger, "\tTs sparsity (%)", _fmc_sparsity * 100, true, 3); }
  if (_fmc_effective_sparsity != -1) { utils::secureLogValue(logger, "\tTs effective sparsity (%)", _fmc_effective_sparsity * 100, true, 2); }
  utils::secureLog(logger, "--------------------------------------------------------------\n");
}
/////////////////////////////////////////////////////////////////////////
template <typename scalar_type, typename sparse_scalar_matrix_type>
scalar_type HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::Scale::mimMemoryOccupation()const {
  // Lower bound on the memory this scale occupies, reported in megabytes.
  // Vector capacities plus one (index, value) pair per sparse-matrix entry.
  scalar_type bytes(0);
  bytes += _landmark_to_original_data_idx.capacity() * sizeof(unsigned_int_type);
  bytes += _landmark_to_previous_scale_idx.capacity() * sizeof(unsigned_int_type);
  bytes += _landmark_weight.capacity() * sizeof(scalar_type);
  for (const auto& row : _transition_matrix) {
    bytes += row.size() * (sizeof(unsigned_int_type) + sizeof(scalar_type));
  }
  bytes += _previous_scale_to_landmark_idx.capacity() * sizeof(int_type);
  for (const auto& row : _area_of_influence) {
    bytes += row.size() * (sizeof(unsigned_int_type) + sizeof(scalar_type));
  }
  return bytes / 1024 / 1024;
}
/////////////////////////////////////////////////////////////////////////
template <typename scalar_type, typename sparse_scalar_matrix_type>
HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::HierarchicalSNE() :
  // Constructs an empty, uninitialized hierarchy; call initialize() before use.
  _initialized(false),
  _dimensionality(0),
  _logger(nullptr),
  _high_dimensional_data(nullptr),
  _verbose(false)
{
}
template <typename scalar_type, typename sparse_scalar_matrix_type>
void HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::reset() {
  // Drop the initialized flag so the object can be re-initialized; nothing
  // else (hierarchy, data pointer) is touched here.
  _initialized = false;
}
template <typename scalar_type, typename sparse_scalar_matrix_type>
void HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::clear() {
  // Detach from the input data (the pointer is not freed here) and mark the
  // object as uninitialized.
  _high_dimensional_data = nullptr;
  _initialized = false;
}
template <typename scalar_type, typename sparse_scalar_matrix_type>
void HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::getHighDimensionalDescriptor(scalar_vector_type& data_point, data_handle_type handle)const {
  // Copies the `_dimensionality` coordinates of data point `handle` out of
  // the flat row-major input buffer into `data_point`.
  data_point.resize(_dimensionality);
  const scalar_type* src = _high_dimensional_data + handle * _dimensionality;
  for (auto& coordinate : data_point) {
    coordinate = *src++;
  }
}
/////////////////////////////////////////////////////////////////////////
template <typename scalar_type, typename sparse_scalar_matrix_type>
void HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::initialize(scalar_type* high_dimensional_data, unsigned_int_type num_dps, Parameters params) {
  // Initializes the hierarchy from raw high-dimensional data (num_dps rows
  // of _dimensionality values). The buffer is stored, not copied.
  // NOTE(review): _dimensionality is not set here — presumably a separate
  // setter must be called before initialize(); confirm with the header.
  _statistics.reset();
  utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._total_time);
  utils::secureLog(_logger, "Initializing Hierarchical-SNE...");
  _params = params;
  _high_dimensional_data = high_dimensional_data;
  _num_dps = num_dps;
  utils::secureLogValue(_logger, "Number of data points", _num_dps);
  // Scale 0 is built from the AKNN graph of the raw data.
  initializeFirstScale();
  _initialized = true;
  utils::secureLog(_logger, "Initialization complete!");
}
template <typename scalar_type, typename sparse_scalar_matrix_type>
void HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::initialize(const sparse_scalar_matrix_type& similarities, Parameters params) {
  // Initializes the hierarchy directly from a precomputed similarity
  // (transition) matrix: no raw data and no AKNN computation are needed.
  _statistics.reset();
  utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._total_time);
  utils::secureLog(_logger, "Initializing Hierarchical-SNE...");
  _params = params;
  _high_dimensional_data = nullptr;
  // One row per data point in the provided matrix.
  _num_dps = similarities.size();
  utils::secureLogValue(_logger, "Number of data points", _num_dps);
  initializeFirstScale(similarities);
  _initialized = true;
  utils::secureLog(_logger, "Initialization complete!");
}
template <typename scalar_type, typename sparse_scalar_matrix_type>
bool HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::addScale() {
  // Adds one scale to the hierarchy, choosing the in-core or out-of-core
  // implementation per the parameters, and logs the collected statistics.
  // Returns the implementation's success flag.
  _statistics.reset();
  utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._total_time);
  // FIX: the implementations' boolean results were discarded and `res` was
  // unconditionally true; propagate them instead.
  bool res(true);
  if (_params._out_of_core_computation) {
    res = addScaleOutOfCoreImpl();
  }
  else {
    res = addScaleImpl();
  }
  _statistics.log(_logger);
  return res;
}
template <typename scalar_type, typename sparse_scalar_matrix_type>
void HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::computeNeighborhoodGraph(scalar_vector_type& distance_based_probabilities, std::vector<int>& neighborhood_graph) {
  // Builds the approximate kNN graph of the input data (HNSW or Annoy) and
  // converts the squared neighbor distances into Gaussian transition
  // probabilities with perplexity = num_neighbors/3. Each row holds nn
  // entries; slot 0 is the point itself (its probability is forced to 0).
  unsigned_int_type nn = _params._num_neighbors + 1; // +1 for the point itself
  scalar_type perplexity = _params._num_neighbors / 3.;
  neighborhood_graph.resize(_num_dps*nn);
  distance_based_probabilities.resize(_num_dps*nn);
  // Fallback to ANNOY if others are not supported
#ifndef HNSWLIB_SUPPORTED
  if (_params._aknn_algorithm == hdi::dr::KNN_HNSW)
  {
    hdi::utils::secureLog(_logger, "HNSW not available, falling back to ANNOY");
    _params._aknn_algorithm = hdi::dr::KNN_ANNOY;
  }
#endif // HNSWLIB_SUPPORTED
#ifndef __USE_ANNOY__
  if (_params._aknn_algorithm == hdi::dr::KNN_ANNOY)
  {
    _params._aknn_algorithm = hdi::dr::KNN_HNSW;
  }
#endif // __USE_ANNOY__
  if (_params._aknn_algorithm == hdi::dr::KNN_HNSW)
  {
#ifdef HNSWLIB_SUPPORTED
    utils::secureLog(_logger, "Computing the neighborhood graph with HNSW Lib...");
    hnswlib::SpaceInterface<float> *space = NULL;
    switch (_params._aknn_metric) {
    case hdi::dr::KNN_METRIC_EUCLIDEAN:
      space = new hnswlib::L2Space(_dimensionality);
      break;
    case hdi::dr::KNN_METRIC_INNER_PRODUCT:
      space = new hnswlib::InnerProductSpace(_dimensionality);
      break;
    default:
      space = new hnswlib::L2Space(_dimensionality);
      break;
    }
    { // Scope: destroy the index before freeing the space it references.
      hnswlib::HierarchicalNSW<scalar_type> appr_alg(space, _num_dps, _params._aknn_algorithmP1, _params._aknn_algorithmP2, 0);
      utils::secureLog(_logger, "\tBuilding the trees...");
      utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._init_knn_time);
      appr_alg.addPoint((void*)_high_dimensional_data, (std::size_t) 0);
      unsigned num_threads = std::thread::hardware_concurrency();
      hnswlib::ParallelFor(1, _num_dps, num_threads, [&](size_t i, size_t threadId) {
        appr_alg.addPoint((void*)(_high_dimensional_data + (i * _dimensionality)), (hnswlib::labeltype) i);
      });
      utils::secureLog(_logger, "\tAKNN queries...");
      for (int i = 0; i < _num_dps; ++i)
      {
        // searchKnn returns a max-heap: pop from farthest to nearest and
        // fill the row back-to-front so it ends up sorted by distance.
        auto top_candidates = appr_alg.searchKnn(_high_dimensional_data + (i*_dimensionality), (hnswlib::labeltype)nn);
        scalar_type *distances = distance_based_probabilities.data() + (i*nn);
        int *indices = neighborhood_graph.data() + (i*nn);
        int j = 0;
        assert(top_candidates.size() == nn);
        while (top_candidates.size() > 0)
        {
          auto rez = top_candidates.top();
          distances[nn - j - 1] = rez.first;
          indices[nn - j - 1] = rez.second;
          top_candidates.pop();
          ++j;
        }
      }
    }
    // FIX: HierarchicalNSW does not take ownership of `space`; it leaked.
    delete space;
#endif // HNSWLIB_SUPPORTED
  }
  else if (_params._aknn_algorithm == hdi::dr::KNN_ANNOY)
  {
#ifdef __USE_ANNOY__
    hdi::utils::secureLog(_logger, "Computing approximated knn with Annoy...");
    int search_k = nn * _params._aknn_num_trees;
    AnnoyIndexInterface<int, double>* tree = NULL;
    switch (_params._aknn_metric) {
    case hdi::dr::KNN_METRIC_EUCLIDEAN:
      hdi::utils::secureLog(_logger, "Computing approximated knn with Annoy using Euclidean distances ...");
      tree = new AnnoyIndex<int, double, Euclidean, Kiss32Random, AnnoyIndexSingleThreadedBuildPolicy>(_dimensionality);
      break;
    case hdi::dr::KNN_METRIC_COSINE:
      hdi::utils::secureLog(_logger, "Computing approximated knn with Annoy using Cosine distances ...");
      tree = new AnnoyIndex<int, double, Angular, Kiss32Random, AnnoyIndexSingleThreadedBuildPolicy>(_dimensionality);
      break;
    case hdi::dr::KNN_METRIC_MANHATTAN:
      hdi::utils::secureLog(_logger, "Computing approximated knn with Annoy using Manhattan distances ...");
      tree = new AnnoyIndex<int, double, Manhattan, Kiss32Random, AnnoyIndexSingleThreadedBuildPolicy>(_dimensionality);
      break;
    case hdi::dr::KNN_METRIC_DOT:
      hdi::utils::secureLog(_logger, "Computing approximated knn with Annoy using Dot product distances ...");
      tree = new AnnoyIndex<int, double, DotProduct, Kiss32Random, AnnoyIndexSingleThreadedBuildPolicy>(_dimensionality);
      break;
    default:
      hdi::utils::secureLog(_logger, "Computing approximated knn with Annoy using Euclidean distances ...");
      tree = new AnnoyIndex<int, double, Euclidean, Kiss32Random, AnnoyIndexSingleThreadedBuildPolicy>(_dimensionality);
      break;
    }
    {
      utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._init_knn_time);
      // FIX: the original leaked a `new double[_dimensionality]` per point —
      // Annoy's add_item copies the data, so one reusable buffer suffices.
      std::vector<double> vec(_dimensionality);
      for (int i = 0; i < _num_dps; ++i) {
        for (int z = 0; z < _dimensionality; ++z) {
          vec[z] = _high_dimensional_data[i * _dimensionality + z];
        }
        tree->add_item(i, vec.data());
      }
      tree->build(_params._aknn_num_trees);
      // Sample check if it returns enough neighbors.
      // FIX: bound the check by _num_dps, and declare the result vectors
      // inside the loop — get_nns_by_item appends, so reusing them made the
      // size check meaningless after the first iteration.
      for (int n = 0; n < 100 && n < _num_dps; n++) {
        std::vector<int> closest;
        std::vector<double> closest_distances;
        tree->get_nns_by_item(n, nn, search_k, &closest, &closest_distances);
        unsigned int neighbors_count = closest.size();
        if (neighbors_count < nn) {
          printf("Requesting %d neighbors, but ANNOY returned only %u. Please increase search_k\n", nn, neighbors_count);
          delete tree; // FIX: do not leak the index on early exit
          return;
        }
      }
      hdi::utils::secureLog(_logger, "Done building tree. Beginning nearest neighbor search... ");
#pragma omp parallel for
      for (int n = 0; n < _num_dps; n++)
      {
        // Find nearest neighbors
        std::vector<int> closest;
        std::vector<double> closest_distances;
        tree->get_nns_by_item(n, nn, search_k, &closest, &closest_distances);
        // Copy the current row; downstream expects squared distances,
        // hence the product.
        for (unsigned int m = 0; m < nn; m++) {
          neighborhood_graph[n * nn + m] = closest[m];
          distance_based_probabilities[n * nn + m] = closest_distances[m] * closest_distances[m];
        }
      }
    }
    delete tree;
#endif // __USE_ANNOY__
  }
  {
    utils::secureLog(_logger, "\tFMC computation...");
    utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._init_probabilities_time);
#pragma omp parallel for
    for (int_type d = 0; d < _num_dps; ++d) {
      // It could be that the point itself is not the nearest one if two
      // points are identical: force the point itself into slot 0.
      if (neighborhood_graph[d*nn] != d) {
        int to_swap = d * nn;
        for (; to_swap < d*nn + (nn-1); ++to_swap) {
          if (neighborhood_graph[to_swap] == d)
            break;
        }
        std::swap(neighborhood_graph[nn*d], neighborhood_graph[to_swap]);
        std::swap(distance_based_probabilities[nn*d], distance_based_probabilities[to_swap]);
      }
      scalar_vector_type temp_probability(nn, 0);
      utils::computeGaussianDistributionWithFixedPerplexity<scalar_vector_type>(
        distance_based_probabilities.cbegin() + d * nn,
        distance_based_probabilities.cbegin() + (d + 1)*nn,
        temp_probability.begin(),
        temp_probability.begin() + nn,
        perplexity,
        200,   // max iterations of the precision search
        1e-5,  // tolerance
        0
      );
      // Slot 0 is the point itself: zero self-transition probability.
      distance_based_probabilities[d*nn] = 0;
      for (unsigned_int_type n = 1; n < nn; ++n) {
        distance_based_probabilities[d*nn + n] = temp_probability[n];
      }
    }
  }
}
template <typename scalar_type, typename sparse_scalar_matrix_type>
void HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::initializeFirstScale() {
  // Builds scale 0: every data point is its own landmark with weight 1 and
  // the transition matrix stores its Gaussian kNN probabilities sparsely.
  utils::secureLog(_logger, "Initializing the first scale...");
  _hierarchy.clear();
  _hierarchy.push_back(Scale());
  Scale& scale = _hierarchy[0];
  scalar_vector_type distance_based_probabilities;
  std::vector<int> neighborhood_graph;
  computeNeighborhoodGraph(distance_based_probabilities, neighborhood_graph);
  unsigned_int_type nn = _params._num_neighbors + 1;
  {
    utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._init_fmc_time);
    utils::secureLog(_logger, "Creating transition matrix...");
    // Identity mappings: at scale 0 every landmark IS the data point.
    scale._landmark_to_original_data_idx.resize(_num_dps);
    std::iota(scale._landmark_to_original_data_idx.begin(), scale._landmark_to_original_data_idx.end(), 0);
    scale._landmark_to_previous_scale_idx = scale._landmark_to_original_data_idx;
    scale._landmark_weight.resize(_num_dps, 1);
    scale._transition_matrix.resize(_num_dps);
#pragma omp parallel for
    for (int i = 0; i < _num_dps; ++i) {
      // Skip entry 0 (the point itself, probability 0) in each row.
      // FIX: removed a dead `sum` accumulator that was never read.
      for (int n = 1; n < nn; ++n) {
        int idx = i * nn + n;
        scale._transition_matrix[i][neighborhood_graph[idx]] = distance_based_probabilities[idx];
      }
    }
  }
  utils::secureLogValue(_logger, "Min memory requirements (MB)", scale.mimMemoryOccupation());
}
template <typename scalar_type, typename sparse_scalar_matrix_type>
void HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::initializeFirstScale(const sparse_scalar_matrix_type& similarities) {
  // Builds scale 0 directly from a user-provided similarity matrix: each
  // point is its own landmark (weight 1) and the matrix is copied verbatim
  // as the transition matrix.
  utils::secureLog(_logger, "Initializing the first scale...");
  _hierarchy.clear();
  _hierarchy.push_back(Scale());
  Scale& scale = _hierarchy[0];
  {
    utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._init_fmc_time);
    utils::secureLog(_logger, "Creating transition matrix...");
    // Identity mappings: at scale 0 every landmark IS the data point.
    scale._landmark_to_original_data_idx.resize(_num_dps);
    std::iota(scale._landmark_to_original_data_idx.begin(), scale._landmark_to_original_data_idx.end(), 0);
    scale._landmark_to_previous_scale_idx = scale._landmark_to_original_data_idx;
    scale._landmark_weight.resize(_num_dps, 1);
    scale._transition_matrix = similarities;
  }
  utils::secureLogValue(_logger, "Min memory requirements (MB)", scale.mimMemoryOccupation());
}
template <typename scalar_type, typename sparse_scalar_matrix_type>
void HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::selectLandmarks(const Scale& previous_scale, Scale& scale, unsigned_int_type& selected_landmarks) {
  // Picks previous_size * _rs_reduction_factor_per_layer landmarks uniformly
  // at random — optionally after a short random walk that biases selection
  // away from outliers — and wires the index mappings between the scales.
  // `selected_landmarks` returns the number of landmarks chosen.
  utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._landmarks_selection_time);
  utils::secureLog(_logger, "Landmark selection with fixed reduction...");
  const unsigned_int_type previous_scale_dp = previous_scale._transition_matrix.size();
  const unsigned_int_type num_landmarks = previous_scale_dp * _params._rs_reduction_factor_per_layer;
  std::default_random_engine generator(seed());
  std::uniform_int_distribution<> distribution_int(0, previous_scale_dp - 1);
  std::uniform_real_distribution<double> distribution_real(0.0, 1.0);
  scale._landmark_to_original_data_idx.resize(num_landmarks, 0);
  scale._landmark_to_previous_scale_idx.resize(num_landmarks, 0);
  scale._landmark_weight.resize(num_landmarks, 0);
  scale._previous_scale_to_landmark_idx.resize(previous_scale_dp, -1);
  scale._transition_matrix.resize(num_landmarks);
  scale._area_of_influence.resize(previous_scale_dp);
  int num_tries = 0;
  selected_landmarks = 0;
  while (selected_landmarks < num_landmarks) {
    ++num_tries;
    int idx = distribution_int(generator);
    assert(idx >= 0);
    // FIX: the valid range is the previous scale's size, not _num_dps —
    // they only coincide when reducing from the first scale.
    assert(static_cast<unsigned_int_type>(idx) < previous_scale_dp);
    if (_params._rs_outliers_removal_jumps > 0) {
      idx = randomWalk(idx, _params._rs_outliers_removal_jumps, previous_scale._transition_matrix, distribution_real, generator);
    }
    // Already selected: draw again.
    if (scale._previous_scale_to_landmark_idx[idx] != -1) {
      continue;
    }
    scale._previous_scale_to_landmark_idx[idx] = selected_landmarks;
    scale._landmark_to_original_data_idx[selected_landmarks] = previous_scale._landmark_to_original_data_idx[idx];
    scale._landmark_to_previous_scale_idx[selected_landmarks] = idx;
    ++selected_landmarks;
  }
  _statistics._landmarks_selection_num_walks = num_tries * _params._rs_outliers_removal_jumps;
}
template <typename scalar_type, typename sparse_scalar_matrix_type>
void HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::selectLandmarksWithStationaryDistribution(const Scale& previous_scale, Scale& scale, unsigned_int_type& selected_landmarks) {
  // Selects landmarks by approximating the stationary distribution of the
  // previous scale's Markov chain: every point launches _mcmcs_num_walks
  // random walks of length _mcmcs_walk_length; points visited more than
  // `thresh` times become landmarks (or, with _hard_cut_off, the top
  // _hard_cut_off_percentage of visited points).
  utils::secureLog(_logger, "Landmark selection...");
  const unsigned_int_type previous_scale_dp = previous_scale._transition_matrix.size();
  int count = 0;
  int thresh = _params._mcmcs_num_walks * _params._mcmcs_landmark_thresh;
  std::vector<unsigned_int_type> importance_sampling(previous_scale_dp, 0);
  {
    utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._mcmc_sampling_time);
    std::default_random_engine generator(seed());
    std::uniform_real_distribution<double> distribution_real(0.0, 1.0);
    selected_landmarks = 0;
    utils::secureLog(_logger, "Monte Carlo Approximation...");
    unsigned_int_type invalid = std::numeric_limits<unsigned_int_type>::max();
    // FIX: give every thread a private copy of the RNG/distribution —
    // sharing one engine across OpenMP threads is a data race.
#pragma omp parallel for firstprivate(generator, distribution_real)
    for (int d = 0; d < previous_scale_dp; ++d) {
      for (int p = 0; p < _params._mcmcs_num_walks; ++p) {
        int idx = d;
        idx = randomWalk(idx, _params._mcmcs_walk_length, previous_scale._transition_matrix, distribution_real, generator);
        if (idx != invalid) {
          // FIX: walks from different threads can land on the same point;
          // the unsynchronized increment raced.
#pragma omp atomic
          ++importance_sampling[idx];
        }
      }
    }
    // cheap hack to get the hard cutoff in, still computes the data driven part which should probably be replaced...
    if (_params._hard_cut_off)
    {
      std::vector<unsigned_int_type> importance_sampling_sort = importance_sampling;
      std::sort(importance_sampling_sort.begin(), importance_sampling_sort.end());
      unsigned_int_type cutoff = importance_sampling_sort[(importance_sampling_sort.size() - 1) * (1.0f - _params._hard_cut_off_percentage)];
      thresh = cutoff;
    }
    _statistics._landmarks_selection_num_walks = previous_scale_dp * _params._mcmcs_num_walks;
    // First pass: count how many points pass the threshold.
    for (int i = 0; i < previous_scale_dp; ++i) {
      if (importance_sampling[i] > thresh)
        ++count;
    }
  }
  {
    utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._landmarks_selection_time);
    utils::secureLog(_logger, "Selection...");
    scale._previous_scale_to_landmark_idx.resize(previous_scale_dp, -1);
    scale._area_of_influence.resize(previous_scale_dp);
    scale._landmark_to_original_data_idx.resize(count);
    scale._landmark_to_previous_scale_idx.resize(count);
    scale._landmark_weight.resize(count);
    scale._transition_matrix.resize(count);
    // Second pass: assign consecutive landmark ids and fill the mappings.
    selected_landmarks = 0;
    for (int i = 0; i < previous_scale_dp; ++i) {
      if (importance_sampling[i] > thresh) {
        scale._previous_scale_to_landmark_idx[i] = selected_landmarks;
        scale._landmark_to_original_data_idx[selected_landmarks] = previous_scale._landmark_to_original_data_idx[i];
        scale._landmark_to_previous_scale_idx[selected_landmarks] = i;
        ++selected_landmarks;
      }
    }
  }
}
template <typename scalar_type, typename sparse_scalar_matrix_type>
bool HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::addScaleImpl() {
  // Adds one scale on top of the hierarchy:
  //  1. selects landmarks on the previous scale (MCMC or fixed reduction);
  //  2. estimates each previous-scale point's area of influence via random
  //     walks that terminate on a landmark;
  //  3. derives the new transition matrix from the AoI overlap and
  //     row-normalizes it. Always returns true.
  utils::ScopedTimer<scalar_type, utils::Seconds> timer_tot(_statistics._total_time);
  utils::secureLog(_logger, "Add a new scale ...");
  _hierarchy.push_back(Scale());
  Scale& scale = _hierarchy[_hierarchy.size() - 1];
  Scale& previous_scale = _hierarchy[_hierarchy.size() - 2];
  const unsigned_int_type previous_scale_dp = previous_scale._landmark_to_original_data_idx.size();
  // Landmark selection
  unsigned_int_type selected_landmarks = 0;
  if (_params._monte_carlo_sampling) {
    selectLandmarksWithStationaryDistribution(previous_scale, scale, selected_landmarks);
  }
  else {
    selectLandmarks(previous_scale, scale, selected_landmarks);
  }
  utils::secureLogValue(_logger, "\t#landmarks", selected_landmarks);
  {//Area of influence
    std::default_random_engine generator(seed());
    std::uniform_real_distribution<double> distribution_real(0.0, 1.0);
    const unsigned_int_type max_jumps = 100;//1000.*selected_landmarks/previous_scale_dp;
    const unsigned_int_type walks_per_dp = _params._num_walks_per_landmark;
    utils::secureLog(_logger, "\tComputing area of influence...");
    {
      utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._aoi_time);
      unsigned_int_type num_elem_in_Is(0);
      // FIX: give every thread a private copy of the RNG/distribution —
      // sharing one engine across OpenMP threads is a data race.
#pragma omp parallel for firstprivate(generator, distribution_real)
      for (int d = 0; d < previous_scale_dp; ++d) {
        // Count how often each landmark terminates a walk started at d.
        std::unordered_map<unsigned_int_type, unsigned_int_type> landmarks_reached;
        for (int i = 0; i < walks_per_dp; ++i) {
          auto res = randomWalk(d, scale._previous_scale_to_landmark_idx, max_jumps, previous_scale._transition_matrix, distribution_real, generator);
          if (res != -1) {
            ++landmarks_reached[scale._previous_scale_to_landmark_idx[res]];
          }
          else {
            //--i;
          }
        }
        // Shared structures are only mutated inside this critical section.
#pragma omp critical
        {
          num_elem_in_Is += landmarks_reached.size();
          for (auto l : landmarks_reached) {
            for (auto other_l : landmarks_reached) {
              //to avoid that the sparsity of the matrix it is much different from the effective sparsity
              if (l.second <= _params._transition_matrix_prune_thresh || other_l.second <= _params._transition_matrix_prune_thresh)
                continue;
              if (l.first != other_l.first) {
                scale._transition_matrix[l.first][other_l.first] += l.second * other_l.second * previous_scale._landmark_weight[d];
              }
            }
          }
          for (auto l : landmarks_reached) {
            const scalar_type prob = scalar_type(l.second) / walks_per_dp;
            scale._area_of_influence[d][l.first] = prob;
            scale._landmark_weight[l.first] += prob * previous_scale._landmark_weight[d];
          }
        }
      }
      _statistics._aoi_num_walks = previous_scale_dp * walks_per_dp;
      _statistics._aoi_sparsity = 1 - scalar_type(num_elem_in_Is) / (previous_scale_dp*selected_landmarks);
    }
    {
      utils::secureLog(_logger, "\tComputing finite markov chain...");
      utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._fmc_time);
      unsigned_int_type num_elem_in_Ts(0);
      unsigned_int_type num_effective_elem_in_Ts(0);
      for (int l = 0; l < scale._transition_matrix.size(); ++l) {
        num_elem_in_Ts += scale._transition_matrix[l].size();
        scalar_type sum(0);
        for (auto& e : scale._transition_matrix[l]) {
          sum += e.second;
        }
        // Row-normalize; entries above 1% feed the effective-sparsity stat.
        for (auto& e : scale._transition_matrix[l]) {
          e.second /= sum;
          if (e.second > 0.01) {
            ++num_effective_elem_in_Ts;
          }
        }
      }
      _statistics._fmc_sparsity = 1 - scalar_type(num_elem_in_Ts) / (selected_landmarks*selected_landmarks);
      _statistics._fmc_effective_sparsity = 1 - scalar_type(num_effective_elem_in_Ts) / (selected_landmarks*selected_landmarks);
    }
  }
  utils::secureLogValue(_logger, "Min memory requirements (MB)", scale.mimMemoryOccupation());
  return true;
}
template <typename scalar_type, typename sparse_scalar_matrix_type>
bool HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::addScaleOutOfCoreImpl() {
typedef typename sparse_scalar_matrix_type::value_type map_type;
typedef typename map_type::key_type key_type;
typedef typename map_type::mapped_type mapped_type;
typedef hdi::data::MapHelpers<key_type, mapped_type, map_type> map_helpers_type;
utils::ScopedTimer<scalar_type, utils::Seconds> timer_tot(_statistics._total_time);
utils::secureLog(_logger, "Add a new scale with out-of-core implementation ...");
_hierarchy.push_back(Scale());
Scale& scale = _hierarchy[_hierarchy.size() - 1];
Scale& previous_scale = _hierarchy[_hierarchy.size() - 2];
const unsigned_int_type previous_scale_dp = previous_scale._landmark_to_original_data_idx.size();
// Landmark selection
unsigned_int_type selected_landmarks = 0;
if (_params._monte_carlo_sampling) {
selectLandmarksWithStationaryDistribution(previous_scale, scale, selected_landmarks);
}
else {
selectLandmarks(previous_scale, scale, selected_landmarks);
}
utils::secureLogValue(_logger, "\t#landmarks", selected_landmarks);
{//Area of influence
std::default_random_engine generator(seed());
std::uniform_real_distribution<double> distribution_real(0.0, 1.0);
const unsigned_int_type max_jumps = 200;//1000.*selected_landmarks/previous_scale_dp;
const unsigned_int_type walks_per_dp = _params._num_walks_per_landmark;
utils::secureLog(_logger, "\tComputing area of influence...");
{
utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._aoi_time);
int d = 0;
unsigned_int_type num_elem_in_Is(0);
{
utils::LogProgress progress(_verbose ? _logger : nullptr);
progress.setNumSteps(previous_scale_dp);
progress.setNumTicks(previous_scale_dp / 50000);
progress.setName("Area of influence");
progress.start();
//#ifdef __USE_GCD__
// std::cout << "GCD dispatch, hierarchical_sne_inl 587.\n";
// dispatch_queue_t criticalQueue = dispatch_queue_create("critical", NULL);
// dispatch_apply(previous_scale_dp, dispatch_get_global_queue(0, 0), ^(size_t d) {
//#else
#pragma omp parallel for
for (int d = 0; d < previous_scale_dp; ++d) {
//#endif //__USE_GCD__
//map because it must be ordered for the initialization of the maps
std::map<unsigned_int_type, scalar_type> landmarks_reached;
for (int i = 0; i < walks_per_dp; ++i) {
auto res = randomWalk(d, scale._previous_scale_to_landmark_idx, max_jumps, previous_scale._transition_matrix, distribution_real, generator);
if (res != -1) {
++landmarks_reached[scale._previous_scale_to_landmark_idx[res]];
}
else {
//--i;
}
}
//normalization
for (auto& l : landmarks_reached) {
l.second = scalar_type(l.second) / walks_per_dp;
}
//saving aoi
map_helpers_type::initialize(scale._area_of_influence[d], landmarks_reached.begin(), landmarks_reached.end());
map_helpers_type::shrinkToFit(scale._area_of_influence[d]);
progress.step();
}
//#ifdef __USE_GCD__
// );
//#endif
progress.finish();
}
utils::secureLog(_logger, "\tCaching weights...");
//caching of the weights
for (d = 0; d < previous_scale_dp; ++d) {
num_elem_in_Is += scale._area_of_influence[d].size();
for (auto& e : scale._area_of_influence[d]) {
scale._landmark_weight[e.first] += e.second;
}
}
utils::secureLog(_logger, "\tInverting the AoI matrix...");
//Inverse AoI -> critical for the computation time
sparse_scalar_matrix_type inverse_aoi;
map_helpers_type::invert(scale._area_of_influence, inverse_aoi);
utils::secureLog(_logger, "\tComputing similarities...");
//Similarities -> compute the overlap of the area of influence
{
utils::LogProgress progress(_verbose ? _logger : nullptr);
progress.setNumSteps(scale._transition_matrix.size());
progress.setNumTicks(scale._transition_matrix.size() / 5000);
progress.setName("Similarities");
progress.start();
// #ifdef __USE_GCD__
// std::cout << "GCD dispatch, hierarchical_sne_inl 602.\n";
// dispatch_apply(scale._transition_matrix.size(), dispatch_get_global_queue(0, 0), ^(size_t l) {
// #else
#pragma omp parallel for
for (int l = 0; l < scale._transition_matrix.size(); ++l) {
// #endif //__USE_GCD__
//ordered for efficient initialization
std::map<typename sparse_scalar_matrix_type::value_type::key_type, typename sparse_scalar_matrix_type::value_type::mapped_type> temp_trans_mat; // use map here
for (const auto& d : inverse_aoi[l]) {
for (const auto& aoi : scale._area_of_influence[d.first]) {
double single_landmark_thresh = (1. / 100.)*_params._transition_matrix_prune_thresh;
if (l != aoi.first) {
if (d.second <= single_landmark_thresh || aoi.second <= single_landmark_thresh)
continue;
temp_trans_mat[aoi.first] += d.second * aoi.second * previous_scale._landmark_weight[d.first];
}
}
}
//normalization
double sum = 0;
for (auto& v : temp_trans_mat) { sum += v.second; }
for (auto& v : temp_trans_mat) { v.second /= sum; }
//removed the threshold depending on the scale -> it makes sense to remove only uneffective neighbors based at every scale -> memory is still under control
map_helpers_type::initialize(scale._transition_matrix[l],temp_trans_mat.begin(),temp_trans_mat.end(), static_cast<mapped_type>(0.001));
map_helpers_type::shrinkToFit(scale._transition_matrix[l]);
progress.step();
}
// #ifdef __USE_GCD__
// );
// #endif
progress.finish();
}
_statistics._aoi_num_walks = previous_scale_dp * walks_per_dp;
_statistics._aoi_sparsity = 1 - scalar_type(num_elem_in_Is) / (previous_scale_dp*selected_landmarks);
}
{
utils::secureLog(_logger, "\tComputing finite markov chain...");
utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._fmc_time);
unsigned_int_type num_elem_in_Ts(0);
unsigned_int_type num_effective_elem_in_Ts(0);
for (int l = 0; l < scale._transition_matrix.size(); ++l) {
num_elem_in_Ts += scale._transition_matrix[l].size();
scalar_type sum(0);
for (auto& e : scale._transition_matrix[l]) {
sum += e.second;
}
for (auto& e : scale._transition_matrix[l]) {
e.second /= sum;
if (e.second > 0.001) {
++num_effective_elem_in_Ts;
}
}
}
_statistics._fmc_sparsity = 1 - scalar_type(num_elem_in_Ts) / (selected_landmarks*selected_landmarks);
_statistics._fmc_effective_sparsity = 1 - scalar_type(num_effective_elem_in_Ts) / (selected_landmarks*selected_landmarks);
}
}
return true;
}
template <typename scalar_type, typename sparse_scalar_matrix_type>
typename HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::unsigned_int_type HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::seed()const {
  // A strictly positive user-provided seed is honored verbatim; otherwise the
  // wall clock supplies a fresh, non-reproducible seed.
  if (_params._seed > 0) {
    return static_cast<unsigned_int_type>(_params._seed);
  }
  return std::chrono::system_clock::now().time_since_epoch().count();
}
///////////////////////////////////////////////////////////////////
template <typename scalar_type, typename sparse_scalar_matrix_type>
void HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::getInfluencedLandmarksInPreviousScale(unsigned_int_type scale_id, std::vector<unsigned_int_type>& idxes, std::map<unsigned_int_type, scalar_type>& neighbors)const {
  // For every landmark at the previous scale, accumulate how much of its area
  // of influence falls on the selected landmarks (idxes) of scale `scale_id`;
  // only landmarks that receive any influence are reported in `neighbors`.
  neighbors.clear();
  const std::unordered_set<unsigned_int_type> selected(idxes.begin(), idxes.end());
  const auto& aoi_matrix = _hierarchy[scale_id]._area_of_influence;
  for (int dp = 0; dp < aoi_matrix.size(); ++dp) {
    double cumulated = 0;
    for (const auto& entry : aoi_matrix[dp]) {
      if (selected.count(entry.first) != 0) {
        cumulated += entry.second;
      }
    }
    if (cumulated > 0) {
      neighbors[dp] = cumulated;
    }
  }
}
template <typename scalar_type, typename sparse_scalar_matrix_type>
void HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::getInfluencingLandmarksInNextScale(unsigned_int_type scale_id, std::vector<unsigned_int_type>& idxes, std::map<unsigned_int_type, scalar_type>& neighbors)const {
  //! Collects the landmarks at scale `scale_id` + 1 that influence the given
  //! landmarks (`idxes`) of scale `scale_id`. Each reported value is the
  //! fraction of that next-scale landmark's total influence that is spent on
  //! the selection. `neighbors` is empty when there is no next scale.
  neighbors.clear();
  int next_scale_id = scale_id + 1;
  if (next_scale_id + 1 > _hierarchy.size()) return;
  std::map<unsigned_int_type, scalar_type> completeSet;
  // Influence of the next-scale landmarks restricted to the selection.
  // (const& avoids copying every (landmark, weight) pair.)
  for (int i = 0; i < idxes.size(); i++)
  {
    for (const auto& v : _hierarchy[next_scale_id]._area_of_influence[idxes[i]]) {
      neighbors[v.first] += v.second;
    }
  }
  // Total influence of every next-scale landmark over all points.
  for (int i = 0; i < _hierarchy[next_scale_id]._area_of_influence.size(); i++)
  {
    for (const auto& v : _hierarchy[next_scale_id]._area_of_influence[i]) {
      completeSet[v.first] += v.second;
    }
  }
  // Normalize in place: fixes the redundant lookup of the original, which
  // indexed `neighbors[v.first]` while already iterating over `neighbors`.
  for (auto& v : neighbors)
  {
    v.second /= completeSet[v.first];
  }
}
template <typename scalar_type, typename sparse_scalar_matrix_type>
void HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::getInterpolationWeights(sparse_scalar_matrix_type& influence, int scale)const {
  //! Computes, for every original data point, the interpolation weights with
  //! respect to the landmarks at the requested scale (a negative `scale`
  //! selects the top scale) by chaining the per-scale area-of-influence
  //! matrices. Throws if `scale` exceeds the hierarchy depth.
  influence.clear();
  influence.resize(_num_dps);
  scale = (scale < 0) ? (_hierarchy.size() - 1) : scale;
  checkAndThrowLogic(scale < _hierarchy.size(), "getInterpolationWeights: Invalid scale");
  //#ifdef __USE_GCD__
  //  std::cout << "GCD dispatch, hierarchical_sne_inl 724.\n";
  //  dispatch_apply(_num_dps, dispatch_get_global_queue(0, 0), ^(size_t i) {
  //#else
#pragma omp parallel for
  for (int i = 0; i < _num_dps; ++i) {
    //#endif //__USE_GCD__
    influence[i] = _hierarchy[1]._area_of_influence[i];
    for (int s = 2; s <= scale; ++s) {
      typename sparse_scalar_matrix_type::value_type temp_link;
      // const& avoids copying every (landmark, weight) pair per iteration
      // (the original iterated by value in this hot nested loop).
      for (const auto& l : influence[i]) {
        for (const auto& new_l : _hierarchy[s]._area_of_influence[l.first]) {
          temp_link[new_l.first] += l.second * new_l.second;
        }
      }
      influence[i] = temp_link;
    }
  }
  //#ifdef __USE_GCD__
  //  );
  //#endif
}
template <typename scalar_type, typename sparse_scalar_matrix_type>
void HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::getInterpolationWeights(const std::vector<unsigned int>& data_points, sparse_scalar_matrix_type& influence, int scale)const {
  //! Computes the interpolation weights at the requested scale (a negative
  //! `scale` selects the top scale) for the given subset of original data
  //! points only. influence[i] corresponds to data_points[i].
  auto n = data_points.size();
  influence.clear();
  influence.resize(n);
  scale = (scale < 0) ? (_hierarchy.size() - 1) : scale;
  checkAndThrowLogic(scale < _hierarchy.size(), "getInterpolationWeights: Invalid scale");
  //#ifdef __USE_GCD__
  //  std::cout << "GCD dispatch, hierarchical_sne_inl 755.\n";
  //  dispatch_apply(n, dispatch_get_global_queue(0, 0), ^(size_t i) {
  //#else
#pragma omp parallel for
  for (int i = 0; i < n; ++i) {
    //#endif //__USE_GCD__
    influence[i] = _hierarchy[1]._area_of_influence[data_points[i]];
    for (int s = 2; s <= scale; ++s) {
      typename sparse_scalar_matrix_type::value_type temp_link;
      // const& avoids copying every (landmark, weight) pair per iteration
      // (the original iterated by value in this hot nested loop).
      for (const auto& l : influence[i]) {
        for (const auto& new_l : _hierarchy[s]._area_of_influence[l.first]) {
          temp_link[new_l.first] += l.second * new_l.second;
        }
      }
      influence[i] = temp_link;
    }
  }
  //#ifdef __USE_GCD__
  //  );
  //#endif
}
template <typename scalar_type, typename sparse_scalar_matrix_type>
void HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::getInfluenceOnDataPoint(unsigned_int_type dp, std::vector<std::unordered_map<unsigned_int_type, scalar_type>>& influence, scalar_type thresh, bool normalized)const {
  //! Computes the influence exercised on data point `dp` by the landmarks of
  //! every scale: influence[s] maps a landmark id at scale s to its influence
  //! on dp. Landmarks whose influence at the previous scale is below `thresh`
  //! are not propagated upward. When `normalized`, each scale's map is
  //! rescaled to sum to 1.
  assert(dp < _hierarchy[0].size());
  influence.resize(_hierarchy.size());
  influence[0][dp] = 1; //Hey it's me!
  if (influence.size() == 1) {
    return;
  }
  // Scale 1 influence is read directly from the area-of-influence matrix.
  for (const auto& v : _hierarchy[1]._area_of_influence[dp]) {
    influence[1][v.first] = v.second;
  }
  if (normalized)
  {
    double sum = 0;
    for (auto& v : influence[1]) { sum += v.second; }
    for (auto& v : influence[1]) { v.second /= sum; }
  }
  // Propagate scale by scale: influence at scale s is influence at s-1
  // multiplied by the s-th area-of-influence matrix.
  for (int s = 2; s < _hierarchy.size(); ++s) {
    // const& avoids copying each (landmark, weight) pair (the original
    // iterated by value here).
    for (const auto& l : influence[s - 1]) {
      if (l.second >= thresh) {
        for (const auto& new_l : _hierarchy[s]._area_of_influence[l.first]) {
          influence[s][new_l.first] += l.second * new_l.second;
        }
      }
    }
    if (normalized)
    {
      double sum = 0;
      for (auto& v : influence[s]) { sum += v.second; }
      for (auto& v : influence[s]) { v.second /= sum; }
    }
  }
}
template <typename scalar_type, typename sparse_scalar_matrix_type>
void HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::getStochasticLocationAtHigherScale(unsigned_int_type orig_scale, unsigned_int_type dest_scale, const std::vector<unsigned_int_type>& subset_orig_scale, sparse_scalar_matrix_type& closeness)const {
  //! Computes, for each landmark in `subset_orig_scale` (given at scale
  //! `orig_scale`), its stochastic location with respect to the landmarks at
  //! the higher scale `dest_scale`, chaining the intermediate
  //! area-of-influence matrices. Requires orig_scale < dest_scale and both
  //! scales inside the hierarchy.
  checkAndThrowLogic(dest_scale > orig_scale, "getStochasticLocationAtHigherScale (0)");
  checkAndThrowLogic(orig_scale < _hierarchy.size() - 1, "getStochasticLocationAtHigherScale (2)");
  checkAndThrowLogic(dest_scale < _hierarchy.size(), "getStochasticLocationAtHigherScale (3)");
  closeness.clear();
  closeness.resize(subset_orig_scale.size());
  //#ifdef __USE_GCD__
  //  std::cout << "GCD dispatch, hierarchical_sne_inl 814.\n";
  //  dispatch_apply(subset_orig_scale.size(), dispatch_get_global_queue(0, 0), ^(size_t i) {
  //#else
#pragma omp parallel for
  for (int i = 0; i < subset_orig_scale.size(); ++i) {
    //#endif //__USE_GCD__
    assert(subset_orig_scale[i] < _hierarchy[orig_scale + 1]._area_of_influence.size());
    closeness[i] = _hierarchy[orig_scale + 1]._area_of_influence[subset_orig_scale[i]];
    for (int s = orig_scale + 2; s <= dest_scale; ++s) {
      typename sparse_scalar_matrix_type::value_type temp_link;
      // const& avoids copying every (landmark, weight) pair per iteration
      // (the original iterated by value in this hot nested loop).
      for (const auto& l : closeness[i]) {
        for (const auto& new_l : _hierarchy[s]._area_of_influence[l.first]) {
          temp_link[new_l.first] += l.second * new_l.second;
        }
      }
      closeness[i] = temp_link;
    }
  }
  //#ifdef __USE_GCD__
  //  );
  //#endif
}
//! This function computes the cumulative area of influence (aoi) of a "selection"of landmark points at scale "scale_id" for each point at the data level. This process is described in Section 3.3 of https://doi.org/10.1111/cgf.12878
template <typename scalar_type, typename sparse_scalar_matrix_type>
void HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::getAreaOfInfluence(unsigned_int_type scale_id, const std::vector<unsigned_int_type>& selection, std::vector<scalar_type>& aoi)const {
  //! Computes, for each point at the data level, the cumulative area of
  //! influence of the selected landmarks at scale `scale_id` through a chain
  //! of sparse matrix multiplications (Section 3.3 of
  //! https://doi.org/10.1111/cgf.12878). aoi is resized to the data level.
  typedef typename sparse_scalar_matrix_type::value_type map_type;
  typedef typename map_type::key_type key_type;
  typedef typename map_type::mapped_type mapped_type;
  // (Removed an unused map_helpers_type typedef that was declared here.)
  checkAndThrowLogic(scale_id < _hierarchy.size(), "getAreaOfInfluence (3)");
  // initialize the area of influence vector
  aoi.assign(scale(0).size(), 0);
  // at scale 0 every point has a maximum area of influence (1) on itself.
  if (scale_id == 0) {
#pragma omp parallel for
    for (int i = 0; i < selection.size(); ++i) {
      aoi[selection[i]] = 1;
    }
  }
  else {
    const std::unordered_set<unsigned_int_type> selected_scale_id_landmarks(selection.cbegin(), selection.cend()); // unordered set since we need all items to be unique
    // Compute for every point at the data level the area of influence (aoi)
    // of the "selection" landmark points at scale scale_id.
#pragma omp parallel for schedule(dynamic,1)
    for (int i = 0; i < scale(0).size(); ++i) {
      const auto& scale_1_aois = scale(1)._area_of_influence[i];
      // Holder for the super scale landmark aois, using std::vector for quick look-up
      std::vector<std::pair<key_type, mapped_type>> super_aois(scale_1_aois.begin(), scale_1_aois.end());
      // Walk the scale hierarchy from super-scale to sub-scale: for each scale
      // compute the cumulative influence of the landmarks from the lower scale
      // on data point "i" at the current scale.
      for (int s = 2; s <= scale_id; ++s) {
        // Unordered map for quick insertion; ordering is not needed but we do
        // want to avoid multiple entries per data/landmark point.
        std::unordered_map<key_type, mapped_type> current_aoi_cumulative;
        // const& avoids copying each (landmark, weight) pair.
        for (const auto& super_aoi : super_aois) {
          for (const auto& current_aoi : scale(s)._area_of_influence[super_aoi.first]) {
            // compute cumulative aoi sum of products
            current_aoi_cumulative[current_aoi.first] += super_aoi.second * current_aoi.second;
          }
        }
        // assign() replaces the previous resize() + std::copy() pair.
        super_aois.assign(current_aoi_cumulative.cbegin(), current_aoi_cumulative.cend());
      }
      // Now super_aois is expressed at scale_id, so its indices match the
      // indices in "selection": accumulate the influence of selected landmarks.
      for (const auto& scale_id_aoi : super_aois) {
        if (selected_scale_id_landmarks.find(scale_id_aoi.first) != selected_scale_id_landmarks.end()) {
          aoi[i] += scale_id_aoi.second;
        }
      }
    }
  }
}
template <typename scalar_type, typename sparse_scalar_matrix_type>
void HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::getAreaOfInfluenceTopDown(unsigned_int_type scale_id, const std::vector<unsigned_int_type>& selection, std::vector<scalar_type>& aoi, double threshold)const {
  //! Marks (with 1) the data-level points reached by propagating the selected
  //! landmarks at `scale_id` down the hierarchy; at each step only landmarks
  //! whose cumulated influence exceeds the threshold survive.
  //! (Removed four unused local typedefs and an unordered_set of the selection
  //! that was built but never read.)
  checkAndThrowLogic(scale_id < _hierarchy.size(), "getAreaOfInfluenceTopDown (3)");
  // gamma is the per-scale influence threshold; fall back to 0.3 when the
  // caller-provided threshold is outside [0, 1].
  double gamma = 0.3;
  if ((threshold >= 0) && (threshold <= 1.0)) {
    gamma = threshold;
  }
  aoi.clear();
  aoi.resize(scale(0).size(), 0);
  if (scale_id == 0) {
    // At the data level the selection influences exactly itself.
    for (int i = 0; i < selection.size(); ++i) {
      aoi[selection[i]] = 1;
    }
  }
  else {
    std::vector<unsigned_int_type> scale_selection = selection;
    // Walk down one scale at a time, keeping only the landmarks whose
    // cumulated influence from the current selection exceeds gamma.
    for (int s = scale_id; s > 0; --s) {
      std::map<unsigned_int_type, scalar_type> neighbors;
      getInfluencedLandmarksInPreviousScale(s, scale_selection, neighbors);
      scale_selection.clear();
      for (const auto& neigh : neighbors) {
        if (neigh.second > gamma) {
          scale_selection.push_back(neigh.first);
        }
      }
    }
    for (int i = 0; i < scale_selection.size(); ++i) {
      aoi[scale_selection[i]] = 1;
    }
  }
}
///////////////////////////////////////////////////////////////////
/// RANDOM WALKS
///////////////////////////////////////////////////////////////////
//Compute a random walk using a transition matrix
template <typename scalar_type, typename sparse_scalar_matrix_type>
typename HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::unsigned_int_type HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::randomWalk(unsigned_int_type starting_point, unsigned_int_type max_length, const sparse_scalar_matrix_type& transition_matrix, std::uniform_real_distribution<double>& distribution, std::default_random_engine& generator) {
  // Performs a fixed-length random walk on the given Markov chain and returns
  // the node it lands on; returns the maximum representable index when a node
  // with no outgoing transition is hit (disconnected walk).
  unsigned_int_type current = starting_point;
  for (int step = 0; step <= max_length; ++step) {
    // Draw one uniform number and pick the transition whose cumulative
    // probability interval contains it.
    const double draw = distribution(generator);
    unsigned_int_type next = current;
    double cumulative = 0;
    for (const auto& elem : transition_matrix[current]) {
      cumulative += elem.second;
      if (draw < cumulative) {
        next = elem.first;
        break;
      }
    }
    if (next == current) {
      // No transition was taken: the walk is stuck.
      return std::numeric_limits<unsigned_int_type>::max();
    }
    current = next;
  }
  return current;
}
//!Compute a random walk using a transition matrix
template <typename scalar_type, typename sparse_scalar_matrix_type>
int HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::randomWalk(unsigned_int_type starting_point, const std::vector<int>& stopping_points, unsigned_int_type max_length, const sparse_scalar_matrix_type& transition_matrix, std::uniform_real_distribution<double>& distribution, std::default_random_engine& generator) {
  //! Random walk on `transition_matrix` from `starting_point` that terminates
  //! as soon as it reaches a stopping point (stopping_points[i] != -1).
  //! Returns the index of the node reached, or -1 when the walk exceeds
  //! `max_length` steps or gets stuck on a node with no outgoing transition.
  unsigned_int_type dp_idx = starting_point;
  int walk_length = 0;
  do {
    // Draw one uniform number and pick the transition whose cumulative
    // probability interval contains it.
    const double rnd_num = distribution(generator);
    unsigned_int_type idx_knn = dp_idx;
    double incremental_prob = 0;
    for (auto& elem : transition_matrix[dp_idx]) {
      incremental_prob += elem.second;
      if (rnd_num < incremental_prob) {
        idx_knn = elem.first;
        break;
      }
    }
    if (idx_knn == dp_idx) {
      // Disconnected node. (Fix: removed the unreachable std::cout statement
      // that followed this return in the original.)
      return -1;
    }
    dp_idx = idx_knn;
    ++walk_length;
  } while (stopping_points[dp_idx] == -1 && walk_length <= max_length);
  if (walk_length > max_length) {
    return -1;
  }
  return static_cast<int>(dp_idx);
}
////////////////////////////////////////////////////////////////////////////////////
template <typename scalar_type, typename sparse_scalar_matrix_type>
typename HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::int_type HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::ClusterTree::getFreeClusterId(unsigned_int_type scale_id) {
  //! Returns the smallest non-negative id not used by any cluster at the
  //! given scale.
  //! BUG FIX: the previous implementation returned `i` as soon as ANY cluster
  //! had an id different from `i`, so it could hand out ids that were already
  //! taken. An id is free only when NO cluster at the scale uses it.
  int_type max = std::numeric_limits<int_type>::max();
  for (int_type i = 0; i < max; ++i) {
    bool in_use = false;
    for (int j = 0; j < _cluster_tree[scale_id].size(); ++j) {
      if (i == _cluster_tree[scale_id][j].id()) {
        in_use = true;
        break;
      }
    }
    if (!in_use) {
      return i;
    }
  }
  return 0;
}
template <typename scalar_type, typename sparse_scalar_matrix_type>
void HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::ClusterTree::addCluster(unsigned_int_type scale_id, const cluster_type& cluster) {
  // Appends a cluster to the requested scale after validating its id and its
  // parent link: root clusters (top scale) have no parent, all others must.
  checkAndThrowLogic(scale_id < _cluster_tree.size(), "ClusterHierarchy::addCluster: invalid scale");
  for (const auto& existing : _cluster_tree[scale_id]) {
    checkAndThrowLogic(cluster.id() != existing.id(), "ClusterHierarchy::addCluster: duplicated id");
  }
  const bool is_top_scale = (scale_id == _cluster_tree.size() - 1);
  if (is_top_scale) {
    checkAndThrowLogic(cluster.parent_id() == Cluster::NULL_LINK, "ClusterHierarchy::addCluster: root clusters must have parent_id = Cluster::NULL_LINK");
  }
  else {
    checkAndThrowLogic(cluster.parent_id() != Cluster::NULL_LINK, "ClusterHierarchy::addCluster: non-root clusters must have parent_id != Cluster::NULL_LINK");
  }
  _cluster_tree[scale_id].push_back(cluster);
}
template <typename scalar_type, typename sparse_scalar_matrix_type>
void HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::ClusterTree::removeCluster(unsigned_int_type scale_id, int_type cluster_id) {
  // Erases the first cluster with the given id at the given scale; silently a
  // no-op when no such cluster exists.
  checkAndThrowLogic(scale_id < _cluster_tree.size(), "ClusterHierarchy::removeCluster: invalid scale");
  auto& clusters = _cluster_tree[scale_id];
  for (auto it = clusters.begin(); it != clusters.end(); ++it) {
    if (it->id() == cluster_id) {
      clusters.erase(it);
      break;
    }
  }
}
template <typename scalar_type, typename sparse_scalar_matrix_type>
bool HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::ClusterTree::hasClusterId(unsigned_int_type scale_id, int_type cluster_id)const {
  // True iff a cluster with the given id exists at the given scale.
  checkAndThrowLogic(scale_id < _cluster_tree.size(), "ClusterHierarchy::hasClusterId: invalid scale");
  for (const auto& candidate : _cluster_tree[scale_id]) {
    if (candidate.id() == cluster_id) {
      return true;
    }
  }
  return false;
}
template <typename scalar_type, typename sparse_scalar_matrix_type>
const typename HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::ClusterTree::cluster_type& HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::ClusterTree::cluster(unsigned_int_type scale_id, int_type cluster_id)const {
  // Returns a reference to the cluster with the given id at the given scale;
  // throws when the id is unknown.
  checkAndThrowLogic(hasClusterId(scale_id, cluster_id), "ClusterHierarchy::cluster: invalid cluster");
  for (const auto& candidate : _cluster_tree[scale_id]) {
    if (candidate.id() == cluster_id) {
      return candidate;
    }
  }
  // Unreachable: the guard above already ensured the cluster exists.
  throw std::logic_error("Invalid cluster");
}
template <typename scalar_type, typename sparse_scalar_matrix_type>
bool HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::ClusterTree::checkCluterConsistency(const HierarchicalSNE& hsne, unsigned_int_type scale_id, int_type cluster_id) {
  //! Validates a single cluster: a cluster is consistent when its recorded
  //! parent is the next-scale cluster that receives the largest share of its
  //! landmarks' area of influence (and that share beats the unclustered
  //! share). Root clusters at the top scale are always valid. Logs a report
  //! either way and returns the verdict.
  checkAndThrowLogic(hasClusterId(scale_id, cluster_id), "ClusterHierarchy::checkCluterConsistency: invalid cluster");
  if (scale_id == _cluster_tree.size() - 1) {
    std::stringstream ss;
    ss << "Validating cluster " << cluster_id << " at scale " << scale_id << ":\tis a root node => valid";
    utils::secureLog(_logger, ss.str());
    return true;
  }
  // Locate the cluster's position inside the per-scale vector (its id is not
  // necessarily its index).
  int_type cluster_id_in_vector = -1;
  for (int j = 0; j < _cluster_tree[scale_id].size(); ++j) {
    if (cluster_id == _cluster_tree[scale_id][j].id()) {
      cluster_id_in_vector = j;
    }
  }
  // influence[i]: total influence of this cluster's landmarks that lands in
  // the i-th next-scale cluster; the remainder counts as "unclustered".
  std::vector<scalar_type> influence(_cluster_tree[scale_id + 1].size(), 0);
  scalar_type unclustered_influence(0);
  auto& scale = hsne.scale(scale_id + 1);
  for (auto e : _cluster_tree[scale_id][cluster_id_in_vector].landmarks()) {
    for (auto aoi : scale._area_of_influence[e]) {
      bool found = false;
      for (int i = 0; i < influence.size(); ++i) {
        auto it = _cluster_tree[scale_id + 1][i].landmarks().find(aoi.first);
        if (it != _cluster_tree[scale_id + 1][i].landmarks().end()) {
          influence[i] += aoi.second;
          found = true;
        }
      }
      if (!found) {
        unclustered_influence += aoi.second;
      }
    }
  }
  // Build a human-readable report and find the next-scale cluster receiving
  // the most influence (-1 when the unclustered share wins).
  std::stringstream ss;
  ss << "Validating cluster " << cluster_id << " at scale " << scale_id << " with parent " << _cluster_tree[scale_id][cluster_id_in_vector].parent_id() << " (" << _cluster_tree[scale_id][cluster_id_in_vector].notes() << ")" << std::endl;
  ss << "\tUnclusterd:\t" << unclustered_influence << std::endl;
  scalar_type max(unclustered_influence);
  int_type res_id(-1);
  for (int i = 0; i < influence.size(); ++i) {
    ss << "\tCluster-" << _cluster_tree[scale_id + 1][i].id() << " (" << _cluster_tree[scale_id + 1][i].notes() << ") :\t" << influence[i] << std::endl;
    if (influence[i] > max) {
      max = influence[i];
      res_id = _cluster_tree[scale_id + 1][i].id();
    }
  }
  utils::secureLog(_logger, ss.str());
  // Consistent iff the dominant next-scale cluster is the recorded parent.
  if (res_id == _cluster_tree[scale_id][cluster_id_in_vector].parent_id()) {
    utils::secureLog(_logger, "Valid");
    return true;
  }
  utils::secureLog(_logger, "INVALID!");
  return false;
}
template <typename scalar_type, typename sparse_scalar_matrix_type>
bool HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::ClusterTree::checkTreeConsistency(const HierarchicalSNE& hsne) {
  // Validates every cluster in the tree, top scale first. Each cluster is
  // checked even after a failure so that every inconsistency gets logged.
  bool all_valid = true;
  for (int s = _cluster_tree.size() - 1; s >= 0; --s) {
    for (int c = 0; c < _cluster_tree[s].size(); ++c) {
      const bool valid = checkCluterConsistency(hsne, s, _cluster_tree[s][c].id());
      all_valid = all_valid && valid;
    }
  }
  return all_valid;
}
template <typename scalar_type, typename sparse_scalar_matrix_type>
void HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::ClusterTree::computePointToClusterAssociation(const HierarchicalSNE& hsne, unsigned_int_type pnt_id, std::tuple<unsigned_int_type, int_type, scalar_type>& res) {
  //! Associates a data point with the deepest cluster that dominates its
  //! influence, descending the cluster tree from the top scale and following
  //! the winning cluster's children. `res` is (scale_id, cluster_id,
  //! influence); cluster_id stays -1 when no cluster wins at the top scale.
  std::vector<std::unordered_map<unsigned_int_type, scalar_type>> influence;
  hsne.getInfluenceOnDataPoint(pnt_id, influence);
  // Default result: top scale, no cluster, full influence.
  res = std::tuple<unsigned_int_type, int_type, scalar_type>(_cluster_tree.size() - 1, -1, 1);
  // Start by considering every cluster at the top scale.
  std::vector<unsigned_int_type> clusters_to_analyze(_cluster_tree[_cluster_tree.size() - 1].size());
  std::iota(clusters_to_analyze.begin(), clusters_to_analyze.end(), 0);
  //just for test
  for (int s = _cluster_tree.size() - 1; s >= 0 && clusters_to_analyze.size(); --s) {
    unsigned_int_type scale_id = s;
    // cluster_influence[i]: influence of the point's scale-s landmarks that
    // fall inside candidate cluster i; the remainder is "unclustered".
    std::vector<scalar_type> cluster_influence(clusters_to_analyze.size(), 0);
    scalar_type unclustered_influence(0);
    for (auto aoi : influence[scale_id]) {
      bool found = false;
      for (int i = 0; i < clusters_to_analyze.size(); ++i) {
        auto it = _cluster_tree[scale_id][clusters_to_analyze[i]].landmarks().find(aoi.first);
        if (it != _cluster_tree[scale_id][clusters_to_analyze[i]].landmarks().end()) {
          cluster_influence[i] += aoi.second;
          found = true;
        }
      }
      if (!found) {
        unclustered_influence += aoi.second;
      }
    }
    // Pick the candidate with the largest influence; it must also beat the
    // unclustered share to win.
    scalar_type max(unclustered_influence);
    int_type cluster_id(-1);
    for (int i = 0; i < clusters_to_analyze.size(); ++i) {
      if (cluster_influence[i] > max) {
        max = cluster_influence[i];
        cluster_id = _cluster_tree[scale_id][clusters_to_analyze[i]].id();
      }
    }
    // No cluster dominates at this scale: keep the association found so far.
    if (cluster_id == -1) {
      return;
    }
    res = std::tuple<unsigned_int_type, int_type, scalar_type>(scale_id, cluster_id, max);
    //compute children nodes
    clusters_to_analyze.clear();
    if (s != 0) {
      for (int i = 0; i < _cluster_tree[s - 1].size(); ++i) {
        if (_cluster_tree[s - 1][i].parent_id() == cluster_id) {
          clusters_to_analyze.push_back(i);
        }
      }
    }
  }
}
template <typename scalar_type, typename sparse_scalar_matrix_type>
void HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::ClusterTree::computePointsToClusterAssociation(const HierarchicalSNE& hsne, std::vector<std::tuple<unsigned_int_type, int_type, scalar_type>>& res) {
  // Computes the point-to-cluster association for every data point, in
  // parallel; res[i] holds the association of data point i.
  res.resize(hsne.scale(0).size());
  //#ifdef __USE_GCD__
  //  std::cout << "GCD dispatch, hierarchical_sne_inl 1227.\n";
  //  dispatch_apply(res.size(), dispatch_get_global_queue(0, 0), ^(size_t i) {
  //#else
#pragma omp parallel for
  for (int pnt = 0; pnt < res.size(); ++pnt) {
    //#endif //__USE_GCD__
    computePointToClusterAssociation(hsne, pnt, res[pnt]);
  }
  //#ifdef __USE_GCD__
  //  );
  //#endif
}
///////////////////////////////////////////////////////////////////////////////////7
namespace IO {
template <typename hsne_type, class output_stream_type>
void saveHSNE(const hsne_type& hsne, output_stream_type& stream, utils::AbstractLog* log) {
  //! Serializes an H-SNE hierarchy to a binary output stream: a version pair,
  //! the number of scales, the data-level transition matrix, and for every
  //! higher scale its transition matrix, landmark index maps, weights, and
  //! area of influence. Counterpart of loadHSNE; throws on an empty hierarchy.
  checkAndThrowLogic(hsne.hierarchy().size(), "Cannot save an empty H-SNE hierarchy!!!");
  utils::secureLog(log, "Saving H-SNE hierarchy to file");
  typedef float io_scalar_type;
  // NOTE(review): despite the name, counts and versions are serialized as
  // float. loadHSNE uses the same typedef so files round-trip, but changing
  // either side alone breaks the on-disk format — confirm before touching.
  typedef float io_unsigned_int_type;
  //Version
  io_unsigned_int_type major_version = 0;
  io_unsigned_int_type minor_version = 0;
  stream.write(reinterpret_cast<char*>(&major_version), sizeof(io_unsigned_int_type));
  stream.write(reinterpret_cast<char*>(&minor_version), sizeof(io_unsigned_int_type));
  //Number of scales
  io_unsigned_int_type num_scales = static_cast<io_unsigned_int_type>(hsne.hierarchy().size());
  stream.write(reinterpret_cast<char*>(&num_scales), sizeof(io_unsigned_int_type));
  {
    //The first scale contains only the transition matrix
    auto& scale = hsne.scale(0);
    io_unsigned_int_type n = static_cast<io_unsigned_int_type>(scale.size());
    utils::secureLogValue(log, "Saving scale", 0);
    // NOTE(review): sibling code uses secureLogValue for value logging —
    // presumably an overload of secureLog accepts the extra argument; verify.
    utils::secureLog(log, "\tsize", n);
    stream.write(reinterpret_cast<char*>(&n), sizeof(io_unsigned_int_type));
    utils::secureLog(log, "\t... transition matrix ...");
    data::IO::saveSparseMatrix(scale._transition_matrix, stream, log);
  }
  // Higher scales additionally store the landmark bookkeeping vectors.
  for (int s = 1; s < num_scales; ++s) {
    auto& scale = hsne.scale(s);
    io_unsigned_int_type n = static_cast<io_unsigned_int_type>(scale.size());
    utils::secureLogValue(log, "Saving scale", s);
    utils::secureLogValue(log, "\tsize", n);
    stream.write(reinterpret_cast<char*>(&n), sizeof(io_unsigned_int_type));
    utils::secureLog(log, "\t... transition matrix ...");
    data::IO::saveSparseMatrix(scale._transition_matrix, stream, log);
    utils::secureLog(log, "\t... landmarks to original data ...");
    data::IO::saveUIntVector(scale._landmark_to_original_data_idx, stream, log);
    utils::secureLog(log, "\t... landmarks to previous scale ...");
    data::IO::saveUIntVector(scale._landmark_to_previous_scale_idx, stream, log);
    utils::secureLog(log, "\t... landmark weights ...");
    data::IO::saveScalarVector(scale._landmark_weight, stream, log);
    utils::secureLog(log, "\t... previous scale to current scale landmarks ...");
    data::IO::saveIntVector(scale._previous_scale_to_landmark_idx, stream, log);
    utils::secureLog(log, "\t... area of influence ...");
    data::IO::saveSparseMatrix(scale._area_of_influence, stream, log);
  }
}
///////////////////////////////////////////////////////
template <typename hsne_type, class input_stream_type>
void loadHSNE(hsne_type& hsne, input_stream_type& stream, utils::AbstractLog* log) {
  //! Deserializes an H-SNE hierarchy written by saveHSNE: version pair,
  //! number of scales, then per-scale matrices and landmark bookkeeping.
  //! Scale 0 stores only the transition matrix: its index maps are rebuilt as
  //! the identity and its landmark weights are set to 1. Throws on a version
  //! mismatch or an empty hierarchy.
  utils::secureLog(log, "Loading H-SNE hierarchy from file");
  typedef float io_scalar_type;
  // NOTE(review): integers are (de)serialized through a float type, matching
  // saveHSNE; both sides must agree for files to round-trip.
  typedef float io_unsigned_int_type;
  //Version
  io_unsigned_int_type major_version = 0;
  io_unsigned_int_type minor_version = 0;
  stream.read(reinterpret_cast<char*>(&major_version), sizeof(io_unsigned_int_type));
  stream.read(reinterpret_cast<char*>(&minor_version), sizeof(io_unsigned_int_type));
  checkAndThrowRuntime(major_version == 0, "Invalid major version");
  checkAndThrowRuntime(minor_version == 0, "Invalid minor version");
  //Number of scales
  io_unsigned_int_type num_scales;
  stream.read(reinterpret_cast<char*>(&num_scales), sizeof(io_unsigned_int_type));
  checkAndThrowRuntime(num_scales > 0, "Cannot load an empty hierarchy");
  {
    hsne.hierarchy().clear();
    hsne.hierarchy().push_back(typename hsne_type::Scale());
    auto& scale = hsne.scale(0);
    // NOTE(review): n is initialized from the (still empty) scale size but is
    // immediately overwritten by the read below; the initializer is unused.
    io_unsigned_int_type n = static_cast<io_unsigned_int_type>(scale.size());
    utils::secureLogValue(log, "Loading scale", 0);
    stream.read(reinterpret_cast<char*>(&n), sizeof(io_unsigned_int_type));
    utils::secureLog(log, "\tsize", n);
    utils::secureLog(log, "\t... transition matrix ...");
    data::IO::loadSparseMatrix(scale._transition_matrix, stream, log);
    // Scale 0 landmarks are the data points themselves: identity index maps.
    utils::secureLog(log, "\t... (init) landmarks to original data ...");
    scale._landmark_to_original_data_idx.resize(n);
    std::iota(scale._landmark_to_original_data_idx.begin(), scale._landmark_to_original_data_idx.end(), 0);
    utils::secureLog(log, "\t... (init) landmarks to previous scale ...");
    scale._landmark_to_previous_scale_idx.resize(n);
    std::iota(scale._landmark_to_previous_scale_idx.begin(), scale._landmark_to_previous_scale_idx.end(), 0);
    utils::secureLog(log, "\t... (init) landmark weights ...");
    scale._landmark_weight.resize(n, 1);
  }
  // Higher scales store their bookkeeping vectors explicitly.
  for (int s = 1; s < num_scales; ++s) {
    hsne.hierarchy().push_back(typename hsne_type::Scale());
    auto& scale = hsne.scale(s);
    io_unsigned_int_type n;
    utils::secureLogValue(log, "Loading scale", s);
    stream.read(reinterpret_cast<char*>(&n), sizeof(io_unsigned_int_type));
    utils::secureLogValue(log, "\tsize", n);
    utils::secureLog(log, "\t... transition matrix ...");
    data::IO::loadSparseMatrix(scale._transition_matrix, stream, log);
    utils::secureLog(log, "\t... landmarks to original data ...");
    data::IO::loadUIntVector(scale._landmark_to_original_data_idx, stream, log);
    utils::secureLog(log, "\t... landmarks to previous scale ...");
    data::IO::loadUIntVector(scale._landmark_to_previous_scale_idx, stream, log);
    utils::secureLog(log, "\t... landmark weights ...");
    data::IO::loadScalarVector(scale._landmark_weight, stream, log);
    utils::secureLog(log, "\t... previous scale to current scale landmarks ...");
    data::IO::loadIntVector(scale._previous_scale_to_landmark_idx, stream, log);
    utils::secureLog(log, "\t... area of influence ...");
    data::IO::loadSparseMatrix(scale._area_of_influence, stream, log);
  }
}
}
}
}
#endif
|
GB_unop__exp2_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__exp2_fp64_fp64)
// op(A') function: GB (_unop_tran__exp2_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = exp2 (aij)
// type of the entries of the input matrix A
#define GB_ATYPE \
    double

// type of the entries of the output matrix C
#define GB_CTYPE \
    double

// aij = Ax [pA]: load one entry from the A value array
#define GB_GETA(aij,Ax,pA)  \
    double aij = Ax [pA]

// access entry p of the C value array
#define GB_CX(p) Cx [p]

// unary operator: z = exp2 (x) computes 2**x (base-2 exponential, <math.h>)
#define GB_OP(z, x) \
    z = exp2 (x) ;

// casting: identity here, since both A and C are double (FP64 -> FP64)
#define GB_CAST(z, aij) \
    double z = aij ;

// cij = op (aij): fused load + cast + apply, used by the transpose template
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */ \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = aij ; \
    Cx [pC] = exp2 (z) ; \
}

// true if operator is the identity op with no typecasting (exp2 is not, so 0)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
// (the GxB_NO_* flags are set via GB_control.h to prune compiled kernels)
#define GB_DISABLE \
    (GxB_NO_EXP2 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__exp2_fp64_fp64)
(
double *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
double z = aij ;
Cx [p] = exp2 (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
double z = aij ;
Cx [p] = exp2 (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Computes C = exp2 (A'): transpose A while applying the operator, by textually
// including the shared template GB_unop_transpose.c, which expands the GB_*
// macros defined above (GB_CAST_OP in particular) into the transpose loops.
GrB_Info GB (_unop_tran__exp2_fp64_fp64)
(
    GrB_Matrix C,                       // output matrix
    const GrB_Matrix A,                 // input matrix, transposed on the fly
    int64_t *restrict *Workspaces,      // NOTE(review): per-task workspaces; exact layout defined by GB_unop_transpose.c — not visible here
    const int64_t *restrict A_slice,    // NOTE(review): presumably how A is partitioned across tasks — confirm against the template
    int nworkspaces,                    // number of workspaces
    int nthreads                        // number of threads
)
{
    #if GB_DISABLE
    // this kernel has been compiled out; caller falls back to the generic path
    return (GrB_NO_VALUE) ;
    #else
    // the template provides the loop body; it reads C, A, Workspaces, A_slice,
    // nworkspaces, and nthreads from this enclosing scope
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.