source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
atomic_double.c | #include <stdio.h>
#define N 100
// Offload test: perform N atomic increments of a double on the target
// device, copy the result back, and verify it equals N.
// Exit status is the number of detected errors (0 on success).
int main()
{
    int error = 0;   // running error count, reported and returned
    int fail = 0;    // set when the verification below fails
    double a = 0;
    int i = 0;

    // All N increments run on the device; 'a' is mapped to and from it.
#pragma omp target map(tofrom:a)
    {
#pragma omp parallel for
        for (i = 0; i < N; ++i)
#pragma omp atomic
            a++;
    }

    // Verify that every increment was applied exactly once.
    double result = a;
    double expect = N;
    if (result != expect)
    {
        printf("update (implicit) a %f != %f (error %d)\n",
               result, expect, ++error);
        fail = 1;
    }

    if (!fail)
        printf("successful\n");
    else
        fail = 0;

    printf("done with %d errors\n", error);
    return error;
}
|
GB_unop__sinh_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__sinh_fc32_fc32)
// op(A') function: GB (_unop_tran__sinh_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = csinhf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = csinhf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = csinhf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SINH || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [k] = csinhf (Ax [k]) for every entry of A.  The cast from the A type
// to the C type is the identity here (both are GxB_FC32_t).
GrB_Info GB (_unop_apply__sinh_fc32_fc32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    // operator/type disabled at compile time; caller falls back to generic case
    return (GrB_NO_VALUE) ;
#else
    int64_t k ;
    if (Ab == NULL)
    {
        // A is not bitmap: every one of the anz slots holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            Cx [k] = csinhf (Ax [k]) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b,
        // so only slots with Ab [k] set need to be computed
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (Ab [k])
            {
                Cx [k] = csinhf (Ax [k]) ;
            }
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = csinhf (A'): transpose A, typecast (identity here), and apply the
// unary operator.  All of the work is done by the shared template, which
// expands using the GB_* macros defined above (GB_ATYPE, GB_CAST_OP, ...).
GrB_Info GB (_unop_tran__sinh_fc32_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// operator/type disabled at compile time; caller falls back to generic case
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__land_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__land_int64
// A.*B function (eWiseMult): GB_AemultB__land_int64
// A*D function (colscale): GB_AxD__land_int64
// D*A function (rowscale): GB_DxB__land_int64
// C+=B function (dense accum): GB_Cdense_accumB__land_int64
// C+=b function (dense accum): GB_Cdense_accumb__land_int64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__land_int64
// C=scalar+B GB_bind1st__land_int64
// C=scalar+B' GB_bind1st_tran__land_int64
// C=A+scalar GB_bind2nd__land_int64
// C=A'+scalar GB_bind2nd_tran__land_int64
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = ((aij != 0) && (bij != 0))
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = ((x != 0) && (y != 0)) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LAND || GxB_NO_INT64 || GxB_NO_LAND_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// LAND is not one of the operators supported by the dense ewise3-accum
// kernel (see the comment below), so this function is compiled out and
// given the placeholder name "(none)" by the code generator.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with the LAND operator, where C, A, and B are all dense.
// The template expands using GB_BINOP and the GB_*TYPE macros above.
GrB_Info GB_Cdense_ewise3_noaccum__land_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// operator/type disabled at compile time; caller falls back to generic case
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C with the LAND
// operator.  The slice arrays describe how the caller partitioned B's
// entries across ntasks tasks for the template's parallel loop.
GrB_Info GB_Cdense_accumB__land_int64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
// operator/type disabled at compile time; caller falls back to generic case
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C with the LAND
// operator.  The scalar arrives as an untyped pointer and is read here
// as an int64_t.
//
// Fix: the generated code had a second, unreachable `return (GrB_SUCCESS)`
// after the braced block (the block already returned).  The single return
// is now placed after the block, matching GB_Cdense_accumB above.
GrB_Info GB_Cdense_accumb__land_int64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
// operator/type disabled at compile time; caller falls back to generic case
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, applying the
// LAND operator entrywise.  The *_is_pattern flags tell the template when
// only the structure (not the values) of A or D is needed.
GrB_Info GB_AxD__land_int64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
// operator/type disabled at compile time; caller falls back to generic case
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, applying the
// LAND operator entrywise.
GrB_Info GB_DxB__land_int64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
// operator/type disabled at compile time; caller falls back to generic case
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with the LAND operator.  The pattern of
// C is the set union of A and B; the C_to_* maps and TaskList describe the
// parallel partition computed by the symbolic phase.
GrB_Info GB_AaddB__land_int64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
// operator/type disabled at compile time; caller falls back to generic case
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with the LAND operator.  The pattern
// of C is the set intersection of A and B.
GrB_Info GB_AemultB__land_int64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
// operator/type disabled at compile time; caller falls back to generic case
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [k] = (x LAND Bx [k]) for all k, with the scalar x bound as the first
// operand of z = ((x != 0) && (y != 0)).  The untyped pointers are
// reinterpreted as int64_t, the type of this operator.
GrB_Info GB_bind1st__land_int64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    // operator/type disabled at compile time; caller falls back to generic case
    return (GrB_NO_VALUE) ;
#else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        Cx [k] = ((x != 0) && (Bx [k] != 0)) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [k] = (Ax [k] LAND y) for all k, with the scalar y bound as the second
// operand of z = ((x != 0) && (y != 0)).  The untyped pointers are
// reinterpreted as int64_t, the type of this operator.
GrB_Info GB_bind2nd__land_int64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    // operator/type disabled at compile time; caller falls back to generic case
    return (GrB_NO_VALUE) ;
#else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t y = (*((int64_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        Cx [k] = ((Ax [k] != 0) && (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = ((x != 0) && (aij != 0)) ; \
}
// C = op (x, A'): transpose A and apply the LAND operator with the scalar x
// bound as the first operand.  Uses the GB_CAST_OP macro redefined just
// above, expanded inside the shared transpose template.
GrB_Info GB_bind1st_tran__land_int64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
// operator/type disabled at compile time; caller falls back to generic case
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows (generator boilerplate)
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = ((aij != 0) && (y != 0)) ; \
}
// C = op (A', y): transpose A and apply the LAND operator with the scalar y
// bound as the second operand.  Uses the GB_CAST_OP macro redefined just
// above, expanded inside the shared transpose template.
GrB_Info GB_bind2nd_tran__land_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
// operator/type disabled at compile time; caller falls back to generic case
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
convolution_3x3_pack8to1_int8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Transform 3x3 stride-1 int8 convolution kernels into the 6x6 winograd-42
// domain (short coefficients) and repack them into the layout consumed by
// the pack8-to-1 int8 GEMM: dst = 4b-8a-inch/8a-36-outch/4b.
static void conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt)
{
// Runtime dispatch: forward to an ISA-specific build of this routine when
// the CPU supports an extension this translation unit was not compiled with.
#if NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__
if (ncnn::cpu_support_x86_avx512_vnni())
{
extern void conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse_avx512vnni(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt);
conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse_avx512vnni(kernel, kernel_tm_pack8to1, inch, outch, opt);
return;
}
#endif
#if NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__
if (ncnn::cpu_support_x86_avx_vnni())
{
extern void conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse_avxvnni(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt);
conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse_avxvnni(kernel, kernel_tm_pack8to1, inch, outch, opt);
return;
}
#endif
#if NCNN_AVX2 && __AVX__ && !__AVX2__
if (ncnn::cpu_support_x86_avx2())
{
extern void conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse_avx2(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt);
conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse_avx2(kernel, kernel_tm_pack8to1, inch, outch, opt);
return;
}
#endif
#if NCNN_XOP && __SSE2__ && !__XOP__
if (ncnn::cpu_support_x86_xop())
{
extern void conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse_xop(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt);
conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse_xop(kernel, kernel_tm_pack8to1, inch, outch, opt);
return;
}
#endif
// winograd42 transform kernel
// 36 short coefficients per (inch, outch) pair.
Mat kernel_tm(6 * 6, inch, outch, (size_t)2u);
// 6x3 kernel transform matrix applied first to rows, then to columns.
const short ktm[6][3] = {
{6, 0, 0},
{-4, -4, -4},
{-4, 4, -4},
{1, 2, 4},
{1, -2, 4},
{0, 0, 6}
};
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
// source 3x3 kernel for output channel p, input channel q
const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9;
short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);
// transform kernel
const signed char* k0 = kernel0;
const signed char* k1 = kernel0 + 3;
const signed char* k2 = kernel0 + 6;
// h
// tmp = ktm * k : apply the transform along the 3 kernel rows
short tmp[6][3];
for (int i = 0; i < 6; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U
// U = tmp * ktm^T : apply the transform along the columns, giving 6x6
for (int j = 0; j < 6; j++)
{
short* tmpp = &tmp[j][0];
for (int i = 0; i < 6; i++)
{
kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// interleave
// src = 36-inch-outch
// dst = 4b-8a-inch/8a-36-outch/4b
kernel_tm_pack8to1.create(8 * inch / 8, 36, outch / 4 + outch % 4, (size_t)2u * 4, 4);
// main loop: pack 4 output channels at a time, 8 input channels interleaved
int p = 0;
for (; p + 3 < outch; p += 4)
{
const Mat k0 = kernel_tm.channel(p);
const Mat k1 = kernel_tm.channel(p + 1);
const Mat k2 = kernel_tm.channel(p + 2);
const Mat k3 = kernel_tm.channel(p + 3);
Mat g0 = kernel_tm_pack8to1.channel(p / 4);
for (int k = 0; k < 36; k++)
{
short* g00 = g0.row<short>(k);
for (int q = 0; q + 7 < inch; q += 8)
{
#if __AVXVNNI__ || __AVX512VNNI__ || __XOP__
// VNNI/XOP layout: pairs of input channels kept adjacent so the
// dot-product instructions can consume (even, odd) int16 pairs.
for (int i = 0; i < 4; i++)
{
const short* k00 = k0.row<const short>(q + i * 2);
const short* k10 = k1.row<const short>(q + i * 2);
const short* k20 = k2.row<const short>(q + i * 2);
const short* k30 = k3.row<const short>(q + i * 2);
const short* k01 = k0.row<const short>(q + i * 2 + 1);
const short* k11 = k1.row<const short>(q + i * 2 + 1);
const short* k21 = k2.row<const short>(q + i * 2 + 1);
const short* k31 = k3.row<const short>(q + i * 2 + 1);
g00[0] = k00[k];
g00[1] = k01[k];
g00[2] = k10[k];
g00[3] = k11[k];
g00[4] = k20[k];
g00[5] = k21[k];
g00[6] = k30[k];
g00[7] = k31[k];
g00 += 8;
}
#else
// generic layout: for each input channel, 4 output-channel values
for (int i = 0; i < 8; i++)
{
g00[0] = k0.row<const short>(q + i)[k];
g00[1] = k1.row<const short>(q + i)[k];
g00[2] = k2.row<const short>(q + i)[k];
g00[3] = k3.row<const short>(q + i)[k];
g00 += 4;
}
#endif
}
}
}
// tail: remaining output channels packed one at a time
for (; p < outch; p++)
{
const Mat k0 = kernel_tm.channel(p);
Mat g0 = kernel_tm_pack8to1.channel(p / 4 + p % 4);
for (int k = 0; k < 36; k++)
{
short* g00 = g0.row<short>(k);
for (int q = 0; q + 7 < inch; q += 8)
{
for (int i = 0; i < 8; i++)
{
g00[0] = k0.row<const short>(q + i)[k];
g00 += 1;
}
}
}
}
}
static void conv3x3s1_winograd42_pack8to1_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt)
{
#if NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__
if (ncnn::cpu_support_x86_avx512_vnni())
{
extern void conv3x3s1_winograd42_pack8to1_int8_sse_avx512vnni(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt);
conv3x3s1_winograd42_pack8to1_int8_sse_avx512vnni(bottom_blob, top_blob, kernel_tm, opt);
return;
}
#endif
#if NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__
if (ncnn::cpu_support_x86_avx_vnni())
{
extern void conv3x3s1_winograd42_pack8to1_int8_sse_avxvnni(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt);
conv3x3s1_winograd42_pack8to1_int8_sse_avxvnni(bottom_blob, top_blob, kernel_tm, opt);
return;
}
#endif
#if NCNN_AVX2 && __AVX__ && !__AVX2__
if (ncnn::cpu_support_x86_avx2())
{
extern void conv3x3s1_winograd42_pack8to1_int8_sse_avx2(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt);
conv3x3s1_winograd42_pack8to1_int8_sse_avx2(bottom_blob, top_blob, kernel_tm, opt);
return;
}
#endif
#if NCNN_XOP && __SSE2__ && !__XOP__
if (ncnn::cpu_support_x86_xop())
{
extern void conv3x3s1_winograd42_pack8to1_int8_sse_xop(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt);
conv3x3s1_winograd42_pack8to1_int8_sse_xop(bottom_blob, top_blob, kernel_tm, opt);
return;
}
#endif
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
// size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 4n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = w_tm / 6 * h_tm / 6;
bottom_blob_tm.create(tiles, 36, inch, 2u * elempack, elempack, opt.workspace_allocator);
// const float itm[4][4] = {
// {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
// {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
// {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
// {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
// };
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r04 + r03
// 2 = 4 * (r01 - r02) + r04 - r03
// 3 = -2 * (r01 - r03) + r04 - r02
// 4 = 2 * (r01 - r03) + r04 - r02
// 5 = 4 * r01 - 5 * r03 + r05
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
short tmp[6][6][8];
// tile
for (int i = 0; i < h_tm / 6; i++)
{
for (int j = 0; j < w_tm / 6; j++)
{
const signed char* r0 = img0.row<const signed char>(i * 4) + (j * 4) * 8;
for (int m = 0; m < 6; m++)
{
// TODO use _mm_cvtepi8_epi16 on sse4.1
__m128i _r00_01 = _mm_loadu_si128((const __m128i*)r0);
__m128i _r02_03 = _mm_loadu_si128((const __m128i*)(r0 + 16));
__m128i _r04_05 = _mm_loadu_si128((const __m128i*)(r0 + 32));
__m128i _extr0001 = _mm_cmpgt_epi8(_mm_setzero_si128(), _r00_01);
__m128i _extr0203 = _mm_cmpgt_epi8(_mm_setzero_si128(), _r02_03);
__m128i _extr0405 = _mm_cmpgt_epi8(_mm_setzero_si128(), _r04_05);
__m128i _r00 = _mm_unpacklo_epi8(_r00_01, _extr0001);
__m128i _r01 = _mm_unpackhi_epi8(_r00_01, _extr0001);
__m128i _r02 = _mm_unpacklo_epi8(_r02_03, _extr0203);
__m128i _r03 = _mm_unpackhi_epi8(_r02_03, _extr0203);
__m128i _r04 = _mm_unpacklo_epi8(_r04_05, _extr0405);
__m128i _r05 = _mm_unpackhi_epi8(_r04_05, _extr0405);
__m128i _v5 = _mm_set1_epi16(5);
__m128i _tmp0m = _mm_sub_epi16(_mm_add_epi16(_mm_slli_epi16(_r00, 2), _r04), _mm_mullo_epi16(_r02, _v5));
__m128i _tmp1m = _mm_sub_epi16(_mm_add_epi16(_r04, _r03), _mm_slli_epi16(_mm_add_epi16(_r01, _r02), 2));
__m128i _tmp2m = _mm_add_epi16(_mm_sub_epi16(_r04, _r03), _mm_slli_epi16(_mm_sub_epi16(_r01, _r02), 2));
__m128i _tmp3m = _mm_sub_epi16(_mm_sub_epi16(_r04, _r02), _mm_slli_epi16(_mm_sub_epi16(_r01, _r03), 1));
__m128i _tmp4m = _mm_add_epi16(_mm_sub_epi16(_r04, _r02), _mm_slli_epi16(_mm_sub_epi16(_r01, _r03), 1));
__m128i _tmp5m = _mm_sub_epi16(_mm_add_epi16(_mm_slli_epi16(_r01, 2), _r05), _mm_mullo_epi16(_r03, _v5));
_mm_storeu_si128((__m128i*)tmp[0][m], _tmp0m);
_mm_storeu_si128((__m128i*)tmp[1][m], _tmp1m);
_mm_storeu_si128((__m128i*)tmp[2][m], _tmp2m);
_mm_storeu_si128((__m128i*)tmp[3][m], _tmp3m);
_mm_storeu_si128((__m128i*)tmp[4][m], _tmp4m);
_mm_storeu_si128((__m128i*)tmp[5][m], _tmp5m);
r0 += w * 8;
}
short* r0_tm_0 = (short*)img0_tm + (i * w_tm / 6 + j) * 8;
short* r0_tm_1 = r0_tm_0 + tiles * 8;
short* r0_tm_2 = r0_tm_0 + tiles * 16;
short* r0_tm_3 = r0_tm_0 + tiles * 24;
short* r0_tm_4 = r0_tm_0 + tiles * 32;
short* r0_tm_5 = r0_tm_0 + tiles * 40;
for (int m = 0; m < 6; m++)
{
__m128i _tmp00 = _mm_loadu_si128((const __m128i*)tmp[m][0]);
__m128i _tmp01 = _mm_loadu_si128((const __m128i*)tmp[m][1]);
__m128i _tmp02 = _mm_loadu_si128((const __m128i*)tmp[m][2]);
__m128i _tmp03 = _mm_loadu_si128((const __m128i*)tmp[m][3]);
__m128i _tmp04 = _mm_loadu_si128((const __m128i*)tmp[m][4]);
__m128i _tmp05 = _mm_loadu_si128((const __m128i*)tmp[m][5]);
__m128i _v5 = _mm_set1_epi16(5);
__m128i _r0tm0 = _mm_sub_epi16(_mm_add_epi16(_mm_slli_epi16(_tmp00, 2), _tmp04), _mm_mullo_epi16(_tmp02, _v5));
__m128i _r0tm1 = _mm_sub_epi16(_mm_add_epi16(_tmp04, _tmp03), _mm_slli_epi16(_mm_add_epi16(_tmp01, _tmp02), 2));
__m128i _r0tm2 = _mm_add_epi16(_mm_sub_epi16(_tmp04, _tmp03), _mm_slli_epi16(_mm_sub_epi16(_tmp01, _tmp02), 2));
__m128i _r0tm3 = _mm_sub_epi16(_mm_sub_epi16(_tmp04, _tmp02), _mm_slli_epi16(_mm_sub_epi16(_tmp01, _tmp03), 1));
__m128i _r0tm4 = _mm_add_epi16(_mm_sub_epi16(_tmp04, _tmp02), _mm_slli_epi16(_mm_sub_epi16(_tmp01, _tmp03), 1));
__m128i _r0tm5 = _mm_sub_epi16(_mm_add_epi16(_mm_slli_epi16(_tmp01, 2), _tmp05), _mm_mullo_epi16(_tmp03, _v5));
_mm_storeu_si128((__m128i*)r0_tm_0, _r0tm0);
_mm_storeu_si128((__m128i*)r0_tm_1, _r0tm1);
_mm_storeu_si128((__m128i*)r0_tm_2, _r0tm2);
_mm_storeu_si128((__m128i*)r0_tm_3, _r0tm3);
_mm_storeu_si128((__m128i*)r0_tm_4, _r0tm4);
_mm_storeu_si128((__m128i*)r0_tm_5, _r0tm5);
r0_tm_0 += tiles * 48;
r0_tm_1 += tiles * 48;
r0_tm_2 += tiles * 48;
r0_tm_3 += tiles * 48;
r0_tm_4 += tiles * 48;
r0_tm_5 += tiles * 48;
}
}
}
}
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = h_tm / 6 * w_tm / 6;
// permute
// bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
Mat bottom_blob_tm2;
#if __AVX2__
if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator);
#else
if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator);
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 36; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
#if __AVX2__
for (; i + 3 < tiles; i += 4)
{
short* tmpptr = tm2.row<short>(i / 4);
const short* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
__m256i _r0 = _mm256_loadu_si256((const __m256i*)r0);
__m256i _r1 = _mm256_loadu_si256((const __m256i*)(r0 + 16));
_mm256_storeu_si256((__m256i*)tmpptr, _r0);
_mm256_storeu_si256((__m256i*)(tmpptr + 16), _r1);
r0 += bottom_blob_tm.cstep * 8;
tmpptr += 32;
}
}
#endif
for (; i + 1 < tiles; i += 2)
{
#if __AVX2__
short* tmpptr = tm2.row<short>(i / 4 + (i % 4) / 2);
#else
short* tmpptr = tm2.row<short>(i / 2);
#endif
const short* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
__m128i _r0 = _mm_loadu_si128((const __m128i*)r0);
__m128i _r1 = _mm_loadu_si128((const __m128i*)(r0 + 8));
_mm_storeu_si128((__m128i*)tmpptr, _r0);
_mm_storeu_si128((__m128i*)(tmpptr + 8), _r1);
r0 += bottom_blob_tm.cstep * 8;
tmpptr += 16;
}
}
for (; i < tiles; i++)
{
#if __AVX2__
short* tmpptr = tm2.row<short>(i / 4 + (i % 4) / 2 + i % 2);
#else
short* tmpptr = tm2.row<short>(i / 2 + i % 2);
#endif
const short* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
__m128i _r0 = _mm_loadu_si128((const __m128i*)r0);
_mm_storeu_si128((__m128i*)tmpptr, _r0);
r0 += bottom_blob_tm.cstep * 8;
tmpptr += 8;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 36, outch, 4u, 1, opt.workspace_allocator);
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 4;
int* output0_tm = top_blob_tm.channel(p);
int* output1_tm = top_blob_tm.channel(p + 1);
int* output2_tm = top_blob_tm.channel(p + 2);
int* output3_tm = top_blob_tm.channel(p + 3);
const Mat kernel0_tm = kernel_tm.channel(p / 4);
for (int r = 0; r < 36; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
#if __AVX2__
for (; i + 3 < tiles; i += 4)
{
const short* r0 = bb2.row<const short>(i / 4);
const short* k0 = kernel0_tm.row<const short>(r);
int nn = inch; // inch always > 0
__m256i _sum0_1 = _mm256_setzero_si256();
__m256i _sum2_3 = _mm256_setzero_si256();
__m256i _sum4_5 = _mm256_setzero_si256();
__m256i _sum6_7 = _mm256_setzero_si256();
for (int j = 0; j < nn; j++)
{
// 0 1 2 3 4 5 6 7 8 9 a b c d e f
__m256i _val0 = _mm256_loadu_si256((const __m256i*)r0);
__m256i _w01 = _mm256_loadu_si256((const __m256i*)k0);
__m256i _w23 = _mm256_loadu_si256((const __m256i*)(k0 + 16));
#if __AVXVNNI__ || __AVX512VNNI__
__m256i _val0_0123 = _mm256_permutevar8x32_epi32(_val0, _mm256_set_epi32(1, 1, 1, 1, 0, 0, 0, 0));
__m256i _val0_4567 = _mm256_permutevar8x32_epi32(_val0, _mm256_set_epi32(3, 3, 3, 3, 2, 2, 2, 2));
__m256i _val0_89ab = _mm256_permutevar8x32_epi32(_val0, _mm256_set_epi32(5, 5, 5, 5, 4, 4, 4, 4));
__m256i _val0_cdef = _mm256_permutevar8x32_epi32(_val0, _mm256_set_epi32(7, 7, 7, 7, 6, 6, 6, 6));
_sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w01, _val0_0123);
_sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _w01, _val0_89ab);
_sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w23, _val0_4567);
_sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _w23, _val0_cdef);
#else
// 0 0 1 1 2 2 3 3 8 8 9 9 a a b b
// 4 4 5 5 6 6 7 7 c c d d e e f f
__m256i _val0_0123_89ab = _mm256_unpacklo_epi16(_val0, _val0);
__m256i _val0_4567_cdef = _mm256_unpackhi_epi16(_val0, _val0);
__m256i _val0_0123 = _mm256_permutevar8x32_epi32(_val0_0123_89ab, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0));
__m256i _val0_4567 = _mm256_permutevar8x32_epi32(_val0_4567_cdef, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0));
__m256i _val0_89ab = _mm256_permutevar8x32_epi32(_val0_0123_89ab, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4));
__m256i _val0_cdef = _mm256_permutevar8x32_epi32(_val0_4567_cdef, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4));
__m256i _sl00_01 = _mm256_mullo_epi16(_w01, _val0_0123);
__m256i _sh00_01 = _mm256_mulhi_epi16(_w01, _val0_0123);
__m256i _sl10_11 = _mm256_mullo_epi16(_w01, _val0_89ab);
__m256i _sh10_11 = _mm256_mulhi_epi16(_w01, _val0_89ab);
__m256i _sl02_03 = _mm256_mullo_epi16(_w23, _val0_4567);
__m256i _sh02_03 = _mm256_mulhi_epi16(_w23, _val0_4567);
__m256i _sl12_13 = _mm256_mullo_epi16(_w23, _val0_cdef);
__m256i _sh12_13 = _mm256_mulhi_epi16(_w23, _val0_cdef);
_sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl00_01, _sh00_01));
_sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl10_11, _sh10_11));
_sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl02_03, _sh02_03));
_sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl12_13, _sh12_13));
_sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl00_01, _sh00_01));
_sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl10_11, _sh10_11));
_sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl02_03, _sh02_03));
_sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl12_13, _sh12_13));
#endif
__m256i _val1 = _mm256_loadu_si256((const __m256i*)(r0 + 16));
#if __AVXVNNI__ || __AVX512VNNI__
__m256i _val1_0123 = _mm256_permutevar8x32_epi32(_val1, _mm256_set_epi32(1, 1, 1, 1, 0, 0, 0, 0));
__m256i _val1_4567 = _mm256_permutevar8x32_epi32(_val1, _mm256_set_epi32(3, 3, 3, 3, 2, 2, 2, 2));
__m256i _val1_89ab = _mm256_permutevar8x32_epi32(_val1, _mm256_set_epi32(5, 5, 5, 5, 4, 4, 4, 4));
__m256i _val1_cdef = _mm256_permutevar8x32_epi32(_val1, _mm256_set_epi32(7, 7, 7, 7, 6, 6, 6, 6));
_sum4_5 = _mm256_dpwssd_epi32(_sum4_5, _w01, _val1_0123);
_sum6_7 = _mm256_dpwssd_epi32(_sum6_7, _w01, _val1_89ab);
_sum4_5 = _mm256_dpwssd_epi32(_sum4_5, _w23, _val1_4567);
_sum6_7 = _mm256_dpwssd_epi32(_sum6_7, _w23, _val1_cdef);
#else
__m256i _val1_0123_89ab = _mm256_unpacklo_epi16(_val1, _val1);
__m256i _val1_4567_cdef = _mm256_unpackhi_epi16(_val1, _val1);
__m256i _val1_0123 = _mm256_permutevar8x32_epi32(_val1_0123_89ab, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0));
__m256i _val1_4567 = _mm256_permutevar8x32_epi32(_val1_4567_cdef, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0));
__m256i _val1_89ab = _mm256_permutevar8x32_epi32(_val1_0123_89ab, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4));
__m256i _val1_cdef = _mm256_permutevar8x32_epi32(_val1_4567_cdef, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4));
__m256i _sl04_05 = _mm256_mullo_epi16(_w01, _val1_0123);
__m256i _sh04_05 = _mm256_mulhi_epi16(_w01, _val1_0123);
__m256i _sl14_15 = _mm256_mullo_epi16(_w01, _val1_89ab);
__m256i _sh14_15 = _mm256_mulhi_epi16(_w01, _val1_89ab);
__m256i _sl06_07 = _mm256_mullo_epi16(_w23, _val1_4567);
__m256i _sh06_07 = _mm256_mulhi_epi16(_w23, _val1_4567);
__m256i _sl16_17 = _mm256_mullo_epi16(_w23, _val1_cdef);
__m256i _sh16_17 = _mm256_mulhi_epi16(_w23, _val1_cdef);
_sum4_5 = _mm256_add_epi32(_sum4_5, _mm256_unpacklo_epi16(_sl04_05, _sh04_05));
_sum6_7 = _mm256_add_epi32(_sum6_7, _mm256_unpacklo_epi16(_sl14_15, _sh14_15));
_sum4_5 = _mm256_add_epi32(_sum4_5, _mm256_unpacklo_epi16(_sl06_07, _sh06_07));
_sum6_7 = _mm256_add_epi32(_sum6_7, _mm256_unpacklo_epi16(_sl16_17, _sh16_17));
_sum4_5 = _mm256_add_epi32(_sum4_5, _mm256_unpackhi_epi16(_sl04_05, _sh04_05));
_sum6_7 = _mm256_add_epi32(_sum6_7, _mm256_unpackhi_epi16(_sl14_15, _sh14_15));
_sum4_5 = _mm256_add_epi32(_sum4_5, _mm256_unpackhi_epi16(_sl06_07, _sh06_07));
_sum6_7 = _mm256_add_epi32(_sum6_7, _mm256_unpackhi_epi16(_sl16_17, _sh16_17));
#endif
r0 += 32;
k0 += 32;
}
__m256i _sum0_2 = _mm256_permute2x128_si256(_sum0_1, _sum2_3, _MM_SHUFFLE(0, 2, 0, 0));
__m256i _sum1_3 = _mm256_permute2x128_si256(_sum0_1, _sum2_3, _MM_SHUFFLE(0, 3, 0, 1));
_sum0_2 = _mm256_add_epi32(_sum0_2, _sum1_3);
__m256i _sum4_6 = _mm256_permute2x128_si256(_sum4_5, _sum6_7, _MM_SHUFFLE(0, 2, 0, 0));
__m256i _sum5_7 = _mm256_permute2x128_si256(_sum4_5, _sum6_7, _MM_SHUFFLE(0, 3, 0, 1));
_sum4_6 = _mm256_add_epi32(_sum4_6, _sum5_7);
int sum[16];
_mm256_storeu_si256((__m256i*)sum, _sum0_2);
_mm256_storeu_si256((__m256i*)(sum + 8), _sum4_6);
output0_tm[0] = sum[0];
output1_tm[0] = sum[1];
output2_tm[0] = sum[2];
output3_tm[0] = sum[3];
output0_tm[1] = sum[4];
output1_tm[1] = sum[5];
output2_tm[1] = sum[6];
output3_tm[1] = sum[7];
output0_tm[2] = sum[8];
output1_tm[2] = sum[9];
output2_tm[2] = sum[10];
output3_tm[2] = sum[11];
output0_tm[3] = sum[12];
output1_tm[3] = sum[13];
output2_tm[3] = sum[14];
output3_tm[3] = sum[15];
output0_tm += 4;
output1_tm += 4;
output2_tm += 4;
output3_tm += 4;
}
#endif
for (; i + 1 < tiles; i += 2)
{
#if __AVX2__
const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2);
#else
const short* r0 = bb2.row<const short>(i / 2);
#endif
const short* k0 = kernel0_tm.row<const short>(r);
int nn = inch; // inch always > 0
#if __AVX2__
__m256i _sum0_1 = _mm256_setzero_si256();
__m256i _sum2_3 = _mm256_setzero_si256();
#else
__m128i _sum0 = _mm_setzero_si128();
__m128i _sum1 = _mm_setzero_si128();
__m128i _sum2 = _mm_setzero_si128();
__m128i _sum3 = _mm_setzero_si128();
#endif
for (int j = 0; j < nn; j++)
{
#if __AVX2__
// 0 1 2 3 4 5 6 7 8 9 a b c d e f
__m256i _val = _mm256_loadu_si256((const __m256i*)r0);
__m256i _w01 = _mm256_loadu_si256((const __m256i*)k0);
__m256i _w23 = _mm256_loadu_si256((const __m256i*)(k0 + 16));
#if __AVXVNNI__ || __AVX512VNNI__
__m256i _val_0123 = _mm256_permutevar8x32_epi32(_val, _mm256_set_epi32(1, 1, 1, 1, 0, 0, 0, 0));
__m256i _val_4567 = _mm256_permutevar8x32_epi32(_val, _mm256_set_epi32(3, 3, 3, 3, 2, 2, 2, 2));
__m256i _val_89ab = _mm256_permutevar8x32_epi32(_val, _mm256_set_epi32(5, 5, 5, 5, 4, 4, 4, 4));
__m256i _val_cdef = _mm256_permutevar8x32_epi32(_val, _mm256_set_epi32(7, 7, 7, 7, 6, 6, 6, 6));
_sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w01, _val_0123);
_sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _w01, _val_89ab);
_sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w23, _val_4567);
_sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _w23, _val_cdef);
#else
__m256i _val_0123_89ab = _mm256_unpacklo_epi16(_val, _val);
__m256i _val_4567_cdef = _mm256_unpackhi_epi16(_val, _val);
__m256i _val_0123 = _mm256_permutevar8x32_epi32(_val_0123_89ab, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0));
__m256i _val_4567 = _mm256_permutevar8x32_epi32(_val_4567_cdef, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0));
__m256i _val_89ab = _mm256_permutevar8x32_epi32(_val_0123_89ab, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4));
__m256i _val_cdef = _mm256_permutevar8x32_epi32(_val_4567_cdef, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4));
__m256i _sl00_01 = _mm256_mullo_epi16(_w01, _val_0123);
__m256i _sh00_01 = _mm256_mulhi_epi16(_w01, _val_0123);
__m256i _sl10_11 = _mm256_mullo_epi16(_w01, _val_89ab);
__m256i _sh10_11 = _mm256_mulhi_epi16(_w01, _val_89ab);
__m256i _sl02_03 = _mm256_mullo_epi16(_w23, _val_4567);
__m256i _sh02_03 = _mm256_mulhi_epi16(_w23, _val_4567);
__m256i _sl12_13 = _mm256_mullo_epi16(_w23, _val_cdef);
__m256i _sh12_13 = _mm256_mulhi_epi16(_w23, _val_cdef);
_sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl00_01, _sh00_01));
_sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl10_11, _sh10_11));
_sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl02_03, _sh02_03));
_sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl12_13, _sh12_13));
_sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl00_01, _sh00_01));
_sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl10_11, _sh10_11));
_sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl02_03, _sh02_03));
_sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl12_13, _sh12_13));
#endif
#else
// 0 1 2 3 4 5 6 7
__m128i _val0 = _mm_loadu_si128((const __m128i*)r0);
__m128i _val1 = _mm_loadu_si128((const __m128i*)(r0 + 8));
__m128i _w0 = _mm_loadu_si128((const __m128i*)k0);
__m128i _w1 = _mm_loadu_si128((const __m128i*)(k0 + 8));
__m128i _w2 = _mm_loadu_si128((const __m128i*)(k0 + 16));
__m128i _w3 = _mm_loadu_si128((const __m128i*)(k0 + 24));
#if __XOP__
__m128i _val0_01 = _mm_shuffle_epi32(_val0, _MM_SHUFFLE(0, 0, 0, 0));
__m128i _val0_23 = _mm_shuffle_epi32(_val0, _MM_SHUFFLE(1, 1, 1, 1));
__m128i _val0_45 = _mm_shuffle_epi32(_val0, _MM_SHUFFLE(2, 2, 2, 2));
__m128i _val0_67 = _mm_shuffle_epi32(_val0, _MM_SHUFFLE(3, 3, 3, 3));
__m128i _val1_01 = _mm_shuffle_epi32(_val1, _MM_SHUFFLE(0, 0, 0, 0));
__m128i _val1_23 = _mm_shuffle_epi32(_val1, _MM_SHUFFLE(1, 1, 1, 1));
__m128i _val1_45 = _mm_shuffle_epi32(_val1, _MM_SHUFFLE(2, 2, 2, 2));
__m128i _val1_67 = _mm_shuffle_epi32(_val1, _MM_SHUFFLE(3, 3, 3, 3));
_sum0 = _mm_maddd_epi16(_val0_01, _w0, _sum0);
_sum1 = _mm_maddd_epi16(_val0_23, _w1, _sum1);
_sum2 = _mm_maddd_epi16(_val1_01, _w0, _sum2);
_sum3 = _mm_maddd_epi16(_val1_23, _w1, _sum3);
_sum0 = _mm_maddd_epi16(_val0_45, _w2, _sum0);
_sum1 = _mm_maddd_epi16(_val0_67, _w3, _sum1);
_sum2 = _mm_maddd_epi16(_val1_45, _w2, _sum2);
_sum3 = _mm_maddd_epi16(_val1_67, _w3, _sum3);
#else
// 0 0 1 1 2 2 3 3
// 4 4 5 5 6 6 7 7
__m128i _val0_0123 = _mm_unpacklo_epi16(_val0, _val0);
__m128i _val0_4567 = _mm_unpackhi_epi16(_val0, _val0);
__m128i _val1_0123 = _mm_unpacklo_epi16(_val1, _val1);
__m128i _val1_4567 = _mm_unpackhi_epi16(_val1, _val1);
__m128i _val0_01 = _mm_unpacklo_epi32(_val0_0123, _val0_0123);
__m128i _val0_23 = _mm_unpackhi_epi32(_val0_0123, _val0_0123);
__m128i _val0_45 = _mm_unpacklo_epi32(_val0_4567, _val0_4567);
__m128i _val0_67 = _mm_unpackhi_epi32(_val0_4567, _val0_4567);
__m128i _val1_01 = _mm_unpacklo_epi32(_val1_0123, _val1_0123);
__m128i _val1_23 = _mm_unpackhi_epi32(_val1_0123, _val1_0123);
__m128i _val1_45 = _mm_unpacklo_epi32(_val1_4567, _val1_4567);
__m128i _val1_67 = _mm_unpackhi_epi32(_val1_4567, _val1_4567);
__m128i _sl00 = _mm_mullo_epi16(_w0, _val0_01);
__m128i _sh00 = _mm_mulhi_epi16(_w0, _val0_01);
__m128i _sl10 = _mm_mullo_epi16(_w0, _val1_01);
__m128i _sh10 = _mm_mulhi_epi16(_w0, _val1_01);
__m128i _sl01 = _mm_mullo_epi16(_w1, _val0_23);
__m128i _sh01 = _mm_mulhi_epi16(_w1, _val0_23);
__m128i _sl11 = _mm_mullo_epi16(_w1, _val1_23);
__m128i _sh11 = _mm_mulhi_epi16(_w1, _val1_23);
__m128i _sl02 = _mm_mullo_epi16(_w2, _val0_45);
__m128i _sh02 = _mm_mulhi_epi16(_w2, _val0_45);
__m128i _sl12 = _mm_mullo_epi16(_w2, _val1_45);
__m128i _sh12 = _mm_mulhi_epi16(_w2, _val1_45);
__m128i _sl03 = _mm_mullo_epi16(_w3, _val0_67);
__m128i _sh03 = _mm_mulhi_epi16(_w3, _val0_67);
__m128i _sl13 = _mm_mullo_epi16(_w3, _val1_67);
__m128i _sh13 = _mm_mulhi_epi16(_w3, _val1_67);
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl00, _sh00));
_sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl00, _sh00));
_sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl10, _sh10));
_sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl10, _sh10));
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl01, _sh01));
_sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl01, _sh01));
_sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl11, _sh11));
_sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl11, _sh11));
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl02, _sh02));
_sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl02, _sh02));
_sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl12, _sh12));
_sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl12, _sh12));
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl03, _sh03));
_sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl03, _sh03));
_sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl13, _sh13));
_sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl13, _sh13));
#endif
#endif
r0 += 16;
k0 += 32;
}
#if __AVX2__
__m256i _sum0_2 = _mm256_permute2x128_si256(_sum0_1, _sum2_3, _MM_SHUFFLE(0, 2, 0, 0));
__m256i _sum1_3 = _mm256_permute2x128_si256(_sum0_1, _sum2_3, _MM_SHUFFLE(0, 3, 0, 1));
_sum0_2 = _mm256_add_epi32(_sum0_2, _sum1_3);
int sum[8];
_mm256_storeu_si256((__m256i*)sum, _sum0_2);
#else
_sum0 = _mm_add_epi32(_sum0, _sum1);
_sum2 = _mm_add_epi32(_sum2, _sum3);
int sum[8];
_mm_storeu_si128((__m128i*)sum, _sum0);
_mm_storeu_si128((__m128i*)(sum + 4), _sum2);
#endif
output0_tm[0] = sum[0];
output1_tm[0] = sum[1];
output2_tm[0] = sum[2];
output3_tm[0] = sum[3];
output0_tm[1] = sum[4];
output1_tm[1] = sum[5];
output2_tm[1] = sum[6];
output3_tm[1] = sum[7];
output0_tm += 2;
output1_tm += 2;
output2_tm += 2;
output3_tm += 2;
}
for (; i < tiles; i++)
{
#if __AVX2__
const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2 + i % 2);
#else
const short* r0 = bb2.row<const short>(i / 2 + i % 2);
#endif
const short* k0 = kernel0_tm.row<const short>(r);
int nn = inch; // inch always > 0
#if __AVX2__
__m256i _sum0_1 = _mm256_setzero_si256();
#else
__m128i _sum0 = _mm_setzero_si128();
__m128i _sum1 = _mm_setzero_si128();
#endif
for (int j = 0; j < nn; j++)
{
// 0 1 2 3 4 5 6 7
__m128i _val = _mm_loadu_si128((const __m128i*)r0);
#if __AVX2__
__m256i _w01 = _mm256_loadu_si256((const __m256i*)k0);
__m256i _w23 = _mm256_loadu_si256((const __m256i*)(k0 + 16));
#if __AVXVNNI__ || __AVX512VNNI__
// 0 1 0 1 x x x x
// 0 1 0 1 0 1 0 1
__m128i _val_01 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(0, 0, 0, 0));
__m128i _val_23 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(1, 1, 1, 1));
__m128i _val_45 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(2, 2, 2, 2));
__m128i _val_67 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(3, 3, 3, 3));
__m256i _val_0123 = _mm256_inserti128_si256(_mm256_castsi128_si256(_val_01), _val_23, 1);
__m256i _val_4567 = _mm256_inserti128_si256(_mm256_castsi128_si256(_val_45), _val_67, 1);
_sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w01, _val_0123);
_sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w23, _val_4567);
#else
// 0 0 1 1 2 2 3 3
// 4 4 5 5 6 6 7 7
__m256i _val_0123 = _mm256_castsi128_si256(_mm_unpacklo_epi16(_val, _val));
__m256i _val_4567 = _mm256_castsi128_si256(_mm_unpackhi_epi16(_val, _val));
_val_0123 = _mm256_permutevar8x32_epi32(_val_0123, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0));
_val_4567 = _mm256_permutevar8x32_epi32(_val_4567, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0));
__m256i _sl00_01 = _mm256_mullo_epi16(_w01, _val_0123);
__m256i _sh00_01 = _mm256_mulhi_epi16(_w01, _val_0123);
__m256i _sl02_03 = _mm256_mullo_epi16(_w23, _val_4567);
__m256i _sh02_03 = _mm256_mulhi_epi16(_w23, _val_4567);
_sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl00_01, _sh00_01));
_sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl02_03, _sh02_03));
_sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl00_01, _sh00_01));
_sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl02_03, _sh02_03));
#endif
#else
__m128i _w0 = _mm_loadu_si128((const __m128i*)k0);
__m128i _w1 = _mm_loadu_si128((const __m128i*)(k0 + 8));
__m128i _w2 = _mm_loadu_si128((const __m128i*)(k0 + 16));
__m128i _w3 = _mm_loadu_si128((const __m128i*)(k0 + 24));
#if __XOP__
__m128i _val01 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(0, 0, 0, 0));
__m128i _val23 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(1, 1, 1, 1));
__m128i _val45 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(2, 2, 2, 2));
__m128i _val67 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(3, 3, 3, 3));
_sum0 = _mm_maddd_epi16(_val01, _w0, _sum0);
_sum1 = _mm_maddd_epi16(_val23, _w1, _sum1);
_sum0 = _mm_maddd_epi16(_val45, _w2, _sum0);
_sum1 = _mm_maddd_epi16(_val67, _w3, _sum1);
#else
// 0 0 1 1 2 2 3 3
// 4 4 5 5 6 6 7 7
__m128i _val_0123 = _mm_unpacklo_epi16(_val, _val);
__m128i _val_4567 = _mm_unpackhi_epi16(_val, _val);
__m128i _val01 = _mm_unpacklo_epi32(_val_0123, _val_0123);
__m128i _val23 = _mm_unpackhi_epi32(_val_0123, _val_0123);
__m128i _val45 = _mm_unpacklo_epi32(_val_4567, _val_4567);
__m128i _val67 = _mm_unpackhi_epi32(_val_4567, _val_4567);
__m128i _sl0 = _mm_mullo_epi16(_w0, _val01);
__m128i _sh0 = _mm_mulhi_epi16(_w0, _val01);
__m128i _sl1 = _mm_mullo_epi16(_w1, _val23);
__m128i _sh1 = _mm_mulhi_epi16(_w1, _val23);
__m128i _sl2 = _mm_mullo_epi16(_w2, _val45);
__m128i _sh2 = _mm_mulhi_epi16(_w2, _val45);
__m128i _sl3 = _mm_mullo_epi16(_w3, _val67);
__m128i _sh3 = _mm_mulhi_epi16(_w3, _val67);
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl0, _sh0));
_sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl0, _sh0));
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl1, _sh1));
_sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl1, _sh1));
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl2, _sh2));
_sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl2, _sh2));
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl3, _sh3));
_sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl3, _sh3));
#endif
#endif
r0 += 8;
k0 += 32;
}
#if __AVX2__
__m128i _sum0 = _mm256_extracti128_si256(_sum0_1, 0);
__m128i _sum1 = _mm256_extracti128_si256(_sum0_1, 1);
#endif
_sum0 = _mm_add_epi32(_sum0, _sum1);
int sum[4];
_mm_storeu_si128((__m128i*)sum, _sum0);
output0_tm[0] = sum[0];
output1_tm[0] = sum[1];
output2_tm[0] = sum[2];
output3_tm[0] = sum[3];
output0_tm += 1;
output1_tm += 1;
output2_tm += 1;
output3_tm += 1;
}
}
}
remain_outch_start += nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
int* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p / 4 + p % 4);
for (int r = 0; r < 36; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
#if __AVX2__
for (; i + 3 < tiles; i += 4)
{
const short* r0 = bb2.row<const short>(i / 4);
const short* k0 = kernel0_tm.row<const short>(r);
__m128i _sum0 = _mm_setzero_si128();
__m128i _sum1 = _mm_setzero_si128();
__m128i _sum2 = _mm_setzero_si128();
__m128i _sum3 = _mm_setzero_si128();
__m128i _sum4 = _mm_setzero_si128();
__m128i _sum5 = _mm_setzero_si128();
__m128i _sum6 = _mm_setzero_si128();
__m128i _sum7 = _mm_setzero_si128();
for (int q = 0; q < inch; q++)
{
__m128i _val0 = _mm_loadu_si128((const __m128i*)r0);
__m128i _val1 = _mm_loadu_si128((const __m128i*)(r0 + 8));
__m128i _val2 = _mm_loadu_si128((const __m128i*)(r0 + 16));
__m128i _val3 = _mm_loadu_si128((const __m128i*)(r0 + 24));
__m128i _w0 = _mm_loadu_si128((const __m128i*)k0);
__m128i _sl0 = _mm_mullo_epi16(_val0, _w0);
__m128i _sh0 = _mm_mulhi_epi16(_val0, _w0);
__m128i _sl1 = _mm_mullo_epi16(_val1, _w0);
__m128i _sh1 = _mm_mulhi_epi16(_val1, _w0);
__m128i _sl2 = _mm_mullo_epi16(_val2, _w0);
__m128i _sh2 = _mm_mulhi_epi16(_val2, _w0);
__m128i _sl3 = _mm_mullo_epi16(_val3, _w0);
__m128i _sh3 = _mm_mulhi_epi16(_val3, _w0);
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl0, _sh0));
_sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl0, _sh0));
_sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl1, _sh1));
_sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl1, _sh1));
_sum4 = _mm_add_epi32(_sum4, _mm_unpacklo_epi16(_sl2, _sh2));
_sum5 = _mm_add_epi32(_sum5, _mm_unpackhi_epi16(_sl2, _sh2));
_sum6 = _mm_add_epi32(_sum6, _mm_unpacklo_epi16(_sl3, _sh3));
_sum7 = _mm_add_epi32(_sum7, _mm_unpackhi_epi16(_sl3, _sh3));
k0 += 8;
r0 += 32;
}
_sum0 = _mm_add_epi32(_sum0, _sum1);
_sum2 = _mm_add_epi32(_sum2, _sum3);
_sum4 = _mm_add_epi32(_sum4, _sum5);
_sum6 = _mm_add_epi32(_sum6, _sum7);
output0_tm[0] = _mm_reduce_add_epi32(_sum0);
output0_tm[1] = _mm_reduce_add_epi32(_sum2);
output0_tm[2] = _mm_reduce_add_epi32(_sum4);
output0_tm[3] = _mm_reduce_add_epi32(_sum6);
output0_tm += 4;
}
#endif
for (; i + 1 < tiles; i += 2)
{
#if __AVX2__
const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2);
#else
const short* r0 = bb2.row<const short>(i / 2);
#endif
const short* k0 = kernel0_tm.row<const short>(r);
__m128i _sum0 = _mm_setzero_si128();
__m128i _sum1 = _mm_setzero_si128();
__m128i _sum2 = _mm_setzero_si128();
__m128i _sum3 = _mm_setzero_si128();
for (int q = 0; q < inch; q++)
{
__m128i _val0 = _mm_loadu_si128((const __m128i*)r0);
__m128i _val1 = _mm_loadu_si128((const __m128i*)(r0 + 8));
__m128i _w0 = _mm_loadu_si128((const __m128i*)k0);
__m128i _sl0 = _mm_mullo_epi16(_val0, _w0);
__m128i _sh0 = _mm_mulhi_epi16(_val0, _w0);
__m128i _sl1 = _mm_mullo_epi16(_val1, _w0);
__m128i _sh1 = _mm_mulhi_epi16(_val1, _w0);
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl0, _sh0));
_sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl0, _sh0));
_sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl1, _sh1));
_sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl1, _sh1));
k0 += 8;
r0 += 16;
}
_sum0 = _mm_add_epi32(_sum0, _sum1);
_sum2 = _mm_add_epi32(_sum2, _sum3);
output0_tm[0] = _mm_reduce_add_epi32(_sum0);
output0_tm[1] = _mm_reduce_add_epi32(_sum2);
output0_tm += 2;
}
for (; i < tiles; i++)
{
#if __AVX2__
const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2 + i % 2);
#else
const short* r0 = bb2.row<const short>(i / 2 + i % 2);
#endif
const short* k0 = kernel0_tm.row<const short>(r);
__m128i _sum0 = _mm_setzero_si128();
__m128i _sum1 = _mm_setzero_si128();
for (int q = 0; q < inch; q++)
{
__m128i _val = _mm_loadu_si128((const __m128i*)r0);
__m128i _w0 = _mm_loadu_si128((const __m128i*)k0);
__m128i _sl0 = _mm_mullo_epi16(_val, _w0);
__m128i _sh0 = _mm_mulhi_epi16(_val, _w0);
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl0, _sh0));
_sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl0, _sh0));
k0 += 8;
r0 += 8;
}
_sum0 = _mm_add_epi32(_sum0, _sum1);
output0_tm[0] = _mm_reduce_add_epi32(_sum0);
output0_tm++;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, 4u, 1, opt.workspace_allocator);
}
{
// const float otm[4][6] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
// };
// 0 = r00 + (r01 + r02) + (r03 + r04)
// 1 = (r01 - r02) + (r03 - r04) * 2
// 2 = (r01 + r02) + (r03 + r04) * 4
// 3 = r05 + (r01 - r02) + (r03 - r04) * 8
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = w_tm / 6 * h_tm / 6;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
int tmp[4][6];
// tile
for (int i = 0; i < outh / 4; i++)
{
for (int j = 0; j < outw / 4; j++)
{
// top_blob_tm.create(tiles, 36, outch, 4u, 1, opt.workspace_allocator);
const int* output0_tm_0 = (const int*)out0_tm + (i * w_tm / 6 + j) * 1;
const int* output0_tm_1 = output0_tm_0 + tiles * 1;
const int* output0_tm_2 = output0_tm_0 + tiles * 2;
const int* output0_tm_3 = output0_tm_0 + tiles * 3;
const int* output0_tm_4 = output0_tm_0 + tiles * 4;
const int* output0_tm_5 = output0_tm_0 + tiles * 5;
int* output0 = out0.row<int>(i * 4) + j * 4;
// 0 = r00 + (r01 + r02) + (r03 + r04)
// 1 = (r01 - r02) + (r03 - r04) * 2
// 2 = (r01 + r02) + (r03 + r04) * 4
// 3 = r05 + (r01 - r02) + (r03 - r04) * 8
// TODO sse optimize
for (int m = 0; m < 5; m++)
{
int tmp02a = output0_tm_1[0] + output0_tm_2[0];
int tmp13a = output0_tm_1[0] - output0_tm_2[0];
int tmp02b = output0_tm_3[0] + output0_tm_4[0];
int tmp13b = output0_tm_3[0] - output0_tm_4[0];
tmp[0][m] = output0_tm_0[0] + tmp02a + tmp02b;
tmp[1][m] = tmp13a + tmp13b * 2;
tmp[2][m] = tmp02a + tmp02b * 4;
tmp[3][m] = output0_tm_5[0] * 4 + tmp13a + tmp13b * 8;
output0_tm_0 += tiles * 6;
output0_tm_1 += tiles * 6;
output0_tm_2 += tiles * 6;
output0_tm_3 += tiles * 6;
output0_tm_4 += tiles * 6;
output0_tm_5 += tiles * 6;
}
for (int m = 5; m < 6; m++)
{
int tmp02a = output0_tm_1[0] + output0_tm_2[0];
int tmp13a = output0_tm_1[0] - output0_tm_2[0];
int tmp02b = output0_tm_3[0] + output0_tm_4[0];
int tmp13b = output0_tm_3[0] - output0_tm_4[0];
tmp[0][m] = (output0_tm_0[0] + tmp02a + tmp02b) * 4;
tmp[1][m] = (tmp13a + tmp13b * 2) * 4;
tmp[2][m] = (tmp02a + tmp02b * 4) * 4;
tmp[3][m] = (output0_tm_5[0] * 4 + tmp13a + tmp13b * 8) * 4;
output0_tm_0 += tiles * 6;
output0_tm_1 += tiles * 6;
output0_tm_2 += tiles * 6;
output0_tm_3 += tiles * 6;
output0_tm_4 += tiles * 6;
output0_tm_5 += tiles * 6;
}
for (int m = 0; m < 4; m++)
{
const int* tmp0 = tmp[m];
int tmp02a = tmp0[1] + tmp0[2];
int tmp13a = tmp0[1] - tmp0[2];
int tmp02b = tmp0[3] + tmp0[4];
int tmp13b = tmp0[3] - tmp0[4];
output0[0] = (tmp0[0] + tmp02a + tmp02b) / 576;
output0[1] = (tmp13a + tmp13b * 2) / 576;
output0[2] = (tmp02a + tmp02b * 4) / 576;
output0[3] = (tmp0[5] + tmp13a + tmp13b * 8) / 576;
output0 += outw;
}
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
|
matrix_op-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file matrix_op-inl.h
* \brief Function definition of matrix related operators
*/
#ifndef MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_
#define MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_
#include <mxnet/operator_util.h>
#include <vector>
#include <string>
#include <algorithm>
#include <utility>
#include <type_traits>
#include "../mshadow_op.h"
#include "../elemwise_op_common.h"
#include "../channel_op_common.h"
#include "../mxnet_op.h"
#include "broadcast_reduce_op.h"
#include "./init_op.h"
#include "../../common/static_array.h"
#include "./slice-inl.h"
#if MXNET_USE_CUDA
#include <thrust/device_vector.h>
#endif
#ifdef __CUDACC__
#include "./pseudo2DTranspose_op-inl.cuh"
#endif
namespace mxnet {
namespace op {
// Operator parameters for Reshape.  `shape` (with its special values 0, -1,
// -2, -3, -4) is the current interface; `target_shape` and `keep_highest`
// are deprecated alternatives kept for backward compatibility.
struct ReshapeParam : public dmlc::Parameter<ReshapeParam> {
mxnet::TShape target_shape;  // deprecated: explicit target shape (one dim may be 0 = inferred)
bool keep_highest;           // deprecated: if true, dim 0 is copied from the input
mxnet::Tuple<int> shape;     // requested shape, may contain special values (see InferReshapeShape)
bool reverse;                // apply special-value rules right-to-left instead of left-to-right
DMLC_DECLARE_PARAMETER(ReshapeParam) {
DMLC_DECLARE_FIELD(shape).set_default(mxnet::Tuple<int>()).describe("The target shape");
DMLC_DECLARE_FIELD(reverse).set_default(false).describe(
"If true then the special values are inferred from right to left");
DMLC_DECLARE_FIELD(target_shape)
.set_default(mxnet::TShape(0, -1))
.describe(
"(Deprecated! Use ``shape`` instead.) "
"Target new shape. One and only one dim can be 0, "
"in which case it will be inferred from the rest of dims");
DMLC_DECLARE_FIELD(keep_highest)
.set_default(false)
.describe(
"(Deprecated! Use ``shape`` instead.) Whether keep the highest dim unchanged."
"If set to true, then the first dim in target_shape is ignored,"
"and always fixed as input");
}
// Equality over all four fields; used for parameter-based caching/hashing.
bool operator==(const ReshapeParam& other) const {
return this->target_shape == other.target_shape && this->keep_highest == other.keep_highest &&
this->shape == other.shape && this->reverse == other.reverse;
}
};
#if MXNET_USE_ONEDNN == 1
// Storage-type / dispatch-mode inference for Reshape in oneDNN builds
// (declaration only; no definition visible in this header).
bool ReshapeStorageType(const nnvm::NodeAttrs& attrs,
const int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs);
// oneDNN-aware Reshape compute entry point operating on NDArrays
// (declaration only; no definition visible in this header).
void ReshapeComputeExCPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs);
#endif // MXNET_USE_ONEDNN == 1
// Expand the user-supplied reshape `shape` against the source shape `dshape`
// into a concrete output shape.  Special values in `shape`:
//   0  -> copy the corresponding dim from the source
//   -1 -> infer this dim from the remaining size (at most one allowed)
//   -2 -> copy all remaining source dims
//   -3 -> merge (multiply) the next two source dims into one
//   -4 -> split one source dim into the following two entries (one may be -1)
// When `reverse` is true, the rules are applied right-to-left (both vectors
// are reversed before and the result reversed after).
template <typename IType>
inline mxnet::TShape InferReshapeShape(const mxnet::Tuple<IType>& shape,
const mxnet::TShape& dshape,
bool reverse) {
std::vector<IType> dshape_vec;
std::vector<IType> param_shape_vec(shape.begin(), shape.end());
for (int i = 0; i < dshape.ndim(); ++i) {
dshape_vec.push_back(dshape[i]);
}
std::vector<IType> tmp;  // output dims accumulated here
size_t src_idx = 0;      // index of the next unconsumed source dim
int inf_idx = -1;        // position in `tmp` of the single -1 (inferred) dim
if (reverse) {
std::reverse(dshape_vec.begin(), dshape_vec.end());
std::reverse(param_shape_vec.begin(), param_shape_vec.end());
}
auto dshape_len = dshape_vec.size();
auto params_len = param_shape_vec.size();
for (size_t i = 0; i < params_len; ++i) {
IType proposed_dim = param_shape_vec[i];
if (proposed_dim == 0) {
// keep same
CHECK_LT(src_idx, dshape_len);
tmp.push_back(dshape_vec[src_idx++]);
} else if (proposed_dim == -1) {
// infer
CHECK_LT(inf_idx, 0) << "One and only one dim can be inferred";
inf_idx = i;
tmp.push_back(1);  // placeholder; resolved after the loop
src_idx++;         // an inferred dim still consumes one source dim
} else if (proposed_dim == -2) {
// copy all remaining dims from source
while (src_idx < dshape_len) {
const int dn = dshape_vec[src_idx++];
tmp.push_back(dn);
}
} else if (proposed_dim == -3) {
// merge two dims from source
CHECK_LT(src_idx, dshape_len - 1);
const int d1 = dshape_vec[src_idx++];
const int d2 = dshape_vec[src_idx++];
if (!mxnet::dim_size_is_known(d1) || !mxnet::dim_size_is_known(d2)) {
tmp.push_back(-1);  // product involving an unknown dim is unknown
} else {
tmp.push_back(d1 * d2);
}
} else if (proposed_dim == -4) {
// split the source dim s into two dims
// read the left dim and then the right dim (either can be -1)
CHECK_LT(i + 2, params_len);
CHECK_LT(src_idx, dshape_len);
const int d0 = dshape_vec[src_idx++];
IType d1 = param_shape_vec[++i];
IType d2 = param_shape_vec[++i];
CHECK(d1 != -1 || d2 != -1) << "Split dims cannot both be -1.";
if (d1 == -1 && d0 >= 0)
d1 = d0 / d2;  // d0 must be known to do this
if (d2 == -1 && d0 >= 0)
d2 = d0 / d1;  // d0 must be known to do this
CHECK(d1 * d2 == static_cast<IType>(d0) || static_cast<IType>(d0) == IType(-1))
<< "Split dims " << d1 << ", " << d2 << " do not divide original dim " << d0;
tmp.push_back(d1);
tmp.push_back(d2);
} else {
// greater than 0, new shape
tmp.push_back(proposed_dim);
src_idx++;
}
}
if (inf_idx >= 0) {
// Resolve the -1 placeholder: total source size divided by the product of
// every other output dim (only possible when the source size is known).
// NOTE(review): new_size of 0 would divide by zero here — presumably ruled
// out by earlier shape checks; confirm against callers.
if (shape_is_known(dshape)) {
IType new_size = 1;
for (IType x : tmp)
new_size *= x;
tmp[inf_idx] = dshape.Size() / new_size;
} else {
tmp[inf_idx] = -1;
}
}
if (reverse) {
std::reverse(param_shape_vec.begin(), param_shape_vec.end());
std::reverse(dshape_vec.begin(), dshape_vec.end());
std::reverse(tmp.begin(), tmp.end());
}
mxnet::TShape oshape(tmp.begin(), tmp.end());
return oshape;
}
// Fill in the single unknown dim of `in` from the fully-known output shape
// `out` (inverse direction of reshape shape inference).
// Returns true when `in` ends up fully known, false when inference is not
// possible (unknown output, more than one unknown input dim, no recoverable
// unknown dim, or a zero-sized known dim that makes division undefined).
inline bool ReverseReshapeInferShape(mxnet::TShape* in, const mxnet::TShape& out) {
  if (shape_is_known(*in) && shape_is_known(out)) {
    return true;
  } else if (!shape_is_known(out)) {
    return false;
  } else {
    int zero_axis = -1;           // index of the single unknown dim, -1 if none found
    int known_dim_size_prod = 1;  // product of all known dims of `in`
    for (int i = 0; i < in->ndim(); i++) {
      if (!mxnet::dim_size_is_known(*in, i)) {
        if (zero_axis != -1)
          return false;  // more than 1 zero found.
        else
          zero_axis = i;
      } else {
        known_dim_size_prod *= (*in)[i];
      }
    }
    // Guard against UB: if no unknown dim was found (e.g. ndim of `in` is
    // itself unknown so the loop never ran), indexing with zero_axis == -1
    // would be out of bounds; if the known dims multiply to 0, the division
    // below would be undefined. In either case the unknown dim cannot be
    // recovered.
    if (zero_axis == -1 || known_dim_size_prod == 0)
      return false;
    (*in)[zero_axis] = out.Size() / known_dim_size_prod;
    return true;
  }
}
// Shape-inference function for the Reshape operator.  Three parameter paths:
//   1) `shape` set       -> expand via InferReshapeShape (special values 0/-1/-2/-3/-4)
//   2) `target_shape` set -> deprecated path; at most one 0-dim is inferred
//   3) neither set        -> infer the input shape backwards from the output
// Returns true iff both input and output shapes are fully known afterwards.
inline bool ReshapeShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs) {
const ReshapeParam& param_ = nnvm::get<ReshapeParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]";
CHECK_EQ(out_attrs->size(), 1U);
mxnet::TShape& dshape = (*in_attrs)[0];
if (!mxnet::ndim_is_known(dshape))
return false;
mxnet::TShape oshape;
if (param_.shape.ndim() != 0) {
oshape = InferReshapeShape(param_.shape, dshape, param_.reverse);
} else if (param_.target_shape.ndim() != -1) {
// Deprecated `target_shape` path.
LOG(INFO) << "Using target_shape will be deprecated.";
oshape = param_.target_shape;
int neg_count = 0;        // number of 0-valued dims (to be inferred)
index_t inf_idx = 0;      // index of the dim to infer
index_t start_idx = param_.keep_highest ? 1 : 0;
if (param_.keep_highest) {
// First dim is forced to match the input regardless of target_shape[0].
oshape[0] = dshape[0];
}
for (int i = start_idx; i < oshape.ndim(); ++i) {
if (oshape[i] == 0) {
neg_count++;
inf_idx = i;
}
}
if (neg_count == 1) {
// Set the inferred dim to 1 first so oshape.Size() is the product of
// the remaining dims, then divide the total input size by it.
oshape[inf_idx] = 1;
oshape[inf_idx] = dshape.Size() / oshape.Size();
}
} else {
// No shape given at all: only succeeds when the output shape is already
// known and the input can be inferred backwards from it.
return shape_is_known((*out_attrs)[0]) &&
ReverseReshapeInferShape(&(*in_attrs)[0], (*out_attrs)[0]);
}
// Best-effort back-propagation of the output shape into the input shape.
ReverseReshapeInferShape(&dshape, oshape);
#if 0
CHECK_EQ(oshape.Size(), dshape.Size())
<< "Target shape size is different to source. "
<< "Target: " << oshape
<< "\nSource: " << dshape;
#endif
SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
return ReverseReshapeInferShape(&(*in_attrs)[0], (*out_attrs)[0]);
}
// Shape-inference for Flatten: collapse every axis after the first into one,
// i.e. (d0, d1, ..., dk) -> (d0, d1 * ... * dk).  Returns false until the
// input shape is fully known.
inline bool FlattenShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector* in_attrs,
                         mxnet::ShapeVector* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]";
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& dshape = (*in_attrs)[0];
  if (!shape_is_known(dshape))
    return false;
  // Product of all trailing dims becomes the second axis of the result.
  size_t flat_dim = 1;
  const int ndim = dshape.ndim();
  for (int axis = 1; axis < ndim; ++axis) {
    flat_dim *= dshape[axis];
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::Shape2(dshape[0], flat_dim));
  return true;
}
// Operator parameters for Transpose: the requested axis permutation.
// An empty `axes` (the default) means "reverse all axes".
struct TransposeParam : public dmlc::Parameter<TransposeParam> {
mxnet::TShape axes;  // target axis order; empty -> invert axes
DMLC_DECLARE_PARAMETER(TransposeParam) {
DMLC_DECLARE_FIELD(axes)
.set_default(mxnet::TShape(0, -1))
.describe("Target axis order. By default the axes will be inverted.");
}
// Equality on the permutation; used for parameter-based caching/hashing.
bool operator==(const TransposeParam& other) const {
return this->axes == other.axes;
}
};
/*!
* \brief This function performs transpose operation on a 2D matrix by utilizing the L1 cache
* \param in input tensor
* \param out output tensor
* \param row shape of dim 0 of input
* \param col shape of dim 1 of input
* \tparam DType Data type
* \tparam is_addto
*/
template <typename DType, bool is_addto>
MSHADOW_XINLINE void Transpose2D(const DType* in, DType* out, index_t row, index_t col) {
  // 32x32 tiles keep each thread's working set within the L1 cache:
  // blocksize * blocksize * num_threads elements stays under cache_size / dtype_size
  // (2^15 bytes / 2^3 bytes-per-element = 2^12 elements). The compiler is left
  // to pick the unroll factor for the serial inner loops.
  const index_t blocksize = 32;
  // Parallelize over the tile grid (both tile loops where collapse is
  // available); the per-tile element loops stay serial so each thread streams
  // a single cache-resident tile.  MSVC's OpenMP lacks `collapse`.
#ifdef _MSC_VER
#pragma omp parallel for
#else
#pragma omp parallel for collapse(2)
#endif  // _MSC_VER
  for (index_t row_blk = 0; row_blk < row; row_blk += blocksize) {
    for (index_t col_blk = 0; col_blk < col; col_blk += blocksize) {
      // Transpose one (up to) blocksize x blocksize tile.
      for (index_t c = col_blk; (c < blocksize + col_blk) && (c < col); ++c) {
        for (index_t r = row_blk; (r < blocksize + row_blk) && (r < row); ++r) {
          if (is_addto) {
            out[c * row + r] += in[r * col + c];
          } else {
            out[c * row + r] = in[r * col + c];
          }
        }
      }
    }
  }
}
inline bool IsIdentityTranspose(const TShape& axes) {
for (dim_t i = 0; i < axes.ndim(); i++) {
if (axes[i] != i)
return false;
}
return true;
}
/*!
 * \brief Shared transpose implementation for CPU and GPU.
 *
 * Handles (in order): zero-size tensors, the GPU pseudo-2D fast path,
 * identity permutations (plain copy / add), and general 2- to 6-dim
 * transposes. Dimensions > 6 are not handled here.
 *
 * \tparam is_addto accumulate into `ret` instead of overwriting
 * \return true when the transpose was performed; false when axes.ndim() > 6
 *         so the caller must fall back to the generic strided kernel.
 */
template <typename xpu, bool is_addto = false>
bool TransposeCommonImpl(RunContext ctx,
                         const TBlob& src,
                         const TBlob& ret,
                         const mxnet::TShape& axes) {
  // return true when running successfully, otherwise false
  using namespace mshadow;
  using namespace mshadow::expr;
  CHECK_EQ(src.type_flag_, ret.type_flag_);
  // zero-size tensor, no need to compute
  if (src.shape_.Size() == 0U)
    return true;
  Stream<xpu>* s = ctx.get_stream<xpu>();
#ifdef __CUDACC__
  // This transpose can be used only if there exist n and m such that:
  // params = (0, ..., n-1, n+m, ..., params.size, n, ..., n+m-1)
  // Example: (0, 2, 3, 1) or (0, 3, 1, 2), but not (0, 2, 1, 3).
  if (isPseudo2DTranspose(axes)) {
    MSHADOW_TYPE_SWITCH(
        ret.type_flag_, DType, { transpose_pseudo2D<DType, is_addto>(ret, src, axes, s); });
    return true;
  }
#endif
  // Special handle the identity case: a flat copy (or element-wise add)
  // is much cheaper than a generic transpose.
  if (IsIdentityTranspose(axes)) {
    MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, {
      Tensor<xpu, 1, DType> in = src.get_with_shape<xpu, 1, DType>(mshadow::Shape1(src.Size()), s);
      Tensor<xpu, 1, DType> out = ret.get_with_shape<xpu, 1, DType>(mshadow::Shape1(ret.Size()), s);
      if (!is_addto) {
        // Use memcpy to accelerate the speed
        Copy(out, in, s);
      } else {
        mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, kAddTo>, xpu>::Launch(
            s, ret.Size(), out.dptr_, in.dptr_);
      }
    });
    return true;
  }
  // Handle the general transpose case, one branch per supported rank
  MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, {
    switch (axes.ndim()) {
      case 2: {
        Tensor<xpu, 2, DType> in  = src.get<xpu, 2, DType>(s);
        Tensor<xpu, 2, DType> out = ret.get<xpu, 2, DType>(s);
        if (ctx.get_ctx().dev_mask() == cpu::kDevMask) {
          // Cache-blocked CPU 2D transpose
          Transpose2D<DType, is_addto>(in.dptr_, out.dptr_, in.shape_[0], in.shape_[1]);
        } else {
          LOG(FATAL) << "Not Implemented. We should never reach here because the 2D case "
                        "in GPU has been covered by transpose_pseudo2D."
                        " Report an issue in Github.";
        }
        break;
      }
      case 3: {
        Tensor<xpu, 3, DType> in  = src.get<xpu, 3, DType>(s);
        Tensor<xpu, 3, DType> out = ret.get<xpu, 3, DType>(s);
        if (!is_addto) {
          out = transpose(in, axes.get<3>());
        } else {
          out += transpose(in, axes.get<3>());
        }
        break;
      }
      case 4: {
        Tensor<xpu, 4, DType> in  = src.get<xpu, 4, DType>(s);
        Tensor<xpu, 4, DType> out = ret.get<xpu, 4, DType>(s);
        if (!is_addto) {
          out = transpose(in, axes.get<4>());
        } else {
          out += transpose(in, axes.get<4>());
        }
        break;
      }
      case 5: {
        Tensor<xpu, 5, DType> in  = src.get<xpu, 5, DType>(s);
        Tensor<xpu, 5, DType> out = ret.get<xpu, 5, DType>(s);
        if (!is_addto) {
          out = transpose(in, axes.get<5>());
        } else {
          out += transpose(in, axes.get<5>());
        }
        break;
      }
      case 6: {
        Tensor<xpu, 6, DType> in  = src.get<xpu, 6, DType>(s);
        Tensor<xpu, 6, DType> out = ret.get<xpu, 6, DType>(s);
        if (!is_addto) {
          out = transpose(in, axes.get<6>());
        } else {
          out += transpose(in, axes.get<6>());
        }
        break;
      }
      default:
        // return false when dimensions > 6
        return false;
        break;
    }
  });
  return true;
}
/*!
 * \brief Transpose `src` into `ret` according to `axes`; supports at most
 *        6 dimensions. Aborts (CHECK failure) on unsupported input.
 */
template <typename xpu, bool is_addto = false>
void TransposeImpl(RunContext ctx, const TBlob& src, const TBlob& ret, const mxnet::TShape& axes) {
  CHECK_LE(axes.ndim(), 6) << "TransposeImpl supports at most 6 dimensions";
  const bool succeeded = TransposeCommonImpl<xpu, is_addto>(ctx, src, ret, axes);
  CHECK(succeeded) << "Failed to execute TransposeImpl Operator";
}
template <bool is_addto>
struct TransposeExKernel {
  /*!
   * \brief Generic strided transpose kernel for arbitrary ndim: maps each
   *        linear input index to its transposed output index via precomputed
   *        input/output strides.
   * \param tid global thread id
   * \param out_data output data
   * \param in_data input data
   * \param strides input strides and output strides, laid out back-to-back
   *        (first `ndim` entries are input strides, next `ndim` are output)
   * \param ndim the number of dimension
   */
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t tid,
                                  DType* out_data,
                                  const DType* in_data,
                                  const dim_t* strides,
                                  const int ndim) {
    // tid is the index of input data
    const dim_t* const out_strides = strides + ndim;
    index_t k      = tid;
    index_t out_id = 0;
    // Decompose tid into multi-dim coordinates via input strides and
    // re-compose them with output strides.
    for (int i = 0; i < ndim; ++i) {
      out_id += (k / strides[i]) * out_strides[i];
      k %= strides[i];
    }
    if (is_addto)
      out_data[out_id] += in_data[tid];
    else
      out_data[out_id] = in_data[tid];
  }
};
/*!
 * \brief Transpose for any number of dimensions. Tries the fast common path
 *        first (ndim <= 6); otherwise launches the generic strided kernel,
 *        which needs `strides_xpu` as scratch space on the device.
 */
template <typename xpu, bool is_addto = false>
void TransposeExImpl(RunContext ctx,
                     const TBlob& src,
                     const TBlob& ret,
                     const mxnet::TShape& axes,
                     mshadow::Tensor<xpu, 1, dim_t>& strides_xpu) {  // NOLINT(*)
  /*
   * If ndim <= 6, it is not necessary to allocate any space for `strides_xpu`
   * If ndim > 6, `strides_xpu` should be allocated `ndim * 2` elements
   */
  using namespace mshadow;
  using namespace mshadow::expr;
  if (TransposeCommonImpl<xpu, is_addto>(ctx, src, ret, axes))
    return;
  CHECK_GT(axes.ndim(), 6) << "Failed to execute TransposeExImpl when axes.ndim() <= 6";
  Stream<xpu>* s = ctx.get_stream<xpu>();
  MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, {
    CHECK_EQ(strides_xpu.MSize(), axes.ndim() * 2)
        << "If ndim > 6, `strides_xpu` should be allocated `ndim * 2` elements";
    const mxnet::TShape& in_shape = src.shape_;
    // strides: in_strides and out_strides, packed into one buffer
    const int ndim = axes.ndim();
    std::vector<dim_t> strides(ndim * 2);
    // compute in_strides (row-major: innermost stride is 1)
    strides[ndim - 1] = 1;
    for (int i = ndim - 2; i >= 0; --i) {
      strides[i] = strides[i + 1] * in_shape[i + 1];
    }
    // compute out_strides (strides of the transposed shape)
    std::vector<dim_t> tmp_strides(ndim);
    tmp_strides[ndim - 1] = 1;
    for (int i = ndim - 2; i >= 0; --i) {
      tmp_strides[i] = tmp_strides[i + 1] * in_shape[axes[i + 1]];
    }
    // reorder tmp_strides to out_strides: out_strides is indexed by the
    // *input* axis so the kernel can walk input coordinates directly
    dim_t* const out_strides = &strides[ndim];
    for (int i = 0; i < ndim; ++i) {
      out_strides[axes[i]] = tmp_strides[i];
    }
    Shape<1> strides_shape;
    strides_shape[0] = ndim * 2;
    Tensor<cpu, 1, dim_t> strides_cpu(strides.data(), strides_shape);
    // copy arguments into xpu context
    Copy(strides_xpu, strides_cpu, s);
    const DType* in = src.dptr<DType>();
    DType* out      = ret.dptr<DType>();
    if (is_addto) {
      mxnet_op::Kernel<TransposeExKernel<true>, xpu>::Launch(
          s, in_shape.Size(), out, in, strides_xpu.dptr_, ndim);
    } else {
      mxnet_op::Kernel<TransposeExKernel<false>, xpu>::Launch(
          s, in_shape.Size(), out, in, strides_xpu.dptr_, ndim);
    }
  });
}
/*!
 * \brief Request the device scratch buffer TransposeExImpl needs for the
 *        generic (> 6-dim) path; returns an empty tensor otherwise.
 */
template <typename xpu>
mshadow::Tensor<xpu, 1, dim_t> GetTransposeExWorkspace(const OpContext& ctx,
                                                       const mxnet::TShape& axes) {
  if (axes.ndim() <= 6) {
    // Fast path needs no workspace.
    return {};
  }
  mshadow::Shape<1> workspace_shape;
  workspace_shape[0] = axes.ndim() * 2;  // input strides + output strides
  return ctx.requested[0].get_space_typed<xpu, 1, dim_t>(workspace_shape, ctx.get_stream<xpu>());
}
// matrix transpose
/*!
 * \brief FCompute entry point for the transpose operator. Builds the axis
 *        permutation (reversing all axes when none was given), then
 *        dispatches to TransposeExImpl with write or accumulate semantics.
 */
template <typename xpu>
void Transpose(const nnvm::NodeAttrs& attrs,
               const OpContext& ctx,
               const std::vector<TBlob>& inputs,
               const std::vector<OpReqType>& req,
               const std::vector<TBlob>& outputs) {
  if (req[0] == kNullOp) {
    return;
  }
  const TransposeParam& param = nnvm::get<TransposeParam>(attrs.parsed);
  CHECK(req[0] == kWriteTo || req[0] == kAddTo)
      << "Transpose only supports kNullOp, kWriteTo and kAddTo";
  mxnet::TShape axes;
  if (param.axes.ndim() == 0) {
    // Default: reverse the axis order, e.g. (0,1,2) -> (2,1,0).
    axes = mxnet::TShape(inputs[0].ndim(), -1);
    for (int i = 0; i < axes.ndim(); ++i) {
      axes[i] = axes.ndim() - 1 - i;
    }
  } else {
    // Normalize possibly-negative axes into [0, ndim).
    axes = common::CanonicalizeAxes(param.axes);
  }
  mshadow::Tensor<xpu, 1, dim_t> workspace = GetTransposeExWorkspace<xpu>(ctx, axes);
  if (req[0] == kAddTo) {
    TransposeExImpl<xpu, true>(ctx.run_ctx, inputs[0], outputs[0], axes, workspace);
  } else {
    TransposeExImpl<xpu, false>(ctx.run_ctx, inputs[0], outputs[0], axes, workspace);
  }
}
/*!
 * \brief Bidirectional shape inference for transpose: the forward direction
 *        permutes the input shape into `ret`; the backward direction
 *        un-permutes any known output dims into `get` so partially known
 *        shapes propagate both ways.
 */
inline bool TransposeShape(const nnvm::NodeAttrs& attrs,
                           mxnet::ShapeVector* in_attrs,
                           mxnet::ShapeVector* out_attrs) {
  const TransposeParam& param = nnvm::get<TransposeParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  mxnet::TShape& shp     = (*in_attrs)[0];
  mxnet::TShape& out_shp = (*out_attrs)[0];
  if (!mxnet::ndim_is_known(shp) && !mxnet::ndim_is_known(out_shp))
    return false;  // none of the shapes is known
  if (out_shp.ndim() >= 0 && shp.ndim() >= 0)
    CHECK_EQ(out_shp.ndim(), shp.ndim());
  // `get` is inferred back onto the input, `ret` forward onto the output.
  mxnet::TShape get(std::max(shp.ndim(), out_shp.ndim()), -1);
  mxnet::TShape ret(std::max(shp.ndim(), out_shp.ndim()), -1);
  if (param.axes.ndim() == 0) {
    // Default permutation: reverse all axes.
    for (int i = 0; i < shp.ndim(); ++i) {
      ret[i] = shp[shp.ndim() - 1 - i];
    }
    for (int i = 0; i < out_shp.ndim(); ++i) {
      get[shp.ndim() - 1 - i] = out_shp[i];
    }
  } else {
    CHECK_EQ(std::max(shp.ndim(), out_shp.ndim()), param.axes.ndim());
    for (int i = 0; i < shp.ndim(); ++i) {
      CHECK(param.axes[i] < static_cast<int64_t>(shp.ndim()));
      ret[i] = shp[param.axes[i]];
    }
    for (int i = 0; i < out_shp.ndim(); ++i) {
      get[param.axes[i]] = out_shp[i];
    }
  }
  SHAPE_ASSIGN_CHECK(*in_attrs, 0, get);
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, ret);
  return shape_is_known(ret);
}
// Parameter struct for expand_dims: the position of the inserted axis.
struct ExpandDimParam : public dmlc::Parameter<ExpandDimParam> {
  int axis;  // insertion position; negative values count from the end
  DMLC_DECLARE_PARAMETER(ExpandDimParam) {
    DMLC_DECLARE_FIELD(axis).describe(
        "Position where new axis is to be inserted. Suppose that "
        "the input `NDArray`'s dimension is `ndim`, the range of "
        "the inserted axis is `[-ndim, ndim]`");
  }
  bool operator==(const ExpandDimParam& other) const {
    return this->axis == other.axis;
  }
  // Serializes the parameter into a string dict (used for caching/FFI).
  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
    std::ostringstream axis_s;
    axis_s << axis;
    (*dict)["axis"] = axis_s.str();
  }
};
/*!
 * \brief Bidirectional shape inference for expand_dims: forward inserts a
 *        size-1 axis at `axis`; backward removes that axis from a known
 *        output shape to recover the input shape.
 */
inline bool ExpandDimShape(const nnvm::NodeAttrs& attrs,
                           mxnet::ShapeVector* in_attrs,
                           mxnet::ShapeVector* out_attrs) {
  const ExpandDimParam& param = nnvm::get<ExpandDimParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  mxnet::TShape& ishape = (*in_attrs)[0];
  mxnet::TShape& oshape = (*out_attrs)[0];
  if (!mxnet::ndim_is_known(ishape) && !mxnet::ndim_is_known(oshape)) {
    return false;
  }
  int indim           = ishape.ndim();
  bool unknown_ishape = false;
  if (-1 == indim) {
    // Input ndim unknown: derive it from the (known) output ndim.
    indim          = oshape.ndim() - 1;
    unknown_ishape = true;
  }
  int axis = param.axis;
  if (axis < 0) {
    axis += indim + 1;
  }
  CHECK(axis >= 0 && axis <= indim) << "axis must be in the range [" << -indim << ", " << indim
                                    << "] (" << param.axis << " provided)";
  // Forward: copy input dims around the inserted size-1 axis.
  mxnet::TShape ret(indim + 1, -1);
  for (int i = 0; i < axis; ++i) {
    ret[i] = (unknown_ishape ? -1 : ishape[i]);
  }
  ret[axis] = 1;
  for (int i = axis + 1; i < indim + 1; ++i) {
    ret[i] = (unknown_ishape ? -1 : ishape[i - 1]);
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, ret);
  // Backward: drop the inserted axis from the output shape.
  ret = mxnet::TShape(indim, -1);
  for (int i = 0; i < axis; ++i)
    ret[i] = oshape[i];
  for (int i = axis + 1; i < indim + 1; ++i)
    ret[i - 1] = oshape[i];
  SHAPE_ASSIGN_CHECK(*in_attrs, 0, ret);
  return shape_is_known(in_attrs->at(0)) && shape_is_known(out_attrs->at(0));
}
// Currently DNNL only supports step = 1 or step has no value
inline bool SupportDNNLSlice(const SliceParam& param) {
  if (param.step.ndim() == 0U) {
    return true;  // no step given: defaults to 1 on every axis
  }
  for (int axis = 0; axis < param.step.ndim(); ++axis) {
    const auto& step = param.step[axis];
    if (step.has_value() && step.value() != 1) {
      return false;
    }
  }
  return true;
}
/*!
 * \brief Storage-type inference for slice forward. Dense input stays dense
 *        (via oneDNN when available on CPU); CSR input stays CSR only when
 *        every step is 1; anything else falls back to dense.
 */
inline bool SliceForwardInferStorageType(const nnvm::NodeAttrs& attrs,
                                         const int dev_mask,
                                         DispatchMode* dispatch_mode,
                                         std::vector<int>* in_attrs,
                                         std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1);
  CHECK_EQ(out_attrs->size(), 1);
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  const auto& in_stype    = in_attrs->at(0);
  auto& out_stype         = out_attrs->at(0);
  bool dispatched         = false;
  const auto dispatch_ex  = DispatchMode::kFComputeEx;
  // If step = 1, no need to fallback; otherwise fallback to dense
  bool trivial_step = false;
  if (param.step.ndim() == 0U) {
    trivial_step = true;
  } else if (param.step.ndim() == 1U &&
             (!param.step[0].has_value() || param.step[0].value() == 1)) {
    trivial_step = true;
  }
  if (in_stype == kDefaultStorage) {
#if MXNET_USE_ONEDNN == 1
    // Prefer the oneDNN path on CPU when the slice is oneDNN-compatible.
    if (dev_mask == Context::kCPU && DNNLEnvSet() && SupportDNNLSlice(param)) {
      dispatched = storage_type_assign(&out_stype, kDefaultStorage, dispatch_mode, dispatch_ex);
    }
#endif
    if (!dispatched) {
      dispatched =
          storage_type_assign(&out_stype, kDefaultStorage, dispatch_mode, DispatchMode::kFCompute);
    }
  }
  if (!dispatched && in_stype == kCSRStorage && trivial_step) {
    dispatched = storage_type_assign(&out_stype, kCSRStorage, dispatch_mode, dispatch_ex);
  }
  if (!dispatched) {
    dispatched = dispatch_fallback(out_attrs, dispatch_mode);
  }
  return dispatched;
}
// slice the indptr of a csr
// Rebases each indptr entry so the sliced matrix's indptr starts at 0:
// out[i] = in[i] - *base, where *base is the first retained indptr value.
struct SliceCsrIndPtr {
  template <typename IType>
  MSHADOW_XINLINE static void Map(int i, IType* out, const IType* in, const IType* base) {
    KERNEL_ASSIGN(out[i], kWriteTo, in[i] - *base);
  }
};
/*
 * a wrapper to launch SliceCsrIndPtr kernel.
 * slice [src[begin] .. src[end]) and store in dst[0, end - begin)
 */
template <typename xpu, typename IType>
void SliceCsrIndPtrImpl(const int begin,
                        const int end,
                        RunContext ctx,
                        const IType* src,
                        IType* dst) {
  using namespace mshadow;
  using namespace mxnet_op;
  Stream<xpu>* s = ctx.get_stream<xpu>();
  // end - begin rows plus the trailing sentinel entry of the indptr array
  int indptr_len = end - begin + 1;
  // src + begin is passed twice: once as the data to copy, once as the base
  // value subtracted from every entry (see SliceCsrIndPtr).
  Kernel<SliceCsrIndPtr, xpu>::Launch(s, indptr_len, dst, src + begin, src + begin);
}
/*
 * Slice a CSR NDArray for first dimension
 * Copies the row range [begin[0], end[0]) of `in` into `out`: rebases the
 * indptr, then block-copies the corresponding indices and values.
 */
template <typename xpu>
void SliceDimOneCsrImpl(const mxnet::TShape& begin,
                        const mxnet::TShape& end,
                        const OpContext& ctx,
                        const NDArray& in,
                        const NDArray& out) {
  using namespace mshadow;
  using namespace mxnet_op;
  using namespace csr;
  nnvm::dim_t begin_row   = begin[0];
  nnvm::dim_t end_row     = end[0];
  nnvm::dim_t indptr_len  = end_row - begin_row + 1;
  out.CheckAndAllocAuxData(kIndPtr, Shape1(indptr_len));
  // assume idx indptr share the same type
  MSHADOW_IDX_TYPE_SWITCH(in.aux_type(kIndPtr), RType, {
    MSHADOW_IDX_TYPE_SWITCH(in.aux_type(kIdx), IType, {
      MSHADOW_TYPE_SWITCH(in.dtype(), DType, {
        RType* in_indptr  = in.aux_data(kIndPtr).dptr<RType>();
        RType* out_indptr = out.aux_data(kIndPtr).dptr<RType>();
        SliceCsrIndPtrImpl<xpu, RType>(begin_row, end_row, ctx.run_ctx, in_indptr, out_indptr);
        Stream<xpu>* s = ctx.get_stream<xpu>();
        // nnz of the slice = last entry of the rebased output indptr
        // (copied device -> host so we can size the allocations below)
        RType nnz = 0;
        mshadow::Copy(Tensor<cpu, 1, RType>(&nnz, Shape1(1)),
                      Tensor<xpu, 1, RType>(out_indptr + indptr_len - 1, Shape1(1), s));
        // return csr zeros if nnz = 0
        if (nnz == 0) {
          out.set_aux_shape(kIdx, Shape1(0));
          return;
        }
        // copy indices and values
        out.CheckAndAllocAuxData(kIdx, Shape1(nnz));
        out.CheckAndAllocData(Shape1(nnz));
        IType* in_idx   = in.aux_data(kIdx).dptr<IType>();
        IType* out_idx  = out.aux_data(kIdx).dptr<IType>();
        DType* in_data  = in.data().dptr<DType>();
        DType* out_data = out.data().dptr<DType>();
        // offset of the first retained nonzero in the source arrays
        RType offset = 0;
        mshadow::Copy(Tensor<cpu, 1, RType>(&offset, Shape1(1)),
                      Tensor<xpu, 1, RType>(in_indptr + begin_row, Shape1(1), s));
        mshadow::Copy(Tensor<xpu, 1, IType>(out_idx, Shape1(nnz), s),
                      Tensor<xpu, 1, IType>(in_idx + offset, Shape1(nnz), s),
                      s);
        mshadow::Copy(Tensor<xpu, 1, DType>(out_data, Shape1(nnz), s),
                      Tensor<xpu, 1, DType>(in_data + offset, Shape1(nnz), s),
                      s);
      });
    });
  });
}
/*!
 * \brief slice a CSRNDArray for two dimensions
 */
struct SliceDimTwoCsrAssign {
  /*!
   * \brief This function slices a CSRNDArray on axis one between begin_col and end_col
   * \param i loop index (one row per kernel invocation)
   * \param out_idx output csr ndarray column indices
   * \param out_data output csr ndarray data
   * \param out_indptr output csr ndarray row index pointer
   * \param in_idx input csr ndarray column indices
   * \param in_data input csr ndarray data
   * \param in_indptr input csr ndarray row index pointer
   * \param begin_col begin column indice
   * \param end_col end column indice
   */
  template <typename IType, typename RType, typename DType>
  MSHADOW_XINLINE static void Map(int i,
                                  IType* out_idx,
                                  DType* out_data,
                                  const RType* out_indptr,
                                  const IType* in_idx,
                                  const DType* in_data,
                                  const RType* in_indptr,
                                  const int begin_col,
                                  const int end_col) {
    // ind walks the output position for row i (out_indptr is precomputed)
    RType ind = out_indptr[i];
    for (RType j = in_indptr[i]; j < in_indptr[i + 1]; j++) {
      // indices of CSRNDArray are in ascending order per row,
      // so the first index >= end_col terminates the row scan
      if (in_idx[j] >= end_col) {
        break;
      } else if (in_idx[j] >= begin_col) {
        // shift column indices so the slice starts at column 0
        out_idx[ind]  = in_idx[j] - begin_col;
        out_data[ind] = in_data[j];
        ind++;
      }
    }
  }
};
/*
 * Slice a CSR NDArray for two dimensions
 * (declaration only; CPU/GPU definitions live in the .cc/.cu files)
 */
template <typename xpu>
void SliceDimTwoCsrImpl(const mxnet::TShape& begin,
                        const mxnet::TShape& end,
                        const OpContext& ctx,
                        const NDArray& in,
                        const NDArray& out);
/*!
 * \brief Slice a CSR NDArray. Normalizes the begin/end parameters (resolving
 *        negative indices) and dispatches to the 1-D or 2-D CSR slicer.
 *        Only kWriteTo/kNullOp requests are supported.
 */
template <typename xpu>
void SliceCsrImpl(const SliceParam& param,
                  const OpContext& ctx,
                  const NDArray& in,
                  OpReqType req,
                  const NDArray& out) {
  if (req == kNullOp)
    return;
  CHECK_NE(req, kAddTo) << "kAddTo for Slice on CSR input is not supported";
  CHECK_NE(req, kWriteInplace) << "kWriteInplace for Slice on CSR input is not supported";
  const mxnet::TShape ishape = in.shape();
  const mxnet::TShape oshape = out.shape();
  int N = ishape.ndim();
  mxnet::TShape begin(N, -1), end(N, -1);
  for (int i = 0; i < N; ++i) {
    int s = 0;
    if (i < param.begin.ndim() && param.begin[i]) {
      s = *param.begin[i];
      if (s < 0)
        s += ishape[i];  // resolve negative begin index
    }
    begin[i] = s;
    // end is derived from the already-inferred output shape, not param.end
    end[i] = s + oshape[i];
  }
  switch (N) {
    case 1: {
      SliceDimOneCsrImpl<xpu>(begin, end, ctx, in, out);
      break;
    }
    case 2: {
      SliceDimTwoCsrImpl<xpu>(begin, end, ctx, in, out);
      break;
    }
    default:
      LOG(FATAL) << "CSR is only for 2-D shape";
      break;
  }
}
/*!
 * \brief FComputeEx entry point for slice on non-default storage.
 *        Only CSR input is handled here; other storage types abort.
 */
template <typename xpu>
void SliceEx(const nnvm::NodeAttrs& attrs,
             const OpContext& ctx,
             const std::vector<NDArray>& inputs,
             const std::vector<OpReqType>& req,
             const std::vector<NDArray>& outputs) {
  CHECK_EQ(inputs.size(), 1);
  CHECK_EQ(outputs.size(), 1);
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  auto in_stype           = inputs[0].storage_type();
  if (in_stype == kCSRStorage) {
    SliceCsrImpl<xpu>(param, ctx, inputs[0], req[0], outputs[0]);
  } else {
    // Trailing space keeps the storage type from fusing with the word "type"
    // in the log output.
    LOG(FATAL) << "Slice not implemented for storage type " << in_stype;
  }
}
/*!
 * \brief Normalize slice parameters (optional, possibly negative
 *        begin/end/step) into concrete per-axis index ranges.
 * \return true when the sliced output is zero-sized on some axis
 *         (begin == end), false otherwise.
 */
template <int ndim>
inline bool GetIndexRange(const mxnet::TShape& dshape,
                          const mxnet::Tuple<dmlc::optional<index_t>>& param_begin,
                          const mxnet::Tuple<dmlc::optional<index_t>>& param_end,
                          const mxnet::Tuple<dmlc::optional<index_t>>& param_step,
                          common::StaticArray<index_t, ndim>* begin,
                          common::StaticArray<index_t, ndim>* end,
                          common::StaticArray<index_t, ndim>* step) {
  // NOTE(review): the return value is TRUE for a zero-sized result — callers
  // such as SliceAssignOpForward use it to skip work. (An earlier comment
  // here stated the opposite.)
  bool zero_size_shape = false;
  CHECK_NE(dshape.ndim(), 0U);
  CHECK_LE(param_begin.ndim(), dshape.ndim()) << "Slicing axis exceeds data dimensions";
  CHECK_LE(param_end.ndim(), dshape.ndim()) << "Slicing axis exceeds data dimensions";
  CHECK_EQ(param_begin.ndim(), param_end.ndim()) << "begin and end must have the same length";
  CHECK_EQ(ndim, dshape.ndim()) << "Static array size=" << ndim
                                << " is not equal to data shape ndim=" << dshape.ndim();
  if (param_step.ndim() > 0) {
    CHECK_EQ(param_step.ndim(), param_begin.ndim()) << "step and begin must have the same length";
  }
  for (int i = 0; i < param_begin.ndim(); ++i) {
    // missing step defaults to 1
    index_t s = param_step.ndim() > 0 && param_step[i].has_value() ? param_step[i].value() : 1;
    CHECK_NE(s, 0) << "slice op step[" << i << "] cannot be 0";
    index_t b = 0, e = 0;
    const index_t len = dshape[i];
    if (len > 0) {
      // defaults depend on direction: forward slices start at 0 and end at
      // len; backward slices start at len-1 and end just before index 0
      b = param_begin[i].has_value() ? param_begin[i].value() : (s < 0 ? len - 1 : 0);
      e = param_end[i].has_value() ? param_end[i].value() : (s < 0 ? -1 : len);
      if (b < 0) {
        b += len;  // resolve negative begin
      }
      if (e < 0 && param_end[i].has_value()) {
        e += len;  // resolve negative end (only when explicitly given)
      }
      // move the begin and end to correct position for calculating dim size
      b = (b < 0 && s > 0) ? 0 : b;
      b = (b > len - 1 && s < 0) ? len - 1 : b;
      // if the start value lead to empty tensor under step s, use -1 for indication
      b = (b < 0 || b > len - 1) ? -1 : b;
      e = e > -1 ? e : -1;
      e = e > len ? len : e;
    } else if (len == 0) {
      b = 0;
      e = 0;
    }
    (*begin)[i] = b;
    (*end)[i]   = e;
    (*step)[i]  = s;
    // checking begin==end
    if (b == e) {
      zero_size_shape = true;
    }
  }
  // axes beyond the given parameters are taken whole
  for (int i = param_begin.ndim(); i < dshape.ndim(); ++i) {
    (*begin)[i] = 0;
    (*end)[i]   = dshape[i];
    (*step)[i]  = 1;
  }
  return zero_size_shape;
}
// Computes the length of output dimension i from a normalized slice range
// [b, e) with step s (as produced by GetIndexRange) and writes it to oshape.
inline void SetSliceOpOutputDimSize(const mxnet::TShape& dshape,
                                    const index_t i,
                                    const index_t b,
                                    const index_t e,
                                    const index_t s,
                                    mxnet::TShape* oshape) {
  if (!mxnet::dim_size_is_known(dshape, i)) {
    // Unknown input dim stays unknown in the output.
    (*oshape)[i] = -1;
    return;
  }
  index_t dim_len = 0;
  if (b >= 0 && e != b) {
    if (s > 0 && e > b) {
      // ceil((e - b) / s) for a forward slice
      dim_len = (e - b - 1) / s + 1;
    } else if (s < 0 && e < b) {
      // ceil((b - e) / -s) for a backward slice
      dim_len = (b - e - 1) / (-s) + 1;
    }
  }
  (*oshape)[i] = dim_len;
}
/*!
 * \brief Shape inference for the slice operator: normalizes the slice
 *        parameters per axis and derives each output dimension size.
 */
inline bool SliceOpShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector* in_attrs,
                         mxnet::ShapeVector* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& dshape = (*in_attrs)[0];
  if (!mxnet::ndim_is_known(dshape))
    return false;
  CHECK_GT(dshape.ndim(), 0) << "slice only works for ndim > 0";
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  // start from the input shape: unsliced axes keep their size
  mxnet::TShape oshape = dshape;
  MXNET_NDIM_SWITCH(dshape.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(dshape, param.begin, param.end, param.step, &begin, &end, &step);
    for (int i = 0; i < param.begin.ndim(); ++i) {
      const index_t b = begin[i], e = end[i], s = step[i];
      SetSliceOpOutputDimSize(dshape, i, b, e, s, &oshape);
    }
  })
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
  return shape_is_known(dshape) && shape_is_known(oshape);
}
// Slice forward kernel, specialized per device: GPU uses one thread per
// output element, CPU one thread per output row.
template <int ndim, int req, typename xpu>
struct slice_forward;
template <int ndim, int req>
struct slice_forward<ndim, req, gpu> {
  // i is the index of a single output element (flattened)
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t i,
                                  DType* out,
                                  const DType* data,
                                  const mshadow::Shape<ndim> dshape,
                                  const mshadow::Shape<ndim> oshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t data_last_dim_size = dshape[ndim - 1];
    const index_t out_last_dim_size  = oshape[ndim - 1];
    const index_t step_last_dim      = step[ndim - 1];
    const index_t begin_last_dim     = begin[ndim - 1];
    const index_t j                  = i % out_last_dim_size;
    index_t irow                     = 0;  // row id of flattend 2D data
    index_t stride                   = 1;
    index_t idx                      = i / out_last_dim_size;
    // map the output row coordinate back to the input row offset
#pragma unroll
    for (int k = ndim - 2; k >= 0; --k) {
      irow += stride * ((idx % oshape[k]) * step[k] + begin[k]);
      idx /= oshape[k];
      stride *= dshape[k];
    }
    KERNEL_ASSIGN(
        out[i], req, data[irow * data_last_dim_size + j * step_last_dim + begin_last_dim]);
  }
};
template <int ndim, int req>
struct slice_forward<ndim, req, cpu> {
  // i is the i-th row after flattening out into 2D tensor; the whole last
  // dimension of that row is written by one invocation
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t i,
                                  DType* out,
                                  const DType* data,
                                  const mshadow::Shape<ndim> dshape,
                                  const mshadow::Shape<ndim> oshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t data_last_dim_size = dshape[ndim - 1];
    const index_t out_last_dim_size  = oshape[ndim - 1];
    const index_t step_last_dim      = step[ndim - 1];
    const index_t begin_last_dim     = begin[ndim - 1];
    index_t out_offset               = i * out_last_dim_size;
    for (index_t j = 0; j < out_last_dim_size; ++j) {
      index_t irow   = 0;  // row id of flattend 2D data
      index_t stride = 1;
      index_t idx    = i;
      // map the output row coordinate back to the input row offset
#pragma unroll
      for (int k = ndim - 2; k >= 0; --k) {
        irow += stride * ((idx % oshape[k]) * step[k] + begin[k]);
        idx /= oshape[k];
        stride *= dshape[k];
      }
      KERNEL_ASSIGN(out[out_offset++],
                    req,
                    data[irow * data_last_dim_size + j * step_last_dim + begin_last_dim]);
    }
  }
};
/*!
 * \brief FCompute entry point for slice forward: normalizes the slice range
 *        and launches the device-specific slice_forward kernel.
 */
template <typename xpu>
void SliceOpForward(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  if (req[0] == kNullOp)
    return;
  using namespace mshadow;
  Stream<xpu>* s    = ctx.get_stream<xpu>();
  const TBlob& data = inputs[0];
  const TBlob& out  = outputs[0];
  if (out.Size() == 0)
    return;
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  MXNET_NDIM_SWITCH(data.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(data.shape_, param.begin, param.end, param.step, &begin, &end, &step);
    MSHADOW_TYPE_SWITCH_WITH_BOOL(out.type_flag_, DType, {MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      // CPU kernel: one thread per output row; GPU: one per output element
      size_t num_threads = out.shape_.FlatTo2D()[0];
      if (std::is_same<xpu, gpu>::value) {
        num_threads *= out.shape_.get<ndim>()[ndim - 1];
      }
      mxnet_op::Kernel<slice_forward<ndim, Req, xpu>, xpu>::Launch(
          s,
          num_threads,
          out.dptr<DType>(),
          data.dptr<DType>(),
          data.shape_.get<ndim>(),
          out.shape_.get<ndim>(),
          begin,
          step);
    })})
  })
}
// Scatter kernel used by slice backward and slice_assign: writes `val` into
// the sliced positions of `out`. Specialized per device like slice_forward.
template <int ndim, int req, typename xpu>
struct slice_assign;
template <int ndim, int req>
struct slice_assign<ndim, req, cpu> {
  // i is the i-th row after flattening out into 2D tensor; one invocation
  // scatters the whole last dimension of that row
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t i,
                                  DType* out,
                                  const DType* val,
                                  const mshadow::Shape<ndim> oshape,
                                  const mshadow::Shape<ndim> vshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t data_last_dim_size = oshape[ndim - 1];
    const index_t out_last_dim_size  = vshape[ndim - 1];
    const index_t step_last_dim      = step[ndim - 1];
    const index_t begin_last_dim     = begin[ndim - 1];
    index_t offset                   = i * out_last_dim_size;
    for (index_t j = 0; j < out_last_dim_size; ++j) {
      index_t irow   = 0;  // row id of flattend 2D out
      index_t stride = 1;
      index_t idx    = i;
      // map the value row coordinate to the destination row offset
#pragma unroll
      for (int k = ndim - 2; k >= 0; --k) {
        irow += stride * ((idx % vshape[k]) * step[k] + begin[k]);
        idx /= vshape[k];
        stride *= oshape[k];
      }
      KERNEL_ASSIGN(
          out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim], req, val[offset++]);
    }
  }
};
template <int ndim, int req>
struct slice_assign<ndim, req, gpu> {
  // i is the index of a single value element (flattened)
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t i,
                                  DType* out,
                                  const DType* val,
                                  const mshadow::Shape<ndim> oshape,
                                  const mshadow::Shape<ndim> vshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t data_last_dim_size = oshape[ndim - 1];
    const index_t out_last_dim_size  = vshape[ndim - 1];
    const index_t step_last_dim      = step[ndim - 1];
    const index_t begin_last_dim     = begin[ndim - 1];
    const index_t j                  = i % out_last_dim_size;
    index_t irow                     = 0;  // row id of flattend 2D out
    index_t stride                   = 1;
    index_t idx                      = i / out_last_dim_size;
    // map the value row coordinate to the destination row offset
#pragma unroll
    for (int k = ndim - 2; k >= 0; --k) {
      irow += stride * ((idx % vshape[k]) * step[k] + begin[k]);
      idx /= vshape[k];
      stride *= oshape[k];
    }
    KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim], req, val[i]);
  }
};
/*!
 * \brief Backward of slice: scatters the output gradient back into the
 *        (zero-filled, for kWriteTo) input gradient at the sliced positions.
 */
template <typename xpu>
void SliceOpBackward(const nnvm::NodeAttrs& attrs,
                     const OpContext& ctx,
                     const std::vector<TBlob>& inputs,
                     const std::vector<OpReqType>& req,
                     const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  if (req[0] == kNullOp)
    return;
  using namespace mshadow;
  Stream<xpu>* s     = ctx.get_stream<xpu>();
  const TBlob& ograd = inputs[0];
  const TBlob& igrad = outputs[0];
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  if (req[0] == kWriteTo) {
    // positions outside the slice receive zero gradient
    Fill(s, igrad, req[0], 0);
  } else if (req[0] == kWriteInplace) {
    LOG(FATAL) << "_slice_backward does not support kWriteInplace";
  }
  if (ograd.Size() == 0)
    return;
  MXNET_NDIM_SWITCH(ograd.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(igrad.shape_, param.begin, param.end, param.step, &begin, &end, &step);
    MSHADOW_TYPE_SWITCH(ograd.type_flag_, DType, {MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      // index_t (not int): avoids overflow for large tensors and matches the
      // thread-count computation in SliceOpForward
      index_t num_threads = ograd.shape_.FlatTo2D()[0];
      if (std::is_same<xpu, gpu>::value) {
        num_threads *= ograd.shape_.get<ndim>()[ndim - 1];
      }
      mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(
          s,
          num_threads,
          igrad.dptr<DType>(),
          ograd.dptr<DType>(),
          igrad.shape_.get<ndim>(),
          ograd.shape_.get<ndim>(),
          begin,
          step);
    })})
  })
}
/*!
 * \brief Shape inference for slice_assign (data[slice] = val): the output
 *        shape equals the data shape, and the value input must match the
 *        shape of the sliced region.
 */
inline bool SliceAssignOpShape(const nnvm::NodeAttrs& attrs,
                               mxnet::ShapeVector* in_attrs,
                               mxnet::ShapeVector* out_attrs) {
  CHECK_EQ(in_attrs->size(), 2U);
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& dshape = (*in_attrs)[0];
  if (!mxnet::ndim_is_known(dshape))
    return false;
  mxnet::TShape vshape = dshape;  // vshape is the value shape on the right hand side
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  MXNET_NDIM_SWITCH(dshape.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(dshape, param.begin, param.end, param.step, &begin, &end, &step);
    for (int i = 0; i < param.begin.ndim(); ++i) {
      const index_t b = begin[i], e = end[i], s = step[i];
      SetSliceOpOutputDimSize(dshape, i, b, e, s, &vshape);
    }
  })
  SHAPE_ASSIGN_CHECK(*in_attrs, 1, vshape);
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape);
  return true;
}
/*!
 * \brief Forward of slice_assign: copies `data` to `out` (unless writing in
 *        place), then scatters `val` into the sliced region of `out`.
 */
template <typename xpu>
void SliceAssignOpForward(const nnvm::NodeAttrs& attrs,
                          const OpContext& ctx,
                          const std::vector<TBlob>& inputs,
                          const std::vector<OpReqType>& req,
                          const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  CHECK_EQ(inputs.size(), 2U);  // data[index] = val, data and val are two inputs
  CHECK_EQ(outputs.size(), 1U);
  if (req[0] == kNullOp)
    return;
  Stream<xpu>* s    = ctx.get_stream<xpu>();
  const TBlob& data = inputs[0];
  const TBlob& val  = inputs[1];
  const TBlob& out  = outputs[0];
  if (req[0] == kWriteTo) {
    // start from a full copy of data; the slice region is overwritten below
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      Tensor<xpu, 1, DType> in  = inputs[0].FlatTo1D<xpu, DType>(s);
      Tensor<xpu, 1, DType> out = outputs[0].FlatTo1D<xpu, DType>(s);
      Copy(out, in, s);
    });
  } else if (req[0] != kWriteInplace) {
    LOG(FATAL) << "_slice_assign only supports kWriteTo and kWriteInplace";
  }
  const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
  MXNET_NDIM_SWITCH(data.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    bool zero_size_shape =
        GetIndexRange(data.shape_, param.begin, param.end, param.step, &begin, &end, &step);
    if (zero_size_shape) {
      return;  // slice_assign of zero-sized subspace needs no operation.
    }
    MSHADOW_TYPE_SWITCH(out.type_flag_, DType, {MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      // CPU kernel: one thread per value row; GPU: one per value element
      index_t num_threads = val.shape_.FlatTo2D()[0];
      if (std::is_same<xpu, gpu>::value) {
        num_threads *= val.shape_.get<ndim>()[ndim - 1];
      }
      mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(
          s,
          num_threads,
          out.dptr<DType>(),
          val.dptr<DType>(),
          out.shape_.get<ndim>(),
          val.shape_.get<ndim>(),
          begin,
          step);
    })})
  })
}
// Parameter struct for slice_assign_scalar: the scalar to write plus the
// slice range it is written into.
struct SliceAssignScalarParam : public dmlc::Parameter<SliceAssignScalarParam> {
  double scalar;  // value assigned to every element of the sliced region
  mxnet::Tuple<dmlc::optional<index_t>> begin, end;
  mxnet::Tuple<dmlc::optional<index_t>> step;
  DMLC_DECLARE_PARAMETER(SliceAssignScalarParam) {
    DMLC_DECLARE_FIELD(scalar).set_default(0).describe("The scalar value for assignment.");
    DMLC_DECLARE_FIELD(begin).describe(
        "starting indices for the slice operation, supports negative indices.");
    DMLC_DECLARE_FIELD(end).describe(
        "ending indices for the slice operation, supports negative indices.");
    DMLC_DECLARE_FIELD(step)
        .set_default(mxnet::Tuple<dmlc::optional<index_t>>())
        .describe("step for the slice operation, supports negative values.");
  }
};
// Shape inference for slice_assign_scalar: the output shape is exactly the
// (fully known) input data shape.
inline bool SliceAssignScalarOpShape(const nnvm::NodeAttrs& attrs,
                                     mxnet::ShapeVector* in_attrs,
                                     mxnet::ShapeVector* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& dshape = in_attrs->at(0);
  if (!shape_is_known(dshape)) {
    return false;
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape);
  return true;
}
// Kernel that writes a single scalar value into every element of the sliced
// region of `out`; one invocation handles one flattened row of the region.
template <int ndim>
struct slice_assign_scalar {
  // i is the i-th row after flattening out into 2D tensor
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t i,
                                  DType* out,
                                  const DType val,
                                  const OpReqType req,
                                  const mshadow::Shape<ndim> oshape,
                                  const mshadow::Shape<ndim> vshape,
                                  const common::StaticArray<index_t, ndim> begin,
                                  const common::StaticArray<index_t, ndim> step) {
    const index_t data_last_dim_size = oshape[ndim - 1];
    const index_t out_last_dim_size  = vshape[ndim - 1];
    const index_t step_last_dim      = step[ndim - 1];
    const index_t begin_last_dim     = begin[ndim - 1];
    for (index_t j = 0; j < out_last_dim_size; ++j) {
      index_t irow   = 0;  // row id of flattend 2D out
      index_t stride = 1;
      index_t idx    = i;
      // map the region row coordinate to the destination row offset
#pragma unroll
      for (int k = ndim - 2; k >= 0; --k) {
        irow += stride * ((idx % vshape[k]) * step[k] + begin[k]);
        idx /= vshape[k];
        stride *= oshape[k];
      }
      KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim], req, val);
    }
  }
};
/*!
 * \brief Forward pass of _slice_assign_scalar: copy the input to the output
 * (unless writing in place), then overwrite the sliced sub-region with the
 * scalar given in the parameters.
 */
template <typename xpu>
void SliceAssignScalarOpForward(const nnvm::NodeAttrs& attrs,
                                const OpContext& ctx,
                                const std::vector<TBlob>& inputs,
                                const std::vector<OpReqType>& req,
                                const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  using namespace mshadow;
  Stream<xpu>* s = ctx.get_stream<xpu>();
  const TBlob& data = inputs[0];
  const TBlob& out = outputs[0];
  if (req[0] == kWriteTo) {
    // Not in place: materialize a copy of the input first.
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      Tensor<xpu, 1, DType> in = inputs[0].FlatTo1D<xpu, DType>(s);
      Tensor<xpu, 1, DType> out = outputs[0].FlatTo1D<xpu, DType>(s);
      Copy(out, in, s);
    });
  } else if (req[0] != kWriteInplace) {
    LOG(FATAL) << "_crop_assign_scalar only supports kWriteTo and kWriteInplace";
  }
  mxnet::TShape vshape = data.shape_;
  const SliceAssignScalarParam& param = nnvm::get<SliceAssignScalarParam>(attrs.parsed);
  MXNET_NDIM_SWITCH(data.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    bool zero_size_shape =
        GetIndexRange(data.shape_, param.begin, param.end, param.step, &begin, &end, &step);
    if (zero_size_shape) {
      return;  // slice_assign of zero-sized subspace needs no operation.
    }
    // vshape becomes the shape of the sliced sub-region.
    for (index_t i = 0; i < param.begin.ndim(); ++i) {
      const index_t b = begin[i], e = end[i], s = step[i];
      SetSliceOpOutputDimSize(data.shape_, i, b, e, s, &vshape);
    }
    MSHADOW_TYPE_SWITCH_WITH_BOOL(out.type_flag_, DType, {
      mxnet_op::Kernel<slice_assign_scalar<ndim>, xpu>::Launch(s,
                                                               vshape.FlatTo2D()[0],
                                                               out.dptr<DType>(),
                                                               static_cast<DType>(param.scalar),
                                                               req[0],
                                                               out.shape_.get<ndim>(),
                                                               vshape.get<ndim>(),
                                                               begin,
                                                               step);
    })
  })
}
/*!
 * \brief Parameters of slice_axis: slice a single axis from `begin` (inclusive)
 * to `end` (exclusive). An unset `end` means "to the end of the axis".
 */
struct SliceAxisParam : public dmlc::Parameter<SliceAxisParam> {
  int axis;
  index_t begin;
  dmlc::optional<index_t> end;
  DMLC_DECLARE_PARAMETER(SliceAxisParam) {
    DMLC_DECLARE_FIELD(axis).describe("Axis along which to be sliced, supports negative indexes.");
    DMLC_DECLARE_FIELD(begin).describe(
        "The beginning index along the axis to be sliced, "
        " supports negative indexes.");
    DMLC_DECLARE_FIELD(end).describe(
        "The ending index along the axis to be sliced, "
        " supports negative indexes.");
  }
};
/*!
 * \brief Normalize and validate slice_axis parameters against an input shape.
 * \param param user-supplied axis/begin/end (negative values allowed)
 * \param ishape shape of the input tensor
 * \param axis out: axis normalized into [0, ishape.ndim())
 * \param begin out: begin index normalized into [0, axis_size)
 * \param end out: exclusive end index normalized into [0, axis_size]
 *
 * For a zero-sized axis both begin and end are forced to 0.
 */
inline void GetSliceAxisParams(const SliceAxisParam& param,
                               const mxnet::TShape& ishape,
                               int* axis,
                               index_t* begin,
                               index_t* end) {
  *axis = param.axis;
  if (*axis < 0) {
    *axis += ishape.ndim();  // wrap negative axis
  }
  // Message fixed: the check is >= 0, and "Recieved" was a typo.
  CHECK(*axis < ishape.ndim() && *axis >= 0)
      << "Transformed axis must be smaller than the source ndim and non-negative! Received "
         "axis="
      << param.axis << ", src_ndim=" << ishape.ndim() << ", transformed axis=" << *axis;
  index_t axis_size = static_cast<index_t>(ishape[*axis]);
  *begin = param.begin;
  *end = -1;
  if (*begin < 0) {
    *begin += axis_size;  // wrap negative begin
  }
  if (axis_size > 0) {
    if (!static_cast<bool>(param.end)) {
      *end = axis_size;  // unset end means "slice to the end"
    } else {
      *end = param.end.value();
      if (*end < 0) {
        *end += axis_size;  // wrap negative end
      }
    }
    CHECK(*end <= axis_size) << "Invalid end for end=" << *end << " as axis_size is " << axis_size;
    CHECK((*begin < *end)) << "Invalid begin, end, get begin=" << param.begin
                           << ", end=" << param.end;
  } else {
    // Zero-sized axis: the only valid slice is empty.
    *begin = 0;
    *end = 0;
  }
  CHECK(*end >= 0) << "Invalid begin, end, get begin=" << param.begin << ", end=" << param.end;
  CHECK(*begin >= 0) << "Invalid begin for begin=" << param.begin;
}
/*!
 * \brief Shape inference for slice_axis: output equals input shape except the
 * sliced axis, whose size becomes end - begin.
 */
inline bool SliceAxisShape(const nnvm::NodeAttrs& attrs,
                           mxnet::ShapeVector* in_attrs,
                           mxnet::ShapeVector* out_attrs) {
  const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  mxnet::TShape& ishape = (*in_attrs)[0];
  if (!mxnet::ndim_is_known(ishape))
    return false;
  int axis;
  index_t begin, end;
  GetSliceAxisParams(param, ishape, &axis, &begin, &end);
  if (!mxnet::dim_size_is_known(ishape, axis)) {
    // Sliced axis size is unknown: propagate what we have but report failure.
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, ishape);
    return false;
  }
  mxnet::TShape shape(ishape.ndim(), -1);
  for (int i = 0; i < ishape.ndim(); ++i) {
    if (i == axis) {
      shape[i] = static_cast<index_t>(end - begin);
    } else {
      shape[i] = ishape[i];
    }
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
  return shape_is_known(shape);
}
/*!
 * \brief Forward pass of slice_axis. The tensor is flattened to 2D when the
 * sliced axis is the last one, otherwise to 3D with the sliced axis in the
 * middle, and mshadow's slice<1> expression does the copy.
 */
template <typename xpu>
void SliceAxis(const nnvm::NodeAttrs& attrs,
               const OpContext& ctx,
               const std::vector<TBlob>& inputs,
               const std::vector<OpReqType>& req,
               const std::vector<TBlob>& outputs) {
  using namespace mshadow::expr;
  const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
  mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
  int axis;
  index_t begin, end;
  GetSliceAxisParams(param, inputs[0].shape_, &axis, &begin, &end);
  int ndim = outputs[0].ndim();
  if (axis + 1 == ndim) {
    // Slicing the innermost axis: a 2D view suffices.
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      mshadow::Tensor<xpu, 2, DType> in = inputs[0].FlatTo2D<xpu, DType>(s);
      mshadow::Tensor<xpu, 2, DType> out = outputs[0].FlatTo2D<xpu, DType>(s);
      ASSIGN_DISPATCH(out, req[0], slice<1>(in, begin, end));
    });
  } else {
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      mshadow::Tensor<xpu, 3, DType> in = inputs[0].FlatTo3D<xpu, DType>(axis, s);
      mshadow::Tensor<xpu, 3, DType> out = outputs[0].FlatTo3D<xpu, DType>(axis, s);
      ASSIGN_DISPATCH(out, req[0], slice<1>(in, begin, end));
    });
  }
}
// Backward pass of slice_axis: scatter the output gradient into the sliced region
/*!
 * \brief Backward pass of slice_axis: write (or accumulate) the incoming
 * gradient into the [begin, end) region of the input gradient along the
 * sliced axis; the rest of the input gradient is zeroed for kWriteTo.
 */
template <typename xpu>
void SliceAxisGrad_(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  if (outputs[0].shape_.Size() == 0) {
    return;
  }
  const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
  using namespace mshadow::op;
  using namespace mshadow::expr;
  mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
  int axis;
  index_t begin, end;
  GetSliceAxisParams(param, outputs[0].shape_, &axis, &begin, &end);
  int ndim = outputs[0].shape_.ndim();
  if (axis + 1 == ndim) {
    // Innermost axis: 2D view.
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      mshadow::Tensor<xpu, 2, DType> ograd = inputs[0].FlatTo2D<xpu, DType>(s);
      mshadow::Tensor<xpu, 2, DType> igrad = outputs[0].FlatTo2D<xpu, DType>(s);
      if (req[0] == kAddTo) {
        slice<1>(igrad, begin, end) += F<identity>(ograd);
      } else if (req[0] == kWriteTo) {
        // Zero first: elements outside the slice receive no gradient.
        igrad = 0.0f;
        slice<1>(igrad, begin, end) = F<identity>(ograd);
      } else {
        CHECK_EQ(req[0], kNullOp);
      }
    });
  } else {
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      mshadow::Tensor<xpu, 3, DType> ograd = inputs[0].FlatTo3D<xpu, DType>(axis, s);
      mshadow::Tensor<xpu, 3, DType> igrad = outputs[0].FlatTo3D<xpu, DType>(axis, s);
      if (req[0] == kAddTo) {
        slice<1>(igrad, begin, end) += F<identity>(ograd);
      } else if (req[0] == kWriteTo) {
        igrad = 0.0f;
        slice<1>(igrad, begin, end) = F<identity>(ograd);
      } else {
        CHECK_EQ(req[0], kNullOp);
      }
    });
  }
}
/*!
 * \brief Parameters of slice_like: which axes of the first input should be
 * sliced down to the sizes of the second input. Empty means all axes.
 */
struct SliceLikeParam : public dmlc::Parameter<SliceLikeParam> {
  mxnet::Tuple<int> axes;
  DMLC_DECLARE_PARAMETER(SliceLikeParam) {
    DMLC_DECLARE_FIELD(axes)
        .set_default(mxnet::Tuple<int>())
        .describe(
            "List of axes on which input data will be sliced according to the "
            "corresponding size of the second input. By default will slice on "
            "all axes. Negative axes are supported.");
  }
};
/*!
 * \brief Shape inference for slice_like: the output takes the second input's
 * size on each selected axis (all axes when `axes` is empty) and the first
 * input's size elsewhere.
 *
 * Fixes: error messages were missing a space before "exceeds" and referred to
 * "slice_axis" instead of "slice_like".
 */
inline bool SliceLikeShape(const nnvm::NodeAttrs& attrs,
                           mxnet::ShapeVector* in_attrs,
                           mxnet::ShapeVector* out_attrs) {
  const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 2U);
  CHECK_EQ(out_attrs->size(), 1U);
  mxnet::TShape& ishape = (*in_attrs)[0];
  mxnet::TShape& from_shape = (*in_attrs)[1];
  if (!mxnet::ndim_is_known(ishape) || !mxnet::ndim_is_known(from_shape)) {
    return false;
  }
  if (param.axes.ndim() == 0) {
    // No axes given: slice every axis, so both inputs must have equal ndim.
    CHECK_EQ(ishape.ndim(), from_shape.ndim())
        << "By default slice_like performs slice on all axes, but ndim mismatch "
           "for inputs: "
        << ishape.ndim() << " vs. " << from_shape.ndim();
    for (int i = 0; i < ishape.ndim(); ++i) {
      CHECK_GE(ishape[i], from_shape[i]) << "Slice axis " << i << " with size " << from_shape[i]
                                         << " exceeds limit of input with size " << ishape[i];
    }
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, from_shape);
  } else {
    mxnet::TShape shape(ishape);
    for (int i = 0; i < param.axes.ndim(); ++i) {
      int axis = param.axes[i];
      if (axis < 0) {
        axis += ishape.ndim();  // wrap negative axis
      }
      CHECK_GE(axis, 0) << "Slice axis: " << param.axes[i] << " too small";
      CHECK_GT(ishape.ndim(), axis)
          << "Slice axis: " << axis << " exceeds first input: " << ishape.ndim();
      CHECK_GT(from_shape.ndim(), axis)
          << "Slice axis: " << axis << " exceeds second input: " << from_shape.ndim();
      shape[axis] = from_shape[axis];
      CHECK_GE(ishape[axis], from_shape[axis])
          << "Slice axis " << axis << " with size " << from_shape[axis]
          << " exceeds limit of input with size " << ishape[axis];
    }
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
  }
  return true;
}
/*!
 * \brief Translate slice_like's axes into begin/end/step tuples usable by the
 * generic slice machinery: selected axes get [0, fshape[axis]) with step 1,
 * unselected axes keep unset (full-range) entries.
 * \param dshape shape of the data input
 * \param fshape shape of the shape-like (second) input
 * \param axes axes to slice; empty means all axes
 * \param param_begin/param_end/param_step out: per-axis ranges
 *
 * Fix: the fshape bound-check message said "first input"; it is the second.
 */
inline void SliceLikeInferRanges(const mxnet::TShape& dshape,
                                 const mxnet::TShape& fshape,
                                 const mxnet::Tuple<int>& axes,
                                 mxnet::Tuple<dmlc::optional<index_t>>* param_begin,
                                 mxnet::Tuple<dmlc::optional<index_t>>* param_end,
                                 mxnet::Tuple<dmlc::optional<index_t>>* param_step) {
  std::vector<dmlc::optional<index_t>> pb(dshape.ndim());
  std::vector<dmlc::optional<index_t>> pe(dshape.ndim());
  std::vector<dmlc::optional<index_t>> ps(dshape.ndim());
  if (axes.ndim() == 0) {
    // All axes are sliced down to fshape's sizes.
    for (int i = 0; i < dshape.ndim(); ++i) {
      pb[i] = 0;
      pe[i] = fshape[i];
      ps[i] = 1;
    }
  } else {
    for (int i = 0; i < axes.ndim(); ++i) {
      int axis = axes[i];
      if (axis < 0) {
        axis += dshape.ndim();  // wrap negative axis
      }
      CHECK_GE(axis, 0) << "Slice axis: " << axes[i] << " too small";
      CHECK_LT(axis, dshape.ndim())
          << "Slice axis: " << axis << " exceeds first input: " << dshape.ndim();
      CHECK_LT(axis, fshape.ndim())
          << "Slice axis: " << axis << " exceeds second input: " << fshape.ndim();
      pb[axis] = 0;
      pe[axis] = fshape[axis];
      ps[axis] = 1;
    }
  }
  *param_begin = mxnet::Tuple<dmlc::optional<index_t>>(pb.begin(), pb.end());
  *param_end = mxnet::Tuple<dmlc::optional<index_t>>(pe.begin(), pe.end());
  *param_step = mxnet::Tuple<dmlc::optional<index_t>>(ps.begin(), ps.end());
}
/*!
 * \brief Forward pass of slice_like: convert the axes parameter to
 * begin/end/step ranges and dispatch the generic slice_forward kernel.
 *
 * Fix: num_threads is now index_t (was int), matching the sibling
 * slice_assign launch and avoiding overflow for very large tensors,
 * since on GPU it is multiplied by the last-dimension size.
 */
template <typename xpu>
void SliceLikeForward(const nnvm::NodeAttrs& attrs,
                      const OpContext& ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  using namespace mshadow::expr;
  const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed);
  mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
  const TBlob& data = inputs[0];
  const TBlob& out = outputs[0];
  const mxnet::TShape& ishape = data.shape_;
  const mxnet::TShape& from_shape = inputs[1].shape_;
  mxnet::Tuple<dmlc::optional<index_t>> param_begin;
  mxnet::Tuple<dmlc::optional<index_t>> param_end;
  mxnet::Tuple<dmlc::optional<index_t>> param_step;
  SliceLikeInferRanges(ishape, from_shape, param.axes, &param_begin, &param_end, &param_step);
  MXNET_NDIM_SWITCH(data.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(data.shape_, param_begin, param_end, param_step, &begin, &end, &step);
    MSHADOW_TYPE_SWITCH(out.type_flag_, DType, {MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
                          // One thread per output row; on GPU one thread per element.
                          index_t num_threads = out.shape_.FlatTo2D()[0];
                          if (std::is_same<xpu, gpu>::value) {
                            num_threads *= out.shape_.get<ndim>()[ndim - 1];
                          }
                          mxnet_op::Kernel<slice_forward<ndim, Req, xpu>, xpu>::Launch(
                              s,
                              num_threads,
                              out.dptr<DType>(),
                              data.dptr<DType>(),
                              data.shape_.get<ndim>(),
                              out.shape_.get<ndim>(),
                              begin,
                              step);
                        })})
  })
}
/*!
 * \brief Backward pass of slice_like: scatter the output gradient back into
 * the first input's gradient via the slice_assign kernel; the second input
 * receives a zero gradient (it only contributes its shape).
 *
 * Fix: num_threads is now index_t (was int), matching the sibling
 * slice_assign launch and avoiding overflow on very large tensors.
 */
template <typename xpu>
void SliceLikeBackward(const nnvm::NodeAttrs& attrs,
                       const OpContext& ctx,
                       const std::vector<TBlob>& inputs,
                       const std::vector<OpReqType>& req,
                       const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 2U);
  CHECK_EQ(req.size(), 2U);
  using namespace mshadow;
  Stream<xpu>* s = ctx.get_stream<xpu>();
  if (req[1] != kNullOp && req[1] != kAddTo) {
    Fill(s, outputs[1], req[1], 0);  // Second input not relevant to gradients.
  }
  if (req[0] == kNullOp)
    return;
  const TBlob& ograd = inputs[0];
  const TBlob& igrad = outputs[0];
  const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed);
  if (req[0] == kWriteTo) {
    // Elements outside the sliced region get zero gradient.
    Fill(s, igrad, req[0], 0);
  } else if (req[0] == kWriteInplace) {
    LOG(FATAL) << "_slice_like_backward does not support kWriteInplace";
  }
  const mxnet::TShape& ishape = ograd.shape_;
  const mxnet::TShape& from_shape = outputs[1].shape_;
  mxnet::Tuple<dmlc::optional<index_t>> param_begin;
  mxnet::Tuple<dmlc::optional<index_t>> param_end;
  mxnet::Tuple<dmlc::optional<index_t>> param_step;
  SliceLikeInferRanges(ishape, from_shape, param.axes, &param_begin, &param_end, &param_step);
  MXNET_NDIM_SWITCH(ograd.ndim(), ndim, {
    common::StaticArray<index_t, ndim> begin, end, step;
    GetIndexRange(ograd.shape_, param_begin, param_end, param_step, &begin, &end, &step);
    MSHADOW_TYPE_SWITCH(ograd.type_flag_, DType, {MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
                          // One thread per gradient row; on GPU one per element.
                          index_t num_threads = ograd.shape_.FlatTo2D()[0];
                          if (std::is_same<xpu, gpu>::value) {
                            num_threads *= ograd.shape_.get<ndim>()[ndim - 1];
                          }
                          mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(
                              s,
                              num_threads,
                              igrad.dptr<DType>(),
                              ograd.dptr<DType>(),
                              igrad.shape_.get<ndim>(),
                              ograd.shape_.get<ndim>(),
                              begin,
                              step);
                        })})
  })
}
/*! \brief Parameters of clip: the inclusive clamp bounds [a_min, a_max]. */
struct ClipParam : public dmlc::Parameter<ClipParam> {
  real_t a_min, a_max;
  DMLC_DECLARE_PARAMETER(ClipParam) {
    DMLC_DECLARE_FIELD(a_min).describe("Minimum value");
    DMLC_DECLARE_FIELD(a_max).describe("Maximum value");
  }
  // Serialize the fields back to their string form for attr dictionaries.
  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
    std::ostringstream a_min_s, a_max_s;
    a_min_s << a_min;
    a_max_s << a_max;
    (*dict)["a_min"] = a_min_s.str();
    (*dict)["a_max"] = a_max_s.str();
  }
};
/*! \brief Elementwise kernel clamping each input value into [a_min, a_max]. */
struct clip {
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t i,
                                  DType* out,
                                  const DType* datas,
                                  const float a_min,
                                  const float a_max) {
    const DType value = datas[i];
    if (value > a_max) {
      // Saturate at the upper bound (checked first, as in the original).
      out[i] = a_max;
    } else {
      // NaN fails both comparisons and passes through unchanged.
      out[i] = (value < a_min) ? DType(a_min) : value;
    }
  }
};
/*! \brief Gradient of clip: pass the gradient inside [a_min, a_max], zero outside. */
struct clip_grad {
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t i,
                                  DType* out,
                                  const DType* grad,
                                  const DType* datas,
                                  const float a_min,
                                  const float a_max) {
    const DType value = datas[i];
    // Saturated elements contribute no gradient. NaN compares false on both
    // tests and therefore propagates the gradient, matching the forward pass.
    const bool clipped = (value > a_max) || (value < a_min);
    out[i] = clipped ? DType(0) : grad[i];
  }
};
/*! \brief Forward pass of clip: launch the elementwise clamp kernel. */
template <typename xpu>
void Clip(const nnvm::NodeAttrs& attrs,
          const OpContext& ctx,
          const std::vector<TBlob>& inputs,
          const std::vector<OpReqType>& req,
          const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  const ClipParam& param = nnvm::get<ClipParam>(attrs.parsed);
  CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_);
  Stream<xpu>* s = ctx.get_stream<xpu>();
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    mxnet_op::Kernel<mxnet::op::clip, xpu>::Launch(s,
                                                   outputs[0].Size(),
                                                   outputs[0].dptr<DType>(),
                                                   inputs[0].dptr<DType>(),
                                                   param.a_min,
                                                   param.a_max);
  });
}
/*!
 * \brief Sparse-storage entry point for clip: validates matching dtype and
 * non-default storage, then maps the dense Clip kernel over the stored values.
 */
template <typename xpu>
void ClipEx(const nnvm::NodeAttrs& attrs,
            const OpContext& ctx,
            const std::vector<NDArray>& inputs,
            const std::vector<OpReqType>& req,
            const std::vector<NDArray>& outputs) {
  CHECK_EQ(inputs[0].dtype(), outputs[0].dtype());
  CHECK_EQ(inputs[0].storage_type(), outputs[0].storage_type());
  CHECK_NE(inputs[0].storage_type(), kDefaultStorage);
  UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Clip<xpu>);
}
/*!
 * \brief Backward pass of clip: launch clip_grad over the output gradient
 * (inputs[0]) and the original data (inputs[1]).
 */
template <typename xpu>
void ClipGrad_(const nnvm::NodeAttrs& attrs,
               const OpContext& ctx,
               const std::vector<TBlob>& inputs,
               const std::vector<OpReqType>& req,
               const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mxnet_op;
  const ClipParam& param = nnvm::get<ClipParam>(attrs.parsed);
  CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_);
  Stream<xpu>* s = ctx.get_stream<xpu>();
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    Kernel<clip_grad, xpu>::Launch(s,
                                   outputs[0].Size(),
                                   outputs[0].dptr<DType>(),
                                   inputs[0].dptr<DType>(),
                                   inputs[1].dptr<DType>(),
                                   param.a_min,
                                   param.a_max);
  });
}
/*!
* \brief The parameters of the repeat operator include
* the number of repeating time and axis (optional).
* The parameters will be later used to deduce the
* output ndarray shape in bool RepeatShape() function.
*/
/*! \brief Parameters of repeat: repetition count and an optional axis. */
struct RepeatParam : public dmlc::Parameter<RepeatParam> {
  int repeats = 1;  // number of copies of each element
  dmlc::optional<int> axis;  // unset: operate on the flattened array
  DMLC_DECLARE_PARAMETER(RepeatParam) {
    DMLC_DECLARE_FIELD(repeats).describe("The number of repetitions for each element.");
    DMLC_DECLARE_FIELD(axis)
        .set_default(dmlc::optional<int>())
        .describe(
            "The axis along which to repeat values."
            " The negative numbers are interpreted counting from the backward."
            " By default, use the flattened input array,"
            " and return a flat output array.");
  }
  // Serialize the fields back to their string form for attr dictionaries.
  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
    std::ostringstream repeats_s, axis_s;
    repeats_s << repeats;
    axis_s << axis;
    (*dict)["repeats"] = repeats_s.str();
    (*dict)["axis"] = axis_s.str();
  }
};
/*!
* \brief Helper function for getting user input params for the operator repeat.
* Sanity check the user input values.
*/
/*!
 * \brief Extract repeats and axis from a RepeatParam and sanity-check them
 * against the input shape (repeats must be non-negative; a given axis,
 * after wrapping negatives, must lie within [0, ndim)).
 */
inline void GetRepeatParams(const RepeatParam& param,
                            const mxnet::TShape& ishape,
                            int* repeats,
                            dmlc::optional<int>* axisOpt) {
  *repeats = param.repeats;
  CHECK_GE(*repeats, 0) << "repeats cannot be a negative number";
  *axisOpt = param.axis;
  if (!static_cast<bool>(*axisOpt)) {
    return;  // no axis given: nothing further to validate
  }
  const int ndims = ishape.ndim();
  int normalized = axisOpt->value();
  if (normalized < 0) {
    normalized += ndims;  // wrap negative axes
  }
  CHECK(normalized >= 0 && normalized < ndims) << "axis = " << axisOpt->value() << " out of bounds";
}
/*!
 * \brief Shape inference for repeat. With an axis, that axis's size is
 * multiplied by repeats; without one, the output is a flat array of
 * size input.Size() * repeats. Zero repeats yield an empty 1-D shape.
 */
inline bool RepeatOpShape(const nnvm::NodeAttrs& attrs,
                          mxnet::ShapeVector* in_attrs,
                          mxnet::ShapeVector* out_attrs) {
  const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& ishape = (*in_attrs)[0];
  if (!mxnet::ndim_is_known(ishape)) {
    return false;
  }
  int repeats = 0;
  dmlc::optional<int> axisOpt;
  GetRepeatParams(param, ishape, &repeats, &axisOpt);
  // If 0 repeats, return an empty 1-dim, 0-size array
  if (0 == repeats) {
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape(1, 0));
    return true;
  }
  // If repeats > 0, multiply the size of the corresponding axis by repeats
  if (static_cast<bool>(axisOpt)) {
    int ndims = ishape.ndim();
    int axis = axisOpt.value();
    if (axis < 0) {
      axis += ndims;
    }
    mxnet::TShape shape(ishape.ndim(), -1);
    for (int i = 0; i < ishape.ndim(); ++i) {
      if (i == axis) {
        shape[i] = repeats * ishape[i];
      } else {
        shape[i] = ishape[i];
      }
    }
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
  } else {  // If axis is not input by user, return a flat 1D array of size = in.size*repeats
    mxnet::TShape shape(1, ishape.Size() * repeats);
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
  }
  return shape_is_known(out_attrs->at(0));
}
/*!
 * \brief Type inference for repeat: the single input and output share one
 * dtype, propagated in whichever direction is already known.
 */
inline bool RepeatOpType(const nnvm::NodeAttrs& attrs,
                         std::vector<int>* in_attrs,
                         std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  const int in_type = (*in_attrs)[0];
  if (in_type != -1) {
    // Input dtype known: forward it to the output.
    TYPE_ASSIGN_CHECK(*out_attrs, 0, in_type);
    return true;
  }
  const int out_type = (*out_attrs)[0];
  if (out_type != -1) {
    // Only the output dtype is known: propagate it backwards.
    TYPE_ASSIGN_CHECK(*in_attrs, 0, out_type);
  }
  return true;
}
/*!
* \brief Reshape the input and output tensors for
 * using broadcast_to to achieve the functionality
* of operator repeat.
* \return a pair of mxnet::TShape's, first is the reshaped
* input shape, second is the reshaped output shape.
*/
/*!
 * \brief Build the (input-reshape, broadcast-target) shape pair that lets
 * repeat be implemented with broadcast_to: a size-1 axis is inserted after
 * the repeat axis in the input view and set to `repeats` in the target.
 */
inline std::pair<mxnet::TShape, mxnet::TShape> ReshapeInputOutputForRepeatOp(
    const mxnet::TShape& ishape,
    const dmlc::optional<int>& axisOpt,
    const int repeats) {
  if (static_cast<bool>(axisOpt)) {
    int axis = axisOpt.value();
    int ndim = ishape.ndim();
    if (axis < 0) {
      axis += ndim;  // wrap negative axis
    }
    CHECK(axis >= 0 && axis < ishape.ndim()) << "Invalid input of axis";
    // reshape the input tensor by adding a dim at the (axis+1)-th dim
    mxnet::TShape rshape(ishape.ndim() + 1, 1);
    // the shape we want to broadcast to
    mxnet::TShape bshape(rshape.ndim(), 1);
    int i = 0;
    // Dims up to and including the repeat axis are copied verbatim.
    while (i <= axis) {
      rshape[i] = bshape[i] = ishape[i];
      ++i;
    }
    // Inserted axis: size 1 in the view, `repeats` in the broadcast target.
    rshape[i] = 1;
    bshape[i] = repeats;
    // Remaining dims shift right by one.
    while (i < ishape.ndim()) {
      rshape[i + 1] = ishape[i];
      bshape[i + 1] = ishape[i];
      ++i;
    }
    return std::make_pair(rshape, bshape);
  } else {
    // axis is not input by user
    // reshape the tensor into shape (ishape.Size(), 1)
    // then add one dim at axis = 1 and broadcast to
    // shape (ishape.Size(), repeats)
    mxnet::TShape rshape(2, 1);
    rshape[0] = ishape.Size();
    rshape[1] = 1;
    mxnet::TShape bshape(2, 1);
    bshape[0] = rshape[0];
    bshape[1] = repeats;
    return std::make_pair(rshape, bshape);
  }
}
/*!
 * \brief Forward pass of repeat, implemented by reshaping input and output
 * (see ReshapeInputOutputForRepeatOp) and delegating to BroadcastCompute.
 */
template <typename xpu>
void RepeatOpForward(const nnvm::NodeAttrs& attrs,
                     const OpContext& ctx,
                     const std::vector<TBlob>& inputs,
                     const std::vector<OpReqType>& req,
                     const std::vector<TBlob>& outputs) {
  const TBlob& iTBlob = inputs[0];
  const mxnet::TShape& ishape = iTBlob.shape_;
  if (!shape_is_known(ishape))
    return;
  int repeats = 0;
  dmlc::optional<int> axisOpt;
  const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed);
  GetRepeatParams(param, ishape, &repeats, &axisOpt);
  if (0 == repeats)
    return;  // zero repeats produce an empty output; nothing to compute
  std::pair<mxnet::TShape, mxnet::TShape> rshapes =
      ReshapeInputOutputForRepeatOp(ishape, axisOpt, repeats);
  // reshaped input tblob
  TBlob iblob(inputs[0].dptr_,
              rshapes.first,
              inputs[0].dev_mask(),
              inputs[0].type_flag_,
              inputs[0].dev_id());
  std::vector<TBlob> newInputs = {iblob};
  // reshaped output tblob
  TBlob oblob(outputs[0].dptr_,
              rshapes.second,
              outputs[0].dev_mask(),
              outputs[0].type_flag_,
              outputs[0].dev_id());
  std::vector<TBlob> newOutputs = {oblob};
  BroadcastCompute<xpu>(attrs, ctx, newInputs, req, newOutputs);
}
/*!
* \brief Compute the gradient of the loss function
* with respect to the input of the operator.
* Backpropagation is employed to implement the
* chain rule.
* \param inputs the gradient of the loss function
* with respect to the outputs of the operator
* \param outputs the gradient of the loss function
* with respect to the inputs of the operator
*/
/*!
 * \brief Backward pass of repeat: the gradient of broadcast is a sum-reduce,
 * so the reshaped output gradient is reduced back to the input's shape.
 */
template <typename xpu>
void RepeatOpBackward(const nnvm::NodeAttrs& attrs,
                      const OpContext& ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  const mxnet::TShape& oshape = outputs[0].shape_;
  if (!shape_is_known(oshape))
    return;
  int repeats = 0;
  dmlc::optional<int> axisOpt;
  const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed);
  GetRepeatParams(param, oshape, &repeats, &axisOpt);
  if (0 == repeats)
    return;  // empty forward output: no gradient flows
  std::pair<mxnet::TShape, mxnet::TShape> rshapes =
      ReshapeInputOutputForRepeatOp(oshape, axisOpt, repeats);
  // reshaped output grad tblob
  TBlob oblob(outputs[0].dptr_,
              rshapes.first,
              outputs[0].dev_mask(),
              outputs[0].type_flag_,
              outputs[0].dev_id());
  std::vector<TBlob> newOutputs = {oblob};
  // reshaped input grad tblob
  TBlob iblob(inputs[0].dptr_,
              rshapes.second,
              inputs[0].dev_mask(),
              inputs[0].type_flag_,
              inputs[0].dev_id());
  std::vector<TBlob> newInputs = {iblob};
// Sum-reduce the incoming gradient over the broadcast axis.
#if !defined(__CUDACC__)
  ReduceAxesComputeImpl<xpu, mshadow::red::sum, false, false>(
      ctx, newInputs, req, newOutputs, rshapes.first);
#else
  ReduceAxesRTCComputeImpl(
      ctx, newInputs, req, newOutputs, rshapes.first, "red::sum{}", nullptr, false);
#endif
}
/*! \brief Parameters of tile: per-axis repetition counts. */
struct TileParam : public dmlc::Parameter<TileParam> {
  mxnet::Tuple<int> reps;
  DMLC_DECLARE_PARAMETER(TileParam) {
    DMLC_DECLARE_FIELD(reps).describe(
        "The number of times for repeating the tensor a. Each dim size of reps"
        " must be a positive integer."
        " If reps has length d, the result will have dimension of max(d, a.ndim);"
        " If a.ndim < d, a is promoted to be d-dimensional by prepending new axes."
        " If a.ndim > d, reps is promoted to a.ndim by pre-pending 1's to it.");
  }
  // Serialize the field back to its string form for attr dictionaries.
  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
    std::ostringstream reps_s;
    reps_s << reps;
    (*dict)["reps"] = reps_s.str();
  }
};
/*!
 * \brief Shape inference for tile: align ishape and reps at the trailing
 * dimensions and multiply where both exist; the shorter one is implicitly
 * padded with 1's at the front.
 */
inline bool TileOpShape(const nnvm::NodeAttrs& attrs,
                        mxnet::ShapeVector* in_attrs,
                        mxnet::ShapeVector* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const TileParam& param = nnvm::get<TileParam>(attrs.parsed);
  const mxnet::TShape& ishape = (*in_attrs)[0];
  if (!shape_is_known(ishape)) {
    return false;
  }
  const mxnet::Tuple<int>& reps = param.reps;
  // If reps is empty, return a identical input array
  if (reps.ndim() == 0) {
    SHAPE_ASSIGN_CHECK(*out_attrs, 0, ishape);
    return true;
  }
  mxnet::TShape oshape(std::max(ishape.ndim(), reps.ndim()), -1);
  int i1 = ishape.ndim() - 1;
  int i2 = reps.ndim() - 1;
  // Walk both from the back; missing entries behave like a factor/size of 1.
  for (int i = oshape.ndim() - 1; i >= 0; --i) {
    if (i1 >= 0 && i2 >= 0) {
      oshape[i] = ishape[i1--] * reps[i2--];
    } else if (i1 >= 0) {
      oshape[i] = ishape[i1--];
    } else if (i2 >= 0) {
      oshape[i] = reps[i2--];
    }
  }
  // If reps contains 0s, oshape is a zero-size shape.
  // Need to distinguish between np_shape mode and legacy mode.
  if (!Imperative::Get()->is_np_shape()) {
    common::ConvertToNumpyShape(&oshape);
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
  return shape_is_known(oshape);
}
/*!
 * \brief Type inference for tile: input and output share a single dtype,
 * propagated forward when the input's is known, backward otherwise.
 */
inline bool TileOpType(const nnvm::NodeAttrs& attrs,
                       std::vector<int>* in_attrs,
                       std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  const int known_in = in_attrs->at(0);
  const int known_out = out_attrs->at(0);
  if (known_in != -1) {
    TYPE_ASSIGN_CHECK(*out_attrs, 0, known_in);
  } else if (known_out != -1) {
    TYPE_ASSIGN_CHECK(*in_attrs, 0, known_out);
  }
  return true;
}
/*!
* \brief Reshape the input and output tensors for
* using broadcast_to to achieve the functionality
* of operator tile.
* \return a pair of mxnet::TShape's, first is the reshaped
* input shape, second is the reshaped output shape.
*/
/*!
 * \brief Build the (input-reshape, broadcast-target) shape pair that lets
 * tile be implemented with broadcast_to: one size-1 axis is inserted before
 * each dimension in the input view and set to the matching rep count in the
 * broadcast target (missing ishape/reps entries act as 1).
 */
inline std::pair<mxnet::TShape, mxnet::TShape> ReshapeInputOutputForTileOp(
    const mxnet::TShape& ishape,
    const mxnet::Tuple<int>& reps) {
  if (reps.ndim() == 0) {
    return std::make_pair(ishape, ishape);
  }
  // The shape we want to broadcast to
  mxnet::TShape bshape(std::max(ishape.ndim(), reps.ndim()) * 2, 1);
  // The shape of the input tensor after adding new axes before each dim
  mxnet::TShape rshape(bshape.ndim(), 1);
  int i1 = ishape.ndim() - 1;
  int i2 = reps.ndim() - 1;
  // Even positions carry the rep factor, odd positions the original dims,
  // both consumed from the back.
  for (int i = bshape.ndim() - 1; i >= 0; --i) {
    if (0 == (i & 1)) {
      bshape[i] = (i2 >= 0 ? reps[i2--] : 1);
      rshape[i] = 1;
    } else {
      rshape[i] = bshape[i] = (i1 >= 0 ? ishape[i1--] : 1);
    }
  }
  return std::make_pair(rshape, bshape);
}
/*!
* \brief Implementation of tiling the input tensor a based
* on the user-input shape, reps.
* If a.ndim < reps.ndim, new axes are pre-pended to a. For example,
* the input tensor has shape (3,), and the reps is (2, 4); the input
* tensor would be reshaped to (1, 3).
* If a.ndim > reps.ndim, pre-pending 1's to reps. For example,
* the input tensor has shape (2, 3, 4, 5), and reps is (2, 2);
* the reps would be changed to (1, 1, 2, 2).
* Suppose we have a.ndim = reps.ndim now. To achieve tiling,
* we utilize the operator broadcast_to. For example, for a tensor
* of shape (2, 3, 4, 5) and reps (2, 8, 9, 3), we first reshape
* the tensor to the shape (1, 2, 1, 3, 1, 4, 1, 5) by adding
* one axis before each dimension. Then, we want to broadcast
* the new tensor to shape (2, 2, 8, 3, 9, 4, 3, 5). The final
* output tensor would have shape (2*2, 8*3, 9*4, 3*5).
*/
/*!
 * \brief Forward pass of tile, implemented by reshaping input and output
 * (see ReshapeInputOutputForTileOp) and delegating to BroadcastCompute.
 */
template <typename xpu>
void TileOpForward(const nnvm::NodeAttrs& attrs,
                   const OpContext& ctx,
                   const std::vector<TBlob>& inputs,
                   const std::vector<OpReqType>& req,
                   const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  if (inputs[0].Size() == 0)
    return;
  const mxnet::TShape& ishape = inputs[0].shape_;
  const mxnet::Tuple<int>& reps = nnvm::get<TileParam>(attrs.parsed).reps;
  // If any one of the number in reps is zero, return immediately
  for (int i = 0; i < reps.ndim(); ++i) {
    if (0 == reps[i])
      return;
  }
  std::pair<mxnet::TShape, mxnet::TShape> rshapes = ReshapeInputOutputForTileOp(ishape, reps);
  // reshaped input tblob
  TBlob iblob(inputs[0].dptr_,
              rshapes.first,
              inputs[0].dev_mask(),
              inputs[0].type_flag_,
              inputs[0].dev_id());
  std::vector<TBlob> newInputs = {iblob};
  // reshaped output tblob
  TBlob oblob(outputs[0].dptr_,
              rshapes.second,
              outputs[0].dev_mask(),
              outputs[0].type_flag_,
              outputs[0].dev_id());
  std::vector<TBlob> newOutputs = {oblob};
  BroadcastCompute<xpu>(attrs, ctx, newInputs, req, newOutputs);
}
/*!
* \brief Compute the gradient of the loss function
* with respect to the input of the operator.
* Backpropagation is employed to implement the
* chain rule.
* \param inputs the gradient of the loss function
* with respect to the outputs of the operator
* \param outputs the gradient of the loss function
* with respect to the inputs of the operator
*/
/*!
 * \brief Backward pass of tile: the gradient of broadcast is a sum-reduce,
 * so the reshaped output gradient is reduced back to the input's shape.
 */
template <typename xpu>
void TileOpBackward(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  if (inputs[0].Size() == 0)
    return;
  const mxnet::TShape& oshape = outputs[0].shape_;
  const mxnet::Tuple<int>& reps = nnvm::get<TileParam>(attrs.parsed).reps;
  // If any one of the number in reps is zero, return immediately
  for (int i = 0; i < reps.ndim(); ++i) {
    if (0 == reps[i])
      return;
  }
  std::pair<mxnet::TShape, mxnet::TShape> rshapes = ReshapeInputOutputForTileOp(oshape, reps);
  // reshaped output grad tblob
  TBlob oblob(outputs[0].dptr_,
              rshapes.first,
              outputs[0].dev_mask(),
              outputs[0].type_flag_,
              outputs[0].dev_id());
  std::vector<TBlob> newOutputs = {oblob};
  // reshaped input grad tblob
  TBlob iblob(inputs[0].dptr_,
              rshapes.second,
              inputs[0].dev_mask(),
              inputs[0].type_flag_,
              inputs[0].dev_id());
  std::vector<TBlob> newInputs = {iblob};
// Sum-reduce the incoming gradient over the broadcast (rep) axes.
#if !defined(__CUDACC__)
  ReduceAxesComputeImpl<xpu, mshadow::red::sum, false, false>(
      ctx, newInputs, req, newOutputs, rshapes.first);
#else
  ReduceAxesRTCComputeImpl(
      ctx, newInputs, req, newOutputs, rshapes.first, "red::sum{}", nullptr, false);
#endif
}
/*! \brief Parameters of reverse: the axes along which elements are reversed. */
struct ReverseParam : public dmlc::Parameter<ReverseParam> {
  mxnet::Tuple<int> axis;
  DMLC_DECLARE_PARAMETER(ReverseParam) {
    DMLC_DECLARE_FIELD(axis).describe("The axis which to reverse elements.");
  }
};
#define REVERSE_MAX_DIM 10U
/*!
 * \brief Kernel reversing a tensor along one or more axes: each source index
 * is mapped to its mirrored destination index. The CUDA path stages the
 * per-axis stride/trailing tables in shared memory.
 */
struct reverse {
  // Map a flat index to the flat index of its mirror along the reversed axes.
  // stride_[i] is the size of reversed axis i; trailing_[i] the product of the
  // dimensions after it.
  MSHADOW_XINLINE static index_t ReverseIndex(index_t idx,
                                              index_t nreversedim,
                                              const index_t* stride_,
                                              const index_t* trailing_) {
    index_t outputIndex = idx;
    for (index_t i = 0; i < nreversedim; ++i) {
      const index_t low = outputIndex % trailing_[i];
      index_t high = outputIndex / trailing_[i];
      const index_t x = high % stride_[i];
      high /= stride_[i];
      // Replace coordinate x on this axis with (size - 1 - x).
      outputIndex = (high * stride_[i] + stride_[i] - 1 - x) * trailing_[i] + low;
    }
    return outputIndex;
  }
#ifdef __CUDACC__
  template <typename DType>
  __device__ static void Map(index_t index,
                             index_t nreversedim,
                             const DType* src,
                             DType* dst,
                             const index_t* stride_,
                             const index_t* trailing_) {
    // Cache the small lookup tables in shared memory once per block.
    __shared__ index_t stride_share[REVERSE_MAX_DIM];
    __shared__ index_t trailing_share[REVERSE_MAX_DIM];
    if (threadIdx.x < REVERSE_MAX_DIM) {
      stride_share[threadIdx.x] = stride_[threadIdx.x];
      trailing_share[threadIdx.x] = trailing_[threadIdx.x];
    }
    __syncthreads();
    index_t new_idx = ReverseIndex(index, nreversedim, stride_share, trailing_share);
    dst[new_idx] = src[index];
  }
#else
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t index,
                                  index_t nreversedim,
                                  const DType* src,
                                  DType* dst,
                                  const index_t* stride_,
                                  const index_t* trailing_) {
    index_t new_idx = ReverseIndex(index, nreversedim, stride_, trailing_);
    dst[new_idx] = src[index];
  }
#endif
};
/*!
 * \brief Forward pass of reverse: build per-axis stride/trailing tables on the
 * host, copy them to device memory when compiled for CUDA, and launch the
 * reverse kernel over all elements.
 */
template <typename xpu>
void ReverseOpForward(const nnvm::NodeAttrs& attrs,
                      const OpContext& ctx,
                      const std::vector<TBlob>& inputs,
                      const std::vector<OpReqType>& req,
                      const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mxnet_op;
  const ReverseParam& param = nnvm::get<ReverseParam>(attrs.parsed);
  CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_);
  CHECK_LT(param.axis.ndim(), REVERSE_MAX_DIM);
  Stream<xpu>* s = ctx.get_stream<xpu>();
  const mxnet::TShape& ishape = inputs[0].shape_;
  std::vector<index_t> stride_(param.axis.ndim());
  std::vector<index_t> trailing_(param.axis.ndim());
  index_t reverse_index = 0;
  // For each reversed axis record its size and the product of the sizes of
  // all later dimensions (its trailing stride).
  for (int axis : param.axis) {
    CHECK_LT(axis, ishape.ndim());
    stride_[reverse_index] = ishape[axis];
    trailing_[reverse_index] = 1;
    for (int i2 = axis + 1; i2 < ishape.ndim(); ++i2) {
      trailing_[reverse_index] *= ishape[i2];
    }
    reverse_index++;
  }
#ifdef __CUDACC__
  // Device kernels cannot read host vectors: stage both tables in a single
  // requested workspace buffer and copy them over asynchronously.
  mshadow::Tensor<xpu, 1, uint8_t> workspace = ctx.requested[0].get_space_typed<xpu, 1, uint8_t>(
      mshadow::Shape1(reverse_index * sizeof(index_t) * 2), s);
  auto stride_workspace = workspace.dptr_;
  auto trailing_workspace = workspace.dptr_ + reverse_index * sizeof(index_t);
  cudaMemcpyAsync(stride_workspace,
                  thrust::raw_pointer_cast(stride_.data()),
                  stride_.size() * sizeof(index_t),
                  cudaMemcpyHostToDevice,
                  mshadow::Stream<gpu>::GetStream(s));
  cudaMemcpyAsync(trailing_workspace,
                  thrust::raw_pointer_cast(trailing_.data()),
                  trailing_.size() * sizeof(index_t),
                  cudaMemcpyHostToDevice,
                  mshadow::Stream<gpu>::GetStream(s));
#endif
#ifdef __CUDACC__
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    Kernel<reverse, xpu>::Launch(s,
                                 inputs[0].Size(),
                                 reverse_index,
                                 inputs[0].dptr<DType>(),
                                 outputs[0].dptr<DType>(),
                                 reinterpret_cast<index_t*>(stride_workspace),
                                 reinterpret_cast<index_t*>(trailing_workspace));
  });
#else
  MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
    Kernel<reverse, xpu>::Launch(s,
                                 inputs[0].Size(),
                                 reverse_index,
                                 inputs[0].dptr<DType>(),
                                 outputs[0].dptr<DType>(),
                                 stride_.data(),
                                 trailing_.data());
  });
#endif
}
// Parameter holder for the stack operator: the join axis and the number of
// arrays being stacked.
struct StackParam : public dmlc::Parameter<StackParam> {
  int axis;      // axis in the result along which inputs are stacked
  int num_args;  // number of input arrays
  DMLC_DECLARE_PARAMETER(StackParam) {
    DMLC_DECLARE_FIELD(axis).set_default(0).describe(
        "The axis in the result array along which the input arrays are stacked.");
    DMLC_DECLARE_FIELD(num_args).set_lower_bound(1).describe("Number of inputs to be stacked.");
  }
  // Serializes the fields to strings for attribute export.
  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
    std::ostringstream axis_s, num_args_s;
    axis_s << axis;
    num_args_s << num_args;
    (*dict)["axis"] = axis_s.str();
    (*dict)["num_args"] = num_args_s.str();
  }
};
// Infers the stack output shape: all inputs share one common shape and the
// result gains one extra axis of length num_args at position `axis`.
inline bool StackOpShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector* in_attrs,
                         mxnet::ShapeVector* out_attrs) {
  const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
  // Unify every input against one common shape.
  mxnet::TShape common;
  for (const mxnet::TShape& shape : *in_attrs) {
    shape_assign(&common, shape);
  }
  if (!shape_is_known(common)) {
    return false;
  }
  mxnet::TShape result(common.ndim() + 1, -1);
  const int axis = CheckAxis(param.axis, result.ndim());
  for (int d = 0; d < result.ndim(); ++d) {
    if (d < axis) {
      result[d] = common[d];
    } else if (d == axis) {
      result[d] = param.num_args;
    } else {
      result[d] = common[d - 1];
    }
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, result);
  return shape_is_known(result);
}
// Forward pass of stack: views the output as (leading, stacked, trailing)
// and concatenates each input as a unit-width slice along the middle axis.
template <typename xpu>
void StackOpForward(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
  const int axis = CheckAxis(param.axis, outputs[0].ndim());
  Stream<xpu>* s = ctx.get_stream<xpu>();
  MSHADOW_TYPE_SWITCH_WITH_BOOL(outputs[0].type_flag_, DType, {
    // Collapse all axes before / after the stack axis into head / tail.
    size_t head = 1, tail = 1;
    for (int d = 0; d < axis; ++d) {
      head *= outputs[0].shape_[d];
    }
    for (int d = axis + 1; d < outputs[0].ndim(); ++d) {
      tail *= outputs[0].shape_[d];
    }
    const size_t stacked = outputs[0].shape_[axis];
    Tensor<xpu, 3, DType> out =
        outputs[0].get_with_shape<xpu, 3, DType>(Shape3(head, stacked, tail), s);
    std::vector<Tensor<xpu, 3, DType>> parts(inputs.size());
    for (size_t n = 0; n < inputs.size(); ++n) {
      parts[n] = inputs[n].get_with_shape<xpu, 3, DType>(Shape3(head, 1, tail), s);
    }
    Concatenate(parts, &out, 1, req[0]);
  })
}
// Backward pass of stack: splits the incoming gradient along the stacked
// axis, routing one unit-width slice to each input gradient.
template <typename xpu>
void StackOpBackward(const nnvm::NodeAttrs& attrs,
                     const OpContext& ctx,
                     const std::vector<TBlob>& inputs,
                     const std::vector<OpReqType>& req,
                     const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
  const int axis = CheckAxis(param.axis, inputs[0].ndim());
  Stream<xpu>* s = ctx.get_stream<xpu>();
  MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[0].type_flag_, DType, {
    // Collapse all axes before / after the stack axis into head / tail.
    size_t head = 1, tail = 1;
    for (int d = 0; d < axis; ++d) {
      head *= inputs[0].shape_[d];
    }
    for (int d = axis + 1; d < inputs[0].ndim(); ++d) {
      tail *= inputs[0].shape_[d];
    }
    const size_t stacked = inputs[0].shape_[axis];
    Tensor<xpu, 3, DType> grad =
        inputs[0].get_with_shape<xpu, 3, DType>(Shape3(head, stacked, tail), s);
    std::vector<Tensor<xpu, 3, DType>> grad_in(outputs.size());
    for (size_t n = 0; n < outputs.size(); ++n) {
      grad_in[n] = outputs[n].get_with_shape<xpu, 3, DType>(Shape3(head, 1, tail), s);
    }
    Split(grad, &grad_in, 1, req);
  })
}
// Parameter holder for the squeeze operator: the optional subset of unit
// axes to remove (all unit axes when unset).
struct SqueezeParam : public dmlc::Parameter<SqueezeParam> {
  dmlc::optional<mxnet::Tuple<int>> axis;
  DMLC_DECLARE_PARAMETER(SqueezeParam) {
    DMLC_DECLARE_FIELD(axis)
        .set_default(dmlc::optional<mxnet::Tuple<int>>())
        .describe(
            "Selects a subset of the single-dimensional entries in the shape."
            " If an axis is selected with shape entry greater than one, an error is raised.");
  }
  // Serializes the field to a string for attribute export.
  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
    std::ostringstream axis_s;
    axis_s << axis;
    (*dict)["axis"] = axis_s.str();
  }
};
// Given a shape that may contain dim sizes equal to -1 (used by SqueezeShape
// as a "squeeze this axis" sentinel), move all the -1 entries to the end of
// the shape array while keeping the relative order of the remaining values.
// Returns the number of kept (non -1) entries, i.e. the squeezed ndim.
inline size_t SqueezeShapeHelper(mxnet::TShape* shape) {
  CHECK(shape != nullptr);
  size_t count = 0;  // number of -1 sentinels seen so far
  for (int i = 0; i < shape->ndim(); ++i) {
    if ((*shape)[i] == -1) {
      ++count;
    } else {
      // Shift the kept dimension left, over the sentinels seen so far.
      std::swap((*shape)[i], (*shape)[i - count]);
    }
  }
  return shape->ndim() - count;
}
// Infers the output shape of squeeze: removes axes of size 1, either the
// user-selected subset (param.axis) or all of them when no axis is given.
inline bool SqueezeShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector* in_attrs,
                         mxnet::ShapeVector* out_attrs) {
  const SqueezeParam& param = nnvm::get<SqueezeParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]";
  CHECK_EQ(out_attrs->size(), 1U);
  const mxnet::TShape& dshape = in_attrs->at(0);
  const int dndim = dshape.ndim();
  if (!shape_is_known(dshape))
    return false;
  mxnet::TShape oshape = dshape;
  if (param.axis.has_value()) {
    // preprocess axis: normalize negatives, validate bounds and unit size,
    // then mark each selected axis with the -1 sentinel understood by
    // SqueezeShapeHelper.
    mxnet::Tuple<int> axes = param.axis.value();
    for (int i = 0; i < axes.ndim(); ++i) {
      if (axes[i] < 0) {
        axes[i] += dndim;
        CHECK_GE(axes[i], 0) << "axis " << axes[i] - dndim
                             << " is out of bounds for array of dimension " << dndim;
      }
      CHECK_LT(axes[i], dndim) << "axis " << axes[i] << " is out of bounds for array of dimension "
                               << dndim;
      CHECK_EQ(dshape[axes[i]], 1)
          << "cannot select an axis to squeeze out which has size=" << dshape[axes[i]]
          << " not equal to one";
      CHECK_NE(oshape[axes[i]], -1) << "duplicate value in axis";
      oshape[axes[i]] = -1;
    }
  } else {
    // No axis given: squeeze every unit dimension.
    for (int i = 0; i < oshape.ndim(); ++i) {
      if (oshape[i] == 1)
        oshape[i] = -1;
    }
  }
  size_t oshape_size = SqueezeShapeHelper(&oshape);
  if (oshape_size == 0) {  // corner case when dshape is (1, 1, 1, 1)
    oshape[0] = 1;
    oshape_size = 1;
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape(oshape.data(), oshape.data() + oshape_size));
  return true;
}
// Parameter holder shared by depth_to_space and space_to_depth: the spatial
// block edge length.
struct DepthToSpaceParam : public dmlc::Parameter<DepthToSpaceParam> {
  int block_size;
  DMLC_DECLARE_PARAMETER(DepthToSpaceParam) {
    // Fixed typo in the user-facing description:
    // "[block_size. block_size]" -> "[block_size, block_size]".
    DMLC_DECLARE_FIELD(block_size).describe("Blocks of [block_size, block_size] are moved");
  }
};
// Infers the output shape of depth_to_space on an NCHW 4D tensor:
// (N, C, H, W) -> (N, C / block^2, H * block, W * block).
inline bool DepthToSpaceOpShape(const nnvm::NodeAttrs& attrs,
                                mxnet::ShapeVector* in_attrs,
                                mxnet::ShapeVector* out_attrs) {
  const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  CHECK_EQ(in_attrs->at(0).ndim(), 4) << "Operation Depth To Space requires exactly 4D tensor";
  mxnet::TShape expected_out(4, -1);
  mxnet::TShape& in_shape = in_attrs->at(0);
  if (!mxnet::ndim_is_known(in_shape)) {
    return false;
  }
  int block = param.block_size;
  // CHECK_GT (was CHECK_NE against 0): a negative block_size also violates
  // the documented "positive integer" contract and must be rejected.
  CHECK_GT(block, 0) << "block_size must be a positive integer value";
  CHECK_NE(in_shape[1], 0) << "Depth dimension:1 cannot be 0";
  CHECK_EQ(in_shape[1] % (block * block), 0)
      << "Cannot perform Depth To Space operation on the specified tensor."
         " Dimension:1(depth dimension) should be a multiple of 'block^2'";
  CHECK_NE(in_shape[0], 0) << "Operation requires a 4D tensor. Size of dimension:0 cannot be 0";
  CHECK_NE(in_shape[2], 0) << "Operation requires a 4D tensor. Size of dimension:2 cannot be 0";
  CHECK_NE(in_shape[3], 0) << "Operation requires a 4D tensor. Size of dimension:3 cannot be 0";
  expected_out[0] = in_shape[0];
  expected_out[1] = in_shape[1] / (block * block);  // depth shrinks by block^2
  for (int i = 2; i < expected_out.ndim(); ++i) {
    expected_out[i] = in_shape[i] * block;  // spatial dims grow by block
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, expected_out);
  return shape_is_known(expected_out);
}
// Infers dtype for depth_to_space: output dtype equals input dtype,
// propagated in both directions so either side can seed the other.
inline bool DepthToSpaceOpType(const nnvm::NodeAttrs& attrs,
                               std::vector<int>* in_attrs,
                               std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
  TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
  return out_attrs->at(0) != -1;
}
/*!
 * \brief Peels one coordinate off *idx (interpreting it as a digit in base
 * dim_size) and advances *inp_index by that coordinate times the stride of
 * the chosen input dimension.
 * \param index_position index within offset_arr selecting the stride to use
 * \param dim_size size of the current dimension
 * \param idx remaining output tensor index; divided down on return
 * \param inp_index accumulated linear index into the input tensor
 * \param offset_arr array containing the linear strides of the input tensor
 */
MSHADOW_XINLINE void update_index(index_t index_position,
                                  index_t dim_size,
                                  index_t* idx,
                                  index_t* inp_index,
                                  const index_t* offset_arr) {
  const index_t coord = *idx % dim_size;
  *inp_index += coord * offset_arr[index_position];
  *idx /= dim_size;
}
/*!
 * \brief This function performs the tensor transpose (0, 1, 2, 3, 4, 5) ->
 * (0, 3, 4, 1, 5, 2) by computing the linear index within the input tensor
 * to be mapped to the ith index of the output tensor
 * \param i tensor index
 * \param out_data output tensor
 * \param in_data input tensor
 * \param block size of chunks to be moved out of depth dimension
 * \param size array containing the size of each dimension of input tensor
 * \param offset_arr array containing the linear offset of input tensor
 */
template <int req>
struct depth_to_space_forward {
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t i,
                                  DType* out_data,
                                  const DType* in_data,
                                  const int block,
                                  const index_t* size,
                                  const index_t* offset_arr) {
    // Peel output-index digits from fastest to slowest varying; the
    // offset_arr position chosen for each digit realizes the permutation.
    index_t inp_index = 0, idx = i, dim_size;
    dim_size = block;
    update_index(2, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[3];
    update_index(5, dim_size, &idx, &inp_index, offset_arr);
    dim_size = block;
    update_index(1, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[2];
    update_index(4, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[1] / (block * block);
    update_index(3, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[0];
    update_index(0, dim_size, &idx, &inp_index, offset_arr);
    KERNEL_ASSIGN(out_data[i], req, in_data[inp_index]);
  }
};
/*!
 * \brief This function calculates the linear offset for each dimension of
 * the input tensor and stores them in an array, which is later used in
 * performing the depth_to_space operation.  Intended to be launched with a
 * single thread (i is unused).
 * \param i global thread id
 * \param offset_arr array to be populated with offset values
 * \param size array to be populated with size of each dimension of input tensor
 * \param block size of chunks to be moved out of depth dimension
 * \param size0 size of Dim 0 of input tensor
 * \param size1 size of Dim 1 of input tensor
 * \param size2 size of Dim 2 of input tensor
 * \param size3 size of Dim 3 of input tensor
 */
template <int req>
struct compute_offset_for_depth_to_space {
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t i,
                                  DType* offset_arr,
                                  DType* size,
                                  const int block,
                                  const index_t size0,
                                  const index_t size1,
                                  const index_t size2,
                                  const index_t size3) {
    size[0] = size0;
    size[1] = size1;
    size[2] = size2;
    size[3] = size3;
    // Strides of the virtual 6D view (N, C/b^2, b, b, H, W), built from the
    // innermost dimension outward.
    offset_arr[5] = 1;
    offset_arr[4] = offset_arr[5] * size[3];
    offset_arr[3] = offset_arr[4] * size[2];
    offset_arr[2] = offset_arr[3] * size[1] / (block * block);
    offset_arr[1] = offset_arr[2] * block;
    offset_arr[0] = offset_arr[1] * block;
  }
};
// Forward pass of depth_to_space: computes per-dimension strides into a
// small workspace, then launches the permutation kernel over the output.
template <typename xpu>
void DepthToSpaceOpForward(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const std::vector<TBlob>& inputs,
                           const std::vector<OpReqType>& req,
                           const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
  const TBlob& in_data = inputs[0];
  const TBlob& out_data = outputs[0];
  const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
  using namespace mxnet_op;
  int block = param.block_size;
  // Workspace layout: 6 stride entries followed by 4 dimension sizes.
  mshadow::Tensor<xpu, 1, char> workspace =
      ctx.requested[0].get_space_typed<xpu, 1, char>(mshadow::Shape1(sizeof(index_t) * 10), s);
  char* workspace_curr_ptr = workspace.dptr_;
  index_t* offset_arr = reinterpret_cast<index_t*>(workspace_curr_ptr);
  index_t* size = reinterpret_cast<index_t*>(workspace_curr_ptr + sizeof(index_t) * 6);
  MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, {
    MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
      // Single-thread launch to fill the stride/size tables on the device.
      Kernel<compute_offset_for_depth_to_space<req_type>, xpu>::Launch(s,
                                                                      1,
                                                                      offset_arr,
                                                                      size,
                                                                      block,
                                                                      in_data.shape_[0],
                                                                      in_data.shape_[1],
                                                                      in_data.shape_[2],
                                                                      in_data.shape_[3]);
      Kernel<depth_to_space_forward<req_type>, xpu>::Launch(s,
                                                            out_data.Size(),
                                                            out_data.dptr<DType>(),
                                                            in_data.dptr<DType>(),
                                                            block,
                                                            size,
                                                            offset_arr);
    });
  });
}
// Infers the output shape of space_to_depth on an NCHW 4D tensor:
// (N, C, H, W) -> (N, C * block^2, H / block, W / block).
inline bool SpaceToDepthOpShape(const nnvm::NodeAttrs& attrs,
                                mxnet::ShapeVector* in_attrs,
                                mxnet::ShapeVector* out_attrs) {
  const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  CHECK_EQ(in_attrs->at(0).ndim(), 4) << "Operation Space To Depth requires exactly 4D tensor";
  mxnet::TShape expected_out(in_attrs->at(0).ndim(), -1);
  mxnet::TShape& in_shape = in_attrs->at(0);
  if (!mxnet::ndim_is_known(in_shape)) {
    return false;
  }
  int block = param.block_size;
  // CHECK_GT (was CHECK_NE against 0): a negative block_size also violates
  // the documented "positive integer" contract and must be rejected.
  CHECK_GT(block, 0) << "block_size must be a positive integer value";
  CHECK_NE(in_shape[0], 0) << "Operation requires a 4D tensor. Size of dimension:0 cannot be 0";
  CHECK_NE(in_shape[1], 0) << "Depth dimension:1 cannot be 0";
  CHECK_NE(in_shape[2], 0) << "Operation requires a 4D tensor. Size of dimension:2 cannot be 0";
  // Error messages corrected: this is Space To Depth, not Depth To Space.
  CHECK_EQ(in_shape[2] % block, 0)
      << "Cannot perform Space To Depth operation on the specified tensor."
         " Dimension:2(1st Space dimension) should be a multiple of 'block' ";
  CHECK_NE(in_shape[3], 0) << "Operation requires a 4D tensor. Size of dimension:3 cannot be 0";
  CHECK_EQ(in_shape[3] % block, 0)
      << "Cannot perform Space To Depth operation on the specified tensor."
         " Dimension:3(2nd space dimension) should be a multiple of 'block' ";
  expected_out[0] = in_shape[0];
  expected_out[1] = in_shape[1] * block * block;  // depth grows by block^2
  for (int i = 2; i < expected_out.ndim(); ++i) {
    expected_out[i] = in_shape[i] / block;  // spatial dims shrink by block
  }
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, expected_out);
  return shape_is_known(expected_out);
}
// Infers dtype for space_to_depth: output dtype equals input dtype,
// propagated in both directions so either side can seed the other.
inline bool SpaceToDepthOpType(const nnvm::NodeAttrs& attrs,
                               std::vector<int>* in_attrs,
                               std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
  TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
  return out_attrs->at(0) != -1;
}
/*!
 * \brief This function performs the tensor transpose (0, 1, 2, 3, 4, 5) ->
 * (0, 3, 5, 1, 2, 4) by computing the linear index within the input tensor
 * to be mapped to the ith index of the output tensor
 * \param i tensor index
 * \param out_data output tensor
 * \param in_data input tensor
 * \param block size of chunks to be moved out of depth dimension
 * \param size array containing the size of each dimension of input tensor
 * \param offset_arr array containing the linear offset of input tensor
 */
template <int req>
struct space_to_depth_forward {
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t i,
                                  DType* out_data,
                                  const DType* in_data,
                                  const int block,
                                  const index_t* size,
                                  const index_t* offset_arr) {
    // Peel output-index digits from fastest to slowest varying; the
    // offset_arr position chosen for each digit realizes the permutation.
    index_t inp_index = 0, idx = i, dim_size;
    dim_size = size[3] / block;
    update_index(4, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[2] / block;
    update_index(2, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[1];
    update_index(1, dim_size, &idx, &inp_index, offset_arr);
    dim_size = block;
    update_index(5, dim_size, &idx, &inp_index, offset_arr);
    dim_size = block;
    update_index(3, dim_size, &idx, &inp_index, offset_arr);
    dim_size = size[0];
    update_index(0, dim_size, &idx, &inp_index, offset_arr);
    KERNEL_ASSIGN(out_data[i], req, in_data[inp_index]);
  }
};
/*!
 * \brief This function calculates the linear offset for each dimension of
 * the input tensor and stores them in an array, which is later used in
 * performing the space_to_depth operation.  Intended to be launched with a
 * single thread (i is unused).
 * \param i global thread id
 * \param offset_arr array to be populated with offset values
 * \param size array to be populated with size of each dimension of input tensor
 * \param block size of chunks to be moved out of depth dimension
 * \param size0 size of Dim 0 of input tensor
 * \param size1 size of Dim 1 of input tensor
 * \param size2 size of Dim 2 of input tensor
 * \param size3 size of Dim 3 of input tensor
 */
template <int req>
struct compute_offset_for_space_to_depth {
  template <typename DType>
  MSHADOW_XINLINE static void Map(index_t i,
                                  DType* offset_arr,
                                  DType* size,
                                  const int block,
                                  const index_t size0,
                                  const index_t size1,
                                  const index_t size2,
                                  const index_t size3) {
    size[0] = size0;
    size[1] = size1;
    size[2] = size2;
    size[3] = size3;
    // Strides of the virtual 6D view (N, C, H/b, b, W/b, b), built from the
    // innermost dimension outward.
    offset_arr[5] = 1;
    offset_arr[4] = offset_arr[5] * block;
    offset_arr[3] = offset_arr[4] * size[3] / block;
    offset_arr[2] = offset_arr[3] * block;
    offset_arr[1] = offset_arr[2] * size[2] / block;
    offset_arr[0] = offset_arr[1] * size[1];
  }
};
// Forward pass of space_to_depth: computes per-dimension strides into a
// small workspace, then launches the permutation kernel over the output.
template <typename xpu>
void SpaceToDepthOpForward(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const std::vector<TBlob>& inputs,
                           const std::vector<OpReqType>& req,
                           const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), 1U);
  CHECK_EQ(req.size(), 1U);
  mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
  const TBlob& in_data = inputs[0];
  const TBlob& out_data = outputs[0];
  const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
  using namespace mxnet_op;
  int block = param.block_size;
  // Workspace layout: 6 stride entries followed by 4 dimension sizes.
  mshadow::Tensor<xpu, 1, char> workspace =
      ctx.requested[0].get_space_typed<xpu, 1, char>(mshadow::Shape1(sizeof(index_t) * 10), s);
  char* workspace_curr_ptr = workspace.dptr_;
  index_t* offset_arr = reinterpret_cast<index_t*>(workspace_curr_ptr);
  index_t* size = reinterpret_cast<index_t*>(workspace_curr_ptr + sizeof(index_t) * 6);
  MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, {
    MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
      // Single-thread launch to fill the stride/size tables on the device.
      Kernel<compute_offset_for_space_to_depth<req_type>, xpu>::Launch(s,
                                                                       1,
                                                                       offset_arr,
                                                                       size,
                                                                       block,
                                                                       in_data.shape_[0],
                                                                       in_data.shape_[1],
                                                                       in_data.shape_[2],
                                                                       in_data.shape_[3]);
      Kernel<space_to_depth_forward<req_type>, xpu>::Launch(s,
                                                            out_data.Size(),
                                                            out_data.dptr<DType>(),
                                                            in_data.dptr<DType>(),
                                                            block,
                                                            size,
                                                            offset_arr);
    });
  });
}
// Input index names for the split operator.
namespace split_enum {
enum SplitOpInputs { kData };
}  // namespace split_enum
// Parameter holder for split/split_v2: either explicit boundary `indices`
// or a count of equal `sections`, plus the split axis and squeeze flag.
struct SplitParam : public dmlc::Parameter<SplitParam> {
  mxnet::TShape indices;  // section start offsets along `axis` (indices mode)
  int axis;               // axis along which to split
  bool squeeze_axis;      // drop the split axis when every section has size 1
  int sections;           // >0 selects equal-sections mode, 0 selects indices mode
  DMLC_DECLARE_PARAMETER(SplitParam) {
    DMLC_DECLARE_FIELD(indices).describe(
        "Indices of splits. The elements should denote the boundaries of at which split"
        " is performed along the `axis`.");
    DMLC_DECLARE_FIELD(axis).set_default(1).describe("Axis along which to split.");
    DMLC_DECLARE_FIELD(squeeze_axis)
        .set_default(0)
        .describe(
            "If true, Removes the axis with length 1 from the shapes of the output arrays."
            " **Note** that setting `squeeze_axis` to ``true`` removes axis with length 1"
            " only along the `axis` which it is split."
            " Also `squeeze_axis` can be set to ``true``"
            " only if ``input.shape[axis] == num_outputs``.");
    DMLC_DECLARE_FIELD(sections).set_default(0).describe(
        "Number of sections if equally splitted. Default to 0 which means split by indices.");
  }
  // Serializes the fields to strings for attribute export.
  void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
    std::ostringstream indices_s, axis_s, squeeze_axis_s, sections_s;
    indices_s << indices;
    axis_s << axis;
    squeeze_axis_s << squeeze_axis;
    sections_s << sections;
    (*dict)["indices"] = indices_s.str();
    (*dict)["axis"] = axis_s.str();
    (*dict)["squeeze_axis"] = squeeze_axis_s.str();
    (*dict)["sections"] = sections_s.str();
  }
};  // struct SplitParam
// Computes the boundary offsets of `sections` nearly-equal parts of axis
// `axis`: the first (length % sections) parts receive one extra element.
// Returns sections + 1 offsets, starting at 0.
inline mxnet::TShape GetSplitIndices(const mxnet::TShape& ishape, int axis, int sections) {
  mxnet::TShape boundaries(sections + 1, -1);
  const int64_t small_size = static_cast<int64_t>(ishape[axis] / sections);
  const int num_large = ishape[axis] % sections;  // parts holding one extra element
  boundaries[0] = 0;
  for (int i = 0; i < sections; ++i) {
    boundaries[i + 1] =
        (i < num_large) ? (small_size + 1) * (i + 1) : boundaries[i] + small_size;
  }
  return boundaries;
}
// Infers dtypes for split: every output inherits the (required) input dtype.
inline bool SplitOpType(const nnvm::NodeAttrs& attrs,
                        std::vector<int>* in_attrs,
                        std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  const int dtype = in_attrs->front();
  CHECK_NE(dtype, -1) << "First input must have specified type";
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  const int num_outputs = (param.sections > 0) ? param.sections : param.indices.ndim();
  // Replace-all with the input dtype (clears any previous contents).
  out_attrs->assign(num_outputs, dtype);
  return true;
}
// Shape inference core for split: assigns each output its section shape and
// back-propagates a reconstructed input shape from the outputs.
// `real_axis` is the already-normalized (non-negative) split axis.
inline bool SplitOpShapeImpl(const nnvm::NodeAttrs& attrs,
                             mxnet::ShapeVector* in_attrs,
                             mxnet::ShapeVector* out_attrs,
                             const int real_axis) {
  using namespace mshadow;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  mxnet::TShape dshape = in_attrs->at(split_enum::kData);
  mxnet::TShape ishape = in_attrs->at(split_enum::kData);
  // Section boundaries: computed from `sections`, or taken verbatim from
  // the user-supplied `indices`.
  const mxnet::TShape indices =
      (param.sections > 0) ? GetSplitIndices(ishape, real_axis, param.sections) : param.indices;
  int num_outputs = (param.sections > 0) ? indices.ndim() - 1 : indices.ndim();
  // Pre-compute squeezed output shape for future usage: the input shape with
  // the split axis removed.
  mxnet::TShape squeezed_dshape = dshape;
  for (int d = real_axis; d < squeezed_dshape.ndim() - 1; ++d) {
    squeezed_dshape[d] = squeezed_dshape[d + 1];
  }
  squeezed_dshape =
      mxnet::TShape(&squeezed_dshape[0], &squeezed_dshape[squeezed_dshape.ndim() - 1]);
  // Assign shape to every output
  for (int i = 0; i < num_outputs; ++i) {
    index_t start = indices[i];
    // The last section extends to the end of the axis.
    index_t end = (i < num_outputs - 1) ? indices[i + 1] : ishape[real_axis];
    if (ishape[real_axis] == 0U) {
      end = start;  // zero-size axis: every section is empty
    } else {
      CHECK(start <= end) << "start " << start << " is not less than end " << end << "for subarray "
                          << i;
      CHECK(end <= ishape[real_axis])
          << "end " << end << " is no less than the size of the axis " << ishape[real_axis];
    }
    dshape[real_axis] = (end - start);
    if (param.squeeze_axis) {
      // Squeezing requires every section to have exactly one element.
      CHECK_EQ(end - start, 1U) << "expected axis size of 1 but got " << end - start;
      SHAPE_ASSIGN_CHECK(*out_attrs, i, squeezed_dshape);
    } else {
      SHAPE_ASSIGN_CHECK(*out_attrs, i, dshape);
    }
  }
  // Reconstruct the input shape from the (possibly more precise) output
  // shapes and assign it back to the input.
  mxnet::TShape back_calculate_dshape = ishape;
  back_calculate_dshape[real_axis] = 0;
  for (int d = 0; d < real_axis; ++d) {
    back_calculate_dshape[d] = (*out_attrs)[0][d];
  }
  if (param.squeeze_axis) {
    // Each squeezed output contributed exactly one element along the axis.
    back_calculate_dshape[real_axis] = num_outputs;
  } else {
    for (int i = 0; i < num_outputs; ++i) {
      back_calculate_dshape[real_axis] += (*out_attrs)[i][real_axis];
    }
  }
  for (int d = real_axis + 1; d < ishape.ndim(); ++d) {
    if (param.squeeze_axis) {
      back_calculate_dshape[d] = (*out_attrs)[0][d - 1];
    } else {
      back_calculate_dshape[d] = (*out_attrs)[0][d];
    }
  }
  SHAPE_ASSIGN_CHECK(*in_attrs, split_enum::kData, back_calculate_dshape);
  return true;
}
// Shape inference entry point for split: validates the axis, normalizes a
// negative axis, and delegates to SplitOpShapeImpl.
inline bool SplitOpShape(const nnvm::NodeAttrs& attrs,
                         mxnet::ShapeVector* in_attrs,
                         mxnet::ShapeVector* out_attrs) {
  using namespace mshadow;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), 1U);
  mxnet::TShape dshape = in_attrs->at(split_enum::kData);
  if (!mxnet::ndim_is_known(dshape))
    return false;
  if (param.axis >= 0) {
    CHECK_LT(param.axis, dshape.ndim());
  } else {
    // A negative axis counts from the end and is valid iff axis + ndim >= 0.
    // (The previous check, CHECK_LT(axis + ndim, ndim), was vacuously true
    // for every negative axis and never rejected out-of-range values.)
    CHECK_GE(param.axis + dshape.ndim(), 0);
  }
  int real_axis = param.axis;
  if (real_axis < 0) {
    real_axis += dshape.ndim();
  }
  return SplitOpShapeImpl(attrs, in_attrs, out_attrs, real_axis);
}
struct SplitKernel {
  /*!
   * \brief Map function for forward split_v2 operator
   * \param i global thread id (flat index into the input buffer)
   * \param in_data ptr to input buffer
   * \param out_data ptr to ptr of outputs buffer
   * \param indices ptr to indices buffer (section start offsets along the axis)
   * \param num_sections # of sections after split
   * \param axis_size size of axis to be splitted on
   * \param trailing_size step size within the data buffer of the axis to be splitted on
   */
  template <typename DType>
  static MSHADOW_XINLINE void Map(size_t i,
                                  const DType* in_data,
                                  DType** out_data,
                                  const size_t* indices,
                                  const size_t num_sections,
                                  const size_t axis_size,
                                  const size_t trailing_size) {
    // Coordinate of element i along the split axis.
    size_t idx = i / trailing_size % axis_size;
    // Linear scan for the last section whose start offset is <= idx.
    size_t target = 0;
    for (size_t section = 0; section < num_sections && indices[section] <= idx;
         target = section++) {
    }
    DType* target_data = out_data[target];
    // Decompose i into (head, mid, tail) and recompose against the section's
    // own axis length to obtain the destination offset.
    const size_t mid_idx = idx - indices[target];
    const size_t head_idx = i / (trailing_size * axis_size);
    const size_t tail_idx = i % trailing_size;
    const size_t section_size = indices[target + 1] - indices[target];
    const size_t target_idx =
        head_idx * trailing_size * section_size + mid_idx * trailing_size + tail_idx;
    target_data[target_idx] = in_data[i];
  }
};
struct ConcatenateKernel {
  /*!
   * \brief Map function for backward split_v2 operator
   * \param i global thread id (flat index into the input-grad buffer)
   * \param out_grad ptr to ptr of out grads buffer
   * \param in_grad ptr to input grad buffer
   * \param indices ptr to indices buffer (section start offsets along the axis)
   * \param num_sections # of sections after split
   * \param axis_size size of axis to be splitted on
   * \param trailing_size step size within the data buffer of the axis to be splitted on
   */
  template <typename DType>
  static MSHADOW_XINLINE void Map(size_t i,
                                  DType** out_grad,
                                  DType* in_grad,
                                  const size_t* indices,
                                  const size_t num_sections,
                                  const size_t axis_size,
                                  const size_t trailing_size) {
    // Coordinate of element i along the split axis.
    size_t idx = i / trailing_size % axis_size;
    // Linear scan for the last section whose start offset is <= idx.
    size_t src = 0;
    for (size_t section = 0; section < num_sections && indices[section] <= idx; src = section++) {
    }
    DType* src_grad = out_grad[src];
    // Inverse of SplitKernel's mapping: gather from the section gradient.
    const size_t mid_idx = idx - indices[src];
    const size_t head_idx = i / (trailing_size * axis_size);
    const size_t tail_idx = i % trailing_size;
    const size_t section_size = indices[src + 1] - indices[src];
    const size_t src_idx =
        head_idx * trailing_size * section_size + mid_idx * trailing_size + tail_idx;
    in_grad[i] = src_grad[src_idx];
  }
};
// Shared implementation of the split_v2 forward pass: stages the section
// boundaries and output pointers in one workspace, then scatters every input
// element to its section with SplitKernel.
template <typename xpu>
inline void SplitOpForwardImpl(const nnvm::NodeAttrs& attrs,
                               const OpContext& ctx,
                               const std::vector<TBlob>& inputs,
                               const std::vector<OpReqType>& req,
                               const std::vector<TBlob>& outputs,
                               const int real_axis) {
  using namespace mshadow;
  using namespace mshadow::expr;
  using namespace mxnet_op;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  Stream<xpu>* s = ctx.get_stream<xpu>();
  const TBlob& input_data = inputs[split_enum::kData];
  // NOTE(review): `leading` is computed below but never used.
  size_t leading = 1, trailing = 1;
  CHECK_LT(real_axis, input_data.ndim());
  size_t mid = input_data.shape_[real_axis];
  for (int i = 0; i < real_axis; ++i) {
    leading *= input_data.shape_[i];
  }
  for (int i = real_axis + 1; i < input_data.ndim(); ++i) {
    trailing *= input_data.shape_[i];
  }
  // Workspace layout: [section start offsets (size_t)...][output ptrs...].
  size_t workspace_size = 0;
  const mxnet::TShape& ishape = input_data.shape_;
  const mxnet::TShape split_pts =
      (param.sections > 0) ? GetSplitIndices(ishape, real_axis, param.sections) : param.indices;
  std::vector<size_t> indices;
  for (const auto& section : split_pts) {
    indices.push_back(section);
  }
  if (param.sections == 0) {
    // Indices mode: append the axis length so the last section has an end.
    indices.push_back(ishape[real_axis]);
  }
  workspace_size += indices.size() * sizeof(size_t);
  MSHADOW_TYPE_SWITCH(input_data.type_flag_, DType, {
    std::vector<DType*> output_data;
    for (const TBlob& data : outputs) {
      output_data.push_back(data.dptr<DType>());
    }
    workspace_size += output_data.size() * sizeof(DType*);
    Tensor<xpu, 1, char> workspace =
        ctx.requested[0].get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
    // Copy the offsets and the output pointers host -> xpu workspace.
    Tensor<cpu, 1, size_t> indices_cpu_tensor(indices.data(), Shape1(indices.size()));
    Tensor<xpu, 1, size_t> indices_xpu_tensor(reinterpret_cast<size_t*>(workspace.dptr_),
                                              Shape1(indices.size()));
    Tensor<cpu, 1, DType*> ptrs_cpu_tensor(output_data.data(), Shape1(output_data.size()));
    Tensor<xpu, 1, DType*> ptrs_xpu_tensor(
        reinterpret_cast<DType**>(workspace.dptr_ + indices.size() * sizeof(size_t)),
        Shape1(output_data.size()));
    mshadow::Copy(indices_xpu_tensor, indices_cpu_tensor, s);
    mshadow::Copy(ptrs_xpu_tensor, ptrs_cpu_tensor, s);
    Kernel<SplitKernel, xpu>::Launch(s,
                                     input_data.Size(),
                                     input_data.dptr<DType>(),
                                     ptrs_xpu_tensor.dptr_,
                                     indices_xpu_tensor.dptr_,
                                     indices.size() - 1,
                                     mid,
                                     trailing);
  });
}
// Entry point of the split_v2 forward pass: validates arity, normalizes a
// negative axis, and delegates to SplitOpForwardImpl.
template <typename xpu>
inline void SplitOpForward(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const std::vector<TBlob>& inputs,
                           const std::vector<OpReqType>& req,
                           const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  using namespace mxnet_op;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  CHECK_EQ(inputs.size(), 1U);
  CHECK_EQ(outputs.size(), (param.sections > 0) ? param.sections : param.indices.ndim());
  const TBlob& data = inputs[split_enum::kData];
  // Normalize a negative split axis against the input's rank.
  int axis = param.axis;
  if (axis < 0) {
    axis += data.ndim();
  }
  SplitOpForwardImpl<xpu>(attrs, ctx, inputs, req, outputs, axis);
}
// Shared implementation of the split_v2 backward pass: stages the section
// boundaries and output-grad pointers in one workspace, then gathers every
// input-grad element from its section with ConcatenateKernel.
template <typename xpu>
inline void SplitOpBackwardImpl(const nnvm::NodeAttrs& attrs,
                                const OpContext& ctx,
                                const std::vector<TBlob>& inputs,
                                const std::vector<OpReqType>& req,
                                const std::vector<TBlob>& outputs,
                                const int real_axis) {
  using namespace mshadow;
  using namespace mshadow::expr;
  using namespace mxnet_op;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  Stream<xpu>* s = ctx.get_stream<xpu>();
  TBlob input_grad = outputs[split_enum::kData];
  // NOTE(review): `leading` is computed below but never used.
  size_t leading = 1, trailing = 1;
  CHECK_LT(real_axis, input_grad.ndim());
  size_t mid = input_grad.shape_[real_axis];
  for (int i = 0; i < real_axis; ++i) {
    leading *= input_grad.shape_[i];
  }
  for (int i = real_axis + 1; i < input_grad.ndim(); ++i) {
    trailing *= input_grad.shape_[i];
  }
  // Workspace layout: [section start offsets (size_t)...][out-grad ptrs...].
  size_t workspace_size = 0;
  const mxnet::TShape& ishape = input_grad.shape_;
  const mxnet::TShape split_pts =
      (param.sections > 0) ? GetSplitIndices(ishape, real_axis, param.sections) : param.indices;
  std::vector<size_t> indices;
  for (const auto& section : split_pts) {
    indices.push_back(section);
  }
  if (param.sections == 0) {
    // Indices mode: append the axis length so the last section has an end.
    indices.push_back(ishape[real_axis]);
  }
  workspace_size += indices.size() * sizeof(size_t);
  MSHADOW_TYPE_SWITCH(input_grad.type_flag_, DType, {
    std::vector<DType*> out_grads;
    for (const TBlob& output_grad : inputs) {
      out_grads.push_back(output_grad.dptr<DType>());
    }
    workspace_size += out_grads.size() * sizeof(DType*);
    Tensor<xpu, 1, char> workspace =
        ctx.requested[0].get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
    // Copy the offsets and the out-grad pointers host -> xpu workspace.
    Tensor<cpu, 1, size_t> indices_cpu_tensor(indices.data(), Shape1(indices.size()));
    Tensor<xpu, 1, size_t> indices_xpu_tensor(reinterpret_cast<size_t*>(workspace.dptr_),
                                              Shape1(indices.size()));
    Tensor<cpu, 1, DType*> ptrs_cpu_tensor(out_grads.data(), Shape1(inputs.size()));
    Tensor<xpu, 1, DType*> ptrs_xpu_tensor(
        reinterpret_cast<DType**>(workspace.dptr_ + indices.size() * sizeof(size_t)),
        Shape1(inputs.size()));
    mshadow::Copy(indices_xpu_tensor, indices_cpu_tensor, s);
    mshadow::Copy(ptrs_xpu_tensor, ptrs_cpu_tensor, s);
    Kernel<ConcatenateKernel, xpu>::Launch(s,
                                           input_grad.Size(),
                                           ptrs_xpu_tensor.dptr_,
                                           input_grad.dptr<DType>(),
                                           indices_xpu_tensor.dptr_,
                                           indices.size() - 1,
                                           mid,
                                           trailing);
  });
}
template <typename xpu>
inline void SplitOpBackward(const nnvm::NodeAttrs& attrs,
                            const OpContext& ctx,
                            const std::vector<TBlob>& inputs,
                            const std::vector<OpReqType>& req,
                            const std::vector<TBlob>& outputs) {
  using namespace mshadow;
  using namespace mshadow::expr;
  using namespace mxnet_op;
  const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  // One output gradient must be supplied per split section (sections > 0) or
  // per split index otherwise.
  CHECK_EQ(inputs.size(), (param.sections > 0) ? param.sections : param.indices.ndim())
      << "out grad vector size must match the output size";  // fixed typo: "mush" -> "must"
  CHECK_EQ(outputs.size(), 1U);
  // Normalize a negative axis into [0, ndim).
  int real_axis = param.axis;
  if (real_axis < 0) {
    real_axis += outputs[split_enum::kData].ndim();
  }
  SplitOpBackwardImpl<xpu>(attrs, ctx, inputs, req, outputs, real_axis);
}
inline uint32_t SplitNumOutputs(const NodeAttrs& attrs) {
const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
return (param.sections > 0) ? param.sections : param.indices.ndim();
}
} // namespace op
} // namespace mxnet
namespace std {
// Hash functor specializations so the mxnet op parameter structs can be used
// as keys in unordered associative containers.
// Note: operator() is const-qualified, as required by the std::hash contract
// (unordered containers invoke the hasher through a const reference).
template <>
struct hash<mxnet::op::TransposeParam> {
  size_t operator()(const mxnet::op::TransposeParam& val) const {
    size_t ret = 0;
    ret = dmlc::HashCombine(ret, val.axes);
    return ret;
  }
};

template <>
struct hash<mxnet::op::ReshapeParam> {
  size_t operator()(const mxnet::op::ReshapeParam& val) const {
    size_t ret = 0;
    ret = dmlc::HashCombine(ret, val.target_shape);
    ret = dmlc::HashCombine(ret, val.keep_highest);
    ret = dmlc::HashCombine(ret, val.shape);
    ret = dmlc::HashCombine(ret, val.reverse);
    return ret;
  }
};

template <>
struct hash<mxnet::op::ExpandDimParam> {
  size_t operator()(const mxnet::op::ExpandDimParam& val) const {
    size_t ret = 0;
    ret = dmlc::HashCombine(ret, val.axis);
    return ret;
  }
};
}  // namespace std
#endif // MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_
|
computepi.c | #include "computepi.h"
#include <immintrin.h>
#include <math.h>
#include <omp.h>
#include <stdio.h>
double compute_pi_baseline(size_t N)
{
    /* Left-endpoint Riemann sum of integral_0^1 4/(1+x^2) dx = pi,
     * using N rectangles of width 1/N. */
    const double width = 1.0 / N;
    double sum = 0.0;
    for (size_t k = 0; k < N; k++) {
        const double x = (double) k / N; /* left endpoint of rectangle k */
        sum += width / (1.0 + x * x);
    }
    return sum * 4.0;
}
double compute_pi_openmp(size_t N, int threads)
{
    /* Same left-endpoint Riemann sum as compute_pi_baseline, distributed
     * over `threads` OpenMP threads with a sum reduction on the accumulator.
     * The combined parallel-for is equivalent to the separate
     * parallel { for } construct; x is loop-scoped and thus private. */
    double sum = 0.0;
    const double width = 1.0 / N;
#pragma omp parallel for num_threads(threads) reduction(+ : sum)
    for (size_t k = 0; k < N; k++) {
        double x = (double) k / N;
        sum += width / (1.0 + x * x);
    }
    return sum * 4.0;
}
double compute_pi_avx(size_t N)
{
double pi = 0.0;
double dt = 1.0 / N;
register __m256d ymm0, ymm1, ymm2, ymm3, ymm4;
ymm0 = _mm256_set1_pd(1.0);
ymm1 = _mm256_set1_pd(dt);
ymm2 = _mm256_set_pd(dt * 3, dt * 2, dt * 1, 0.0);
ymm4 = _mm256_setzero_pd(); // sum of pi
for (int i = 0; i <= N - 4; i += 4) {
ymm3 = _mm256_set1_pd(i * dt); // i*dt, i*dt, i*dt, i*dt
ymm3 = _mm256_add_pd(
ymm3, ymm2); // x = i*dt+3*dt, i*dt+2*dt, i*dt+dt, i*dt+0.0
ymm3 = _mm256_mul_pd(ymm3,
ymm3); // x^2 = (i*dt+3*dt)^2, (i*dt+2*dt)^2, ...
ymm3 = _mm256_add_pd(
ymm0, ymm3); // 1+x^2 = 1+(i*dt+3*dt)^2, 1+(i*dt+2*dt)^2, ...
ymm3 = _mm256_div_pd(ymm1, ymm3); // dt/(1+x^2)
ymm4 = _mm256_add_pd(ymm4, ymm3); // pi += dt/(1+x^2)
}
double tmp[4] __attribute__((aligned(32)));
_mm256_store_pd(tmp, ymm4); // move packed float64 values to 256-bit
// aligned memory location
pi += tmp[0] + tmp[1] + tmp[2] + tmp[3];
return pi * 4.0;
}
double compute_pi_avx_unroll(size_t N)
{
double pi = 0.0;
double dt = 1.0 / N;
register __m256d ymm0, ymm1, ymm2, ymm3, ymm4, ymm5, ymm6, ymm7, ymm8, ymm9,
ymm10, ymm11, ymm12, ymm13, ymm14;
ymm0 = _mm256_set1_pd(1.0);
ymm1 = _mm256_set1_pd(dt);
ymm2 = _mm256_set_pd(dt * 3, dt * 2, dt * 1, 0.0);
ymm3 = _mm256_set_pd(dt * 7, dt * 6, dt * 5, dt * 4);
ymm4 = _mm256_set_pd(dt * 11, dt * 10, dt * 9, dt * 8);
ymm5 = _mm256_set_pd(dt * 15, dt * 14, dt * 13, dt * 12);
ymm6 = _mm256_setzero_pd(); // first sum of pi
ymm7 = _mm256_setzero_pd(); // second sum of pi
ymm8 = _mm256_setzero_pd(); // third sum of pi
ymm9 = _mm256_setzero_pd(); // fourth sum of pi
for (int i = 0; i <= N - 16; i += 16) {
ymm14 = _mm256_set1_pd(i * dt);
ymm10 = _mm256_add_pd(ymm14, ymm2);
ymm11 = _mm256_add_pd(ymm14, ymm3);
ymm12 = _mm256_add_pd(ymm14, ymm4);
ymm13 = _mm256_add_pd(ymm14, ymm5);
ymm10 = _mm256_mul_pd(ymm10, ymm10);
ymm11 = _mm256_mul_pd(ymm11, ymm11);
ymm12 = _mm256_mul_pd(ymm12, ymm12);
ymm13 = _mm256_mul_pd(ymm13, ymm13);
ymm10 = _mm256_add_pd(ymm0, ymm10);
ymm11 = _mm256_add_pd(ymm0, ymm11);
ymm12 = _mm256_add_pd(ymm0, ymm12);
ymm13 = _mm256_add_pd(ymm0, ymm13);
ymm10 = _mm256_div_pd(ymm1, ymm10);
ymm11 = _mm256_div_pd(ymm1, ymm11);
ymm12 = _mm256_div_pd(ymm1, ymm12);
ymm13 = _mm256_div_pd(ymm1, ymm13);
ymm6 = _mm256_add_pd(ymm6, ymm10);
ymm7 = _mm256_add_pd(ymm7, ymm11);
ymm8 = _mm256_add_pd(ymm8, ymm12);
ymm9 = _mm256_add_pd(ymm9, ymm13);
}
double tmp1[4] __attribute__((aligned(32)));
double tmp2[4] __attribute__((aligned(32)));
double tmp3[4] __attribute__((aligned(32)));
double tmp4[4] __attribute__((aligned(32)));
_mm256_store_pd(tmp1, ymm6);
_mm256_store_pd(tmp2, ymm7);
_mm256_store_pd(tmp3, ymm8);
_mm256_store_pd(tmp4, ymm9);
pi += tmp1[0] + tmp1[1] + tmp1[2] + tmp1[3] + tmp2[0] + tmp2[1] + tmp2[2] +
tmp2[3] + tmp3[0] + tmp3[1] + tmp3[2] + tmp3[3] + tmp4[0] + tmp4[1] +
tmp4[2] + tmp4[3];
return pi * 4.0;
}
double compute_pi_leibniz(size_t N)
{
    /* Leibniz series: pi/4 = 1 - 1/3 + 1/5 - 1/7 + ... */
    double quarter_pi = 0.0;
    double sign = 1.0; /* alternates each term, exactly like (k & 1) ? -1 : 1 */
    for (size_t k = 0; k < N; k++) {
        quarter_pi += sign / (2 * k + 1);
        sign = -sign;
    }
    return quarter_pi * 4.0;
}
double compute_pi_leibniz_openmp(size_t N, int threads)
{
    /* Leibniz series pi/4 = sum (-1)^k / (2k+1), parallelized with a sum
     * reduction. The sign is derived from k (not carried between iterations)
     * so iterations stay independent. */
    double quarter_pi = 0.0;
#pragma omp parallel for num_threads(threads) reduction(+ : quarter_pi)
    for (size_t k = 0; k < N; k++) {
        double sign = (k % 2 == 0) ? 1.0 : -1.0;
        quarter_pi += sign / (2 * k + 1);
    }
    return quarter_pi * 4.0;
}
double compute_pi_leibniz_avx(size_t N)
{
double pi = 0.0;
register __m256d ymm0, ymm1, ymm2, ymm3, ymm4;
ymm0 = _mm256_set_pd(1.0, -1.0, 1.0, -1.0);
ymm1 = _mm256_set1_pd(1.0);
ymm2 = _mm256_set1_pd(2.0);
ymm4 = _mm256_setzero_pd(); // calculation result
for (int i = 0; i <= N - 4; i += 4) {
ymm3 = _mm256_set_pd(i, i + 1.0, i + 2.0, i + 3.0);
ymm3 = _mm256_mul_pd(ymm3, ymm2); // 2*i
ymm3 = _mm256_add_pd(ymm3, ymm1); // 2*i+1
ymm3 = _mm256_div_pd(ymm0, ymm3); // (-1)^n / (2*i+1)
ymm4 = _mm256_add_pd(ymm4, ymm3);
}
double tmp[4] __attribute__((aligned(32)));
_mm256_store_pd(tmp, ymm4); // move packed float64 values to 256-bit
// aligned memory location
pi += tmp[0] + tmp[1] + tmp[2] + tmp[3];
return pi * 4.0;
}
double compute_pi_leibniz_avx_unroll(size_t N)
{
double pi = 0.0;
register __m256d ymm0, ymm1, ymm2, ymm3, ymm4, ymm5, ymm6, ymm7, ymm8, ymm9,
ymm10;
ymm0 = _mm256_set_pd(1.0, -1.0, 1.0, -1.0);
ymm1 = _mm256_set1_pd(1.0);
ymm2 = _mm256_set1_pd(2.0);
ymm7 = _mm256_setzero_pd(); // first sum of pi
ymm8 = _mm256_setzero_pd(); // second sum of pi
ymm9 = _mm256_setzero_pd(); // third sum of pi
ymm10 = _mm256_setzero_pd(); // fourth sum of pi
for (int i = 0; i <= N - 16; i += 16) {
ymm3 = _mm256_set_pd(i, i + 1.0, i + 2.0, i + 3.0);
ymm4 = _mm256_set_pd(i + 4.0, i + 5.0, i + 6.0, i + 7.0);
ymm5 = _mm256_set_pd(i + 8.0, i + 9.0, i + 10.0, i + 11.0);
ymm6 = _mm256_set_pd(i + 12.0, i + 13.0, i + 14.0, i + 15.0);
ymm3 = _mm256_mul_pd(ymm3, ymm2); // 2*i
ymm4 = _mm256_mul_pd(ymm4, ymm2);
ymm5 = _mm256_mul_pd(ymm5, ymm2);
ymm6 = _mm256_mul_pd(ymm6, ymm2);
ymm3 = _mm256_add_pd(ymm3, ymm1); // 2*i+1
ymm4 = _mm256_add_pd(ymm4, ymm1);
ymm5 = _mm256_add_pd(ymm5, ymm1);
ymm6 = _mm256_add_pd(ymm6, ymm1);
ymm3 = _mm256_div_pd(ymm0, ymm3); // (-1)^n / (2*i+1)
ymm4 = _mm256_div_pd(ymm0, ymm4);
ymm5 = _mm256_div_pd(ymm0, ymm5);
ymm6 = _mm256_div_pd(ymm0, ymm6);
ymm7 = _mm256_add_pd(ymm7, ymm3);
ymm8 = _mm256_add_pd(ymm8, ymm4);
ymm9 = _mm256_add_pd(ymm9, ymm5);
ymm10 = _mm256_add_pd(ymm10, ymm6);
}
double tmp1[4] __attribute__((aligned(32)));
double tmp2[4] __attribute__((aligned(32)));
double tmp3[4] __attribute__((aligned(32)));
double tmp4[4] __attribute__((aligned(32)));
_mm256_store_pd(tmp1, ymm7);
_mm256_store_pd(tmp2, ymm8);
_mm256_store_pd(tmp3, ymm9);
_mm256_store_pd(tmp4, ymm10);
pi += tmp1[0] + tmp1[1] + tmp1[2] + tmp1[3] + tmp2[0] + tmp2[1] + tmp2[2] +
tmp2[3] + tmp3[0] + tmp3[1] + tmp3[2] + tmp3[3] + tmp4[0] + tmp4[1] +
tmp4[2] + tmp4[3];
return pi * 4.0;
}
double compute_pi_euler(size_t N)
{
    /* Basel problem: sum 1/k^2 = pi^2 / 6, so pi = sqrt(6 * sum).
     * Sums k = 1 .. N-1 in ascending order, like the other variants. */
    double basel = 0.0;
    for (size_t k = 1; k < N; k++)
        basel += 1.0 / (k * k);
    return sqrt(6 * basel);
}
double compute_pi_euler_openmp(size_t N, int threads)
{
    /* Basel problem (pi^2/6 = sum 1/k^2), parallelized with a sum reduction
     * across `threads` OpenMP threads. */
    double basel = 0.0;
#pragma omp parallel for num_threads(threads) reduction(+ : basel)
    for (size_t k = 1; k < N; k++)
        basel += 1.0 / (k * k);
    return sqrt(6 * basel);
}
double compute_pi_euler_avx(size_t N)
{
double pi = 0.0;
register __m256d ymm0, ymm1, ymm2, ymm3;
ymm0 = _mm256_set1_pd(1.0);
ymm1 = _mm256_set1_pd(6.0);
ymm3 = _mm256_setzero_pd(); // calculation result
for (int i = 1; i <= N - 4; i += 4) {
ymm2 = _mm256_set_pd(i, i + 1.0, i + 2.0, i + 3.0);
ymm2 = _mm256_mul_pd(ymm2, ymm2); // i*i
ymm2 = _mm256_div_pd(ymm0, ymm2); // 1/(i*i)
ymm2 = _mm256_mul_pd(ymm1, ymm2); // 6/(i*i)
ymm3 = _mm256_add_pd(ymm3, ymm2);
}
double tmp[4] __attribute__((aligned(32)));
_mm256_store_pd(tmp, ymm3); // move packed float64 values to 256-bit
// aligned memory location
pi += tmp[0] + tmp[1] + tmp[2] + tmp[3];
return sqrt(pi);
}
double compute_pi_euler_avx_unroll(size_t N)
{
double pi = 0.0;
register __m256d ymm0, ymm1, ymm2, ymm3, ymm4, ymm5, ymm6, ymm7, ymm8, ymm9;
ymm0 = _mm256_set1_pd(1.0);
ymm1 = _mm256_set1_pd(6.0);
ymm6 = _mm256_setzero_pd(); // first sum of pi
ymm7 = _mm256_setzero_pd(); // second sum of pi
ymm8 = _mm256_setzero_pd(); // third sum of pi
ymm9 = _mm256_setzero_pd(); // fourth sum of pi
for (int i = 1; i <= N - 16; i += 16) {
ymm2 = _mm256_set_pd(i, i + 1.0, i + 2.0, i + 3.0);
ymm3 = _mm256_set_pd(i + 4.0, i + 5.0, i + 6.0, i + 7.0);
ymm4 = _mm256_set_pd(i + 8.0, i + 9.0, i + 10.0, i + 11.0);
ymm5 = _mm256_set_pd(i + 12.0, i + 13.0, i + 14.0, i + 15.0);
ymm2 = _mm256_mul_pd(ymm2, ymm2); // i*i
ymm3 = _mm256_mul_pd(ymm3, ymm3);
ymm4 = _mm256_mul_pd(ymm4, ymm4);
ymm5 = _mm256_mul_pd(ymm5, ymm5);
ymm2 = _mm256_div_pd(ymm0, ymm2); // 1/(i*i)
ymm3 = _mm256_div_pd(ymm0, ymm3);
ymm4 = _mm256_div_pd(ymm0, ymm4);
ymm5 = _mm256_div_pd(ymm0, ymm5);
ymm2 = _mm256_mul_pd(ymm1, ymm2); // 6/(i*i)
ymm3 = _mm256_mul_pd(ymm1, ymm3);
ymm4 = _mm256_mul_pd(ymm1, ymm4);
ymm5 = _mm256_mul_pd(ymm1, ymm5);
ymm6 = _mm256_add_pd(ymm6, ymm2);
ymm7 = _mm256_add_pd(ymm7, ymm3);
ymm8 = _mm256_add_pd(ymm8, ymm4);
ymm9 = _mm256_add_pd(ymm9, ymm5);
}
double tmp1[4] __attribute__((aligned(32)));
double tmp2[4] __attribute__((aligned(32)));
double tmp3[4] __attribute__((aligned(32)));
double tmp4[4] __attribute__((aligned(32)));
_mm256_store_pd(tmp1, ymm6);
_mm256_store_pd(tmp2, ymm7);
_mm256_store_pd(tmp3, ymm8);
_mm256_store_pd(tmp4, ymm9);
pi += tmp1[0] + tmp1[1] + tmp1[2] + tmp1[3] + tmp2[0] + tmp2[1] + tmp2[2] +
tmp2[3] + tmp3[0] + tmp3[1] + tmp3[2] + tmp3[3] + tmp4[0] + tmp4[1] +
tmp4[2] + tmp4[3];
return sqrt(pi);
}
|
bezier_post_utility.h | //
// Project Name: Kratos
// Last Modified by: $Author: hbui $
// Date: $Date: 2013-10-12 $
// Revision: $Revision: 1.0 $
//
//
#if !defined(KRATOS_BEZIER_POST_UTILITY_H_INCLUDED )
#define KRATOS_BEZIER_POST_UTILITY_H_INCLUDED
// System includes
#include <string>
#include <vector>
#include <iostream>
// External includes
#include <omp.h>
#include "boost/progress.hpp"
#ifdef ISOGEOMETRIC_USE_MPI
#include "mpi.h"
#endif
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/node.h"
#include "includes/element.h"
#include "includes/properties.h"
#include "includes/ublas_interface.h"
#include "includes/legacy_structural_app_vars.h"
#include "spaces/ublas_space.h"
#include "linear_solvers/linear_solver.h"
#include "utilities/openmp_utils.h"
#include "utilities/auto_collapse_spatial_binning.h"
#include "custom_geometries/isogeometric_geometry.h"
#include "custom_utilities/isogeometric_post_utility.h"
#include "isogeometric_application/isogeometric_application.h"
// #define DEBUG_LEVEL1
//#define DEBUG_LEVEL2
//#define DEBUG_MULTISOLVE
//#define DEBUG_GENERATE_MESH
// #define ENABLE_PROFILING
namespace Kratos
{
///@addtogroup IsogeometricApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
// Primary template: there is no generic point-interpolation routine, so this
// always throws. The real work is done by the explicit specializations for
// double, Vector and array_1d<double, 3> declared later in this header.
template<typename TDataType>
struct BezierPostUtility_Helper
{
typedef typename Element::GeometryType GeometryType;
typedef typename GeometryType::CoordinatesArrayType CoordinatesArrayType;
/// Interpolation on element
// Note: the function never returns — KRATOS_THROW_ERROR raises — which is
// why the missing return statement is acceptable here.
static TDataType& CalculateOnPoint(const Variable<TDataType>& rVariable,
TDataType& rResult, Element::Pointer& pElement, const CoordinatesArrayType& rCoordinates)
{
KRATOS_THROW_ERROR(std::logic_error, "Error calling unimplemented function", __FUNCTION__)
}
};
/// Short class definition.
/**
An advanced utility to export directly the FEM mesh out from isogeometric Bezier mesh. Each Bezier element will generate its own set of FEM elements. Therefore a large amount of nodes and elements may be generated.
One shall carefully use this utility for large problem. Previously, this class is named IsogeometricPostUtility.
*/
class BezierPostUtility : public IsogeometricPostUtility
{
public:
///@name Type Definitions
///@{
typedef boost::numeric::ublas::vector<double> ValuesContainerType;
typedef boost::numeric::ublas::matrix<double> ValuesArrayContainerType;
typedef typename ModelPart::NodesContainerType NodesArrayType;
typedef typename ModelPart::ElementsContainerType ElementsArrayType;
typedef typename ModelPart::ConditionsContainerType ConditionsArrayType;
typedef typename Element::GeometryType GeometryType;
typedef typename GeometryType::PointType NodeType;
typedef IsogeometricGeometry<NodeType> IsogeometricGeometryType;
typedef typename GeometryType::IntegrationPointsArrayType IntegrationPointsArrayType;
typedef typename GeometryType::CoordinatesArrayType CoordinatesArrayType;
typedef typename NodeType::DofsContainerType DofsContainerType;
// Serial ublas linear-algebra spaces feeding the LinearSolver used by the
// private TransferVariablesToNodes projections.
typedef UblasSpace<double, CompressedMatrix, Vector> SerialSparseSpaceType;
typedef UblasSpace<double, Matrix, Vector> SerialDenseSpaceType;
typedef LinearSolver<SerialSparseSpaceType, SerialDenseSpaceType> LinearSolverType;
typedef std::size_t IndexType;
/// Pointer definition of BezierPostUtility
KRATOS_CLASS_POINTER_DEFINITION(BezierPostUtility);
///@}
///@name Life Cycle
///@{
/// Default constructor.
BezierPostUtility()
{
}
/// Destructor.
virtual ~BezierPostUtility()
{
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
// Synchronize post model_part with the reference model_part
// For every node of the post model_part, read back the Bezier element it was
// generated from (PARENT_ELEMENT_ID) and its local coordinates
// (LOCAL_COORDINATES), interpolate rThisVariable at that point via
// BezierPostUtility_Helper, and store the result on the post node.
template<class TVariableType>
void TransferNodalResults(
const TVariableType& rThisVariable,
ModelPart& r_model_part,
ModelPart& r_model_part_post) const
{
#ifdef ENABLE_PROFILING
double start_compute = OpenMPUtils::GetCurrentTime();
#endif
NodesArrayType& pTargetNodes = r_model_part_post.Nodes();
ElementsArrayType& pElements = r_model_part.Elements();
typename TVariableType::Type Results;
CoordinatesArrayType LocalPos;
int ElementId;
// #pragma omp parallel for
//TODO: check this. This is not parallelized.
for(typename NodesArrayType::ptr_iterator it = pTargetNodes.ptr_begin(); it != pTargetNodes.ptr_end(); ++it)
{
// NOTE(review): assumes every post node carries a PARENT_ELEMENT_ID that
// exists in r_model_part.Elements() — confirm against the mesh generator.
ElementId = (*it)->GetSolutionStepValue(PARENT_ELEMENT_ID);
noalias(LocalPos) = (*it)->GetSolutionStepValue(LOCAL_COORDINATES);
Results = BezierPostUtility_Helper<typename TVariableType::Type>::CalculateOnPoint(rThisVariable, Results, pElements(ElementId), LocalPos);
(*it)->GetSolutionStepValue(rThisVariable) = Results;
}
#ifdef ENABLE_PROFILING
double end_compute = OpenMPUtils::GetCurrentTime();
std::cout << "Transfer nodal point results for " << rThisVariable.Name() << " completed: " << end_compute - start_compute << " s" << std::endl;
#else
std::cout << "Transfer nodal point results for " << rThisVariable.Name() << " completed" << std::endl;
#endif
}
// Synchronize post model_part with the reference model_part
// Two-phase transfer: (1) project integration-point values of rThisVariable
// onto the nodes of the reference model_part (private
// TransferVariablesToNodes), then (2) interpolate those nodal values onto
// the post model_part via TransferNodalResults.
template<class TVariableType>
void TransferIntegrationPointResults(
const TVariableType& rThisVariable,
ModelPart& r_model_part,
ModelPart& r_model_part_post,
LinearSolverType::Pointer pSolver) const
{
#ifdef ENABLE_PROFILING
double start_compute = OpenMPUtils::GetCurrentTime();
std::cout << "########################################" << std::endl;
std::cout << "Transfer integration point results for "
<< rThisVariable.Name() << " starts" << std::endl;
#endif
// firstly transfer rThisVariable from integration points of reference model_part to its nodes
TransferVariablesToNodes(pSolver, r_model_part, rThisVariable);
// secondly transfer new nodal variables results to the post model_part
TransferNodalResults(rThisVariable, r_model_part, r_model_part_post);
#ifdef ENABLE_PROFILING
double end_compute = OpenMPUtils::GetCurrentTime();
std::cout << "Transfer integration point results for "
<< rThisVariable.Name() << " completed: "
<< end_compute - start_compute << "s" << std::endl;
std::cout << "########################################" << std::endl;
#endif
}
// Transfer the variable to nodes for model_part
// Public wrapper: forwards to the private (pSolver-first) overload that
// operates on all elements of the model_part.
template<class TVariableType>
void TransferVariablesToNodes(
const TVariableType& rThisVariable,
ModelPart& r_model_part,
LinearSolverType::Pointer pSolver) const
{
#ifdef ENABLE_PROFILING
double start_compute = OpenMPUtils::GetCurrentTime();
std::cout << "########################################" << std::endl;
std::cout << "Transfer integration point results to nodes for "
<< rThisVariable.Name() << " starts" << std::endl;
#endif
TransferVariablesToNodes(pSolver, r_model_part, rThisVariable);
#ifdef ENABLE_PROFILING
double end_compute = OpenMPUtils::GetCurrentTime();
std::cout << "Transfer integration point results to nodes for "
<< rThisVariable.Name() << " completed: "
<< end_compute - start_compute << "s" << std::endl;
std::cout << "########################################" << std::endl;
#endif
}
// Transfer the variable to nodes for model_part
// Public wrapper: same as above, but restricted to the mesh defined by the
// given ElementsArray.
template<class TVariableType>
void TransferVariablesToNodes(
const TVariableType& rThisVariable,
ModelPart& r_model_part,
ElementsArrayType& ElementsArray,
LinearSolverType::Pointer pSolver) const
{
#ifdef ENABLE_PROFILING
double start_compute = OpenMPUtils::GetCurrentTime();
std::cout << "########################################" << std::endl;
std::cout << "Transfer integration point results to nodes for "
<< rThisVariable.Name() << " starts" << std::endl;
#endif
TransferVariablesToNodes(pSolver, r_model_part, ElementsArray, rThisVariable);
#ifdef ENABLE_PROFILING
double end_compute = OpenMPUtils::GetCurrentTime();
std::cout << "Transfer integration point results to nodes for "
<< rThisVariable.Name() << " completed: "
<< end_compute - start_compute << "s" << std::endl;
std::cout << "########################################" << std::endl;
#endif
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
virtual std::string Info() const
{
std::stringstream buffer;
buffer << "BezierPostUtility";
return buffer.str();
}
/// Print information about this object.
virtual void PrintInfo(std::ostream& rOStream) const
{
rOStream << "BezierPostUtility";
}
/// Print object's data.
virtual void PrintData(std::ostream& rOStream) const
{}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/**
* Transfer of rThisVariable defined on integration points to corresponding
* nodal values. The transformation is done in a form that ensures a minimization
* of L_2-norm error (/sum{rThisVariable- f(x)) whereas
* f(x)= /sum{shape_func_i*rThisVariable_i}
* @param model_part model_part on which the transfer should be done
* @param rThisVariable Vector-Variable which should be transferred
* @ref Jiao&Heath: "Common-refinement-based data transfer...", Int.
* Journal for numer. meth. in eng. 61 (2004) 2402--2427
* WARNING: this may cause segmentation faults as the respective variables
* will be created on nodal level while they are originally intended to be
* stored on integration points!
* @param pSolver the solver used for solving the local system matrix
* @param pModelPart pointer to model_part that we wish to transfer the result from its integration points to its nodes
* @param rThisVariable the variable need to transfer the respected values
* @param check_active if false the activeness of the elements will not be checked; true otherwise
* REMARKS: this subroutine will only transfer the variables to nodes connecting with the mesh defined by ElementsArray
*/
void TransferVariablesToNodes(LinearSolverType::Pointer& pSolver,
ModelPart& r_model_part, ElementsArrayType& ElementsArray,
const Variable<double>& rThisVariable,
const bool& check_active = false) const;
/**
* Transfer of rThisVariable defined on integration points to corresponding
* nodal values. The transformation is done in a form that ensures a minimization
* of L_2-norm error (/sum{rThisVariable- f(x)) whereas
* f(x)= /sum{shape_func_i*rThisVariable_i}
* @param model_part model_part on which the transfer should be done
* @param rThisVariable Vector-Variable which should be transferred
* @ref Jiao&Heath: "Common-refinement-based data transfer...", Int.
* Journal for numer. meth. in eng. 61 (2004) 2402--2427
* WARNING: this may cause segmentation faults as the respective variables
* will be created on nodal level while they are originally intended to be
* stored on integration points!
* @param pSolver the solver used for solving the local system matrix
* @param pModelPart pointer to model_part that we wish to transfer the result from its integration points to its nodes
* @param rThisVariable the variable need to transfer the respected values
* REMARKS: + this subroutine will transfer the variables to nodes connecting with the model_part. Shall not use this subroutine if there are many types of element in the model_part.
* + the activeness of the element will not be checked
*/
void TransferVariablesToNodes(LinearSolverType::Pointer& pSolver,
ModelPart& r_model_part,
const Variable<double>& rThisVariable,
const bool& check_active = false) const;
/**
* Transfer of rThisVariable defined on integration points to corresponding
* nodal values. The transformation is done in a form that ensures a minimization
* of L_2-norm error (/sum{rThisVariable- f(x)) whereas
* f(x)= /sum{shape_func_i*rThisVariable_i}
* @param model_part model_part on which the transfer should be done
* @param rThisVariable Vector-Variable which should be transferred
* @ref Jiao&Heath: "Common-refinement-based data transfer...", Int.
* Journal for numer. meth. in eng. 61 (2004) 2402--2427
* WARNING: this may cause segmentation faults as the respective variables
* will be created on nodal level while they are originally intended to be
* stored on integration points!
* @param pSolver the solver used for solving the local system matrix
* @param pModelPart pointer to model_part that we wish to transfer the result from its integration points to its nodes
* @param rThisVariable the variable need to transfer the respected values
* @param ncomponents number of components of the nodal vector
* @param check_active if false the activeness of the elements will not be checked; true otherwise
* REMARKS: this subroutine will only transfer the variables to nodes connecting with the mesh defined by ElementsArray
*/
void TransferVariablesToNodes(LinearSolverType::Pointer& pSolver,
ModelPart& r_model_part, ElementsArrayType& ElementsArray,
const Variable<Vector>& rThisVariable,
const std::size_t& ncomponents = 6,
const bool& check_active = false) const;
/**
* Transfer of rThisVariable defined on integration points to corresponding
* nodal values. The transformation is done in a form that ensures a minimization
* of L_2-norm error (/sum{rThisVariable- f(x)) whereas
* f(x)= /sum{shape_func_i*rThisVariable_i}
* @param model_part model_part on which the transfer should be done
* @param rThisVariable Vector-Variable which should be transferred
* @ref Jiao&Heath: "Common-refinement-based data transfer...", Int.
* Journal for numer. meth. in eng. 61 (2004) 2402--2427
* WARNING: this may cause segmentation faults as the respective variables
* will be created on nodal level while they are originally intended to be
* stored on integration points!
* REMARKS:
* + currently this method only works with 6-components variable like STRESSES, PRESTRESS, etc
* @param pSolver the solver used for solving the local system matrix
* @param pModelPart pointer to model_part that we wish to transfer the result from its integration points to its nodes
* @param rThisVariable the variable need to transfer the respected values
* @param ncomponents number of components of the nodal vector
* REMARKS: + this subroutine will transfer the variables to nodes connecting with the model_part. Shall not use this subroutine if there are many types of element in the model_part.
* + the activeness of the element will not be checked
*/
void TransferVariablesToNodes(LinearSolverType::Pointer& pSolver,
ModelPart& r_model_part,
const Variable<Vector>& rThisVariable,
const std::size_t& ncomponents = 6,
const bool& check_active = false) const;
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
// Private, deliberately empty stub: the class is stateless, and keeping it
// private blocks external assignment (pre-C++11 non-copyable idiom).
BezierPostUtility& operator=(BezierPostUtility const& rOther)
{
return *this;
}
/// Copy constructor.
// Private empty stub, see the assignment operator above.
BezierPostUtility(BezierPostUtility const& rOther)
{
}
///@}
}; // Class BezierPostUtility
///@}
// Specialization for scalar (double) variables; defined in the matching
// implementation file, not in this header.
template<>
struct BezierPostUtility_Helper<double>
{
typedef typename Element::GeometryType GeometryType;
typedef typename GeometryType::CoordinatesArrayType CoordinatesArrayType;
/// Interpolation on element
static double& CalculateOnPoint(const Variable<double>& rVariable,
double& rResult, Element::Pointer& pElement, const CoordinatesArrayType& rCoordinates);
};
// Specialization for ublas Vector variables; defined in the matching
// implementation file, not in this header.
template<>
struct BezierPostUtility_Helper<Vector>
{
typedef typename Element::GeometryType GeometryType;
typedef typename GeometryType::CoordinatesArrayType CoordinatesArrayType;
/// Interpolation on element
static Vector& CalculateOnPoint(const Variable<Vector>& rVariable,
Vector& rResult, Element::Pointer& pElement, const CoordinatesArrayType& rCoordinates);
};
// Specialization for 3-component array_1d variables (e.g. displacements);
// defined in the matching implementation file, not in this header.
template<>
struct BezierPostUtility_Helper<array_1d<double, 3> >
{
typedef typename Element::GeometryType GeometryType;
typedef typename GeometryType::CoordinatesArrayType CoordinatesArrayType;
/// Interpolation on element
static array_1d<double, 3>& CalculateOnPoint(const Variable<array_1d<double, 3> >& rVariable,
array_1d<double, 3>& rResult, Element::Pointer& pElement, const CoordinatesArrayType& rCoordinates);
};
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function
inline std::istream& operator >>(std::istream& rIStream, BezierPostUtility& rThis)
{
// The utility has no state to read: extraction is a deliberate no-op.
return rIStream;
}
/// output stream function
inline std::ostream& operator <<(std::ostream& rOStream,
const BezierPostUtility& rThis)
{
// Prints "BezierPostUtility", a newline, then the (currently empty) data section.
rThis.PrintInfo(rOStream);
rOStream << std::endl;
rThis.PrintData(rOStream);
return rOStream;
}
///@}
///@} addtogroup block
}// namespace Kratos.
#undef DEBUG_LEVEL1
#undef DEBUG_LEVEL2
#undef DEBUG_MULTISOLVE
#undef DEBUG_GENERATE_MESH
#undef ENABLE_PROFILING
#endif
|
ExampleReservoirs_Shared.h | /**
* grove: ExampleReservoirs_Shared.h
* Copyright (c) Torr Vision Group, University of Oxford, 2017. All rights reserved.
*/
#ifndef H_GROVE_EXAMPLERESERVOIRS_SHARED
#define H_GROVE_EXAMPLERESERVOIRS_SHARED
#include <ORUtils/PlatformIndependence.h>
#define ALWAYS_ADD_EXAMPLES 0
namespace grove {
/**
* \brief Attempts to add an example to some reservoirs.
*
* If the example is valid, we attempt to add it to each specified reservoir. If a
* reservoir is not full, then the example is added. Otherwise, if ALWAYS_ADD_EXAMPLES
* is 1, a randomly-selected existing example is discarded and replaced by the current
* example. If ALWAYS_ADD_EXAMPLES is 0, then an additional random decision is made as
* to *whether* to replace an existing example.
*
* \param example The example to attempt to add to the reservoirs.
* \param reservoirIndices The indices of the reservoirs to which to attempt to add the example.
* \param reservoirIndexCount The number of reservoirs to which to attempt to add the example.
* \param reservoirs The example reservoirs: an image in which each row allows the storage of up to reservoirCapacity examples.
* \param reservoirSizes The current size of each reservoir.
* \param reservoirAddCalls The number of times the insertion of an example has been attempted for each reservoir.
* \param reservoirCapacity The capacity (maximum size) of each reservoir.
* \param randomGenerator A random number generator.
*/
template <typename ExampleType, typename RNGType>
_CPU_AND_GPU_CODE_TEMPLATE_
inline void add_example_to_reservoirs(const ExampleType& example, const int *reservoirIndices, uint32_t reservoirIndexCount,
                                      ExampleType *reservoirs, int *reservoirSizes, int *reservoirAddCalls, uint32_t reservoirCapacity,
                                      RNGType& randomGenerator)
{
  // If the example is invalid, early out.
  if(!example.valid) return;

  // Try to add the example to each specified reservoir.
  for(uint32_t i = 0; i < reservoirIndexCount; ++i)
  {
    // The reservoir index (this corresponds to a row in the reservoirs image).
    const int reservoirIdx = reservoirIndices[i];

    // The raster index (in the reservoirs image) of the first example in the reservoir.
    const int reservoirStartIdx = reservoirIdx * reservoirCapacity;

    // Get the total number of add calls that have ever been made for the current reservoir, and increment it for next time.
    uint32_t oldAddCallsCount = 0;
#ifdef __CUDACC__
    oldAddCallsCount = atomicAdd(&reservoirAddCalls[reservoirIdx], 1);
#else
    // FIX: test definedness via defined(WITH_OPENMP) — the previous
    // `#elif WITH_OPENMP` was a preprocessor error whenever the macro is
    // defined with no value (the usual -DWITH_OPENMP), and definedness is
    // what the `#ifdef WITH_OPENMP` further below already checks.
#ifdef WITH_OPENMP3
#pragma omp atomic capture
#elif defined(WITH_OPENMP)
#pragma omp critical
#endif
    oldAddCallsCount = reservoirAddCalls[reservoirIdx]++;
#endif

    // If the old total number of add calls is less than the reservoir's capacity, then we can immediately add the example.
    // Otherwise, we need to decide whether or not to replace an existing example with this one.
    if(oldAddCallsCount < reservoirCapacity)
    {
      // Store the example in the reservoir.
      reservoirs[reservoirStartIdx + oldAddCallsCount] = example;

      // Increment the reservoir's size. Note that it is not strictly necessary to
      // maintain the reservoir sizes separately, since we can obtain the same
      // information from reservoirAddCalls by clamping the values to the reservoir
      // capacity, but writing it this way is much clearer and the cost in efficiency
      // is limited in practice.
#ifdef __CUDACC__
      atomicAdd(&reservoirSizes[reservoirIdx], 1);
#else
#ifdef WITH_OPENMP
#pragma omp atomic
#endif
      ++reservoirSizes[reservoirIdx];
#endif
    }
    else
    {
#if ALWAYS_ADD_EXAMPLES
      // Generate a random offset that will always result in an example being evicted from the reservoir.
      const uint32_t randomOffset = randomGenerator.generate_int_from_uniform(0, reservoirCapacity - 1);
#else
      // Generate a random offset that may or may not result in an example being evicted from the reservoir.
      const uint32_t randomOffset = randomGenerator.generate_int_from_uniform(0, oldAddCallsCount - 1);
#endif

      // If the random offset corresponds to an example in the reservoir, replace that with the new example.
      if(randomOffset < reservoirCapacity)
      {
        reservoirs[reservoirStartIdx + randomOffset] = example;
      }
    }
  }
}
}
#endif
|
wino_conv_kernel_x86.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: haoluo@openailab.com
*/
#include <stdint.h>
#include <stdlib.h>
#include <math.h>
#include "wino_conv_kernel_x86.h"
#define TILE 4
#define ELEM_SIZE ((TILE + 2) * (TILE + 2))
#define WINO_MAX(a, b) ((a) > (b) ? (a) : (b))
#define WINO_MIN(a, b) ((a) < (b) ? (a) : (b))
/* In-place ReLU over `size` floats: negatives become 0.  When `activation`
 * is positive it also acts as an upper clamp (e.g. activation == 6 gives
 * ReLU6).  Comparisons mirror the WINO_MAX/WINO_MIN ternaries exactly. */
static void relu(float* data, int size, int activation)
{
    for (int idx = 0; idx < size; idx++)
    {
        float v = data[idx];
        v = (v > (float)0) ? v : (float)0; /* same as WINO_MAX(v, 0) */
        if (activation > 0)
        {
            v = (v < (float)activation) ? v : (float)activation; /* WINO_MIN */
        }
        data[idx] = v;
    }
}
/* Bytes needed for the winograd-transformed kernel buffer:
 * outch * inch * 36 floats, plus 128 bytes of slack ("caution" margin
 * kept from the original).  `param` is unused but kept for interface
 * consistency with the other prerun helpers. */
static int get_private_mem_size(struct ir_tensor* filter, struct conv_param* param)
{
    const int outch = filter->dims[0];
    const int inch = filter->dims[1];
    const int trans_ker_size = outch * inch * ELEM_SIZE * sizeof(float);
    return trans_ker_size + 128; // caution
}
/* Embed an m x n float matrix into an m_align x n_align destination at row
 * offset pad_h / column offset pad_w.  Cells outside the copied region are
 * left untouched, so a pre-zeroed dst yields zero padding.  When the
 * destination is not larger in either dimension, the data is copied
 * verbatim in one bulk memcpy. */
static void pad_0_align_2D(float* dst, float* src, int m, int n, int m_align, int n_align, int pad_h, int pad_w)
{
    if (n >= n_align && m >= m_align)
    {
        memcpy(dst, src, m * n * sizeof(float));
        return;
    }

    for (int row = 0; row < m; ++row)
    {
        float* dst_row = dst + (row + pad_h) * n_align + pad_w;
        memcpy(dst_row, src + row * n, n * sizeof(float));
    }
}
// pad 0 in right and down side on 3D
/* Channel-wise pad_0_align_2D: embeds each of the c source planes (m x n)
 * into its m_align x n_align destination plane at offset (pad_h, pad_w).
 * Falls back to a single bulk copy when no alignment padding is needed. */
static void pad_0_align_3D(float* dst, float* src, int m, int n, int m_align, int n_align, int c, int pad_h, int pad_w)
{
    if (n >= n_align && m >= m_align)
    {
        memcpy(dst, src, c * m * n * sizeof(float));
        return;
    }

    for (int ch = 0; ch < c; ++ch)
    {
        pad_0_align_2D(dst + ch * m_align * n_align, src + ch * m * n, m, n, m_align, n_align, pad_h, pad_w);
    }
}
/* Inverse of pad_0_align_2D: crop an m x n window out of an
 * m_align x n_align source, starting at row pad_h / column pad_w.
 * When the source is not larger in either dimension, the data is copied
 * verbatim in one bulk memcpy. */
static void delete_0_2D(float* dst, float* src, int m_align, int n_align, int m, int n, int pad_h, int pad_w)
{
    if (n >= n_align && m >= m_align)
    {
        memcpy(dst, src, m * n * sizeof(float));
        return;
    }

    for (int row = 0; row < m; ++row)
    {
        float* src_row = src + (row + pad_h) * n_align + pad_w;
        memcpy(dst + row * n, src_row, n * sizeof(float));
    }
}
// pad 0 in right and down side on 3D
/* Channel-wise delete_0_2D: crops each of the c planes of an
 * m_align x n_align source down to m x n, starting at (pad_h, pad_w).
 * Falls back to a single bulk copy when no cropping is needed. */
static void delete_0_3D(float* dst, float* src, int m_align, int n_align, int m, int n, int c, int pad_h, int pad_w)
{
    if (n >= n_align && m >= m_align)
    {
        memcpy(dst, src, c * m * n * sizeof(float));
        return;
    }

    for (int ch = 0; ch < c; ++ch)
    {
        delete_0_2D(dst + ch * m * n, src + ch * m_align * n_align, m_align, n_align, m, n, pad_h, pad_w);
    }
}
/* 3x3 stride-1 convolution via the Winograd F(4,3) algorithm (4x4 output
 * tiles computed from 6x6 input tiles).
 *
 * bottom_blob      padded input, inch planes of h x w floats where
 *                  w = outw_align + 2 and h = outh_align + 2 (the incoming
 *                  w/h parameters are recomputed below and kept only for
 *                  interface compatibility)
 * top_blob         output, outch planes of outh x outw floats
 * kernel_tm_test   kernel already transformed and interleaved by
 *                  conv3x3s1_winograd43_transform_kernel_sse
 * dot_block        scratch for transformed output tiles (36 * tiles * outch)
 * transform_input  scratch for transformed input tiles (36 * tiles * inch)
 * output_bordered  scratch output, used when outw/outh are not multiples of 4
 * _bias            per-output-channel bias, may be NULL
 * num_thread       OpenMP thread count for the parallel sections
 *
 * Fix vs. previous revision: the final crop condition compared
 * outh_align against outw (height/width typo); it now compares against
 * outh, so non-square outputs are cropped exactly when padding was added.
 */
void conv3x3s1_winograd43_sse(float* bottom_blob, float* top_blob, float* kernel_tm_test, float* dot_block,
                              float* transform_input, float* output_bordered, float* _bias, int w, int h, int inch,
                              int outw, int outh, int outch, int num_thread)
{
    const float* bias = _bias;

    // pad to 4n+2, winograd F(4,3)
    float* bottom_blob_bordered = bottom_blob;
    int outw_align = (outw + 3) / 4 * 4;
    int outh_align = (outh + 3) / 4 * 4;

    w = outw_align + 2;
    h = outh_align + 2;

    // BEGIN transform input
    float* bottom_blob_tm = NULL;
    {
        int w_tm = outw_align / 4 * 6;
        int h_tm = outh_align / 4 * 6;
        int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 6;

        const int tiles = nColBlocks * nRowBlocks;
        const int tiles_n = 4 * inch * tiles;

        bottom_blob_tm = transform_input;

        // BT
        // const float itm[4][4] = {
        //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
        //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
        //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
        //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f,  0.0f,-5.0f, 0.0f, 1.0f}
        // };

        // 0 =  4 * r00 - 5 * r02 + r04
        // 1 = -4 * (r01 + r02) + r03 + r04
        // 2 =  4 * (r01 - r02) - r03 + r04
        // 3 = -2 * r01 - r02 + 2 * r03 + r04
        // 4 =  2 * r01 - r02 - 2 * r03 + r04
        // 5 =  4 * r01 - 5 * r03 + r05

#if __AVX__
        __m256 _1_n = _mm256_set1_ps(-1);
        __m256 _2_p = _mm256_set1_ps(2);
        __m256 _2_n = _mm256_set1_ps(-2);
        __m256 _4_p = _mm256_set1_ps(4);
        __m256 _4_n = _mm256_set1_ps(-4);
        __m256 _5_n = _mm256_set1_ps(-5);
#endif

#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < inch; q++)
        {
            const float* img = bottom_blob_bordered + q * w * h;

            for (int j = 0; j < nColBlocks; j++)
            {
                const float* r0 = img + w * j * 4;
                const float* r1 = r0 + w;
                const float* r2 = r1 + w;
                const float* r3 = r2 + w;
                const float* r4 = r3 + w;
                const float* r5 = r4 + w;

                for (int i = 0; i < nRowBlocks; i++)
                {
                    float* out_tm0 = bottom_blob_tm + 4 * inch * (j * nRowBlocks + i) + 4 * q;
                    float* out_tm1 = out_tm0 + tiles_n;
                    float* out_tm2 = out_tm0 + 2 * tiles_n;
                    float* out_tm3 = out_tm0 + 3 * tiles_n;
                    float* out_tm4 = out_tm0 + 4 * tiles_n;
                    float* out_tm5 = out_tm0 + 5 * tiles_n;
                    float* out_tm6 = out_tm0 + 6 * tiles_n;
                    float* out_tm7 = out_tm0 + 7 * tiles_n;
                    float* out_tm8 = out_tm0 + 8 * tiles_n;
#if __AVX__
                    __m256 _d0, _d1, _d2, _d3, _d4, _d5;
                    __m256 _w0, _w1, _w2, _w3, _w4, _w5;
                    __m256 _t0, _t1, _t2, _t3, _t4, _t5;
                    __m256 _n0, _n1, _n2, _n3, _n4, _n5;
                    // load
                    _d0 = _mm256_loadu_ps(r0);
                    _d1 = _mm256_loadu_ps(r1);
                    _d2 = _mm256_loadu_ps(r2);
                    _d3 = _mm256_loadu_ps(r3);
                    _d4 = _mm256_loadu_ps(r4);
                    _d5 = _mm256_loadu_ps(r5);

                    // w = B_t * d
                    _w0 = _mm256_mul_ps(_d0, _4_p);
                    _w0 = _mm256_fmadd_ps(_d2, _5_n, _w0);
                    _w0 = _mm256_add_ps(_w0, _d4);

                    _w1 = _mm256_mul_ps(_d1, _4_n);
                    _w1 = _mm256_fmadd_ps(_d2, _4_n, _w1);
                    _w1 = _mm256_add_ps(_w1, _d3);
                    _w1 = _mm256_add_ps(_w1, _d4);

                    _w2 = _mm256_mul_ps(_d1, _4_p);
                    _w2 = _mm256_fmadd_ps(_d2, _4_n, _w2);
                    _w2 = _mm256_fmadd_ps(_d3, _1_n, _w2);
                    _w2 = _mm256_add_ps(_w2, _d4);

                    _w3 = _mm256_mul_ps(_d1, _2_n);
                    _w3 = _mm256_fmadd_ps(_d2, _1_n, _w3);
                    _w3 = _mm256_fmadd_ps(_d3, _2_p, _w3);
                    _w3 = _mm256_add_ps(_w3, _d4);

                    _w4 = _mm256_mul_ps(_d1, _2_p);
                    _w4 = _mm256_fmadd_ps(_d2, _1_n, _w4);
                    _w4 = _mm256_fmadd_ps(_d3, _2_n, _w4);
                    _w4 = _mm256_add_ps(_w4, _d4);

                    _w5 = _mm256_mul_ps(_d1, _4_p);
                    _w5 = _mm256_fmadd_ps(_d3, _5_n, _w5);
                    _w5 = _mm256_add_ps(_w5, _d5);

                    // transpose d to d_t
#ifdef _WIN32
                    {
                        _t0.m256_f32[0] = _w0.m256_f32[0];
                        _t1.m256_f32[0] = _w0.m256_f32[1];
                        _t2.m256_f32[0] = _w0.m256_f32[2];
                        _t3.m256_f32[0] = _w0.m256_f32[3];
                        _t4.m256_f32[0] = _w0.m256_f32[4];
                        _t5.m256_f32[0] = _w0.m256_f32[5];
                        _t0.m256_f32[1] = _w1.m256_f32[0];
                        _t1.m256_f32[1] = _w1.m256_f32[1];
                        _t2.m256_f32[1] = _w1.m256_f32[2];
                        _t3.m256_f32[1] = _w1.m256_f32[3];
                        _t4.m256_f32[1] = _w1.m256_f32[4];
                        _t5.m256_f32[1] = _w1.m256_f32[5];
                        _t0.m256_f32[2] = _w2.m256_f32[0];
                        _t1.m256_f32[2] = _w2.m256_f32[1];
                        _t2.m256_f32[2] = _w2.m256_f32[2];
                        _t3.m256_f32[2] = _w2.m256_f32[3];
                        _t4.m256_f32[2] = _w2.m256_f32[4];
                        _t5.m256_f32[2] = _w2.m256_f32[5];
                        _t0.m256_f32[3] = _w3.m256_f32[0];
                        _t1.m256_f32[3] = _w3.m256_f32[1];
                        _t2.m256_f32[3] = _w3.m256_f32[2];
                        _t3.m256_f32[3] = _w3.m256_f32[3];
                        _t4.m256_f32[3] = _w3.m256_f32[4];
                        _t5.m256_f32[3] = _w3.m256_f32[5];
                        _t0.m256_f32[4] = _w4.m256_f32[0];
                        _t1.m256_f32[4] = _w4.m256_f32[1];
                        _t2.m256_f32[4] = _w4.m256_f32[2];
                        _t3.m256_f32[4] = _w4.m256_f32[3];
                        _t4.m256_f32[4] = _w4.m256_f32[4];
                        _t5.m256_f32[4] = _w4.m256_f32[5];
                        _t0.m256_f32[5] = _w5.m256_f32[0];
                        _t1.m256_f32[5] = _w5.m256_f32[1];
                        _t2.m256_f32[5] = _w5.m256_f32[2];
                        _t3.m256_f32[5] = _w5.m256_f32[3];
                        _t4.m256_f32[5] = _w5.m256_f32[4];
                        _t5.m256_f32[5] = _w5.m256_f32[5];
                    }
#else
                    {
                        _t0[0] = _w0[0];
                        _t1[0] = _w0[1];
                        _t2[0] = _w0[2];
                        _t3[0] = _w0[3];
                        _t4[0] = _w0[4];
                        _t5[0] = _w0[5];
                        _t0[1] = _w1[0];
                        _t1[1] = _w1[1];
                        _t2[1] = _w1[2];
                        _t3[1] = _w1[3];
                        _t4[1] = _w1[4];
                        _t5[1] = _w1[5];
                        _t0[2] = _w2[0];
                        _t1[2] = _w2[1];
                        _t2[2] = _w2[2];
                        _t3[2] = _w2[3];
                        _t4[2] = _w2[4];
                        _t5[2] = _w2[5];
                        _t0[3] = _w3[0];
                        _t1[3] = _w3[1];
                        _t2[3] = _w3[2];
                        _t3[3] = _w3[3];
                        _t4[3] = _w3[4];
                        _t5[3] = _w3[5];
                        _t0[4] = _w4[0];
                        _t1[4] = _w4[1];
                        _t2[4] = _w4[2];
                        _t3[4] = _w4[3];
                        _t4[4] = _w4[4];
                        _t5[4] = _w4[5];
                        _t0[5] = _w5[0];
                        _t1[5] = _w5[1];
                        _t2[5] = _w5[2];
                        _t3[5] = _w5[3];
                        _t4[5] = _w5[4];
                        _t5[5] = _w5[5];
                    }
#endif
                    // d = B_t * d_t
                    _n0 = _mm256_mul_ps(_t0, _4_p);
                    _n0 = _mm256_fmadd_ps(_t2, _5_n, _n0);
                    _n0 = _mm256_add_ps(_n0, _t4);

                    _n1 = _mm256_mul_ps(_t1, _4_n);
                    _n1 = _mm256_fmadd_ps(_t2, _4_n, _n1);
                    _n1 = _mm256_add_ps(_n1, _t3);
                    _n1 = _mm256_add_ps(_n1, _t4);

                    _n2 = _mm256_mul_ps(_t1, _4_p);
                    _n2 = _mm256_fmadd_ps(_t2, _4_n, _n2);
                    _n2 = _mm256_fmadd_ps(_t3, _1_n, _n2);
                    _n2 = _mm256_add_ps(_n2, _t4);

                    _n3 = _mm256_mul_ps(_t1, _2_n);
                    _n3 = _mm256_fmadd_ps(_t2, _1_n, _n3);
                    _n3 = _mm256_fmadd_ps(_t3, _2_p, _n3);
                    _n3 = _mm256_add_ps(_n3, _t4);

                    _n4 = _mm256_mul_ps(_t1, _2_p);
                    _n4 = _mm256_fmadd_ps(_t2, _1_n, _n4);
                    _n4 = _mm256_fmadd_ps(_t3, _2_n, _n4);
                    _n4 = _mm256_add_ps(_n4, _t4);

                    _n5 = _mm256_mul_ps(_t1, _4_p);
                    _n5 = _mm256_fmadd_ps(_t3, _5_n, _n5);
                    _n5 = _mm256_add_ps(_n5, _t5);

                    // save to out_tm
                    float output_n0[8] = {0.f};
                    _mm256_storeu_ps(output_n0, _n0);
                    float output_n1[8] = {0.f};
                    _mm256_storeu_ps(output_n1, _n1);
                    float output_n2[8] = {0.f};
                    _mm256_storeu_ps(output_n2, _n2);
                    float output_n3[8] = {0.f};
                    _mm256_storeu_ps(output_n3, _n3);
                    float output_n4[8] = {0.f};
                    _mm256_storeu_ps(output_n4, _n4);
                    float output_n5[8] = {0.f};
                    _mm256_storeu_ps(output_n5, _n5);

                    out_tm0[0] = output_n0[0];
                    out_tm0[1] = output_n0[1];
                    out_tm0[2] = output_n0[2];
                    out_tm0[3] = output_n0[3];
                    out_tm1[0] = output_n0[4];
                    out_tm1[1] = output_n0[5];
                    out_tm1[2] = output_n1[0];
                    out_tm1[3] = output_n1[1];

                    out_tm2[0] = output_n1[2];
                    out_tm2[1] = output_n1[3];
                    out_tm2[2] = output_n1[4];
                    out_tm2[3] = output_n1[5];
                    out_tm3[0] = output_n2[0];
                    out_tm3[1] = output_n2[1];
                    out_tm3[2] = output_n2[2];
                    out_tm3[3] = output_n2[3];

                    out_tm4[0] = output_n2[4];
                    out_tm4[1] = output_n2[5];
                    out_tm4[2] = output_n3[0];
                    out_tm4[3] = output_n3[1];
                    out_tm5[0] = output_n3[2];
                    out_tm5[1] = output_n3[3];
                    out_tm5[2] = output_n3[4];
                    out_tm5[3] = output_n3[5];

                    out_tm6[0] = output_n4[0];
                    out_tm6[1] = output_n4[1];
                    out_tm6[2] = output_n4[2];
                    out_tm6[3] = output_n4[3];
                    out_tm7[0] = output_n4[4];
                    out_tm7[1] = output_n4[5];
                    out_tm7[2] = output_n5[0];
                    out_tm7[3] = output_n5[1];

                    out_tm8[0] = output_n5[2];
                    out_tm8[1] = output_n5[3];
                    out_tm8[2] = output_n5[4];
                    out_tm8[3] = output_n5[5];
#else
                    float d0[6], d1[6], d2[6], d3[6], d4[6], d5[6];
                    float w0[6], w1[6], w2[6], w3[6], w4[6], w5[6];
                    float t0[6], t1[6], t2[6], t3[6], t4[6], t5[6];
                    // load
                    for (int n = 0; n < 6; n++)
                    {
                        d0[n] = r0[n];
                        d1[n] = r1[n];
                        d2[n] = r2[n];
                        d3[n] = r3[n];
                        d4[n] = r4[n];
                        d5[n] = r5[n];
                    }
                    // w = B_t * d
                    for (int n = 0; n < 6; n++)
                    {
                        w0[n] = 4 * d0[n] - 5 * d2[n] + d4[n];
                        w1[n] = -4 * d1[n] - 4 * d2[n] + d3[n] + d4[n];
                        w2[n] = 4 * d1[n] - 4 * d2[n] - d3[n] + d4[n];
                        w3[n] = -2 * d1[n] - d2[n] + 2 * d3[n] + d4[n];
                        w4[n] = 2 * d1[n] - d2[n] - 2 * d3[n] + d4[n];
                        w5[n] = 4 * d1[n] - 5 * d3[n] + d5[n];
                    }
                    // transpose d to d_t
                    {
                        t0[0] = w0[0];
                        t1[0] = w0[1];
                        t2[0] = w0[2];
                        t3[0] = w0[3];
                        t4[0] = w0[4];
                        t5[0] = w0[5];
                        t0[1] = w1[0];
                        t1[1] = w1[1];
                        t2[1] = w1[2];
                        t3[1] = w1[3];
                        t4[1] = w1[4];
                        t5[1] = w1[5];
                        t0[2] = w2[0];
                        t1[2] = w2[1];
                        t2[2] = w2[2];
                        t3[2] = w2[3];
                        t4[2] = w2[4];
                        t5[2] = w2[5];
                        t0[3] = w3[0];
                        t1[3] = w3[1];
                        t2[3] = w3[2];
                        t3[3] = w3[3];
                        t4[3] = w3[4];
                        t5[3] = w3[5];
                        t0[4] = w4[0];
                        t1[4] = w4[1];
                        t2[4] = w4[2];
                        t3[4] = w4[3];
                        t4[4] = w4[4];
                        t5[4] = w4[5];
                        t0[5] = w5[0];
                        t1[5] = w5[1];
                        t2[5] = w5[2];
                        t3[5] = w5[3];
                        t4[5] = w5[4];
                        t5[5] = w5[5];
                    }
                    // d = B_t * d_t
                    for (int n = 0; n < 6; n++)
                    {
                        d0[n] = 4 * t0[n] - 5 * t2[n] + t4[n];
                        d1[n] = -4 * t1[n] - 4 * t2[n] + t3[n] + t4[n];
                        d2[n] = 4 * t1[n] - 4 * t2[n] - t3[n] + t4[n];
                        d3[n] = -2 * t1[n] - t2[n] + 2 * t3[n] + t4[n];
                        d4[n] = 2 * t1[n] - t2[n] - 2 * t3[n] + t4[n];
                        d5[n] = 4 * t1[n] - 5 * t3[n] + t5[n];
                    }
                    // save to out_tm
                    {
                        out_tm0[0] = d0[0];
                        out_tm0[1] = d0[1];
                        out_tm0[2] = d0[2];
                        out_tm0[3] = d0[3];
                        out_tm1[0] = d0[4];
                        out_tm1[1] = d0[5];
                        out_tm1[2] = d1[0];
                        out_tm1[3] = d1[1];

                        out_tm2[0] = d1[2];
                        out_tm2[1] = d1[3];
                        out_tm2[2] = d1[4];
                        out_tm2[3] = d1[5];
                        out_tm3[0] = d2[0];
                        out_tm3[1] = d2[1];
                        out_tm3[2] = d2[2];
                        out_tm3[3] = d2[3];

                        out_tm4[0] = d2[4];
                        out_tm4[1] = d2[5];
                        out_tm4[2] = d3[0];
                        out_tm4[3] = d3[1];
                        out_tm5[0] = d3[2];
                        out_tm5[1] = d3[3];
                        out_tm5[2] = d3[4];
                        out_tm5[3] = d3[5];

                        out_tm6[0] = d4[0];
                        out_tm6[1] = d4[1];
                        out_tm6[2] = d4[2];
                        out_tm6[3] = d4[3];
                        out_tm7[0] = d4[4];
                        out_tm7[1] = d4[5];
                        out_tm7[2] = d5[0];
                        out_tm7[3] = d5[1];

                        out_tm8[0] = d5[2];
                        out_tm8[1] = d5[3];
                        out_tm8[2] = d5[4];
                        out_tm8[3] = d5[5];
                    }
#endif // __AVX__
                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                    r3 += 4;
                    r4 += 4;
                    r5 += 4;
                }
            }
        }
    }

    // BEGIN dot
    float* top_blob_tm = NULL;
    {
        int w_tm = outw_align / 4 * 6;
        int h_tm = outh_align / 4 * 6;

        int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 6;

        const int tiles = nColBlocks * nRowBlocks;
        const int tiles_n = 36 * tiles;

        top_blob_tm = dot_block;

#pragma omp parallel for num_threads(num_thread)
        for (int r = 0; r < 9; r++)
        {
            int nn_outch = 0;
            int remain_outch_start = 0;

            nn_outch = outch >> 3;
            remain_outch_start = nn_outch << 3;

            for (int pp = 0; pp < nn_outch; pp++)
            {
                int p = pp << 3;

                float* output0_tm = top_blob_tm + tiles_n * p;
                float* output1_tm = top_blob_tm + tiles_n * (p + 1);
                float* output2_tm = top_blob_tm + tiles_n * (p + 2);
                float* output3_tm = top_blob_tm + tiles_n * (p + 3);
                float* output4_tm = top_blob_tm + tiles_n * (p + 4);
                float* output5_tm = top_blob_tm + tiles_n * (p + 5);
                float* output6_tm = top_blob_tm + tiles_n * (p + 6);
                float* output7_tm = top_blob_tm + tiles_n * (p + 7);

                output0_tm = output0_tm + r * 4;
                output1_tm = output1_tm + r * 4;
                output2_tm = output2_tm + r * 4;
                output3_tm = output3_tm + r * 4;
                output4_tm = output4_tm + r * 4;
                output5_tm = output5_tm + r * 4;
                output6_tm = output6_tm + r * 4;
                output7_tm = output7_tm + r * 4;

                for (int i = 0; i < tiles; i++)
                {
                    const float* kptr = kernel_tm_test + 4 * r * inch * outch + p / 8 * inch * 32;
                    const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
                    float zero_val = 0.f;
                    __m128 _sum0 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum1 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum2 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum3 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum4 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum5 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum6 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum7 = _mm_broadcast_ss(&zero_val);
#else
                    __m128 _sum0 = _mm_set1_ps(0.f);
                    __m128 _sum1 = _mm_set1_ps(0.f);
                    __m128 _sum2 = _mm_set1_ps(0.f);
                    __m128 _sum3 = _mm_set1_ps(0.f);
                    __m128 _sum4 = _mm_set1_ps(0.f);
                    __m128 _sum5 = _mm_set1_ps(0.f);
                    __m128 _sum6 = _mm_set1_ps(0.f);
                    __m128 _sum7 = _mm_set1_ps(0.f);
#endif
                    int q = 0;
                    // unrolled over 4 input channels at a time
                    for (; q + 3 < inch; q = q + 4)
                    {
                        __m128 _r0 = _mm_loadu_ps(r0);
                        __m128 _r1 = _mm_loadu_ps(r0 + 4);
                        __m128 _r2 = _mm_loadu_ps(r0 + 8);
                        __m128 _r3 = _mm_loadu_ps(r0 + 12);

                        __m128 _k0 = _mm_loadu_ps(kptr);
                        __m128 _k1 = _mm_loadu_ps(kptr + 4);
                        __m128 _k2 = _mm_loadu_ps(kptr + 8);
                        __m128 _k3 = _mm_loadu_ps(kptr + 12);
                        __m128 _k4 = _mm_loadu_ps(kptr + 16);
                        __m128 _k5 = _mm_loadu_ps(kptr + 20);
                        __m128 _k6 = _mm_loadu_ps(kptr + 24);
                        __m128 _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
                        _sum4 = _mm_fmadd_ps(_r0, _k4, _sum4);
                        _sum5 = _mm_fmadd_ps(_r0, _k5, _sum5);
                        _sum6 = _mm_fmadd_ps(_r0, _k6, _sum6);
                        _sum7 = _mm_fmadd_ps(_r0, _k7, _sum7);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
                        _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4));
                        _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5));
                        _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6));
                        _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7));
#endif
                        kptr += 32;
                        _k0 = _mm_loadu_ps(kptr);
                        _k1 = _mm_loadu_ps(kptr + 4);
                        _k2 = _mm_loadu_ps(kptr + 8);
                        _k3 = _mm_loadu_ps(kptr + 12);
                        _k4 = _mm_loadu_ps(kptr + 16);
                        _k5 = _mm_loadu_ps(kptr + 20);
                        _k6 = _mm_loadu_ps(kptr + 24);
                        _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r1, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r1, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r1, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r1, _k3, _sum3);
                        _sum4 = _mm_fmadd_ps(_r1, _k4, _sum4);
                        _sum5 = _mm_fmadd_ps(_r1, _k5, _sum5);
                        _sum6 = _mm_fmadd_ps(_r1, _k6, _sum6);
                        _sum7 = _mm_fmadd_ps(_r1, _k7, _sum7);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r1, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r1, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r1, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r1, _k3));
                        _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r1, _k4));
                        _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r1, _k5));
                        _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r1, _k6));
                        _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r1, _k7));
#endif
                        kptr += 32;
                        _k0 = _mm_loadu_ps(kptr);
                        _k1 = _mm_loadu_ps(kptr + 4);
                        _k2 = _mm_loadu_ps(kptr + 8);
                        _k3 = _mm_loadu_ps(kptr + 12);
                        _k4 = _mm_loadu_ps(kptr + 16);
                        _k5 = _mm_loadu_ps(kptr + 20);
                        _k6 = _mm_loadu_ps(kptr + 24);
                        _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r2, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r2, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r2, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r2, _k3, _sum3);
                        _sum4 = _mm_fmadd_ps(_r2, _k4, _sum4);
                        _sum5 = _mm_fmadd_ps(_r2, _k5, _sum5);
                        _sum6 = _mm_fmadd_ps(_r2, _k6, _sum6);
                        _sum7 = _mm_fmadd_ps(_r2, _k7, _sum7);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r2, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r2, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r2, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r2, _k3));
                        _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r2, _k4));
                        _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r2, _k5));
                        _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r2, _k6));
                        _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r2, _k7));
#endif
                        kptr += 32;
                        _k0 = _mm_loadu_ps(kptr);
                        _k1 = _mm_loadu_ps(kptr + 4);
                        _k2 = _mm_loadu_ps(kptr + 8);
                        _k3 = _mm_loadu_ps(kptr + 12);
                        _k4 = _mm_loadu_ps(kptr + 16);
                        _k5 = _mm_loadu_ps(kptr + 20);
                        _k6 = _mm_loadu_ps(kptr + 24);
                        _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r3, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r3, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r3, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r3, _k3, _sum3);
                        _sum4 = _mm_fmadd_ps(_r3, _k4, _sum4);
                        _sum5 = _mm_fmadd_ps(_r3, _k5, _sum5);
                        _sum6 = _mm_fmadd_ps(_r3, _k6, _sum6);
                        _sum7 = _mm_fmadd_ps(_r3, _k7, _sum7);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r3, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r3, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r3, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r3, _k3));
                        _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r3, _k4));
                        _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r3, _k5));
                        _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r3, _k6));
                        _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r3, _k7));
#endif
                        kptr += 32;
                        r0 += 16;
                    }
                    // remaining input channels
                    for (; q < inch; q++)
                    {
                        __m128 _r0 = _mm_loadu_ps(r0);
                        __m128 _k0 = _mm_loadu_ps(kptr);
                        __m128 _k1 = _mm_loadu_ps(kptr + 4);
                        __m128 _k2 = _mm_loadu_ps(kptr + 8);
                        __m128 _k3 = _mm_loadu_ps(kptr + 12);
                        __m128 _k4 = _mm_loadu_ps(kptr + 16);
                        __m128 _k5 = _mm_loadu_ps(kptr + 20);
                        __m128 _k6 = _mm_loadu_ps(kptr + 24);
                        __m128 _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
                        _sum4 = _mm_fmadd_ps(_r0, _k4, _sum4);
                        _sum5 = _mm_fmadd_ps(_r0, _k5, _sum5);
                        _sum6 = _mm_fmadd_ps(_r0, _k6, _sum6);
                        _sum7 = _mm_fmadd_ps(_r0, _k7, _sum7);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
                        _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4));
                        _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5));
                        _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6));
                        _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7));
#endif
                        kptr += 32;
                        r0 += 4;
                    }
                    _mm_storeu_ps(output0_tm, _sum0);
                    _mm_storeu_ps(output1_tm, _sum1);
                    _mm_storeu_ps(output2_tm, _sum2);
                    _mm_storeu_ps(output3_tm, _sum3);
                    _mm_storeu_ps(output4_tm, _sum4);
                    _mm_storeu_ps(output5_tm, _sum5);
                    _mm_storeu_ps(output6_tm, _sum6);
                    _mm_storeu_ps(output7_tm, _sum7);
#else
                    float sum0[4] = {0};
                    float sum1[4] = {0};
                    float sum2[4] = {0};
                    float sum3[4] = {0};
                    float sum4[4] = {0};
                    float sum5[4] = {0};
                    float sum6[4] = {0};
                    float sum7[4] = {0};

                    for (int q = 0; q < inch; q++)
                    {
                        for (int n = 0; n < 4; n++)
                        {
                            sum0[n] += r0[n] * kptr[n];
                            sum1[n] += r0[n] * kptr[n + 4];
                            sum2[n] += r0[n] * kptr[n + 8];
                            sum3[n] += r0[n] * kptr[n + 12];
                            sum4[n] += r0[n] * kptr[n + 16];
                            sum5[n] += r0[n] * kptr[n + 20];
                            sum6[n] += r0[n] * kptr[n + 24];
                            sum7[n] += r0[n] * kptr[n + 28];
                        }
                        kptr += 32;
                        r0 += 4;
                    }

                    for (int n = 0; n < 4; n++)
                    {
                        output0_tm[n] = sum0[n];
                        output1_tm[n] = sum1[n];
                        output2_tm[n] = sum2[n];
                        output3_tm[n] = sum3[n];
                        output4_tm[n] = sum4[n];
                        output5_tm[n] = sum5[n];
                        output6_tm[n] = sum6[n];
                        output7_tm[n] = sum7[n];
                    }
#endif // __AVX__
                    output0_tm += 36;
                    output1_tm += 36;
                    output2_tm += 36;
                    output3_tm += 36;
                    output4_tm += 36;
                    output5_tm += 36;
                    output6_tm += 36;
                    output7_tm += 36;
                }
            }

            nn_outch = (outch - remain_outch_start) >> 2;

            for (int pp = 0; pp < nn_outch; pp++)
            {
                int p = remain_outch_start + pp * 4;

                float* output0_tm = top_blob_tm + tiles_n * p;
                float* output1_tm = top_blob_tm + tiles_n * (p + 1);
                float* output2_tm = top_blob_tm + tiles_n * (p + 2);
                float* output3_tm = top_blob_tm + tiles_n * (p + 3);

                output0_tm = output0_tm + r * 4;
                output1_tm = output1_tm + r * 4;
                output2_tm = output2_tm + r * 4;
                output3_tm = output3_tm + r * 4;

                for (int i = 0; i < tiles; i++)
                {
                    // NOTE(review): this offset (and the p%4 variant below) counts
                    // the 8-wide groups in units of inch*16 / inch*4, which looks
                    // like an undercount when outch is not a multiple of 8.  It
                    // does mirror the packing in
                    // conv3x3s1_winograd43_transform_kernel_sse, so writer and
                    // reader agree for the common multiple-of-8 case — confirm
                    // before changing either side.
                    const float* kptr = kernel_tm_test + 4 * r * inch * outch + (p / 8 + (p % 8) / 4) * inch * 16;
                    const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
                    float zero_val = 0.f;
                    __m128 _sum0 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum1 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum2 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum3 = _mm_broadcast_ss(&zero_val);
#else
                    __m128 _sum0 = _mm_set1_ps(0.f);
                    __m128 _sum1 = _mm_set1_ps(0.f);
                    __m128 _sum2 = _mm_set1_ps(0.f);
                    __m128 _sum3 = _mm_set1_ps(0.f);
#endif
                    for (int q = 0; q < inch; q++)
                    {
                        __m128 _r0 = _mm_loadu_ps(r0);
                        __m128 _k0 = _mm_loadu_ps(kptr);
                        __m128 _k1 = _mm_loadu_ps(kptr + 4);
                        __m128 _k2 = _mm_loadu_ps(kptr + 8);
                        __m128 _k3 = _mm_loadu_ps(kptr + 12);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
#endif
                        kptr += 16;
                        r0 += 4;
                    }
                    _mm_storeu_ps(output0_tm, _sum0);
                    _mm_storeu_ps(output1_tm, _sum1);
                    _mm_storeu_ps(output2_tm, _sum2);
                    _mm_storeu_ps(output3_tm, _sum3);
#else
                    float sum0[4] = {0};
                    float sum1[4] = {0};
                    float sum2[4] = {0};
                    float sum3[4] = {0};

                    for (int q = 0; q < inch; q++)
                    {
                        for (int n = 0; n < 4; n++)
                        {
                            sum0[n] += r0[n] * kptr[n];
                            sum1[n] += r0[n] * kptr[n + 4];
                            sum2[n] += r0[n] * kptr[n + 8];
                            sum3[n] += r0[n] * kptr[n + 12];
                        }
                        kptr += 16;
                        r0 += 4;
                    }

                    for (int n = 0; n < 4; n++)
                    {
                        output0_tm[n] = sum0[n];
                        output1_tm[n] = sum1[n];
                        output2_tm[n] = sum2[n];
                        output3_tm[n] = sum3[n];
                    }
#endif // __AVX__
                    output0_tm += 36;
                    output1_tm += 36;
                    output2_tm += 36;
                    output3_tm += 36;
                }
            }

            remain_outch_start += nn_outch << 2;

            for (int p = remain_outch_start; p < outch; p++)
            {
                float* output0_tm = top_blob_tm + 36 * tiles * p;

                output0_tm = output0_tm + r * 4;

                for (int i = 0; i < tiles; i++)
                {
                    const float* kptr =
                        kernel_tm_test + 4 * r * inch * outch + (p / 8 + (p % 8) / 4 + p % 4) * inch * 4;
                    const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
                    float zero_val = 0.f;
                    __m128 _sum0 = _mm_broadcast_ss(&zero_val);
#else
                    __m128 _sum0 = _mm_set1_ps(0.f);
#endif
                    for (int q = 0; q < inch; q++)
                    {
                        __m128 _r0 = _mm_loadu_ps(r0);
                        __m128 _k0 = _mm_loadu_ps(kptr);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
#endif
                        kptr += 4;
                        r0 += 4;
                    }
                    _mm_storeu_ps(output0_tm, _sum0);
#else
                    float sum0[4] = {0};

                    for (int q = 0; q < inch; q++)
                    {
                        for (int n = 0; n < 4; n++)
                        {
                            sum0[n] += r0[n] * kptr[n];
                        }
                        kptr += 4;
                        r0 += 4;
                    }

                    for (int n = 0; n < 4; n++)
                    {
                        output0_tm[n] = sum0[n];
                    }
#endif // __AVX__ || __SSE__
                    output0_tm += 36;
                }
            }
        }
    }
    // END dot

    // BEGIN transform output
    float* top_blob_bordered = NULL;
    if (outw_align == outw && outh_align == outh)
    {
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered = output_bordered;
    }
    {
        // AT
        // const float itm[4][6] = {
        //     {1.0f, 1.0f,  1.0f, 1.0f,  1.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
        //     {0.0f, 1.0f,  1.0f, 4.0f,  4.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
        // };

        // 0 = r00 + r01 + r02 + r03 + r04
        // 1 = r01 - r02 + 2 * (r03 - r04)
        // 2 = r01 + r02 + 4 * (r03 + r04)
        // 3 = r01 - r02 + 8 * (r03 - r04) + r05

        int w_tm = outw_align / 4 * 6;
        int h_tm = outh_align / 4 * 6;

        int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 6;

        const int tiles = nColBlocks * nRowBlocks;

#pragma omp parallel for num_threads(num_thread)
        for (int p = 0; p < outch; p++)
        {
            float* out_tile = top_blob_tm + 36 * tiles * p;
            float* outRow0 = top_blob_bordered + outw_align * outh_align * p;
            float* outRow1 = outRow0 + outw_align;
            float* outRow2 = outRow0 + outw_align * 2;
            float* outRow3 = outRow0 + outw_align * 3;

            const float bias0 = bias ? bias[p] : 0.f;

            for (int j = 0; j < nColBlocks; j++)
            {
                for (int i = 0; i < nRowBlocks; i++)
                {
                    // TODO AVX2
                    float s0[6], s1[6], s2[6], s3[6], s4[6], s5[6];
                    float w0[6], w1[6], w2[6], w3[6];
                    float d0[4], d1[4], d2[4], d3[4], d4[4], d5[4];
                    float o0[4], o1[4], o2[4], o3[4];
                    // load
                    for (int n = 0; n < 6; n++)
                    {
                        s0[n] = out_tile[n];
                        s1[n] = out_tile[n + 6];
                        s2[n] = out_tile[n + 12];
                        s3[n] = out_tile[n + 18];
                        s4[n] = out_tile[n + 24];
                        s5[n] = out_tile[n + 30];
                    }
                    // w = A_T * W
                    for (int n = 0; n < 6; n++)
                    {
                        w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n];
                        w1[n] = s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n];
                        w2[n] = s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n];
                        w3[n] = s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + s5[n];
                    }
                    // transpose w to w_t
                    {
                        d0[0] = w0[0];
                        d0[1] = w1[0];
                        d0[2] = w2[0];
                        d0[3] = w3[0];
                        d1[0] = w0[1];
                        d1[1] = w1[1];
                        d1[2] = w2[1];
                        d1[3] = w3[1];
                        d2[0] = w0[2];
                        d2[1] = w1[2];
                        d2[2] = w2[2];
                        d2[3] = w3[2];
                        d3[0] = w0[3];
                        d3[1] = w1[3];
                        d3[2] = w2[3];
                        d3[3] = w3[3];
                        d4[0] = w0[4];
                        d4[1] = w1[4];
                        d4[2] = w2[4];
                        d4[3] = w3[4];
                        d5[0] = w0[5];
                        d5[1] = w1[5];
                        d5[2] = w2[5];
                        d5[3] = w3[5];
                    }
                    // Y = A_T * w_t
                    for (int n = 0; n < 4; n++)
                    {
                        o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n];
                        o1[n] = d1[n] - d2[n] + 2 * d3[n] - 2 * d4[n];
                        o2[n] = d1[n] + d2[n] + 4 * d3[n] + 4 * d4[n];
                        o3[n] = d1[n] - d2[n] + 8 * d3[n] - 8 * d4[n] + d5[n];
                    }
                    // save to top blob tm
                    for (int n = 0; n < 4; n++)
                    {
                        outRow0[n] = o0[n] + bias0;
                        outRow1[n] = o1[n] + bias0;
                        outRow2[n] = o2[n] + bias0;
                        outRow3[n] = o3[n] + bias0;
                    }

                    out_tile += 36;

                    outRow0 += 4;
                    outRow1 += 4;
                    outRow2 += 4;
                    outRow3 += 4;
                }
                outRow0 += outw_align * 3;
                outRow1 += outw_align * 3;
                outRow2 += outw_align * 3;
                outRow3 += outw_align * 3;
            }
        }
    }
    // END transform output

    // Crop the 4-aligned intermediate result back to the true output size when
    // alignment padding was added.  (Bug fix: the height check previously
    // compared outh_align against outw instead of outh.)
    if (outw_align != outw || outh_align != outh)
    {
        delete_0_3D(top_blob, top_blob_bordered, outh_align, outw_align, outh, outw, outch, 0, 0);
    }
}
/* Transform the raw 3x3 kernels (outch x inch x 3x3, stored in `kernel`)
 * into the Winograd F(4,3) domain (6x6 floats per kernel, U = G * g * G_T)
 * and re-pack them into `kernel_wino` in the interleaved layout consumed by
 * conv3x3s1_winograd43_sse.  `kernel_wino` must hold 36 * inch * outch
 * floats.  Each 6x6 transformed kernel is treated as nine groups (r = 0..8)
 * of 4 consecutive floats; for each r, output channels are packed 8-wide,
 * then 4-wide, then singly, interleaved across all input channels. */
void conv3x3s1_winograd43_transform_kernel_sse(const float* kernel, float* kernel_wino, int inch, int outch)
{
    // Temporary buffer for the untransposed U matrices (36 floats per
    // (outch, inch) kernel pair); released at the end of this function.
    float* kernel_tm = ( float* )sys_malloc(6 * 6 * inch * outch * sizeof(float));

    // G — the 6x3 Winograd F(4,3) kernel-transform matrix.
    const float ktm[6][3] = {
        {1.0f / 4, 0.0f, 0.0f},           {-1.0f / 6, -1.0f / 6, -1.0f / 6}, {-1.0f / 6, 1.0f / 6, -1.0f / 6},
        {1.0f / 24, 1.0f / 12, 1.0f / 6}, {1.0f / 24, -1.0f / 12, 1.0f / 6}, {0.0f, 0.0f, 1.0f}};

#pragma omp parallel for
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm + p * inch * 36 + q * 36;

            // transform kernel
            const float* k0 = kernel0;         // row 0 of the 3x3 kernel
            const float* k1 = kernel0 + 3;     // row 1
            const float* k2 = kernel0 + 6;     // row 2

            // h = G * g  (6x3 intermediate)
            float tmp[6][3] = {0};
            for (int i = 0; i < 6; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U = h * G_T  (6x6 transformed kernel)
            for (int j = 0; j < 6; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 6; i++)
                {
                    kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // Re-pack kernel_tm into the interleaved layout read by the dot stage:
    // for each of the nine 4-float groups r, pack output channels 8-wide,
    // then 4-wide, then one at a time, each interleaved over input channels.
    float* kernel_tm_test = kernel_wino;
    for (int r = 0; r < 9; r++)
    {
        int p = 0;
        // 8 output channels at a time (32 floats per input channel).
        for (; p + 7 < outch; p += 8)
        {
            const float* kernel0 = ( const float* )kernel_tm + p * inch * 36;
            const float* kernel1 = ( const float* )kernel_tm + (p + 1) * inch * 36;
            const float* kernel2 = ( const float* )kernel_tm + (p + 2) * inch * 36;
            const float* kernel3 = ( const float* )kernel_tm + (p + 3) * inch * 36;
            const float* kernel4 = ( const float* )kernel_tm + (p + 4) * inch * 36;
            const float* kernel5 = ( const float* )kernel_tm + (p + 5) * inch * 36;
            const float* kernel6 = ( const float* )kernel_tm + (p + 6) * inch * 36;
            const float* kernel7 = ( const float* )kernel_tm + (p + 7) * inch * 36;

            float* ktmp = kernel_tm_test + p / 8 * inch * 32;

            for (int q = 0; q < inch; q++)
            {
                ktmp[0] = kernel0[r * 4 + 0];
                ktmp[1] = kernel0[r * 4 + 1];
                ktmp[2] = kernel0[r * 4 + 2];
                ktmp[3] = kernel0[r * 4 + 3];

                ktmp[4] = kernel1[r * 4 + 0];
                ktmp[5] = kernel1[r * 4 + 1];
                ktmp[6] = kernel1[r * 4 + 2];
                ktmp[7] = kernel1[r * 4 + 3];

                ktmp[8] = kernel2[r * 4 + 0];
                ktmp[9] = kernel2[r * 4 + 1];
                ktmp[10] = kernel2[r * 4 + 2];
                ktmp[11] = kernel2[r * 4 + 3];

                ktmp[12] = kernel3[r * 4 + 0];
                ktmp[13] = kernel3[r * 4 + 1];
                ktmp[14] = kernel3[r * 4 + 2];
                ktmp[15] = kernel3[r * 4 + 3];

                ktmp[16] = kernel4[r * 4 + 0];
                ktmp[17] = kernel4[r * 4 + 1];
                ktmp[18] = kernel4[r * 4 + 2];
                ktmp[19] = kernel4[r * 4 + 3];

                ktmp[20] = kernel5[r * 4 + 0];
                ktmp[21] = kernel5[r * 4 + 1];
                ktmp[22] = kernel5[r * 4 + 2];
                ktmp[23] = kernel5[r * 4 + 3];

                ktmp[24] = kernel6[r * 4 + 0];
                ktmp[25] = kernel6[r * 4 + 1];
                ktmp[26] = kernel6[r * 4 + 2];
                ktmp[27] = kernel6[r * 4 + 3];

                ktmp[28] = kernel7[r * 4 + 0];
                ktmp[29] = kernel7[r * 4 + 1];
                ktmp[30] = kernel7[r * 4 + 2];
                ktmp[31] = kernel7[r * 4 + 3];

                ktmp += 32;
                kernel0 += 36;
                kernel1 += 36;
                kernel2 += 36;
                kernel3 += 36;
                kernel4 += 36;
                kernel5 += 36;
                kernel6 += 36;
                kernel7 += 36;
            }
        }
        // 4 output channels at a time (16 floats per input channel).
        for (; p + 3 < outch; p += 4)
        {
            const float* kernel0 = ( const float* )kernel_tm + p * inch * 36;
            const float* kernel1 = ( const float* )kernel_tm + (p + 1) * inch * 36;
            const float* kernel2 = ( const float* )kernel_tm + (p + 2) * inch * 36;
            const float* kernel3 = ( const float* )kernel_tm + (p + 3) * inch * 36;

            // NOTE(review): this base offset (and the p%4 variant below) counts
            // preceding 8-wide groups in units of inch*16 / inch*4, which looks
            // like an undercount when outch is not a multiple of 8.  It matches
            // the reader in conv3x3s1_winograd43_sse exactly, so both sides
            // agree for the common multiple-of-8 case — confirm before changing
            // either side.
            float* ktmp = kernel_tm_test + (p / 8 + (p % 8) / 4) * inch * 16;

            for (int q = 0; q < inch; q++)
            {
                ktmp[0] = kernel0[r * 4 + 0];
                ktmp[1] = kernel0[r * 4 + 1];
                ktmp[2] = kernel0[r * 4 + 2];
                ktmp[3] = kernel0[r * 4 + 3];

                ktmp[4] = kernel1[r * 4 + 0];
                ktmp[5] = kernel1[r * 4 + 1];
                ktmp[6] = kernel1[r * 4 + 2];
                ktmp[7] = kernel1[r * 4 + 3];

                ktmp[8] = kernel2[r * 4 + 0];
                ktmp[9] = kernel2[r * 4 + 1];
                ktmp[10] = kernel2[r * 4 + 2];
                ktmp[11] = kernel2[r * 4 + 3];

                ktmp[12] = kernel3[r * 4 + 0];
                ktmp[13] = kernel3[r * 4 + 1];
                ktmp[14] = kernel3[r * 4 + 2];
                ktmp[15] = kernel3[r * 4 + 3];

                ktmp += 16;
                kernel0 += 36;
                kernel1 += 36;
                kernel2 += 36;
                kernel3 += 36;
            }
        }
        // Remaining output channels one at a time (4 floats per input channel).
        for (; p < outch; p++)
        {
            const float* kernel0 = ( const float* )kernel_tm + p * inch * 36;

            float* ktmp = kernel_tm_test + (p / 8 + (p % 8) / 4 + p % 4) * inch * 4;

            for (int q = 0; q < inch; q++)
            {
                ktmp[0] = kernel0[r * 4 + 0];
                ktmp[1] = kernel0[r * 4 + 1];
                ktmp[2] = kernel0[r * 4 + 2];
                ktmp[3] = kernel0[r * 4 + 3];

                ktmp += 4;
                kernel0 += 36;
            }
        }
        // Advance to the next r-group region (4 floats * inch * outch).
        kernel_tm_test += 4 * inch * outch;
    }
    // NOTE(review): kernel_tm was allocated with sys_malloc but is released
    // with free(); this assumes sys_malloc wraps plain malloc — confirm, and
    // use the matching sys_free otherwise.
    free(kernel_tm);
}
/*
 * Prepare persistent buffers for the Winograd F(4,3) 3x3/s1 convolution.
 *
 * Allocates (unless externally supplied) the interleaved-kernel buffer,
 * the zero-padded input image, the dot-product and transformed-input
 * scratch buffers, and — when the tiled output is larger than the real
 * output — a bordered output buffer.  Finally pre-transforms the weights.
 *
 * Returns 0 on success.  Buffers are released by wino_conv_hcl_postrun().
 *
 * NOTE(review): removed dead locals (input_h/input_w/pad_h/pad_w were
 * computed but never used).  sys_malloc results are not NULL-checked,
 * matching the rest of this module — TODO confirm allocation policy.
 */
int wino_conv_hcl_prerun(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor,
                         struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param)
{
    int batch = input_tensor->dims[0];
    int input_c = input_tensor->dims[1];
    int output_c = output_tensor->dims[1];
    int output_h = output_tensor->dims[2];
    int output_w = output_tensor->dims[3];
    float* kernel = ( float* )filter_tensor->data;

    /* allocate the interleaved kernel buffer only when the caller did not provide one */
    if (!priv_info->external_interleave_mem)
    {
        int mem_size = get_private_mem_size(filter_tensor, param);
        void* mem = sys_malloc(mem_size);
        priv_info->interleave_buffer = mem;
        priv_info->interleave_buffer_size = mem_size;
    }

    /* tile the output into TILE x TILE blocks; the padded input needs a 2-pixel halo */
    int block_h = (output_h + TILE - 1) / TILE;
    int block_w = (output_w + TILE - 1) / TILE;
    int block = block_h * block_w;
    int padded_inh = TILE * block_h + 2;
    int padded_inw = TILE * block_w + 2;
    int pad_inhw = padded_inh * padded_inw;
    int outw = block_w * TILE;
    int outh = block_h * TILE;

    priv_info->input_pad = ( float* )sys_malloc(batch * input_c * pad_inhw * sizeof(float));
    memset(priv_info->input_pad, 0, batch * input_c * pad_inhw * sizeof(float));
    priv_info->dot_block = ( float* )sys_malloc(ELEM_SIZE * block * output_c * sizeof(float));
    priv_info->transform_input = ( float* )sys_malloc(ELEM_SIZE * block * input_c * sizeof(float));
    priv_info->output_bordered = NULL;

    /* the tiled result only needs a staging buffer when it overhangs the true output */
    if (outw != output_w || outh != output_h)
    {
        priv_info->output_bordered = ( float* )sys_malloc(outw * outh * output_c * sizeof(float));
    }

    /* pre-transform the 3x3 kernels into the Winograd domain once */
    conv3x3s1_winograd43_transform_kernel_sse(kernel, ( float* )priv_info->interleave_buffer, input_c, output_c);
    return 0;
}
/*
 * Release every scratch buffer owned by priv_info after a Winograd run.
 * The interleave buffer is skipped when it was supplied externally.
 * Always returns 0.
 */
int wino_conv_hcl_postrun(struct conv_priv_info* priv_info)
{
/* free a buffer if present and clear the pointer to prevent dangling reuse */
#define WINO_RELEASE(ptr)  \
    do                     \
    {                      \
        if ((ptr) != NULL) \
        {                  \
            sys_free(ptr); \
            (ptr) = NULL;  \
        }                  \
    } while (0)

    /* only free the interleaved kernels when this module allocated them */
    if (!priv_info->external_interleave_mem)
        WINO_RELEASE(priv_info->interleave_buffer);

    WINO_RELEASE(priv_info->input_pad);
    WINO_RELEASE(priv_info->dot_block);
    WINO_RELEASE(priv_info->transform_input);
    WINO_RELEASE(priv_info->output_bordered);

#undef WINO_RELEASE
    return 0;
}
/*
 * Winograd F(4,3) forward convolution (3x3, stride 1).
 *
 * Pads each batch image into priv_info->input_pad, then runs the SSE
 * Winograd kernel per group, and finally applies the activation when
 * param->activation >= 0.  Buffers must have been set up by
 * wino_conv_hcl_prerun().  Returns 0 on success.
 *
 * NOTE(review): removed dead locals (kernel/stride/dilation sizes,
 * input_size, kernel_size, out_hw, out_c_align, block_hw, padded_in_hw
 * were computed but never used — this kernel is fixed to 3x3/s1/d1).
 */
int wino_conv_hcl_run(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* bias_tensor,
                      struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param,
                      int num_thread, int cpu_affinity)
{
    /* param */
    int pad_h0 = param->pad_h0;
    int pad_w0 = param->pad_w0;
    int act_type = param->activation;
    int group = param->group;

    int batch = input_tensor->dims[0];
    int in_c = input_tensor->dims[1];
    int in_c_g = input_tensor->dims[1] / group;
    int in_h = input_tensor->dims[2];
    int in_w = input_tensor->dims[3];
    int input_size_g = in_c_g * in_h * in_w;

    int out_c = output_tensor->dims[1];
    int out_h = output_tensor->dims[2];
    int out_w = output_tensor->dims[3];
    int output_size = out_c * out_h * out_w;

    /* wino param: output tiling and 2-pixel input halo */
    int block_h = (out_h + TILE - 1) / TILE;
    int block_w = (out_w + TILE - 1) / TILE;
    int padded_in_h = block_h * TILE + 2;
    int padded_in_w = block_w * TILE + 2;

    /* buffer addr */
    float* input = ( float* )input_tensor->data;
    float* output = ( float* )output_tensor->data;
    float* biases = NULL;
    if (bias_tensor != NULL)
        biases = ( float* )bias_tensor->data;

    (void)filter_tensor; /* kernels were pre-transformed in prerun */
    (void)cpu_affinity;  /* affinity handled by the caller */

    for (int i = 0; i < batch; i++)
    {
        for (int g = 0; g < group; g++)
        {
            /* NOTE(review): the pad covers all in_c channels, so repeating it
             * per group is redundant (idempotent) — kept for behavior parity;
             * the winograd kernel is effectively used with group == 1. */
            pad_0_align_3D((float*)priv_info->input_pad + i * in_c * padded_in_h * padded_in_w, input + i * in_c * in_h * in_w,
                           in_h, in_w, padded_in_h, padded_in_w, in_c, pad_h0, pad_w0);
            conv3x3s1_winograd43_sse((float*)priv_info->input_pad + i * in_c * padded_in_h * padded_in_w + g * input_size_g,
                                     output + i * out_c * out_h * out_w, priv_info->interleave_buffer,
                                     priv_info->dot_block, priv_info->transform_input, priv_info->output_bordered,
                                     biases, padded_in_w, padded_in_h, in_c, out_w, out_h, out_c, num_thread);
        }
    }

    /* fused activation (relu family) */
    if (act_type >= 0)
    {
        relu(output, batch * output_size, act_type);
    }
    return 0;
}
GB_binop__second_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__second_int8
// A.*B function (eWiseMult): GB_AemultB__second_int8
// A*D function (colscale): GB_AxD__second_int8
// D*A function (rowscale): GB_DxB__second_int8
// C+=B function (dense accum): GB_Cdense_accumB__second_int8
// C+=b function (dense accum): GB_Cdense_accumb__second_int8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__second_int8
// C=scalar+B (none)
// C=scalar+B' (none)
// C=A+scalar GB_bind2nd__second_int8
// C=A'+scalar GB_bind2nd_tran__second_int8
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = bij
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = y ;
// op is second
#define GB_OP_IS_SECOND \
1
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SECOND || GxB_NO_INT8 || GxB_NO_SECOND_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no accumulator, no mask.
// With the SECOND operator this copies B's values into C (A contributes
// only its pattern).  Auto-generated; logic lives in the included template.
GrB_Info GB_Cdense_ewise3_noaccum__second_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// template expands via the GB_* macros defined at the top of this file
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse.  The kfirst/klast/pstart slice
// arrays partition B's entries across ntasks parallel tasks.
GrB_Info GB_Cdense_accumB__second_int8
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a single scalar into every entry of a dense matrix.
GrB_Info GB_Cdense_accumb__second_int8
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — generator artifact; the inner block returns first
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
// For SECOND, each cij takes the value of D's diagonal entry.
GrB_Info GB_AxD__second_int8
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C has the same pattern as A; only its values are written here
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
// For SECOND, cij = bij, so D contributes only its pattern.
GrB_Info GB_DxB__second_int8
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C has the same pattern as B; only its values are written here
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B over the union of the patterns of A and B.
// The C_to_* maps and TaskList come from the symbolic (phase 1) analysis.
GrB_Info GB_AaddB__second_int8
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B over the intersection of the patterns.
// For SECOND, each cij takes bij where both A and B have an entry.
GrB_Info GB_AemultB__second_int8
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int8_t bij = Bx [p] ;
Cx [p] = bij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y) with the scalar bound as the second argument.
// SECOND(aij, y) == y, so every output entry becomes the scalar and the
// matrix values are never read (A is pattern-only).
GrB_Info GB_bind2nd__second_int8
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t y = (*((int8_t *) y_input)) ;
    (void) Ax_input ;   // values of A are ignored by the SECOND operator
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = y ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = aij ; \
}
GrB_Info (none)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = y ; \
}
// C = op (A', y): transpose A and apply SECOND(aij, y), so every entry of
// C becomes the scalar y; the transpose contributes only the pattern.
// Uses the GB_CAST_OP macro redefined just above.
GrB_Info GB_bind2nd_tran__second_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
blackscholes.c | // Copyright (c) 2007 Intel Corp.
// Black-Scholes
// Analytical method for calculating European Options
//
//
// Reference Source: Options, Futures, and Other Derivatives, 3rd Edition, Prentice
// Hall, John C. Hull,
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#ifdef ENABLE_PARSEC_HOOKS
#include <hooks.h>
#endif
// Multi-threaded pthreads header
#ifdef ENABLE_THREADS
#define MAX_THREADS 128
// Add the following line so that icc 9.0 is compatible with pthread lib.
#define __thread __threadp
MAIN_ENV
#undef __thread
#endif
// Multi-threaded OpenMP header
#ifdef ENABLE_OPENMP
#include <omp.h>
#endif
// Multi-threaded header for Windows
#ifdef WIN32
#pragma warning(disable : 4305)
#pragma warning(disable : 4244)
#include <windows.h>
#define MAX_THREADS 128
#endif
//Precision to use for calculations
#define fptype float
#define NUM_RUNS 100
typedef struct OptionData_ {
fptype s; // spot price
fptype strike; // strike price
fptype r; // risk-free interest rate
fptype divq; // dividend rate
fptype v; // volatility
fptype t; // time to maturity or option expiration in years
// (1yr = 1.0, 6mos = 0.5, 3mos = 0.25, ..., etc)
char OptionType; // Option type. "P"=PUT, "C"=CALL
fptype divs; // dividend vals (not used in this test)
fptype DGrefval; // DerivaGem Reference Value
} OptionData;
OptionData *data;
fptype *prices;
int numOptions;
int * otype;
fptype * sptprice;
fptype * strike;
fptype * rate;
fptype * volatility;
fptype * otime;
int numError = 0;
int nThreads;
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Cumulative Normal Distribution Function
// See Hull, Section 11.8, P.243-244
#define inv_sqrt_2xPI 0.39894228040143270286
fptype CNDF ( fptype InputX )
{
int sign;
fptype OutputX;
fptype xInput;
fptype xNPrimeofX;
fptype expValues;
fptype xK2;
fptype xK2_2, xK2_3;
fptype xK2_4, xK2_5;
fptype xLocal, xLocal_1;
fptype xLocal_2, xLocal_3;
// Check for negative value of InputX
if (InputX < 0.0) {
InputX = -InputX;
sign = 1;
} else
sign = 0;
xInput = InputX;
// Compute NPrimeX term common to both four & six decimal accuracy calcs
expValues = exp(-0.5f * InputX * InputX);
xNPrimeofX = expValues;
xNPrimeofX = xNPrimeofX * inv_sqrt_2xPI;
xK2 = 0.2316419 * xInput;
xK2 = 1.0 + xK2;
xK2 = 1.0 / xK2;
xK2_2 = xK2 * xK2;
xK2_3 = xK2_2 * xK2;
xK2_4 = xK2_3 * xK2;
xK2_5 = xK2_4 * xK2;
xLocal_1 = xK2 * 0.319381530;
xLocal_2 = xK2_2 * (-0.356563782);
xLocal_3 = xK2_3 * 1.781477937;
xLocal_2 = xLocal_2 + xLocal_3;
xLocal_3 = xK2_4 * (-1.821255978);
xLocal_2 = xLocal_2 + xLocal_3;
xLocal_3 = xK2_5 * 1.330274429;
xLocal_2 = xLocal_2 + xLocal_3;
xLocal_1 = xLocal_2 + xLocal_1;
xLocal = xLocal_1 * xNPrimeofX;
xLocal = 1.0 - xLocal;
OutputX = xLocal;
if (sign) {
OutputX = 1.0 - OutputX;
}
return OutputX;
}
// For debugging
// Debug helper: print a labeled scalar to stdout.
void print_xmm(fptype in, char* s) {
    fprintf(stdout, "%s: %f\n", s, in);
}
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
// Price a European option with the closed-form Black-Scholes formula
// (no dividends).  otype == 0 selects a call, otherwise a put.
// timet is unused but retained for interface compatibility.
//
// NOTE(review): removed dead locals xStockPrice/xStrikePrice, which were
// assigned but never read (sptprice/strike are used directly).
fptype BlkSchlsEqEuroNoDiv( fptype sptprice,
                            fptype strike, fptype rate, fptype volatility,
                            fptype time, int otype, float timet )
{
    fptype OptionPrice;

    // local private working variables for the calculation
    fptype xRiskFreeRate = rate;
    fptype xVolatility = volatility;
    fptype xTime = time;
    fptype xSqrtTime = sqrt(xTime);

    // d1 = (ln(S/K) + (r + v^2/2)*T) / (v*sqrt(T)); d2 = d1 - v*sqrt(T)
    fptype logValues = log( sptprice / strike );
    fptype xLogTerm = logValues;
    fptype xPowerTerm = xVolatility * xVolatility;
    xPowerTerm = xPowerTerm * 0.5;
    fptype xD1 = xRiskFreeRate + xPowerTerm;
    xD1 = xD1 * xTime;
    xD1 = xD1 + xLogTerm;
    fptype xDen = xVolatility * xSqrtTime;
    xD1 = xD1 / xDen;
    fptype xD2 = xD1 - xDen;
    fptype d1 = xD1;
    fptype d2 = xD2;

    fptype NofXd1 = CNDF( d1 );
    fptype NofXd2 = CNDF( d2 );

    // discounted strike: K * e^(-rT)
    fptype FutureValueX = strike * ( exp( -(rate)*(time) ) );
    (void) timet;  // unused; kept so existing call sites stay valid

    if (otype == 0) {
        // call: S*N(d1) - K*e^(-rT)*N(d2)
        OptionPrice = (sptprice * NofXd1) - (FutureValueX * NofXd2);
    } else {
        // put: K*e^(-rT)*N(-d2) - S*N(-d1), via N(-x) = 1 - N(x)
        fptype NegNofXd1 = (1.0 - NofXd1);
        fptype NegNofXd2 = (1.0 - NofXd2);
        OptionPrice = (FutureValueX * NegNofXd2) - (sptprice * NegNofXd1);
    }
    return OptionPrice;
}
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
// Worker routine shared by all threading backends (Win32, pthreads, OpenMP,
// and the serial fallback).  Each worker prices its slice of the global
// option arrays, repeating the full computation NUM_RUNS times.
// NOTE(review): numOptions / nThreads truncates, so trailing options are
// skipped when numOptions is not divisible by nThreads — TODO confirm intent.
#ifdef WIN32
DWORD WINAPI bs_thread(LPVOID tid_ptr){
#else
int bs_thread(void *tid_ptr) {
#endif
int i, j;
fptype price;
fptype priceDelta;
// partition [0, numOptions) evenly by thread id
int tid = *(int *)tid_ptr;
int start = tid * (numOptions / nThreads);
int end = start + (numOptions / nThreads);
for (j=0; j<NUM_RUNS; j++) {
// OpenMP build: one call iterates over ALL options and the parallel-for
// does the partitioning instead of the [start, end) slice.
#ifdef ENABLE_OPENMP
#pragma omp parallel for
for (i=0; i<numOptions; i++) {
#else //ENABLE_OPENMP
for (i=start; i<end; i++) {
#endif //ENABLE_OPENMP
/* Calling main function to calculate option value based on
 * Black & Sholes's equation.
 */
price = BlkSchlsEqEuroNoDiv( sptprice[i], strike[i],
rate[i], volatility[i], otime[i],
otype[i], 0);
prices[i] = price;
#ifdef ERR_CHK
// compare against the DerivaGem reference value shipped with the input
priceDelta = data[i].DGrefval - price;
if( fabs(priceDelta) >= 1e-4 ){
printf("Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n",
i, price, data[i].DGrefval, priceDelta);
numError ++;
}
#endif
}
}
return 0;
}
// Entry point: reads the option set named in argv[1], prices every option
// NUM_RUNS times with the selected threading backend, and writes the
// prices to the hard-coded output file "prueba".
// NOTE(review): argc is not validated (the usage check is commented out),
// so argv[1] is dereferenced unconditionally — confirm callers always pass it.
int main (int argc, char **argv)
{
FILE *file;
int i;
int loopnum;
fptype * buffer;
int * buffer2;
int rv;
#ifdef PARSEC_VERSION
#define __PARSEC_STRING(x) #x
#define __PARSEC_XSTRING(x) __PARSEC_STRING(x)
printf("PARSEC Benchmark Suite Version "__PARSEC_XSTRING(PARSEC_VERSION)"\n");
fflush(NULL);
#else
printf("PARSEC Benchmark Suite\n");
fflush(NULL);
#endif //PARSEC_VERSION
#ifdef ENABLE_PARSEC_HOOKS
__parsec_bench_begin(__parsec_blackscholes);
#endif
// if (argc != 4)
// {
// printf("Usage:\n\t%s <nthreads> <inputFile> <outputFile>\n", argv[0]);
// exit(1);
// }
// thread count is pinned to 1; output path is hard-coded (modified benchmark)
nThreads = 1;
char *inputFile = argv[1];
char *outputFile = "prueba";
//Read input data from file
file = fopen(inputFile, "r");
if(file == NULL) {
printf("ERROR: Unable to open file `%s'.\n", inputFile);
exit(1);
}
// first record is the option count; the rest are one option per line
rv = fscanf(file, "%i", &numOptions);
if(rv != 1) {
printf("ERROR: Unable to read from file `%s'.\n", inputFile);
fclose(file);
exit(1);
}
if(nThreads > numOptions) {
printf("WARNING: Not enough work, reducing number of threads to match number of options.\n");
nThreads = numOptions;
}
#if !defined(ENABLE_THREADS) && !defined(ENABLE_OPENMP)
if(nThreads != 1) {
printf("Error: <nthreads> must be 1 (serial version)\n");
exit(1);
}
#endif
// alloc spaces for the option data
data = (OptionData*)malloc(numOptions*sizeof(OptionData));
prices = (fptype*)malloc(numOptions*sizeof(fptype));
for ( loopnum = 0; loopnum < numOptions; ++ loopnum )
{
rv = fscanf(file, "%f %f %f %f %f %f %c %f %f", &data[loopnum].s, &data[loopnum].strike, &data[loopnum].r, &data[loopnum].divq, &data[loopnum].v, &data[loopnum].t, &data[loopnum].OptionType, &data[loopnum].divs, &data[loopnum].DGrefval);
if(rv != 9) {
printf("ERROR: Unable to read from file `%s'.\n", inputFile);
fclose(file);
exit(1);
}
}
rv = fclose(file);
if(rv != 0) {
printf("ERROR: Unable to close file `%s'.\n", inputFile);
exit(1);
}
#ifdef ENABLE_THREADS
MAIN_INITENV(,8000000,nThreads);
#endif
printf("Num of Options: %d\n", numOptions);
printf("Num of Runs: %d\n", NUM_RUNS);
#define PAD 256
#define LINESIZE 64
// Re-pack the AoS option records into cache-line-aligned parallel arrays
// (SoA layout) so the pricing loop streams each field contiguously.
buffer = (fptype *) malloc(5 * numOptions * sizeof(fptype) + PAD);
sptprice = (fptype *) (((unsigned long long)buffer + PAD) & ~(LINESIZE - 1));
strike = sptprice + numOptions;
rate = strike + numOptions;
volatility = rate + numOptions;
otime = volatility + numOptions;
buffer2 = (int *) malloc(numOptions * sizeof(fptype) + PAD);
otype = (int *) (((unsigned long long)buffer2 + PAD) & ~(LINESIZE - 1));
for (i=0; i<numOptions; i++) {
otype[i] = (data[i].OptionType == 'P') ? 1 : 0;
sptprice[i] = data[i].s;
strike[i] = data[i].strike;
rate[i] = data[i].r;
volatility[i] = data[i].v;
otime[i] = data[i].t;
}
// NOTE(review): the operand is size_t; %d should be %zu on LP64 targets
printf("Size of data: %d\n", numOptions * (sizeof(OptionData) + sizeof(int)));
#ifdef ENABLE_PARSEC_HOOKS
__parsec_roi_begin();
#endif
// Region of interest: dispatch to the configured threading backend.
#ifdef ENABLE_THREADS
int tids[nThreads];
for(i=0; i<nThreads; i++) {
tids[i]=i;
CREATE_WITH_ARG(bs_thread, &tids[i]);
}
WAIT_FOR_END(nThreads);
#else//ENABLE_THREADS
#ifdef ENABLE_OPENMP
{
int tid=0;
omp_set_num_threads(nThreads);
bs_thread(&tid);
}
#else //ENABLE_OPENMP
#ifdef WIN32
if (nThreads > 1)
{
HANDLE threads[MAX_THREADS];
int nums[MAX_THREADS];
for(i=0; i<nThreads; i++) {
nums[i] = i;
threads[i] = CreateThread(0, 0, bs_thread, &nums[i], 0, 0);
}
WaitForMultipleObjects(nThreads, threads, TRUE, INFINITE);
} else
#endif
{
int tid=0;
bs_thread(&tid);
}
#endif //ENABLE_OPENMP
#endif //ENABLE_THREADS
#ifdef ENABLE_PARSEC_HOOKS
__parsec_roi_end();
#endif
//Write prices to output file
file = fopen(outputFile, "w");
if(file == NULL) {
printf("ERROR: Unable to open file `%s'.\n", outputFile);
exit(1);
}
rv = fprintf(file, "%i\n", numOptions);
if(rv < 0) {
printf("ERROR: Unable to write to file `%s'.\n", outputFile);
fclose(file);
exit(1);
}
for(i=0; i<numOptions; i++) {
rv = fprintf(file, "%.18f\n", prices[i]);
if(rv < 0) {
printf("ERROR: Unable to write to file `%s'.\n", outputFile);
fclose(file);
exit(1);
}
}
rv = fclose(file);
if(rv != 0) {
printf("ERROR: Unable to close file `%s'.\n", outputFile);
exit(1);
}
#ifdef ERR_CHK
printf("Num Errors: %d\n", numError);
#endif
// NOTE(review): buffer and buffer2 are intentionally leaked here; the
// aligned aliases (sptprice/otype) point into them, so only the original
// pointers could be freed — confirm if cleanup matters for this benchmark.
free(data);
free(prices);
#ifdef ENABLE_PARSEC_HOOKS
__parsec_bench_end();
#endif
return 0;
}
|
GB_unaryop__identity_bool_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_bool_uint64
// op(A') function: GB_tran__identity_bool_uint64
// C type: bool
// A type: uint64_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
bool z = (bool) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the IDENTITY operator entrywise, casting
// each uint64_t entry of A to bool (nonzero -> true) in C.
GrB_Info GB_unop__identity_bool_uint64
(
    bool *restrict Cx,
    const uint64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // expansion of GB_CAST_OP (p, p): fetch, typecast, apply identity
        uint64_t aij = Ax [p] ;
        bool z = (bool) aij ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, casting each uint64_t entry to bool.
// Auto-generated; the transpose logic lives in the included template,
// which expands the GB_* macros defined at the top of this file.
GrB_Info GB_tran__identity_bool_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
|
CPhotoconsistencyOdometry.h | /*
* Photoconsistency-Visual-Odometry
* Multiscale Photoconsistency Visual Odometry from RGBD Images
* Copyright (c) 2012-2013, Miguel Algaba Borrego
*
* http://code.google.com/p/photoconsistency-visual-odometry/
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the holder(s) nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef _CPHOTOCONSISTENCY_ODOMETRY_
#define _CPHOTOCONSISTENCY_ODOMETRY_
#define ENABLE_OPENMP_MULTITHREADING_WARP_IMAGE 0
#include "opencv2/imgproc/imgproc.hpp"
#include "eigen3/Eigen/Dense"
#include "Matrix.h"
namespace phovo
{
/*! Builds the 4x4 rigid-body transform from a translation (x, y, z) and
 * Z-Y-X Euler angles: R = Rz(yaw) * Ry(pitch) * Rx(roll), with the
 * translation in the last column and [0 0 0 1] in the last row.
 *
 * Improvement: the six trigonometric values are computed once instead of
 * re-evaluating cos/sin per matrix cell (24 calls reduced to 6); each
 * entry is the same product of the same factors as before. */
template< class T >
void eigenPose( const T x, const T y, const T z,
                const T yaw, const T pitch, const T roll,
                Numeric::Matrix44RowMajor< T > & pose)
{
  const T cy = cos(yaw);
  const T sy = sin(yaw);
  const T cp = cos(pitch);
  const T sp = sin(pitch);
  const T cr = cos(roll);
  const T sr = sin(roll);

  // rotation block
  pose(0,0) = cy * cp;
  pose(0,1) = cy * sp * sr - sy * cr;
  pose(0,2) = cy * sp * cr + sy * sr;
  pose(1,0) = sy * cp;
  pose(1,1) = sy * sp * sr + cy * cr;
  pose(1,2) = sy * sp * cr - cy * sr;
  pose(2,0) = -sp;
  pose(2,1) = cp * sr;
  pose(2,2) = cp * cr;

  // translation column and homogeneous row
  pose(0,3) = x;
  pose(1,3) = y;
  pose(2,3) = z;
  pose(3,0) = 0;
  pose(3,1) = 0;
  pose(3,2) = 0;
  pose(3,3) = 1;
}
/*! Warps intensityImage into warpedIntensityImage by back-projecting each
 * pixel with its depth, applying the rigid transform Rt, and re-projecting
 * with the pinhole intrinsics (scaled down by 2^level for pyramid levels).
 * Pixels with no valid depth, or that project outside the image, are left
 * at the zero initialization.
 *
 * Fix: the bounds test mixed a bitwise '&' into the logical '&&' chain
 * (it only worked because the operands are 0/1 bools); now uses '&&'
 * throughout, which also short-circuits. */
template< class TPixel, class TCoordinate >
void warpImage( const cv::Mat_< TPixel > & intensityImage,
                const cv::Mat_< TCoordinate > & depthImage,
                cv::Mat_< TPixel > & warpedIntensityImage,
                const Numeric::Matrix44RowMajor< TCoordinate > & Rt,
                const Numeric::Matrix33RowMajor< TCoordinate > & intrinsicMatrix,
                const int level = 0 )
{
  typedef TPixel PixelType;
  typedef cv::Mat_< PixelType > IntensityImageType;
  typedef TCoordinate CoordinateType;
  typedef Numeric::VectorCol4< CoordinateType > Vector4Type;

  // intrinsics for this pyramid level
  CoordinateType fx = intrinsicMatrix(0,0)/pow(2,level);
  CoordinateType fy = intrinsicMatrix(1,1)/pow(2,level);
  CoordinateType inv_fx = 1.f/fx;
  CoordinateType inv_fy = 1.f/fy;
  CoordinateType ox = intrinsicMatrix(0,2)/pow(2,level);
  CoordinateType oy = intrinsicMatrix(1,2)/pow(2,level);
  Vector4Type point3D;
  Vector4Type transformedPoint3D;
  int transformed_r,transformed_c; // 2D coordinates of the transformed pixel(r,c) of frame 1
  warpedIntensityImage = cv::Mat_< PixelType >::zeros( intensityImage.rows, intensityImage.cols );
#if ENABLE_OPENMP_MULTITHREADING_WARP_IMAGE
  #pragma omp parallel for private( point3D, transformedPoint3D, transformed_r, transformed_c )
#endif
  for( int r=0; r<intensityImage.rows; r++)
  {
    for( int c=0; c<intensityImage.cols; c++)
    {
      if( depthImage(r,c)>0 ) //If has valid depth value
      {
        //Compute the local 3D coordinates of pixel(r,c) of frame 1
        point3D(2) = depthImage(r,c); //z
        point3D(0) = (c-ox) * point3D(2) * inv_fx; //x
        point3D(1) = (r-oy) * point3D(2) * inv_fy; //y
        point3D(3) = 1.0; //homogeneous coordinate
        //Transform the 3D point using the transformation matrix Rt
        transformedPoint3D = Rt * point3D;
        //Project the 3D point to the 2D plane
        // NOTE(review): assumes transformedPoint3D(2) > 0 (point in front
        // of the camera) — a zero/negative z would project spuriously.
        transformed_c = static_cast< int >( ( ( transformedPoint3D(0) * fx ) /
                        transformedPoint3D(2) ) + ox ); //transformed x (2D)
        transformed_r = static_cast< int >( ( ( transformedPoint3D(1) * fy ) /
                        transformedPoint3D(2) ) + oy ); //transformed y (2D)
        //Assign the intensity value to the warped image when the projected
        //pixel lands inside the frame
        if( transformed_r >= 0 && transformed_r < intensityImage.rows &&
            transformed_c >= 0 && transformed_c < intensityImage.cols)
        {
          warpedIntensityImage( transformed_r, transformed_c ) = intensityImage( r, c );
        }
      }
    }
  }
}
/*!This abstract class defines the mandatory methods that any derived class must implement to compute the rigid (6DoF) transformation that best aligns a pair of RGBD frames using a photoconsistency maximization approach.
 * Expected call order: SetIntrinsicMatrix, SetSourceFrame, SetTargetFrame,
 * SetInitialStateVector, Optimize, then the two getters.*/
template< class TPixel, class TCoordinate >
class CPhotoconsistencyOdometry
{
public:
// Convenience aliases shared by every concrete odometry implementation.
typedef TPixel PixelType;
typedef cv::Mat_< PixelType > IntensityImageType;
typedef TCoordinate CoordinateType;
typedef cv::Mat_< CoordinateType > DepthImageType;
typedef Numeric::Matrix33RowMajor< CoordinateType > Matrix33Type;
typedef Numeric::Matrix44RowMajor< CoordinateType > Matrix44Type;
typedef Numeric::VectorCol6< CoordinateType > Vector6Type;
typedef Numeric::VectorCol4< CoordinateType > Vector4Type;
/*!Sets the 3x3 intrinsic pinhole matrix.*/
virtual void SetIntrinsicMatrix( const Matrix33Type & intrinsicMatrix ) = 0;
/*!Sets the source (Intensity+Depth) frame.*/
virtual void SetSourceFrame( const IntensityImageType & intensityImage,
const DepthImageType & depthImage ) = 0;
/*!Sets the target (Intensity+Depth) frame.*/
virtual void SetTargetFrame( const IntensityImageType & intensityImage,
const DepthImageType & depthImage ) = 0;
/*!Initializes the state vector to a certain value. The optimization process uses
*the initial state vector as the initial estimate.*/
virtual void SetInitialStateVector( const Vector6Type & initialStateVector ) = 0;
/*!Launches the least-squares optimization process to find the configuration of the
*state vector parameters that maximizes the photoconsistency between the source and
*target frame.*/
virtual void Optimize() = 0;
/*!Returns the optimal state vector. This method has to be called after calling the
*Optimize() method.*/
virtual Vector6Type GetOptimalStateVector() const = 0;
/*!Returns the optimal 4x4 rigid transformation matrix between the source and target frame.
*This method has to be called after calling the Optimize() method.*/
virtual Matrix44Type GetOptimalRigidTransformationMatrix() const = 0;
};
} //end namespace phovo
#endif
|
vmpush2.c | /* C Library for Skeleton 2D Electrostatic OpenMP/Vector PIC Code */
/* written by Viktor K. Decyk, UCLA */
#include <stdlib.h>
#include <stdio.h>
#include <complex.h>
#include <math.h>
#include "vmpush2.h"
/*--------------------------------------------------------------------*/
double ranorm() {
/* this program calculates a random number y from a gaussian distribution
with zero mean and unit variance, according to the method of
mueller and box:
y(k) = (-2*ln(x(k)))**1/2*sin(2*pi*x(k+1))
y(k+1) = (-2*ln(x(k)))**1/2*cos(2*pi*x(k+1)),
where x is a random number uniformly distributed on (0,1).
written for the ibm by viktor k. decyk, ucla
NOTE(review): deviates are produced in pairs; the second of each pair is
cached in r0 and returned by the next call. All state lives in static
locals, so this function is not thread-safe and cannot be reseeded.
local data */
/* r1,r2 and r4,r5 = states of the two uniform generators (one per
   deviate of the pair); updated in place on every full evaluation */
static int r1 = 885098780, r2 = 1824280461;
static int r4 = 1396483093, r5 = 55318673;
/* iflg = 1 when a cached deviate from the previous call is pending */
static int iflg = 0;
/* h1l,h1u,h2l = generator multiplier pieces; the wide-integer update is
   emulated with double arithmetic split at the 65536 radix below */
static double h1l = 65531.0, h1u = 32767.0, h2l = 65525.0;
static double r0 = 0.0;
int isc, i1;
double ranorm, r3, asc, bsc, temp;  /* local 'ranorm' shadows the function name (legal C) */
if (iflg==1) {
/* second deviate of the previous pair is cached: return it */
ranorm = r0;
r0 = 0.0;
iflg = 0;
return ranorm;
}
/* advance the first uniform generator (state r1,r2) using emulated
   multiword arithmetic in doubles, radix 65536 */
isc = 65536;
asc = (double) isc;
bsc = asc*asc;
i1 = r1 - (r1/isc)*isc;
r3 = h1l*(double) r1 + asc*h1u*(double) i1;
i1 = r3/bsc;
r3 -= ((double) i1)*bsc;
bsc = 0.5*bsc;
i1 = r2/isc;
isc = r2 - i1*isc;
r0 = h1l*(double) r2 + asc*h1u*(double) isc;
asc = 1.0/bsc;
isc = r0*asc;
r2 = r0 - ((double) isc)*bsc;
r3 += (double) isc + 2.0*h1u*(double) i1;
isc = r3*asc;
r1 = r3 - ((double) isc)*bsc;
/* radial factor of the box-muller pair: sqrt(-2 ln(u1)) */
temp = sqrt(-2.0*log((((double) r1) + ((double) r2)*asc)*asc));
/* advance the second uniform generator (state r4,r5) the same way */
isc = 65536;
asc = (double) isc;
bsc = asc*asc;
i1 = r4 - (r4/isc)*isc;
r3 = h2l*(double) r4 + asc*h1u*(double) i1;
i1 = r3/bsc;
r3 -= ((double) i1)*bsc;
bsc = 0.5*bsc;
i1 = r5/isc;
isc = r5 - i1*isc;
r0 = h2l*(double) r5 + asc*h1u*(double) isc;
asc = 1.0/bsc;
isc = r0*asc;
r5 = r0 - ((double) isc)*bsc;
r3 += (double) isc + 2.0*h1u*(double) i1;
isc = r3*asc;
r4 = r3 - ((double) isc)*bsc;
/* angular factor: 2*pi*u2 */
r0 = 6.28318530717959*((((double) r4) + ((double) r5)*asc)*asc);
/* return the sine deviate now; cache the cosine deviate for next call */
ranorm = temp*sin(r0);
r0 = temp*cos(r0);
iflg = 1;
return ranorm;
}
/*--------------------------------------------------------------------*/
void cdistr2(float part[], float vtx, float vty, float vdx, float vdy,
int npx, int npy, int idimp, int nop, int nx, int ny,
int ipbc) {
/* for 2d code: initializes particle positions on a uniform grid and
   velocities from a drifting maxwellian.
   part[n][0]/part[n][1] = position x/y of particle n
   part[n][2]/part[n][3] = velocity vx/vy of particle n
   vtx/vty = thermal velocity in x/y, vdx/vdy = drift velocity in x/y
   npx/npy = number of particles laid out in x/y direction
   idimp = size of phase space = 4, nop = number of particles
   nx/ny = system length in x/y direction
   ipbc = particle boundary condition = (0,1,2,3) =
   (none,2d periodic,2d reflecting,mixed reflecting/periodic)
   uses ranorm() for unit-variance gaussian deviates
local data */
   int ix, iy, row, ntot;
   float xmin, ymin, ax, ay, yval, scale, davx, davy;
   double dtx, dty;
   ntot = npx*npy;
/* grid origin and spacing depend on the boundary condition */
   xmin = 0.0;
   ymin = 0.0;
   ax = (float) nx/(float) npx;
   ay = (float) ny/(float) npy;
   if (ipbc==2) {
      xmin = 1.0;
      ymin = 1.0;
      ax = (float) (nx-2)/(float) npx;
      ay = (float) (ny-2)/(float) npy;
   }
   else if (ipbc==3) {
      xmin = 1.0;
      ax = (float) (nx-2)/(float) npx;
   }
/* lay particles down with uniform density, cell-centered */
   for (iy = 0; iy < npy; iy++) {
      row = idimp*npx*iy;
      yval = ymin + ay*(((float) iy) + 0.5);
      for (ix = 0; ix < npx; ix++) {
         part[idimp*ix+row] = xmin + ax*(((float) ix) + 0.5);
         part[1+idimp*ix+row] = yval;
      }
   }
/* draw maxwellian velocities */
   for (ix = 0; ix < ntot; ix++) {
      part[2+idimp*ix] = vtx*ranorm();
      part[3+idimp*ix] = vty*ranorm();
   }
/* measure the sampled mean velocity (accumulated in double) and shift
   it so the ensemble drift is exactly (vdx,vdy) */
   dtx = 0.0;
   dty = 0.0;
   for (ix = 0; ix < ntot; ix++) {
      dtx += part[2+idimp*ix];
      dty += part[3+idimp*ix];
   }
   davx = dtx;
   davy = dty;
   scale = 1.0/(float) ntot;
   davx = scale*davx - vdx;
   davy = scale*davy - vdy;
   for (ix = 0; ix < ntot; ix++) {
      part[2+idimp*ix] -= davx;
      part[3+idimp*ix] -= davy;
   }
   return;
}
/*--------------------------------------------------------------------*/
void cdblkp2l(float part[], int kpic[], int *nppmx, int idimp, int nop,
int mx, int my, int mx1, int mxy1, int *irc) {
/* counts the particles falling in each mx-by-my tile so the caller can
   size the segmented array ppart; linear interpolation ordering.
   part[n][0]/part[n][1] = position x/y of particle n
   kpic = output number of particles per tile
   nppmx = returned maximum tile population
   idimp = size of phase space = 4, nop = number of particles
   mx/my = tile size in x/y
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
   irc = tile-index overflow amount (>0), or -1 if particles were lost
local data */
   int j, tile, ix, iy, total, cnt, maxcnt, overflow;
   overflow = 0;
/* reset the per-tile counters */
   for (tile = 0; tile < mxy1; tile++) {
      kpic[tile] = 0;
   }
/* tally every particle into its tile (positions truncate to ints) */
   for (j = 0; j < nop; j++) {
      ix = part[idimp*j];
      iy = part[1+idimp*j];
      tile = ix/mx + mx1*(iy/my);
      if (tile < mxy1) {
         kpic[tile] += 1;
      }
      else {
/* out-of-range tile: remember the worst overshoot */
         cnt = tile - mxy1 + 1;
         if (cnt > overflow)
            overflow = cnt;
      }
   }
/* reduce: total particles seen and largest tile population */
   total = 0;
   maxcnt = 0;
   for (tile = 0; tile < mxy1; tile++) {
      cnt = kpic[tile];
      if (cnt > maxcnt)
         maxcnt = cnt;
      total += cnt;
   }
   *nppmx = maxcnt;
/* report tile overflow, or a lost-particle inconsistency */
   if (overflow > 0) {
      *irc = overflow;
   }
   else if (total != nop) {
      *irc = -1;
   }
   return;
}
/*--------------------------------------------------------------------*/
void cppmovin2lt(float part[], float ppart[], int kpic[], int nppmx,
int idimp, int nop, int mx, int my, int mx1, int mxy1,
int *irc) {
/* scatters particles from the linear array part into the segmented
   tile array ppart, one pass, linear interpolation ordering.
   input: all except ppart, kpic; output: ppart, kpic
   part[n][0]/part[n][1] = position x/y of particle n
   ppart[k][i][n] = coordinate i of particle n in tile k
   kpic = output number of particles per tile
   nppmx = maximum number of particles per tile
   idimp = size of phase space = 4, nop = number of particles
   mx/my = tile size in x/y
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
   irc = tile overflow amount, set only when a tile exceeds nppmx
local data */
   int c, j, tile, ix, iy, slot, overflow;
   overflow = 0;
/* reset per-tile fill counters */
   for (tile = 0; tile < mxy1; tile++) {
      kpic[tile] = 0;
   }
/* route each particle to the next free slot of its tile */
   for (j = 0; j < nop; j++) {
      ix = part[idimp*j];
      iy = part[1+idimp*j];
      tile = ix/mx + mx1*(iy/my);
      slot = kpic[tile];
      if (slot < nppmx) {
         for (c = 0; c < idimp; c++) {
            ppart[slot+nppmx*(c+idimp*tile)] = part[c+idimp*j];
         }
      }
      else if (slot-nppmx+1 > overflow) {
/* tile full: track the worst overflow */
         overflow = slot - nppmx + 1;
      }
      kpic[tile] = slot + 1;
   }
   if (overflow > 0)
      *irc = overflow;
   return;
}
/*--------------------------------------------------------------------*/
void cppmovin2ltp(float part[], float ppart[], int kpic[], int kp[],
int nppmx, int idimp, int nop, int mx, int my,
int mx1, int mxy1, int *irc) {
/* scatters particles from part into the segmented tile array ppart in
   two passes; designed for NUMA machines where memory is owned by the
   processor that first writes it, so each thread writes only its tiles.
   input: all except ppart, kpic; output: ppart, kpic
   part[n][0]/part[n][1] = position x/y of particle n
   ppart[k][i][n] = coordinate i of particle n in tile k
   kpic = output number of particles per tile
   kp = scratch: original index of each reordered particle
   nppmx = maximum number of particles per tile
   idimp = size of phase space = 4, nop = number of particles
   mx/my = tile size in x/y
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
   irc = tile overflow amount, set only when a tile exceeds nppmx
local data */
   int c, tile, j, ix, iy, slot, src, count, overflow;
   overflow = 0;
/* reset per-tile fill counters */
   for (tile = 0; tile < mxy1; tile++) {
      kpic[tile] = 0;
   }
/* pass 1 (serial): record, per tile, the source index of each particle */
   for (j = 0; j < nop; j++) {
      ix = part[idimp*j];
      iy = part[1+idimp*j];
      tile = ix/mx + mx1*(iy/my);
      slot = kpic[tile];
      if (slot < nppmx) {
         kp[slot+nppmx*tile] = j;
      }
      else if (slot-nppmx+1 > overflow) {
         overflow = slot - nppmx + 1;
      }
      kpic[tile] = slot + 1;
   }
/* abort before touching ppart if any tile overflowed */
   if (overflow > 0) {
      *irc = overflow;
      return;
   }
/* pass 2 (parallel): gather; each thread first-touches its own tiles */
#pragma omp parallel for private(c,slot,src,count)
   for (tile = 0; tile < mxy1; tile++) {
      count = kpic[tile];
      for (slot = 0; slot < count; slot++) {
         src = kp[slot+nppmx*tile];
         for (c = 0; c < idimp; c++) {
            ppart[slot+nppmx*(c+idimp*tile)] = part[c+idimp*src];
         }
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void cppcheck2lt(float ppart[], int kpic[], int idimp, int nppmx, int nx,
int ny, int mx, int my, int mx1, int my1,
int *irc) {
/* sanity check: verifies that every particle stored in the segmented
   array lies inside the bounds of its own tile.
   input: all except irc; output: irc
   ppart[k][0][n]/ppart[k][1][n] = position x/y of particle n in tile k
   kpic[k] = number of particles in tile k
   idimp = size of phase space = 4
   nppmx = maximum number of particles per tile
   nx/ny = system length in x/y direction
   mx/my = tile size in x/y
   mx1 = (system length in x direction - 1)/mx + 1
   my1 = (system length in y direction - 1)/my + 1
   irc = (index of an offending tile)+1, written only on error
local data */
   int ntiles, j, k, npp, ixoff, iyoff, nn, mm, flag;
   float xlo, xhi, ylo, yhi, px, py;
   ntiles = mx1*my1;
/* examine tiles independently; last writer wins on *irc, matching the
   original's unsynchronized error reporting */
#pragma omp parallel for \
private(j,npp,nn,mm,ixoff,iyoff,flag,xlo,ylo,xhi,yhi,px,py)
   for (k = 0; k < ntiles; k++) {
/* decode tile index into grid offsets and clip the tile to the system */
      iyoff = k/mx1;
      ixoff = mx*(k - mx1*iyoff);
      iyoff = my*iyoff;
      nn = nx - ixoff;
      if (mx < nn)
         nn = mx;
      mm = ny - iyoff;
      if (my < mm)
         mm = my;
      xlo = ixoff;
      xhi = ixoff + nn;
      ylo = iyoff;
      yhi = iyoff + mm;
      npp = kpic[k];
/* test every particle of the tile against the tile edges */
      for (j = 0; j < npp; j++) {
         px = ppart[j+nppmx*(idimp*k)];
         py = ppart[j+nppmx*(1+idimp*k)];
         flag = 0;
         if (px < xlo)
            flag = 1;
         if (px >= xhi)
            flag = 2;
         if (py < ylo)
            flag += 3;
         if (py >= yhi)
            flag += 6;
         if (flag > 0)
            *irc = k + 1;
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void cgppush2lt(float ppart[], float fxy[], int kpic[], float qbm,
float dt, float *ek, int idimp, int nppmx, int nx,
int ny, int mx, int my, int nxv, int nyv, int mx1,
int mxy1, int ipbc) {
/* for 2d code, this subroutine updates particle co-ordinates and
velocities using leap-frog scheme in time and first-order linear
interpolation in space, with various boundary conditions.
OpenMP version using guard cells
data read in tiles
particles stored segmented array
44 flops/particle, 12 loads, 4 stores
input: all, output: ppart, ek
equations used are:
vx(t+dt/2) = vx(t-dt/2) + (q/m)*fx(x(t),y(t))*dt,
vy(t+dt/2) = vy(t-dt/2) + (q/m)*fy(x(t),y(t))*dt,
where q/m is charge/mass, and
x(t+dt) = x(t) + vx(t+dt/2)*dt, y(t+dt) = y(t) + vy(t+dt/2)*dt
fx(x(t),y(t)) and fy(x(t),y(t)) are approximated by interpolation from
the nearest grid points:
fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1)
+ dx*fx(n+1,m+1))
fy(x,y) = (1-dy)*((1-dx)*fy(n,m)+dx*fy(n+1,m)) + dy*((1-dx)*fy(n,m+1)
+ dx*fy(n+1,m+1))
where n,m = leftmost grid points and dx = x-n, dy = y-m
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = velocity vx of particle n in tile m
ppart[m][3][n] = velocity vy of particle n in tile m
fxy[k][j][0] = x component of force/charge at grid (j,k)
fxy[k][j][1] = y component of force/charge at grid (j,k)
that is, convolution of electric field over particle shape
kpic = number of particles per tile
qbm = particle charge/mass
dt = time interval between successive calculations
kinetic energy/mass at time t is also calculated, using
ek = .125*sum((vx(t+dt/2)+vx(t-dt/2))**2+(vy(t+dt/2)+vy(t-dt/2))**2)
idimp = size of phase space = 4
nppmx = maximum number of particles in tile
nx/ny = system length in x/y direction
mx/my = number of grids in sorting cell in x/y
nxv = second dimension of field arrays, must be >= nx+1
nyv = third dimension of field arrays, must be >= ny+1
mx1 = (system length in x direction - 1)/mx + 1
mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
ipbc = particle boundary condition = (0,1,2,3) =
(none,2d periodic,2d reflecting,mixed reflecting/periodic)
local data */
/* MXV/MYV bound the on-stack field copy: must be >= mx+1 and my+1 */
#define MXV 33
#define MYV 33
int noff, moff, npoff, npp;
int i, j, k, nn, mm, mxv;
float qtm, edgelx, edgely, edgerx, edgery, dxp, dyp, amx, amy;
float x, y, dx, dy, vx, vy;
/* sfxy = private per-tile copy of the field, including guard cells */
float sfxy[2*MXV*MYV];
/* float sfxy[2*(mx+1)*(my+1)]; */
double sum1, sum2;
mxv = mx + 1;
qtm = qbm*dt;
sum2 = 0.0;
/* set boundary values */
edgelx = 0.0f;
edgely = 0.0f;
edgerx = (float) nx;
edgery = (float) ny;
if (ipbc==2) {
edgelx = 1.0f;
edgely = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
}
else if (ipbc==3) {
edgelx = 1.0f;
edgerx = (float) (nx-1);
}
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/* return; */
/* loop over tiles */
/* each tile is independent; kinetic energy is combined via reduction */
#pragma omp parallel for \
private(i,j,k,noff,moff,npp,npoff,nn,mm,x,y,dxp,dyp,amx,amy,dx,dy,vx, \
vy,sum1,sfxy) \
reduction(+:sum2)
for (k = 0; k < mxy1; k++) {
/* decode tile index k: moff = y grid offset, noff = x grid offset */
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[k];
npoff = idimp*nppmx*k;
/* load local fields from global array */
nn = (mx < nx-noff ? mx : nx-noff) + 1;
mm = (my < ny-moff ? my : ny-moff) + 1;
for (j = 0; j < mm; j++) {
for (i = 0; i < nn; i++) {
sfxy[2*(i+mxv*j)] = fxy[2*(i+noff+nxv*(j+moff))];
sfxy[1+2*(i+mxv*j)] = fxy[1+2*(i+noff+nxv*(j+moff))];
}
}
sum1 = 0.0;
/* loop over particles in tile */
for (j = 0; j < npp; j++) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
nn = x;
mm = y;
dxp = x - (float) nn;
dyp = y - (float) mm;
/* nn = interleaved (x,y) index of the lower-left corner in sfxy */
nn = 2*(nn - noff + mxv*(mm - moff));
amx = 1.0f - dxp;
amy = 1.0f - dyp;
/* find acceleration */
/* bilinear interpolation: bottom row of corners, then top row */
dx = amx*sfxy[nn];
dy = amx*sfxy[nn+1];
dx = amy*(dxp*sfxy[nn+2] + dx);
dy = amy*(dxp*sfxy[nn+3] + dy);
nn += 2*mxv;
vx = amx*sfxy[nn];
vy = amx*sfxy[nn+1];
dx += dyp*(dxp*sfxy[nn+2] + vx);
dy += dyp*(dxp*sfxy[nn+3] + vy);
/* new velocity */
dxp = ppart[j+2*nppmx+npoff];
dyp = ppart[j+3*nppmx+npoff];
vx = dxp + qtm*dx;
vy = dyp + qtm*dy;
/* average kinetic energy */
/* (v_old + v_new)^2 terms; scaled by 0.125 after the tile loop */
dxp += vx;
dyp += vy;
sum1 += dxp*dxp + dyp*dyp;
/* new position */
dx = x + vx*dt;
dy = y + vy*dt;
/* reflecting boundary conditions */
if (ipbc==2) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
vx = -vx;
}
if ((dy < edgely) || (dy >= edgery)) {
dy = y;
vy = -vy;
}
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
vx = -vx;
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
/* set new velocity */
ppart[j+2*nppmx+npoff] = vx;
ppart[j+3*nppmx+npoff] = vy;
}
sum2 += sum1;
}
/* normalize kinetic energy */
*ek += 0.125f*sum2;
return;
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cgppushf2lt(float ppart[], float fxy[], int kpic[], int ncl[],
int ihole[], float qbm, float dt, float *ek, int idimp,
int nppmx, int nx, int ny, int mx, int my, int nxv,
int nyv, int mx1, int mxy1, int ntmax, int *irc) {
/* for 2d code, this subroutine updates particle co-ordinates and
velocities using leap-frog scheme in time and first-order linear
interpolation in space, with periodic boundary conditions.
also determines list of particles which are leaving this tile
OpenMP version using guard cells
data read in tiles
particles stored segmented array
44 flops/particle, 12 loads, 4 stores
input: all except ncl, ihole, irc, output: ppart, ncl, ihole, ek, irc
equations used are:
vx(t+dt/2) = vx(t-dt/2) + (q/m)*fx(x(t),y(t))*dt,
vy(t+dt/2) = vy(t-dt/2) + (q/m)*fy(x(t),y(t))*dt,
where q/m is charge/mass, and
x(t+dt) = x(t) + vx(t+dt/2)*dt, y(t+dt) = y(t) + vy(t+dt/2)*dt
fx(x(t),y(t)) and fy(x(t),y(t)) are approximated by interpolation from
the nearest grid points:
fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1)
+ dx*fx(n+1,m+1))
fy(x,y) = (1-dy)*((1-dx)*fy(n,m)+dx*fy(n+1,m)) + dy*((1-dx)*fy(n,m+1)
+ dx*fy(n+1,m+1))
where n,m = leftmost grid points and dx = x-n, dy = y-m
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = velocity vx of particle n in tile m
ppart[m][3][n] = velocity vy of particle n in tile m
fxy[k][j][0] = x component of force/charge at grid (j,k)
fxy[k][j][1] = y component of force/charge at grid (j,k)
that is, convolution of electric field over particle shape
kpic[k] = number of particles in tile k
ncl[k][i] = number of particles going to destination i, tile k
ihole[k][:][0] = location of hole in array left by departing particle
ihole[k][:][1] = destination of particle leaving hole
ihole[k][0][0] = ih, number of holes left (error, if negative)
qbm = particle charge/mass
dt = time interval between successive calculations
kinetic energy/mass at time t is also calculated, using
ek = .125*sum((vx(t+dt/2)+vx(t-dt/2))**2+(vy(t+dt/2)+vy(t-dt/2))**2)
idimp = size of phase space = 4
nppmx = maximum number of particles in tile
nx/ny = system length in x/y direction
mx/my = number of grids in sorting cell in x/y
nxv = second dimension of field arrays, must be >= nx+1
nyv = third dimension of field arrays, must be >= ny+1
mx1 = (system length in x direction - 1)/mx + 1
mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
optimized version
local data */
/* MXV/MYV bound the on-stack field copy: must be >= mx+1 and my+1 */
#define MXV 33
#define MYV 33
int noff, moff, npoff, npp;
int i, j, k, ih, nh, nn, mm, mxv;
float qtm, dxp, dyp, amx, amy;
float x, y, dx, dy, vx, vy;
float anx, any, edgelx, edgely, edgerx, edgery;
/* sfxy = private per-tile copy of the field, including guard cells */
float sfxy[2*MXV*MYV];
/* float sfxy[2*(mx+1)*(my+1)]; */
double sum1, sum2;
mxv = mx + 1;
qtm = qbm*dt;
anx = (float) nx;
any = (float) ny;
sum2 = 0.0;
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/* return; */
/* loop over tiles */
/* tiles are independent; energy combined via reduction; *irc writes
   are unsynchronized (any overflowing tile reports) */
#pragma omp parallel for \
private(i,j,k,noff,moff,npp,npoff,nn,mm,ih,nh,x,y,dxp,dyp,amx,amy, \
dx,dy,vx,vy,edgelx,edgely,edgerx,edgery,sum1,sfxy) \
reduction(+:sum2)
for (k = 0; k < mxy1; k++) {
/* decode tile index k: moff = y grid offset, noff = x grid offset */
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[k];
npoff = idimp*nppmx*k;
/* clip the tile extent at the system edge */
nn = nx - noff;
nn = mx < nn ? mx : nn;
mm = ny - moff;
mm = my < mm ? my : mm;
/* tile edges used to detect departing particles */
edgelx = noff;
edgerx = noff + nn;
edgely = moff;
edgery = moff + mm;
/* ih = number of holes in this tile, nh = overflow flag */
ih = 0;
nh = 0;
nn += 1;
mm += 1;
/* load local fields from global array */
for (j = 0; j < mm; j++) {
for (i = 0; i < nn; i++) {
sfxy[2*(i+mxv*j)] = fxy[2*(i+noff+nxv*(j+moff))];
sfxy[1+2*(i+mxv*j)] = fxy[1+2*(i+noff+nxv*(j+moff))];
}
}
/* clear counters */
for (j = 0; j < 8; j++) {
ncl[j+8*k] = 0;
}
sum1 = 0.0;
/* loop over particles in tile */
for (j = 0; j < npp; j++) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
nn = x;
mm = y;
dxp = x - (float) nn;
dyp = y - (float) mm;
/* nn = interleaved (x,y) index of the lower-left corner in sfxy */
nn = 2*(nn - noff + mxv*(mm - moff));
amx = 1.0f - dxp;
amy = 1.0f - dyp;
/* find acceleration */
/* bilinear interpolation: bottom row of corners, then top row */
dx = amx*sfxy[nn];
dy = amx*sfxy[nn+1];
dx = amy*(dxp*sfxy[nn+2] + dx);
dy = amy*(dxp*sfxy[nn+3] + dy);
nn += 2*mxv;
vx = amx*sfxy[nn];
vy = amx*sfxy[nn+1];
dx += dyp*(dxp*sfxy[nn+2] + vx);
dy += dyp*(dxp*sfxy[nn+3] + vy);
/* new velocity */
dxp = ppart[j+2*nppmx+npoff];
dyp = ppart[j+3*nppmx+npoff];
vx = dxp + qtm*dx;
vy = dyp + qtm*dy;
/* average kinetic energy */
dxp += vx;
dyp += vy;
sum1 += dxp*dxp + dyp*dyp;
/* new position */
dx = x + vx*dt;
dy = y + vy*dt;
/* find particles going out of bounds */
mm = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going */
if (dx >= edgerx) {
if (dx >= anx)
dx -= anx;
mm = 2;
}
else if (dx < edgelx) {
if (dx < 0.0f) {
dx += anx;
/* roundoff guard: dx could still equal anx after wrapping */
if (dx < anx)
mm = 1;
else
dx = 0.0;
}
else {
mm = 1;
}
}
if (dy >= edgery) {
if (dy >= any)
dy -= any;
mm += 6;
}
else if (dy < edgely) {
if (dy < 0.0) {
dy += any;
if (dy < any)
mm += 3;
else
dy = 0.0;
}
else {
mm += 3;
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
/* set new velocity */
ppart[j+2*nppmx+npoff] = vx;
ppart[j+3*nppmx+npoff] = vy;
/* increment counters */
if (mm > 0) {
/* mm is 1-based direction code; ncl rows are 0-based */
ncl[mm+8*k-1] += 1;
ih += 1;
if (ih <= ntmax) {
ihole[2*(ih+(ntmax+1)*k)] = j + 1;
ihole[1+2*(ih+(ntmax+1)*k)] = mm;
}
else {
nh = 1;
}
}
}
sum2 += sum1;
/* set error and end of file flag */
/* ihole overflow */
/* a negative hole count marks this tile's ihole data as invalid */
if (nh > 0) {
*irc = ih;
ih = -ih;
}
ihole[2*(ntmax+1)*k] = ih;
}
/* normalize kinetic energy */
*ek += 0.125f*sum2;
return;
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cvgppush2lt(float ppart[], float fxy[], int kpic[], float qbm,
float dt, float *ek, int idimp, int nppmx, int nx,
int ny, int mx, int my, int nxv, int nyv, int mx1,
int mxy1, int ipbc) {
/* for 2d code, this subroutine updates particle co-ordinates and
velocities using leap-frog scheme in time and first-order linear
interpolation in space, with various boundary conditions.
vectorizable/OpenMP version using guard cells
data read in tiles
particles stored segmented array
44 flops/particle, 12 loads, 4 stores
input: all, output: ppart, ek
equations used are:
vx(t+dt/2) = vx(t-dt/2) + (q/m)*fx(x(t),y(t))*dt,
vy(t+dt/2) = vy(t-dt/2) + (q/m)*fy(x(t),y(t))*dt,
where q/m is charge/mass, and
x(t+dt) = x(t) + vx(t+dt/2)*dt, y(t+dt) = y(t) + vy(t+dt/2)*dt
fx(x(t),y(t)) and fy(x(t),y(t)) are approximated by interpolation from
the nearest grid points:
fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1)
+ dx*fx(n+1,m+1))
fy(x,y) = (1-dy)*((1-dx)*fy(n,m)+dx*fy(n+1,m)) + dy*((1-dx)*fy(n,m+1)
+ dx*fy(n+1,m+1))
where n,m = leftmost grid points and dx = x-n, dy = y-m
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = velocity vx of particle n in tile m
ppart[m][3][n] = velocity vy of particle n in tile m
fxy[k][j][0] = x component of force/charge at grid (j,k)
fxy[k][j][1] = y component of force/charge at grid (j,k)
that is, convolution of electric field over particle shape
kpic = number of particles per tile
qbm = particle charge/mass
dt = time interval between successive calculations
kinetic energy/mass at time t is also calculated, using
ek = .125*sum((vx(t+dt/2)+vx(t-dt/2))**2+(vy(t+dt/2)+vy(t-dt/2))**2)
idimp = size of phase space = 4
nppmx = maximum number of particles in tile
nx/ny = system length in x/y direction
mx/my = number of grids in sorting cell in x/y
nxv = second dimension of field arrays, must be >= nx+1
nyv = third dimension of field arrays, must be >= ny+1
mx1 = (system length in x direction - 1)/mx + 1
mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
ipbc = particle boundary condition = (0,1,2,3) =
(none,2d periodic,2d reflecting,mixed reflecting/periodic)
local data */
/* MXV/MYV bound the on-stack field copy: must be >= mx+1 and my+1 */
#define MXV 33
#define MYV 33
/* NPBLK = particles processed per vector block, LVECT = 4 cell corners */
#define NPBLK 32
#define LVECT 4
int noff, moff, npoff, npp, ipp, joff, nps;
int i, j, k, m, nn, mm, lxv;
float qtm, edgelx, edgely, edgerx, edgery, dxp, dyp, amx, amy;
float x, y, dx, dy, vx, vy;
/* sfxy = private per-tile copy of the field, including guard cells */
float sfxy[2*MXV*MYV];
/* float sfxy[2*(mx+1)*(my+1)]; */
/* scratch arrays */
/* n = corner indices, s = weights then results, t = saved positions */
int n[NPBLK];
float s[NPBLK*LVECT], t[NPBLK*2];
double sum1, sum2;
lxv = mx + 1;
qtm = qbm*dt;
sum2 = 0.0;
/* set boundary values */
edgelx = 0.0f;
edgely = 0.0f;
edgerx = (float) nx;
edgery = (float) ny;
if (ipbc==2) {
edgelx = 1.0f;
edgely = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
}
else if (ipbc==3) {
edgelx = 1.0f;
edgerx = (float) (nx-1);
}
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,m,noff,moff,npp,npoff,ipp,joff,nps,nn,mm,x,y,dxp,dyp, \
amx,amy,dx,dy,vx,vy,sum1,sfxy,n,s,t) \
reduction(+:sum2)
for (k = 0; k < mxy1; k++) {
/* decode tile index k: moff = y grid offset, noff = x grid offset */
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[k];
npoff = idimp*nppmx*k;
/* load local fields from global array */
nn = (mx < nx-noff ? mx : nx-noff) + 1;
mm = (my < ny-moff ? my : ny-moff) + 1;
for (j = 0; j < mm; j++) {
for (i = 0; i < nn; i++) {
sfxy[2*(i+lxv*j)] = fxy[2*(i+noff+nxv*(j+moff))];
sfxy[1+2*(i+lxv*j)] = fxy[1+2*(i+noff+nxv*(j+moff))];
}
}
sum1 = 0.0;
/* loop over particles in tile */
/* process particles in blocks of NPBLK to expose vectorization */
ipp = npp/NPBLK;
/* outer loop over number of full blocks */
for (m = 0; m < ipp; m++) {
joff = NPBLK*m;
/* inner loop over particles in block */
/* stage 1: compute corner index and the four bilinear weights */
for (j = 0; j < NPBLK; j++) {
/* find interpolation weights */
x = ppart[j+joff+npoff];
y = ppart[j+joff+nppmx+npoff];
nn = x;
mm = y;
dxp = x - (float) nn;
dyp = y - (float) mm;
n[j] = nn - noff + lxv*(mm - moff);
amx = 1.0f - dxp;
amy = 1.0f - dyp;
s[j] = amx*amy;
s[j+NPBLK] = dxp*amy;
s[j+2*NPBLK] = amx*dyp;
s[j+3*NPBLK] = dxp*dyp;
t[j] = x;
t[j+NPBLK] = y;
}
/* find acceleration */
/* stage 2: gather the 4 corner fields and accumulate weighted sums */
for (j = 0; j < NPBLK; j++) {
nn = n[j];
/* mm = index of the upper corner row (i=2,3 of the LVECT loop) */
mm = nn + lxv - 2;
dx = 0.0f;
dy = 0.0f;
#pragma ivdep
for (i = 0; i < LVECT; i++) {
if (i > 1)
nn = mm;
dx += sfxy[2*(i+nn)]*s[j+NPBLK*i];
dy += sfxy[1+2*(i+nn)]*s[j+NPBLK*i];
}
s[j] = dx;
s[j+NPBLK] = dy;
}
/* new velocity */
/* stage 3: leap-frog update; energy uses (v_old + v_new)^2 */
for (j = 0; j < NPBLK; j++) {
x = t[j];
y = t[j+NPBLK];
dxp = ppart[j+joff+2*nppmx+npoff];
dyp = ppart[j+joff+3*nppmx+npoff];
vx = dxp + qtm*s[j];
vy = dyp + qtm*s[j+NPBLK];
/* average kinetic energy */
dxp += vx;
dyp += vy;
sum1 += dxp*dxp + dyp*dyp;
/* new position */
s[j] = x + vx*dt;
s[j+NPBLK] = y + vy*dt;
s[j+2*NPBLK] = vx;
s[j+3*NPBLK] = vy;
}
/* check boundary conditions */
/* stage 4: scalar loop (novector) applies boundaries and stores */
#pragma novector
for (j = 0; j < NPBLK; j++) {
dx = s[j];
dy = s[j+NPBLK];
vx = s[j+2*NPBLK];
vy = s[j+3*NPBLK];
/* reflecting boundary conditions */
if (ipbc==2) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = t[j];
vx = -vx;
}
if ((dy < edgely) || (dy >= edgery)) {
dy = t[j+NPBLK];
vy = -vy;
}
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = t[j];
vx = -vx;
}
}
/* set new position */
ppart[j+joff+npoff] = dx;
ppart[j+joff+nppmx+npoff] = dy;
/* set new velocity */
ppart[j+joff+2*nppmx+npoff] = vx;
ppart[j+joff+3*nppmx+npoff] = vy;
}
}
nps = NPBLK*ipp;
/* loop over remaining particles */
/* scalar epilogue for the tail that does not fill a block */
for (j = nps; j < npp; j++) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
nn = x;
mm = y;
dxp = x - (float) nn;
dyp = y - (float) mm;
nn = 2*(nn - noff + lxv*(mm - moff));
amx = 1.0f - dxp;
amy = 1.0f - dyp;
/* find acceleration */
dx = amx*sfxy[nn];
dy = amx*sfxy[nn+1];
dx = amy*(dxp*sfxy[nn+2] + dx);
dy = amy*(dxp*sfxy[nn+3] + dy);
nn += 2*lxv;
vx = amx*sfxy[nn];
vy = amx*sfxy[nn+1];
dx += dyp*(dxp*sfxy[nn+2] + vx);
dy += dyp*(dxp*sfxy[nn+3] + vy);
/* new velocity */
dxp = ppart[j+2*nppmx+npoff];
dyp = ppart[j+3*nppmx+npoff];
vx = dxp + qtm*dx;
vy = dyp + qtm*dy;
/* average kinetic energy */
dxp += vx;
dyp += vy;
sum1 += dxp*dxp + dyp*dyp;
/* new position */
dx = x + vx*dt;
dy = y + vy*dt;
/* reflecting boundary conditions */
if (ipbc==2) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
vx = -vx;
}
if ((dy < edgely) || (dy >= edgery)) {
dy = y;
vy = -vy;
}
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
vx = -vx;
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
/* set new velocity */
ppart[j+2*nppmx+npoff] = vx;
ppart[j+3*nppmx+npoff] = vy;
}
sum2 += sum1;
}
/* normalize kinetic energy */
*ek += 0.125f*sum2;
return;
#undef LVECT
#undef NPBLK
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cvgppushf2lt(float ppart[], float fxy[], int kpic[], int ncl[],
                  int ihole[], float qbm, float dt, float *ek,
                  int idimp, int nppmx, int nx, int ny, int mx, int my,
                  int nxv, int nyv, int mx1, int mxy1, int ntmax,
                  int *irc) {
/* for 2d code, this subroutine updates particle co-ordinates and
   velocities using leap-frog scheme in time and first-order linear
   interpolation in space, with periodic boundary conditions.
   also determines list of particles which are leaving this tile
   vectorizable/OpenMP version using guard cells
   data read in tiles
   particles stored segmented array
   44 flops/particle, 12 loads, 4 stores
   input: all except ncl, ihole, irc, output: ppart, ncl, ihole, ek, irc
   equations used are:
   vx(t+dt/2) = vx(t-dt/2) + (q/m)*fx(x(t),y(t))*dt,
   vy(t+dt/2) = vy(t-dt/2) + (q/m)*fy(x(t),y(t))*dt,
   where q/m is charge/mass, and
   x(t+dt) = x(t) + vx(t+dt/2)*dt, y(t+dt) = y(t) + vy(t+dt/2)*dt
   fx(x(t),y(t)) and fy(x(t),y(t)) are approximated by interpolation from
   the nearest grid points:
   fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1)
      + dx*fx(n+1,m+1))
   fy(x,y) = (1-dy)*((1-dx)*fy(n,m)+dx*fy(n+1,m)) + dy*((1-dx)*fy(n,m+1)
      + dx*fy(n+1,m+1))
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   ppart[m][0][n] = position x of particle n in tile m
   ppart[m][1][n] = position y of particle n in tile m
   ppart[m][2][n] = velocity vx of particle n in tile m
   ppart[m][3][n] = velocity vy of particle n in tile m
   fxy[k][j][0] = x component of force/charge at grid (j,k)
   fxy[k][j][1] = y component of force/charge at grid (j,k)
   that is, convolution of electric field over particle shape
   kpic[k] = number of particles in tile k
   ncl[k][i] = number of particles going to destination i, tile k
   ihole[k][:][0] = location of hole in array left by departing particle
   ihole[k][:][1] = destination of particle leaving hole
   ihole[k][0][0] = ih, number of holes left (error, if negative)
   qbm = particle charge/mass
   dt = time interval between successive calculations
   kinetic energy/mass at time t is also calculated, using
   ek = .125*sum((vx(t+dt/2)+vx(t-dt/2))**2+(vy(t+dt/2)+vy(t-dt/2))**2)
   idimp = size of phase space = 4
   nppmx = maximum number of particles in tile
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   nxv = second dimension of field arrays, must be >= nx+1
   nyv = third dimension of field arrays, must be >= ny+1
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
   ntmax = size of hole array for particles leaving tiles
   irc = maximum overflow, returned only if error occurs, when irc > 0
   optimized version
local data */
#define MXV             33
#define MYV             33
#define NPBLK             32
#define LVECT             4
   int noff, moff, npoff, npp, ipp, joff, nps;
   int i, j, k, m, ih, nh, nn, mm, lxv;
   float qtm, dxp, dyp, amx, amy;
   float x, y, dx, dy, vx, vy;
   float anx, any, edgelx, edgely, edgerx, edgery;
   float sfxy[2*MXV*MYV];
/* float sfxy[2*(mx+1)*(my+1)]; */
/* scratch arrays */
   int n[NPBLK];
   float s[NPBLK*LVECT], t[NPBLK*2];
   double sum1, sum2;
   lxv = mx + 1;
   qtm = qbm*dt;
   anx = (float) nx;
   any = (float) ny;
   sum2 = 0.0;
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/*    return;                      */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,m,noff,moff,npp,npoff,ipp,joff,nps,nn,mm,ih,nh,x,y,dxp, \
dyp,amx,amy,dx,dy,vx,vy,edgelx,edgely,edgerx,edgery,sum1,sfxy,n,s,t) \
reduction(+:sum2)
   for (k = 0; k < mxy1; k++) {
/* noff,moff = lowermost global grid point in this tile */
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[k];
      npoff = idimp*nppmx*k;
      nn = nx - noff;
      nn = mx < nn ? mx : nn;
      mm = ny - moff;
      mm = my < mm ? my : mm;
/* tile boundaries used to detect departing particles */
      edgelx = noff;
      edgerx = noff + nn;
      edgely = moff;
      edgery = moff + mm;
/* ih = number of holes in tile, nh = set if ihole array overflows */
      ih = 0;
      nh = 0;
      nn += 1;
      mm += 1;
/* load local fields from global array */
      for (j = 0; j < mm; j++) {
         for (i = 0; i < nn; i++) {
            sfxy[2*(i+lxv*j)] = fxy[2*(i+noff+nxv*(j+moff))];
            sfxy[1+2*(i+lxv*j)] = fxy[1+2*(i+noff+nxv*(j+moff))];
         }
      }
/* clear counters */
      for (j = 0; j < 8; j++) {
         ncl[j+8*k] = 0;
      }
      sum1 = 0.0;
/* loop over particles in tile */
      ipp = npp/NPBLK;
/* outer loop over number of full blocks */
      for (m = 0; m < ipp; m++) {
         joff = NPBLK*m;
/* inner loop over particles in block */
         for (j = 0; j < NPBLK; j++) {
/* find interpolation weights */
            x = ppart[j+joff+npoff];
            y = ppart[j+joff+nppmx+npoff];
            nn = x;
            mm = y;
            dxp = x - (float) nn;
            dyp = y - (float) mm;
            n[j] = nn - noff + lxv*(mm - moff);
            amx = 1.0f - dxp;
            amy = 1.0f - dyp;
            s[j] = amx*amy;
            s[j+NPBLK] = dxp*amy;
            s[j+2*NPBLK] = amx*dyp;
            s[j+3*NPBLK] = dxp*dyp;
            t[j] = x;
            t[j+NPBLK] = y;
         }
/* find acceleration */
         for (j = 0; j < NPBLK; j++) {
            nn = n[j];
            mm = nn + lxv - 2;
            dx = 0.0f;
            dy = 0.0f;
#pragma ivdep
            for (i = 0; i < LVECT; i++) {
               if (i > 1)
                  nn = mm;
               dx += sfxy[2*(i+nn)]*s[j+NPBLK*i];
               dy += sfxy[1+2*(i+nn)]*s[j+NPBLK*i];
            }
            s[j] = dx;
            s[j+NPBLK] = dy;
         }
/* new velocity */
         for (j = 0; j < NPBLK; j++) {
            x = t[j];
            y = t[j+NPBLK];
            dxp = ppart[j+joff+2*nppmx+npoff];
            dyp = ppart[j+joff+3*nppmx+npoff];
            vx = dxp + qtm*s[j];
            vy = dyp + qtm*s[j+NPBLK];
/* average kinetic energy */
            dxp += vx;
            dyp += vy;
            sum1 += dxp*dxp + dyp*dyp;
/* new position */
            s[j] = x + vx*dt;
            s[j+NPBLK] = y + vy*dt;
            s[j+2*NPBLK] = vx;
            s[j+3*NPBLK] = vy;
         }
/* check boundary conditions */
#pragma novector
         for (j = 0; j < NPBLK; j++) {
            dx = s[j];
            dy = s[j+NPBLK];
/* find particles going out of bounds */
            mm = 0;
/* count how many particles are going in each direction in ncl   */
/* save their address and destination in ihole                   */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going                              */
            if (dx >= edgerx) {
               if (dx >= anx)
                  dx -= anx;
               mm = 2;
            }
            else if (dx < edgelx) {
               if (dx < 0.0f) {
                  dx += anx;
                  if (dx < anx)
                     mm = 1;
                  else
                     dx = 0.0;
               }
               else {
                  mm = 1;
               }
            }
            if (dy >= edgery) {
               if (dy >= any)
                  dy -= any;
               mm += 6;
            }
            else if (dy < edgely) {
               if (dy < 0.0) {
                  dy += any;
                  if (dy < any)
                     mm += 3;
                  else
                     dy = 0.0;
               }
               else {
                  mm += 3;
               }
            }
/* set new position */
            ppart[j+joff+npoff] = dx;
            ppart[j+joff+nppmx+npoff] = dy;
/* set new velocity */
            ppart[j+joff+2*nppmx+npoff] = s[j+2*NPBLK];
            ppart[j+joff+3*nppmx+npoff] = s[j+3*NPBLK];
/* increment counters */
            if (mm > 0) {
               ncl[mm+8*k-1] += 1;
               ih += 1;
               if (ih <= ntmax) {
                  ihole[2*(ih+(ntmax+1)*k)] = j + joff + 1;
                  ihole[1+2*(ih+(ntmax+1)*k)] = mm;
               }
               else {
                  nh = 1;
               }
            }
         }
      }
      nps = NPBLK*ipp;
/* loop over remaining particles */
      for (j = nps; j < npp; j++) {
/* find interpolation weights */
         x = ppart[j+npoff];
         y = ppart[j+nppmx+npoff];
         nn = x;
         mm = y;
         dxp = x - (float) nn;
         dyp = y - (float) mm;
         nn = 2*(nn - noff + lxv*(mm - moff));
         amx = 1.0f - dxp;
         amy = 1.0f - dyp;
/* find acceleration */
         dx = amx*sfxy[nn];
         dy = amx*sfxy[nn+1];
         dx = amy*(dxp*sfxy[nn+2] + dx);
         dy = amy*(dxp*sfxy[nn+3] + dy);
         nn += 2*lxv;
         vx = amx*sfxy[nn];
         vy = amx*sfxy[nn+1];
         dx += dyp*(dxp*sfxy[nn+2] + vx);
         dy += dyp*(dxp*sfxy[nn+3] + vy);
/* new velocity */
         dxp = ppart[j+2*nppmx+npoff];
         dyp = ppart[j+3*nppmx+npoff];
         vx = dxp + qtm*dx;
         vy = dyp + qtm*dy;
/* average kinetic energy */
         dxp += vx;
         dyp += vy;
         sum1 += dxp*dxp + dyp*dyp;
/* new position */
         dx = x + vx*dt;
         dy = y + vy*dt;
/* find particles going out of bounds */
         mm = 0;
/* count how many particles are going in each direction in ncl   */
/* save their address and destination in ihole                   */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going                              */
         if (dx >= edgerx) {
            if (dx >= anx)
               dx -= anx;
            mm = 2;
         }
         else if (dx < edgelx) {
            if (dx < 0.0f) {
               dx += anx;
               if (dx < anx)
                  mm = 1;
               else
                  dx = 0.0;
            }
            else {
               mm = 1;
            }
         }
         if (dy >= edgery) {
            if (dy >= any)
               dy -= any;
            mm += 6;
         }
         else if (dy < edgely) {
            if (dy < 0.0) {
               dy += any;
               if (dy < any)
                  mm += 3;
               else
                  dy = 0.0;
            }
            else {
               mm += 3;
            }
         }
/* set new position */
         ppart[j+npoff] = dx;
         ppart[j+nppmx+npoff] = dy;
/* set new velocity */
         ppart[j+2*nppmx+npoff] = vx;
         ppart[j+3*nppmx+npoff] = vy;
/* increment counters */
         if (mm > 0) {
            ncl[mm+8*k-1] += 1;
            ih += 1;
            if (ih <= ntmax) {
               ihole[2*(ih+(ntmax+1)*k)] = j + 1;
               ihole[1+2*(ih+(ntmax+1)*k)] = mm;
            }
            else {
               nh = 1;
            }
         }
      }
      sum2 += sum1;
/* set error and end of file flag */
/* ihole overflow */
      if (nh > 0) {
         *irc = ih;
         ih = -ih;
      }
      ihole[2*(ntmax+1)*k] = ih;
   }
/* normalize kinetic energy */
   *ek += 0.125f*sum2;
   return;
#undef LVECT
#undef NPBLK
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cgppost2lt(float ppart[], float q[], int kpic[], float qm,
                int nppmx, int idimp, int mx, int my, int nxv, int nyv,
                int mx1, int mxy1) {
/* for 2d code, this subroutine calculates particle charge density
   using first-order linear interpolation, periodic boundaries
   OpenMP version using guard cells
   data deposited in tiles
   particles stored segmented array
   17 flops/particle, 6 loads, 4 stores
   input: all, output: q
   charge density is approximated by values at the nearest grid points
   q(n,m)=qm*(1.-dx)*(1.-dy)
   q(n+1,m)=qm*dx*(1.-dy)
   q(n,m+1)=qm*(1.-dx)*dy
   q(n+1,m+1)=qm*dx*dy
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   ppart[m][0][n] = position x of particle n in tile m
   ppart[m][1][n] = position y of particle n in tile m
   q[k][j] = charge density at grid point j,k
   kpic = number of particles per tile
   qm = charge on particle, in units of e
   nppmx = maximum number of particles in tile
   idimp = size of phase space = 4
   mx/my = number of grids in sorting cell in x/y
   nxv = first dimension of charge array, must be >= nx+1
   nyv = second dimension of charge array, must be >= ny+1
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
local data */
#define MXV             33
#define MYV             33
   int noff, moff, npoff, npp, mxv;
   int i, j, k, nn, mm;
   float x, y, dxp, dyp, amx, amy;
   float sq[MXV*MYV];
/* float sq[(mx+1)*(my+1)]; */
/* mxv = row stride of the local accumulator sq */
   mxv = mx + 1;
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/*    return;                      */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,noff,moff,npp,npoff,nn,mm,x,y,dxp,dyp,amx,amy,sq)
   for (k = 0; k < mxy1; k++) {
/* noff,moff = lowermost global grid point in this tile */
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[k];
      npoff = idimp*nppmx*k;
/* zero out local accumulator */
      for (j = 0; j < mxv*(my+1); j++) {
         sq[j] = 0.0f;
      }
/* loop over particles in tile */
      for (j = 0; j < npp; j++) {
/* find interpolation weights */
         x = ppart[j+npoff];
         y = ppart[j+nppmx+npoff];
         nn = x;
         mm = y;
         dxp = qm*(x - (float) nn);
         dyp = y - (float) mm;
         nn = nn - noff + mxv*(mm - moff);
         amx = qm - dxp;
         amy = 1.0f - dyp;
/* deposit charge within tile to local accumulator */
         x = sq[nn] + amx*amy;
         y = sq[nn+1] + dxp*amy;
         sq[nn] = x;
         sq[nn+1] = y;
         nn += mxv;
         x = sq[nn] + amx*dyp;
         y = sq[nn+1] + dxp*dyp;
         sq[nn] = x;
         sq[nn+1] = y;
      }
/* deposit charge to interior points in global array */
      nn = nxv - noff;
      mm = nyv - moff;
      nn = mx < nn ? mx : nn;
      mm = my < mm ? my : mm;
      for (j = 1; j < mm; j++) {
         for (i = 1; i < nn; i++) {
            q[i+noff+nxv*(j+moff)] += sq[i+mxv*j];
         }
      }
/* deposit charge to edge points in global array */
/* edge rows/columns may be shared with neighboring tiles, so use atomics */
      mm = nyv - moff;
      mm = my+1 < mm ? my+1 : mm;
      for (i = 1; i < nn; i++) {
#pragma omp atomic
         q[i+noff+nxv*moff] += sq[i];
         if (mm > my) {
#pragma omp atomic
            q[i+noff+nxv*(mm+moff-1)] += sq[i+mxv*(mm-1)];
         }
      }
      nn = nxv - noff;
      nn = mx+1 < nn ? mx+1 : nn;
      for (j = 0; j < mm; j++) {
#pragma omp atomic
         q[noff+nxv*(j+moff)] += sq[mxv*j];
         if (nn > mx) {
#pragma omp atomic
            q[nn+noff-1+nxv*(j+moff)] += sq[nn-1+mxv*j];
         }
      }
   }
   return;
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cvgppost2lt(float ppart[], float q[], int kpic[], float qm,
                 int nppmx, int idimp, int mx, int my, int nxv, int nyv,
                 int mx1, int mxy1) {
/* for 2d code, this subroutine calculates particle charge density
   using first-order linear interpolation, periodic boundaries
   vectorizable/OpenMP version using guard cells
   data deposited in tiles
   particles stored segmented array
   17 flops/particle, 6 loads, 4 stores
   input: all, output: q
   charge density is approximated by values at the nearest grid points
   q(n,m)=qm*(1.-dx)*(1.-dy)
   q(n+1,m)=qm*dx*(1.-dy)
   q(n,m+1)=qm*(1.-dx)*dy
   q(n+1,m+1)=qm*dx*dy
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   ppart[m][0][n] = position x of particle n in tile m
   ppart[m][1][n] = position y of particle n in tile m
   q[k][j] = charge density at grid point j,k
   kpic = number of particles per tile
   qm = charge on particle, in units of e
   nppmx = maximum number of particles in tile
   idimp = size of phase space = 4
   mx/my = number of grids in sorting cell in x/y
   nxv = first dimension of charge array, must be >= nx+1
   nyv = second dimension of charge array, must be >= ny+1
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
local data */
#define MXV             33
#define MYV             33
#define NPBLK             32
#define LVECT             4
   int noff, moff, npoff, npp, ipp, joff, nps;
   int i, j, k, m, nn, mm, lxv;
   float x, y, dxp, dyp, amx, amy;
   float sq[MXV*MYV];
/* float sq[(mx+1)*(my+1)]; */
/* scratch arrays */
   int n[NPBLK];
   float s[NPBLK*LVECT];
/* lxv = row stride of the local accumulator sq */
   lxv = mx + 1;
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/*    return;                      */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,m,noff,moff,npp,npoff,ipp,joff,nps,nn,mm,x,y,dxp,dyp, \
amx,amy,sq,n,s)
   for (k = 0; k < mxy1; k++) {
/* noff,moff = lowermost global grid point in this tile */
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[k];
      npoff = idimp*nppmx*k;
/* zero out local accumulator */
      for (j = 0; j < lxv*(my+1); j++) {
         sq[j] = 0.0f;
      }
/* loop over particles in tile */
      ipp = npp/NPBLK;
/* outer loop over number of full blocks */
      for (m = 0; m < ipp; m++) {
         joff = NPBLK*m;
/* inner loop over particles in block */
/* n[j] = cell index, s[j+NPBLK*i] = the 4 interpolation weights */
         for (j = 0; j < NPBLK; j++) {
/* find interpolation weights */
            x = ppart[j+joff+npoff];
            y = ppart[j+joff+nppmx+npoff];
            nn = x;
            mm = y;
            dxp = qm*(x - (float) nn);
            dyp = y - (float) mm;
            n[j] = nn - noff + lxv*(mm - moff);
            amx = qm - dxp;
            amy = 1.0f - dyp;
            s[j] = amx*amy;
            s[j+NPBLK] = dxp*amy;
            s[j+2*NPBLK] = amx*dyp;
            s[j+3*NPBLK] = dxp*dyp;
         }
/* deposit charge within tile to local accumulator */
         for (j = 0; j < NPBLK; j++) {
            nn = n[j];
            mm = nn + lxv - 2;
#pragma ivdep
            for (i = 0; i < LVECT; i++) {
               if (i > 1)
                  nn = mm;
               sq[i+nn] += s[j+NPBLK*i];
            }
         }
      }
      nps = NPBLK*ipp;
/* loop over remaining particles */
      for (j = nps; j < npp; j++) {
/* find interpolation weights */
         x = ppart[j+npoff];
         y = ppart[j+nppmx+npoff];
         nn = x;
         mm = y;
         dxp = qm*(x - (float) nn);
         dyp = y - (float) mm;
         nn = nn - noff + lxv*(mm - moff);
         amx = qm - dxp;
         amy = 1.0f - dyp;
/* deposit charge within tile to local accumulator */
         x = sq[nn] + amx*amy;
         y = sq[nn+1] + dxp*amy;
         sq[nn] = x;
         sq[nn+1] = y;
         nn += lxv;
         x = sq[nn] + amx*dyp;
         y = sq[nn+1] + dxp*dyp;
         sq[nn] = x;
         sq[nn+1] = y;
      }
/* deposit charge to interior points in global array */
      nn = nxv - noff;
      mm = nyv - moff;
      nn = mx < nn ? mx : nn;
      mm = my < mm ? my : mm;
      for (j = 1; j < mm; j++) {
#pragma ivdep
         for (i = 1; i < nn; i++) {
            q[i+noff+nxv*(j+moff)] += sq[i+lxv*j];
         }
      }
/* deposit charge to edge points in global array */
/* edge rows/columns may be shared with neighboring tiles, so use atomics */
      mm = nyv - moff;
      mm = my+1 < mm ? my+1 : mm;
      for (i = 1; i < nn; i++) {
#pragma omp atomic
         q[i+noff+nxv*moff] += sq[i];
         if (mm > my) {
#pragma omp atomic
            q[i+noff+nxv*(mm+moff-1)] += sq[i+lxv*(mm-1)];
         }
      }
      nn = nxv - noff;
      nn = mx+1 < nn ? mx+1 : nn;
      for (j = 0; j < mm; j++) {
#pragma omp atomic
         q[noff+nxv*(j+moff)] += sq[lxv*j];
         if (nn > mx) {
#pragma omp atomic
            q[nn+noff-1+nxv*(j+moff)] += sq[nn-1+lxv*j];
         }
      }
   }
   return;
#undef LVECT
#undef NPBLK
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cviscan2(int *isdata, int *mb, int nths) {
/* performs an in-place inclusive prefix sum (scan) of the nths     */
/* integer elements of isdata, using a vectorizable binary tree     */
/* method; mb is scratch storage holding at least nths/2 entries    */
/* local data */
   int i, step, base, half;
   half = nths/2;
/* seed the scratch array with block indices */
   for (i = 0; i < half; i++) {
      mb[i] = i;
   }
/* double the stride each pass until it spans the whole array */
   for (step = 1; step < nths; step <<= 1) {
#pragma ivdep
      for (i = 0; i < half; i++) {
         base = step*mb[i];
/* fold in the partial sum that ends the preceding block */
         if ((i+base+step) < nths) {
            isdata[i+base+step] += isdata[2*base+step-1];
         }
         mb[i] >>= 1;
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void cpporder2lt(float ppart[], float ppbuff[], int kpic[], int ncl[],
                 int ihole[], int idimp, int nppmx, int nx, int ny,
                 int mx, int my, int mx1, int my1, int npbmx, int ntmax,
                 int *irc) {
/* this subroutine sorts particles by x,y grid in tiles of mx, my
   linear interpolation, with periodic boundary conditions
   tiles are assumed to be arranged in 2D linear memory
   algorithm has 3 steps.  first, one finds particles leaving tile and
   stores their number in each directon, location, and destination in ncl
   and ihole.  second, a prefix scan of ncl is performed and departing
   particles are buffered in ppbuff in direction order.  finally, we copy
   the incoming particles from other tiles into ppart.
   input: all except ppbuff, ncl, ihole, irc
   output: ppart, ppbuff, kpic, ncl, ihole, irc
   ppart[k][0][n] = position x of particle n in tile k
   ppart[k][1][n] = position y of particle n in tile k
   ppbuff[k][i][n] = i co-ordinate of particle n in tile k
   kpic[k] = number of particles in tile k
   ncl[k][i] = number of particles going to destination i, tile k
   ihole[k][:][0] = location of hole in array left by departing particle
   ihole[k][:][1] = direction destination of particle leaving hole
   all for tile k
   ihole[k][0][0] = ih, number of holes left (error, if negative)
   idimp = size of phase space = 4
   nppmx = maximum number of particles in tile
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   mx1 = (system length in x direction - 1)/mx + 1
   my1 = (system length in y direction - 1)/my + 1
   npbmx = size of buffer array ppbuff
   ntmax = size of hole array for particles leaving tiles
   irc = maximum overflow, returned only if error occurs, when irc > 0
local data */
   int mxy1, noff, moff, npoff, npp, nboff, ncoff;
   int i, j, k, ii, kx, ky, ih, nh, ist, nn, mm, isum;
   int ip, j1, j2, kxl, kxr, kk, kl, kr;
   float anx, any, edgelx, edgely, edgerx, edgery, dx, dy;
   int ks[8];
   mxy1 = mx1*my1;
   anx = (float) nx;
   any = (float) ny;
/* find and count particles leaving tiles and determine destination */
/* update ppart, ihole, ncl */
/* loop over tiles */
#pragma omp parallel for \
private(j,k,noff,moff,npp,npoff,nn,mm,ih,nh,ist,dx,dy,edgelx,edgely, \
edgerx,edgery)
   for (k = 0; k < mxy1; k++) {
/* noff,moff = lowermost global grid point in this tile */
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[k];
      npoff = idimp*nppmx*k;
      nn = nx - noff;
      nn = mx < nn ? mx : nn;
      mm = ny - moff;
      mm = my < mm ? my : mm;
/* ih = number of holes in tile, nh = set if ihole array overflows */
      ih = 0;
      nh = 0;
      edgelx = noff;
      edgerx = noff + nn;
      edgely = moff;
      edgery = moff + mm;
/* clear counters */
      for (j = 0; j < 8; j++) {
         ncl[j+8*k] = 0;
      }
/* loop over particles in tile */
      for (j = 0; j < npp; j++) {
         dx = ppart[j+npoff];
         dy = ppart[j+nppmx+npoff];
/* find particles going out of bounds */
         ist = 0;
/* count how many particles are going in each direction in ncl   */
/* save their address and destination in ihole                   */
/* use periodic boundary conditions and check for roundoff error */
/* ist = direction particle is going                             */
         if (dx >= edgerx) {
            if (dx >= anx)
               ppart[j+npoff] = dx - anx;
            ist = 2;
         }
         else if (dx < edgelx) {
            if (dx < 0.0) {
               dx += anx;
               if (dx < anx)
                  ist = 1;
               else
                  dx = 0.0;
               ppart[j+npoff] = dx;
            }
            else {
               ist = 1;
            }
         }
         if (dy >= edgery) {
            if (dy >= any)
               ppart[j+nppmx+npoff] = dy - any;
            ist += 6;
         }
         else if (dy < edgely) {
            if (dy < 0.0) {
               dy += any;
               if (dy < any)
                  ist += 3;
               else
                  dy = 0.0;
               ppart[j+nppmx+npoff] = dy;
            }
            else {
               ist += 3;
            }
         }
         if (ist > 0) {
            ncl[ist+8*k-1] += 1;
            ih += 1;
            if (ih <= ntmax) {
               ihole[2*(ih+(ntmax+1)*k)] = j + 1;
               ihole[1+2*(ih+(ntmax+1)*k)] = ist;
            }
            else {
               nh = 1;
            }
         }
      }
/* set error and end of file flag */
      if (nh > 0) {
         *irc = ih;
         ih = -ih;
      }
      ihole[2*(ntmax+1)*k] = ih;
   }
/* ihole overflow */
   if (*irc > 0)
      return;
/* buffer particles that are leaving tile: update ppbuff, ncl */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,npoff,nboff,isum,ist,nh,ip,j1,ii)
   for (k = 0; k < mxy1; k++) {
      npoff = idimp*nppmx*k;
      nboff = idimp*npbmx*k;
/* find address offset for ordered ppbuff array */
/* exclusive prefix scan of ncl gives start of each direction's slot */
      isum = 0;
      for (j = 0; j < 8; j++) {
         ist = ncl[j+8*k];
         ncl[j+8*k] = isum;
         isum += ist;
      }
      nh = ihole[2*(ntmax+1)*k];
      ip = 0;
/* loop over particles leaving tile */
      for (j = 0; j < nh; j++) {
/* buffer particles that are leaving tile, in direction order */
         j1 = ihole[2*(j+1+(ntmax+1)*k)] - 1;
         ist = ihole[1+2*(j+1+(ntmax+1)*k)];
         ii = ncl[ist+8*k-1];
         if (ii < npbmx) {
            for (i = 0; i < idimp; i++) {
               ppbuff[ii+npbmx*i+nboff]
               = ppart[j1+nppmx*i+npoff];
            }
         }
         else {
            ip = 1;
         }
         ncl[ist+8*k-1] = ii + 1;
      }
/* set error */
      if (ip > 0)
         *irc = ncl[7+8*k];
   }
/* ppbuff overflow */
   if (*irc > 0)
      return;
/* copy incoming particles from buffer into ppart: update ppart, kpic */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,ii,kk,npp,npoff,nboff,kx,ky,kl,kr,kxl,kxr,ih,nh,nn, \
ncoff,ist,j1,j2,ip,ks)
   for (k = 0; k < mxy1; k++) {
      npp = kpic[k];
      npoff = idimp*nppmx*k;
      ky = k/mx1;
/* loop over tiles in y, assume periodic boundary conditions */
      kk = ky*mx1;
/* find tile above */
      kl = ky - 1;
      if (kl < 0)
         kl += my1;
      kl = kl*mx1;
/* find tile below */
      kr = ky + 1;
      if (kr >= my1)
         kr -= my1;
      kr = kr*mx1;
/* loop over tiles in x, assume periodic boundary conditions */
      kx = k - ky*mx1;
      kxl = kx - 1;
      if (kxl < 0)
         kxl += mx1;
      kxr = kx + 1;
      if (kxr >= mx1)
         kxr -= mx1;
/* find tile number for different directions */
      ks[0] = kxr + kk;
      ks[1] = kxl + kk;
      ks[2] = kx + kr;
      ks[3] = kxr + kr;
      ks[4] = kxl + kr;
      ks[5] = kx + kl;
      ks[6] = kxr + kl;
      ks[7] = kxl + kl;
/* loop over directions */
      nh = ihole[2*(ntmax+1)*k];
      ncoff = 0;
      ih = 0;
      ist = 0;
      j1 = 0;
      for (ii = 0; ii < 8; ii++) {
         nboff = idimp*npbmx*ks[ii];
         if (ii > 0)
            ncoff = ncl[ii-1+8*ks[ii]];
/* ip = number of particles coming from direction ii */
         ip = ncl[ii+8*ks[ii]] - ncoff;
         for (j = 0; j < ip; j++) {
            ih += 1;
/* insert incoming particles into holes */
            if (ih <= nh) {
               j1 = ihole[2*(ih+(ntmax+1)*k)] - 1;
            }
/* place overflow at end of array */
            else {
               j1 = npp;
               npp += 1;
            }
            if (j1 < nppmx) {
               for (i = 0; i < idimp; i++) {
                  ppart[j1+nppmx*i+npoff]
                  = ppbuff[j+ncoff+npbmx*i+nboff];
               }
            }
            else {
               ist = 1;
            }
         }
      }
/* set error */
      if (ist > 0)
         *irc = j1+1;
/* fill up remaining holes in particle array with particles from bottom */
/* holes with locations great than npp-ip do not need to be filled      */
      if (ih < nh) {
         ip = nh - ih;
         ii = nh;
         nn = ihole[2*(ii+(ntmax+1)*k)] - 1;
         ih += 1;
         j2 = ihole[2*(ih+(ntmax+1)*k)] - 1;
/* move particles from end into remaining holes */
/* holes are processed in increasing order      */
         for (j = 0; j < ip; j++) {
            j1 = npp - j - 1;
            if (j1==nn) {
               ii -= 1;
               nn = ihole[2*(ii+(ntmax+1)*k)] - 1;
            }
            else {
               for (i = 0; i < idimp; i++) {
                  ppart[j2+nppmx*i+npoff]
                  = ppart[j1+nppmx*i+npoff];
               }
               ih += 1;
               j2 = ihole[2*(ih+(ntmax+1)*k)] - 1;
            }
         }
         npp -= ip;
      }
      kpic[k] = npp;
   }
   return;
}
/*--------------------------------------------------------------------*/
void cpporderf2lt(float ppart[], float ppbuff[], int kpic[], int ncl[],
                  int ihole[], int idimp, int nppmx, int mx1, int my1,
                  int npbmx, int ntmax, int *irc) {
/* this subroutine sorts particles by x,y grid in tiles of mx, my
   linear interpolation, with periodic boundary conditions
   tiles are assumed to be arranged in 2D linear memory
   the algorithm has 2 steps.  first, a prefix scan of ncl is performed
   and departing particles are buffered in ppbuff in direction order.
   then we copy the incoming particles from other tiles into ppart.
   it assumes that the number, location, and destination of particles
   leaving a tile have been previously stored in ncl and ihole by the
   cgppushf2lt procedure.
   input: all except ppbuff, irc
   output: ppart, ppbuff, kpic, ncl, irc
   ppart[k][0][n] = position x of particle n in tile k
   ppart[k][1][n] = position y of particle n in tile k
   ppbuff[k][i][n] = i co-ordinate of particle n in tile k
   kpic[k] = number of particles in tile k
   ncl[k][i] = number of particles going to destination i, tile k
   ihole[k][:][0] = location of hole in array left by departing particle
   ihole[k][:][1] = direction destination of particle leaving hole
   all for tile k
   ihole[k][0][0] = ih, number of holes left (error, if negative)
   idimp = size of phase space = 4
   nppmx = maximum number of particles in tile
   mx1 = (system length in x direction - 1)/mx + 1
   my1 = (system length in y direction - 1)/my + 1
   npbmx = size of buffer array ppbuff
   ntmax = size of hole array for particles leaving tiles
   irc = maximum overflow, returned only if error occurs, when irc > 0
local data */
   int mxy1, npoff, npp, nboff, ncoff;
   int i, j, k, ii, kx, ky, ih, nh, ist, nn, isum;
   int ip, j1, j2, kxl, kxr, kk, kl, kr;
   int ks[8];
   mxy1 = mx1*my1;
/* buffer particles that are leaving tile: update ppbuff, ncl */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,npoff,nboff,isum,ist,nh,ip,j1,ii)
   for (k = 0; k < mxy1; k++) {
/* npoff/nboff = offsets of tile k in ppart/ppbuff */
      npoff = idimp*nppmx*k;
      nboff = idimp*npbmx*k;
/* find address offset for ordered ppbuff array */
/* exclusive prefix scan of ncl gives start of each direction's slot */
      isum = 0;
      for (j = 0; j < 8; j++) {
         ist = ncl[j+8*k];
         ncl[j+8*k] = isum;
         isum += ist;
      }
      nh = ihole[2*(ntmax+1)*k];
      ip = 0;
/* loop over particles leaving tile */
      for (j = 0; j < nh; j++) {
/* buffer particles that are leaving tile, in direction order */
         j1 = ihole[2*(j+1+(ntmax+1)*k)] - 1;
         ist = ihole[1+2*(j+1+(ntmax+1)*k)];
         ii = ncl[ist+8*k-1];
         if (ii < npbmx) {
            for (i = 0; i < idimp; i++) {
               ppbuff[ii+npbmx*i+nboff]
               = ppart[j1+nppmx*i+npoff];
            }
         }
         else {
            ip = 1;
         }
         ncl[ist+8*k-1] = ii + 1;
      }
/* set error */
      if (ip > 0)
         *irc = ncl[7+8*k];
   }
/* ppbuff overflow */
   if (*irc > 0)
      return;
/* copy incoming particles from buffer into ppart: update ppart, kpic */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,ii,kk,npp,npoff,nboff,kx,ky,kl,kr,kxl,kxr,ih,nh,nn, \
ncoff,ist,j1,j2,ip,ks)
   for (k = 0; k < mxy1; k++) {
      npp = kpic[k];
      npoff = idimp*nppmx*k;
      ky = k/mx1;
/* loop over tiles in y, assume periodic boundary conditions */
      kk = ky*mx1;
/* find tile above */
      kl = ky - 1;
      if (kl < 0)
         kl += my1;
      kl = kl*mx1;
/* find tile below */
      kr = ky + 1;
      if (kr >= my1)
         kr -= my1;
      kr = kr*mx1;
/* loop over tiles in x, assume periodic boundary conditions */
      kx = k - ky*mx1;
      kxl = kx - 1;
      if (kxl < 0)
         kxl += mx1;
      kxr = kx + 1;
      if (kxr >= mx1)
         kxr -= mx1;
/* find tile number for different directions */
      ks[0] = kxr + kk;
      ks[1] = kxl + kk;
      ks[2] = kx + kr;
      ks[3] = kxr + kr;
      ks[4] = kxl + kr;
      ks[5] = kx + kl;
      ks[6] = kxr + kl;
      ks[7] = kxl + kl;
/* loop over directions */
      nh = ihole[2*(ntmax+1)*k];
      ncoff = 0;
      ih = 0;
      ist = 0;
      j1 = 0;
      for (ii = 0; ii < 8; ii++) {
         nboff = idimp*npbmx*ks[ii];
         if (ii > 0)
            ncoff = ncl[ii-1+8*ks[ii]];
/* ip = number of particles coming from direction ii */
         ip = ncl[ii+8*ks[ii]] - ncoff;
         for (j = 0; j < ip; j++) {
            ih += 1;
/* insert incoming particles into holes */
            if (ih <= nh) {
               j1 = ihole[2*(ih+(ntmax+1)*k)] - 1;
            }
/* place overflow at end of array */
            else {
               j1 = npp;
               npp += 1;
            }
            if (j1 < nppmx) {
               for (i = 0; i < idimp; i++) {
                  ppart[j1+nppmx*i+npoff]
                  = ppbuff[j+ncoff+npbmx*i+nboff];
               }
            }
            else {
               ist = 1;
            }
         }
      }
/* set error */
      if (ist > 0)
         *irc = j1+1;
/* fill up remaining holes in particle array with particles from bottom */
/* holes with locations great than npp-ip do not need to be filled      */
      if (ih < nh) {
         ip = nh - ih;
         ii = nh;
         nn = ihole[2*(ii+(ntmax+1)*k)] - 1;
         ih += 1;
         j2 = ihole[2*(ih+(ntmax+1)*k)] - 1;
/* move particles from end into remaining holes */
/* holes are processed in increasing order      */
         for (j = 0; j < ip; j++) {
            j1 = npp - j - 1;
            if (j1==nn) {
               ii -= 1;
               nn = ihole[2*(ii+(ntmax+1)*k)] - 1;
            }
            else {
               for (i = 0; i < idimp; i++) {
                  ppart[j2+nppmx*i+npoff]
                  = ppart[j1+nppmx*i+npoff];
               }
               ih += 1;
               j2 = ihole[2*(ih+(ntmax+1)*k)] - 1;
            }
         }
         npp -= ip;
      }
      kpic[k] = npp;
   }
   return;
}
/*--------------------------------------------------------------------*/
void cvpporder2lt(float ppart[], float ppbuff[], int kpic[], int ncl[],
int ihole[], int idimp, int nppmx, int nx, int ny,
int mx, int my, int mx1, int my1, int npbmx,
int ntmax, int *irc) {
/* this subroutine sorts particles by x,y grid in tiles of mx, my
linear interpolation, with periodic boundary conditions
tiles are assumed to be arranged in 2D linear memory
algorithm has 3 steps. first, one finds particles leaving tile and
stores their number in each directon, location, and destination in ncl
and ihole. second, a prefix scan of ncl is performed and departing
particles are buffered in ppbuff in direction order. finally, we copy
the incoming particles from other tiles into ppart.
input: all except ppbuff, ncl, ihole, irc
output: ppart, ppbuff, kpic, ncl, ihole, irc
ppart[k][0][n] = position x of particle n in tile k
ppart[k][1][n] = position y of particle n in tile k
ppbuff[k][i][n] = i co-ordinate of particle n in tile k
kpic[k] = number of particles in tile k
ncl[k][i] = number of particles going to destination i, tile k
ihole[k][:][0] = location of hole in array left by departing particle
ihole[k][:][1] = direction destination of particle leaving hole
all for tile k
ihole[k][0][0] = ih, number of holes left (error, if negative)
idimp = size of phase space = 4
nppmx = maximum number of particles in tile
nx/ny = system length in x/y direction
mx/my = number of grids in sorting cell in x/y
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
npbmx = size of buffer array ppbuff
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
local data */
#define NPBLK 16
int mxy1, noff, moff, npoff, npp, ipp, joff, nps, nboff, ncoff;
int i, j, k, m, ii, kx, ky, ih, nh, ist, nn, mm, in;
int ip, j1, j2, kxl, kxr, kk, kl, kr, lb, kxs;
float anx, any, edgelx, edgely, edgerx, edgery, dx, dy;
int sncl[8], ks[8];
/* scratch arrays */
int n[NPBLK*3];
mxy1 = mx1*my1;
anx = (float) nx;
any = (float) ny;
/* find and count particles leaving tiles and determine destination */
/* update ppart, ihole, ncl */
/* loop over tiles */
#pragma omp parallel for \
private(j,k,noff,moff,npp,npoff,nn,mm,ih,nh,ist,dx,dy,edgelx,edgely, \
edgerx,edgery)
for (k = 0; k < mxy1; k++) {
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[k];
npoff = idimp*nppmx*k;
nn = nx - noff;
nn = mx < nn ? mx : nn;
mm = ny - moff;
mm = my < mm ? my : mm;
ih = 0;
nh = 0;
edgelx = noff;
edgerx = noff + nn;
edgely = moff;
edgery = moff + mm;
/* clear counters */
for (j = 0; j < 8; j++) {
ncl[j+8*k] = 0;
}
/* loop over particles in tile */
for (j = 0; j < npp; j++) {
dx = ppart[j+npoff];
dy = ppart[j+nppmx+npoff];
/* find particles going out of bounds */
ist = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* ist = direction particle is going */
if (dx >= edgerx) {
if (dx >= anx)
ppart[j+npoff] = dx - anx;
ist = 2;
}
else if (dx < edgelx) {
if (dx < 0.0) {
dx += anx;
if (dx < anx)
ist = 1;
else
dx = 0.0;
ppart[j+npoff] = dx;
}
else {
ist = 1;
}
}
if (dy >= edgery) {
if (dy >= any)
ppart[j+nppmx+npoff] = dy - any;
ist += 6;
}
else if (dy < edgely) {
if (dy < 0.0) {
dy += any;
if (dy < any)
ist += 3;
else
dy = 0.0;
ppart[j+nppmx+npoff] = dy;
}
else {
ist += 3;
}
}
if (ist > 0) {
ncl[ist+8*k-1] += 1;
ih += 1;
if (ih <= ntmax) {
ihole[2*(ih+(ntmax+1)*k)] = j + 1;
ihole[1+2*(ih+(ntmax+1)*k)] = ist;
}
else {
nh = 1;
}
}
}
/* set error and end of file flag */
if (nh > 0) {
*irc = ih;
ih = -ih;
}
ihole[2*(ntmax+1)*k] = ih;
}
/* ihole overflow */
if (*irc > 0)
return;
/* buffer particles that are leaving tile: update ppbuff, ncl */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,m,kxs,lb,npoff,nboff,ist,nh,ip,ipp,nps,joff,j1,ii,sncl, \
ks,n)
for (k = 0; k < mxy1; k++) {
npoff = idimp*nppmx*k;
nboff = idimp*npbmx*k;
/* find address offset for ordered ppbuff array */
for (j = 0; j < 8; j++) {
sncl[j] = ncl[j+8*k];
ks[j] = j;
}
kxs = 1;
while (kxs < 8) {
#pragma ivdep
for (j = 0; j < 4; j++) {
lb = kxs*ks[j];
sncl[j+lb+kxs] += sncl[2*lb+kxs-1];
ks[j] >>= 1;
}
kxs <<= 1;
}
for (j = 0; j < 8; j++) {
sncl[j] -= ncl[j+8*k];
}
nh = ihole[2*(ntmax+1)*k];
ip = 0;
/* buffer particles that are leaving tile, in direction order */
/* loop over particles leaving tile */
ipp = nh/NPBLK;
/* outer loop over number of full blocks */
for (m = 0; m < ipp; m++) {
joff = NPBLK*m + 1;
/* inner loop over particles in block */
for (j = 0; j < NPBLK; j++) {
n[j] = ihole[2*(j+joff+(ntmax+1)*k)] - 1;
n[j+NPBLK] = ihole[1+2*(j+joff+(ntmax+1)*k)];
}
/* calculate offsets */
for (j = 0; j < NPBLK; j++) {
ist = n[j+NPBLK];
ii = sncl[ist-1];
n[j+NPBLK] = ii;
sncl[ist-1] = ii + 1;
}
/* buffer particles that are leaving tile, in direction order */
for (i = 0; i < idimp; i++) {
for (j = 0; j < NPBLK; j++) {
j1 = n[j];
ii = n[j+NPBLK];
if (ii < npbmx) {
ppbuff[ii+npbmx*i+nboff]
= ppart[j1+nppmx*i+npoff];
}
else {
ip = 1;
}
}
}
}
nps = NPBLK*ipp;
/* loop over remaining particles */
for (j = nps; j < nh; j++) {
/* buffer particles that are leaving tile, in direction order */
j1 = ihole[2*(j+1+(ntmax+1)*k)] - 1;
ist = ihole[1+2*(j+1+(ntmax+1)*k)];
ii = sncl[ist-1];
if (ii < npbmx) {
for (i = 0; i < idimp; i++) {
ppbuff[ii+npbmx*i+nboff]
= ppart[j1+nppmx*i+npoff];
}
}
else {
ip = 1;
}
sncl[ist-1] = ii + 1;
}
for (j = 0; j < 8; j++) {
ncl[j+8*k] = sncl[j];
}
/* set error */
if (ip > 0)
*irc = ncl[7+8*k];
}
/* ppbuff overflow */
if (*irc > 0)
return;
/* copy incoming particles from buffer into ppart: update ppart, kpic */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,m,ii,kk,in,npp,npoff,nboff,ipp,joff,nps,kx,ky,kl,kr,kxl, \
kxr,ih,nh,nn,mm,ncoff,ist,j1,j2,ip,ks,n)
for (k = 0; k < mxy1; k++) {
npp = kpic[k];
npoff = idimp*nppmx*k;
ky = k/mx1;
/* loop over tiles in y, assume periodic boundary conditions */
kk = ky*mx1;
/* find tile above */
kl = ky - 1;
if (kl < 0)
kl += my1;
kl = kl*mx1;
/* find tile below */
kr = ky + 1;
if (kr >= my1)
kr -= my1;
kr = kr*mx1;
/* loop over tiles in x, assume periodic boundary conditions */
kx = k - ky*mx1;
kxl = kx - 1;
if (kxl < 0)
kxl += mx1;
kxr = kx + 1;
if (kxr >= mx1)
kxr -= mx1;
/* find tile number for different directions */
ks[0] = kxr + kk;
ks[1] = kxl + kk;
ks[2] = kx + kr;
ks[3] = kxr + kr;
ks[4] = kxl + kr;
ks[5] = kx + kl;
ks[6] = kxr + kl;
ks[7] = kxl + kl;
/* loop over directions */
nh = ihole[2*(ntmax+1)*k];
ncoff = 0;
ih = 0;
ist = 0;
j1 = 0;
for (ii = 0; ii < 8; ii++) {
nboff = idimp*npbmx*ks[ii];
if (ii > 0)
ncoff = ncl[ii-1+8*ks[ii]];
/* ip = number of particles coming from direction ii */
ip = ncl[ii+8*ks[ii]] - ncoff;
/* loop over particles coming from direction ii */
ipp = ip/NPBLK;
/* outer loop over number of full blocks */
for (m = 0; m < ipp; m++) {
joff = NPBLK*m;
/* inner loop over particles in block */
for (j = 0; j < NPBLK; j++) {
/* insert incoming particles into holes */
if ((j+ih) < nh) {
j1 = ihole[2*(j+ih+1+(ntmax+1)*k)] - 1;
}
/* place overflow at end of array */
else {
j1 = npp + j + ih - nh;
}
n[j] = j1;
}
for (i = 0; i < idimp; i++) {
for (j = 0; j < NPBLK; j++) {
j1 = n[j];
if (j1 < nppmx) {
ppart[j1+nppmx*i+npoff]
= ppbuff[j+joff+ncoff+npbmx*i+nboff];
}
else {
ist = 1;
}
}
}
ih += NPBLK;
}
nps = NPBLK*ipp;
/* loop over remaining particles */
for (j = nps; j < ip; j++) {
ih += 1;
/* insert incoming particles into holes */
if (ih <= nh) {
j1 = ihole[2*(ih+(ntmax+1)*k)] - 1;
}
/* place overflow at end of array */
else {
j1 = npp + ih - nh - 1;
}
if (j1 < nppmx) {
for (i = 0; i < idimp; i++) {
ppart[j1+nppmx*i+npoff]
= ppbuff[j+ncoff+npbmx*i+nboff];
}
}
else {
ist = 1;
}
}
}
if (ih > nh)
npp = npp + ih - nh;
/* set error */
if (ist > 0)
*irc = j1+1;
/* fill up remaining holes in particle array with particles from bottom */
/* holes with locations great than npp-ip do not need to be filled */
if (ih < nh) {
ip = nh - ih;
/* move particles from end into remaining holes */
/* holes are processed in increasing order */
ii = nh;
ipp = ip/NPBLK;
/* outer loop over number of full blocks */
for (m = 0; m < ipp; m++) {
joff = NPBLK*m;
/* inner loop over particles in block */
for (j = 0; j < NPBLK; j++) {
n[j+NPBLK] = ihole[2*(ih+j+1+(ntmax+1)*k)] - 1;
n[j+2*NPBLK] = ihole[2*(ii-j+(ntmax+1)*k)] - 1;
}
in = 0;
mm = 0;
nn = n[in+2*NPBLK];
for (j = 0; j < NPBLK; j++) {
j1 = npp - j - joff - 1;
n[j] = n[mm+NPBLK];
if (j1==nn) {
in += 1;
nn = n[in+2*NPBLK];
n[j] = -1;
}
else {
mm += 1;
}
}
for (i = 0; i < idimp; i++) {
#pragma ivdep
for (j = 0; j < NPBLK; j++) {
j1 = npp - j - joff - 1;
j2 = n[j];
if (j2 >= 0) {
ppart[j2+nppmx*i+npoff]
= ppart[j1+nppmx*i+npoff];
}
}
}
ii -= in;
ih += mm;
}
nps = NPBLK*ipp;
nn = ihole[2*(ii+(ntmax+1)*k)] - 1;
ih += 1;
j2 = ihole[2*(ih+(ntmax+1)*k)] - 1;
/* loop over remaining particles */
for (j = nps; j < ip; j++) {
j1 = npp - j - 1;
if (j1==nn) {
ii -= 1;
nn = ihole[2*(ii+(ntmax+1)*k)] - 1;
}
else {
for (i = 0; i < idimp; i++) {
ppart[j2+nppmx*i+npoff]
= ppart[j1+nppmx*i+npoff];
}
ih += 1;
j2 = ihole[2*(ih+(ntmax+1)*k)] - 1;
}
}
npp -= ip;
}
kpic[k] = npp;
}
return;
#undef NPBLK
}
/*--------------------------------------------------------------------*/
void cvpporderf2lt(float ppart[], float ppbuff[], int kpic[], int ncl[],
                   int ihole[], int idimp, int nppmx, int mx1, int my1,
                   int npbmx, int ntmax, int *irc) {
/* this subroutine sorts particles by x,y grid in tiles of mx, my
   linear interpolation, with periodic boundary conditions
   tiles are assumed to be arranged in 2D linear memory
   the algorithm has 2 steps. first, a prefix scan of ncl is performed
   and departing particles are buffered in ppbuff in direction order.
   then we copy the incoming particles from other tiles into ppart.
   it assumes that the number, location, and destination of particles
   leaving a tile have been previously stored in ncl and ihole by the
   cgppushf2lt procedure.
   input: all except ppbuff, irc
   output: ppart, ppbuff, kpic, ncl, irc
   ppart[k][0][n] = position x of particle n in tile k
   ppart[k][1][n] = position y of particle n in tile k
   ppbuff[k][i][n] = i co-ordinate of particle n in tile k
   kpic[k] = number of particles in tile k
   ncl[k][i] = number of particles going to destination i, tile k
   ihole[k][:][0] = location of hole in array left by departing particle
   ihole[k][:][1] = direction destination of particle leaving hole
   all for tile k
   ihole[k][0][0] = ih, number of holes left (error, if negative)
   idimp = size of phase space = 4
   nppmx = maximum number of particles in tile
   mx1 = (system length in x direction - 1)/mx + 1
   my1 = (system length in y direction - 1)/my + 1
   npbmx = size of buffer array ppbuff
   ntmax = size of hole array for particles leaving tiles
   irc = maximum overflow, returned only if error occurs, when irc > 0
local data */
/* NPBLK = block size for the blocked (vectorizable) inner loops */
#define NPBLK 16
   int mxy1, npoff, npp, nboff, ncoff;
   int i, j, k, ii, kx, ky, ih, nh, ist, nn, mm, in;
   int ip, j1, j2, kxl, kxr, kk, kl, kr;
   int lb, kxs, m, ipp, nps, joff;
   int sncl[8], ks[8];
/* scratch arrays */
   int n[NPBLK*3];
   mxy1 = mx1*my1;
/* buffer particles that are leaving tile: update ppbuff, ncl */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,m,kxs,lb,npoff,nboff,ist,nh,ip,ipp,nps,joff,j1,ii,sncl, \
ks,n)
   for (k = 0; k < mxy1; k++) {
      npoff = idimp*nppmx*k;
      nboff = idimp*npbmx*k;
/* find address offset for ordered ppbuff array */
      for (j = 0; j < 8; j++) {
         sncl[j] = ncl[j+8*k];
         ks[j] = j;
      }
/* log-step in-place scan over the 8 direction counters in sncl */
      kxs = 1;
      while (kxs < 8) {
#pragma ivdep
         for (j = 0; j < 4; j++) {
            lb = kxs*ks[j];
            sncl[j+lb+kxs] += sncl[2*lb+kxs-1];
            ks[j] >>= 1;
         }
         kxs <<= 1;
      }
/* convert cumulative counts to starting offsets per direction */
      for (j = 0; j < 8; j++) {
         sncl[j] -= ncl[j+8*k];
      }
/* nh = number of particles leaving tile k (from ihole[k][0][0]) */
      nh = ihole[2*(ntmax+1)*k];
      ip = 0;
/* buffer particles that are leaving tile, in direction order */
/* loop over particles leaving tile */
      ipp = nh/NPBLK;
/* outer loop over number of full blocks */
      for (m = 0; m < ipp; m++) {
         joff = NPBLK*m + 1;
/* inner loop over particles in block */
         for (j = 0; j < NPBLK; j++) {
            n[j] = ihole[2*(j+joff+(ntmax+1)*k)] - 1;
            n[j+NPBLK] = ihole[1+2*(j+joff+(ntmax+1)*k)];
         }
/* calculate offsets */
         for (j = 0; j < NPBLK; j++) {
            ist = n[j+NPBLK];
            ii = sncl[ist-1];
            n[j+NPBLK] = ii;
            sncl[ist-1] = ii + 1;
         }
/* buffer particles that are leaving tile, in direction order */
         for (i = 0; i < idimp; i++) {
            for (j = 0; j < NPBLK; j++) {
               j1 = n[j];
               ii = n[j+NPBLK];
               if (ii < npbmx) {
                  ppbuff[ii+npbmx*i+nboff]
                  = ppart[j1+nppmx*i+npoff];
               }
               else {
                  ip = 1;
               }
            }
         }
      }
      nps = NPBLK*ipp;
/* loop over remaining particles */
      for (j = nps; j < nh; j++) {
/* buffer particles that are leaving tile, in direction order */
         j1 = ihole[2*(j+1+(ntmax+1)*k)] - 1;
         ist = ihole[1+2*(j+1+(ntmax+1)*k)];
         ii = sncl[ist-1];
         if (ii < npbmx) {
            for (i = 0; i < idimp; i++) {
               ppbuff[ii+npbmx*i+nboff]
               = ppart[j1+nppmx*i+npoff];
            }
         }
         else {
            ip = 1;
         }
         sncl[ist-1] = ii + 1;
      }
/* write back the final (cumulative) direction counts */
      for (j = 0; j < 8; j++) {
         ncl[j+8*k] = sncl[j];
      }
/* set error */
      if (ip > 0)
         *irc = ncl[7+8*k];
   }
/* ppbuff overflow */
   if (*irc > 0)
      return;
/* copy incoming particles from buffer into ppart: update ppart, kpic */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,m,ii,kk,in,npp,npoff,nboff,ipp,joff,nps,kx,ky,kl,kr,kxl, \
kxr,ih,nh,nn,mm,ncoff,ist,j1,j2,ip,ks,n)
   for (k = 0; k < mxy1; k++) {
      npp = kpic[k];
      npoff = idimp*nppmx*k;
      ky = k/mx1;
/* loop over tiles in y, assume periodic boundary conditions */
      kk = ky*mx1;
/* find tile above */
      kl = ky - 1;
      if (kl < 0)
         kl += my1;
      kl = kl*mx1;
/* find tile below */
      kr = ky + 1;
      if (kr >= my1)
         kr -= my1;
      kr = kr*mx1;
/* loop over tiles in x, assume periodic boundary conditions */
      kx = k - ky*mx1;
      kxl = kx - 1;
      if (kxl < 0)
         kxl += mx1;
      kxr = kx + 1;
      if (kxr >= mx1)
         kxr -= mx1;
/* find tile number for different directions */
      ks[0] = kxr + kk;
      ks[1] = kxl + kk;
      ks[2] = kx + kr;
      ks[3] = kxr + kr;
      ks[4] = kxl + kr;
      ks[5] = kx + kl;
      ks[6] = kxr + kl;
      ks[7] = kxl + kl;
/* loop over directions */
      nh = ihole[2*(ntmax+1)*k];
      ncoff = 0;
      ih = 0;
      ist = 0;
      j1 = 0;
      for (ii = 0; ii < 8; ii++) {
         nboff = idimp*npbmx*ks[ii];
         if (ii > 0)
            ncoff = ncl[ii-1+8*ks[ii]];
/* ip = number of particles coming from direction ii */
         ip = ncl[ii+8*ks[ii]] - ncoff;
/* loop over particles coming from direction ii */
         ipp = ip/NPBLK;
/* outer loop over number of full blocks */
         for (m = 0; m < ipp; m++) {
            joff = NPBLK*m;
/* inner loop over particles in block */
            for (j = 0; j < NPBLK; j++) {
/* insert incoming particles into holes */
               if ((j+ih) < nh) {
                  j1 = ihole[2*(j+ih+1+(ntmax+1)*k)] - 1;
               }
/* place overflow at end of array */
               else {
                  j1 = npp + j + ih - nh;
               }
               n[j] = j1;
            }
            for (i = 0; i < idimp; i++) {
               for (j = 0; j < NPBLK; j++) {
                  j1 = n[j];
                  if (j1 < nppmx) {
                     ppart[j1+nppmx*i+npoff]
                     = ppbuff[j+joff+ncoff+npbmx*i+nboff];
                  }
                  else {
                     ist = 1;
                  }
               }
            }
            ih += NPBLK;
         }
         nps = NPBLK*ipp;
/* loop over remaining particles */
         for (j = nps; j < ip; j++) {
            ih += 1;
/* insert incoming particles into holes */
            if (ih <= nh) {
               j1 = ihole[2*(ih+(ntmax+1)*k)] - 1;
            }
/* place overflow at end of array */
            else {
               j1 = npp + ih - nh - 1;
            }
            if (j1 < nppmx) {
               for (i = 0; i < idimp; i++) {
                  ppart[j1+nppmx*i+npoff]
                  = ppbuff[j+ncoff+npbmx*i+nboff];
               }
            }
            else {
               ist = 1;
            }
         }
      }
      if (ih > nh)
         npp = npp + ih - nh;
/* set error */
      if (ist > 0)
         *irc = j1+1;
/* fill up remaining holes in particle array with particles from bottom */
/* holes with locations greater than npp-ip do not need to be filled */
      if (ih < nh) {
         ip = nh - ih;
/* move particles from end into remaining holes */
/* holes are processed in increasing order */
         ii = nh;
         ipp = ip/NPBLK;
/* outer loop over number of full blocks */
         for (m = 0; m < ipp; m++) {
            joff = NPBLK*m;
/* inner loop over particles in block */
            for (j = 0; j < NPBLK; j++) {
/* n[.+NPBLK] = hole locations, n[.+2*NPBLK] = candidate source rows */
               n[j+NPBLK] = ihole[2*(ih+j+1+(ntmax+1)*k)] - 1;
               n[j+2*NPBLK] = ihole[2*(ii-j+(ntmax+1)*k)] - 1;
            }
            in = 0;
            mm = 0;
            nn = n[in+2*NPBLK];
/* mark source rows that are themselves holes (n[j] = -1 skips them) */
            for (j = 0; j < NPBLK; j++) {
               j1 = npp - j - joff - 1;
               n[j] = n[mm+NPBLK];
               if (j1==nn) {
                  in += 1;
                  nn = n[in+2*NPBLK];
                  n[j] = -1;
               }
               else {
                  mm += 1;
               }
            }
            for (i = 0; i < idimp; i++) {
#pragma ivdep
               for (j = 0; j < NPBLK; j++) {
                  j1 = npp - j - joff - 1;
                  j2 = n[j];
                  if (j2 >= 0) {
                     ppart[j2+nppmx*i+npoff]
                     = ppart[j1+nppmx*i+npoff];
                  }
               }
            }
            ii -= in;
            ih += mm;
         }
         nps = NPBLK*ipp;
         nn = ihole[2*(ii+(ntmax+1)*k)] - 1;
         ih += 1;
         j2 = ihole[2*(ih+(ntmax+1)*k)] - 1;
/* loop over remaining particles */
         for (j = nps; j < ip; j++) {
            j1 = npp - j - 1;
            if (j1==nn) {
               ii -= 1;
               nn = ihole[2*(ii+(ntmax+1)*k)] - 1;
            }
            else {
               for (i = 0; i < idimp; i++) {
                  ppart[j2+nppmx*i+npoff]
                  = ppart[j1+nppmx*i+npoff];
               }
               ih += 1;
               j2 = ihole[2*(ih+(ntmax+1)*k)] - 1;
            }
         }
         npp -= ip;
      }
      kpic[k] = npp;
   }
   return;
#undef NPBLK
}
/*--------------------------------------------------------------------*/
void ccguard2l(float fxy[], int nx, int ny, int nxe, int nye) {
/* replicate edges of an extended periodic vector field fxy for
   linear interpolation: the column at x = nx receives a copy of the
   column at x = 0, the row at y = ny receives a copy of the row at
   y = 0, and the far corner receives the origin value, for both
   vector components
   nx/ny = system length in x/y direction
   nxe = first dimension of field array, must be >= nx+1
   nye = second dimension of field array, must be >= ny+1
local data */
   int j, k, roff;
/* replicate the x = nx guard column from the x = 0 column */
   for (k = 0; k < ny; k++) {
      roff = 2*nxe*k;
      fxy[roff+2*nx] = fxy[roff];
      fxy[roff+2*nx+1] = fxy[roff+1];
   }
/* replicate the y = ny guard row from the y = 0 row */
   roff = 2*nxe*ny;
   for (j = 0; j < nx; j++) {
      fxy[roff+2*j] = fxy[2*j];
      fxy[roff+2*j+1] = fxy[2*j+1];
   }
/* corner point replicates the origin */
   fxy[roff+2*nx] = fxy[0];
   fxy[roff+2*nx+1] = fxy[1];
   return;
}
/*--------------------------------------------------------------------*/
void caguard2l(float q[], int nx, int ny, int nxe, int nye) {
/* accumulate guard cells of an extended periodic scalar field q for
   linear interpolation: the x = nx guard column is folded into the
   x = 0 column, the y = ny guard row into the y = 0 row, and the far
   corner into the origin; each guard cell is zeroed after folding
   nx/ny = system length in x/y direction
   nxe = first dimension of field array, must be >= nx+1
   nye = second dimension of field array, must be >= ny+1
local data */
   int j, k, roff;
/* fold the x = nx guard column into the x = 0 column */
   for (k = 0; k < ny; k++) {
      roff = nxe*k;
      q[roff] += q[roff+nx];
      q[roff+nx] = 0.0;
   }
/* fold the y = ny guard row into the y = 0 row */
   roff = nxe*ny;
   for (j = 0; j < nx; j++) {
      q[j] += q[j+roff];
      q[j+roff] = 0.0;
   }
/* fold the far corner into the origin */
   q[0] += q[roff+nx];
   q[roff+nx] = 0.0;
   return;
}
/*--------------------------------------------------------------------*/
void cvmpois22(float complex q[], float complex fxy[], int isign,
               float complex ffc[], float ax, float ay, float affp,
               float *we, int nx, int ny, int nxvh, int nyv, int nxhd,
               int nyhd) {
/* this subroutine solves 2d poisson's equation in fourier space for
   force/charge (or convolution of electric field over particle shape)
   with periodic boundary conditions.
   for isign = 0, input: isign,ax,ay,affp,nx,ny,nxvh,nyhd, output: ffc
   for isign /= 0, input: q,ffc,isign,nx,ny,nxvh,nyhd, output: fxy,we
   approximate flop count is: 26*nxc*nyc + 12*(nxc + nyc)
   where nxc = nx/2 - 1, nyc = ny/2 - 1
   equation used is:
   fx[ky][kx] = -sqrt(-1)*kx*g[ky][kx]*s[ky][kx]*q[ky][kx],
   fy[ky][kx] = -sqrt(-1)*ky*g[ky][kx]*s[ky][kx]*q[ky][kx],
   where kx = 2pi*j/nx, ky = 2pi*k/ny, and j,k = fourier mode numbers,
   g[ky][kx] = (affp/(kx**2+ky**2))*s(kx,ky),
   s[ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2)/2), except for
   fx(kx=pi) = fy(kx=pi) = fx(ky=pi) = fy(ky=pi) = 0, and
   fx(kx=0,ky=0) = fy(kx=0,ky=0) = 0.
   q[k][j] = complex charge density for fourier mode (j,k)
   fxy[k][j][0] = x component of complex force/charge,
   fxy[k][j][1] = y component of complex force/charge,
   all for fourier mode (j,k)
   if isign = 0, form factor array is prepared
   if isign is not equal to 0, force/charge is calculated
   cimag(ffc[k][j]) = finite-size particle shape factor s
   for fourier mode (j,k)
   creal(ffc[k][j]) = potential green's function g
   for fourier mode (j,k)
   ax/ay = half-width of particle in x/y direction
   affp = normalization constant = nx*ny/np, where np=number of particles
   electric field energy is also calculated, using
   we = nx*ny*sum((affp/(kx**2+ky**2))*|q[ky][kx]*s[ky][kx]|**2)
   nx/ny = system length in x/y direction
   nxvh = first dimension of field arrays, must be >= nxh
   nyv = second dimension of field arrays, must be >= ny
   nxhd = first dimension of form factor array, must be >= nxh
   nyhd = second dimension of form factor array, must be >= nyh
   vectorizable version
local data */
   int nxh, nyh, j, k, k1, kk, kj;
   float dnx, dny, dkx, dky, at1, at2, at3, at4;
   float complex zero, zt1, zt2;
   double wp, sum1;
   nxh = nx/2;
   nyh = 1 > ny/2 ? 1 : ny/2;
   dnx = 6.28318530717959/(float) nx;
   dny = 6.28318530717959/(float) ny;
   zero = 0.0 + 0.0*_Complex_I;
   if (isign != 0)
      goto L30;
/* prepare form factor array */
   for (k = 0; k < nyh; k++) {
      dky = dny*(float) k;
      kk = nxhd*k;
      at1 = dky*dky;
      at2 = pow((dky*ay),2);
      for (j = 0; j < nxh; j++) {
         dkx = dnx*(float) j;
         at3 = dkx*dkx + at1;
         at4 = exp(-0.5*(pow((dkx*ax),2) + at2));
/* at3 == 0 only for the zero mode (j = k = 0) */
         if (at3==0.0) {
            ffc[j+kk] = affp + 1.0*_Complex_I;
         }
         else {
            ffc[j+kk] = (affp*at4/at3) + at4*_Complex_I;
         }
      }
   }
   return;
/* calculate force/charge and sum field energy */
 L30: sum1 = 0.0;
#pragma omp parallel for \
private(j,k,k1,kk,kj,dky,at1,at2,at3,zt1,zt2,wp) \
reduction(+:sum1)
/* mode numbers 0 < kx < nx/2 and 0 < ky < ny/2 */
   for (k = 1; k < nyh; k++) {
      k1 = ny - k;
      dky = dny*(float) k;
      kk = nxhd*k;
      kj = nxvh*k;
/* k1 = row offset of the conjugate mode ky = ny - k */
      k1 = nxvh*ny - kj;
/* wp = per-row energy, reduced into sum1 below */
      wp = 0.0;
#pragma ivdep
      for (j = 1; j < nxh; j++) {
/* at1 = g*s for this mode */
         at1 = crealf(ffc[j+kk])*cimagf(ffc[j+kk]);
         at2 = at1*dnx*(float) j;
         at3 = dky*at1;
/* zt1, zt2 = -sqrt(-1)*q for the mode and its conjugate row */
         zt1 = cimagf(q[j+kj]) - crealf(q[j+kj])*_Complex_I;
         zt2 = cimagf(q[j+k1]) - crealf(q[j+k1])*_Complex_I;
         fxy[2*j+2*kj] = at2*zt1;
         fxy[1+2*j+2*kj] = at3*zt1;
         fxy[2*j+2*k1] = at2*zt2;
         fxy[1+2*j+2*k1] = -at3*zt2;
         at1 = at1*(q[j+kj]*conjf(q[j+kj]) + q[j+k1]*conjf(q[j+k1]));
         wp += (double) at1;
      }
/* mode numbers kx = 0, nx/2 */
      at1 = crealf(ffc[kk])*cimagf(ffc[kk]);
      at3 = at1*dny*(float) k;
      zt1 = cimagf(q[kj]) - crealf(q[kj])*_Complex_I;
      fxy[2*kj] = zero;
      fxy[1+2*kj] = at3*zt1;
      fxy[2*k1] = zero;
      fxy[1+2*k1] = zero;
      at1 = at1*(q[kj]*conjf(q[kj]));
      wp += (double) at1;
      sum1 += wp;
   }
   wp = 0.0;
/* mode numbers ky = 0, ny/2 */
   k1 = 2*nxvh*nyh;
#pragma ivdep
   for (j = 1; j < nxh; j++) {
      at1 = crealf(ffc[j])*cimagf(ffc[j]);
      at2 = at1*dnx*(float) j;
      zt1 = cimagf(q[j]) - crealf(q[j])*_Complex_I;
      fxy[2*j] = at2*zt1;
      fxy[1+2*j] = zero;
      fxy[2*j+k1] = zero;
      fxy[1+2*j+k1] = zero;
      at1 = at1*(q[j]*conjf(q[j]));
      wp += (double) at1;
   }
/* zero force for the kx = 0, ky = 0 and kx = 0, ky = ny/2 modes */
   fxy[0] = zero;
   fxy[1] = zero;
   fxy[k1] = zero;
   fxy[1+k1] = zero;
   sum1 += wp;
   *we = sum1*(float) (nx*ny);
   return;
}
/*--------------------------------------------------------------------*/
void cwfft2rinit(int mixup[], float complex sct[], int indx, int indy,
int nxhyd, int nxyhd) {
/* this subroutine calculates tables needed by a two dimensional
real to complex fast fourier transform and its inverse.
input: indx, indy, nxhyd, nxyhd
output: mixup, sct
mixup = array of bit reversed addresses
sct = sine/cosine table
indx/indy = exponent which determines length in x/y direction,
where nx=2**indx, ny=2**indy
nxhyd = maximum of (nx/2,ny)
nxyhd = one half of maximum of (nx,ny)
written by viktor k. decyk, ucla
local data */
int indx1, indx1y, nx, ny, nxy, nxhy, nxyh;
int j, k, lb, ll, jb, it;
float dnxy, arg;
indx1 = indx - 1;
indx1y = indx1 > indy ? indx1 : indy;
nx = 1L<<indx;
ny = 1L<<indy;
nxy = nx > ny ? nx : ny;
nxhy = 1L<<indx1y;
/* bit-reverse index table: mixup[j] = 1 + reversed bits of j */
for (j = 0; j < nxhy; j++) {
lb = j;
ll = 0;
for (k = 0; k < indx1y; k++) {
jb = lb/2;
it = lb - 2*jb;
lb = jb;
ll = 2*ll + it;
}
mixup[j] = ll + 1;
}
/* sine/cosine table for the angles 2*n*pi/nxy */
nxyh = nxy/2;
dnxy = 6.28318530717959/(float) nxy;
for (j = 0; j < nxyh; j++) {
arg = dnxy*(float) j;
sct[j] = cosf(arg) - sinf(arg)*_Complex_I;
}
return;
}
/*--------------------------------------------------------------------*/
void cfft2rvmxx(float complex f[], int isign, int mixup[],
                float complex sct[], int indx, int indy, int nyi,
                int nyp, int nxhd, int nyd, int nxhyd, int nxyhd) {
/* this subroutine performs the x part of a two dimensional real to
   complex fast fourier transform and its inverse, for a subset of y,
   using complex arithmetic, with OpenMP
   for isign = (-1,1), input: all, output: f
   for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
   for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
   where N = (nx/2)*ny
   indx/indy = exponent which determines length in x/y direction,
   where nx=2**indx, ny=2**indy
   if isign = -1, an inverse fourier transform in x is performed
   f[m][n] = (1/nx*ny)*sum(f[k][j]*exp(-sqrt(-1)*2pi*n*j/nx))
   if isign = 1, a forward fourier transform in x is performed
   f[k][j] = sum(f[m][n]*exp(sqrt(-1)*2pi*n*j/nx))
   mixup = array of bit reversed addresses
   sct = sine/cosine table
   nyi = initial y index used
   nyp = number of y indices used
   nxhd = first dimension of f >= nx/2
   nyd = second dimension of f >= ny
   nxhyd = maximum of (nx/2,ny)
   nxyhd = maximum of (nx,ny)/2
   fourier coefficients are stored as follows:
   f[k][j] = real, imaginary part of mode j,k, where
   0 <= j < nx/2 and 0 <= k < ny, except for
   f[k][1] = real, imaginary part of mode nx/2,k, where
   ny/2+1 <= k < ny, and
   imag(f[0][0]) = real part of mode nx/2,0 and
   imag(f[0][ny/2]) = real part of mode nx/2,ny/2
   written by viktor k. decyk, ucla
local data */
   int indx1, indx1y, nx, nxh, nxhh, ny, nxy, nxhy, nyt;
   int nrx, i, j, k, l, j1, k1, k2, ns, ns2, km, kmr, nrxb, joff;
   float ani;
   float complex t1, t2, t3;
   if (isign==0)
      return;
   indx1 = indx - 1;
   indx1y = indx1 > indy ? indx1 : indy;
   nx = 1L<<indx;
   nxh = nx/2;
   nxhh = nx/4;
   ny = 1L<<indy;
   nxy = nx > ny ? nx : ny;
   nxhy = 1L<<indx1y;
   nyt = nyi + nyp - 1;
   if (isign > 0)
      goto L70;
/* inverse fourier transform */
/* nrxb, nrx = strides into the shared mixup and sct tables */
   nrxb = nxhy/nxh;
   nrx = nxy/nxh;
#pragma omp parallel for \
private(i,j,k,l,ns,ns2,km,kmr,k1,k2,j1,joff,ani,t1,t2,t3)
/* each iteration transforms one row y = i independently */
   for (i = nyi-1; i < nyt; i++) {
      joff = nxhd*i;
/* bit-reverse array elements in x */
      for (j = 0; j < nxh; j++) {
         j1 = (mixup[j] - 1)/nrxb;
         if (j < j1) {
            t1 = f[j1+joff];
            f[j1+joff] = f[j+joff];
            f[j+joff] = t1;
         }
      }
/* then transform in x: radix-2 butterfly stages */
      ns = 1;
      for (l = 0; l < indx1; l++) {
         ns2 = ns + ns;
         km = nxhh/ns;
         kmr = km*nrx;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               t1 = sct[kmr*j];
               t2 = t1*f[j+k2+joff];
               f[j+k2+joff] = f[j+k1+joff] - t2;
               f[j+k1+joff] += t2;
            }
         }
         ns = ns2;
      }
/* unscramble coefficients and normalize */
      kmr = nxy/nx;
      ani = 0.5/(((float) nx)*((float) ny));
      for (j = 1; j < nxhh; j++) {
         t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I;
         t2 = conjf(f[nxh-j+joff]);
         t1 = f[j+joff] + t2;
         t2 = (f[j+joff] - t2)*t3;
         f[j+joff] = ani*(t1 + t2);
         f[nxh-j+joff] = ani*conjf(t1 - t2);
      }
      ani = 2.0*ani;
      f[nxhh+joff] = ani*conjf(f[nxhh+joff]);
/* pack mode nx/2 into the imaginary part of mode 0 */
      f[joff] = ani*((crealf(f[joff]) + cimagf(f[joff]))
                + (crealf(f[joff]) - cimagf(f[joff]))*_Complex_I);
   }
   return;
/* forward fourier transform */
 L70: nrxb = nxhy/nxh;
   nrx = nxy/nxh;
#pragma omp parallel for \
private(i,j,k,l,ns,ns2,km,kmr,k1,k2,j1,joff,t1,t2,t3)
/* each iteration transforms one row y = i independently */
   for (i = nyi-1; i < nyt; i++) {
      joff = nxhd*i;
/* scramble coefficients */
      kmr = nxy/nx;
      for (j = 1; j < nxhh; j++) {
         t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I;
         t2 = conjf(f[nxh-j+joff]);
         t1 = f[j+joff] + t2;
         t2 = (f[j+joff] - t2)*t3;
         f[j+joff] = t1 + t2;
         f[nxh-j+joff] = conjf(t1 - t2);
      }
      f[nxhh+joff] = 2.0*conjf(f[nxhh+joff]);
      f[joff] = (crealf(f[joff]) + cimagf(f[joff]))
                + (crealf(f[joff]) - cimagf(f[joff]))*_Complex_I;
/* bit-reverse array elements in x */
      for (j = 0; j < nxh; j++) {
         j1 = (mixup[j] - 1)/nrxb;
         if (j < j1) {
            t1 = f[j1+joff];
            f[j1+joff] = f[j+joff];
            f[j+joff] = t1;
         }
      }
/* then transform in x: radix-2 butterfly stages, conjugate twiddles */
      ns = 1;
      for (l = 0; l < indx1; l++) {
         ns2 = ns + ns;
         km = nxhh/ns;
         kmr = km*nrx;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               t1 = conjf(sct[kmr*j]);
               t2 = t1*f[j+k2+joff];
               f[j+k2+joff] = f[j+k1+joff] - t2;
               f[j+k1+joff] += t2;
            }
         }
         ns = ns2;
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void cfft2rmxy(float complex f[], int isign, int mixup[],
               float complex sct[], int indx, int indy, int nxi,
               int nxp, int nxhd, int nyd, int nxhyd, int nxyhd) {
/* this subroutine performs the y part of a two dimensional real to
   complex fast fourier transform and its inverse, for a subset of x,
   using complex arithmetic, with OpenMP
   for isign = (-1,1), input: all, output: f
   for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
   for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
   where N = (nx/2)*ny
   indx/indy = exponent which determines length in x/y direction,
   where nx=2**indx, ny=2**indy
   if isign = -1, an inverse fourier transform in y is performed
   f[m][n] = sum(f[k][j]*exp(-sqrt(-1)*2pi*m*k/ny))
   if isign = 1, a forward fourier transform in y is performed
   f[k][j] = sum(f[m][n]*exp(sqrt(-1)*2pi*m*k/ny))
   mixup = array of bit reversed addresses
   sct = sine/cosine table
   nxi = initial x index used
   nxp = number of x indices used
   nxhd = first dimension of f >= nx/2
   nyd = second dimension of f >= ny
   nxhyd = maximum of (nx/2,ny)
   nxyhd = maximum of (nx,ny)/2
   fourier coefficients are stored as follows:
   f[k][j] = real, imaginary part of mode j,k, where
   0 <= j < nx/2 and 0 <= k < ny, except for
   f[k][1] = real, imaginary part of mode nx/2,k, where
   ny/2+1 <= k < ny, and
   imag(f[0][0]) = real part of mode nx/2,0 and
   imag(f[0][ny/2]) = real part of mode nx/2,ny/2
   written by viktor k. decyk, ucla
local data */
   int indx1, indx1y, nx, ny, nyh, nxy, nxhy, nxt;
   int nry, i, j, k, l, j1, j2, k1, k2, ns, ns2, km, kmr, nryb, koff;
   float complex t1, t2;
   if (isign==0)
      return;
   indx1 = indx - 1;
   indx1y = indx1 > indy ? indx1 : indy;
   nx = 1L<<indx;
   ny = 1L<<indy;
   nyh = ny/2;
   nxy = nx > ny ? nx : ny;
   nxhy = 1L<<indx1y;
   nxt = nxi + nxp - 1;
   if (isign > 0)
      goto L70;
/* inverse fourier transform */
/* nryb, nry = strides into the shared mixup and sct tables */
   nryb = nxhy/ny;
   nry = nxy/ny;
#pragma omp parallel for \
private(i,j,k,l,ns,ns2,km,kmr,k1,k2,j1,j2,koff,t1,t2)
/* each iteration transforms one column x = i independently */
   for (i = nxi-1; i < nxt; i++) {
/* bit-reverse array elements in y */
      for (k = 0; k < ny; k++) {
         koff = nxhd*k;
         k1 = (mixup[k] - 1)/nryb;
         if (k < k1) {
            k1 = nxhd*k1;
            t1 = f[i+k1];
            f[i+k1] = f[i+koff];
            f[i+koff] = t1;
         }
      }
/* then transform in y: radix-2 butterfly stages */
      ns = 1;
      for (l = 0; l < indy; l++) {
         ns2 = ns + ns;
         km = nyh/ns;
         kmr = km*nry;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = nxhd*(j + k1);
               j2 = nxhd*(j + k2);
               t1 = sct[kmr*j];
               t2 = t1*f[i+j2];
               f[i+j2] = f[i+j1] - t2;
               f[i+j1] += t2;
            }
         }
         ns = ns2;
      }
   }
/* unscramble modes kx = 0, nx/2 (only when the first column is in range) */
   if (nxi==1) {
      for (k = 1; k < nyh; k++) {
         koff = nxhd*k;
         k1 = nxhd*ny - koff;
         t1 = f[k1];
         f[k1] = 0.5*(cimagf(f[koff] + t1)
                 + crealf(f[koff] - t1)*_Complex_I);
         f[koff] = 0.5*(crealf(f[koff] + t1)
                   + cimagf(f[koff] - t1)*_Complex_I);
      }
   }
   return;
/* forward fourier transform */
 L70: nryb = nxhy/ny;
   nry = nxy/ny;
/* scramble modes kx = 0, nx/2 (only when the first column is in range) */
   if (nxi==1) {
      for (k = 1; k < nyh; k++) {
         koff = nxhd*k;
         k1 = nxhd*ny - koff;
         t1 = cimagf(f[k1]) + crealf(f[k1])*_Complex_I;
         f[k1] = conjf(f[koff] - t1);
         f[koff] += t1;
      }
   }
#pragma omp parallel for \
private(i,j,k,l,ns,ns2,km,kmr,k1,k2,j1,j2,koff,t1,t2)
/* each iteration transforms one column x = i independently */
   for (i = nxi-1; i < nxt; i++) {
/* bit-reverse array elements in y */
      for (k = 0; k < ny; k++) {
         koff = nxhd*k;
         k1 = (mixup[k] - 1)/nryb;
         if (k < k1) {
            k1 = nxhd*k1;
            t1 = f[i+k1];
            f[i+k1] = f[i+koff];
            f[i+koff] = t1;
         }
      }
/* then transform in y: radix-2 butterfly stages, conjugate twiddles */
      ns = 1;
      for (l = 0; l < indy; l++) {
         ns2 = ns + ns;
         km = nyh/ns;
         kmr = km*nry;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = nxhd*(j + k1);
               j2 = nxhd*(j + k2);
               t1 = conjf(sct[kmr*j]);
               t2 = t1*f[i+j2];
               f[i+j2] = f[i+j1] - t2;
               f[i+j1] += t2;
            }
         }
         ns = ns2;
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void cfft2rvm2x(float complex f[], int isign, int mixup[],
                float complex sct[], int indx, int indy, int nyi,
                int nyp, int nxhd, int nyd, int nxhyd, int nxyhd) {
/* this subroutine performs the x part of 2 two dimensional real to
   complex fast fourier transforms, and their inverses, for a subset of
   y, using complex arithmetic, with OpenMP
   for isign = (-1,1), input: all, output: f
   for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
   for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
   where N = (nx/2)*ny
   indx/indy = exponent which determines length in x/y direction,
   where nx=2**indx, ny=2**indy
   if isign = -1, two inverse fourier transforms in x are performed
   f[m][n][0:1] = (1/nx*ny)*sum(f[k][j][0:1]*exp(-sqrt(-1)*2pi*n*j/nx))
   if isign = 1, two forward fourier transforms in x are performed
   f[k][j][0:1] = sum(f[m][n][0:1]*exp(sqrt(-1)*2pi*n*j/nx))
   mixup = array of bit reversed addresses
   sct = sine/cosine table
   nyi = initial y index used
   nyp = number of y indices used
   nxhd = second dimension of f >= nx/2
   nyd = third dimension of f >= ny
   nxhyd = maximum of (nx/2,ny)
   nxyhd = maximum of (nx,ny)/2
   fourier coefficients are stored as follows:
   f[k][j][0:1] = real, imaginary part of mode j,k, where
   0 <= j < nx/2 and 0 <= k < ny, except for
   f[k][1][0:1] = real, imaginary part of mode nx/2,k, where
   ny/2+1 <= k < ny, and
   imag(f[0][0][0:1]) = real part of mode nx/2,0 and
   imag(f[0][ny/2][0:1]) = real part of mode nx/2,ny/2
   written by viktor k. decyk, ucla
local data */
   int indx1, indx1y, nx, nxh, nxhh, ny, nxy, nxhy, nyt;
   int nrx, i, j, k, l, jj, j1, k1, k2, ns, ns2, km, kmr, joff;
   int nrxb;
   float at1, ani;
   float complex t1, t2, t3;
   if (isign==0)
      return;
   indx1 = indx - 1;
   indx1y = indx1 > indy ? indx1 : indy;
   nx = 1L<<indx;
   nxh = nx/2;
   nxhh = nx/4;
   ny = 1L<<indy;
   nxy = nx > ny ? nx : ny;
   nxhy = 1L<<indx1y;
   nyt = nyi + nyp - 1;
   if (isign > 0)
      goto L100;
/* inverse fourier transform */
/* nrxb, nrx = strides into the shared mixup and sct tables */
   nrxb = nxhy/nxh;
   nrx = nxy/nxh;
#pragma omp parallel for \
private(i,j,k,l,ns,ns2,km,kmr,k1,k2,jj,j1,joff,at1,ani,t1,t2,t3)
/* each iteration transforms one row y = i (both field components) */
   for (i = nyi-1; i < nyt; i++) {
      joff = 2*nxhd*i;
/* swap complex components so each component transforms independently */
      for (j = 0; j < nxh; j++) {
         at1 = cimagf(f[2*j+joff]);
         f[2*j+joff] = crealf(f[2*j+joff])
                       + crealf(f[1+2*j+joff])*_Complex_I;
         f[1+2*j+joff] = at1 + cimagf(f[1+2*j+joff])*_Complex_I;
      }
/* bit-reverse array elements in x */
      for (j = 0; j < nxh; j++) {
         j1 = (mixup[j] - 1)/nrxb;
         if (j < j1) {
            t1 = f[2*j1+joff];
            t2 = f[1+2*j1+joff];
            f[2*j1+joff] = f[2*j+joff];
            f[1+2*j1+joff] = f[1+2*j+joff];
            f[2*j+joff] = t1;
            f[1+2*j+joff] = t2;
         }
      }
/* then transform in x: radix-2 butterfly stages on both components */
      ns = 1;
      for (l = 0; l < indx1; l++) {
         ns2 = ns + ns;
         km = nxhh/ns;
         kmr = km*nrx;
         for (k = 0; k < km; k++) {
            k1 = 2*ns2*k;
            k2 = k1 + 2*ns;
            for (j = 0; j < ns; j++) {
               t1 = sct[kmr*j];
               t2 = t1*f[2*j+k2+joff];
               t3 = t1*f[1+2*j+k2+joff];
               f[2*j+k2+joff] = f[2*j+k1+joff] - t2;
               f[1+2*j+k2+joff] = f[1+2*j+k1+joff] - t3;
               f[2*j+k1+joff] += t2;
               f[1+2*j+k1+joff] += t3;
            }
         }
         ns = ns2;
      }
/* unscramble coefficients and normalize */
      kmr = nxy/nx;
      ani = 0.5/(((float) nx)*((float) ny));
      for (j = 1; j < nxhh; j++) {
         t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I;
         for (jj = 0; jj < 2; jj++) {
            t2 = conjf(f[jj+2*(nxh-j)+joff]);
            t1 = f[jj+2*j+joff] + t2;
            t2 = (f[jj+2*j+joff] - t2)*t3;
            f[jj+2*j+joff] = ani*(t1 + t2);
            f[jj+2*(nxh-j)+joff] = ani*conjf(t1 - t2);
         }
      }
      ani = 2.0*ani;
      for (jj = 0; jj < 2; jj++) {
         f[jj+2*nxhh+joff] = ani*conjf(f[jj+2*nxhh+joff]);
/* pack mode nx/2 into the imaginary part of mode 0 */
         f[jj+joff] = ani*((crealf(f[jj+joff]) + cimagf(f[jj+joff]))
                      + (crealf(f[jj+joff]) - cimagf(f[jj+joff]))*_Complex_I);
      }
   }
   return;
/* forward fourier transform */
 L100: nrxb = nxhy/nxh;
   nrx = nxy/nxh;
#pragma omp parallel for \
private(i,j,k,l,ns,ns2,km,kmr,k1,k2,jj,j1,joff,at1,t1,t2,t3)
/* each iteration transforms one row y = i (both field components) */
   for (i = nyi-1; i < nyt; i++) {
      joff = 2*nxhd*i;
/* scramble coefficients */
      kmr = nxy/nx;
      for (j = 1; j < nxhh; j++) {
         t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I;
         for (jj = 0; jj < 2; jj++) {
            t2 = conjf(f[jj+2*(nxh-j)+joff]);
            t1 = f[jj+2*j+joff] + t2;
            t2 = (f[jj+2*j+joff] - t2)*t3;
            f[jj+2*j+joff] = t1 + t2;
            f[jj+2*(nxh-j)+joff] = conjf(t1 - t2);
         }
      }
      for (jj = 0; jj < 2; jj++) {
         f[jj+2*nxhh+joff] = 2.0*conjf(f[jj+2*nxhh+joff]);
         f[jj+joff] = (crealf(f[jj+joff]) + cimagf(f[jj+joff]))
                      + (crealf(f[jj+joff]) - cimagf(f[jj+joff]))*_Complex_I;
      }
/* bit-reverse array elements in x */
      for (j = 0; j < nxh; j++) {
         j1 = (mixup[j] - 1)/nrxb;
         if (j < j1) {
            t1 = f[2*j1+joff];
            t2 = f[1+2*j1+joff];
            f[2*j1+joff] = f[2*j+joff];
            f[1+2*j1+joff] = f[1+2*j+joff];
            f[2*j+joff] = t1;
            f[1+2*j+joff] = t2;
         }
      }
/* then transform in x: radix-2 butterfly stages, conjugate twiddles */
      ns = 1;
      for (l = 0; l < indx1; l++) {
         ns2 = ns + ns;
         km = nxhh/ns;
         kmr = km*nrx;
         for (k = 0; k < km; k++) {
            k1 = 2*ns2*k;
            k2 = k1 + 2*ns;
            for (j = 0; j < ns; j++) {
               t1 = conjf(sct[kmr*j]);
               t2 = t1*f[2*j+k2+joff];
               t3 = t1*f[1+2*j+k2+joff];
               f[2*j+k2+joff] = f[2*j+k1+joff] - t2;
               f[1+2*j+k2+joff] = f[1+2*j+k1+joff] - t3;
               f[2*j+k1+joff] += t2;
               f[1+2*j+k1+joff] += t3;
            }
         }
         ns = ns2;
      }
/* swap complex components back to interleaved storage */
      for (j = 0; j < nxh; j++) {
         at1 = cimagf(f[2*j+joff]);
         f[2*j+joff] = crealf(f[2*j+joff])
                       + crealf(f[1+2*j+joff])*_Complex_I;
         f[1+2*j+joff] = at1 + cimagf(f[1+2*j+joff])*_Complex_I;
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void cfft2rm2y(float complex f[], int isign, int mixup[],
               float complex sct[], int indx, int indy, int nxi,
               int nxp, int nxhd, int nyd, int nxhyd, int nxyhd) {
/* this subroutine performs the y part of 2 two dimensional real to
   complex fast fourier transforms, and their inverses, for a subset of
   x, using complex arithmetic, with OpenMP
   for isign = (-1,1), input: all, output: f
   for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
   for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
   where N = (nx/2)*ny
   indx/indy = exponent which determines length in x/y direction,
   where nx=2**indx, ny=2**indy
   if isign = -1, two inverse fourier transforms in y are performed
   f[m][n][0:1] = sum(f[k][j][0:1]*exp(-sqrt(-1)*2pi*m*k/ny))
   if isign = 1, two forward fourier transforms in y are performed
   f[k][j][0:1] = sum(f[m][n][0:1]*exp(sqrt(-1)*2pi*m*k/ny))
   mixup = array of bit reversed addresses
   sct = sine/cosine table
   nxi = initial x index used
   nxp = number of x indices used
   nxhd = second dimension of f >= nx/2
   nyd = third dimension of f >= ny
   nxhyd = maximum of (nx/2,ny)
   nxyhd = maximum of (nx,ny)/2
   fourier coefficients are stored as follows:
   f[k][j][0:1] = real, imaginary part of mode j,k, where
   0 <= j < nx/2 and 0 <= k < ny, except for
   f[k][1][0:1] = real, imaginary part of mode nx/2,k, where
   ny/2+1 <= k < ny, and
   imag(f[0][0][0:1]) = real part of mode nx/2,0 and
   imag(f[0][ny/2][0:1]) = real part of mode nx/2,ny/2
   written by viktor k. decyk, ucla
local data */
   int indx1, indx1y, nx, ny, nyh, nxy, nxhy, nxt;
   int nry, i, j, k, l, jj, j1, j2, k1, k2, ns, ns2, km, kmr, koff;
   int nryb;
   float complex t1, t2, t3;
   if (isign==0)
      return;
   indx1 = indx - 1;
   indx1y = indx1 > indy ? indx1 : indy;
   nx = 1L<<indx;
   ny = 1L<<indy;
   nyh = ny/2;
   nxy = nx > ny ? nx : ny;
   nxhy = 1L<<indx1y;
   nxt = nxi + nxp - 1;
   if (isign > 0)
      goto L80;
/* inverse fourier transform */
/* nryb/nry scale bit-reversal and twiddle-table strides to this size */
   nryb = nxhy/ny;
   nry = nxy/ny;
/* each thread owns one column i: no two iterations touch the same data */
#pragma omp parallel for \
private(i,j,k,l,ns,ns2,km,kmr,k1,k2,jj,j1,j2,koff,t1,t2,t3)
   for (i = nxi-1; i < nxt; i++) {
/* bit-reverse array elements in y */
      for (k = 0; k < ny; k++) {
         koff = 2*nxhd*k;
         k1 = (mixup[k] - 1)/nryb;
         if (k < k1) {
            k1 = 2*nxhd*k1;
            t1 = f[2*i+k1];
            t2 = f[1+2*i+k1];
            f[2*i+k1] = f[2*i+koff];
            f[1+2*i+k1] = f[1+2*i+koff];
            f[2*i+koff] = t1;
            f[1+2*i+koff] = t2;
         }
      }
/* then transform in y */
/* radix-2 butterflies: log2(ny) stages over both interleaved transforms */
      ns = 1;
      for (l = 0; l < indy; l++) {
         ns2 = ns + ns;
         km = nyh/ns;
         kmr = km*nry;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = 2*nxhd*(j + k1);
               j2 = 2*nxhd*(j + k2);
               t1 = sct[kmr*j];
               t2 = t1*f[2*i+j2];
               t3 = t1*f[1+2*i+j2];
               f[2*i+j2] = f[2*i+j1] - t2;
               f[1+2*i+j2] = f[1+2*i+j1] - t3;
               f[2*i+j1] += t2;
               f[1+2*i+j1] += t3;
            }
         }
         ns = ns2;
      }
   }
/* unscramble modes kx = 0, nx/2 */
/* only the owner of column 0 (nxi==1) repacks these shared modes */
   if (nxi==1) {
      for (k = 1; k < nyh; k++) {
         koff = 2*nxhd*k;
         k1 = 2*nxhd*ny - koff;
         for (jj = 0; jj < 2; jj++) {
            t1 = f[jj+k1];
            f[jj+k1] = 0.5*(cimagf(f[jj+koff] + t1)
                          + crealf(f[jj+koff] - t1)*_Complex_I);
            f[jj+koff] = 0.5*(crealf(f[jj+koff] + t1)
                            + cimagf(f[jj+koff] - t1)*_Complex_I);
         }
      }
   }
   return;
/* forward fourier transform */
L80: nryb = nxhy/ny;
   nry = nxy/ny;
/* scramble modes kx = 0, nx/2 (inverse of the unscramble step above) */
   if (nxi==1) {
      for (k = 1; k < nyh; k++) {
         koff = 2*nxhd*k;
         k1 = 2*nxhd*ny - koff;
         for (jj = 0; jj < 2; jj++) {
            t1 = cimagf(f[jj+k1]) + crealf(f[jj+k1])*_Complex_I;
            f[jj+k1] = conjf(f[jj+koff] - t1);
            f[jj+koff] += t1;
         }
      }
   }
#pragma omp parallel for \
private(i,j,k,l,ns,ns2,km,kmr,k1,k2,jj,j1,j2,koff,t1,t2,t3)
   for (i = nxi-1; i < nxt; i++) {
/* bit-reverse array elements in y */
      for (k = 0; k < ny; k++) {
         koff = 2*nxhd*k;
         k1 = (mixup[k] - 1)/nryb;
         if (k < k1) {
            k1 = 2*nxhd*k1;
            t1 = f[2*i+k1];
            t2 = f[1+2*i+k1];
            f[2*i+k1] = f[2*i+koff];
            f[1+2*i+k1] = f[1+2*i+koff];
            f[2*i+koff] = t1;
            f[1+2*i+koff] = t2;
         }
      }
/* then transform in y */
/* forward pass is identical except the twiddles are conjugated */
      ns = 1;
      for (l = 0; l < indy; l++) {
         ns2 = ns + ns;
         km = nyh/ns;
         kmr = km*nry;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = 2*nxhd*(j + k1);
               j2 = 2*nxhd*(j + k2);
               t1 = conjf(sct[kmr*j]);
               t2 = t1*f[2*i+j2];
               t3 = t1*f[1+2*i+j2];
               f[2*i+j2] = f[2*i+j1] - t2;
               f[1+2*i+j2] = f[1+2*i+j1] - t3;
               f[2*i+j1] += t2;
               f[1+2*i+j1] += t3;
            }
         }
         ns = ns2;
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void cwfft2rvmx(float complex f[], int isign, int mixup[],
float complex sct[], int indx, int indy, int nxhd,
int nyd, int nxhyd, int nxyhd) {
/* wrapper function for real to complex fft, with packed data */
/* parallelized with OpenMP */
/* local data */
int nxh, ny;
static int nxi = 1, nyi = 1;
/* calculate range of indices */
nxh = 1L<<(indx - 1);
ny = 1L<<indy;
/* inverse fourier transform */
if (isign < 0) {
/* perform x fft */
cfft2rvmxx(f,isign,mixup,sct,indx,indy,nyi,ny,nxhd,nyd,nxhyd,
nxyhd);
/* perform y fft */
cfft2rmxy(f,isign,mixup,sct,indx,indy,nxi,nxh,nxhd,nyd,nxhyd,
nxyhd);
}
/* forward fourier transform */
else if (isign > 0) {
/* perform y fft */
cfft2rmxy(f,isign,mixup,sct,indx,indy,nxi,nxh,nxhd,nyd,nxhyd,
nxyhd);
/* perform x fft */
cfft2rvmxx(f,isign,mixup,sct,indx,indy,nyi,ny,nxhd,nyd,nxhyd,
nxyhd);
}
return;
}
/*--------------------------------------------------------------------*/
void cwfft2rvm2(float complex f[], int isign, int mixup[],
float complex sct[], int indx, int indy, int nxhd,
int nyd, int nxhyd, int nxyhd) {
/* wrapper function for 2 2d real to complex ffts, with packed data */
/* parallelized with OpenMP */
/* local data */
int nxh, ny;
static int nxi = 1, nyi = 1;
/* calculate range of indices */
nxh = 1L<<(indx - 1);
ny = 1L<<indy;
/* inverse fourier transform */
if (isign < 0) {
/* perform x fft */
cfft2rvm2x(f,isign,mixup,sct,indx,indy,nyi,ny,nxhd,nyd,nxhyd,
nxyhd);
/* perform y fft */
cfft2rm2y(f,isign,mixup,sct,indx,indy,nxi,nxh,nxhd,nyd,nxhyd,
nxyhd);
}
/* forward fourier transform */
else if (isign > 0) {
/* perform y fft */
cfft2rm2y(f,isign,mixup,sct,indx,indy,nxi,nxh,nxhd,nyd,nxhyd,
nxyhd);
/* perform x fft */
cfft2rvm2x(f,isign,mixup,sct,indx,indy,nyi,ny,nxhd,nyd,nxhyd,
nxyhd);
}
return;
}
/* Interfaces to Fortran */
/*--------------------------------------------------------------------*/
/* Fortran interface: dereference scalar pointer arguments and forward
   to cdistr2 */
void cdistr2_(float *part, float *vtx, float *vty, float *vdx, float *vdy,
              int *npx, int *npy, int *idimp, int *nop, int *nx, int *ny,
              int *ipbc) {
   cdistr2(part,*vtx,*vty,*vdx,*vdy,*npx,*npy,*idimp,*nop,*nx,*ny,*ipbc);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran interface for cdblkp2l (irc stays a pointer: output status) */
void cdblkp2l_(float *part, int *kpic, int *nppmx, int *idimp, int *nop,
               int *mx, int *my, int *mx1, int *mxy1, int *irc) {
   cdblkp2l(part,kpic,nppmx,*idimp,*nop,*mx,*my,*mx1,*mxy1,irc);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran interface for cppmovin2lt (irc is an output status pointer) */
void cppmovin2lt_(float *part, float *ppart, int *kpic, int *nppmx,
                  int *idimp, int *nop, int *mx, int *my, int *mx1,
                  int *mxy1, int *irc) {
   cppmovin2lt(part,ppart,kpic,*nppmx,*idimp,*nop,*mx,*my,*mx1,*mxy1,
               irc);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran interface for cppmovin2ltp */
void cppmovin2ltp_(float *part, float *ppart, int *kpic, int *kp,
                   int *nppmx, int *idimp, int *nop, int *mx, int *my,
                   int *mx1, int *mxy1, int *irc) {
   cppmovin2ltp(part,ppart,kpic,kp,*nppmx,*idimp,*nop,*mx,*my,*mx1,
                *mxy1,irc);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran interface for cppcheck2lt */
void cppcheck2lt_(float *ppart, int *kpic, int *idimp, int *nppmx,
                  int *nx, int *ny, int *mx, int *my, int *mx1,
                  int *my1, int *irc) {
   cppcheck2lt(ppart,kpic,*idimp,*nppmx,*nx,*ny,*mx,*my,*mx1,*my1,irc);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran interface for cgppush2lt (ek accumulates kinetic energy) */
void cgppush2lt_(float *ppart, float *fxy, int *kpic, float *qbm,
                 float *dt, float *ek, int *idimp, int *nppmx, int *nx,
                 int *ny, int *mx, int *my, int *nxv, int *nyv,
                 int *mx1, int *mxy1, int *ipbc) {
   cgppush2lt(ppart,fxy,kpic,*qbm,*dt,ek,*idimp,*nppmx,*nx,*ny,*mx,*my,
              *nxv,*nyv,*mx1,*mxy1,*ipbc);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran interface for cgppushf2lt */
void cgppushf2lt_(float *ppart, float *fxy, int *kpic, int *ncl,
                  int *ihole, float *qbm, float *dt, float *ek,
                  int *idimp, int *nppmx, int *nx, int *ny, int *mx,
                  int *my, int *nxv, int *nyv, int *mx1, int *mxy1,
                  int *ntmax, int *irc) {
   cgppushf2lt(ppart,fxy,kpic,ncl,ihole,*qbm,*dt,ek,*idimp,*nppmx,*nx,
               *ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ntmax,irc);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran interface for cvgppush2lt (vectorized push variant) */
void cvgppush2lt_(float *ppart, float *fxy, int *kpic, float *qbm,
                  float *dt, float *ek, int *idimp, int *nppmx, int *nx,
                  int *ny, int *mx, int *my, int *nxv, int *nyv,
                  int *mx1, int *mxy1, int *ipbc) {
   cvgppush2lt(ppart,fxy,kpic,*qbm,*dt,ek,*idimp,*nppmx,*nx,*ny,*mx,*my,
               *nxv,*nyv,*mx1,*mxy1,*ipbc);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran interface for cvgppushf2lt */
void cvgppushf2lt_(float *ppart, float *fxy, int *kpic, int *ncl,
                   int *ihole, float *qbm, float *dt, float *ek,
                   int *idimp, int *nppmx, int *nx, int *ny, int *mx,
                   int *my, int *nxv, int *nyv, int *mx1, int *mxy1,
                   int *ntmax, int *irc) {
   cvgppushf2lt(ppart,fxy,kpic,ncl,ihole,*qbm,*dt,ek,*idimp,*nppmx,*nx,
                *ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ntmax,irc);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran interface for cgppost2lt (charge deposit) */
void cgppost2lt_(float *ppart, float *q, int *kpic, float *qm,
                 int *nppmx, int *idimp, int *mx, int *my, int *nxv,
                 int *nyv, int *mx1, int *mxy1) {
   cgppost2lt(ppart,q,kpic,*qm,*nppmx,*idimp,*mx,*my,*nxv,*nyv,*mx1,
              *mxy1);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran interface for cvgppost2lt (vectorized charge deposit) */
void cvgppost2lt_(float *ppart, float *q, int *kpic, float *qm,
                  int *nppmx, int *idimp, int *mx, int *my, int *nxv,
                  int *nyv, int *mx1, int *mxy1) {
   cvgppost2lt(ppart,q,kpic,*qm,*nppmx,*idimp,*mx,*my,*nxv,*nyv,*mx1,
               *mxy1);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran interface for cviscan2 (prefix scan helper) */
void cviscan2_(int *isdata, int *mb, int *nths) {
   cviscan2(isdata,mb,*nths);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran interface for cpporder2lt (particle reordering) */
void cpporder2lt_(float *ppart, float *ppbuff, int *kpic, int *ncl,
                  int *ihole, int *idimp, int *nppmx, int *nx, int *ny,
                  int *mx, int *my, int *mx1, int *my1, int *npbmx,
                  int *ntmax, int *irc) {
   cpporder2lt(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*nx,*ny,*mx,
               *my,*mx1,*my1,*npbmx,*ntmax,irc);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran interface for cpporderf2lt */
void cpporderf2lt_(float *ppart, float *ppbuff, int *kpic, int *ncl,
                   int *ihole, int *idimp, int *nppmx, int *mx1,
                   int *my1, int *npbmx, int *ntmax, int *irc) {
   cpporderf2lt(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*mx1,*my1,
                *npbmx,*ntmax,irc);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran interface for cvpporder2lt (vectorized reordering) */
void cvpporder2lt_(float *ppart, float *ppbuff, int *kpic, int *ncl,
                   int *ihole, int *idimp, int *nppmx, int *nx, int *ny,
                   int *mx, int *my, int *mx1, int *my1, int *npbmx,
                   int *ntmax, int *irc) {
   cvpporder2lt(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*nx,*ny,*mx,
                *my,*mx1,*my1,*npbmx,*ntmax,irc);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran interface for cvpporderf2lt */
void cvpporderf2lt_(float *ppart, float *ppbuff, int *kpic, int *ncl,
                    int *ihole, int *idimp, int *nppmx, int *mx1,
                    int *my1, int *npbmx, int *ntmax, int *irc) {
   cvpporderf2lt(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*mx1,*my1,
                 *npbmx,*ntmax,irc);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran interface for ccguard2l (field guard cells) */
void ccguard2l_(float *fxy, int *nx, int *ny, int *nxe, int *nye) {
   ccguard2l(fxy,*nx,*ny,*nxe,*nye);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran interface for caguard2l (charge guard cells) */
void caguard2l_(float *q, int *nx, int *ny, int *nxe, int *nye) {
   caguard2l(q,*nx,*ny,*nxe,*nye);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran interface for cvmpois22 (Poisson field solver; we stays a
   pointer: output field energy) */
void cvmpois22_(float complex *q, float complex *fxy, int *isign,
                float complex *ffc, float *ax, float *ay, float *affp,
                float *we, int *nx, int *ny, int *nxvh, int *nyv,
                int *nxhd, int *nyhd) {
   cvmpois22(q,fxy,*isign,ffc,*ax,*ay,*affp,we,*nx,*ny,*nxvh,*nyv,*nxhd,
             *nyhd);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran interface for cwfft2rinit (fft tables setup) */
void cwfft2rinit_(int *mixup, float complex *sct, int *indx, int *indy,
                  int *nxhyd, int *nxyhd) {
   cwfft2rinit(mixup,sct,*indx,*indy,*nxhyd,*nxyhd);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran interface for cfft2rvmxx (x part of the fft) */
void cfft2rvmxx_(float complex *f, int *isign, int *mixup,
                 float complex *sct, int *indx, int *indy, int *nyi,
                 int *nyp, int *nxhd, int *nyd, int *nxhyd,
                 int *nxyhd) {
   cfft2rvmxx(f,*isign,mixup,sct,*indx,*indy,*nyi,*nyp,*nxhd,*nyd,
              *nxhyd,*nxyhd);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran interface for cfft2rmxy (y part of the fft) */
void cfft2rmxy_(float complex *f, int *isign, int *mixup,
                float complex *sct, int *indx, int *indy, int *nxi,
                int *nxp, int *nxhd, int *nyd, int *nxhyd, int *nxyhd) {
   cfft2rmxy(f,*isign,mixup,sct,*indx,*indy,*nxi,*nxp,*nxhd,*nyd,*nxhyd,
             *nxyhd);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran interface for cfft2rvm2x (x part, 2 interleaved ffts) */
void cfft2rvm2x_(float complex *f, int *isign, int *mixup,
                 float complex *sct, int *indx, int *indy, int *nyi,
                 int *nyp, int *nxhd, int *nyd, int *nxhyd,
                 int *nxyhd) {
   cfft2rvm2x(f,*isign,mixup,sct,*indx,*indy,*nyi,*nyp,*nxhd,*nyd,
              *nxhyd,*nxyhd);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran interface for cfft2rm2y (y part, 2 interleaved ffts) */
void cfft2rm2y_(float complex *f, int *isign, int *mixup,
                float complex *sct, int *indx, int *indy, int *nxi,
                int *nxp, int *nxhd, int *nyd, int *nxhyd, int *nxyhd) {
   cfft2rm2y(f,*isign,mixup,sct,*indx,*indy,*nxi,*nxp,*nxhd,*nyd,*nxhyd,
             *nxyhd);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran interface for cwfft2rvmx (full 2d fft wrapper) */
void cwfft2rvmx_(float complex *f, int *isign, int *mixup,
                 float complex *sct, int *indx, int *indy, int *nxhd,
                 int *nyd, int *nxhyd, int *nxyhd) {
   cwfft2rvmx(f,*isign,mixup,sct,*indx,*indy,*nxhd,*nyd,*nxhyd,*nxyhd);
   return;
}
/*--------------------------------------------------------------------*/
/* Fortran interface for cwfft2rvm2 (full 2d fft wrapper, 2 ffts) */
void cwfft2rvm2_(float complex *f, int *isign, int *mixup,
                 float complex *sct, int *indx, int *indy, int *nxhd,
                 int *nyd, int *nxhyd, int *nxyhd) {
   cwfft2rvm2(f,*isign,mixup,sct,*indx,*indy,*nxhd,*nyd,*nxhyd,*nxyhd);
   return;
}
|
GB_binop__bor_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_01__bor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__bor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_03__bor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bor_uint32)
// A*D function (colscale): GB (_AxD__bor_uint32)
// D*A function (rowscale): GB (_DxB__bor_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__bor_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__bor_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bor_uint32)
// C=scalar+B GB (_bind1st__bor_uint32)
// C=scalar+B' GB (_bind1st_tran__bor_uint32)
// C=A+scalar GB (_bind2nd__bor_uint32)
// C=A'+scalar GB (_bind2nd_tran__bor_uint32)
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij) | (bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x) | (y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BOR || GxB_NO_UINT32 || GxB_NO_BOR_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Disabled for this kernel: BOR is not in that list, so no dense-accum
// ewise3 variant is generated for bor_uint32.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A|B where C, A, and B are all dense; the loop body comes from the
// included template, specialized by the GB_* macros defined above.
GrB_Info GB (_Cdense_ewise3_noaccum__bor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C |= B: accumulate a sparse matrix B into a dense matrix C, using the
// task slicing precomputed in B_ek_slicing.
GrB_Info GB (_Cdense_accumB__bor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C |= b: accumulate a scalar into every entry of a dense matrix.
GrB_Info GB (_Cdense_accumb__bor_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable -- the inner block above always returns
    // (artifact of the code generator); kept to match the generated file.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale each column j of A by the diagonal entry D(j,j),
// combining with the BOR operator.
GrB_Info GB (_AxD__bor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale each row i of B by the diagonal entry D(i,i),
// combining with the BOR operator.
GrB_Info GB (_DxB__bor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd C = A+B (set union of patterns) with the BOR operator,
// optionally masked by M.  The ek-slicing workspaces are declared here
// and freed by GB_FREE_WORK after the template runs.
GrB_Info GB (_AaddB__bor_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult C = A.*B (set intersection of patterns) with the BOR
// operator, general sparsity case.
GrB_Info GB (_AemultB_01__bor_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 0 for BOR (commutative), so the flipxy argument is
// ignored and only the unflipped template is compiled.
GrB_Info GB (_AemultB_02__bor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<M> = A.*B when M is sparse/hyper and both A and B are
// bitmap/full; iterates over the entries of M.
GrB_Info GB (_AemultB_03__bor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult with a bitmap result: C=A.*B, C<M>=A.*B, or C<!M>=A.*B.
GrB_Info GB (_AemultB_bitmap__bor_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x | Bx [p] for every entry present in B (bind the scalar as
// the first operand).  Entries absent from the bitmap Bb are skipped.
GrB_Info GB (_bind1st__bor_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // apply the op only where B has an entry
        if (GBB (Bb, p))
        {
            uint32_t bij = Bx [p] ;
            Cx [p] = (x) | (bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] | y for every entry present in A (bind the scalar as
// the second operand).  Entries absent from the bitmap Ab are skipped.
GrB_Info GB (_bind2nd__bor_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((uint32_t *) y_input)) ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // apply the op only where A has an entry
        if (GBB (Ab, p))
        {
            uint32_t aij = Ax [p] ;
            Cx [p] = (aij) | (y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (x) | (aij) ; \
}
// C = x | A': transpose A and apply the operator with the scalar bound
// as the first operand, via the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind1st_tran__bor_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (aij) | (y) ; \
}
// C = A' | y: transpose A and apply the operator with the scalar bound
// as the second operand, via the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind2nd_tran__bor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
support_classes.h | #include <random>
#include <iostream>
#include <fstream>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <unordered_map>
#include <unordered_set>
#include <map>
#include <cmath>
#include <ctime>
#include <queue>
#include <vector>
#include <omp.h>
#include <chrono>
#include <limits>
#include <sys/time.h>
#include <algorithm>
#include <ctime>
#include "support_func.h"
using namespace std;
/* Simple wall-clock stopwatch backed by std::chrono::steady_clock. */
class StopW {
    std::chrono::steady_clock::time_point time_begin;
public:
    /* Timing starts at construction. */
    StopW() : time_begin(std::chrono::steady_clock::now()) {}
    /* Microseconds elapsed since construction or the last reset(). */
    float getElapsedTimeMicro() {
        auto now = std::chrono::steady_clock::now();
        auto us = std::chrono::duration_cast<std::chrono::microseconds>(now - time_begin);
        return static_cast<float>(us.count());
    }
    /* Restart the stopwatch from now. */
    void reset() {
        time_begin = std::chrono::steady_clock::now();
    }
};
/* Exact (brute-force) nearest-neighbor lists, one list per dataset point. */
class ExactKNN {
public:
    int K;                                 // neighbors kept per point (set by Build)
    vector <vector <uint32_t> > matrixNN;  // matrixNN[i] = neighbor ids of point i
    // Fill matrixNN with the k nearest neighbors of every point (O(N^2 d)).
    void Build(int k, vector<float> dataset, size_t N, size_t d, Metric *metric);
    // Fill matrixNN with every neighbor at distance < thr.
    void BuildThreshold(float thr, vector<float> dataset, size_t N, size_t d, Metric *metric);
};
/* Brute-force k-NN: for each point i, scan all other points and keep the
   K closest using a bounded priority queue.  Rows of matrixNN are
   preallocated so the parallel loop only writes its own row. */
void ExactKNN::Build(int k, vector<float> dataset, size_t N, size_t d, Metric *metric) {
    K = k;
    vector<uint32_t> sloy(K);
    for (int i=0; i < N; ++i) {
        matrixNN.push_back(sloy);
    }
    #pragma omp parallel for
    for (int i = 0; i < N; ++i) {
        int start = 0;
        if (i == 0) start = 1;   // seed the heap with some point other than i
        const float *point_i = dataset.data() + i*d;
        const float *point_start = dataset.data() + start*d;
        float dist = metric->Dist(point_i, point_start, d);
        neighbor neig = {start, dist};
        // NOTE(review): ordering relies on neighbor::operator< (defined in
        // support_func.h); appears to be a max-heap on distance -- confirm.
        priority_queue<neighbor> heap;
        heap.push(neig);
        for (int j = start + 1; j < N; ++j) {
            if (j != i) {
                const float *point_j = dataset.data() + j*d;
                float dist = metric->Dist(point_i, point_j, d);
                neighbor neig = {j, dist};
                heap.push(neig);
                if (heap.size() > K) {
                    heap.pop();   // evict the current farthest, keeping K best
                }
            }
        }
        vector<uint32_t> layer;
        while (heap.size() > 0) {
            layer.push_back(heap.top().number);
            heap.pop();
        }
        matrixNN[i] = layer;   // ids in heap-pop order (farthest first)
    }
}
/* Range search: matrixNN[i] collects every j != i with Dist(i,j) < thr.
   Rows are preallocated empty so each parallel iteration appends only to
   its own row.  Note: K is left untouched by this variant. */
void ExactKNN::BuildThreshold(float thr, vector<float> dataset, size_t N, size_t d, Metric *metric) {
    vector<uint32_t> sloy;
    for (int i=0; i < N; ++i) {
        matrixNN.push_back(sloy);
    }
    #pragma omp parallel for
    for (int i = 0; i < N; ++i) {
        const float *point_i = dataset.data() + i*d;
        for (int j = 0; j < N; ++j) {
            if (j != i) {
                const float *point_j = dataset.data() + j*d;
                if (metric->Dist(point_i, point_j, d) < thr) {
                    matrixNN[i].push_back(j);
                }
            }
        }
    }
}
/* Randomized long-range neighbor graph: each point gets L edges sampled
   with distance- or rank-biased probabilities. */
class KLgraph {
public:
    int L;                                     // edges per point
    vector< vector <uint32_t> > longmatrixNN;  // longmatrixNN[i] = edge targets of i
    // Sample L edges per point with probability ~ 1/rank over all N-1 candidates.
    void BuildByNumber(int l, vector<float> dataset, size_t N, size_t d, std::mt19937 random_gen,
                       Metric *metric);
    // Same, but ranks only a random subsample of sqrtN candidates per point.
    void BuildByNumberCustom(int l, vector<float> dataset, size_t N, size_t d, size_t sqrtN, std::mt19937 random_gen,
                             Metric *metric);
    // Sample L edges among points farther than a fixed threshold, weighted by distance.
    void BuildByDist(int l, vector<float> dataset, size_t N, size_t d, std::mt19937 random_gen,
                     Metric *metric);
};
/* For every point i, rank all other points by distance and keep L of
   them, sampled without replacement with probability proportional to
   1/rank (closer points are more likely to be chosen).
   Fixes vs. the previous version:
   - the single mt19937 / discrete_distribution pair was shared by all
     OpenMP threads and mutated concurrently (a data race); each
     iteration now owns a private, deterministically seeded generator;
   - removed an unused set<neighbor> local. */
void KLgraph::BuildByNumber(int l, vector<float> dataset, size_t N, size_t d, std::mt19937 random_gen,
                            Metric *metric){
    L = l;
    vector<uint32_t> sloy;
    for (int i=0; i < N; ++i) {
        longmatrixNN.push_back(sloy);
    }
    /* 1/rank sampling weights; read-only in the parallel loop */
    vector<float> custom_prob;
    for (int i=0; i < N - 1; ++i) {
        custom_prob.push_back(1. / (i+ 1) );
    }
    /* draw one base seed up front so per-iteration generators are
       independent of thread scheduling (reproducible for a given seed) */
    const unsigned int base_seed = (unsigned int)random_gen();
    #pragma omp parallel for
    for(int i=0; i < N; ++i) {
        std::mt19937 gen(base_seed + (unsigned int)i);
        discrete_distribution<int> custom_distr (custom_prob.begin(), custom_prob.end());
        const float *point_i = dataset.data() + i*d;
        vector<neighbor> chosen_neigs;
        for (int j = 0; j < N; ++j) {
            if (i != j) {
                const float *point_j = dataset.data() + j * d;
                float dist = metric->Dist(point_i, point_j, d);
                neighbor neig{j, dist};
                chosen_neigs.push_back(neig);
            }
        }
        sort(chosen_neigs.begin(), chosen_neigs.end());
        /* sample L distinct ranks; requires L <= N-1 or this never ends */
        unordered_set <int> ll;
        while (ll.size() < L) {
            ll.insert(custom_distr(gen));
        }
        for (auto el : ll) {
            longmatrixNN[i].push_back(chosen_neigs[el].number);
        }
    }
}
/* Like BuildByNumber, but instead of ranking all N-1 points it ranks a
   random subsample of sqrtN distinct candidates per point, then samples
   L of them with probability ~ 1/rank. */
void KLgraph::BuildByNumberCustom(int l, vector<float> dataset, size_t N, size_t d, size_t sqrtN, std::mt19937 random_gen,
                                  Metric *metric){
    cout << sqrtN << ' ' << N << endl;   // debug trace of the sample size
    L = l;
    vector<uint32_t> sloy;
    for (int i=0; i < N; ++i) {
        longmatrixNN.push_back(sloy);
    }
    /* 1/rank weights over the sqrtN sampled candidates */
    vector<float> custom_prob;
    for (int i=0; i < sqrtN; ++i) {
        custom_prob.push_back(1. / (i+ 1) );
    }
    discrete_distribution<int> custom_distr (custom_prob.begin(), custom_prob.end());
    uniform_int_distribution<int> uniform_distr(0, N - 1);
    /* NOTE(review): random_gen and both distributions are shared by all
       OpenMP threads below and mutated concurrently -- data race; results
       are not reproducible.  Consider per-thread generators. */
    #pragma omp parallel for
    for(int i=0; i < N; ++i) {
        int num;
        const float *point_i = dataset.data() + i * d;
        vector<neighbor> chosen_neigs;
        set<neighbor> chn_neigs;
        /* draw sqrtN distinct candidate points (set dedups by neighbor) */
        while (chn_neigs.size() < sqrtN) {
            num = uniform_distr(random_gen);
            if (num != i) {
                const float *point_num = dataset.data() + num * d;
                float dist = metric->Dist(point_i, point_num, d);
                neighbor neig{num, dist};
                chn_neigs.insert(neig);
            }
        }
        for (auto el : chn_neigs) {
            chosen_neigs.push_back(el);
        }
        sort(chosen_neigs.begin(), chosen_neigs.end());
        /* sample L distinct ranks; requires L <= sqrtN or this never ends */
        unordered_set <int> ll;
        while (ll.size() < L) {
            num = custom_distr(random_gen);
            ll.insert(num);
        }
        for (auto el : ll) {
            longmatrixNN[i].push_back(chosen_neigs[el].number);
        }
    }
}
void KLgraph::BuildByDist(int l, vector<float> dataset, size_t N, size_t d, std::mt19937 random_gen,
Metric *metric){
L = l;
float thr = 0.03;
vector<uint32_t> sloy;
for (int i=0; i < N; ++i) {
longmatrixNN.push_back(sloy);
}
#pragma omp parallel for
for(int i=0; i < N; ++i) {
int num;
const float *point_i = dataset.data() + i*d;
vector<neighbor> chosen_neigs;
for (int j = 0; j < N; ++j) {
if (i != j) {
const float *point_j = dataset.data() + j * d;
float dist = metric->Dist(point_i, point_j, d);
if (dist > thr) {
neighbor neig{j, dist};
chosen_neigs.push_back(neig);
}
}
}
unordered_set <int> ll;
vector<float> custom_prob;
for (int j = 0; j < chosen_neigs.size(); ++j) {
float dist_cur = chosen_neigs[j].dist;
custom_prob.push_back(pow(pow(dist_cur, -1), d));
}
discrete_distribution<int> custom_distr (custom_prob.begin(), custom_prob.end());
while (ll.size() < L) {
num = custom_distr(random_gen);
ll.insert(num);
}
for (auto el : ll) {
longmatrixNN[i].push_back(chosen_neigs[el].number);
}
}
} |
Example_copyprivate.1.c | /*
* @@name: copyprivate.1c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: success
*/
#include <stdio.h>
float x, y;
#pragma omp threadprivate(x, y)
/* OpenMP copyprivate demo: exactly one thread executes the single region
 * and reads all four values; copyprivate then broadcasts the private a, b
 * and the threadprivate x, y to every other thread's copies.
 * NOTE(review): the scanf return value is unchecked -- on input failure the
 * variables stay unread; presumably acceptable for an example program. */
void init(float a, float b ) {
#pragma omp single copyprivate(a,b,x,y)
    {
	scanf("%f %f %f %f", &a, &b, &x, &y);
    }
}
|
1875.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
static
void init_array (int ni, int nj,
		 DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  /* Seed A with the deterministic pattern (row + col) / nj. */
  int row, col;
  for (row = 0; row < ni; row++)
    for (col = 0; col < nj; col++)
      A[row][col] = ((DATA_TYPE) (row + col) / nj);
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* Dump B to stderr so live-out data survives dead-code elimination; a
   newline is emitted every 20 values to keep lines short.
   NOTE(review): kernel_conv2d leaves B's border cells unwritten, so this
   prints uninitialized values there -- confirm that is intended. */
static
void print_array(int ni, int nj,
		 DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;
  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++) {
      fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
      if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
    }
  fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* 3x3 stencil convolution over the interior of A, written into B.
   Border rows/columns of B are intentionally left untouched. */
static
void kernel_conv2d(int ni,
		   int nj,
		   DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
		   DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;
#pragma scop
  /* BUG FIX: a bare "#pragma omp" is not a valid directive, so the kernel
     ran sequentially. Parallelize the outer loop; j is declared outside
     the loop and must be private per thread. Rows are independent, so no
     further synchronization is needed. */
  #pragma omp parallel for private(j)
  for (i = 1; i < _PB_NI - 1; ++i)
    {
      for (j = 1; j < _PB_NJ - 1; ++j)
	{
	  B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
	    + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
	    + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
	}
    }
#pragma endscop
}
/* Driver: allocate and initialize A, time the conv2d kernel, and print B
   through the DCE guard so the compiler cannot delete the computation. */
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;
  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);
  /* Initialize array(s). */
  init_array (ni, nj, POLYBENCH_ARRAY(A));
  /* Start timer. */
  //polybench_start_instruments;
  polybench_timer_start();
  /* Run kernel. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;
  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));
  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);
  return 0;
}
|
matrix.c | #include "../include/matrix.h"
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#ifdef M_ASSERTS
#include <assert.h>
#endif
/*
* TODO:MAKE UNIT TEST
void main(){
matrix_t *A = M_rand(10,20,-2,2),
*B = M_rand(20,10,-3,2),
*C,*D;
C = M_dot(A,B);
M_print(C);
M_store(C,"test.dat");
D = M_load("test.dat");
M_print(D);
M_print(A);
M_print(C);
C = M_mul(B,3.3);
puts("mult by constant");
M_free(B);
B = C;
for(int i=0;i<10;i++){
C = M_dot(A,B);
M_free(C);
}
puts("dot product x10");
D = M_mul(M_dot(C,C),1./2.);
puts("mult by constat and dot product");
D = M_sum(D,D);
puts("sum");
for(int i=0;i<1000;i++){
C = M_transpose(A);
M_free(C);
}
puts("transposex10");
//M_print(D);
}
*/
/* Release a matrix and its element buffer. Safe to call with NULL
 * (the original dereferenced m unconditionally and crashed on NULL,
 * e.g. when freeing the result of a failed M_load). */
void M_free(matrix_t *m) {
    if (m == NULL)
        return;
    free(m->data);
    free(m);
}
/* Load a matrix from the file written by M_store(): two size_t header
 * fields (rows/cols layout of matrix_t) followed by rows*cols elements.
 * Returns NULL on any I/O error; unlike the original, no memory or file
 * handle is leaked on the error paths, and short reads are detected
 * (the old `!fread(...)` check accepted any partial read > 0).
 * Assumes M_empty() returns a heap-allocated matrix (freed with free()). */
matrix_t *M_load(char *f_path){
    FILE *infile;
    matrix_t *res = M_empty();
    infile = fopen(f_path, "rb");    /* binary mode: raw element bytes */
    if(infile == NULL){
        puts("Error opening the file");
        free(res);
        return NULL;
    }
    if(fread(res,sizeof(size_t),2,infile) != 2){
        puts("Error reading the file");
        fclose(infile);
        free(res);
        return NULL;
    }
    size_t n = res->cols*res->rows;
    res->data = malloc(sizeof(m_element_t)*n);
    if(res->data == NULL || fread(res->data,sizeof(m_element_t),n,infile) != n){
        puts("Error reading the file");
        fclose(infile);
        free(res->data);    /* free(NULL) is a no-op */
        free(res);
        return NULL;
    }
    fclose(infile);
    return res;
}
/* Write a matrix to f_path: the two size_t header fields of matrix_t,
 * then rows*cols raw elements. Returns 0 on success, -1 on error.
 * Fixes: binary mode ("wb" -- the original "w" corrupts data on Windows),
 * partial writes detected (the old `!fwrite` check accepted any count > 0),
 * and fclose is checked since it flushes buffered output. */
int M_store(matrix_t *m,char *f_path){
    FILE *outfile;
    outfile = fopen(f_path, "wb");
    if(outfile == NULL){
        puts("Error opening the file");
        return -1;
    }
    size_t n = m->cols*m->rows;
    if(fwrite(m,sizeof(size_t),2,outfile) != 2 ||
       fwrite(m->data,sizeof(m_element_t),n,outfile) != n){
        puts("Error writing the file");
        fclose(outfile);
        return -1;
    }
    if(fclose(outfile) != 0){
        puts("Error writing the file");
        return -1;
    }
    return 0;
}
/* res = a^T * b (both operands indexed column-first through a's rows).
 * Requires a->rows == b->rows (not asserted, matching the original).
 * BUG FIX: `sum` was listed in private(...) while initialized outside the
 * parallel region -- private copies start uninitialized, so the first
 * column of each thread accumulated garbage (undefined behavior). The
 * accumulator now lives inside the loop body. */
matrix_t *M_dot_T(matrix_t *a, matrix_t *b){
    matrix_t *res = M_new(a->cols, b->cols);
    size_t i,j,k;
    #pragma omp parallel for private(j,k)
    for(i=0;i<res->rows;i++)
        for(j=0;j<res->cols;j++){
            m_element_t sum = 0.0;    /* fresh accumulator per output cell */
            for(k=0;k<b->rows;k++)
                sum += M_get(a,k,i)* M_get(b,k,j);
            M_get(res,i,j) = sum;
        }
    return res;
}
/* res = a * b, standard row-by-column matrix product.
 * BUG FIX: `sum` was listed in private(...) while initialized outside the
 * parallel region -- private copies start uninitialized, so each thread's
 * first output cell accumulated garbage (undefined behavior). The
 * accumulator now lives inside the loop body. */
matrix_t *M_dot(matrix_t *a, matrix_t *b){
#ifdef M_ASSERTS
    assert(a->cols==b->rows);
#endif
    matrix_t *res = M_new(a->rows, b->cols);
    size_t i,j,k;
    #pragma omp parallel for private(j,k)
    for(i=0;i<res->rows;i++)
        for(j=0;j<res->cols;j++){
            m_element_t sum = 0.0;    /* fresh accumulator per output cell */
            for(k=0;k<b->rows;k++)
                sum += M_get(a,i,k)* M_get(b,k,j);
            M_get(res,i,j) = sum;
        }
    return res;
}
/* Allocate an uninitialized rows x cols matrix.
 * BUG FIX: the original allocated rows*cols*sizeof(m_element_t*) --
 * pointer-sized cells instead of element-sized ones. That is a heap
 * overflow whenever sizeof(m_element_t) > sizeof(void*) (e.g. long
 * double) and wasted memory when it is smaller (e.g. float). */
matrix_t *M_new(size_t rows, size_t cols){
    matrix_t *out = M_empty();
    out->data = malloc(rows*cols*sizeof(m_element_t));
    out->rows = rows;
    out->cols = cols;
    return out;
}
/* Allocate a rows x cols matrix with every element set to zero
 * (calloc zero-initializes, and checks the product for overflow). */
matrix_t *M_zeros(size_t rows, size_t cols){
    matrix_t *m = M_empty();
    m->rows = rows;
    m->cols = cols;
    m->data = calloc(rows*cols, sizeof(m_element_t));
    return m;
}
/* Allocate a rows x cols matrix with every element set to one. */
matrix_t *M_ones(size_t rows, size_t cols){
    matrix_t *m = M_new(rows,cols);
    size_t total = m->rows * m->cols;
    for(size_t k = 0; k < total; k++)
        m->data[k] = 1;
    return m;
}
/* Allocate a rows x cols matrix with elements uniform in [min, max].
 * BUG FIX: the original formula min + rand()/(RAND_MAX/max) produced
 * values in [min, min+max] (e.g. M_rand(..., -2, 2) gave [-2, 0]) and
 * divided by zero when max == 0. Scale by (max - min) instead. */
matrix_t *M_rand(size_t rows, size_t cols, m_element_t min, m_element_t max){
    matrix_t *res = M_new(rows, cols);
    for(size_t i=0;i< res->rows*res->cols; i++)
        res->data[i] = min + (max - min) * ((m_element_t)rand()/(m_element_t)RAND_MAX);
    return res;
}
/* Deep-copy a matrix.
 * BUG FIX: the original memcpy size was cols*rows BYTES, not
 * cols*rows elements -- only a fraction of the data was copied
 * whenever sizeof(m_element_t) > 1. */
matrix_t *M_copy(matrix_t *src){
    matrix_t *dst = M_new(src->rows,src->cols);
    memcpy(dst->data,src->data,src->cols*src->rows*sizeof(m_element_t));
    return dst;
}
/* Return a new matrix equal to a scaled element-wise by b. */
matrix_t *M_mul_scalar(matrix_t *a, m_element_t b){
    matrix_t *out = M_new(a->rows, a->cols);
    size_t total = a->rows * a->cols;
    for(size_t k = 0; k < total; k++)
        out->data[k] = b * a->data[k];
    return out;
}
/* Return a new matrix equal to a with b added to every element. */
matrix_t *M_sum_scalar(matrix_t *a, m_element_t b){
    matrix_t *out = M_new(a->rows, a->cols);
    size_t total = a->rows * a->cols;
    for(size_t k = 0; k < total; k++)
        out->data[k] = b + a->data[k];
    return out;
}
/* Element-wise sum of two equally-shaped matrices (new matrix returned). */
matrix_t *M_sum(matrix_t *a, matrix_t *b){
#ifdef M_ASSERTS
    assert(a->cols==b->cols);
    assert(a->rows==b->rows);
#endif
    matrix_t *out = M_new(a->rows, a->cols);
    size_t total = out->cols * out->rows;
    for(size_t k = 0; k < total; k++)
        out->data[k] = a->data[k] + b->data[k];
    return out;
}
/* Element-wise difference a - b of two equally-shaped matrices. */
matrix_t *M_sub(matrix_t *a, matrix_t *b){
#ifdef M_ASSERTS
    assert(a->cols==b->cols);
    assert(a->rows==b->rows);
#endif
    matrix_t *out = M_new(a->rows, a->cols);
    size_t total = out->cols * out->rows;
    for(size_t k = 0; k < total; k++)
        out->data[k] = a->data[k] - b->data[k];
    return out;
}
/* Return a newly allocated transpose of a. */
matrix_t *M_transpose(matrix_t *a){
    matrix_t *t = M_new(a->cols, a->rows);
    for(size_t r = 0; r < t->rows; r++)
        for(size_t c = 0; c < t->cols; c++)
            M_get(t,r,c) = M_get(a,c,r);
    return t;
}
/* Return the n x n identity matrix. */
matrix_t *M_identity(size_t n){
    matrix_t *id = M_zeros(n,n);
    for(size_t k = 0; k < n; k++)
        M_get(id,k,k) = 1;
    return id;
}
/* Print the matrix to stdout, one space-separated row per line. */
void M_print(matrix_t *m){
    for(size_t r = 0; r < m->rows; r++){
        for(size_t c = 0; c < m->cols; c++)
            printf("%f ", M_get(m,r,c));
        printf("\n");
    }
}
|
GB_unop__identity_uint16_uint32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint16_uint32)
// op(A') function: GB (_unop_tran__identity_uint16_uint32)
// C type: uint16_t
// A type: uint32_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint16_t z = (uint16_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint16_t z = (uint16_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity operator with typecast: Cx [p] = (uint16_t) Ax [p]
// for every entry. Generated code -- the structure mirrors every other
// GB_unop apply kernel; only the types and the cast differ.
GrB_Info GB (_unop_apply__identity_uint16_uint32)
(
    uint16_t *Cx,               // Cx and Ax may be aliased
    const uint32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,                // number of entries (or bitmap size)
    int nthreads                // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every slot 0..anz-1 holds a value
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint32_t aij = Ax [p] ;
            // cast truncates to the low 16 bits
            uint16_t z = (uint16_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;    // skip slots with no entry
            uint32_t aij = Ax [p] ;
            uint16_t z = (uint16_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (uint16_t) A': transpose and typecast in one pass. The whole
// algorithm lives in the shared template GB_unop_transpose.c, which
// expands using the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_uint16_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,     // per-thread workspaces
    const int64_t *restrict A_slice,   // how A is partitioned over threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
middle6r.c | /*
* Date: 11 December 2015
* Contact: Thomas Peyrin - thomas.peyrin@gmail.com
*/
/*
* Simulation of boomerang analysis for Skinny
* Date: March 21, 2020
* Author: Hosein Hadipour
* Contact: hsn.hadipour@gmail.com
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#include <stdbool.h>
// #define DEBUG 1
#define Nthreads 2 // Number of parallel threads utilized in this program
#define NumOfExperiments 100 // Number of independent experiments
// Table that encodes the parameters of the various Skinny versions:
// (block size, key size, number of rounds)
//Skinny-64-64: 32 rounds
//Skinny-64-128: 36 rounds
//Skinny-64-192: 40 rounds
//Skinny-128-128: 40 rounds
//Skinny-128-256: 48 rounds
//Skinny-128-384: 56 rounds
int versions[6][3] = {{64, 64, 32}, {64, 128, 36}, {64, 192, 40}, {128, 128, 40}, {128, 256, 48}, {128, 384, 56}};
// Packing of data is done as follows (state[i][j] stands for row i and column j):
// 0 1 2 3
// 4 5 6 7
// 8 9 10 11
//12 13 14 15
// 4-bit Sbox
const unsigned char sbox_4[16] = {12, 6, 9, 0, 1, 10, 2, 11, 3, 8, 5, 13, 4, 14, 7, 15};
const unsigned char sbox_4_inv[16] = {3, 4, 6, 8, 12, 10, 1, 14, 9, 2, 5, 7, 0, 11, 13, 15};
// 8-bit Sbox
const unsigned char sbox_8[256] = {0x65, 0x4c, 0x6a, 0x42, 0x4b, 0x63, 0x43, 0x6b, 0x55, 0x75, 0x5a, 0x7a, 0x53, 0x73, 0x5b, 0x7b, 0x35, 0x8c, 0x3a, 0x81, 0x89, 0x33, 0x80, 0x3b, 0x95, 0x25, 0x98, 0x2a, 0x90, 0x23, 0x99, 0x2b, 0xe5, 0xcc, 0xe8, 0xc1, 0xc9, 0xe0, 0xc0, 0xe9, 0xd5, 0xf5, 0xd8, 0xf8, 0xd0, 0xf0, 0xd9, 0xf9, 0xa5, 0x1c, 0xa8, 0x12, 0x1b, 0xa0, 0x13, 0xa9, 0x05, 0xb5, 0x0a, 0xb8, 0x03, 0xb0, 0x0b, 0xb9, 0x32, 0x88, 0x3c, 0x85, 0x8d, 0x34, 0x84, 0x3d, 0x91, 0x22, 0x9c, 0x2c, 0x94, 0x24, 0x9d, 0x2d, 0x62, 0x4a, 0x6c, 0x45, 0x4d, 0x64, 0x44, 0x6d, 0x52, 0x72, 0x5c, 0x7c, 0x54, 0x74, 0x5d, 0x7d, 0xa1, 0x1a, 0xac, 0x15, 0x1d, 0xa4, 0x14, 0xad, 0x02, 0xb1, 0x0c, 0xbc, 0x04, 0xb4, 0x0d, 0xbd, 0xe1, 0xc8, 0xec, 0xc5, 0xcd, 0xe4, 0xc4, 0xed, 0xd1, 0xf1, 0xdc, 0xfc, 0xd4, 0xf4, 0xdd, 0xfd, 0x36, 0x8e, 0x38, 0x82, 0x8b, 0x30, 0x83, 0x39, 0x96, 0x26, 0x9a, 0x28, 0x93, 0x20, 0x9b, 0x29, 0x66, 0x4e, 0x68, 0x41, 0x49, 0x60, 0x40, 0x69, 0x56, 0x76, 0x58, 0x78, 0x50, 0x70, 0x59, 0x79, 0xa6, 0x1e, 0xaa, 0x11, 0x19, 0xa3, 0x10, 0xab, 0x06, 0xb6, 0x08, 0xba, 0x00, 0xb3, 0x09, 0xbb, 0xe6, 0xce, 0xea, 0xc2, 0xcb, 0xe3, 0xc3, 0xeb, 0xd6, 0xf6, 0xda, 0xfa, 0xd3, 0xf3, 0xdb, 0xfb, 0x31, 0x8a, 0x3e, 0x86, 0x8f, 0x37, 0x87, 0x3f, 0x92, 0x21, 0x9e, 0x2e, 0x97, 0x27, 0x9f, 0x2f, 0x61, 0x48, 0x6e, 0x46, 0x4f, 0x67, 0x47, 0x6f, 0x51, 0x71, 0x5e, 0x7e, 0x57, 0x77, 0x5f, 0x7f, 0xa2, 0x18, 0xae, 0x16, 0x1f, 0xa7, 0x17, 0xaf, 0x01, 0xb2, 0x0e, 0xbe, 0x07, 0xb7, 0x0f, 0xbf, 0xe2, 0xca, 0xee, 0xc6, 0xcf, 0xe7, 0xc7, 0xef, 0xd2, 0xf2, 0xde, 0xfe, 0xd7, 0xf7, 0xdf, 0xff};
const unsigned char sbox_8_inv[256] = {0xac, 0xe8, 0x68, 0x3c, 0x6c, 0x38, 0xa8, 0xec, 0xaa, 0xae, 0x3a, 0x3e, 0x6a, 0x6e, 0xea, 0xee, 0xa6, 0xa3, 0x33, 0x36, 0x66, 0x63, 0xe3, 0xe6, 0xe1, 0xa4, 0x61, 0x34, 0x31, 0x64, 0xa1, 0xe4, 0x8d, 0xc9, 0x49, 0x1d, 0x4d, 0x19, 0x89, 0xcd, 0x8b, 0x8f, 0x1b, 0x1f, 0x4b, 0x4f, 0xcb, 0xcf, 0x85, 0xc0, 0x40, 0x15, 0x45, 0x10, 0x80, 0xc5, 0x82, 0x87, 0x12, 0x17, 0x42, 0x47, 0xc2, 0xc7, 0x96, 0x93, 0x03, 0x06, 0x56, 0x53, 0xd3, 0xd6, 0xd1, 0x94, 0x51, 0x04, 0x01, 0x54, 0x91, 0xd4, 0x9c, 0xd8, 0x58, 0x0c, 0x5c, 0x08, 0x98, 0xdc, 0x9a, 0x9e, 0x0a, 0x0e, 0x5a, 0x5e, 0xda, 0xde, 0x95, 0xd0, 0x50, 0x05, 0x55, 0x00, 0x90, 0xd5, 0x92, 0x97, 0x02, 0x07, 0x52, 0x57, 0xd2, 0xd7, 0x9d, 0xd9, 0x59, 0x0d, 0x5d, 0x09, 0x99, 0xdd, 0x9b, 0x9f, 0x0b, 0x0f, 0x5b, 0x5f, 0xdb, 0xdf, 0x16, 0x13, 0x83, 0x86, 0x46, 0x43, 0xc3, 0xc6, 0x41, 0x14, 0xc1, 0x84, 0x11, 0x44, 0x81, 0xc4, 0x1c, 0x48, 0xc8, 0x8c, 0x4c, 0x18, 0x88, 0xcc, 0x1a, 0x1e, 0x8a, 0x8e, 0x4a, 0x4e, 0xca, 0xce, 0x35, 0x60, 0xe0, 0xa5, 0x65, 0x30, 0xa0, 0xe5, 0x32, 0x37, 0xa2, 0xa7, 0x62, 0x67, 0xe2, 0xe7, 0x3d, 0x69, 0xe9, 0xad, 0x6d, 0x39, 0xa9, 0xed, 0x3b, 0x3f, 0xab, 0xaf, 0x6b, 0x6f, 0xeb, 0xef, 0x26, 0x23, 0xb3, 0xb6, 0x76, 0x73, 0xf3, 0xf6, 0x71, 0x24, 0xf1, 0xb4, 0x21, 0x74, 0xb1, 0xf4, 0x2c, 0x78, 0xf8, 0xbc, 0x7c, 0x28, 0xb8, 0xfc, 0x2a, 0x2e, 0xba, 0xbe, 0x7a, 0x7e, 0xfa, 0xfe, 0x25, 0x70, 0xf0, 0xb5, 0x75, 0x20, 0xb0, 0xf5, 0x22, 0x27, 0xb2, 0xb7, 0x72, 0x77, 0xf2, 0xf7, 0x2d, 0x79, 0xf9, 0xbd, 0x7d, 0x29, 0xb9, 0xfd, 0x2b, 0x2f, 0xbb, 0xbf, 0x7b, 0x7f, 0xfb, 0xff};
// ShiftAndSwitchRows permutation
const unsigned char P[16] = {0, 1, 2, 3, 7, 4, 5, 6, 10, 11, 8, 9, 13, 14, 15, 12};
const unsigned char P_inv[16] = {0, 1, 2, 3, 5, 6, 7, 4, 10, 11, 8, 9, 15, 12, 13, 14};
// Tweakey permutation
const unsigned char TWEAKEY_P[16] = {9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7};
const unsigned char TWEAKEY_P_inv[16] = {8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1};
// round constants
const unsigned char RC[62] = {
0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, 0x37, 0x2F,
0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E, 0x1D, 0x3A, 0x35, 0x2B,
0x16, 0x2C, 0x18, 0x30, 0x21, 0x02, 0x05, 0x0B, 0x17, 0x2E,
0x1C, 0x38, 0x31, 0x23, 0x06, 0x0D, 0x1B, 0x36, 0x2D, 0x1A,
0x34, 0x29, 0x12, 0x24, 0x08, 0x11, 0x22, 0x04, 0x09, 0x13,
0x26, 0x0c, 0x19, 0x32, 0x25, 0x0a, 0x15, 0x2a, 0x14, 0x28,
0x10, 0x20};
FILE *fic;
// Seed libc's rand() from the wall clock plus a caller-supplied offset,
// so concurrent callers (one per OpenMP thread) get distinct streams.
void init_prng(int offset) {
    unsigned int seed = 10*time(NULL) + 11*offset;
    srand(seed);
    printf("[+] PRNG initialized to 0x%08X\n", seed);
}
// Write the 4x4 state to the log file `fic` as hex: for 64-bit versions
// two nibbles are packed per output byte (8 hex bytes); for 128-bit
// versions each cell is one byte (16 hex bytes).
void display_matrix(unsigned char state[4][4], int ver)
{
    int i;
    unsigned char input[16];
    if (versions[ver][0] == 64)
    {
        // pack cell 2i (high nibble) and cell 2i+1 (low nibble)
        for (i = 0; i < 8; i++)
            input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
        for (i = 0; i < 8; i++)
            fprintf(fic, "%02x", input[i]);
    }
    else if (versions[ver][0] == 128)
    {
        for (i = 0; i < 16; i++)
            input[i] = state[i >> 2][i & 0x3] & 0xFF;
        for (i = 0; i < 16; i++)
            fprintf(fic, "%02x", input[i]);
    }
}
// Log the cipher state and every active tweakey word (TK1..TKz, where
// z = key size / block size) to the file `fic`.
void display_cipher_state(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
    int tk_words = (int)(versions[ver][1] / versions[ver][0]);
    int k;
    fprintf(fic, "S = ");
    display_matrix(state, ver);
    for (k = 0; k < tk_words; k++)
    {
        fprintf(fic, " - TK%i = ", k + 1);
        display_matrix(keyCells[k], ver);
    }
}
// Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state
// XOR the current subtweakey (top two rows of all active TK words) into
// the state, then advance the tweakey schedule one round: permute every
// TK word with TWEAKEY_P and clock the LFSR on the top two rows of TK2
// and TK3 (TK1 is only permuted).
void AddKey(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
    int i, j, k;
    unsigned char pos;
    unsigned char keyCells_tmp[3][4][4];
    // apply the subtweakey to the internal state (top two rows only)
    for (i = 0; i <= 1; i++)
    {
        for (j = 0; j < 4; j++)
        {
            state[i][j] ^= keyCells[0][i][j];
            // XOR in TK2 (and TK3) when the key is 2x (3x) the block size
            if (2 * versions[ver][0] == versions[ver][1])
                state[i][j] ^= keyCells[1][i][j];
            else if (3 * versions[ver][0] == versions[ver][1])
                state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j];
        }
    }
    // update the subtweakey states with the permutation
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i < 4; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of the TWEAKEY permutation
                pos = TWEAKEY_P[j + 4 * i];
                keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3];
            }
        }
    }
    // update the subtweakey states with the LFSRs (rows 0-1 only:
    // the permutation just moved the fresh halves into the top rows)
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i <= 1; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of LFSRs for TK updates
                if (k == 1)
                {
                    // TK2 LFSR: left shift with feedback taps
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01);
                }
                else if (k == 2)
                {
                    // TK3 LFSR: right shift with feedback taps
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j]) & 0x8) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80);
                }
            }
        }
    }
    // commit the updated tweakey words
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i < 4; i++)
        {
            for (j = 0; j < 4; j++)
            {
                keyCells[k][i][j] = keyCells_tmp[k][i][j];
            }
        }
    }
}
// Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state (inverse function}
// Inverse of AddKey: first rewind the tweakey schedule one round (inverse
// permutation, then inverse LFSR on rows 2-3 -- the rows the inverse
// permutation moved the clocked halves back into), then XOR the restored
// subtweakey into the top two rows of the state.
void AddKey_inv(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
    int i, j, k;
    unsigned char pos;
    unsigned char keyCells_tmp[3][4][4];
    // update the subtweakey states with the permutation
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i < 4; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of the inverse TWEAKEY permutation
                pos = TWEAKEY_P_inv[j + 4 * i];
                keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3];
            }
        }
    }
    // update the subtweakey states with the LFSRs (rows 2-3)
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 2; i <= 3; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of inverse LFSRs for TK updates
                if (k == 1)
                {
                    // inverse of the TK2 LFSR (right shift form)
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8) ^ ((keyCells_tmp[k][i][j]) & 0x8);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80);
                }
                else if (k == 2)
                {
                    // inverse of the TK3 LFSR (left shift form)
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01);
                }
            }
        }
    }
    // commit the rewound tweakey words
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i < 4; i++)
        {
            for (j = 0; j < 4; j++)
            {
                keyCells[k][i][j] = keyCells_tmp[k][i][j];
            }
        }
    }
    // apply the subtweakey to the internal state (XOR is self-inverse)
    for (i = 0; i <= 1; i++)
    {
        for (j = 0; j < 4; j++)
        {
            state[i][j] ^= keyCells[0][i][j];
            if (2 * versions[ver][0] == versions[ver][1])
                state[i][j] ^= keyCells[1][i][j];
            else if (3 * versions[ver][0] == versions[ver][1])
                state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j];
        }
    }
}
// Apply the constants: using a LFSR counter on 6 bits, we XOR the 6 bits to the first 6 bits of the internal state
void AddConstants(unsigned char state[4][4], int r)
{
state[0][0] ^= (RC[r] & 0xf);
state[1][0] ^= ((RC[r] >> 4) & 0x3);
state[2][0] ^= 0x2;
}
// apply the 4-bit Sbox
// Substitute every cell of the state through the 4-bit Sbox.
void SubCell4(unsigned char state[4][4])
{
    int cell;
    for (cell = 0; cell < 16; cell++)
        state[cell >> 2][cell & 0x3] = sbox_4[state[cell >> 2][cell & 0x3]];
}
// apply the 4-bit inverse Sbox
// Substitute every cell of the state through the inverse 4-bit Sbox.
void SubCell4_inv(unsigned char state[4][4])
{
    int cell;
    for (cell = 0; cell < 16; cell++)
        state[cell >> 2][cell & 0x3] = sbox_4_inv[state[cell >> 2][cell & 0x3]];
}
// apply the 8-bit Sbox
// Substitute every cell of the state through the 8-bit Sbox.
void SubCell8(unsigned char state[4][4])
{
    int cell;
    for (cell = 0; cell < 16; cell++)
        state[cell >> 2][cell & 0x3] = sbox_8[state[cell >> 2][cell & 0x3]];
}
// apply the 8-bit inverse Sbox
// Substitute every cell of the state through the inverse 8-bit Sbox.
void SubCell8_inv(unsigned char state[4][4])
{
    int cell;
    for (cell = 0; cell < 16; cell++)
        state[cell >> 2][cell & 0x3] = sbox_8_inv[state[cell >> 2][cell & 0x3]];
}
// Apply the ShiftRows function
// Permute the 16 cells of the state with P (ShiftAndSwitchRows):
// destination cell `cell` pulls from source cell P[cell].
void ShiftRows(unsigned char state[4][4])
{
    unsigned char out[4][4];
    int cell;
    for (cell = 0; cell < 16; cell++)
    {
        int src = P[cell];
        out[cell >> 2][cell & 0x3] = state[src >> 2][src & 0x3];
    }
    memcpy(state, out, sizeof(out));
}
// Apply the inverse ShiftRows function
// Undo ShiftRows: destination cell `cell` pulls from source P_inv[cell].
void ShiftRows_inv(unsigned char state[4][4])
{
    unsigned char out[4][4];
    int cell;
    for (cell = 0; cell < 16; cell++)
    {
        int src = P_inv[cell];
        out[cell >> 2][cell & 0x3] = state[src >> 2][src & 0x3];
    }
    memcpy(state, out, sizeof(out));
}
// Apply the linear diffusion matrix
//M =
//1 0 1 1
//1 0 0 0
//0 1 1 0
//1 0 1 0
void MixColumn(unsigned char state[4][4])
{
    // Multiply each column by the binary matrix M (see comment above),
    // written in direct form instead of the in-place XOR/rotate chain.
    int col;
    for (col = 0; col < 4; col++)
    {
        unsigned char a = state[0][col];
        unsigned char b = state[1][col];
        unsigned char c = state[2][col];
        unsigned char d = state[3][col];
        state[0][col] = a ^ c ^ d; // row (1 0 1 1)
        state[1][col] = a;         // row (1 0 0 0)
        state[2][col] = b ^ c;     // row (0 1 1 0)
        state[3][col] = a ^ c;     // row (1 0 1 0)
    }
}
// Apply the inverse linear diffusion matrix
// Multiply each column by M^-1, written in direct form; exactly undoes
// MixColumn.
void MixColumn_inv(unsigned char state[4][4])
{
    int col;
    for (col = 0; col < 4; col++)
    {
        unsigned char a = state[0][col];
        unsigned char b = state[1][col];
        unsigned char c = state[2][col];
        unsigned char d = state[3][col];
        state[0][col] = b;
        state[1][col] = b ^ c ^ d;
        state[2][col] = b ^ d;
        state[3][col] = a ^ d;
    }
}
// decryption function of Skinny
// Decrypt `input` in place over r rounds of Skinny version `ver`.
// The tweakey schedule only runs forward, so the key state is first
// fast-forwarded r rounds (AddKey on a throwaway state), after which
// AddKey_inv rewinds it one round at a time while undoing each round
// in reverse operation order.
void dec(unsigned char *input, const unsigned char *userkey, int ver, int r)
{
    unsigned char state[4][4];
    unsigned char dummy[4][4] = {{0}};   // sink for the fast-forward below
    unsigned char keyCells[3][4][4];
    int i;
    memset(keyCells, 0, 48);
    // unpack input/key bytes into 4x4 cell matrices (nibbles for 64-bit
    // block versions, bytes for 128-bit ones)
    for (i = 0; i < 16; i++)
    {
        if (versions[ver][0] == 64)
        {
            if (i & 1)
            {
                state[i >> 2][i & 0x3] = input[i >> 1] & 0xF;
                keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF;
                if (versions[ver][1] >= 128)
                    keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF;
                if (versions[ver][1] >= 192)
                    keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF;
            }
            else
            {
                state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF;
                keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF;
                if (versions[ver][1] >= 128)
                    keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF;
                if (versions[ver][1] >= 192)
                    keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF;
            }
        }
        else if (versions[ver][0] == 128)
        {
            state[i >> 2][i & 0x3] = input[i] & 0xFF;
            keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF;
            if (versions[ver][1] >= 256)
                keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF;
            if (versions[ver][1] >= 384)
                keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF;
        }
    }
    // fast-forward the tweakey schedule to its state after round r
    for (i = r - 1; i >= 0; i--)
    {
        AddKey(dummy, keyCells, ver);
    }
#ifdef DEBUG
    fprintf(fic, "DEC - initial state: ");
    display_cipher_state(state, keyCells, ver);
    fprintf(fic, "\n");
#endif
    // undo rounds r-1 .. 0, each in reverse operation order
    for (i = r - 1; i >= 0; i--)
    {
        MixColumn_inv(state);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after MixColumn_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        ShiftRows_inv(state);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after ShiftRows_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        AddKey_inv(state, keyCells, ver);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after AddKey_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        // AddConstants is a plain XOR, hence self-inverse
        AddConstants(state, i);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after AddConstants_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        if (versions[ver][0] == 64)
            SubCell4_inv(state);
        else
            SubCell8_inv(state);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after SubCell_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
    }
#ifdef DEBUG
    fprintf(fic, "DEC - final state: ");
    display_cipher_state(state, keyCells, ver);
    fprintf(fic, "\n");
#endif
    // repack the state into the caller's buffer
    if (versions[ver][0] == 64)
    {
        for (i = 0; i < 8; i++)
            input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
    }
    else if (versions[ver][0] == 128)
    {
        for (i = 0; i < 16; i++)
            input[i] = state[i >> 2][i & 0x3] & 0xFF;
    }
}
// encryption function of Skinny
// Encrypt `input` in place over r rounds of Skinny version `ver`.
// Round order: SubCell -> AddConstants -> AddKey -> ShiftRows -> MixColumn.
void enc(unsigned char *input, const unsigned char *userkey, int ver, int r)
{
    unsigned char state[4][4];
    unsigned char keyCells[3][4][4];
    int i;
    memset(keyCells, 0, 48);
    // unpack input/key bytes into 4x4 cell matrices (nibbles for 64-bit
    // block versions, bytes for 128-bit ones)
    for (i = 0; i < 16; i++)
    {
        if (versions[ver][0] == 64)
        {
            if (i & 1)
            {
                state[i >> 2][i & 0x3] = input[i >> 1] & 0xF;
                keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF;
                if (versions[ver][1] >= 128)
                    keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF;
                if (versions[ver][1] >= 192)
                    keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF;
            }
            else
            {
                state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF;
                keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF;
                if (versions[ver][1] >= 128)
                    keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF;
                if (versions[ver][1] >= 192)
                    keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF;
            }
        }
        else if (versions[ver][0] == 128)
        {
            state[i >> 2][i & 0x3] = input[i] & 0xFF;
            keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF;
            if (versions[ver][1] >= 256)
                keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF;
            if (versions[ver][1] >= 384)
                keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF;
        }
    }
#ifdef DEBUG
    fprintf(fic, "ENC - initial state: ");
    display_cipher_state(state, keyCells, ver);
    fprintf(fic, "\n");
#endif
    for (i = 0; i < r; i++)
    {
        if (versions[ver][0] == 64)
            SubCell4(state);
        else
            SubCell8(state);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after SubCell: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        AddConstants(state, i);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after AddConstants: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        // AddKey also advances the tweakey schedule one round
        AddKey(state, keyCells, ver);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after AddKey: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        ShiftRows(state);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after ShiftRows: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        MixColumn(state);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after MixColumn: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
    } //The last subtweakey should not be added
#ifdef DEBUG
    fprintf(fic, "ENC - final state: ");
    display_cipher_state(state, keyCells, ver);
    fprintf(fic, "\n");
#endif
    // repack the state into the caller's buffer
    if (versions[ver][0] == 64)
    {
        for (i = 0; i < 8; i++)
            input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
    }
    else if (versions[ver][0] == 128)
    {
        for (i = 0; i < 16; i++)
            input[i] = state[i >> 2][i & 0x3] & 0xFF;
    }
}
// generate test vectors for all the versions of Skinny
// Generate 9 random test vectors for one Skinny version: log key,
// plaintext, ciphertext, and the decryption round-trip P' to `fic`.
// NOTE(review): the round count is hard-coded to 10 rather than
// versions[ver][2], and the raw plaintext is also printed to stdout --
// both look like debugging leftovers; confirm before relying on output.
void TestVectors(int ver)
{
    unsigned char p[16];
    unsigned char c[16];
    unsigned char k[48];
    int n;
    for (n = 1; n < 10; n++)
    {
        int i;
        // random plaintext, kept in both p (reference) and c (worked on)
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            c[i] = p[i] = rand() & 0xff;
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            printf("%02x", p[i]);
        printf("\n");
        // random tweakey of the full key length for this version
        for (i = 0; i < (versions[ver][1] >> 3); i++)
            k[i] = rand() & 0xff;
        fprintf(fic, "TK = ");
        for (i = 0; i < (versions[ver][1] >> 3); i++)
            fprintf(fic, "%02x", k[i]);
        fprintf(fic, "\n");
        fprintf(fic, "P = ");
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            fprintf(fic, "%02x", p[i]);
        fprintf(fic, "\n");
        enc(c, k, ver, 10);
        fprintf(fic, "C = ");
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            fprintf(fic, "%02x", c[i]);
        fprintf(fic, "\n");
        // decrypt in place; P' should equal P if enc/dec are consistent
        dec(c, k, ver, 10);
        fprintf(fic, "P' = ");
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            fprintf(fic, "%02x", c[i]);
        fprintf(fic, "\n\n");
    }
}
// Send N3 boomerangs for the quartet of keys (k1..k4): encrypt a random
// pair (p1, p1^dp), flip both ciphertexts by dc, decrypt under the
// related keys, and count quartets whose plaintext difference returns
// to dp. Returns the number of returning boomerangs.
// NOTE(review): rand() is called here from inside OpenMP worker threads;
// libc's PRNG state is shared and rand() is not required to be
// thread-safe -- results may be statistically correlated across threads.
int boomerang(int r, int ver, int N3, unsigned char *dp, unsigned char *dc, unsigned char *k1, unsigned char *k2, unsigned char *k3, unsigned char *k4)
{
    int i;
    unsigned char p1[16], p2[16];
    unsigned char c3[16], c4[16];
    int num = 0;
    for (int t = 0; t < N3; t++)
    {
        // randomly choose p1
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            p1[i] = rand() & 0xff;
        // derive p2 = p1 ^ dp
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            p2[i] = p1[i] ^ dp[i];
        // enc() works in place: p1/p2 now hold the ciphertexts c1/c2
        enc(p1, k1, ver, r);
        enc(p2, k2, ver, r);
        // derive c3 = c1 ^ dc
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            c3[i] = p1[i] ^ dc[i];
        // derive c4 = c2 ^ dc
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            c4[i] = p2[i] ^ dc[i];
        dec(c3, k3, ver, r);
        dec(c4, k4, ver, r);
        // the boomerang "returns" if p3 ^ p4 == dp in every byte
        bool flag = 1;
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            if ((c3[i] ^ c4[i]) != dp[i])
                flag = 0;
        if (flag)
        {
            num++;
        }
    }
    return num;
}
// Launch N1 parallel workers, each sending N2 bunches of N3 boomerangs
// under a random key quartet derived from dk1/dk2, and report the total
// hit count and its empirical probability 2^(-x).
// Fixes vs. the original: results are stored per loop iteration
// (NUM[counter]) instead of per thread id -- with NUM[ID], a thread that
// executed more than one iteration overwrote its earlier count and left
// other slots uninitialized; the query-count product is computed in
// double to avoid int overflow for large N1*N2*N3.
double send_boomerangs(int R, int ver, int N1, int N2, int N3, unsigned char *dp, unsigned char *dc, unsigned char *dk1, unsigned char *dk2)
{
    int NUM[N1];
    int counter;
    double total_queries = (double)N1 * (double)N2 * (double)N3;
    printf("#Rounds: %d rounds\n", R);
    printf("#Total Queries = (#Parallel threads) * (#Bunches per thread) * (#Queries per bunch) = %d * %d * %d = 2^(%f)\n", N1, N2, N3, log(total_queries) / log(2));
    srand((unsigned)time(NULL));
    unsigned char k1[48], k2[48], k3[48], k4[48];
    // randomly choose k1
    for (int i = 0; i < (versions[ver][1] >> 3); i++)
        k1[i] = rand() & 0xff;
    // derive k2 = k1 ^ dk1
    for (int i = 0; i < (versions[ver][1] >> 3); i++)
        k2[i] = k1[i] ^ dk1[i];
    // derive k3 = k1 ^ dk2
    for (int i = 0; i < (versions[ver][1] >> 3); i++)
        k3[i] = k1[i] ^ dk2[i];
    // derive k4 = k2 ^ dk2
    for (int i = 0; i < (versions[ver][1] >> 3); i++)
        k4[i] = k2[i] ^ dk2[i];
    clock_t clock_timer = clock();
    double wall_timer = omp_get_wtime();
    omp_set_num_threads(N1);
#pragma omp parallel for
    for (counter = 0; counter < N1; counter++)
    {
        int num = 0;
        int ID = omp_get_thread_num();
        init_prng(ID);    // per-thread reseed of the shared libc PRNG
        for (int j = 0; j < N2; j++)
        {
            num += boomerang(R, ver, N3, dp, dc, k1, k2, k3, k4);
        }
        // index by the loop counter so every iteration owns its own slot
        NUM[counter] = num;
    }
    printf("%s: %0.4f\n", "time on clock", (double)(clock() - clock_timer) / CLOCKS_PER_SEC);
    printf("%s: %0.4f\n", "time on wall", omp_get_wtime() - wall_timer);
    double sum = 0;
    for (int i = 0; i < N1; i++)
        sum += NUM[i];
    printf("sum = %f\n", sum);
    double sum_temp = total_queries / sum;
    printf("2^(-%f)\n\n", log(sum_temp) / log(2));
    printf("##########################\n");
    return sum;
}
/*
 * Parse a hex string (2 digits per byte) into the state-difference array dx.
 * versions[ver][0] is the block size in bits, so versions[ver][0] >> 3 bytes
 * are consumed; hex_str must hold at least twice that many characters.
 *
 * Fix: the 2-character digit buffer was passed to strtol() without a NUL
 * terminator, which is undefined behavior (strtol requires a NUL-terminated
 * string and could read past the buffer).
 */
void convert_hexstr_to_statearray(int ver, char hex_str[], unsigned char dx[16])
{
    for (int i = 0; i < (versions[ver][0] >> 3); i++)
    {
        char hex[3];
        hex[0] = hex_str[2 * i];
        hex[1] = hex_str[2 * i + 1];
        hex[2] = '\0'; /* strtol needs a NUL-terminated string */
        dx[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff);
    }
}
/*
 * Parse a hex string (2 digits per byte) into the tweakey-difference array dt.
 * versions[ver][1] is the tweakey size in bits, so versions[ver][1] >> 3
 * bytes are consumed; hex_str must hold at least twice that many characters.
 *
 * Fix: the 2-character digit buffer was passed to strtol() without a NUL
 * terminator, which is undefined behavior (strtol requires a NUL-terminated
 * string and could read past the buffer).
 */
void convert_hexstr_to_tweakarray(int ver, char hex_str[], unsigned char dt[48])
{
    for (int i = 0; i < (versions[ver][1] >> 3); i++)
    {
        char hex[3];
        hex[0] = hex_str[2 * i];
        hex[1] = hex_str[2 * i + 1];
        hex[2] = '\0'; /* strtol needs a NUL-terminated string */
        dt[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff);
    }
}
/*
 * Boomerang-distinguisher driver for Skinny.
 *
 * Configures the number of rounds, the cipher version, and the plaintext/
 * ciphertext/tweakey differences, then runs NumOfExperiments independent
 * experiments via send_boomerangs() and reports each measured probability
 * (and their average) as 2^(-p).
 *
 * Fixes vs. the original:
 *  - N1*N2*N3 is computed in double before dividing, so the product cannot
 *    overflow int;
 *  - results are formatted with snprintf so the fixed 20-byte result slots
 *    cannot be overflowed.
 */
int main()
{
    unsigned char dp[16];  /* plaintext difference */
    unsigned char dc[16];  /* ciphertext-side difference */
    unsigned char dk1[48]; /* tweakey difference k1->k2 */
    unsigned char dk2[48]; /* tweakey difference k1->k3 (and k2->k4) */
    // #######################################################################################################
    // ############################## User must change only the following lines ##############################
    int R = 6;   // Number of rounds
    int ver = 5; // Determine the version:
                 // [0 = Skinny-64-64]
                 // [1 = Skinny-64-128]
                 // [2 = Skinny-64-192]
                 // [3 = Skinny-128-128]
                 // [4 = Skinny-128-256]
                 // [5 = Skinny-128-384]
    char dp_str[] = "00000000000000000000004000000000";
    char dc_str[] = "00000000000000000000000000000000";
    char dk1_str[] = "00000000000000000000000000002a00000000000000000000000000000099000000000000000000000000000000f300";
    char dk2_str[] = "000000000000000000000054000000000000000000000000000000f30000000000000000000000000000007f00000000";
    // #######################################################################################################
    convert_hexstr_to_statearray(ver, dp_str, dp);
    convert_hexstr_to_statearray(ver, dc_str, dc);
    convert_hexstr_to_tweakarray(ver, dk1_str, dk1);
    convert_hexstr_to_tweakarray(ver, dk2_str, dk2);
    //########################## Number of queries #########################
    int N1 = Nthreads; // Number of parallel threads : N1
    int deg1 = 12;
    int deg2 = 11;
    int N2 = 1 << deg1; // Number of bunches per thread : N2 = 2^(deg1)
    int N3 = 1 << deg2; // Number of queries per bunch  : N3 = 2^(deg2)
    //################### Number of total queries : N1*N2*N3 ###############
    char all_results[NumOfExperiments][20];
    double sum = 0;
    double sum_temp = 0;
    double total_queries = (double)N1 * N2 * N3; /* double: avoids int overflow */
    for (int i = 0; i < NumOfExperiments; i++)
    {
        printf("Experiment Number %d:\n", i);
        sum_temp = send_boomerangs(R, ver, N1, N2, N3, dp, dc, dk1, dk2);
        sum += sum_temp;
        sum_temp = total_queries / sum_temp;
        /* snprintf bounds the write to the 20-byte slot */
        snprintf(all_results[i], sizeof all_results[i], "2^(-%0.2f), ", log(sum_temp) / log(2));
    }
    printf("A summary of all results:\n");
    for (int i = 0; i < NumOfExperiments; i++)
    {
        printf("%s", all_results[i]);
    }
    printf("\n##########################\nAverage = 2^(-%0.4f)\n",
           (log(NumOfExperiments) + log(N1) + log(N2) + log(N3) - log(sum)) / log(2));
    return 0;
}
|
MD5_fmt.c | /*
* This file is part of John the Ripper password cracker,
* Copyright (c) 1996-2001,2008,2010-2012,2017 by Solar Designer
*
* ...with changes in the jumbo patch, by bartavelle and magnum.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* There's ABSOLUTELY NO WARRANTY, express or implied.
*/
#include <string.h>
#include "arch.h"
#include "misc.h"
#include "simd-intrinsics.h"
#include "MD5_std.h"
#include "common.h"
#include "formats.h"
#include "cryptmd5_common.h"
#if defined(_OPENMP) && defined(SIMD_PARA_MD5)
#ifndef OMP_SCALE
#define OMP_SCALE 4
#endif
#include <omp.h>
#endif
#include "memdbg.h"
#define FORMAT_LABEL "md5crypt"
#define FORMAT_NAME "crypt(3) $1$"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 15
#define CIPHERTEXT_LENGTH 22
#ifdef SIMD_PARA_MD5
#define BINARY_SIZE 16
#else
#define BINARY_SIZE 4
#endif
#define BINARY_ALIGN 4
#define SALT_SIZE 9
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT MD5_N
#define MAX_KEYS_PER_CRYPT MD5_N
/* Self-test vectors: {ciphertext, plaintext} pairs covering md5crypt ($1$),
 * Apache apr1 ($apr1$) and AIX smd5 ({smd5}) variants, plus lengths from
 * the empty password up to PLAINTEXT_LENGTH. NULL-terminated. */
static struct fmt_tests tests[] = {
	{"$1$12345678$aIccj83HRDBo6ux1bVx7D1", "0123456789ABCDE"},
	{"$apr1$Q6ZYh...$RV6ft2bZ8j.NGrxLYaJt9.", "test"},
	{"$1$12345678$f8QoJuo0DpBRfQSD0vglc1", "12345678"},
	{"$1$$qRPK7m23GJusamGpoGLby/", ""},
	{"$apr1$a2Jqm...$grFrwEgiQleDr0zR4Jx1b.", "15 chars is max"},
	{"$1$$AuJCr07mI7DSew03TmBIv/", "no salt"},
	{"$1$`!@#%^&*$E6hD76/pKTS8qToBCkux30", "invalid salt"},
	{"$1$12345678$xek.CpjQUVgdf/P2N9KQf/", ""},
	{"$1$1234$BdIMOAWFOV2AQlLsrN/Sw.", "1234"},
	{"$apr1$rBXqc...$NlXxN9myBOk95T0AyLAsJ0", "john"},
	{"$apr1$Grpld/..$qp5GyjwM2dnA5Cdej9b411", "the"},
	{"$apr1$GBx.D/..$yfVeeYFCIiEXInfRhBRpy/", "ripper"},
	{"$1$bb$19smCEBG0Q1pVil0/HqK./", "aaaaa"},
	{"$1$coin$rebm0t9KJ56mgGWJF5o5M0", "lapin"},
	{"$1$pouet$/Ecz/vyk.zCYvrr6wB78h0", "canard"},
	{"$1$test2$02MCIATVoxq3IhgK6XRkb1", "test1"},
	{"$1$aussi$X67z3kXsWo92F15uChx1H1", "felicie"},
	{"$1$boire$gf.YM2y3InYEu9.NbVr.v0", "manger"},
	{"$1$bas$qvkmmWnVHRCSv/6LQ1doH/", "haut"},
	{"$1$gauche$EPvd6LZlrgb0MMFPxUrJN1", "droite"},
	/* following hashes are AIX non-standard smd5 hashes */
	{"{smd5}s8/xSJ/v$uGam4GB8hOjTLQqvBfxJ2/", "password"},
	{"{smd5}alRJaSLb$aKM3H1.h1ycXl5GEVDH1e1", "aixsucks?"},
	{"{smd5}eLB0QWeS$Eg.YfWY8clZuCxF0xNrKg.", "0123456789ABCDE"},
	/* following hashes are AIX standard smd5 hashes (with corrected tag)
	 * lpa_options = std_hash=true */
	{"$1$JVDbGx8K$T9h8HK4LZxeLPMTAxCfpc1", "password"},
	{"$1$1Cu6fEvv$42kuaJ5fMEqyVStPuFG040", "0123456789ABCDE"},
	{"$1$ql5x.xXL$vYVDhExol2xUBBpERRWcn1", "jtr>hashcat"},
	{"$1$27iyq7Ya$miN09fW1Scj0DHVNyewoU/", ""},
	{"$1$84Othc1n$v1cuReaa5lRdGuHaOa76n0", "a"},
	{"$1$4zq0BsCR$U2ua9WZtDEhzy4gFSiLxN1", "aa"},
	{"$1$DKwjKWxp$PY6PdlPZsXjOppPDoFOz4.", "aaa"},
	{"$1$OKDV6ppN$viTVmH48bSePiCrMvXT/./", "aaaa"},
	{"$1$QEWsCY0O$xrTTMKTepiHMp7Oxgz0pX/", "aaaaa"},
	{"$1$5dfdk2dF$XiJBPNrfKcCgdQ/kcoB40/", "aaaaaa"},
	{"$1$Ps6A1Cy6$WsvLg9cQhm9JU0rXkLEtz.", "aaaaaaa"},
	{"$1$9IK7nZ4M$4nx7Mdj05KGPJX/mZaDrh.", "aaaaaaaa"},
	{"$1$l3pNTqwT$GAc.dcRaxCvC20CFGCjp4/", "aaaaaaaaa"},
	{"$1$jSAARhJR$6daQ/ekjAL0MgOUgGJyp10", "aaaaaaaaaa"},
	{"$1$wk3Xwqqg$2AtdiucwJvJgbaVT1jWpb0", "aaaaaaaaaaa"},
	{"$1$G6Fn69Ei$d7AKJUOIdz/gO4Utc0TQP1", "aaaaaaaaaaaa"},
	{"$1$A7XJ7lGK$W5jTnH/4lW4XwZ.6F7n1N.", "aaaaaaaaaaaaa"},
	{"$1$Rcm46RfA$LfdIK/OP16yHzMYHSlx/B.", "aaaaaaaaaaaaaa"},
	{"$1$4bCSSJMN$TcYKTsukD4SFJE1n4MwMZ/", "aaaaaaaaaaaaaaa"},
#if PLAINTEXT_LENGTH > 15
	{"$1$mJxBkkl8$u7OHfWCPmNxvf0um7hH89.", "aaaaaaaaaaaaaaaa"},
	{"$1$Ub1gBUt4$TNaLxU7Pq5mk/MiDEb60b/", "aaaaaaaaaaaaaaaaa"},
	{"$1$8ot7QScR$x.p4vjIgdFxxS83x29PkJ0", "aaaaaaaaaaaaaaaaaa"},
	{"$1$wRi4OjD3$eJjKD2AwLMWfOTRYA30zn.", "aaaaaaaaaaaaaaaaaaa"},
	{"$1$lmektrsg$2KSRY4EUFzsYNMg80fG4/0", "aaaaaaaaaaaaaaaaaaaa"},
	{"$1$tgVBKBmE$YRvzsi7qHP2MC1Atg8VCV.", "aaaaaaaaaaaaaaaaaaaaa"},
	{"$1$oTsk88YC$Eh435T1BQzmjQekfqkHof/", "aaaaaaaaaaaaaaaaaaaaaa"},
	{"$1$ykxSZEfP$hJrFeGOFk049L.94Mgggj/", "aaaaaaaaaaaaaaaaaaaaaaa"},
	{"$1$LBK4p5tD$5/gAIx8/7hpTVwDC/.KQv/", "aaaaaaaaaaaaaaaaaaaaaaaa"},
	{"$1$fkEasaUI$G7CelOWHkol2nVHN8XQP40", "aaaaaaaaaaaaaaaaaaaaaaaaa"},
	{"$1$gRevVzeY$eMMQrsl5OHL5dP1p/ktJc/", "aaaaaaaaaaaaaaaaaaaaaaaaaa"},
	{"$1$164TNEjj$ppoV6Ju6Vu63j1OlM4zit/", "aaaaaaaaaaaaaaaaaaaaaaaaaaa"},
	{"$1$ErPmhjp2$lZZstb2M455Xhk50eeH4i/", "aaaaaaaaaaaaaaaaaaaaaaaaaaaa"},
	{"$1$NUssS5fT$QaS4Ywt0IwzxbE0FAGnXn0", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaa"},
	{"$1$NxlTyiJ7$gxkXTEJdeTzY8P6tqKmcz.", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"},
	{"$1$Cmy9x7gW$kamvHI42Kh1CH4Shy6g6S/", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"},
	{"$1$IsuapfCX$4Yq0Adq5nNZgl0LwbSl5Y0", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"},
	{"$1$rSZfNcKX$N4XPvGrfhKsyoEcRSaqmG0", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"},
#endif
	{NULL}
};
/* Per-candidate copy of each plaintext key, one fixed-size slot per index. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
#ifdef SIMD_PARA_MD5
/* Current salt; slot [8] doubles as the variant tag (see set_salt). */
static unsigned char cursalt[SALT_SIZE];
static int CryptType;  /* md5crypt variant tag taken from cursalt[8] */
static MD5_word (*sout);  /* SIMD output buffer, BINARY_SIZE bytes per key */
static int omp_para = 1;  /* OpenMP parallelism factor (threads, later scaled by OMP_SCALE) */
#endif
/*
 * One-time format initialization: set keys-per-crypt and allocate buffers.
 * In the OpenMP+SIMD build, min_keys_per_crypt is set from the raw thread
 * count *before* omp_para is multiplied by OMP_SCALE, and max from the
 * scaled value — the ordering is deliberate.
 */
static void init(struct fmt_main *self)
{
	MD5_std_init(self);
#if defined(_OPENMP) && defined(SIMD_PARA_MD5)
	omp_para = omp_get_max_threads();
	if (omp_para < 1)
		omp_para = 1;
	self->params.min_keys_per_crypt = MD5_N * omp_para;
	omp_para *= OMP_SCALE;
	self->params.max_keys_per_crypt = MD5_N * omp_para;
#elif MD5_std_mt
	self->params.min_keys_per_crypt = MD5_std_min_kpc;
	self->params.max_keys_per_crypt = MD5_std_max_kpc;
#endif
	/* cache-aligned key buffer sized for the final max_keys_per_crypt */
	saved_key = mem_calloc_align(self->params.max_keys_per_crypt,
		sizeof(*saved_key), MEM_ALIGN_CACHE);
#ifdef SIMD_PARA_MD5
	/* SIMD result buffer: BINARY_SIZE bytes of output per key */
	sout = mem_calloc(self->params.max_keys_per_crypt,
		sizeof(*sout) * BINARY_SIZE);
#endif
}
/* Release the buffers allocated in init(); MEM_FREE also NULLs the pointer. */
static void done(void)
{
#ifdef SIMD_PARA_MD5
	MEM_FREE(sout);
#endif
	MEM_FREE(saved_key);
}
/*
 * get_hash_0..6: return the first MD5 output word of candidate 'index',
 * masked to increasing widths (PH_MASK_0..PH_MASK_6) for the cracker's
 * hash tables.
 *
 * SIMD layout: x = lane within a vector block (index % SIMD_COEF_32),
 * y = block number (index / SIMD_COEF_32); each block holds 4 interleaved
 * 32-bit words per lane, hence the x + y*SIMD_COEF_32*4 addressing.
 * Non-SIMD builds read MD5_out after init_t() selects the thread context.
 */
static int get_hash_0(int index)
{
#ifdef SIMD_PARA_MD5
	unsigned int x,y;
	x = index&(SIMD_COEF_32-1);
	y = (unsigned int)index/SIMD_COEF_32;
	return ((MD5_word *)sout)[x+y*SIMD_COEF_32*4] & PH_MASK_0;
#else
	init_t();
	return MD5_out[index][0] & PH_MASK_0;
#endif
}
static int get_hash_1(int index)
{
#ifdef SIMD_PARA_MD5
	unsigned int x,y;
	x = index&(SIMD_COEF_32-1);
	y = (unsigned int)index/SIMD_COEF_32;
	return ((MD5_word *)sout)[x+y*SIMD_COEF_32*4] & PH_MASK_1;
#else
	init_t();
	return MD5_out[index][0] & PH_MASK_1;
#endif
}
static int get_hash_2(int index)
{
#ifdef SIMD_PARA_MD5
	unsigned int x,y;
	x = index&(SIMD_COEF_32-1);
	y = (unsigned int)index/SIMD_COEF_32;
	return ((MD5_word *)sout)[x+y*SIMD_COEF_32*4] & PH_MASK_2;
#else
	init_t();
	return MD5_out[index][0] & PH_MASK_2;
#endif
}
static int get_hash_3(int index)
{
#ifdef SIMD_PARA_MD5
	unsigned int x,y;
	x = index&(SIMD_COEF_32-1);
	y = (unsigned int)index/SIMD_COEF_32;
	return ((MD5_word *)sout)[x+y*SIMD_COEF_32*4] & PH_MASK_3;
#else
	init_t();
	return MD5_out[index][0] & PH_MASK_3;
#endif
}
static int get_hash_4(int index)
{
#ifdef SIMD_PARA_MD5
	unsigned int x,y;
	x = index&(SIMD_COEF_32-1);
	y = (unsigned int)index/SIMD_COEF_32;
	return ((MD5_word *)sout)[x+y*SIMD_COEF_32*4] & PH_MASK_4;
#else
	init_t();
	return MD5_out[index][0] & PH_MASK_4;
#endif
}
static int get_hash_5(int index)
{
#ifdef SIMD_PARA_MD5
	unsigned int x,y;
	x = index&(SIMD_COEF_32-1);
	y = (unsigned int)index/SIMD_COEF_32;
	return ((MD5_word *)sout)[x+y*SIMD_COEF_32*4] & PH_MASK_5;
#else
	init_t();
	return MD5_out[index][0] & PH_MASK_5;
#endif
}
static int get_hash_6(int index)
{
#ifdef SIMD_PARA_MD5
	unsigned int x,y;
	x = index&(SIMD_COEF_32-1);
	y = (unsigned int)index/SIMD_COEF_32;
	return ((MD5_word *)sout)[x+y*SIMD_COEF_32*4] & PH_MASK_6;
#else
	init_t();
	return MD5_out[index][0] & PH_MASK_6;
#endif
}
/*
 * Hash the salt into a bucket index in [0, SALT_HASH_SIZE).
 * Mixes pairs of salt characters, combining their raw byte values with
 * their base-64 digit values from the atoi64 decode table, then folds
 * the accumulator down to SALT_HASH_LOG bits.
 */
static int salt_hash(void *salt)
{
	unsigned int i, h, retval;
	retval = 0;
	for (i = 0; i <= 6; i += 2) {
		h = (unsigned char)atoi64[ARCH_INDEX(((char *)salt)[i])];
		h ^= ((unsigned char *)salt)[i + 1];
		h <<= 6;
		h ^= (unsigned char)atoi64[ARCH_INDEX(((char *)salt)[i + 1])];
		h ^= ((unsigned char *)salt)[i];
		retval += h;
	}
	/* fold high bits down, then mask to the table size */
	retval ^= retval >> SALT_HASH_LOG;
	retval &= SALT_HASH_SIZE - 1;
	return retval;
}
/*
 * Store candidate 'key' at slot 'index'. Non-SIMD builds also feed the
 * key to the scalar MD5 implementation; all builds keep a local copy
 * (truncated to PLAINTEXT_LENGTH) so get_key() can return it later.
 */
static void set_key(char *key, int index)
{
#ifndef SIMD_PARA_MD5
	MD5_std_set_key(key, index);
#endif
	strnfcpy(saved_key[index], key, PLAINTEXT_LENGTH);
}
/*
 * Return the stored key for 'index'. The terminator is (re)written first
 * because the copy made in set_key() may fill the full PLAINTEXT_LENGTH
 * without a trailing NUL.
 */
static char *get_key(int index)
{
	saved_key[index][PLAINTEXT_LENGTH] = 0;
	return saved_key[index];
}
/*
 * Hash all queued candidates under the current salt.
 * SIMD builds split the work into omp_para chunks of MD5_N keys each;
 * chunk t writes its results at byte offset t*MD5_N*BINARY_SIZE into
 * sout (the division by sizeof(MD5_word) converts to a word index).
 * Returns the (unchanged) candidate count.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
#ifdef SIMD_PARA_MD5
#ifdef _OPENMP
	int t;
#pragma omp parallel for
	for (t = 0; t < omp_para; t++)
		md5cryptsse((unsigned char *)(&saved_key[t*MD5_N]), cursalt, (char *)(&sout[t*MD5_N*BINARY_SIZE/sizeof(MD5_word)]), CryptType);
#else
	md5cryptsse((unsigned char *)saved_key, cursalt, (char *)sout, CryptType);
#endif
#else
	MD5_std_crypt(count);
#endif
	return count;
}
/*
 * Quick check whether ANY computed hash matches 'binary'.
 * Only the first 32-bit output word is compared here; exact matches are
 * confirmed later by cmp_one()/cmp_exact(). SIMD builds scan every lane
 * of every vector block; scalar builds scan the MD5_out contexts.
 */
static int cmp_all(void *binary, int count)
{
#ifdef SIMD_PARA_MD5
	unsigned int x,y;
	for (y=0;y<SIMD_PARA_MD5*omp_para;y++) for (x=0;x<SIMD_COEF_32;x++)
	{
		if ( ((MD5_word *)binary)[0] == ((MD5_word *)sout)[x+y*SIMD_COEF_32*4] )
			return 1;
	}
	return 0;
#else
#if MD5_std_mt
	int t, n = (count + (MD5_N - 1)) / MD5_N;
#endif
	for_each_t(n) {
#if MD5_X2
		/* two interleaved contexts per thread slot */
		if (*(MD5_word *)binary == MD5_out[0][0] ||
		    *(MD5_word *)binary == MD5_out[1][0])
			return 1;
#else
		if (*(MD5_word *)binary == MD5_out[0][0])
			return 1;
#endif
	}
	return 0;
#endif
}
/*
 * Compare candidate 'index' against 'binary'.
 * SIMD builds compare all four 32-bit words of the MD5 result (full
 * 16-byte binary), addressing lane x of block y with a SIMD_COEF_32
 * stride between words. Scalar builds compare only the first word (the
 * stored binary is 4 bytes there); cmp_exact() does the full check.
 */
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_PARA_MD5
	unsigned int x,y;
	x = index&(SIMD_COEF_32-1);
	y = (unsigned int)index/SIMD_COEF_32;
	if (((unsigned int*)binary)[0] != ((unsigned int*)sout)[x+y*SIMD_COEF_32*4+0*SIMD_COEF_32])
		return 0;
	if (((unsigned int*)binary)[1] != ((unsigned int*)sout)[x+y*SIMD_COEF_32*4+1*SIMD_COEF_32])
		return 0;
	if (((unsigned int*)binary)[2] != ((unsigned int*)sout)[x+y*SIMD_COEF_32*4+2*SIMD_COEF_32])
		return 0;
	if (((unsigned int*)binary)[3] != ((unsigned int*)sout)[x+y*SIMD_COEF_32*4+3*SIMD_COEF_32])
		return 0;
	return 1;
#else
	init_t();
	return *(MD5_word *)binary == MD5_out[index][0];
#endif
}
/*
 * Final verification of a match.
 * SIMD builds already compared the full 16-byte binary in cmp_one(), so
 * nothing is left to check. Scalar builds re-derive the binary from the
 * ciphertext and compare the complete MD5 output.
 */
static int cmp_exact(char *source, int index)
{
#ifdef SIMD_PARA_MD5
	return 1;
#else
	init_t();
	return !memcmp(MD5_std_get_binary(source), MD5_out[index],
	    sizeof(MD5_binary));
#endif
}
/*
 * Install the current salt. In SIMD builds the 9th salt byte carries the
 * md5crypt variant tag; it is extracted into CryptType and then zeroed so
 * cursalt is a plain NUL-terminated 8-byte salt for md5cryptsse().
 */
static void set_salt(void *salt)
{
#ifdef SIMD_PARA_MD5
	memcpy(cursalt, salt, SALT_SIZE);
	CryptType = cursalt[8];
	cursalt[8] = 0;
#endif
	MD5_std_set_salt(salt);
}
/* Thin wrapper: extract the salt from a ciphertext string. */
static void *get_salt(char *ciphertext) {
	return MD5_std_get_salt(ciphertext);
}
/* Thin wrapper: decode a ciphertext string into its binary hash. */
static void *get_binary(char *ciphertext) {
	return MD5_std_get_binary(ciphertext);
}
/*
 * John the Ripper format descriptor for md5crypt: static parameters
 * (sizes, flags, benchmark info, test vectors) followed by the method
 * table wiring the functions defined above into the cracker core.
 */
struct fmt_main fmt_MD5 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		"MD5 " MD5_ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#if MD5_std_mt || defined(SIMD_PARA_MD5)
		FMT_OMP |
#endif
		FMT_CASE | FMT_8_BIT,
		{ NULL },
		/* recognized ciphertext prefixes */
		{
			md5_salt_prefix,
			apr1_salt_prefix,
			smd5_salt_prefix
		},
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		cryptmd5_common_valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
|
OMPIRBuilder.h | //===- IR/OpenMPIRBuilder.h - OpenMP encoding builder for LLVM IR - C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the OpenMPIRBuilder class and helpers used as a convenient
// way to create LLVM instructions for OpenMP directives.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
#define LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Allocator.h"
#include <forward_list>
namespace llvm {
class CanonicalLoopInfo;
/// An interface to create LLVM-IR for OpenMP directives.
///
/// Each OpenMP directive has a corresponding public generator method.
class OpenMPIRBuilder {
public:
/// Create a new OpenMPIRBuilder operating on the given module \p M. This will
/// not have an effect on \p M (see initialize).
OpenMPIRBuilder(Module &M) : M(M), Builder(M.getContext()) {}
~OpenMPIRBuilder();
/// Initialize the internal state, this will put structures types and
/// potentially other helpers into the underlying module. Must be called
/// before any other method and only once!
void initialize();
/// Finalize the underlying module, e.g., by outlining regions.
/// \param Fn The function to be finalized. If not used,
/// all functions are finalized.
/// \param AllowExtractorSinking Flag to include sinking instructions,
/// emitted by CodeExtractor, in the
/// outlined region. Default is false.
void finalize(Function *Fn = nullptr, bool AllowExtractorSinking = false);
/// Add attributes known for \p FnID to \p Fn.
void addAttributes(omp::RuntimeFunction FnID, Function &Fn);
/// Type used throughout for insertion points.
using InsertPointTy = IRBuilder<>::InsertPoint;
/// Callback type for variable finalization (think destructors).
///
/// \param CodeGenIP is the insertion point at which the finalization code
/// should be placed.
///
/// A finalize callback knows about all objects that need finalization, e.g.
/// destruction, when the scope of the currently generated construct is left
/// at the time, and location, the callback is invoked.
using FinalizeCallbackTy = std::function<void(InsertPointTy CodeGenIP)>;
struct FinalizationInfo {
/// The finalization callback provided by the last in-flight invocation of
/// createXXXX for the directive of kind DK.
FinalizeCallbackTy FiniCB;
/// The directive kind of the innermost directive that has an associated
/// region which might require finalization when it is left.
omp::Directive DK;
/// Flag to indicate if the directive is cancellable.
bool IsCancellable;
};
/// Push a finalization callback on the finalization stack.
///
/// NOTE: Temporary solution until Clang CG is gone.
void pushFinalizationCB(const FinalizationInfo &FI) {
FinalizationStack.push_back(FI);
}
/// Pop the last finalization callback from the finalization stack.
///
/// NOTE: Temporary solution until Clang CG is gone.
void popFinalizationCB() { FinalizationStack.pop_back(); }
/// Callback type for body (=inner region) code generation
///
/// The callback takes code locations as arguments, each describing a
/// location at which code might need to be generated or a location that is
/// the target of control transfer.
///
/// \param AllocaIP is the insertion point at which new alloca instructions
/// should be placed.
/// \param CodeGenIP is the insertion point at which the body code should be
/// placed.
/// \param ContinuationBB is the basic block target to leave the body.
///
/// Note that all blocks pointed to by the arguments have terminators.
using BodyGenCallbackTy =
function_ref<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
BasicBlock &ContinuationBB)>;
// This is created primarily for sections construct as llvm::function_ref
// (BodyGenCallbackTy) is not storable (as described in the comments of
// function_ref class - function_ref contains non-ownable reference
// to the callable.
using StorableBodyGenCallbackTy =
std::function<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
BasicBlock &ContinuationBB)>;
/// Callback type for loop body code generation.
///
/// \param CodeGenIP is the insertion point where the loop's body code must be
/// placed. This will be a dedicated BasicBlock with a
/// conditional branch from the loop condition check and
/// terminated with an unconditional branch to the loop
/// latch.
/// \param IndVar is the induction variable usable at the insertion point.
using LoopBodyGenCallbackTy =
function_ref<void(InsertPointTy CodeGenIP, Value *IndVar)>;
/// Callback type for variable privatization (think copy & default
/// constructor).
///
/// \param AllocaIP is the insertion point at which new alloca instructions
/// should be placed.
/// \param CodeGenIP is the insertion point at which the privatization code
/// should be placed.
/// \param Original The value being copied/created, should not be used in the
/// generated IR.
/// \param Inner The equivalent of \p Original that should be used in the
/// generated IR; this is equal to \p Original if the value is
/// a pointer and can thus be passed directly, otherwise it is
/// an equivalent but different value.
/// \param ReplVal The replacement value, thus a copy or new created version
/// of \p Inner.
///
/// \returns The new insertion point where code generation continues and
/// \p ReplVal the replacement value.
using PrivatizeCallbackTy = function_ref<InsertPointTy(
InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &Original,
Value &Inner, Value *&ReplVal)>;
/// Description of a LLVM-IR insertion point (IP) and a debug/source location
/// (filename, line, column, ...).
struct LocationDescription {
template <typename T, typename U>
LocationDescription(const IRBuilder<T, U> &IRB)
: IP(IRB.saveIP()), DL(IRB.getCurrentDebugLocation()) {}
LocationDescription(const InsertPointTy &IP) : IP(IP) {}
LocationDescription(const InsertPointTy &IP, const DebugLoc &DL)
: IP(IP), DL(DL) {}
InsertPointTy IP;
DebugLoc DL;
};
/// Emitter methods for OpenMP directives.
///
///{
/// Generator for '#omp barrier'
///
/// \param Loc The location where the barrier directive was encountered.
/// \param DK The kind of directive that caused the barrier.
/// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
/// \param CheckCancelFlag Flag to indicate a cancel barrier return value
/// should be checked and acted upon.
///
/// \returns The insertion point after the barrier.
InsertPointTy createBarrier(const LocationDescription &Loc, omp::Directive DK,
bool ForceSimpleCall = false,
bool CheckCancelFlag = true);
/// Generator for '#omp cancel'
///
/// \param Loc The location where the directive was encountered.
/// \param IfCondition The evaluated 'if' clause expression, if any.
/// \param CanceledDirective The kind of directive that is cancled.
///
/// \returns The insertion point after the barrier.
InsertPointTy createCancel(const LocationDescription &Loc, Value *IfCondition,
omp::Directive CanceledDirective);
/// Generator for '#omp parallel'
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion points to be used for alloca instructions.
/// \param BodyGenCB Callback that will generate the region code.
/// \param PrivCB Callback to copy a given variable (think copy constructor).
/// \param FiniCB Callback to finalize variable copies.
/// \param IfCondition The evaluated 'if' clause expression, if any.
/// \param NumThreads The evaluated 'num_threads' clause expression, if any.
/// \param ProcBind The value of the 'proc_bind' clause (see ProcBindKind).
/// \param IsCancellable Flag to indicate a cancellable parallel region.
///
/// \returns The insertion position *after* the parallel.
IRBuilder<>::InsertPoint
createParallel(const LocationDescription &Loc, InsertPointTy AllocaIP,
BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB,
FinalizeCallbackTy FiniCB, Value *IfCondition,
Value *NumThreads, omp::ProcBindKind ProcBind,
bool IsCancellable);
/// Generator for the control flow structure of an OpenMP canonical loop.
///
/// This generator operates on the logical iteration space of the loop, i.e.
/// the caller only has to provide a loop trip count of the loop as defined by
/// base language semantics. The trip count is interpreted as an unsigned
/// integer. The induction variable passed to \p BodyGenCB will be of the same
/// type and run from 0 to \p TripCount - 1. It is up to the callback to
/// convert the logical iteration variable to the loop counter variable in the
/// loop body.
///
/// \param Loc The insert and source location description. The insert
/// location can be between two instructions or the end of a
/// degenerate block (e.g. a BB under construction).
/// \param BodyGenCB Callback that will generate the loop body code.
/// \param TripCount Number of iterations the loop body is executed.
/// \param Name Base name used to derive BB and instruction names.
///
/// \returns An object representing the created control flow structure which
/// can be used for loop-associated directives.
CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc,
LoopBodyGenCallbackTy BodyGenCB,
Value *TripCount,
const Twine &Name = "loop");
/// Generator for the control flow structure of an OpenMP canonical loop.
///
/// Instead of a logical iteration space, this allows specifying user-defined
/// loop counter values using increment, upper- and lower bounds. To
/// disambiguate the terminology when counting downwards, instead of lower
/// bounds we use \p Start for the loop counter value in the first body
/// iteration.
///
/// Consider the following limitations:
///
/// * A loop counter space over all integer values of its bit-width cannot be
/// represented. E.g using uint8_t, its loop trip count of 256 cannot be
/// stored into an 8 bit integer):
///
/// DO I = 0, 255, 1
///
/// * Unsigned wrapping is only supported when wrapping only "once"; E.g.
/// effectively counting downwards:
///
/// for (uint8_t i = 100u; i > 0; i += 127u)
///
///
/// TODO: May need to add additional parameters to represent:
///
/// * Allow representing downcounting with unsigned integers.
///
/// * Sign of the step and the comparison operator might disagree:
///
/// for (int i = 0; i < 42; i -= 1u)
///
//
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the loop body code.
/// \param Start Value of the loop counter for the first iterations.
/// \param Stop Loop counter values past this will stop the loop.
/// \param Step Loop counter increment after each iteration; negative
/// means counting down.
/// \param IsSigned Whether Start, Stop and Step are signed integers.
/// \param InclusiveStop Whether \p Stop itself is a valid value for the loop
/// counter.
/// \param ComputeIP Insertion point for instructions computing the trip
/// count. Can be used to ensure the trip count is available
/// at the outermost loop of a loop nest. If not set,
/// defaults to the preheader of the generated loop.
/// \param Name Base name used to derive BB and instruction names.
///
/// \returns An object representing the created control flow structure which
/// can be used for loop-associated directives.
CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc,
LoopBodyGenCallbackTy BodyGenCB,
Value *Start, Value *Stop, Value *Step,
bool IsSigned, bool InclusiveStop,
InsertPointTy ComputeIP = {},
const Twine &Name = "loop");
/// Collapse a loop nest into a single loop.
///
/// Merges loops of a loop nest into a single CanonicalLoopNest representation
/// that has the same number of innermost loop iterations as the origin loop
/// nest. The induction variables of the input loops are derived from the
/// collapsed loop's induction variable. This is intended to be used to
/// implement OpenMP's collapse clause. Before applying a directive,
/// collapseLoops normalizes a loop nest to contain only a single loop and the
/// directive's implementation does not need to handle multiple loops itself.
/// This does not remove the need to handle all loop nest handling by
/// directives, such as the ordered(<n>) clause or the simd schedule-clause
/// modifier of the worksharing-loop directive.
///
/// Example:
/// \code
/// for (int i = 0; i < 7; ++i) // Canonical loop "i"
/// for (int j = 0; j < 9; ++j) // Canonical loop "j"
/// body(i, j);
/// \endcode
///
/// After collapsing with Loops={i,j}, the loop is changed to
/// \code
/// for (int ij = 0; ij < 63; ++ij) {
/// int i = ij / 9;
/// int j = ij % 9;
/// body(i, j);
/// }
/// \endcode
///
/// In the current implementation, the following limitations apply:
///
/// * All input loops have an induction variable of the same type.
///
/// * The collapsed loop will have the same trip count integer type as the
/// input loops. Therefore it is possible that the collapsed loop cannot
/// represent all iterations of the input loops. For instance, assuming a
/// 32 bit integer type, and two input loops both iterating 2^16 times, the
/// theoretical trip count of the collapsed loop would be 2^32 iteration,
/// which cannot be represented in an 32-bit integer. Behavior is undefined
/// in this case.
///
/// * The trip counts of every input loop must be available at \p ComputeIP.
/// Non-rectangular loops are not yet supported.
///
/// * At each nest level, code between a surrounding loop and its nested loop
/// is hoisted into the loop body, and such code will be executed more
/// often than before collapsing (or not at all if any inner loop iteration
/// has a trip count of 0). This is permitted by the OpenMP specification.
///
/// \param DL Debug location for instructions added for collapsing,
/// such as instructions to compute/derive the input loop's
/// induction variables.
/// \param Loops Loops in the loop nest to collapse. Loops are specified
/// from outermost-to-innermost and every control flow of a
/// loop's body must pass through its directly nested loop.
/// \param ComputeIP Where additional instruction that compute the collapsed
/// trip count. If not set, defaults to before the generated
/// loop.
///
/// \returns The CanonicalLoopInfo object representing the collapsed loop.
CanonicalLoopInfo *collapseLoops(DebugLoc DL,
ArrayRef<CanonicalLoopInfo *> Loops,
InsertPointTy ComputeIP);
/// Modifies the canonical loop to be a statically-scheduled workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it calls to an OpenMP
/// runtime function in the preheader to obtain the loop bounds to be used in
/// the current thread, updates the relevant instructions in the canonical
/// loop and calls to an OpenMP runtime finalization function after the loop.
///
/// TODO: Workshare loops with static scheduling may contain up to two loops
/// that fulfill the requirements of an OpenMP canonical loop. One for
/// iterating over all iterations of a chunk and another one for iterating
/// over all chunks that are executed on the same thread. Returning
/// CanonicalLoopInfo objects representing them may eventually be useful for
/// the apply clause planned in OpenMP 6.0, but currently whether these are
/// canonical loops is irrelevant.
///
/// \param DL Debug location for instructions added for the
/// workshare-loop construct itself.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after
/// the loop.
/// \param Chunk The size of loop chunk considered as a unit when
/// scheduling. If \p nullptr, defaults to 1.
///
/// \returns Point where to insert code after the workshare construct.
InsertPointTy applyStaticWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP,
bool NeedsBarrier,
Value *Chunk = nullptr);
/// Modifies the canonical loop to be a dynamically-scheduled workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it calls to an OpenMP
/// runtime function in the preheader to obtain, and then in each iteration
/// to update the loop counter.
///
/// \param DL Debug location for instructions added for the
/// workshare-loop construct itself.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param SchedType Type of scheduling to be passed to the init function.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after
/// the loop.
/// \param Chunk The size of loop chunk considered as a unit when
/// scheduling. If \p nullptr, defaults to 1.
///
/// \returns Point where to insert code after the workshare construct.
InsertPointTy applyDynamicWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP,
omp::OMPScheduleType SchedType,
bool NeedsBarrier,
Value *Chunk = nullptr);
/// Modifies the canonical loop to be a workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it calls to an OpenMP
/// runtime function in the preheader to obtain the loop bounds to be used in
/// the current thread, updates the relevant instructions in the canonical
/// loop and calls to an OpenMP runtime finalization function after the loop.
///
/// \param DL Debug location for instructions added for the
/// workshare-loop construct itself.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after
/// the loop.
///
/// \returns Point where to insert code after the workshare construct.
InsertPointTy applyWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP, bool NeedsBarrier);
/// Tile a loop nest.
///
/// Tiles the loops of \p Loops by the tile sizes in \p TileSizes. Loops in
/// \p Loops must be perfectly nested, from outermost to innermost loop
/// (i.e. Loops.front() is the outermost loop). The trip count llvm::Value
/// of every loop and every tile sizes must be usable in the outermost
/// loop's preheader. This implies that the loop nest is rectangular.
///
/// Example:
/// \code
/// for (int i = 0; i < 15; ++i) // Canonical loop "i"
/// for (int j = 0; j < 14; ++j) // Canonical loop "j"
/// body(i, j);
/// \endcode
///
/// After tiling with Loops={i,j} and TileSizes={5,7}, the loop is changed to
/// \code
/// for (int i1 = 0; i1 < 3; ++i1)
/// for (int j1 = 0; j1 < 2; ++j1)
/// for (int i2 = 0; i2 < 5; ++i2)
/// for (int j2 = 0; j2 < 7; ++j2)
/// body(i1*5+i2, j1*7+j2);
/// \endcode
///
/// The returned vector are the loops {i1,j1,i2,j2}. The loops i1 and j1 are
/// referred to as the floor, and the loops i2 and j2 are the tiles. Tiling
/// handles non-constant trip counts, non-constant tile sizes and trip counts
/// that are not multiples of the tile size. In the latter case the tile loop
/// of the last floor-loop iteration will have fewer iterations than specified
/// as its tile size.
///
///
/// @param DL Debug location for instructions added by tiling, for
/// instance the floor- and tile trip count computation.
/// @param Loops Loops to tile. The CanonicalLoopInfo objects are
/// invalidated by this method, i.e. should not be used after
/// tiling.
/// @param TileSizes For each loop in \p Loops, the tile size for that
/// dimensions.
///
/// \returns A list of generated loops. Contains twice as many loops as the
/// input loop nest; the first half are the floor loops and the
/// second half are the tile loops.
std::vector<CanonicalLoopInfo *>
tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
ArrayRef<Value *> TileSizes);
/// Fully unroll a loop.
///
/// Instead of unrolling the loop immediately (and duplicating its body
/// instructions), it is deferred to LLVM's LoopUnrollPass by adding loop
/// metadata.
///
/// \param DL Debug location for instructions added by unrolling.
/// \param Loop The loop to unroll. The loop will be invalidated.
void unrollLoopFull(DebugLoc DL, CanonicalLoopInfo *Loop);
/// Fully or partially unroll a loop. How the loop is unrolled is determined
/// using LLVM's LoopUnrollPass.
///
/// \param DL Debug location for instructions added by unrolling.
/// \param Loop The loop to unroll. The loop will be invalidated.
void unrollLoopHeuristic(DebugLoc DL, CanonicalLoopInfo *Loop);
/// Partially unroll a loop.
///
/// The CanonicalLoopInfo of the unrolled loop for use with chained
/// loop-associated directive can be requested using \p UnrolledCLI. Not
/// needing the CanonicalLoopInfo allows more efficient code generation by
/// deferring the actual unrolling to the LoopUnrollPass using loop metadata.
/// A loop-associated directive applied to the unrolled loop needs to know the
/// new trip count which means that if using a heuristically determined unroll
/// factor (\p Factor == 0), that factor must be computed immediately. We are
/// using the same logic as the LoopUnrollPass to derive the unroll factor,
/// but which assumes that some canonicalization has taken place (e.g.
/// Mem2Reg, LICM, GVN, Inlining, etc.). That is, the heuristic will perform
/// better when the unrolled loop's CanonicalLoopInfo is not needed.
///
/// \param DL Debug location for instructions added by unrolling.
/// \param Loop The loop to unroll. The loop will be invalidated.
/// \param Factor The factor to unroll the loop by. A factor of 0
/// indicates that a heuristic should be used to determine
/// the unroll-factor.
/// \param UnrolledCLI If non-null, receives the CanonicalLoopInfo of the
/// partially unrolled loop. Otherwise, uses loop metadata
/// to defer unrolling to the LoopUnrollPass.
void unrollLoopPartial(DebugLoc DL, CanonicalLoopInfo *Loop, int32_t Factor,
CanonicalLoopInfo **UnrolledCLI);
/// Generator for '#omp flush'
///
/// \param Loc The location where the flush directive was encountered
void createFlush(const LocationDescription &Loc);
/// Generator for '#omp taskwait'
///
/// \param Loc The location where the taskwait directive was encountered.
void createTaskwait(const LocationDescription &Loc);
/// Generator for '#omp taskyield'
///
/// \param Loc The location where the taskyield directive was encountered.
void createTaskyield(const LocationDescription &Loc);
/// Functions used to generate reductions. Such functions take two Values
/// representing LHS and RHS of the reduction, respectively, and a reference
/// to the value that is updated to refer to the reduction result.
using ReductionGenTy =
function_ref<InsertPointTy(InsertPointTy, Value *, Value *, Value *&)>;
/// Functions used to generate atomic reductions. Such functions take two
/// Values representing pointers to LHS and RHS of the reduction, as well as
/// the element type of these pointers. They are expected to atomically
/// update the LHS to the reduced value.
using AtomicReductionGenTy =
function_ref<InsertPointTy(InsertPointTy, Type *, Value *, Value *)>;
/// Information about an OpenMP reduction.
struct ReductionInfo {
/// Construct a reduction descriptor. Asserts (in builds with assertions
/// enabled) that \p Variable is a pointer whose pointee type matches
/// \p ElementType.
ReductionInfo(Type *ElementType, Value *Variable, Value *PrivateVariable,
ReductionGenTy ReductionGen,
AtomicReductionGenTy AtomicReductionGen)
: ElementType(ElementType), Variable(Variable),
PrivateVariable(PrivateVariable), ReductionGen(ReductionGen),
AtomicReductionGen(AtomicReductionGen) {
assert(cast<PointerType>(Variable->getType())
->isOpaqueOrPointeeTypeMatches(ElementType) && "Invalid elem type");
}
/// Reduction element type, must match pointee type of variable.
Type *ElementType;
/// Reduction variable of pointer type.
Value *Variable;
/// Thread-private partial reduction variable.
Value *PrivateVariable;
/// Callback for generating the reduction body. The IR produced by this will
/// be used to combine two values in a thread-safe context, e.g., under
/// lock or within the same thread, and therefore need not be atomic.
ReductionGenTy ReductionGen;
/// Callback for generating the atomic reduction body, may be null. The IR
/// produced by this will be used to atomically combine two values during
/// reduction. If null, the implementation will use the non-atomic version
/// along with the appropriate synchronization mechanisms.
AtomicReductionGenTy AtomicReductionGen;
};
// TODO: provide atomic and non-atomic reduction generators for reduction
// operators defined by the OpenMP specification.
/// Generator for '#omp reduction'.
///
/// Emits the IR instructing the runtime to perform the specific kind of
/// reductions. Expects reduction variables to have been privatized and
/// initialized to reduction-neutral values separately. Emits the calls to
/// runtime functions as well as the reduction function and the basic blocks
/// performing the reduction atomically and non-atomically.
///
/// The code emitted for the following:
///
/// \code
/// type var_1;
/// type var_2;
/// #pragma omp <directive> reduction(reduction-op:var_1,var_2)
/// /* body */;
/// \endcode
///
/// corresponds to the following sketch.
///
/// \code
/// void _outlined_par() {
/// // N is the number of different reductions.
/// void *red_array[] = {privatized_var_1, privatized_var_2, ...};
/// switch(__kmpc_reduce(..., N, /*size of data in red array*/, red_array,
/// _omp_reduction_func,
/// _gomp_critical_user.reduction.var)) {
/// case 1: {
/// var_1 = var_1 <reduction-op> privatized_var_1;
/// var_2 = var_2 <reduction-op> privatized_var_2;
/// // ...
/// __kmpc_end_reduce(...);
/// break;
/// }
/// case 2: {
/// _Atomic<ReductionOp>(var_1, privatized_var_1);
/// _Atomic<ReductionOp>(var_2, privatized_var_2);
/// // ...
/// break;
/// }
/// default: break;
/// }
/// }
///
/// void _omp_reduction_func(void **lhs, void **rhs) {
/// *(type *)lhs[0] = *(type *)lhs[0] <reduction-op> *(type *)rhs[0];
/// *(type *)lhs[1] = *(type *)lhs[1] <reduction-op> *(type *)rhs[1];
/// // ...
/// }
/// \endcode
///
/// \param Loc The location where the reduction was
/// encountered. Must be within the associated
/// directive and after the last local access to the
/// reduction variables.
/// \param AllocaIP An insertion point suitable for allocas usable
/// in reductions.
/// \param ReductionInfos A list of info on each reduction variable.
/// \param IsNoWait A flag set if the reduction is marked as nowait.
InsertPointTy createReductions(const LocationDescription &Loc,
InsertPointTy AllocaIP,
ArrayRef<ReductionInfo> ReductionInfos,
bool IsNoWait = false);
///}
/// Return the insertion point currently in use by the underlying IRBuilder
/// (a saved copy; restoring it elsewhere does not affect the builder).
InsertPointTy getInsertionPoint() { return Builder.saveIP(); }
/// Update the internal location to \p Loc.
///
/// Restores the IRBuilder's insertion point and current debug location from
/// \p Loc.
///
/// \returns true iff \p Loc carries a usable insertion point (its block is
///          non-null); callers typically skip code generation otherwise.
bool updateToLocation(const LocationDescription &Loc) {
  bool HasInsertBlock = Loc.IP.getBlock() != nullptr;
  Builder.restoreIP(Loc.IP);
  Builder.SetCurrentDebugLocation(Loc.DL);
  return HasInsertBlock;
}
/// Return the function declaration for the runtime function with \p FnID.
FunctionCallee getOrCreateRuntimeFunction(Module &M,
omp::RuntimeFunction FnID);
Function *getOrCreateRuntimeFunctionPtr(omp::RuntimeFunction FnID);
/// Return the (LLVM-IR) string describing the source location \p LocStr.
Constant *getOrCreateSrcLocStr(StringRef LocStr, uint32_t &SrcLocStrSize);
/// Return the (LLVM-IR) string describing the default source location.
Constant *getOrCreateDefaultSrcLocStr(uint32_t &SrcLocStrSize);
/// Return the (LLVM-IR) string describing the source location identified by
/// the arguments.
Constant *getOrCreateSrcLocStr(StringRef FunctionName, StringRef FileName,
unsigned Line, unsigned Column,
uint32_t &SrcLocStrSize);
/// Return the (LLVM-IR) string describing the DebugLoc \p DL. Use \p F as
/// fallback if \p DL does not specify the function name.
Constant *getOrCreateSrcLocStr(DebugLoc DL, uint32_t &SrcLocStrSize,
Function *F = nullptr);
/// Return the (LLVM-IR) string describing the source location \p Loc.
Constant *getOrCreateSrcLocStr(const LocationDescription &Loc,
uint32_t &SrcLocStrSize);
/// Return an ident_t* encoding the source location \p SrcLocStr and \p Flags.
/// TODO: Create a enum class for the Reserve2Flags
Constant *getOrCreateIdent(Constant *SrcLocStr, uint32_t SrcLocStrSize,
omp::IdentFlag Flags = omp::IdentFlag(0),
unsigned Reserve2Flags = 0);
/// Create a global flag \p Name in the module with initial value \p Value.
GlobalValue *createGlobalFlag(unsigned Value, StringRef Name);
/// Generate control flow and cleanup for cancellation.
///
/// \param CancelFlag Flag indicating if the cancellation is performed.
/// \param CanceledDirective The kind of directive that is canceled.
/// \param ExitCB Extra code to be generated in the exit block.
void emitCancelationCheckImpl(Value *CancelFlag,
omp::Directive CanceledDirective,
FinalizeCallbackTy ExitCB = {});
/// Generate a barrier runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
/// \param DK The directive which caused the barrier
/// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
/// \param CheckCancelFlag Flag to indicate a cancel barrier return value
/// should be checked and acted upon.
///
/// \returns The insertion point after the barrier.
InsertPointTy emitBarrierImpl(const LocationDescription &Loc,
omp::Directive DK, bool ForceSimpleCall,
bool CheckCancelFlag);
/// Generate a flush runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitFlush(const LocationDescription &Loc);
/// The finalization stack made up of finalize callbacks currently in-flight,
/// wrapped into FinalizationInfo objects that reference also the finalization
/// target block and the kind of cancellable directive.
SmallVector<FinalizationInfo, 8> FinalizationStack;
/// Return true if the last entry in the finalization stack is of kind \p DK
/// and cancellable.
bool isLastFinalizationInfoCancellable(omp::Directive DK) {
  if (FinalizationStack.empty())
    return false;
  const FinalizationInfo &Top = FinalizationStack.back();
  return Top.IsCancellable && Top.DK == DK;
}
/// Generate a taskwait runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitTaskwaitImpl(const LocationDescription &Loc);
/// Generate a taskyield runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitTaskyieldImpl(const LocationDescription &Loc);
/// Return the current thread ID.
///
/// \param Ident The ident (ident_t*) describing the query origin.
Value *getOrCreateThreadID(Value *Ident);
/// The underlying LLVM-IR module
Module &M;
/// The LLVM-IR Builder used to create IR.
IRBuilder<> Builder;
/// Map to remember source location strings
StringMap<Constant *> SrcLocStrMap;
/// Map to remember existing ident_t*.
DenseMap<std::pair<Constant *, uint64_t>, Constant *> IdentMap;
/// Helper that contains information about regions we need to outline
/// during finalization.
struct OutlineInfo {
/// Callback type run after the region has been outlined; receives the
/// newly created outlined function.
using PostOutlineCBTy = std::function<void(Function &)>;
/// Optional post-outlining callback (may be empty).
PostOutlineCBTy PostOutlineCB;
/// First and last basic blocks delimiting the region to be outlined; both
/// must belong to the same function (see getFunction()).
BasicBlock *EntryBB, *ExitBB;
/// Collect all blocks in between EntryBB and ExitBB in both the given
/// vector and set.
void collectBlocks(SmallPtrSetImpl<BasicBlock *> &BlockSet,
SmallVectorImpl<BasicBlock *> &BlockVector);
/// Return the function that contains the region to be outlined.
Function *getFunction() const { return EntryBB->getParent(); }
};
/// Collection of regions that need to be outlined during finalization.
SmallVector<OutlineInfo, 16> OutlineInfos;
/// Collection of owned canonical loop objects that eventually need to be
/// free'd.
std::forward_list<CanonicalLoopInfo> LoopInfos;
/// Add a new region that will be outlined later.
///
/// \param OI Descriptor of the region to outline; its contents are moved
///           into the internal OutlineInfos collection.
void addOutlineInfo(OutlineInfo &&OI) {
  // OI is a named rvalue reference and therefore an lvalue; without
  // std::move the previous code copy-constructed the element (copying the
  // std::function callback) instead of moving it.
  OutlineInfos.emplace_back(std::move(OI));
}
/// An ordered map of auto-generated variables to their unique names.
/// It stores variables with the following names: 1) ".gomp_critical_user_" +
/// <critical_section_name> + ".var" for "omp critical" directives; 2)
/// <mangled_name_for_global_var> + ".cache." for cache for threadprivate
/// variables.
StringMap<AssertingVH<Constant>, BumpPtrAllocator> InternalVars;
/// Create the global variable holding the offload mappings information.
GlobalVariable *createOffloadMaptypes(SmallVectorImpl<uint64_t> &Mappings,
std::string VarName);
/// Create the global variable holding the offload names information.
GlobalVariable *
createOffloadMapnames(SmallVectorImpl<llvm::Constant *> &Names,
std::string VarName);
/// Allocas created by createMapperAllocas and consumed by emitMapperCall to
/// pass argument arrays to an OpenMP target mapper runtime function.
struct MapperAllocas {
/// Alloca for the array of argument base pointers (presumably sized by the
/// NumOperands passed to createMapperAllocas — confirm against the .cpp).
AllocaInst *ArgsBase = nullptr;
/// Alloca for the array of argument pointers.
AllocaInst *Args = nullptr;
/// Alloca for the array of argument sizes.
AllocaInst *ArgSizes = nullptr;
};
/// Create the allocas instruction used in call to mapper functions.
void createMapperAllocas(const LocationDescription &Loc,
InsertPointTy AllocaIP, unsigned NumOperands,
struct MapperAllocas &MapperAllocas);
/// Create the call for the target mapper function.
/// \param Loc The source location description.
/// \param MapperFunc Function to be called.
/// \param SrcLocInfo Source location information global.
/// \param MaptypesArg The argument types.
/// \param MapnamesArg The argument names.
/// \param MapperAllocas The AllocaInst used for the call.
/// \param DeviceID Device ID for the call.
/// \param NumOperands Number of operands in the call.
void emitMapperCall(const LocationDescription &Loc, Function *MapperFunc,
Value *SrcLocInfo, Value *MaptypesArg, Value *MapnamesArg,
struct MapperAllocas &MapperAllocas, int64_t DeviceID,
unsigned NumOperands);
public:
/// Generator for __kmpc_copyprivate
///
/// \param Loc The source location description.
/// \param BufSize Number of elements in the buffer.
/// \param CpyBuf List of pointers to data to be copied.
/// \param CpyFn function to call for copying data.
/// \param DidIt flag variable; 1 for 'single' thread, 0 otherwise.
///
/// \return The insertion position *after* the CopyPrivate call.
InsertPointTy createCopyPrivate(const LocationDescription &Loc,
llvm::Value *BufSize, llvm::Value *CpyBuf,
llvm::Value *CpyFn, llvm::Value *DidIt);
/// Generator for '#omp single'
///
/// \param Loc The source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
/// \param DidIt Local variable used as a flag to indicate 'single' thread
///
/// \returns The insertion position *after* the single call.
InsertPointTy createSingle(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, llvm::Value *DidIt);
/// Generator for '#omp master'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
///
/// \returns The insertion position *after* the master.
InsertPointTy createMaster(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB);
/// Generator for '#omp masked'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
///
/// \returns The insertion position *after* the masked.
InsertPointTy createMasked(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, Value *Filter);
/// Generator for '#omp critical'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region body code.
/// \param FiniCB Callback to finalize variable copies.
/// \param CriticalName name of the lock used by the critical directive
/// \param HintInst Hint Instruction for hint clause associated with critical
///
/// \returns The insertion position *after* the critical.
InsertPointTy createCritical(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB,
StringRef CriticalName, Value *HintInst);
/// Generator for '#omp ordered depend (source | sink)'
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion point to be used for alloca instructions.
/// \param NumLoops The number of loops in depend clause.
/// \param StoreValues The value will be stored in vector address.
/// \param Name The name of alloca instruction.
/// \param IsDependSource If true, depend source; otherwise, depend sink.
///
/// \return The insertion position *after* the ordered.
InsertPointTy createOrderedDepend(const LocationDescription &Loc,
InsertPointTy AllocaIP, unsigned NumLoops,
ArrayRef<llvm::Value *> StoreValues,
const Twine &Name, bool IsDependSource);
/// Generator for '#omp ordered [threads | simd]'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
/// \param IsThreads If true, with threads clause or without clause;
/// otherwise, with simd clause;
///
/// \returns The insertion position *after* the ordered.
InsertPointTy createOrderedThreadsSimd(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB,
bool IsThreads);
/// Generator for '#omp sections'
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion points to be used for alloca instructions.
/// \param SectionCBs Callbacks that will generate body of each section.
/// \param PrivCB Callback to copy a given variable (think copy constructor).
/// \param FiniCB Callback to finalize variable copies.
/// \param IsCancellable Flag to indicate a cancellable parallel region.
/// \param IsNowait If true, barrier - to ensure all sections are executed
/// before moving forward will not be generated.
/// \returns The insertion position *after* the sections.
InsertPointTy createSections(const LocationDescription &Loc,
InsertPointTy AllocaIP,
ArrayRef<StorableBodyGenCallbackTy> SectionCBs,
PrivatizeCallbackTy PrivCB,
FinalizeCallbackTy FiniCB, bool IsCancellable,
bool IsNowait);
/// Generator for '#omp section'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region body code.
/// \param FiniCB Callback to finalize variable copies.
/// \returns The insertion position *after* the section.
InsertPointTy createSection(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB);
/// Generate conditional branch and relevant BasicBlocks through which private
/// threads copy the 'copyin' variables from Master copy to threadprivate
/// copies.
///
/// \param IP insertion block for copyin conditional
/// \param MasterVarPtr a pointer to the master variable
/// \param PrivateVarPtr a pointer to the threadprivate variable
/// \param IntPtrTy Pointer size type
/// \param BranchtoEnd Create a branch between the copyin.not.master blocks
// and copy.in.end block
///
/// \returns The insertion point where copying operation to be emitted.
InsertPointTy createCopyinClauseBlocks(InsertPointTy IP, Value *MasterAddr,
Value *PrivateAddr,
llvm::IntegerType *IntPtrTy,
bool BranchtoEnd = true);
/// Create a runtime call for kmpc_Alloc
///
/// \param Loc The insert and source location description.
/// \param Size Size of allocated memory space
/// \param Allocator Allocator information instruction
/// \param Name Name of call Instruction for OMP_alloc
///
/// \returns CallInst to the OMP_Alloc call
CallInst *createOMPAlloc(const LocationDescription &Loc, Value *Size,
Value *Allocator, std::string Name = "");
/// Create a runtime call for kmpc_free
///
/// \param Loc The insert and source location description.
/// \param Addr Address of memory space to be freed
/// \param Allocator Allocator information instruction
/// \param Name Name of call Instruction for OMP_Free
///
/// \returns CallInst to the OMP_Free call
CallInst *createOMPFree(const LocationDescription &Loc, Value *Addr,
Value *Allocator, std::string Name = "");
/// Create a runtime call for kmpc_threadprivate_cached
///
/// \param Loc The insert and source location description.
/// \param Pointer pointer to data to be cached
/// \param Size size of data to be cached
/// \param Name Name of call Instruction for callinst
///
/// \returns CallInst to the thread private cache call.
CallInst *createCachedThreadPrivate(const LocationDescription &Loc,
llvm::Value *Pointer,
llvm::ConstantInt *Size,
const llvm::Twine &Name = Twine(""));
/// The `omp target` interface
///
/// For more information about the usage of this interface,
/// \see openmp/libomptarget/deviceRTLs/common/include/target.h
///
///{
/// Create a runtime call for kmpc_target_init
///
/// \param Loc The insert and source location description.
/// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not.
/// \param RequiresFullRuntime Indicate if a full device runtime is necessary.
InsertPointTy createTargetInit(const LocationDescription &Loc, bool IsSPMD,
bool RequiresFullRuntime);
/// Create a runtime call for kmpc_target_deinit
///
/// \param Loc The insert and source location description.
/// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not.
/// \param RequiresFullRuntime Indicate if a full device runtime is necessary.
void createTargetDeinit(const LocationDescription &Loc, bool IsSPMD,
bool RequiresFullRuntime);
///}
/// Declarations for LLVM-IR types (simple, array, function and structure) are
/// generated below. Their names are defined and used in OpenMPKinds.def. Here
/// we provide the declarations, the initializeTypes function will provide the
/// values.
///
///{
#define OMP_TYPE(VarName, InitValue) Type *VarName = nullptr;
#define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize) \
ArrayType *VarName##Ty = nullptr; \
PointerType *VarName##PtrTy = nullptr;
#define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...) \
FunctionType *VarName = nullptr; \
PointerType *VarName##Ptr = nullptr;
#define OMP_STRUCT_TYPE(VarName, StrName, ...) \
StructType *VarName = nullptr; \
PointerType *VarName##Ptr = nullptr;
#include "llvm/Frontend/OpenMP/OMPKinds.def"
///}
private:
/// Create all simple and struct types exposed by the runtime and remember
/// the llvm::PointerTypes of them for easy access later.
void initializeTypes(Module &M);
/// Common interface for generating entry calls for OMP Directives.
/// if the directive has a region/body, It will set the insertion
/// point to the body
///
/// \param OMPD Directive to generate entry blocks for
/// \param EntryCall Call to the entry OMP Runtime Function
/// \param ExitBB block where the region ends.
/// \param Conditional indicate if the entry call result will be used
/// to evaluate a conditional of whether a thread will execute
/// body code or not.
///
/// \return The insertion position in exit block
InsertPointTy emitCommonDirectiveEntry(omp::Directive OMPD, Value *EntryCall,
BasicBlock *ExitBB,
bool Conditional = false);
/// Common interface to finalize the region
///
/// \param OMPD Directive to generate exiting code for
/// \param FinIP Insertion point for emitting Finalization code and exit call
/// \param ExitCall Call to the ending OMP Runtime Function
/// \param HasFinalize indicate if the directive will require finalization
/// and has a finalization callback in the stack that
/// should be called.
///
/// \return The insertion position in exit block
InsertPointTy emitCommonDirectiveExit(omp::Directive OMPD,
InsertPointTy FinIP,
Instruction *ExitCall,
bool HasFinalize = true);
/// Common Interface to generate OMP inlined regions
///
/// \param OMPD Directive to generate inlined region for
/// \param EntryCall Call to the entry OMP Runtime Function
/// \param ExitCall Call to the ending OMP Runtime Function
/// \param BodyGenCB Body code generation callback.
/// \param FiniCB Finalization Callback. Will be called when finalizing region
/// \param Conditional indicate if the entry call result will be used
/// to evaluate a conditional of whether a thread will execute
/// body code or not.
/// \param HasFinalize indicate if the directive will require finalization
/// and has a finalization callback in the stack that
/// should be called.
/// \param IsCancellable if HasFinalize is set to true, indicate if the
/// the directive should be cancellable.
/// \return The insertion point after the region
InsertPointTy
EmitOMPInlinedRegion(omp::Directive OMPD, Instruction *EntryCall,
Instruction *ExitCall, BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, bool Conditional = false,
bool HasFinalize = true, bool IsCancellable = false);
/// Get the platform-specific name separator.
/// \param Parts different parts of the final name that needs separation
/// \param FirstSeparator First separator used between the initial two
/// parts of the name.
/// \param Separator separator used between all of the rest consecutive
/// parts of the name
static std::string getNameWithSeparators(ArrayRef<StringRef> Parts,
StringRef FirstSeparator,
StringRef Separator);
/// Gets (if variable with the given name already exist) or creates
/// internal global variable with the specified Name. The created variable has
/// linkage CommonLinkage by default and is initialized by null value.
/// \param Ty Type of the global variable. If the variable already exists,
/// its type must match \p Ty.
/// \param Name Name of the variable.
Constant *getOrCreateOMPInternalVariable(Type *Ty, const Twine &Name,
unsigned AddressSpace = 0);
/// Returns corresponding lock object for the specified critical region
/// name. If the lock object does not exist it is created, otherwise the
/// reference to the existing copy is returned.
/// \param CriticalName Name of the critical region.
///
Value *getOMPCriticalRegionLock(StringRef CriticalName);
/// Callback type for Atomic Expression update
/// ex:
/// \code{.cpp}
/// unsigned x = 0;
/// #pragma omp atomic update
/// x = Expr(x_old); //Expr() is any legal operation
/// \endcode
///
/// \param XOld the value of the atomic memory address to use for update
/// \param IRB reference to the IRBuilder to use
///
/// \returns Value to update X to.
using AtomicUpdateCallbackTy =
const function_ref<Value *(Value *XOld, IRBuilder<> &IRB)>;
private:
enum AtomicKind { Read, Write, Update, Capture };
/// Determine whether to emit flush or not
///
/// \param Loc The insert and source location description.
/// \param AO The required atomic ordering
/// \param AK The OpenMP atomic operation kind used.
///
/// \returns Whether a flush was emitted or not
bool checkAndEmitFlushAfterAtomic(const LocationDescription &Loc,
AtomicOrdering AO, AtomicKind AK);
/// Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X
/// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
/// Only Scalar data types.
///
/// \param AllocIP Instruction to create AllocaInst before.
/// \param X The target atomic pointer to be updated
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic
/// instructions.
/// \param RMWOp The binary operation used for update. If
/// operation is not supported by atomicRMW,
/// or belongs to {FADD, FSUB, BAD_BINOP},
/// Then a `cmpExch` based atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param VolatileX true if \a X volatile?
/// \param IsXBinopExpr true if \a X is Left H.S. in Right H.S. part of the
/// update expression, false otherwise.
/// (e.g. true for X = X BinOp Expr)
///
/// \returns A pair of the old value of X before the update, and the value
/// used for the update.
std::pair<Value *, Value *> emitAtomicUpdate(Instruction *AllocIP, Value *X,
Value *Expr, AtomicOrdering AO,
AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp,
bool VolatileX,
bool IsXBinopExpr);
/// Emit the binary op. described by \p RMWOp, using \p Src1 and \p Src2 .
///
/// \Return The instruction
Value *emitRMWOpAsInstruction(Value *Src1, Value *Src2,
AtomicRMWInst::BinOp RMWOp);
public:
/// a struct to pack relevant information while generating atomic Ops
struct AtomicOpValue {
Value *Var = nullptr;
bool IsSigned = false;
bool IsVolatile = false;
};
/// Emit atomic Read for : V = X --- Only Scalar data types.
///
/// \param Loc The insert and source location description.
/// \param X The target pointer to be atomically read
/// \param V Memory address where to store atomically read
/// value
/// \param AO Atomic ordering of the generated atomic
/// instructions.
///
/// \return Insertion point after generated atomic read IR.
InsertPointTy createAtomicRead(const LocationDescription &Loc,
AtomicOpValue &X, AtomicOpValue &V,
AtomicOrdering AO);
/// Emit atomic write for : X = Expr --- Only Scalar data types.
///
/// \param Loc The insert and source location description.
/// \param X The target pointer to be atomically written to
/// \param Expr The value to store.
/// \param AO Atomic ordering of the generated atomic
/// instructions.
///
/// \return Insertion point after generated atomic Write IR.
InsertPointTy createAtomicWrite(const LocationDescription &Loc,
AtomicOpValue &X, Value *Expr,
AtomicOrdering AO);
/// Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X
/// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
/// Only Scalar data types.
///
/// \param Loc The insert and source location description.
/// \param AllocIP Instruction to create AllocaInst before.
/// \param X The target atomic pointer to be updated
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic instructions.
/// \param RMWOp The binary operation used for update. If operation
/// is not supported by atomicRMW, or belong to
/// {FADD, FSUB, BAD_BINOP}. Then a `cmpExch` based
/// atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param IsXBinopExpr true if \a X is Left H.S. in Right H.S. part of the
/// update expression, false otherwise.
/// (e.g. true for X = X BinOp Expr)
///
/// \return Insertion point after generated atomic update IR.
InsertPointTy createAtomicUpdate(const LocationDescription &Loc,
Instruction *AllocIP, AtomicOpValue &X,
Value *Expr, AtomicOrdering AO,
AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp,
bool IsXBinopExpr);
/// Emit atomic update for constructs: --- Only Scalar data types
/// V = X; X = X BinOp Expr ,
/// X = X BinOp Expr; V = X,
/// V = X; X = Expr BinOp X,
/// X = Expr BinOp X; V = X,
/// V = X; X = UpdateOp(X),
/// X = UpdateOp(X); V = X,
///
/// \param Loc The insert and source location description.
/// \param AllocIP Instruction to create AllocaInst before.
/// \param X The target atomic pointer to be updated
/// \param V Memory address where to store captured value
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic instructions
/// \param RMWOp The binary operation used for update. If
/// operation is not supported by atomicRMW, or belong to
/// {FADD, FSUB, BAD_BINOP}. Then a cmpExch based
/// atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param UpdateExpr true if X is an in place update of the form
/// X = X BinOp Expr or X = Expr BinOp X
/// \param IsXBinopExpr true if X is Left H.S. in Right H.S. part of the
/// update expression, false otherwise.
/// (e.g. true for X = X BinOp Expr)
/// \param IsPostfixUpdate true if original value of 'x' must be stored in
/// 'v', not an updated one.
///
/// \return Insertion point after generated atomic capture IR.
InsertPointTy
createAtomicCapture(const LocationDescription &Loc, Instruction *AllocIP,
AtomicOpValue &X, AtomicOpValue &V, Value *Expr,
AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp, bool UpdateExpr,
bool IsPostfixUpdate, bool IsXBinopExpr);
/// Create the control flow structure of a canonical OpenMP loop.
///
/// The emitted loop will be disconnected, i.e. no edge to the loop's
/// preheader and no terminator in the AfterBB. The OpenMPIRBuilder's
/// IRBuilder location is not preserved.
///
/// \param DL DebugLoc used for the instructions in the skeleton.
/// \param TripCount Value to be used for the trip count.
/// \param F Function in which to insert the BasicBlocks.
/// \param PreInsertBefore Where to insert BBs that execute before the body,
/// typically the body itself.
/// \param PostInsertBefore Where to insert BBs that execute after the body.
/// \param Name Base name used to derive BB
/// and instruction names.
///
/// \returns The CanonicalLoopInfo that represents the emitted loop.
CanonicalLoopInfo *createLoopSkeleton(DebugLoc DL, Value *TripCount,
Function *F,
BasicBlock *PreInsertBefore,
BasicBlock *PostInsertBefore,
const Twine &Name = {});
};
/// Class to represented the control flow structure of an OpenMP canonical loop.
///
/// The control-flow structure is standardized for easy consumption by
/// directives associated with loops. For instance, the worksharing-loop
/// construct may change this control flow such that each loop iteration is
/// executed on only one thread. The constraints of a canonical loop in brief
/// are:
///
/// * The number of loop iterations must have been computed before entering the
/// loop.
///
/// * Has an (unsigned) logical induction variable that starts at zero and
/// increments by one.
///
/// * The loop's CFG itself has no side-effects. The OpenMP specification
/// itself allows side-effects, but the order in which they happen, including
/// how often or whether at all, is unspecified. We expect that the frontend
/// will emit those side-effect instructions somewhere (e.g. before the loop)
/// such that the CanonicalLoopInfo itself can be side-effect free.
///
/// Keep in mind that CanonicalLoopInfo is meant to only describe a repeated
/// execution of a loop body that satisfies these constraints. It does NOT
/// represent arbitrary SESE regions that happen to contain a loop. Do not use
/// CanonicalLoopInfo for such purposes.
///
/// The control flow can be described as follows:
///
/// Preheader
/// |
/// /-> Header
/// | |
/// | Cond---\
/// | | |
/// | Body |
/// | | | |
/// | <...> |
/// | | | |
/// \--Latch |
/// |
/// Exit
/// |
/// After
///
/// The loop is thought to start at PreheaderIP (at the Preheader's terminator,
/// including) and end at AfterIP (at the After's first instruction, excluding).
/// That is, instructions in the Preheader and After blocks (except the
/// Preheader's terminator) are out of CanonicalLoopInfo's control and may have
/// side-effects. Typically, the Preheader is used to compute the loop's trip
/// count. The instructions from BodyIP (at the Body block's first instruction,
/// excluding) until the Latch are also considered outside CanonicalLoopInfo's
/// control and thus can have side-effects. The body block is the single entry
/// point into the loop body, which may contain arbitrary control flow as long
/// as all control paths eventually branch to the Latch block.
///
/// TODO: Consider adding another standardized BasicBlock between Body CFG and
/// Latch to guarantee that there is only a single edge to the latch. It would
/// make loop transformations easier to not needing to consider multiple
/// predecessors of the latch (See redirectAllPredecessorsTo) and would give us
/// an equivalent to PreheaderIP, AfterIP and BodyIP for inserting code that
/// executes after each body iteration.
///
/// There must be no loop-carried dependencies through llvm::Values. This is
/// equivalent to that the Latch has no PHINode and the Header's only PHINode is
/// for the induction variable.
///
/// All code in Header, Cond, Latch and Exit (plus the terminator of the
/// Preheader) are CanonicalLoopInfo's responsibility and their build-up checked
/// by assertOK(). They are expected to not be modified unless explicitly
/// modifying the CanonicalLoopInfo through a method that applies an OpenMP
/// loop-associated construct such as applyWorkshareLoop, tileLoops, unrollLoop,
/// etc. These methods usually invalidate the CanonicalLoopInfo and re-use its
/// basic blocks. After invalidation, the CanonicalLoopInfo must not be used
/// anymore as its underlying control flow may not exist anymore.
/// Loop-transformation methods such as tileLoops, collapseLoops and unrollLoop
/// may also return a new CanonicalLoopInfo that can be passed to other
/// loop-associated construct implementing methods. These loop-transforming
/// methods may either create a new CanonicalLoopInfo usually using
/// createLoopSkeleton and invalidate the input CanonicalLoopInfo, or reuse and
/// modify one of the input CanonicalLoopInfo and return it as representing the
/// modified loop. What is done is an implementation detail of
/// transformation-implementing method and callers should always assume that the
/// CanonicalLoopInfo passed to it is invalidated and a new object is returned.
/// Returned CanonicalLoopInfo have the same structure and guarantees as the one
/// created by createCanonicalLoop, such that transforming methods do not have
/// to special case where the CanonicalLoopInfo originated from.
///
/// Generally, methods consuming CanonicalLoopInfo do not need an
/// OpenMPIRBuilder::InsertPointTy as argument, but use the locations of the
/// CanonicalLoopInfo to insert new or modify existing instructions. Unless
/// documented otherwise, methods consuming CanonicalLoopInfo do not invalidate
/// any InsertPoint that is outside CanonicalLoopInfo's control. Specifically,
/// any InsertPoint in the Preheader, After or Block can still be used after
/// calling such a method.
///
/// TODO: Provide mechanisms for exception handling and cancellation points.
///
/// Defined outside OpenMPIRBuilder because nested classes cannot be
/// forward-declared, e.g. to avoid having to include the entire OMPIRBuilder.h.
class CanonicalLoopInfo {
  friend class OpenMPIRBuilder;

private:
  BasicBlock *Header = nullptr;
  BasicBlock *Cond = nullptr;
  BasicBlock *Latch = nullptr;
  BasicBlock *Exit = nullptr;

  /// Append this loop's control blocks to \p BBs.
  ///
  /// The body block returned by getBody() and anything reachable from it is
  /// not included.
  ///
  /// FIXME: Preheader and After are currently collected as well even though
  /// their contents are (mostly) not controlled by CanonicalLoopInfo;
  /// re-evaluate whether that is sensible.
  void collectControlBlocks(SmallVectorImpl<BasicBlock *> &BBs);

public:
  /// Whether this object currently describes the IR of a loop. Returns false
  /// once the loop has been consumed by a transformation, or if it was never
  /// initialized; no other member function may be used in that case.
  bool isValid() const { return Header != nullptr; }

  /// The preheader guarantees a single edge entering the loop. Code that must
  /// execute before any iteration -- such as the trip count computation or
  /// lifetime begin markers -- can be emitted here; code in the preheader is
  /// not considered part of the canonical loop.
  BasicBlock *getPreheader() const;

  /// Entry block of every iteration; in the canonical control flow it holds
  /// only the PHINode of the induction variable.
  BasicBlock *getHeader() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Header;
  }

  /// Decides whether another iteration is executed: branches to the body if
  /// so, otherwise to the exit block.
  BasicBlock *getCond() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Cond;
  }

  /// Single entry point of a loop iteration; its contents are not controlled
  /// by CanonicalLoopInfo and may be arbitrary control flow, as long as every
  /// path eventually branches to the \p Latch block.
  BasicBlock *getBody() const {
    assert(isValid() && "Requires a valid canonical loop");
    // The body is the taken successor of the condition block's branch.
    return cast<BranchInst>(Cond->getTerminator())->getSuccessor(0);
  }

  /// Marks the end of the loop body code; in the canonical control flow it
  /// contains only the induction variable increment.
  BasicBlock *getLatch() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Latch;
  }

  /// Reached once no more iterations are being executed.
  BasicBlock *getExit() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Exit;
  }

  /// Intended for clean-up code such as lifetime end markers. Kept separate
  /// from the exit block so that -- analogous to the preheader -- it has just
  /// one entry edge and stays free of PHI nodes even when there are multiple
  /// loop exits (e.g. from break statements/cancellations).
  BasicBlock *getAfter() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Exit->getSingleSuccessor();
  }

  /// The llvm::Value holding the number of loop iterations. It must be valid
  /// in the preheader and is always interpreted as an unsigned integer of any
  /// bit-width.
  Value *getTripCount() const {
    assert(isValid() && "Requires a valid canonical loop");
    auto &Compare = Cond->front();
    assert(isa<CmpInst>(&Compare) &&
           "First inst must compare IV with TripCount");
    return Compare.getOperand(1);
  }

  /// The instruction representing the current logical induction variable:
  /// always unsigned, always starting at 0 with an increment of one.
  Instruction *getIndVar() const {
    assert(isValid() && "Requires a valid canonical loop");
    auto &IVPhi = Header->front();
    assert(isa<PHINode>(&IVPhi) && "First inst must be the IV PHI");
    return &IVPhi;
  }

  /// Type of the induction variable (and hence of the trip count).
  Type *getIndVarType() const {
    assert(isValid() && "Requires a valid canonical loop");
    return getIndVar()->getType();
  }

  /// Insertion point for user code before the loop.
  OpenMPIRBuilder::InsertPointTy getPreheaderIP() const {
    assert(isValid() && "Requires a valid canonical loop");
    BasicBlock *PreheaderBB = getPreheader();
    return {PreheaderBB, std::prev(PreheaderBB->end())};
  }

  /// Insertion point for user code in the loop body.
  OpenMPIRBuilder::InsertPointTy getBodyIP() const {
    assert(isValid() && "Requires a valid canonical loop");
    BasicBlock *BodyBB = getBody();
    return {BodyBB, BodyBB->begin()};
  }

  /// Insertion point for user code after the loop.
  OpenMPIRBuilder::InsertPointTy getAfterIP() const {
    assert(isValid() && "Requires a valid canonical loop");
    BasicBlock *AfterBB = getAfter();
    return {AfterBB, AfterBB->begin()};
  }

  /// Function containing the loop.
  Function *getFunction() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Header->getParent();
  }

  /// Consistency self-check.
  void assertOK() const;

  /// Invalidate this object: the underlying IR no longer fulfills the
  /// requirements of an OpenMP canonical loop.
  void invalidate();
};
} // end namespace llvm
#endif // LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
|
loop-12.c | /* { dg-do run } */
#include <omp.h>
extern void abort (void);
#define LLONG_MAX __LONG_LONG_MAX__
#define ULLONG_MAX (LLONG_MAX * 2ULL + 1)
#define INT_MAX __INT_MAX__
int arr[6 * 5];
/* Atomically record that iteration slot IDX of loop LOOPIDX was executed. */
void
set (int loopidx, int idx)
{
  /* Several threads may hit the same slot; keep the bump atomic.  */
#pragma omp atomic
  ++arr[loopidx * 5 + idx];
}
/* Record that VAR matched VAL for slot (LOOPIDX, IDX); the trailing `else'
   chains consecutive check()s into a single if/else ladder whose final else
   is the statement following the last check().  */
#define check(var, val, loopidx, idx) \
if (var == (val)) set (loopidx, idx); else
/* Verify that exactly the first COUNT slots of loop LOOPIDX were hit once
   each, then reset them.  `!= idx < count' parses as `!= (idx < count)':
   each counter must be exactly 1 for idx < count and 0 otherwise.  */
#define test(loopidx, count) \
for (idx = 0; idx < 5; idx++) \
if (arr[loopidx * 5 + idx] != idx < count) \
abort (); \
else \
arr[loopidx * 5 + idx] = 0
/* Run six worksharing loops whose iteration spaces sit at the extremes of
   the signed/unsigned 64-bit range under schedule(dynamic,1), and verify
   through the arr[] counters that each loop executed exactly the expected
   iterations.  The check() ladder ends in `e = 1;', so e is set only if a
   loop ran an unexpected iteration value.  */
int
test1 (void)
{
int e = 0, idx;
#pragma omp parallel reduction(+:e)
{
long long i;
unsigned long long j;
/* Three iterations just below LLONG_MAX (counting up).  */
#pragma omp for schedule(dynamic,1) nowait
for (i = LLONG_MAX - 30001; LLONG_MAX - 10001 >= i; i += 10000)
{
check (i, LLONG_MAX - 30001, 0, 0)
check (i, LLONG_MAX - 20001, 0, 1)
check (i, LLONG_MAX - 10001, 0, 2)
e = 1;
}
/* Three iterations just above -LLONG_MAX (counting down).  */
#pragma omp for schedule(dynamic,1) nowait
for (i = -LLONG_MAX + 30000; -LLONG_MAX + 10000 <= i; i -= 10000)
{
check (i, -LLONG_MAX + 30000, 1, 0)
check (i, -LLONG_MAX + 20000, 1, 1)
check (i, -LLONG_MAX + 10000, 1, 2)
e = 1;
}
/* Unsigned loop whose huge step allows only a single iteration.  */
#pragma omp for schedule(dynamic,1) nowait
for (j = 20; LLONG_MAX - 70 >= j; j += LLONG_MAX + 50ULL)
{
check (j, 20, 2, 0)
e = 1;
}
/* Same but counting down from near ULLONG_MAX.  */
#pragma omp for schedule(dynamic,1) nowait
for (j = ULLONG_MAX - 3; LLONG_MAX + 70ULL <= j; j -= LLONG_MAX + 50ULL)
{
check (j, ULLONG_MAX - 3, 3, 0)
e = 1;
}
/* Four iterations straddling LLONG_MAX in an unsigned counter.  */
#pragma omp for schedule(dynamic,1) nowait
for (j = LLONG_MAX - 20000ULL; LLONG_MAX + 10000ULL >= j; j += 10000ULL)
{
check (j, LLONG_MAX - 20000ULL, 4, 0)
check (j, LLONG_MAX - 10000ULL, 4, 1)
check (j, LLONG_MAX, 4, 2)
check (j, LLONG_MAX + 10000ULL, 4, 3)
e = 1;
}
/* Five iterations with a step slightly above INT_MAX.  */
#pragma omp for schedule(dynamic,1) nowait
for (i = -3LL * INT_MAX - 20000LL; INT_MAX + 10000LL >= i; i += INT_MAX + 200LL)
{
check (i, -3LL * INT_MAX - 20000LL, 5, 0)
check (i, -2LL * INT_MAX - 20000LL + 200LL, 5, 1)
check (i, -INT_MAX - 20000LL + 400LL, 5, 2)
check (i, -20000LL + 600LL, 5, 3)
check (i, INT_MAX - 20000LL + 800LL, 5, 4)
e = 1;
}
}
if (e)
abort ();
/* Each loop above must have hit exactly this many slots, once each.  */
test (0, 3);
test (1, 3);
test (2, 1);
test (3, 1);
test (4, 4);
test (5, 5);
return 0;
}
/* Identical iteration spaces and checks as test1, but every loop uses
   schedule(guided,1).  */
int
test2 (void)
{
int e = 0, idx;
#pragma omp parallel reduction(+:e)
{
long long i;
unsigned long long j;
#pragma omp for schedule(guided,1) nowait
for (i = LLONG_MAX - 30001; LLONG_MAX - 10001 >= i; i += 10000)
{
check (i, LLONG_MAX - 30001, 0, 0)
check (i, LLONG_MAX - 20001, 0, 1)
check (i, LLONG_MAX - 10001, 0, 2)
e = 1;
}
#pragma omp for schedule(guided,1) nowait
for (i = -LLONG_MAX + 30000; -LLONG_MAX + 10000 <= i; i -= 10000)
{
check (i, -LLONG_MAX + 30000, 1, 0)
check (i, -LLONG_MAX + 20000, 1, 1)
check (i, -LLONG_MAX + 10000, 1, 2)
e = 1;
}
#pragma omp for schedule(guided,1) nowait
for (j = 20; LLONG_MAX - 70 >= j; j += LLONG_MAX + 50ULL)
{
check (j, 20, 2, 0)
e = 1;
}
#pragma omp for schedule(guided,1) nowait
for (j = ULLONG_MAX - 3; LLONG_MAX + 70ULL <= j; j -= LLONG_MAX + 50ULL)
{
check (j, ULLONG_MAX - 3, 3, 0)
e = 1;
}
#pragma omp for schedule(guided,1) nowait
for (j = LLONG_MAX - 20000ULL; LLONG_MAX + 10000ULL >= j; j += 10000ULL)
{
check (j, LLONG_MAX - 20000ULL, 4, 0)
check (j, LLONG_MAX - 10000ULL, 4, 1)
check (j, LLONG_MAX, 4, 2)
check (j, LLONG_MAX + 10000ULL, 4, 3)
e = 1;
}
#pragma omp for schedule(guided,1) nowait
for (i = -3LL * INT_MAX - 20000LL; INT_MAX + 10000LL >= i; i += INT_MAX + 200LL)
{
check (i, -3LL * INT_MAX - 20000LL, 5, 0)
check (i, -2LL * INT_MAX - 20000LL + 200LL, 5, 1)
check (i, -INT_MAX - 20000LL + 400LL, 5, 2)
check (i, -20000LL + 600LL, 5, 3)
check (i, INT_MAX - 20000LL + 800LL, 5, 4)
e = 1;
}
}
if (e)
abort ();
test (0, 3);
test (1, 3);
test (2, 1);
test (3, 1);
test (4, 4);
test (5, 5);
return 0;
}
/* Identical iteration spaces and checks as test1, but every loop uses
   schedule(static) with the default chunking.  */
int
test3 (void)
{
int e = 0, idx;
#pragma omp parallel reduction(+:e)
{
long long i;
unsigned long long j;
#pragma omp for schedule(static) nowait
for (i = LLONG_MAX - 30001; LLONG_MAX - 10001 >= i; i += 10000)
{
check (i, LLONG_MAX - 30001, 0, 0)
check (i, LLONG_MAX - 20001, 0, 1)
check (i, LLONG_MAX - 10001, 0, 2)
e = 1;
}
#pragma omp for schedule(static) nowait
for (i = -LLONG_MAX + 30000; -LLONG_MAX + 10000 <= i; i -= 10000)
{
check (i, -LLONG_MAX + 30000, 1, 0)
check (i, -LLONG_MAX + 20000, 1, 1)
check (i, -LLONG_MAX + 10000, 1, 2)
e = 1;
}
#pragma omp for schedule(static) nowait
for (j = 20; LLONG_MAX - 70 >= j; j += LLONG_MAX + 50ULL)
{
check (j, 20, 2, 0)
e = 1;
}
#pragma omp for schedule(static) nowait
for (j = ULLONG_MAX - 3; LLONG_MAX + 70ULL <= j; j -= LLONG_MAX + 50ULL)
{
check (j, ULLONG_MAX - 3, 3, 0)
e = 1;
}
#pragma omp for schedule(static) nowait
for (j = LLONG_MAX - 20000ULL; LLONG_MAX + 10000ULL >= j; j += 10000ULL)
{
check (j, LLONG_MAX - 20000ULL, 4, 0)
check (j, LLONG_MAX - 10000ULL, 4, 1)
check (j, LLONG_MAX, 4, 2)
check (j, LLONG_MAX + 10000ULL, 4, 3)
e = 1;
}
#pragma omp for schedule(static) nowait
for (i = -3LL * INT_MAX - 20000LL; INT_MAX + 10000LL >= i; i += INT_MAX + 200LL)
{
check (i, -3LL * INT_MAX - 20000LL, 5, 0)
check (i, -2LL * INT_MAX - 20000LL + 200LL, 5, 1)
check (i, -INT_MAX - 20000LL + 400LL, 5, 2)
check (i, -20000LL + 600LL, 5, 3)
check (i, INT_MAX - 20000LL + 800LL, 5, 4)
e = 1;
}
}
if (e)
abort ();
test (0, 3);
test (1, 3);
test (2, 1);
test (3, 1);
test (4, 4);
test (5, 5);
return 0;
}
/* Identical iteration spaces and checks as test1, but every loop uses
   schedule(static,1), i.e. cyclic distribution with chunk size 1.  */
int
test4 (void)
{
int e = 0, idx;
#pragma omp parallel reduction(+:e)
{
long long i;
unsigned long long j;
#pragma omp for schedule(static,1) nowait
for (i = LLONG_MAX - 30001; LLONG_MAX - 10001 >= i; i += 10000)
{
check (i, LLONG_MAX - 30001, 0, 0)
check (i, LLONG_MAX - 20001, 0, 1)
check (i, LLONG_MAX - 10001, 0, 2)
e = 1;
}
#pragma omp for schedule(static,1) nowait
for (i = -LLONG_MAX + 30000; -LLONG_MAX + 10000 <= i; i -= 10000)
{
check (i, -LLONG_MAX + 30000, 1, 0)
check (i, -LLONG_MAX + 20000, 1, 1)
check (i, -LLONG_MAX + 10000, 1, 2)
e = 1;
}
#pragma omp for schedule(static,1) nowait
for (j = 20; LLONG_MAX - 70 >= j; j += LLONG_MAX + 50ULL)
{
check (j, 20, 2, 0)
e = 1;
}
#pragma omp for schedule(static,1) nowait
for (j = ULLONG_MAX - 3; LLONG_MAX + 70ULL <= j; j -= LLONG_MAX + 50ULL)
{
check (j, ULLONG_MAX - 3, 3, 0)
e = 1;
}
#pragma omp for schedule(static,1) nowait
for (j = LLONG_MAX - 20000ULL; LLONG_MAX + 10000ULL >= j; j += 10000ULL)
{
check (j, LLONG_MAX - 20000ULL, 4, 0)
check (j, LLONG_MAX - 10000ULL, 4, 1)
check (j, LLONG_MAX, 4, 2)
check (j, LLONG_MAX + 10000ULL, 4, 3)
e = 1;
}
#pragma omp for schedule(static,1) nowait
for (i = -3LL * INT_MAX - 20000LL; INT_MAX + 10000LL >= i; i += INT_MAX + 200LL)
{
check (i, -3LL * INT_MAX - 20000LL, 5, 0)
check (i, -2LL * INT_MAX - 20000LL + 200LL, 5, 1)
check (i, -INT_MAX - 20000LL + 400LL, 5, 2)
check (i, -20000LL + 600LL, 5, 3)
check (i, INT_MAX - 20000LL + 800LL, 5, 4)
e = 1;
}
}
if (e)
abort ();
test (0, 3);
test (1, 3);
test (2, 1);
test (3, 1);
test (4, 4);
test (5, 5);
return 0;
}
/* Identical iteration spaces and checks as test1, but every loop uses
   schedule(runtime); the actual schedule is selected by the
   omp_set_schedule() calls in main().  */
int
test5 (void)
{
int e = 0, idx;
#pragma omp parallel reduction(+:e)
{
long long i;
unsigned long long j;
#pragma omp for schedule(runtime) nowait
for (i = LLONG_MAX - 30001; LLONG_MAX - 10001 >= i; i += 10000)
{
check (i, LLONG_MAX - 30001, 0, 0)
check (i, LLONG_MAX - 20001, 0, 1)
check (i, LLONG_MAX - 10001, 0, 2)
e = 1;
}
#pragma omp for schedule(runtime) nowait
for (i = -LLONG_MAX + 30000; -LLONG_MAX + 10000 <= i; i -= 10000)
{
check (i, -LLONG_MAX + 30000, 1, 0)
check (i, -LLONG_MAX + 20000, 1, 1)
check (i, -LLONG_MAX + 10000, 1, 2)
e = 1;
}
#pragma omp for schedule(runtime) nowait
for (j = 20; LLONG_MAX - 70 >= j; j += LLONG_MAX + 50ULL)
{
check (j, 20, 2, 0)
e = 1;
}
#pragma omp for schedule(runtime) nowait
for (j = ULLONG_MAX - 3; LLONG_MAX + 70ULL <= j; j -= LLONG_MAX + 50ULL)
{
check (j, ULLONG_MAX - 3, 3, 0)
e = 1;
}
#pragma omp for schedule(runtime) nowait
for (j = LLONG_MAX - 20000ULL; LLONG_MAX + 10000ULL >= j; j += 10000ULL)
{
check (j, LLONG_MAX - 20000ULL, 4, 0)
check (j, LLONG_MAX - 10000ULL, 4, 1)
check (j, LLONG_MAX, 4, 2)
check (j, LLONG_MAX + 10000ULL, 4, 3)
e = 1;
}
#pragma omp for schedule(runtime) nowait
for (i = -3LL * INT_MAX - 20000LL; INT_MAX + 10000LL >= i; i += INT_MAX + 200LL)
{
check (i, -3LL * INT_MAX - 20000LL, 5, 0)
check (i, -2LL * INT_MAX - 20000LL + 200LL, 5, 1)
check (i, -INT_MAX - 20000LL + 400LL, 5, 2)
check (i, -20000LL + 600LL, 5, 3)
check (i, INT_MAX - 20000LL + 800LL, 5, 4)
e = 1;
}
}
if (e)
abort ();
test (0, 3);
test (1, 3);
test (2, 1);
test (3, 1);
test (4, 4);
test (5, 5);
return 0;
}
int
main (void)
{
/* The iteration spaces above are built around __LONG_LONG_MAX__ and
   __INT_MAX__ and assume long long is exactly twice as wide as int;
   skip the whole test otherwise.  */
if (2 * sizeof (int) != sizeof (long long))
return 0;
test1 ();
test2 ();
test3 ();
test4 ();
/* Exercise schedule(runtime) under several runtime-selected schedules.  */
omp_set_schedule (omp_sched_static, 0);
test5 ();
omp_set_schedule (omp_sched_static, 3);
test5 ();
omp_set_schedule (omp_sched_dynamic, 5);
test5 ();
omp_set_schedule (omp_sched_guided, 2);
test5 ();
return 0;
}
|
taskloop.c | #include <omp.h>
/*
 * Example of a taskloop with the nogroup clause nested inside a taskgroup:
 * the enclosing taskgroup waits for both compute_update() and all taskloop
 * tasks before it ends.
 *
 * NOTE(review): the lock and n parameters are unused and `void main' with
 * this signature is non-standard -- both kept as-is to preserve the
 * interface.
 */
void main ( omp_lock_t*lock, int n )
{
  int data1 = 10;
  int N = 100;
  int M = 50;
#pragma omp parallel
  {
#pragma omp single
#pragma omp taskgroup
    {
      compute_update(data1);
      /* data1 is shared here, and the taskloop generates N*M tasks that all
         update it concurrently -- without synchronization that is a data
         race.  Make each update atomic.  (Each update computes data1 + 1.3
         in double and truncates back to int on the store -- presumably
         intentional; verify.)  */
#pragma omp taskloop collapse(2) nogroup
      for (int i=0; i<N; i++)
        for (int j=0; j<M; j++)
#pragma omp atomic
          data1 = data1 + 1.3;
    }
  }
}
|
SlicedC02BasedTraversal.h | /**
* @file SlicedC02BasedTraversal.h
*
* @date 24 May 2020
* @author fischerv
*/
#pragma once
#include <algorithm>
#include "autopas/containers/cellPairTraversals/SlicedBasedTraversal.h"
#include "autopas/utils/DataLayoutConverter.h"
#include "autopas/utils/ThreeDimensionalMapping.h"
#include "autopas/utils/WrapOpenMP.h"
namespace autopas {
/**
* This class provides the colored sliced traversal.
*
* The traversal finds the longest dimension of the simulation domain and cuts
* the domain into as many slices as possible along this dimension. Unlike the regular
* sliced traversal, this version uses a 2-coloring to prevent race conditions, instead of
 * locking the starting layers. This could also be described as a c02-traversal. This class
* is however not derived from CBasedTraversal, as that would not allow varying slice thicknesses,
* and would prevent us from selecting the dimension in which we cut the slices.
*
* @tparam ParticleCell The type of cells.
* @tparam PairwiseFunctor The functor that defines the interaction of two particles.
* @tparam dataLayout
* @tparam useNewton3
 * @tparam spaciallyForward Whether the base step only covers neighboring cells that are spatially forward (for example
* c08)
*/
template <class ParticleCell, class PairwiseFunctor, DataLayoutOption::Value dataLayout, bool useNewton3,
          bool spaciallyForward>
class SlicedC02BasedTraversal
    : public SlicedBasedTraversal<ParticleCell, PairwiseFunctor, dataLayout, useNewton3, spaciallyForward> {
 public:
  /**
   * Constructor of the colored sliced traversal.
   * @param dims Number of cells per dimension (x, y, z) of the cellblock.
   * @param pairwiseFunctor The functor defining the interaction of two particles.
   * @param interactionLength Interaction length (cutoff + skin).
   * @param cellLength cell length.
   */
  explicit SlicedC02BasedTraversal(const std::array<unsigned long, 3> &dims, PairwiseFunctor *pairwiseFunctor,
                                   const double interactionLength, const std::array<double, 3> &cellLength)
      : SlicedBasedTraversal<ParticleCell, PairwiseFunctor, dataLayout, useNewton3, spaciallyForward>(
            dims, pairwiseFunctor, interactionLength, cellLength) {}

  /**
   * The main traversal of the colored sliced traversal:
   * provides the loop structure and its parallelization.
   *
   * @copydetails C01BasedTraversal::c01Traversal()
   */
  template <typename LoopBody>
  inline void cSlicedTraversal(LoopBody &&loopBody);

  /**
   * Checks whether the traversal can be applied to the current state of the domain.
   * @return true iff the traversal is applicable.
   */
  [[nodiscard]] bool isApplicable() const override {
    // Not available for CUDA, and the longest axis must fit at least one slice of minimal thickness.
    const bool isCuda = dataLayout == DataLayoutOption::cuda;
    return not isCuda and this->_cellsPerDimension[this->_dimsPerLength[0]] >= this->_overlapLongestAxis;
  }

  /**
   * Loads the data layout and sets up the slice thicknesses.
   */
  void initTraversal() override {
    this->loadDataLayout();
    // Split the domain along its longest dimension; each slice is at least as thick as the overlap.
    this->initSliceThickness(this->_overlapLongestAxis);
  }
};
template <class ParticleCell, class PairwiseFunctor, DataLayoutOption::Value dataLayout, bool useNewton3,
          bool spaciallyForward>
template <typename LoopBody>
void SlicedC02BasedTraversal<ParticleCell, PairwiseFunctor, dataLayout, useNewton3, spaciallyForward>::cSlicedTraversal(
    LoopBody &&loopBody) {
  const auto sliceCount = this->_sliceThickness.size();

  // Overlap in the two non-cut dimensions; irrelevant when the base step is not spacially forward.
  std::array<size_t, 2> innerOverlap{this->_overlap[this->_dimsPerLength[1]], this->_overlap[this->_dimsPerLength[2]]};
  if (not spaciallyForward) {
    innerOverlap = {0ul, 0ul};
  }

  // Two-coloring: first process all even-numbered slices, then all odd ones, so
  // concurrently processed slices are never adjacent.
  for (size_t color = 0; color < 2; color++) {
#ifdef AUTOPAS_OPENMP
    // although every thread gets exactly one iteration (=slice) this is faster than a normal parallel region
#pragma omp parallel for schedule(dynamic, 1)
#endif
    for (size_t slice = color; slice < sliceCount; slice += 2) {
      // First layer of this slice = sum of the thicknesses of all preceding slices.
      unsigned long firstLayer = 0;
      for (size_t s = 0; s < slice; ++s) {
        firstLayer += this->_sliceThickness[s];
      }
      const auto lastLayer = firstLayer + this->_sliceThickness[slice];
      for (unsigned long layer = firstLayer; layer < lastLayer; ++layer) {
        for (unsigned long mid = 0; mid < this->_cellsPerDimension[this->_dimsPerLength[1]] - innerOverlap[0]; ++mid) {
          for (unsigned long srt = 0; srt < this->_cellsPerDimension[this->_dimsPerLength[2]] - innerOverlap[1];
               ++srt) {
            // Map (layer, mid, srt) back to x/y/z cell coordinates.
            std::array<unsigned long, 3> cellIndex{};
            cellIndex[this->_dimsPerLength[0]] = layer;
            cellIndex[this->_dimsPerLength[1]] = mid;
            cellIndex[this->_dimsPerLength[2]] = srt;
            loopBody(cellIndex[0], cellIndex[1], cellIndex[2]);
          }
        }
      }
    }
  }
}
} // namespace autopas
|
main.c |
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"
#include <time.h>
#include <omp.h>
/* Elapsed time in seconds between two clock_gettime() samples. */
double interval(struct timespec start, struct timespec end)
{
    /* Component-wise difference, then normalize so the nanosecond part is
       non-negative (borrow one second if needed). */
    struct timespec diff;
    diff.tv_sec = end.tv_sec - start.tv_sec;
    diff.tv_nsec = end.tv_nsec - start.tv_nsec;
    if (diff.tv_nsec < 0) {
        diff.tv_sec -= 1;
        diff.tv_nsec += 1000000000;
    }
    return (double)diff.tv_sec + (double)diff.tv_nsec * 1.0e-9;
}
/*
This method does not require adjusting a #define constant
How to use this method:
struct timespec time_start, time_stop;
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time_start);
// DO SOMETHING THAT TAKES TIME
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time_stop);
measurement = interval(time_start, time_stop);
*/
#define PLATEAU 0
typedef unsigned char image_t, *image_ptr_t;
typedef int img_t, *img_ptr_t;
img_ptr_t convert2data(image_ptr_t image, int width, int height);
image_ptr_t convert2image(img_ptr_t image, int width, int height);
void steepest_descent_kernel(img_ptr_t in, img_ptr_t *out, int width, int height);
void border_kernel(img_ptr_t image, img_ptr_t in, img_ptr_t *out, int width, int height);
void minima_basin_kernel(img_ptr_t image, img_ptr_t in, img_ptr_t *out, int width, int height);
void watershed_kernel(img_ptr_t image, img_ptr_t in, img_ptr_t *out, int width, int height);
int main(int argc, char **argv);
/*
 * Watershed segmentation driver: loads a grayscale image named by argv[1],
 * runs the four kernel stages, prints the elapsed CPU time in seconds, and
 * writes one PNG per stage.  Returns 0 on success, 1 on usage/load errors.
 */
int main(int argc, char **argv)
{
    if (argc < 2) {
        fprintf(stderr, "usage: %s <input-image>\n", argv[0]);
        return 1;
    }

    int width, height, channels;
    image_ptr_t data = stbi_load(argv[1], &width, &height, &channels, 1);
    if (data == NULL) {
        fprintf(stderr, "failed to load image '%s'\n", argv[1]);
        return 1;
    }
    img_ptr_t input = convert2data(data, width, height);
    stbi_image_free(data);

    img_ptr_t lowest_descent = NULL;
    img_ptr_t border = NULL;
    img_ptr_t minima = NULL;
    img_ptr_t watershed = NULL;

    struct timespec time_start, time_stop;
    clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time_start); // start timer
    steepest_descent_kernel(input, &lowest_descent, width, height);
    border_kernel(input, lowest_descent, &border, width, height);
    minima_basin_kernel(input, border, &minima, width, height);
    watershed_kernel(input, minima, &watershed, width, height);
    clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time_stop);
    printf("%f\n", interval(time_start, time_stop));

    /* convert2image allocates a fresh buffer each call; keep the pointer so
       it can be freed after writing (these buffers previously leaked).
       NOTE(review): the image was loaded forced to 1 channel, but `channels`
       holds the file's original channel count -- confirm whether
       stbi_write_png should simply use 1 here. */
    image_ptr_t out;
    out = convert2image(lowest_descent, width, height);
    stbi_write_png("1_lowest_descent_result.png", width, height, channels, out, width * channels);
    free(out);
    out = convert2image(border, width, height);
    stbi_write_png("2_border_result.png", width, height, channels, out, width * channels);
    free(out);
    out = convert2image(minima, width, height);
    stbi_write_png("3_minima_basin_result.png", width, height, channels, out, width * channels);
    free(out);
    out = convert2image(watershed, width, height);
    stbi_write_png("4_watershed_result.png", width, height, channels, out, width * channels);
    free(out);

    free(watershed);
    free(minima); /* was never freed in the original */
    free(lowest_descent);
    free(border);
    free(input);
    return 0;
}
/* Widen 8-bit pixels to int so the kernels can store negative labels and
 * pixel indices in the same buffer. Caller owns the returned buffer. */
img_ptr_t convert2data(image_ptr_t image, int width, int height)
{
    const int pixel_count = width * height;
    img_ptr_t out = (img_ptr_t)calloc(pixel_count, sizeof(img_t));
    for (int p = 0; p < pixel_count; ++p)
    {
        out[p] = (img_t)image[p];
    }
    return out;
}
/* Linearly rescale an int image into [0, 255] so it can be written as an
 * 8-bit PNG. Caller owns the returned buffer. */
image_ptr_t convert2image(img_ptr_t image, int width, int height)
{
    // Step 1: find min and max values from the image
    img_t max = INT_MIN, min = INT_MAX;
    for (int i = 0; i < height; i++)
    {
        for (int j = 0; j < width; j++)
        {
            img_t current_pixel = image[i * width + j];
            if (current_pixel < min)
                min = current_pixel;
            if (current_pixel > max)
                max = current_pixel;
        }
    }
    // Step 2: create a new image with the values scaled to [0-255]
    image_ptr_t temp = (image_ptr_t)calloc(width * height, sizeof(image_t));
    if (temp == NULL)
    {
        perror("Failed to allocate memory!\n");
        exit(EXIT_FAILURE);
    }
    /* BUG FIX: a uniform image made (max - min) zero and every pixel became
     * 0/0 (NaN); dividing by 1 instead maps all pixels to 0. */
    float max_min = (max > min) ? (float)(max - min) : 1.0f;
    for (int i = 0; i < height; i++)
    {
        for (int j = 0; j < width; j++)
        {
            img_t pix_val = image[i * width + j];
            float val = (pix_val - min) / max_min;
            temp[i * width + j] = (image_t)(val * 255);
        }
    }
    return temp;
}
/*
 * For every interior pixel, find the lowest 8-connected neighbour; if that
 * neighbour is strictly lower than the pixel, store the negated index of the
 * neighbour in *out. Otherwise the pixel is on a plateau and gets PLATEAU.
 * Border pixels stay PLATEAU (calloc zero-fill). Caller owns *out.
 */
void steepest_descent_kernel(img_ptr_t in, img_ptr_t *out, int width, int height)
{
    img_ptr_t _lowest = (img_ptr_t)calloc(width * height, sizeof(img_t));
    if (_lowest == NULL)
    {
        perror("Failed to allocate memory!\n");
        exit(EXIT_FAILURE);
    }
    /* 8-connected neighbour offsets, in the same order the original code
     * probed them: E, W, S, N, NE, NW, SE, SW. */
    static const int di[8] = { 0, 0, 1, -1, -1, -1, 1, 1 };
    static const int dj[8] = { 1, -1, 0, 0, 1, -1, 1, -1 };
#pragma omp parallel for
    for (int i = 1; i < height - 1; i++)
    {
        for (int j = 1; j < width - 1; j++)
        {
            /* BUG FIX: the original sentinel `(img_t)INFINITY` converts a
             * float infinity to int, which is undefined behavior; INT_MAX is
             * the intended "larger than any pixel" value. */
            img_t min = INT_MAX;
            for (int k = 0; k < 8; ++k)
            {
                img_t v = in[(i + di[k]) * width + (j + dj[k])];
                if (v < min)
                    min = v;
            }
            /* Point at the first strictly-lower neighbour equal to the
             * minimum. BUG FIX: the original stored -((i-1)*width+j) for the
             * (i+1, j) neighbour — a copy-paste error; the offset table makes
             * the tested and stored index identical by construction. */
            img_t p = in[i * width + j];
            bool exists_q = false;
            for (int k = 0; k < 8 && !exists_q; ++k)
            {
                int ni = i + di[k];
                int nj = j + dj[k];
                img_t v = in[ni * width + nj];
                if (p > v && v == min)
                {
                    _lowest[i * width + j] = -(ni * width + nj);
                    exists_q = true;
                }
            }
            if (!exists_q)
            {
                _lowest[i * width + j] = (img_t)PLATEAU;
            }
        }
    }
    *out = _lowest;
}
/*
 * Iteratively extend steepest-descent labels across plateau pixels: a
 * PLATEAU pixel adopts (as a negated index) any labelled 8-neighbour with the
 * same image intensity, repeating until no assignment changes. Remaining
 * PLATEAU pixels are local minima and are labelled with their own index.
 * Caller owns *out.
 */
void border_kernel(img_ptr_t image, img_ptr_t in, img_ptr_t *out, int width, int height)
{
    img_ptr_t _border = (img_ptr_t)calloc(width * height, sizeof(img_t));
    /* BUG FIX: the original checked `in` (the caller's buffer) instead of the
     * allocation it just made. */
    if (_border == NULL)
    {
        perror("Failed to allocate memory!\n");
        exit(EXIT_FAILURE);
    }
    bool stable = false;
    img_ptr_t temp_border = (img_ptr_t)calloc(width * height, sizeof(img_t));
    if (temp_border == NULL)
    {
        perror("Failed to allocate memory!\n");
        exit(EXIT_FAILURE);
    }
    while (!stable)
    {
        stable = true;
        memcpy(temp_border, _border, width * height * sizeof(img_t));
        /* NOTE(review): multiple threads may store `stable = false`
         * concurrently; every writer stores the same value but this is still
         * a data race under the C memory model — consider a reduction.
         * NOTE(review): each `break` exits the whole j-loop, skipping the rest
         * of the row for this sweep; convergence still occurs because the
         * outer while iterates to a fixed point. Kept to preserve behavior. */
#pragma omp parallel for
        for (int i = 1; i < height - 1; i++)
        {
            for (int j = 1; j < width - 1; j++)
            {
                if (in[i * width + j] == (img_t)PLATEAU)
                {
                    if (in[i * width + (j + 1)] < 0 && image[i * width + (j + 1)] == image[i * width + j])
                    {
                        if (temp_border[i * width + j] != -(i * width + (j + 1)))
                            stable = false;
                        temp_border[i * width + j] = -(i * width + (j + 1));
                        break;
                    }
                    if (in[i * width + (j - 1)] < 0 && image[i * width + (j - 1)] == image[i * width + j])
                    {
                        if (temp_border[i * width + j] != -(i * width + (j - 1)))
                            stable = false;
                        temp_border[i * width + j] = -(i * width + (j - 1));
                        break;
                    }
                    if (in[(i + 1) * width + (j + 1)] < 0 && image[(i + 1) * width + (j + 1)] == image[i * width + j])
                    {
                        if (temp_border[i * width + j] != -((i + 1) * width + (j + 1)))
                            stable = false;
                        temp_border[i * width + j] = -((i + 1) * width + (j + 1));
                        break;
                    }
                    if (in[(i + 1) * width + (j - 1)] < 0 && image[(i + 1) * width + (j - 1)] == image[i * width + j])
                    {
                        if (temp_border[i * width + j] != -((i + 1) * width + (j - 1)))
                            stable = false;
                        temp_border[i * width + j] = -((i + 1) * width + (j - 1));
                        break;
                    }
                    if (in[(i - 1) * width + (j + 1)] < 0 && image[(i - 1) * width + (j + 1)] == image[i * width + j])
                    {
                        if (temp_border[i * width + j] != -((i - 1) * width + (j + 1)))
                            stable = false;
                        temp_border[i * width + j] = -((i - 1) * width + (j + 1));
                        break;
                    }
                    if (in[(i - 1) * width + (j - 1)] < 0 && image[(i - 1) * width + (j - 1)] == image[i * width + j])
                    {
                        if (temp_border[i * width + j] != -((i - 1) * width + (j - 1)))
                            stable = false;
                        temp_border[i * width + j] = -((i - 1) * width + (j - 1));
                        break;
                    }
                    if (in[(i + 1) * width + j] < 0 && image[(i + 1) * width + j] == image[i * width + j])
                    {
                        if (temp_border[i * width + j] != -((i + 1) * width + j))
                            stable = false;
                        temp_border[i * width + j] = -((i + 1) * width + j);
                        break;
                    }
                    if (in[(i - 1) * width + j] < 0 && image[(i - 1) * width + j] == image[i * width + j])
                    {
                        if (temp_border[i * width + j] != -((i - 1) * width + j))
                            stable = false;
                        temp_border[i * width + j] = -((i - 1) * width + j);
                        break;
                    }
                }
            }
        }
        memcpy(_border, temp_border, width * height * sizeof(img_t));
    }
    /* Any pixel still PLATEAU is a local minimum: label it with itself. */
#pragma omp parallel for
    for (int i = 0; i < height; i++)
    {
        for (int j = 0; j < width; j++)
        {
            if (in[i * width + j] == (img_t)PLATEAU)
            {
                _border[i * width + j] = -(i * width + j);
            }
        }
    }
    free(temp_border); /* BUG FIX: the scratch buffer was leaked */
    *out = _border;
}
/*
 * Merge minima into basins: every positively-labelled pixel repeatedly adopts
 * the smallest label among equal-intensity 8-neighbours, then label chains
 * are path-compressed, until no label changes. Caller owns *out.
 */
void minima_basin_kernel(img_ptr_t image, img_ptr_t in, img_ptr_t *out, int width, int height)
{
    img_ptr_t _minima = (img_ptr_t)calloc(width * height, sizeof(img_t));
    if (_minima == NULL)
    {
        perror("Failed to allocate memory!\n");
        exit(EXIT_FAILURE);
    }
    memcpy(_minima, in, height * width * sizeof(img_t));
    bool stable = false;
    while (!stable)
    {
        stable = true;
        /* NOTE(review): threads write `stable` and `_minima[_minima[...]]`
         * without synchronization; iterating to a fixed point masks most
         * effects, but this is a data race under the C memory model. */
#pragma omp parallel for
        for (int i = 1; i < height - 1; i++)
        {
            for (int j = 1; j < width - 1; j++)
            {
                if (_minima[i * width + j] > (img_t)PLATEAU)
                {
                    /* BUG FIX: `(img_t)INFINITY` converts a float infinity to
                     * int — undefined behavior; INT_MAX is the intended
                     * "larger than any label" sentinel. */
                    img_t label = INT_MAX;
                    if (_minima[i * width + (j + 1)] < label && image[i * width + (j + 1)] == image[i * width + j])
                    {
                        label = _minima[i * width + (j + 1)];
                    }
                    if (_minima[i * width + (j - 1)] < label && image[i * width + (j - 1)] == image[i * width + j])
                    {
                        label = _minima[i * width + (j - 1)];
                    }
                    if (_minima[(i + 1) * width + (j + 1)] < label && image[(i + 1) * width + (j + 1)] == image[i * width + j])
                    {
                        label = _minima[(i + 1) * width + (j + 1)];
                    }
                    if (_minima[(i + 1) * width + (j - 1)] < label && image[(i + 1) * width + (j - 1)] == image[i * width + j])
                    {
                        label = _minima[(i + 1) * width + (j - 1)];
                    }
                    if (_minima[(i - 1) * width + (j + 1)] < label && image[(i - 1) * width + (j + 1)] == image[i * width + j])
                    {
                        label = _minima[(i - 1) * width + (j + 1)];
                    }
                    if (_minima[(i - 1) * width + (j - 1)] < label && image[(i - 1) * width + (j - 1)] == image[i * width + j])
                    {
                        label = _minima[(i - 1) * width + (j - 1)];
                    }
                    if (_minima[(i + 1) * width + j] < label && image[(i + 1) * width + j] == image[i * width + j])
                    {
                        label = _minima[(i + 1) * width + j];
                    }
                    if (_minima[(i - 1) * width + j] < label && image[(i - 1) * width + j] == image[i * width + j])
                    {
                        label = _minima[(i - 1) * width + j];
                    }
                    if (label < _minima[i * width + j])
                    {
                        /* Redirect this pixel's current root to the smaller label. */
                        if (_minima[_minima[i * width + j]] != label)
                        {
                            stable = false;
                        }
                        _minima[_minima[i * width + j]] = label;
                    }
                }
            }
        }
        /* Path compression: point each pixel directly at its chain root. */
#pragma omp parallel for
        for (int i = 1; i < height - 1; i++)
        {
            for (int j = 1; j < width - 1; j++)
            {
                if (_minima[i * width + j] > (img_t)PLATEAU)
                {
                    img_t label = _minima[i * width + j];
                    /* BUG FIX: same UB sentinel as above; INT_MAX can never
                     * equal a real pixel index here. */
                    img_t ref = INT_MAX;
                    while (label != ref)
                    {
                        ref = label;
                        label = _minima[ref];
                    }
                    if (label != ref)
                    {
                        stable = false;
                    }
                    _minima[i * width + j] = label;
                }
            }
        }
    }
    *out = _minima;
}
/*
 * Final pass: make every label positive, then flatten label chains so each
 * interior pixel points directly at its basin root. Caller owns *out.
 */
void watershed_kernel(img_ptr_t image, img_ptr_t in, img_ptr_t *out, int width, int height)
{
    img_ptr_t _watershed = (img_ptr_t)calloc(height * width, sizeof(img_t));
    if (_watershed == NULL)
    {
        perror("Failed to allocate memory!\n");
        exit(EXIT_FAILURE);
    }
    memcpy(_watershed, in, height * width * sizeof(img_t));
    /* Negated neighbour indices become plain indices. */
#pragma omp parallel for
    for (int i = 0; i < height; i++)
    {
        for (int j = 0; j < width; j++)
        {
            _watershed[i * width + j] = abs(_watershed[i * width + j]);
        }
    }
#pragma omp parallel for
    for (int i = 1; i < height - 1; i++)
    {
        for (int j = 1; j < width - 1; j++)
        {
            img_t label = _watershed[i * width + j];
            if (label != (i * width + j))
            {
                /* BUG FIX: `(img_t)INFINITY` is undefined behavior when
                 * converted to int; INT_MAX is a safe "not yet visited"
                 * sentinel that can never equal a pixel index. */
                img_t ref = INT_MAX;
                while (ref != label)
                {
                    ref = label;
                    label = _watershed[ref];
                }
                _watershed[i * width + j] = label;
            }
        }
    }
    *out = _watershed;
}
producer-consumer-with-linkedlist.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
// Singly linked list node used as the shared producer/consumer queue.
struct Node{
    int data;               // payload value
    struct Node *next;      // next node, or NULL at the tail
};
// Shared queue head; accessed only inside the `omp critical` sections in main().
struct Node* head = NULL;
/* Append a new node holding new_data at the tail of the list. O(n) walk. */
void insertLast(struct Node** head_ref, int new_data){
    struct Node* new_node = malloc(sizeof *new_node);
    /* BUG FIX: malloc was used unchecked; a failed allocation would be
     * dereferenced immediately below. */
    if (new_node == NULL){
        perror("malloc");
        exit(EXIT_FAILURE);
    }
    new_node->data = new_data;
    new_node->next = NULL;
    if (*head_ref == NULL){
        *head_ref = new_node;
        return;
    }
    struct Node *last = *head_ref;
    while (last->next != NULL)
        last = last->next;
    last->next = new_node;
}
/* Detach and return the first node, or NULL if the list is empty.
 * Ownership of the returned node transfers to the caller (who must free it). */
struct Node* deleteFirst(struct Node** head_ref) {
    struct Node *first = *head_ref;
    if (first == NULL){
        printf("List is empty!");
        return NULL;
    }
    *head_ref = first->next;
    return first;
}
/* Print the list as "a -> b -> ... -> NULL" (no trailing newline). */
void printList(struct Node *node){
    for (struct Node *cur = node; cur != NULL; cur = cur->next)
    {
        printf("%d -> ", cur->data);
    }
    printf("NULL");
}
/* Append `el` to the shared queue tail and report it on stdout. */
void produce(int el){
    insertLast(&head, el);
    printf("\nProduced %d\n", el);
}
/* Remove the first queue element, report it, and free the node. */
void consume(){
    struct Node *temp = deleteFirst(&head);
    /* BUG FIX: deleteFirst returns NULL on an empty list; the original
     * dereferenced it unconditionally and crashed. */
    if (temp == NULL)
        return;
    printf("\nConsumed %d\n", temp->data);
    free(temp);
}
/*
 * Two-thread producer/consumer demo: thread 0 produces increasing integers,
 * thread 1 consumes; each step is gated by a keypress (fgetc) and serialized
 * by an unnamed critical section. Loops forever by design.
 */
int main(){
    int el = 1;
#pragma omp parallel num_threads(2)
    {
        /* BUG FIX: `id` was declared before the parallel region and therefore
         * shared — both threads raced to write it and could read the wrong
         * thread number. Declaring it here makes it private per thread. */
        int id = omp_get_thread_num();
        if(id == 0){
            while(1){
#pragma omp critical
                {
                    produce(el);
                    el++;
                    printf("List: ");
                    printList(head);
                    fgetc(stdin); /* wait for a keypress before the next step */
                }
            }
        } else {
            while(1){
#pragma omp critical
                {
                    consume();
                    printf("List: ");
                    printList(head);
                    fgetc(stdin);
                }
            }
        }
    }
    return 0;
}
tearprocessing.h | #ifndef TEARPROCESSING_H
#define TEARPROCESSING_H
#include <QDebug>
#include <QList>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <memory>
#include "headers/cpp_interface/fpsoptions.h"
#include "headers/cpp_interface/teardata.h"
//! TODO
//! Detects screen tearing by diffing each incoming frame against the cached
//! previous frame of the same video and collecting rows that changed.
class TearProcessing
{
    //! constructors
public:
    //! Prepares an empty frame cache for up to `_max_video_count` videos.
    TearProcessing()
        : _received_first_frames(false)
        , _max_video_count(3)
    {
        _init_member();
    }

    //! methods
public:
    //! Compare each current frame with the cached previous frame and return
    //! per-video tear data (the rows whose difference image is fully white).
    //! The first call only primes the cache and returns empty tear data.
    QList<TearData> check_for_tears(const QList<cv::Mat> & cv_frame_list
                                  , std::shared_ptr<QList<FPSOptions>> shared_fps_options_list)
    {
        Q_UNUSED(shared_fps_options_list);
        //Q_UNUSED(shared_tear_options_list);
        QList<cv::Mat> difference_frames;
        QList<TearData> tear_data_list;
        // default init
        // NOTE(review): _cached_frames holds _max_video_count (3) slots;
        // cv_frame_list.size() > 3 would index out of bounds — confirm callers
        // never exceed that.
        for (int i = 0; i < cv_frame_list.size(); ++i) {
            difference_frames.push_back(cv::Mat());
            const quint64 row_count = static_cast<quint64>(cv_frame_list[i].rows);
            tear_data_list.push_back(TearData(row_count));
        }
        if (!_received_first_frames)
        {
            _received_first_frames = true;
            // save the current frame list
            _cache_framelist(cv_frame_list);
            // return the normal frames as we can't calculate a difference
            return tear_data_list;
        } else
        {
            // if multiple videos are loaded, the cache list has not all frames loaded, wait for next iteration
            // refactored this from the loop to allow omp
            bool all_cached_frames_filled = true;
            for (int i = 0; i < cv_frame_list.size(); ++i)
            {
                all_cached_frames_filled = all_cached_frames_filled && !_cached_frames[i].empty();
            }
            // TODO test this for performance
            if (all_cached_frames_filled) {
#pragma omp parallel for
                for (int i = 0; i < cv_frame_list.size(); ++i)
                {
                    const quint32 pixel_difference = (*shared_fps_options_list)[i].pixel_difference.value();
                    difference_frames[i] = _get_difference(_cached_frames[i], cv_frame_list[i], pixel_difference).clone();
                    tear_data_list[i].set_tear_rows(_get_tear_rows(difference_frames[i]));
                }
            } else {
                // we could try to calculate the difference for each frame where we have a previous frame accessable
                // but it might be a hassle if the second video got deleted live, because the indices get moved
                // it's easier to simply wait until the new frames arrive at the correct index. I should probably
                // disable removing files while exporting
            }
        }
        // save the current frame list
        _cache_framelist(cv_frame_list);
        return tear_data_list;
    }
    //! Drops all cached frames and returns to the "no frames seen" state.
    void reset_state()
    {
        _cached_frames.clear();
        _init_member();
    }

    //! methods
private:
    //! Allocates one empty cache slot per supported video.
    void _init_member()
    {
        // prepare buffer for each video
        for (int i = 0; i < _max_video_count; ++i)
        {
            _cached_frames.push_back(cv::Mat());
        }
        // first frames can't be compared
        _received_first_frames = false;
    }
    //! Builds the black/white difference image between two frames.
    cv::Mat _get_difference(const cv::Mat & first_frame, const cv::Mat & second_frame, const quint32 pixel_difference) const
    {
        cv::Mat difference;
        //cv::absdiff(first_frame, second_frame, difference);
        _are_equal_with_draw(first_frame, second_frame, static_cast<int>(pixel_difference), difference);
        return difference;
    }
    //! Per-pixel grayscale comparison: pixels differing by more than
    //! `pixel_difference` become white in `output`, all others black.
    //! take a look at https://stackoverflow.com/questions/18464710/how-to-do-per-element-comparison-and-do-different-operation-according-to-result
    void _are_equal_with_draw(const cv::Mat & frame_a, const cv::Mat & frame_b, const int pixel_difference, cv::Mat & output) const {
        cv::Mat black_white_frame_a;
        cv::Mat black_white_frame_b;
        cv::cvtColor(frame_a, black_white_frame_a, cv::COLOR_BGRA2GRAY);
        cv::cvtColor(frame_b, black_white_frame_b, cv::COLOR_BGRA2GRAY);
        output = frame_a.clone();
        for (int i = 0; i < black_white_frame_a.rows; i += 1) {
            for (int j = 0; j < black_white_frame_a.cols; j += 1) {
                // max/min ordering avoids unsigned underflow when subtracting
                int ac(std::max(black_white_frame_a.at<uchar>(i, j)
                              , black_white_frame_b.at<uchar>(i, j)));
                int bc(std::min(black_white_frame_a.at<uchar>(i, j)
                              , black_white_frame_b.at<uchar>(i, j)));
                if (ac - bc > pixel_difference) {
                    // on difference, set to white
                    output.at<cv::Vec3b>(i,j)[0] = 255;
                    output.at<cv::Vec3b>(i,j)[1] = 255;
                    output.at<cv::Vec3b>(i,j)[2] = 255;
                } else {
                    // on "same" pixel, set to black
                    output.at<cv::Vec3b>(i,j)[0] = 0;
                    output.at<cv::Vec3b>(i,j)[1] = 0;
                    output.at<cv::Vec3b>(i,j)[2] = 0;
                }
            }
        }
    }
    //! Stores deep copies of the given frames as the "previous" frames.
    //! (Now takes a const reference — the original copied the QList by value.)
    void _cache_framelist(const QList<cv::Mat> & _other)
    {
        for (int i = 0; i < _other.size(); ++i)
        {
            _cached_frames[i] = _other[i].clone();
        }
    }
    //! get the rows where we detect a tear (may not be ordered from 0 to max_rows, e.g 0 3 2)
    std::vector<quint64> _get_tear_rows(const cv::Mat & difference) const
    {
        std::vector<quint64> tear_rows;
#pragma omp parallel for
        for (int row = 0; row < difference.rows; ++row)
        {
            if (_is_row_a_tear(difference, row))
            {
                // BUG FIX: concurrent push_back on a std::vector is a data
                // race and can corrupt the vector; serialize the insertion.
                // Row order was already documented as unspecified.
#pragma omp critical
                tear_rows.push_back(static_cast<quint64>(row));
            }
        }
        return tear_rows;
    }
    //! short circuits if any pixel in a row is found not to be full black (0,0,0)
    //! NOTE(review): the && below means "all three channels nonzero"; since
    //! _are_equal_with_draw only writes (0,0,0) or (255,255,255) this is
    //! equivalent to "any channel nonzero" for its output — confirm if other
    //! difference sources are ever used.
    bool _is_row_a_tear(const cv::Mat & difference, const int row) const
    {
        for (int col = 0; col < difference.cols; ++col)
        {
            bool red_channel_is_not_black = difference.at<cv::Vec3b>(row,col)[0] != 0;
            bool green_channel_is_not_black = difference.at<cv::Vec3b>(row,col)[1] != 0;
            bool blue_channel_is_not_black = difference.at<cv::Vec3b>(row,col)[2] != 0;
            bool pixel_is_not_black = red_channel_is_not_black && green_channel_is_not_black && blue_channel_is_not_black;
            // if the difference frame is not black, we detected a change in the subsequent image
            if (pixel_is_not_black)
            {
                return false;
            }
        }
        return true;
    }

    //! member
private:
    //! false until the first frame list has been cached
    bool _received_first_frames;
    //! previous frame per video slot
    QList<cv::Mat> _cached_frames;
    //! number of cache slots allocated in _init_member()
    const quint8 _max_video_count;
};
#endif // TEARPROCESSING_H
|
unnamedCritical.c |
// OpenMP Unnamed Critical Example
// Inclusions
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
// Main
// Sums 1..25 in parallel, demonstrating that two unnamed critical regions
// share one lock (each blocks the other).
int main( int argc, char** argv ) {
    int *a = malloc( 25 * sizeof( int ) ); // Array of Values
    int i = 0;                             // Loop Iterator
    int n = 25;                            // Number of Iterations
    int localSum = 0;                      // Private Local Sum for Each Core
    int totalSum = 0;                      // Shared Total Sum for All Cores
    int thread = 0;                        // Thread Number
    // BUG FIX: omp_get_wtime() returns double; storing it in float loses
    // precision and can make short intervals round to zero.
    double start = 0.0;                    // Start Time
    double end = 0.0;                      // End Time
    double time = 0.0;                     // Elapsed Time
    // BUG FIX: check the allocation before writing through it.
    if ( a == NULL ) {
        fprintf( stderr, "Failed to allocate array.\n" );
        return EXIT_FAILURE;
    }
    // Fill Array with Values 1 to 25
    for( i = 0; i < n; i++ ) {
        a[i] = i + 1;
    }
    // Parallel Region
    #pragma omp parallel shared( n, a, totalSum ) private( thread, localSum )
    // Share Number of Iterations, Array, and the Total Sum
    // Keep the Thread Number and Local Sum Private
    {
        thread = omp_get_thread_num( ); // Get the Thread Number
        localSum = 0;                   // Preset Local Sum to Zero
        #pragma omp for // Parallelize the Next For
        for( i = 0; i < n; i++ ) {
            localSum += a[i]; // Accumulate Array Values into Local Sum
        }
        // NOTE(review): `start` and `end` are shared and overwritten by every
        // thread without synchronization, so the printed time is whichever
        // thread wrote last. Kept as-is to preserve the example's behavior;
        // time outside the parallel region for a meaningful measurement.
        start = omp_get_wtime( );
        #pragma omp critical // Unnamed Critical Region - blocks both.
        {
            totalSum += localSum; // Accumulate Local Sum Values into Total Sum
            printf( "Thread %d has local sum %d and adds to total sum %d.\n",
                    thread, localSum, totalSum );
        }
        #pragma omp critical // Unnamed Critical Region - blocks both.
        {
            printf( "I'm an unnamed critical region.\n" );
        }
        end = omp_get_wtime( );
    }
    time = end - start;
    printf( "Total sum at end is %d.\nTime: %0.9lf\n", totalSum, time );
    free( a );
    return 0;
}
// End unnamedCritical.c - EWG SDG
|
CGOpenMPRuntime.h | //===----- CGOpenMPRuntime.h - Interface to OpenMP Runtimes -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides a class for OpenMP runtime code generation.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
#define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
#include "CGValue.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/GlobalDecl.h"
#include "clang/AST/Type.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/AtomicOrdering.h"
namespace llvm {
class ArrayType;
class Constant;
class FunctionType;
class GlobalVariable;
class StructType;
class Type;
class Value;
class OpenMPIRBuilder;
} // namespace llvm
namespace clang {
class Expr;
class OMPDependClause;
class OMPExecutableDirective;
class OMPLoopDirective;
class VarDecl;
class OMPDeclareReductionDecl;
class IdentifierInfo;
namespace CodeGen {
class Address;
class CodeGenFunction;
class CodeGenModule;
/// A basic class for pre|post-action for advanced codegen sequence for OpenMP
/// region.
class PrePostActionTy {
public:
  explicit PrePostActionTy() {}
  /// Hook invoked before the region body is emitted; default does nothing.
  virtual void Enter(CodeGenFunction &CGF) {}
  /// Hook invoked after the region body is emitted; default does nothing.
  virtual void Exit(CodeGenFunction &CGF) {}
  virtual ~PrePostActionTy() {}
};
/// Class provides a way to call simple version of codegen for OpenMP region, or
/// an advanced with possible pre|post-actions in codegen.
class RegionCodeGenTy final {
  /// Type-erased pointer to the caller's callable (stored as an integer).
  intptr_t CodeGen;
  typedef void (*CodeGenTy)(intptr_t, CodeGenFunction &, PrePostActionTy &);
  /// Trampoline that restores the callable's static type and invokes it.
  CodeGenTy Callback;
  /// Optional pre/post action passed to the callable; null until setAction().
  mutable PrePostActionTy *PrePostAction;
  RegionCodeGenTy() = delete;
  RegionCodeGenTy &operator=(const RegionCodeGenTy &) = delete;
  /// Casts \p CodeGen back to \p Callable and calls it with CGF and Action.
  template <typename Callable>
  static void CallbackFn(intptr_t CodeGen, CodeGenFunction &CGF,
                         PrePostActionTy &Action) {
    return (*reinterpret_cast<Callable *>(CodeGen))(CGF, Action);
  }

public:
  /// Wraps any callable (except RegionCodeGenTy itself, excluded via SFINAE).
  /// NOTE: stores the address of the caller's callable — the callable must
  /// outlive this object.
  template <typename Callable>
  RegionCodeGenTy(
      Callable &&CodeGen,
      std::enable_if_t<!std::is_same<std::remove_reference_t<Callable>,
                                     RegionCodeGenTy>::value> * = nullptr)
      : CodeGen(reinterpret_cast<intptr_t>(&CodeGen)),
        Callback(CallbackFn<std::remove_reference_t<Callable>>),
        PrePostAction(nullptr) {}
  /// Installs a pre/post action to be forwarded to the callable.
  void setAction(PrePostActionTy &Action) const { PrePostAction = &Action; }
  /// Invokes the wrapped callable (defined out of line).
  void operator()(CodeGenFunction &CGF) const;
};
/// Aggregated data for emitting an OpenMP task: per-clause expression lists
/// plus task flags.
struct OMPTaskDataTy final {
  // Expressions from private/firstprivate/lastprivate clauses and their
  // helper copies/initializers.
  SmallVector<const Expr *, 4> PrivateVars;
  SmallVector<const Expr *, 4> PrivateCopies;
  SmallVector<const Expr *, 4> FirstprivateVars;
  SmallVector<const Expr *, 4> FirstprivateCopies;
  SmallVector<const Expr *, 4> FirstprivateInits;
  SmallVector<const Expr *, 4> LastprivateVars;
  SmallVector<const Expr *, 4> LastprivateCopies;
  // Reduction clause components: items, originals, copies, combiner ops.
  SmallVector<const Expr *, 4> ReductionVars;
  SmallVector<const Expr *, 4> ReductionOrigs;
  SmallVector<const Expr *, 4> ReductionCopies;
  SmallVector<const Expr *, 4> ReductionOps;
  /// One depend clause: its kind, optional iterator, and dependency exprs.
  struct DependData {
    OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
    const Expr *IteratorExpr = nullptr;
    SmallVector<const Expr *, 4> DepExprs;
    explicit DependData() = default;
    DependData(OpenMPDependClauseKind DepKind, const Expr *IteratorExpr)
        : DepKind(DepKind), IteratorExpr(IteratorExpr) {}
  };
  SmallVector<DependData, 4> Dependences;
  // Clause value paired with a flag bit (presumably "clause was specified";
  // confirm against the emitters that read these).
  llvm::PointerIntPair<llvm::Value *, 1, bool> Final;
  llvm::PointerIntPair<llvm::Value *, 1, bool> Schedule;
  llvm::PointerIntPair<llvm::Value *, 1, bool> Priority;
  llvm::Value *Reductions = nullptr;
  unsigned NumberOfParts = 0;
  bool Tied = true;
  bool Nogroup = false;
  bool IsReductionWithTaskMod = false;
  bool IsWorksharingReduction = false;
};
/// Class intended to support codegen of all kind of the reduction clauses.
class ReductionCodeGen {
private:
  /// Data required for codegen of reduction clauses.
  struct ReductionData {
    /// Reference to the item shared between tasks to reduce into.
    const Expr *Shared = nullptr;
    /// Reference to the original item.
    const Expr *Ref = nullptr;
    /// Helper expression for generation of private copy.
    const Expr *Private = nullptr;
    /// Helper expression for generation reduction operation.
    const Expr *ReductionOp = nullptr;
    ReductionData(const Expr *Shared, const Expr *Ref, const Expr *Private,
                  const Expr *ReductionOp)
        : Shared(Shared), Ref(Ref), Private(Private), ReductionOp(ReductionOp) {
    }
  };
  /// List of reduction-based clauses.
  SmallVector<ReductionData, 4> ClausesData;
  /// List of addresses of shared variables/expressions.
  SmallVector<std::pair<LValue, LValue>, 4> SharedAddresses;
  /// List of addresses of original variables/expressions.
  SmallVector<std::pair<LValue, LValue>, 4> OrigAddresses;
  /// Sizes of the reduction items in chars.
  SmallVector<std::pair<llvm::Value *, llvm::Value *>, 4> Sizes;
  /// Base declarations for the reduction items.
  SmallVector<const VarDecl *, 4> BaseDecls;
  /// Emits lvalue for shared expression.
  LValue emitSharedLValue(CodeGenFunction &CGF, const Expr *E);
  /// Emits upper bound for shared expression (if array section).
  LValue emitSharedLValueUB(CodeGenFunction &CGF, const Expr *E);
  /// Performs aggregate initialization.
  /// \param N Number of reduction item in the common list.
  /// \param PrivateAddr Address of the corresponding private item.
  /// \param SharedLVal Address of the original shared variable.
  /// \param DRD Declare reduction construct used for reduction item.
  void emitAggregateInitialization(CodeGenFunction &CGF, unsigned N,
                                   Address PrivateAddr, LValue SharedLVal,
                                   const OMPDeclareReductionDecl *DRD);

public:
  /// Builds per-clause reduction data from the parallel component lists
  /// (one entry per reduction item; lists are expected to be the same length).
  ReductionCodeGen(ArrayRef<const Expr *> Shareds, ArrayRef<const Expr *> Origs,
                   ArrayRef<const Expr *> Privates,
                   ArrayRef<const Expr *> ReductionOps);
  /// Emits lvalue for the shared and original reduction item.
  /// \param N Number of the reduction item.
  void emitSharedOrigLValue(CodeGenFunction &CGF, unsigned N);
  /// Emits the code for the variable-modified type, if required.
  /// \param N Number of the reduction item.
  void emitAggregateType(CodeGenFunction &CGF, unsigned N);
  /// Emits the code for the variable-modified type, if required.
  /// \param N Number of the reduction item.
  /// \param Size Size of the type in chars.
  void emitAggregateType(CodeGenFunction &CGF, unsigned N, llvm::Value *Size);
  /// Performs initialization of the private copy for the reduction item.
  /// \param N Number of the reduction item.
  /// \param PrivateAddr Address of the corresponding private item.
  /// \param DefaultInit Default initialization sequence that should be
  /// performed if no reduction specific initialization is found.
  /// \param SharedLVal Address of the original shared variable.
  void
  emitInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr,
                     LValue SharedLVal,
                     llvm::function_ref<bool(CodeGenFunction &)> DefaultInit);
  /// Returns true if the private copy requires cleanups.
  bool needCleanups(unsigned N);
  /// Emits cleanup code for the reduction item.
  /// \param N Number of the reduction item.
  /// \param PrivateAddr Address of the corresponding private item.
  void emitCleanups(CodeGenFunction &CGF, unsigned N, Address PrivateAddr);
  /// Adjusts \p PrivatedAddr for using instead of the original variable
  /// address in normal operations.
  /// \param N Number of the reduction item.
  /// \param PrivateAddr Address of the corresponding private item.
  Address adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
                               Address PrivateAddr);
  /// Returns LValue for the reduction item.
  LValue getSharedLValue(unsigned N) const { return SharedAddresses[N].first; }
  /// Returns LValue for the original reduction item.
  LValue getOrigLValue(unsigned N) const { return OrigAddresses[N].first; }
  /// Returns the size of the reduction item (in chars and total number of
  /// elements in the item), or nullptr, if the size is a constant.
  std::pair<llvm::Value *, llvm::Value *> getSizes(unsigned N) const {
    return Sizes[N];
  }
  /// Returns the base declaration of the reduction item.
  const VarDecl *getBaseDecl(unsigned N) const { return BaseDecls[N]; }
  /// Returns the base declaration of the reduction item.
  const Expr *getRefExpr(unsigned N) const { return ClausesData[N].Ref; }
  /// Returns true if the initialization of the reduction item uses initializer
  /// from declare reduction construct.
  bool usesReductionInitializer(unsigned N) const;
};
class CGOpenMPRuntime {
public:
/// Allows to disable automatic handling of functions used in target regions
/// as those marked as `omp declare target`.
class DisableAutoDeclareTargetRAII {
CodeGenModule &CGM;
bool SavedShouldMarkAsGlobal;
public:
DisableAutoDeclareTargetRAII(CodeGenModule &CGM);
~DisableAutoDeclareTargetRAII();
};
/// Manages list of nontemporal decls for the specified directive.
class NontemporalDeclsRAII {
CodeGenModule &CGM;
const bool NeedToPush;
public:
NontemporalDeclsRAII(CodeGenModule &CGM, const OMPLoopDirective &S);
~NontemporalDeclsRAII();
};
/// Maps the expression for the lastprivate variable to the global copy used
/// to store new value because original variables are not mapped in inner
/// parallel regions. Only private copies are captured but we need also to
/// store private copy in shared address.
/// Also, stores the expression for the private loop counter and it
/// threaprivate name.
struct LastprivateConditionalData {
llvm::MapVector<CanonicalDeclPtr<const Decl>, SmallString<16>>
DeclToUniqueName;
LValue IVLVal;
llvm::Function *Fn = nullptr;
bool Disabled = false;
};
/// Manages list of lastprivate conditional decls for the specified directive.
class LastprivateConditionalRAII {
enum class ActionToDo {
DoNotPush,
PushAsLastprivateConditional,
DisableLastprivateConditional,
};
CodeGenModule &CGM;
ActionToDo Action = ActionToDo::DoNotPush;
/// Check and try to disable analysis of inner regions for changes in
/// lastprivate conditional.
void tryToDisableInnerAnalysis(const OMPExecutableDirective &S,
llvm::DenseSet<CanonicalDeclPtr<const Decl>>
&NeedToAddForLPCsAsDisabled) const;
LastprivateConditionalRAII(CodeGenFunction &CGF,
const OMPExecutableDirective &S);
public:
explicit LastprivateConditionalRAII(CodeGenFunction &CGF,
const OMPExecutableDirective &S,
LValue IVLVal);
static LastprivateConditionalRAII disable(CodeGenFunction &CGF,
const OMPExecutableDirective &S);
~LastprivateConditionalRAII();
};
llvm::OpenMPIRBuilder &getOMPBuilder() { return OMPBuilder; }
protected:
CodeGenModule &CGM;
StringRef FirstSeparator, Separator;
/// Constructor allowing to redefine the name separator for the variables.
explicit CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
StringRef Separator);
/// Creates offloading entry for the provided entry ID \a ID,
/// address \a Addr, size \a Size, and flags \a Flags.
virtual void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr,
uint64_t Size, int32_t Flags,
llvm::GlobalValue::LinkageTypes Linkage);
/// Helper to emit outlined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Lambda codegen specific to an accelerator device.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
virtual void emitTargetOutlinedFunctionHelper(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen);
/// Emits object of ident_t type with info for source location.
/// \param Flags Flags for OpenMP location.
///
llvm::Value *emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc,
unsigned Flags = 0);
/// Returns pointer to ident_t type.
llvm::Type *getIdentTyPointerTy();
/// Gets thread id value for the current thread.
///
llvm::Value *getThreadID(CodeGenFunction &CGF, SourceLocation Loc);
/// Get the function name of an outlined region.
// The name can be customized depending on the target.
//
virtual StringRef getOutlinedHelperName() const { return ".omp_outlined."; }
/// Emits \p Callee function call with arguments \p Args with location \p Loc.
void emitCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::FunctionCallee Callee,
ArrayRef<llvm::Value *> Args = llvm::None) const;
/// Emits address of the word in a memory where current thread id is
/// stored.
virtual Address emitThreadIDAddress(CodeGenFunction &CGF, SourceLocation Loc);
void setLocThreadIdInsertPt(CodeGenFunction &CGF,
bool AtCurrentPoint = false);
void clearLocThreadIdInsertPt(CodeGenFunction &CGF);
/// Check if the default location must be constant.
/// Default is false to support OMPT/OMPD.
virtual bool isDefaultLocationConstant() const { return false; }
/// Returns additional flags that can be stored in reserved_2 field of the
/// default location.
virtual unsigned getDefaultLocationReserved2Flags() const { return 0; }
/// Returns default flags for the barriers depending on the directive, for
/// which this barrier is going to be emitted.
static unsigned getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind);
/// Get the LLVM type for the critical name.
llvm::ArrayType *getKmpCriticalNameTy() const {return KmpCriticalNameTy;}
/// Returns corresponding lock object for the specified critical region
/// name. If the lock object does not exist it is created, otherwise the
/// reference to the existing copy is returned.
/// \param CriticalName Name of the critical region.
///
llvm::Value *getCriticalRegionLock(StringRef CriticalName);
private:
/// An OpenMP-IR-Builder instance.
llvm::OpenMPIRBuilder OMPBuilder;
/// Default const ident_t object used for initialization of all other
/// ident_t objects.
llvm::Constant *DefaultOpenMPPSource = nullptr;
using FlagsTy = std::pair<unsigned, unsigned>;
/// Map of flags and corresponding default locations.
using OpenMPDefaultLocMapTy = llvm::DenseMap<FlagsTy, llvm::Value *>;
OpenMPDefaultLocMapTy OpenMPDefaultLocMap;
Address getOrCreateDefaultLocation(unsigned Flags);
QualType IdentQTy;
llvm::StructType *IdentTy = nullptr;
/// Map for SourceLocation and OpenMP runtime library debug locations.
typedef llvm::DenseMap<unsigned, llvm::Value *> OpenMPDebugLocMapTy;
OpenMPDebugLocMapTy OpenMPDebugLocMap;
/// The type for a microtask which gets passed to __kmpc_fork_call().
/// Original representation is:
/// typedef void (kmpc_micro)(kmp_int32 global_tid, kmp_int32 bound_tid,...);
llvm::FunctionType *Kmpc_MicroTy = nullptr;
/// Stores debug location and ThreadID for the function.
struct DebugLocThreadIdTy {
  /// Debug location value emitted for the function (see emitUpdateLocation).
  llvm::Value *DebugLoc;
  /// Cached thread id value for the function (see getThreadID).
  llvm::Value *ThreadID;
  /// Insert point for the service instructions.
  llvm::AssertingVH<llvm::Instruction> ServiceInsertPt = nullptr;
};
/// Map of local debug location, ThreadId and functions.
typedef llvm::DenseMap<llvm::Function *, DebugLocThreadIdTy>
OpenMPLocThreadIDMapTy;
OpenMPLocThreadIDMapTy OpenMPLocThreadIDMap;
/// Map of UDRs and corresponding combiner/initializer.
typedef llvm::DenseMap<const OMPDeclareReductionDecl *,
std::pair<llvm::Function *, llvm::Function *>>
UDRMapTy;
UDRMapTy UDRMap;
/// Map of functions and locally defined UDRs.
typedef llvm::DenseMap<llvm::Function *,
SmallVector<const OMPDeclareReductionDecl *, 4>>
FunctionUDRMapTy;
FunctionUDRMapTy FunctionUDRMap;
/// Map from the user-defined mapper declaration to its corresponding
/// functions.
llvm::DenseMap<const OMPDeclareMapperDecl *, llvm::Function *> UDMMap;
/// Map of functions and their local user-defined mappers.
using FunctionUDMMapTy =
llvm::DenseMap<llvm::Function *,
SmallVector<const OMPDeclareMapperDecl *, 4>>;
FunctionUDMMapTy FunctionUDMMap;
/// Maps local variables marked as lastprivate conditional to their internal
/// types.
llvm::DenseMap<llvm::Function *,
llvm::DenseMap<CanonicalDeclPtr<const Decl>,
std::tuple<QualType, const FieldDecl *,
const FieldDecl *, LValue>>>
LastprivateConditionalToTypes;
/// Type kmp_critical_name, originally defined as typedef kmp_int32
/// kmp_critical_name[8];
llvm::ArrayType *KmpCriticalNameTy;
/// An ordered map of auto-generated variables to their unique names.
/// It stores variables with the following names: 1) ".gomp_critical_user_" +
/// <critical_section_name> + ".var" for "omp critical" directives; 2)
/// <mangled_name_for_global_var> + ".cache." for cache for threadprivate
/// variables.
llvm::StringMap<llvm::AssertingVH<llvm::Constant>, llvm::BumpPtrAllocator>
InternalVars;
/// Type typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *);
llvm::Type *KmpRoutineEntryPtrTy = nullptr;
QualType KmpRoutineEntryPtrQTy;
/// Type typedef struct kmp_task {
/// void * shareds; /**< pointer to block of pointers to
/// shared vars */
/// kmp_routine_entry_t routine; /**< pointer to routine to call for
/// executing task */
/// kmp_int32 part_id; /**< part id for the task */
/// kmp_routine_entry_t destructors; /* pointer to function to invoke
///                                   destructors of firstprivate C++ objects */
/// } kmp_task_t;
QualType KmpTaskTQTy;
/// Saved kmp_task_t for task directive.
QualType SavedKmpTaskTQTy;
/// Saved kmp_task_t for taskloop-based directive.
QualType SavedKmpTaskloopTQTy;
/// Type typedef struct kmp_depend_info {
/// kmp_intptr_t base_addr;
/// size_t len;
/// struct {
/// bool in:1;
/// bool out:1;
/// } flags;
/// } kmp_depend_info_t;
QualType KmpDependInfoTy;
/// Type typedef struct kmp_task_affinity_info {
/// kmp_intptr_t base_addr;
/// size_t len;
/// struct {
/// bool flag1 : 1;
/// bool flag2 : 1;
/// kmp_int32 reserved : 30;
/// } flags;
/// } kmp_task_affinity_info_t;
QualType KmpTaskAffinityInfoTy;
/// struct kmp_dim { // loop bounds info casted to kmp_int64
/// kmp_int64 lo; // lower
/// kmp_int64 up; // upper
/// kmp_int64 st; // stride
/// };
QualType KmpDimTy;
/// Type struct __tgt_offload_entry{
/// void *addr; // Pointer to the offload entry info.
/// // (function or global)
/// char *name; // Name of the function or global.
///   size_t size;     // Size of the entry info (0 if it is a function).
/// int32_t flags;
/// int32_t reserved;
/// };
QualType TgtOffloadEntryQTy;
/// Entity that registers the offloading constants that were emitted so
/// far.
class OffloadEntriesInfoManagerTy {
  /// Module for which the offloading entries are registered.
  CodeGenModule &CGM;
  /// Number of entries registered so far.
  unsigned OffloadingEntriesNum = 0;
public:
  /// Base class of the entries info.
  class OffloadEntryInfo {
  public:
    /// Kind of a given entry.
    enum OffloadingEntryInfoKinds : unsigned {
      /// Entry is a target region.
      OffloadingEntryInfoTargetRegion = 0,
      /// Entry is a declare target variable.
      OffloadingEntryInfoDeviceGlobalVar = 1,
      /// Invalid entry info.
      OffloadingEntryInfoInvalid = ~0u
    };
  protected:
    OffloadEntryInfo() = delete;
    explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind) : Kind(Kind) {}
    explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order,
                              uint32_t Flags)
        : Flags(Flags), Order(Order), Kind(Kind) {}
    ~OffloadEntryInfo() = default;
  public:
    /// Returns true once an emission order has been assigned to this entry.
    bool isValid() const { return Order != ~0u; }
    unsigned getOrder() const { return Order; }
    OffloadingEntryInfoKinds getKind() const { return Kind; }
    uint32_t getFlags() const { return Flags; }
    void setFlags(uint32_t NewFlags) { Flags = NewFlags; }
    llvm::Constant *getAddress() const {
      return cast_or_null<llvm::Constant>(Addr);
    }
    /// Sets the address; may only be called once per entry.
    void setAddress(llvm::Constant *V) {
      assert(!Addr.pointsToAliveValue() && "Address has been set before!");
      Addr = V;
    }
    static bool classof(const OffloadEntryInfo *Info) { return true; }
  private:
    /// Address of the entity that has to be mapped for offloading.
    llvm::WeakTrackingVH Addr;
    /// Flags associated with the device global.
    uint32_t Flags = 0u;
    /// Order this entry was emitted.
    unsigned Order = ~0u;
    OffloadingEntryInfoKinds Kind = OffloadingEntryInfoInvalid;
  };
  /// Return true if there are no entries defined.
  bool empty() const;
  /// Return number of entries defined so far.
  unsigned size() const { return OffloadingEntriesNum; }
  OffloadEntriesInfoManagerTy(CodeGenModule &CGM) : CGM(CGM) {}
  //
  // Target region entries related.
  //
  /// Kind of the target registry entry.
  enum OMPTargetRegionEntryKind : uint32_t {
    /// Mark the entry as target region.
    OMPTargetRegionEntryTargetRegion = 0x0,
    /// Mark the entry as a global constructor.
    OMPTargetRegionEntryCtor = 0x02,
    /// Mark the entry as a global destructor.
    OMPTargetRegionEntryDtor = 0x04,
  };
  /// Target region entries info.
  class OffloadEntryInfoTargetRegion final : public OffloadEntryInfo {
    /// Address that can be used as the ID of the entry.
    llvm::Constant *ID = nullptr;
  public:
    OffloadEntryInfoTargetRegion()
        : OffloadEntryInfo(OffloadingEntryInfoTargetRegion) {}
    explicit OffloadEntryInfoTargetRegion(unsigned Order,
                                          llvm::Constant *Addr,
                                          llvm::Constant *ID,
                                          OMPTargetRegionEntryKind Flags)
        : OffloadEntryInfo(OffloadingEntryInfoTargetRegion, Order, Flags),
          ID(ID) {
      setAddress(Addr);
    }
    llvm::Constant *getID() const { return ID; }
    /// Sets the ID; may only be called once per entry.
    void setID(llvm::Constant *V) {
      assert(!ID && "ID has been set before!");
      ID = V;
    }
    static bool classof(const OffloadEntryInfo *Info) {
      return Info->getKind() == OffloadingEntryInfoTargetRegion;
    }
  };
  /// Initialize target region entry.
  void initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                       StringRef ParentName, unsigned LineNum,
                                       unsigned Order);
  /// Register target region entry.
  void registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                     StringRef ParentName, unsigned LineNum,
                                     llvm::Constant *Addr, llvm::Constant *ID,
                                     OMPTargetRegionEntryKind Flags);
  /// Return true if a target region entry with the provided information
  /// exists.
  bool hasTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                StringRef ParentName, unsigned LineNum) const;
  /// Applies action \a Action on all registered entries.
  typedef llvm::function_ref<void(unsigned, unsigned, StringRef, unsigned,
                                  const OffloadEntryInfoTargetRegion &)>
      OffloadTargetRegionEntryInfoActTy;
  void actOnTargetRegionEntriesInfo(
      const OffloadTargetRegionEntryInfoActTy &Action);
  //
  // Device global variable entries related.
  //
  /// Kind of the global variable entry.
  enum OMPTargetGlobalVarEntryKind : uint32_t {
    /// Mark the entry as a 'declare target to' variable.
    OMPTargetGlobalVarEntryTo = 0x0,
    /// Mark the entry as a 'declare target link' variable.
    OMPTargetGlobalVarEntryLink = 0x1,
  };
  /// Device global variable entries info.
  class OffloadEntryInfoDeviceGlobalVar final : public OffloadEntryInfo {
    /// Size of the global variable.
    CharUnits VarSize;
    /// Linkage of the global variable.
    llvm::GlobalValue::LinkageTypes Linkage;
  public:
    OffloadEntryInfoDeviceGlobalVar()
        : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar) {}
    explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order,
                                             OMPTargetGlobalVarEntryKind Flags)
        : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags) {}
    explicit OffloadEntryInfoDeviceGlobalVar(
        unsigned Order, llvm::Constant *Addr, CharUnits VarSize,
        OMPTargetGlobalVarEntryKind Flags,
        llvm::GlobalValue::LinkageTypes Linkage)
        : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags),
          VarSize(VarSize), Linkage(Linkage) {
      setAddress(Addr);
    }
    CharUnits getVarSize() const { return VarSize; }
    void setVarSize(CharUnits Size) { VarSize = Size; }
    llvm::GlobalValue::LinkageTypes getLinkage() const { return Linkage; }
    void setLinkage(llvm::GlobalValue::LinkageTypes LT) { Linkage = LT; }
    static bool classof(const OffloadEntryInfo *Info) {
      return Info->getKind() == OffloadingEntryInfoDeviceGlobalVar;
    }
  };
  /// Initialize device global variable entry.
  void initializeDeviceGlobalVarEntryInfo(StringRef Name,
                                          OMPTargetGlobalVarEntryKind Flags,
                                          unsigned Order);
  /// Register device global variable entry.
  void
  registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr,
                                   CharUnits VarSize,
                                   OMPTargetGlobalVarEntryKind Flags,
                                   llvm::GlobalValue::LinkageTypes Linkage);
  /// Checks if the variable with the given name has been registered already.
  bool hasDeviceGlobalVarEntryInfo(StringRef VarName) const {
    return OffloadEntriesDeviceGlobalVar.count(VarName) > 0;
  }
  /// Applies action \a Action on all registered entries.
  typedef llvm::function_ref<void(StringRef,
                                  const OffloadEntryInfoDeviceGlobalVar &)>
      OffloadDeviceGlobalVarEntryInfoActTy;
  void actOnDeviceGlobalVarEntriesInfo(
      const OffloadDeviceGlobalVarEntryInfoActTy &Action);
private:
  // Storage for target region entries kind. The storage is indexed by
  // device ID, then file ID, then parent function name, then line number
  // (matching the typedef nesting below).
  typedef llvm::DenseMap<unsigned, OffloadEntryInfoTargetRegion>
      OffloadEntriesTargetRegionPerLine;
  typedef llvm::StringMap<OffloadEntriesTargetRegionPerLine>
      OffloadEntriesTargetRegionPerParentName;
  typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerParentName>
      OffloadEntriesTargetRegionPerFile;
  typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerFile>
      OffloadEntriesTargetRegionPerDevice;
  typedef OffloadEntriesTargetRegionPerDevice OffloadEntriesTargetRegionTy;
  OffloadEntriesTargetRegionTy OffloadEntriesTargetRegion;
  /// Storage for device global variable entries kind. The storage is to be
  /// indexed by mangled name.
  typedef llvm::StringMap<OffloadEntryInfoDeviceGlobalVar>
      OffloadEntriesDeviceGlobalVarTy;
  OffloadEntriesDeviceGlobalVarTy OffloadEntriesDeviceGlobalVar;
};
OffloadEntriesInfoManagerTy OffloadEntriesInfoManager;
bool ShouldMarkAsGlobal = true;
/// List of the emitted declarations.
llvm::DenseSet<CanonicalDeclPtr<const Decl>> AlreadyEmittedTargetDecls;
/// List of the global variables with their addresses that should not be
/// emitted for the target.
llvm::StringMap<llvm::WeakTrackingVH> EmittedNonTargetVariables;
/// List of variables that can become declare target implicitly and, thus,
/// must be emitted.
llvm::SmallDenseSet<const VarDecl *> DeferredGlobalVariables;
using NontemporalDeclsSet = llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>>;
/// Stack for list of declarations in current context marked as nontemporal.
/// The set is the union of all current stack elements.
llvm::SmallVector<NontemporalDeclsSet, 4> NontemporalDeclsStack;
/// Stack for list of addresses of declarations in current context marked as
/// lastprivate conditional. The set is the union of all current stack
/// elements.
llvm::SmallVector<LastprivateConditionalData, 4> LastprivateConditionalStack;
/// Flag for keeping track of whether a requires unified_shared_memory
/// directive is present.
bool HasRequiresUnifiedSharedMemory = false;
/// Atomic ordering from the omp requires directive.
llvm::AtomicOrdering RequiresAtomicOrdering = llvm::AtomicOrdering::Monotonic;
/// Flag for keeping track of whether a target region has been emitted.
bool HasEmittedTargetRegion = false;
/// Flag for keeping track of whether a device routine has been emitted.
/// Device routines are specific to the offloading device.
bool HasEmittedDeclareTargetRegion = false;
/// Loads all the offload entries information from the host IR
/// metadata.
void loadOffloadInfoMetadata();
/// Returns __tgt_offload_entry type.
QualType getTgtOffloadEntryQTy();
/// Start scanning from statement \a S and emit all target regions
/// found along the way.
/// \param S Starting statement.
/// \param ParentName Name of the function declaration that is being scanned.
void scanForTargetRegionsFunctions(const Stmt *S, StringRef ParentName);
/// Build type kmp_routine_entry_t (if not built yet).
void emitKmpRoutineEntryT(QualType KmpInt32Ty);
/// Returns pointer to kmpc_micro type.
llvm::Type *getKmpc_MicroPointerTy();
/// Returns __kmpc_for_static_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createForStaticInitFunction(unsigned IVSize,
bool IVSigned);
/// Returns __kmpc_dispatch_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchInitFunction(unsigned IVSize,
bool IVSigned);
/// Returns __kmpc_dispatch_next_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchNextFunction(unsigned IVSize,
bool IVSigned);
/// Returns __kmpc_dispatch_fini_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchFiniFunction(unsigned IVSize,
bool IVSigned);
/// If the specified mangled name is not in the module, create and
/// return threadprivate cache object. This object is a pointer's worth of
/// storage that's reserved for use by the OpenMP runtime.
/// \param VD Threadprivate variable.
/// \return Cache variable for the specified threadprivate.
llvm::Constant *getOrCreateThreadPrivateCache(const VarDecl *VD);
/// Gets (if variable with the given name already exist) or creates
/// internal global variable with the specified Name. The created variable has
/// linkage CommonLinkage by default and is initialized by null value.
/// \param Ty Type of the global variable. If it already exists, the type
/// must be the same.
/// \param Name Name of the variable.
llvm::Constant *getOrCreateInternalVariable(llvm::Type *Ty,
const llvm::Twine &Name,
unsigned AddressSpace = 0);
/// Set of threadprivate variables with the generated initializer.
llvm::StringSet<> ThreadPrivateWithDefinition;
/// Set of declare target variables with the generated initializer.
llvm::StringSet<> DeclareTargetWithDefinition;
/// Emits initialization code for the threadprivate variables.
/// \param VDAddr Address of the global variable \a VD.
/// \param Ctor Pointer to a global init function for \a VD.
/// \param CopyCtor Pointer to a global copy function for \a VD.
/// \param Dtor Pointer to a global destructor function for \a VD.
/// \param Loc Location of threadprivate declaration.
void emitThreadPrivateVarInit(CodeGenFunction &CGF, Address VDAddr,
llvm::Value *Ctor, llvm::Value *CopyCtor,
llvm::Value *Dtor, SourceLocation Loc);
/// Emit the array initialization or deletion portion for user-defined mapper
/// code generation.
void emitUDMapperArrayInitOrDel(CodeGenFunction &MapperCGF,
llvm::Value *Handle, llvm::Value *BasePtr,
llvm::Value *Ptr, llvm::Value *Size,
llvm::Value *MapType, CharUnits ElementSize,
llvm::BasicBlock *ExitBB, bool IsInit);
struct TaskResultTy {
  /// Pointer to the allocated task object, as returned by the task
  /// allocation call (see emitTaskInit step 1 below).
  llvm::Value *NewTask = nullptr;
  /// Task entry function the runtime invokes to execute the task body.
  llvm::Function *TaskEntry = nullptr;
  /// NewTask value with the kmp_task_t type.
  llvm::Value *NewTaskNewTaskTTy = nullptr;
  /// Base LValue for accessing fields of the emitted kmp_task_t object.
  LValue TDBase;
  /// Record declaration describing the kmp_task_t type.
  const RecordDecl *KmpTaskTQTyRD = nullptr;
  /// Optional task duplication function; may remain null.
  llvm::Value *TaskDupFn = nullptr;
};
/// Emit task region for the task directive. The task region is emitted in
/// several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
TaskResultTy emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const OMPTaskDataTy &Data);
/// Returns default address space for the constant firstprivates, 0 by
/// default.
virtual unsigned getDefaultFirstprivateAddressSpace() const { return 0; }
/// Emit code that pushes the trip count of loops associated with constructs
/// 'target teams distribute' and 'teams distribute parallel for'.
/// \param SizeEmitter Emits the int64 value for the number of iterations of
/// the associated loop.
void emitTargetNumIterationsCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Value *DeviceID,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter);
/// Emit update for lastprivate conditional data.
void emitLastprivateConditionalUpdate(CodeGenFunction &CGF, LValue IVLVal,
StringRef UniqueDeclName, LValue LVal,
SourceLocation Loc);
/// Returns the number of the elements and the address of the depobj
/// dependency array.
/// \return Number of elements in depobj array and the pointer to the array of
/// dependencies.
std::pair<llvm::Value *, LValue> getDepobjElements(CodeGenFunction &CGF,
LValue DepobjLVal,
SourceLocation Loc);
public:
explicit CGOpenMPRuntime(CodeGenModule &CGM)
: CGOpenMPRuntime(CGM, ".", ".") {}
virtual ~CGOpenMPRuntime() {}
virtual void clear();
/// Emits code for OpenMP 'if' clause using specified \a CodeGen
/// function. Here is the logic:
/// if (Cond) {
/// ThenGen();
/// } else {
/// ElseGen();
/// }
void emitIfClause(CodeGenFunction &CGF, const Expr *Cond,
const RegionCodeGenTy &ThenGen,
const RegionCodeGenTy &ElseGen);
/// Checks if the \p Body is the \a CompoundStmt and returns its child
/// statement iff there is only one that is not evaluatable at the compile
/// time.
static const Stmt *getSingleCompoundChild(ASTContext &Ctx, const Stmt *Body);
/// Get the platform-specific name separator.
std::string getName(ArrayRef<StringRef> Parts) const;
/// Emit code for the specified user defined reduction construct.
virtual void emitUserDefinedReduction(CodeGenFunction *CGF,
const OMPDeclareReductionDecl *D);
/// Get combiner/initializer for the specified user-defined reduction, if any.
virtual std::pair<llvm::Function *, llvm::Function *>
getUserDefinedReduction(const OMPDeclareReductionDecl *D);
/// Emit the function for the user defined mapper construct.
void emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
CodeGenFunction *CGF = nullptr);
/// Emits outlined function for the specified OpenMP parallel directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
virtual llvm::Function *emitParallelOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
/// Emits outlined function for the specified OpenMP teams directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
virtual llvm::Function *emitTeamsOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
/// Emits outlined function for the OpenMP task directive \a D. This
/// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
/// TaskT).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param PartIDVar Variable for partition id in the current OpenMP untied
/// task region.
/// \param TaskTVar Variable for task_t argument.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param Tied true if task is generated for tied task, false otherwise.
/// \param NumberOfParts Number of parts in untied task. Ignored for tied
/// tasks.
///
virtual llvm::Function *emitTaskOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
const VarDecl *PartIDVar, const VarDecl *TaskTVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
bool Tied, unsigned &NumberOfParts);
/// Cleans up references to the objects in finished function.
///
virtual void functionFinished(CodeGenFunction &CGF);
/// Emits code for parallel or serial call of the \a OutlinedFn with
/// variables captured in a record whose address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run in parallel threads. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
///
virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond);
/// Emits a critical region.
/// \param CriticalName Name of the critical region.
/// \param CriticalOpGen Generator for the statement associated with the given
/// critical region.
/// \param Hint Value of the 'hint' clause (optional).
virtual void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
const RegionCodeGenTy &CriticalOpGen,
SourceLocation Loc,
const Expr *Hint = nullptr);
/// Emits a master region.
/// \param MasterOpGen Generator for the statement associated with the given
/// master region.
virtual void emitMasterRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MasterOpGen,
SourceLocation Loc);
/// Emits code for a taskyield directive.
virtual void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc);
/// Emit a taskgroup region.
/// \param TaskgroupOpGen Generator for the statement associated with the
/// given taskgroup region.
virtual void emitTaskgroupRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &TaskgroupOpGen,
SourceLocation Loc);
/// Emits a single region.
/// \param SingleOpGen Generator for the statement associated with the given
/// single region.
virtual void emitSingleRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &SingleOpGen,
SourceLocation Loc,
ArrayRef<const Expr *> CopyprivateVars,
ArrayRef<const Expr *> DestExprs,
ArrayRef<const Expr *> SrcExprs,
ArrayRef<const Expr *> AssignmentOps);
/// Emit an ordered region.
/// \param OrderedOpGen Generator for the statement associated with the given
/// ordered region.
virtual void emitOrderedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &OrderedOpGen,
SourceLocation Loc, bool IsThreads);
/// Emit an implicit/explicit barrier for OpenMP threads.
/// \param Kind Directive for which this implicit barrier call must be
/// generated. Must be OMPD_barrier for explicit barrier generation.
/// \param EmitChecks true if need to emit checks for cancellation barriers.
/// \param ForceSimpleCall true if a simple barrier call must be emitted, false
/// runtime class decides which one to emit (simple or with cancellation
/// checks).
///
virtual void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind Kind,
bool EmitChecks = true,
bool ForceSimpleCall = false);
/// Check if the specified \a ScheduleKind is static non-chunked.
/// This kind of worksharing directive is emitted without outer loop.
/// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is static non-chunked.
/// This kind of distribute directive is emitted without outer loop.
/// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticNonchunked(OpenMPDistScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is static chunked.
/// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticChunked(OpenMPScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is static chunked.
/// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticChunked(OpenMPDistScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is dynamic.
/// This kind of worksharing directive is emitted without outer loop.
/// \param ScheduleKind Schedule Kind specified in the 'schedule' clause.
///
virtual bool isDynamic(OpenMPScheduleClauseKind ScheduleKind) const;
/// struct with the values to be passed to the dispatch runtime function
struct DispatchRTInput {
  /// Loop lower bound.
  llvm::Value *LB = nullptr;
  /// Loop upper bound.
  llvm::Value *UB = nullptr;
  /// Chunk size specified using 'schedule' clause (nullptr if chunk
  /// was not specified).
  llvm::Value *Chunk = nullptr;
  /// Default-constructed input: all values null until filled in.
  DispatchRTInput() = default;
  /// Convenience constructor taking all three values at once.
  DispatchRTInput(llvm::Value *LB, llvm::Value *UB, llvm::Value *Chunk)
      : LB(LB), UB(UB), Chunk(Chunk) {}
};
/// Call the appropriate runtime routine to initialize it before start
/// of loop.
/// This is used for non static scheduled types and when the ordered
/// clause is present on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds \a LB and \a UB and stride \a ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param Ordered true if loop is ordered, false otherwise.
/// \param DispatchValues struct containing llvm values for lower bound, upper
/// bound, and chunk expression.
/// For the default (nullptr) value, the chunk 1 will be used.
///
virtual void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc,
const OpenMPScheduleTy &ScheduleKind,
unsigned IVSize, bool IVSigned, bool Ordered,
const DispatchRTInput &DispatchValues);
/// Struct with the values to be passed to the static runtime function
struct StaticRTInput {
  /// Size of the iteration variable in bits.
  unsigned IVSize = 0;
  /// Sign of the iteration variable.
  bool IVSigned = false;
  /// true if loop is ordered, false otherwise.
  bool Ordered = false;
  /// Address of the output variable in which the flag of the last iteration
  /// is returned.
  Address IL = Address::invalid();
  /// Address of the output variable in which the lower iteration number is
  /// returned.
  Address LB = Address::invalid();
  /// Address of the output variable in which the upper iteration number is
  /// returned.
  Address UB = Address::invalid();
  /// Address of the output variable in which the stride value is returned,
  /// needed to generate the static_chunked scheduled loop.
  Address ST = Address::invalid();
  /// Value of the chunk for the static_chunked scheduled loop. For the
  /// default (nullptr) value, the chunk 1 will be used.
  llvm::Value *Chunk = nullptr;
  /// Constructs the input; Chunk defaults to nullptr (i.e. chunk size 1).
  StaticRTInput(unsigned IVSize, bool IVSigned, bool Ordered, Address IL,
                Address LB, Address UB, Address ST,
                llvm::Value *Chunk = nullptr)
      : IVSize(IVSize), IVSigned(IVSigned), Ordered(Ordered), IL(IL), LB(LB),
        UB(UB), ST(ST), Chunk(Chunk) {}
};
/// Call the appropriate runtime routine to initialize it before start
/// of loop.
///
/// This is used only in case of static schedule, when the user did not
/// specify an ordered clause on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds LB and UB and stride ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param Values Input arguments for the construct.
///
virtual void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind,
const OpenMPScheduleTy &ScheduleKind,
const StaticRTInput &Values);
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause.
/// \param Values Input arguments for the construct.
///
virtual void emitDistributeStaticInit(CodeGenFunction &CGF,
SourceLocation Loc,
OpenMPDistScheduleClauseKind SchedKind,
const StaticRTInput &Values);
/// Call the appropriate runtime routine to notify that we finished
/// iteration of the ordered loop with the dynamic scheduling.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
///
virtual void emitForOrderedIterationEnd(CodeGenFunction &CGF,
SourceLocation Loc, unsigned IVSize,
bool IVSigned);
/// Call the appropriate runtime routine to notify that we finished
/// all the work with current loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive for which the static finish is emitted.
///
virtual void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind);
/// Call __kmpc_dispatch_next(
/// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
/// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
/// kmp_int[32|64] *p_stride);
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param IL Address of the output variable in which the flag of the
/// last iteration is returned.
/// \param LB Address of the output variable in which the lower iteration
/// number is returned.
/// \param UB Address of the output variable in which the upper iteration
/// number is returned.
/// \param ST Address of the output variable in which the stride value is
/// returned.
virtual llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned,
Address IL, Address LB,
Address UB, Address ST);
/// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
/// clause.
/// \param NumThreads An integer value of threads.
virtual void emitNumThreadsClause(CodeGenFunction &CGF,
llvm::Value *NumThreads,
SourceLocation Loc);
/// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
/// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
virtual void emitProcBindClause(CodeGenFunction &CGF,
llvm::omp::ProcBindKind ProcBind,
SourceLocation Loc);
/// Returns address of the threadprivate variable for the current
/// thread.
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of the reference to threadprivate var.
/// \return Address of the threadprivate variable for the current thread.
virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF,
const VarDecl *VD,
Address VDAddr,
SourceLocation Loc);
/// Returns the address of the variable marked as declare target with link
/// clause OR as declare target with to clause and unified memory.
virtual Address getAddrOfDeclareTargetVar(const VarDecl *VD);
/// Emit a code for initialization of threadprivate variable. It emits
/// a call to runtime library which adds initial value to the newly created
/// threadprivate variable (if it is not constant) and registers destructor
/// for the variable (if any).
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of threadprivate declaration.
/// \param PerformInit true if initialization expression is not constant.
virtual llvm::Function *
emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr,
SourceLocation Loc, bool PerformInit,
CodeGenFunction *CGF = nullptr);
/// Emit a code for initialization of declare target variable.
/// \param VD Declare target variable.
/// \param Addr Address of the global variable \a VD.
/// \param PerformInit true if initialization expression is not constant.
virtual bool emitDeclareTargetVarDefinition(const VarDecl *VD,
llvm::GlobalVariable *Addr,
bool PerformInit);
/// Creates artificial threadprivate variable with name \p Name and type \p
/// VarType.
/// \param VarType Type of the artificial threadprivate variable.
/// \param Name Name of the artificial threadprivate variable.
virtual Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
QualType VarType,
StringRef Name);
/// Emit flush of the variables specified in 'omp flush' directive.
/// \param Vars List of variables to flush.
virtual void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
SourceLocation Loc, llvm::AtomicOrdering AO);
/// Emit task region for the task directive. The task region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
/// kmp_task_t *new_task), where new_task is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
virtual void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data);
/// Emit task region for the taskloop directive. The taskloop region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t
/// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int
/// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task
/// is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
virtual void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPLoopDirective &D,
llvm::Function *TaskFunction,
QualType SharedsTy, Address Shareds,
const Expr *IfCond, const OMPTaskDataTy &Data);
/// Emit code for the directive that does not require outlining.
///
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param HasCancel true if region has inner cancel directive, false
/// otherwise.
virtual void emitInlinedDirective(CodeGenFunction &CGF,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen,
bool HasCancel = false);
/// Emits reduction function.
/// \param ArgsType Array type containing pointers to reduction variables.
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
llvm::Function *emitReductionFunction(SourceLocation Loc,
llvm::Type *ArgsType,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps);
/// Emits single reduction combiner
void emitSingleReductionCombiner(CodeGenFunction &CGF,
const Expr *ReductionOp,
const Expr *PrivateRef,
const DeclRefExpr *LHS,
const DeclRefExpr *RHS);
struct ReductionOptionsTy {
bool WithNowait;
bool SimpleReduction;
OpenMPDirectiveKind ReductionKind;
};
/// Emit a code for reduction clause. Next code should be emitted for
/// reduction:
/// \code
///
/// static kmp_critical_name lock = { 0 };
///
/// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
/// ...
/// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
/// ...
/// }
///
/// ...
/// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
/// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
/// RedList, reduce_func, &<lock>)) {
/// case 1:
/// ...
/// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
/// ...
/// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
/// break;
/// case 2:
/// ...
/// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
/// ...
/// break;
/// default:;
/// }
/// \endcode
///
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
/// \param Options List of options for reduction codegen:
/// WithNowait true if parent directive has also nowait clause, false
/// otherwise.
/// SimpleReduction Emit reduction operation only. Used for omp simd
/// directive on the host.
/// ReductionKind The kind of reduction to perform.
virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps,
ReductionOptionsTy Options);
/// Emit a code for initialization of task reduction clause. Next code
/// should be emitted for reduction:
/// \code
///
/// _taskred_item_t red_data[n];
/// ...
/// red_data[i].shar = &shareds[i];
/// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_taskred_init(gtid, n, red_data);
/// \endcode
/// For reduction clause with task modifier it emits the next call:
/// \code
///
/// _taskred_item_t red_data[n];
/// ...
/// red_data[i].shar = &shareds[i];
/// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n,
/// red_data);
/// \endcode
/// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates, reductions etc.
virtual llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF,
SourceLocation Loc,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
const OMPTaskDataTy &Data);
/// Emits the following code for reduction clause with task modifier:
/// \code
/// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing);
/// \endcode
virtual void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc,
bool IsWorksharingReduction);
/// Required to resolve existing problems in the runtime. Emits threadprivate
/// variables to store the size of the VLAs/array sections for
/// initializer/combiner/finalizer functions.
/// \param RCG Allows to reuse an existing data for the reductions.
/// \param N Reduction item for which fixups must be emitted.
virtual void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
ReductionCodeGen &RCG, unsigned N);
/// Get the address of `void *` type of the private copy of the reduction
/// item specified by the \p SharedLVal.
/// \param ReductionsPtr Pointer to the reduction data returned by the
/// emitTaskReductionInit function.
/// \param SharedLVal Address of the original reduction item.
virtual Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *ReductionsPtr,
LValue SharedLVal);
/// Emit code for 'taskwait' directive.
virtual void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc);
/// Emit code for 'cancellation point' construct.
/// \param CancelRegion Region kind for which the cancellation point must be
/// emitted.
///
virtual void emitCancellationPointCall(CodeGenFunction &CGF,
SourceLocation Loc,
OpenMPDirectiveKind CancelRegion);
/// Emit code for 'cancel' construct.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
/// \param CancelRegion Region kind for which the cancel must be emitted.
///
virtual void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
const Expr *IfCond,
OpenMPDirectiveKind CancelRegion);
/// Emit outlined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Code generation sequence for the \a D directive.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
virtual void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen);
/// Emit the target offloading code associated with \a D. The emitted
/// code attempts offloading the execution to the device; in the event of
/// a failure it executes the host version outlined in \a OutlinedFn.
/// \param D Directive to emit.
/// \param OutlinedFn Host version of the code to be offloaded.
/// \param OutlinedFnID ID of host version of the code to be offloaded.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used and device modifier.
/// \param SizeEmitter Callback to emit number of iterations for loop-based
/// directives.
virtual void emitTargetCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter);
/// Emit the target regions enclosed in \a GD function definition or
/// the function itself in case it is a valid device function. Returns true if
/// \a GD was dealt with successfully.
/// \param GD Function to scan.
virtual bool emitTargetFunctions(GlobalDecl GD);
/// Emit the global variable if it is a valid device global variable.
/// Returns true if \a GD was dealt with successfully.
/// \param GD Variable declaration to emit.
virtual bool emitTargetGlobalVariable(GlobalDecl GD);
/// Checks if the provided global decl \a GD is a declare target variable and
/// registers it when emitting code for the host.
virtual void registerTargetGlobalVariable(const VarDecl *VD,
llvm::Constant *Addr);
/// Registers provided target firstprivate variable as global on the
/// target.
llvm::Constant *registerTargetFirstprivateCopy(CodeGenFunction &CGF,
const VarDecl *VD);
/// Emit the global \a GD if it is meaningful for the target. Returns
/// if it was emitted successfully.
/// \param GD Global to scan.
virtual bool emitTargetGlobal(GlobalDecl GD);
/// Creates and returns a registration function for when at least one
/// requires directives was used in the current module.
llvm::Function *emitRequiresDirectiveRegFun();
/// Creates all the offload entries in the current compilation unit
/// along with the associated metadata.
void createOffloadEntriesAndInfoMetadata();
/// Emits code for teams call of the \a OutlinedFn with
/// variables captured in a record which address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run by team masters. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
///
virtual void emitTeamsCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
SourceLocation Loc, llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars);
/// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code
/// for num_teams clause.
/// \param NumTeams An integer expression of teams.
/// \param ThreadLimit An integer expression of threads.
virtual void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
const Expr *ThreadLimit, SourceLocation Loc);
/// Struct that keeps all the relevant information that should be kept
/// throughout a 'target data' region.
class TargetDataInfo {
  /// Set to true if device pointer information has to be obtained.
  bool RequiresDevicePointerInfo = false;

public:
  /// The array of base pointers passed to the runtime library.
  llvm::Value *BasePointersArray = nullptr;
  /// The array of section pointers passed to the runtime library.
  llvm::Value *PointersArray = nullptr;
  /// The array of sizes passed to the runtime library.
  llvm::Value *SizesArray = nullptr;
  /// The array of map types passed to the runtime library.
  llvm::Value *MapTypesArray = nullptr;
  /// The total number of pointers passed to the runtime library.
  unsigned NumberOfPtrs = 0u;
  /// Map between a declaration of a capture and the corresponding base
  /// pointer address where the runtime returns the device pointers.
  llvm::DenseMap<const ValueDecl *, Address> CaptureDeviceAddrMap;

  explicit TargetDataInfo() {}
  explicit TargetDataInfo(bool RequiresDevicePointerInfo)
      : RequiresDevicePointerInfo(RequiresDevicePointerInfo) {}
  /// Clear information about the data arrays.
  void clearArrayInfo() {
    BasePointersArray = nullptr;
    PointersArray = nullptr;
    SizesArray = nullptr;
    MapTypesArray = nullptr;
    NumberOfPtrs = 0u;
  }
  /// Return true if the current target data information has valid arrays.
  /// Marked const: pure accessor, usable through const references.
  bool isValid() const {
    return BasePointersArray && PointersArray && SizesArray &&
           MapTypesArray && NumberOfPtrs;
  }
  /// Return true if device pointer information must be obtained.
  /// Marked const: pure accessor, usable through const references.
  bool requiresDevicePointerInfo() const { return RequiresDevicePointerInfo; }
};
/// Emit the target data mapping code associated with \a D.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the
/// target directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
/// \param Info A record used to store information that needs to be preserved
/// until the region is closed.
virtual void emitTargetDataCalls(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
const Expr *IfCond, const Expr *Device,
const RegionCodeGenTy &CodeGen,
TargetDataInfo &Info);
/// Emit the data mapping/movement code associated with the directive
/// \a D that should be of the form 'target [{enter|exit} data | update]'.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
virtual void emitTargetDataStandAloneCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
const Expr *IfCond,
const Expr *Device);
/// Marks function \a Fn with properly mangled versions of vector functions.
/// \param FD Function marked as 'declare simd'.
/// \param Fn LLVM function that must be marked with 'declare simd'
/// attributes.
virtual void emitDeclareSimdFunction(const FunctionDecl *FD,
llvm::Function *Fn);
/// Emit initialization for doacross loop nesting support.
/// \param D Loop-based construct used in doacross nesting construct.
virtual void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D,
ArrayRef<Expr *> NumIterations);
/// Emit code for doacross ordered directive with 'depend' clause.
/// \param C 'depend' clause with 'sink|source' dependency kind.
virtual void emitDoacrossOrdered(CodeGenFunction &CGF,
const OMPDependClause *C);
/// Translates the native parameter of outlined function if this is required
/// for target.
/// \param FD Field decl from captured record for the parameter.
/// \param NativeParam Parameter itself.
virtual const VarDecl *translateParameter(const FieldDecl *FD,
const VarDecl *NativeParam) const {
return NativeParam;
}
/// Gets the address of the native argument basing on the address of the
/// target-specific parameter.
/// \param NativeParam Parameter itself.
/// \param TargetParam Corresponding target-specific parameter.
virtual Address getParameterAddress(CodeGenFunction &CGF,
const VarDecl *NativeParam,
const VarDecl *TargetParam) const;
/// Choose default schedule type and chunk value for the
/// dist_schedule clause.
virtual void getDefaultDistScheduleAndChunk(CodeGenFunction &CGF,
const OMPLoopDirective &S, OpenMPDistScheduleClauseKind &ScheduleKind,
llvm::Value *&Chunk) const {}
/// Choose default schedule type and chunk value for the
/// schedule clause.
virtual void getDefaultScheduleAndChunk(CodeGenFunction &CGF,
const OMPLoopDirective &S, OpenMPScheduleClauseKind &ScheduleKind,
const Expr *&ChunkExpr) const;
/// Emits call of the outlined function with the provided arguments,
/// translating these arguments to correct target-specific arguments.
virtual void
emitOutlinedFunctionCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::FunctionCallee OutlinedFn,
ArrayRef<llvm::Value *> Args = llvm::None) const;
/// Emits OpenMP-specific function prolog.
/// Required for device constructs.
virtual void emitFunctionProlog(CodeGenFunction &CGF, const Decl *D);
/// Gets the OpenMP-specific address of the local variable.
virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF,
const VarDecl *VD);
/// Marks the declaration as already emitted for the device code and returns
/// true, if it was marked already, and false, otherwise.
bool markAsGlobalTarget(GlobalDecl GD);
/// Emit deferred declare target variables marked for deferred emission.
void emitDeferredTargetDecls() const;
/// Adjust some parameters for the target-based directives, like addresses of
/// the variables captured by reference in lambdas.
virtual void
adjustTargetSpecificDataForLambdas(CodeGenFunction &CGF,
const OMPExecutableDirective &D) const;
/// Perform check on requires decl to ensure that target architecture
/// supports unified addressing
virtual void processRequiresDirective(const OMPRequiresDecl *D);
/// Gets default memory ordering as specified in requires directive.
llvm::AtomicOrdering getDefaultMemoryOrdering() const;
/// Checks if the variable has associated OMPAllocateDeclAttr attribute with
/// the predefined allocator and translates it into the corresponding address
/// space.
virtual bool hasAllocateAttributeForGlobalVar(const VarDecl *VD, LangAS &AS);
/// Return whether the unified_shared_memory has been specified.
bool hasRequiresUnifiedSharedMemory() const;
/// Checks if the \p VD variable is marked as nontemporal declaration in
/// current context.
bool isNontemporalDecl(const ValueDecl *VD) const;
/// Create specialized alloca to handle lastprivate conditionals.
Address emitLastprivateConditionalInit(CodeGenFunction &CGF,
const VarDecl *VD);
/// Checks if the provided \p LVal is lastprivate conditional and emits the
/// code to update the value of the original variable.
/// \code
/// lastprivate(conditional: a)
/// ...
/// <type> a;
/// lp_a = ...;
/// #pragma omp critical(a)
/// if (last_iv_a <= iv) {
/// last_iv_a = iv;
/// global_a = lp_a;
/// }
/// \endcode
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
const Expr *LHS);
/// Checks if the lastprivate conditional was updated in inner region and
/// writes the value.
/// \code
/// lastprivate(conditional: a)
/// ...
/// <type> a;bool Fired = false;
/// #pragma omp ... shared(a)
/// {
/// lp_a = ...;
/// Fired = true;
/// }
/// if (Fired) {
/// #pragma omp critical(a)
/// if (last_iv_a <= iv) {
/// last_iv_a = iv;
/// global_a = lp_a;
/// }
/// Fired = false;
/// }
/// \endcode
virtual void checkAndEmitSharedLastprivateConditional(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
const llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> &IgnoredDecls);
/// Gets the address of the global copy used for lastprivate conditional
/// update, if any.
/// \param PrivLVal LValue for the private copy.
/// \param VD Original lastprivate declaration.
virtual void emitLastprivateConditionalFinalUpdate(CodeGenFunction &CGF,
LValue PrivLVal,
const VarDecl *VD,
SourceLocation Loc);
/// Emits list of dependencies based on the provided data (array of
/// dependence/expression pairs).
/// \returns Pointer to the first element of the array casted to VoidPtr type.
std::pair<llvm::Value *, Address>
emitDependClause(CodeGenFunction &CGF,
ArrayRef<OMPTaskDataTy::DependData> Dependencies,
SourceLocation Loc);
/// Emits list of dependencies based on the provided data (array of
/// dependence/expression pairs) for depobj construct. In this case, the
/// variable is allocated dynamically. \returns Pointer to the first
/// element of the array casted to VoidPtr type.
Address emitDepobjDependClause(CodeGenFunction &CGF,
const OMPTaskDataTy::DependData &Dependencies,
SourceLocation Loc);
/// Emits the code to destroy the dependency object provided in depobj
/// directive.
void emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal,
SourceLocation Loc);
/// Updates the dependency kind in the specified depobj object.
/// \param DepobjLVal LValue for the main depobj object.
/// \param NewDepKind New dependency kind.
void emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
OpenMPDependClauseKind NewDepKind, SourceLocation Loc);
/// Initializes user defined allocators specified in the uses_allocators
/// clauses.
void emitUsesAllocatorsInit(CodeGenFunction &CGF, const Expr *Allocator,
const Expr *AllocatorTraits);
/// Destroys user defined allocators specified in the uses_allocators clause.
void emitUsesAllocatorsFini(CodeGenFunction &CGF, const Expr *Allocator);
};
/// Class supports emission of SIMD-only code.
class CGOpenMPSIMDRuntime final : public CGOpenMPRuntime {
public:
explicit CGOpenMPSIMDRuntime(CodeGenModule &CGM) : CGOpenMPRuntime(CGM) {}
~CGOpenMPSIMDRuntime() override {}
/// Emits outlined function for the specified OpenMP parallel directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
llvm::Function *
emitParallelOutlinedFunction(const OMPExecutableDirective &D,
const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen) override;
/// Emits outlined function for the specified OpenMP teams directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
llvm::Function *
emitTeamsOutlinedFunction(const OMPExecutableDirective &D,
const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen) override;
/// Emits outlined function for the OpenMP task directive \a D. This
/// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
/// TaskT).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param PartIDVar Variable for partition id in the current OpenMP untied
/// task region.
/// \param TaskTVar Variable for task_t argument.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param Tied true if task is generated for tied task, false otherwise.
/// \param NumberOfParts Number of parts in untied task. Ignored for tied
/// tasks.
///
llvm::Function *emitTaskOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
const VarDecl *PartIDVar, const VarDecl *TaskTVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
bool Tied, unsigned &NumberOfParts) override;
/// Emits code for parallel or serial call of the \a OutlinedFn with
/// variables captured in a record which address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run in parallel threads. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
///
void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond) override;
/// Emits a critical region.
/// \param CriticalName Name of the critical region.
/// \param CriticalOpGen Generator for the statement associated with the given
/// critical region.
/// \param Hint Value of the 'hint' clause (optional).
void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
const RegionCodeGenTy &CriticalOpGen,
SourceLocation Loc,
const Expr *Hint = nullptr) override;
/// Emits a master region.
/// \param MasterOpGen Generator for the statement associated with the given
/// master region.
void emitMasterRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MasterOpGen,
SourceLocation Loc) override;
/// Emits code for a taskyield directive.
void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc) override;
/// Emit a taskgroup region.
/// \param TaskgroupOpGen Generator for the statement associated with the
/// given taskgroup region.
void emitTaskgroupRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &TaskgroupOpGen,
SourceLocation Loc) override;
/// Emits a single region.
/// \param SingleOpGen Generator for the statement associated with the given
/// single region.
void emitSingleRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &SingleOpGen, SourceLocation Loc,
ArrayRef<const Expr *> CopyprivateVars,
ArrayRef<const Expr *> DestExprs,
ArrayRef<const Expr *> SrcExprs,
ArrayRef<const Expr *> AssignmentOps) override;
/// Emit an ordered region.
/// \param OrderedOpGen Generator for the statement associated with the given
/// ordered region.
void emitOrderedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &OrderedOpGen,
SourceLocation Loc, bool IsThreads) override;
/// Emit an implicit/explicit barrier for OpenMP threads.
/// \param Kind Directive for which this implicit barrier call must be
/// generated. Must be OMPD_barrier for explicit barrier generation.
/// \param EmitChecks true if need to emit checks for cancellation barriers.
/// \param ForceSimpleCall true simple barrier call must be emitted, false if
/// runtime class decides which one to emit (simple or with cancellation
/// checks).
///
void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind Kind, bool EmitChecks = true,
bool ForceSimpleCall = false) override;
/// This is used for non static scheduled types and when the ordered
/// clause is present on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds \a LB and \a UB and stride \a ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param Ordered true if loop is ordered, false otherwise.
/// \param DispatchValues struct containing llvm values for lower bound, upper
/// bound, and chunk expression.
/// For the default (nullptr) value, the chunk 1 will be used.
///
void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc,
const OpenMPScheduleTy &ScheduleKind,
unsigned IVSize, bool IVSigned, bool Ordered,
const DispatchRTInput &DispatchValues) override;
/// Call the appropriate runtime routine to initialize it before start
/// of loop.
///
/// This is used only in case of static schedule, when the user did not
/// specify a ordered clause on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds LB and UB and stride ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param Values Input arguments for the construct.
///
void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind,
const OpenMPScheduleTy &ScheduleKind,
const StaticRTInput &Values) override;
/// Call the appropriate runtime routine to initialize the distribute
/// loop before its start, as directed by the 'dist_schedule' clause.
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause.
/// \param Values Input arguments for the construct.
///
void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDistScheduleClauseKind SchedKind,
const StaticRTInput &Values) override;
/// Call the appropriate runtime routine to notify that we finished
/// iteration of the ordered loop with the dynamic scheduling.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
///
void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned) override;
/// Call the appropriate runtime routine to notify that we finished
/// all the work with current loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive for which the static finish is emitted.
///
void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind) override;
/// Call __kmpc_dispatch_next(
/// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
/// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
/// kmp_int[32|64] *p_stride);
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param IL Address of the output variable in which the flag of the
/// last iteration is returned.
/// \param LB Address of the output variable in which the lower iteration
/// number is returned.
/// \param UB Address of the output variable in which the upper iteration
/// number is returned.
/// \param ST Address of the output variable in which the stride value is
/// returned.
llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned, Address IL,
Address LB, Address UB, Address ST) override;
/// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
/// clause.
/// \param NumThreads An integer value of threads.
void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads,
SourceLocation Loc) override;
/// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
/// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
void emitProcBindClause(CodeGenFunction &CGF,
llvm::omp::ProcBindKind ProcBind,
SourceLocation Loc) override;
/// Returns address of the threadprivate variable for the current
/// thread.
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of the reference to threadprivate var.
/// \return Address of the threadprivate variable for the current thread.
Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD,
Address VDAddr, SourceLocation Loc) override;
/// Emit a code for initialization of threadprivate variable. It emits
/// a call to runtime library which adds initial value to the newly created
/// threadprivate variable (if it is not constant) and registers destructor
/// for the variable (if any).
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of threadprivate declaration.
/// \param PerformInit true if initialization expression is not constant.
llvm::Function *
emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr,
SourceLocation Loc, bool PerformInit,
CodeGenFunction *CGF = nullptr) override;
/// Creates artificial threadprivate variable with name \p Name and type \p
/// VarType.
/// \param VarType Type of the artificial threadprivate variable.
/// \param Name Name of the artificial threadprivate variable.
Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
QualType VarType,
StringRef Name) override;
/// Emit flush of the variables specified in 'omp flush' directive.
/// \param Vars List of variables to flush.
void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
SourceLocation Loc, llvm::AtomicOrdering AO) override;
/// Emit task region for the task directive. The task region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
/// kmp_task_t *new_task), where new_task is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data) override;
/// Emit task region for the taskloop directive. The taskloop region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t
/// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int
/// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task
/// is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPLoopDirective &D, llvm::Function *TaskFunction,
QualType SharedsTy, Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data) override;
/// Emit a code for reduction clause. Next code should be emitted for
/// reduction:
/// \code
///
/// static kmp_critical_name lock = { 0 };
///
/// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
/// ...
/// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
/// ...
/// }
///
/// ...
/// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
/// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
/// RedList, reduce_func, &<lock>)) {
/// case 1:
/// ...
/// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
/// ...
/// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
/// break;
/// case 2:
/// ...
/// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
/// ...
/// break;
/// default:;
/// }
/// \endcode
///
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
/// \param Options List of options for reduction codegen:
/// WithNowait true if parent directive has also nowait clause, false
/// otherwise.
/// SimpleReduction Emit reduction operation only. Used for omp simd
/// directive on the host.
/// ReductionKind The kind of reduction to perform.
void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps,
ReductionOptionsTy Options) override;
/// Emit a code for initialization of task reduction clause. Next code
/// should be emitted for reduction:
/// \code
///
/// _taskred_item_t red_data[n];
/// ...
/// red_data[i].shar = &shareds[i];
/// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_taskred_init(gtid, n, red_data);
/// \endcode
/// For reduction clause with task modifier it emits the next call:
/// \code
///
/// _taskred_item_t red_data[n];
/// ...
/// red_data[i].shar = &shareds[i];
/// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n,
/// red_data);
/// \endcode
/// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates, reductions etc.
llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
const OMPTaskDataTy &Data) override;
/// Emits the following code for reduction clause with task modifier:
/// \code
/// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing);
/// \endcode
void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc,
bool IsWorksharingReduction) override;
/// Required to resolve existing problems in the runtime. Emits threadprivate
/// variables to store the size of the VLAs/array sections for
/// initializer/combiner/finalizer functions + emits threadprivate variable to
/// store the pointer to the original reduction item for the custom
/// initializer defined by declare reduction construct.
/// \param RCG Allows to reuse an existing data for the reductions.
/// \param N Reduction item for which fixups must be emitted.
void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
ReductionCodeGen &RCG, unsigned N) override;
/// Get the address of `void *` type of the private copy of the reduction
/// item specified by the \p SharedLVal.
/// \param ReductionsPtr Pointer to the reduction data returned by the
/// emitTaskReductionInit function.
/// \param SharedLVal Address of the original reduction item.
Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *ReductionsPtr,
LValue SharedLVal) override;
/// Emit code for 'taskwait' directive.
void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc) override;
/// Emit code for 'cancellation point' construct.
/// \param CancelRegion Region kind for which the cancellation point must be
/// emitted.
///
void emitCancellationPointCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind CancelRegion) override;
/// Emit code for 'cancel' construct.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
/// \param CancelRegion Region kind for which the cancel must be emitted.
///
void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
const Expr *IfCond,
OpenMPDirectiveKind CancelRegion) override;
/// Emit outlined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Code generation sequence for the \a D directive.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen) override;
/// Emit the target offloading code associated with \a D. The emitted
/// code attempts offloading the execution to the device, and in the event of
/// a failure it executes the host version outlined in \a OutlinedFn.
/// \param D Directive to emit.
/// \param OutlinedFn Host version of the code to be offloaded.
/// \param OutlinedFnID ID of host version of the code to be offloaded.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used and device modifier.
void emitTargetCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter) override;
/// Emit the target regions enclosed in \a GD function definition or
/// the function itself in case it is a valid device function. Returns true if
/// \a GD was dealt with successfully.
/// \param GD Function to scan.
bool emitTargetFunctions(GlobalDecl GD) override;
/// Emit the global variable if it is a valid device global variable.
/// Returns true if \a GD was dealt with successfully.
/// \param GD Variable declaration to emit.
bool emitTargetGlobalVariable(GlobalDecl GD) override;
/// Emit the global \a GD if it is meaningful for the target. Returns
/// if it was emitted successfully.
/// \param GD Global to scan.
bool emitTargetGlobal(GlobalDecl GD) override;
/// Emits code for teams call of the \a OutlinedFn with
/// variables captured in a record which address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run by team masters. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
///
void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D,
SourceLocation Loc, llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars) override;
/// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code
/// for num_teams clause.
/// \param NumTeams An integer expression of teams.
/// \param ThreadLimit An integer expression of threads.
void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
const Expr *ThreadLimit, SourceLocation Loc) override;
/// Emit the target data mapping code associated with \a D.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the
/// target directive, or null if no device clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
/// \param Info A record used to store information that needs to be preserved
/// until the region is closed.
void emitTargetDataCalls(CodeGenFunction &CGF,
const OMPExecutableDirective &D, const Expr *IfCond,
const Expr *Device, const RegionCodeGenTy &CodeGen,
TargetDataInfo &Info) override;
/// Emit the data mapping/movement code associated with the directive
/// \a D that should be of the form 'target [{enter|exit} data | update]'.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
void emitTargetDataStandAloneCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
const Expr *IfCond,
const Expr *Device) override;
/// Emit initialization for doacross loop nesting support.
/// \param D Loop-based construct used in doacross nesting construct.
void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D,
ArrayRef<Expr *> NumIterations) override;
/// Emit code for doacross ordered directive with 'depend' clause.
/// \param C 'depend' clause with 'sink|source' dependency kind.
void emitDoacrossOrdered(CodeGenFunction &CGF,
const OMPDependClause *C) override;
/// Translates the native parameter of outlined function if this is required
/// for target.
/// \param FD Field decl from captured record for the parameter.
/// \param NativeParam Parameter itself.
const VarDecl *translateParameter(const FieldDecl *FD,
const VarDecl *NativeParam) const override;
/// Gets the address of the native argument basing on the address of the
/// target-specific parameter.
/// \param NativeParam Parameter itself.
/// \param TargetParam Corresponding target-specific parameter.
Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam,
const VarDecl *TargetParam) const override;
/// Gets the OpenMP-specific address of the local variable.
/// This implementation performs no remapping: returning
/// Address::invalid() signals to the caller that \p VD has no
/// runtime-specific storage and the default (stack) allocation
/// should be used instead.
Address getAddressOfLocalVariable(CodeGenFunction &CGF,
const VarDecl *VD) override {
return Address::invalid();
}
};
} // namespace CodeGen
} // namespace clang
#endif
|
mat_mul_p4a_7000.c | /*
* file for mat_mul.c
*/
#include "./mat_mul.h"
#include "./size.h"
void mat_mul(int *a, int *b, int *c);
/*
 * Auto-generated (Par4All) benchmark kernel operating on flat 7000x7000
 * int matrices stored row-major.
 *
 * NOTE(review): two oddities are preserved exactly from the generated
 * code -- confirm they are intentional benchmark behavior:
 *   1. `b` is indexed as b[col*7000+kk], i.e. the product computed is
 *      effectively A * B-transposed, not A * B.
 *   2. The innermost `rep` loop adds the identical product 100 times,
 *      scaling every result element by 100 (presumably to inflate the
 *      arithmetic workload for timing).
 */
void mat_mul(int *a, int *b, int *c)
{
int row, col, kk, rep;
#pragma omp parallel for private(col, rep, kk)
for (row = 0; row < 7000; row++) {
for (col = 0; col < 7000; col++) {
c[row*7000+col] = 0;
for (kk = 0; kk < 7000; kk++) {
/* same term accumulated 100 times -- see NOTE above */
for (rep = 0; rep < 100; rep++) {
c[row*7000+col] += a[row*7000+kk] * b[col*7000+kk];
}
}
}
}
return;
}
|
morphology.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M OOO RRRR PPPP H H OOO L OOO GGGG Y Y %
% MM MM O O R R P P H H O O L O O G Y Y %
% M M M O O RRRR PPPP HHHHH O O L O O G GGG Y %
% M M O O R R P H H O O L O O G G Y %
% M M OOO R R P H H OOO LLLLL OOO GGG Y %
% %
% %
% MagickCore Morphology Methods %
% %
% Software Design %
% Anthony Thyssen %
% January 2010 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Morphology is the application of various kernels, of any size or shape, to an
% image in various ways (typically binary, but not always).
%
% Convolution (weighted sum or average) is just one specific type of
% morphology.  Just one that is very common for image blurring and sharpening
% effects. Not only 2D Gaussian blurring, but also 2-pass 1D Blurring.
%
% This module provides not only a general morphology function, and the ability
% to apply more advanced or iterative morphologies, but also functions for the
% generation of many different types of kernel arrays from user supplied
% arguments.  Perhaps even the generation of a kernel from a small image.
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/morphology.h"
#include "MagickCore/morphology-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/prepress.h"
#include "MagickCore/quantize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/registry.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
/*
Other global definitions used by module.
*/
#define Minimize(assign,value) assign=MagickMin(assign,value)
#define Maximize(assign,value) assign=MagickMax(assign,value)
/* Integer Factorial Function - for a Binomial kernel */
#if 1
/* Integer factorial: returns n! computed iteratively in size_t
   arithmetic (overflows silently for large n, as in the original). */
static inline size_t fact(size_t n)
{
  size_t result = 1;
  size_t i;
  for (i = 2; i <= n; i++)
    result *= i;
  return(result);
}
#elif 1 /* glibc floating point alternatives */
#define fact(n) ((size_t)tgamma((double)n+1))
#else
#define fact(n) ((size_t)lgamma((double)n+1))
#endif
/* Currently these are only internal to this module */
static void
CalcKernelMetaData(KernelInfo *),
ExpandMirrorKernelInfo(KernelInfo *),
ExpandRotateKernelInfo(KernelInfo *, const double),
RotateKernelInfo(KernelInfo *, double);
/* Walk the singly linked ->next chain and return the final kernel in
   the list (the one whose next pointer is NULL). The argument must be
   non-NULL, matching the original's precondition. */
static inline KernelInfo *LastKernelInfo(KernelInfo *kernel)
{
  KernelInfo *last;

  for (last=kernel; last->next != (KernelInfo *) NULL; last=last->next) ;
  return(last);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelInfo() takes the given string (generally supplied by the
% user) and converts it into a Morphology/Convolution Kernel. This allows
% users to specify a kernel from a number of pre-defined kernels, or to fully
% specify their own kernel for a specific Convolution or Morphology
% Operation.
%
% The kernel so generated can be any rectangular array of floating point
% values (doubles) with the 'control point' or 'pixel being affected'
% anywhere within that array of values.
%
% Previously IM was restricted to a square of odd size using the exact
% center as origin, this is no longer the case, and any rectangular kernel
% with any value being declared the origin. This in turn allows the use of
% highly asymmetrical kernels.
%
% The floating point values in the kernel can also include a special value
% known as 'nan' or 'not a number' to indicate that this value is not part
% of the kernel array.  This allows you to shape the kernel within its
% rectangular area. That is 'nan' values provide a 'mask' for the kernel
% shape. However at least one non-nan value must be provided for correct
% working of a kernel.
%
% The returned kernel should be freed using the DestroyKernelInfo() when you
% are finished with it. Do not free this memory yourself.
%
% Input kernel definition strings can consist of any of three types.
%
% "name:args[[@><]"
% Select from one of the built in kernels, using the name and
% geometry arguments supplied. See AcquireKernelBuiltIn()
%
% "WxH[+X+Y][@><]:num, num, num ..."
% a kernel of size W by H, with W*H floating point numbers following.
% the 'center' can be optionally be defined at +X+Y (such that +0+0
% is top left corner). If not defined the pixel in the center, for
% odd sizes, or to the immediate top or left of center for even sizes
% is automatically selected.
%
% "num, num, num, num, ..."
% list of floating point numbers defining an 'old style' odd sized
% square kernel. At least 9 values should be provided for a 3x3
% square kernel, 25 for a 5x5 square kernel, 49 for 7x7, etc.
% Values can be space or comma separated. This is not recommended.
%
% You can define a 'list of kernels' which can be used by some morphology
% operators A list is defined as a semi-colon separated list kernels.
%
% " kernel ; kernel ; kernel ; "
%
% Any extra ';' characters, at start, end or between kernel definitions are
% simply ignored.
%
% The special flags will expand a single kernel, into a list of rotated
% kernels. A '@' flag will expand a 3x3 kernel into a list of 45-degree
% cyclic rotations, while a '>' will generate a list of 90-degree rotations.
% The '<' also expands using 90-degree rotates, but giving a 180-degree
% reflected kernel before the +/- 90-degree rotations, which can be important
% for Thinning operations.
%
% Note that 'name' kernels will start with an alphabetic character while the
% new kernel specification has a ':' character in its specification string.
% If neither is the case, it is assumed an old style of a simple list of
% numbers generating a odd-sized square kernel has been given.
%
% The format of the AcquireKernelInfo method is:
%
% KernelInfo *AcquireKernelInfo(const char *kernel_string)
%
% A description of each parameter follows:
%
% o kernel_string: the Morphology/Convolution kernel wanted.
%
*/
/* This was separated so that it could be used as a separate
** array input handling function, such as for -color-matrix
*/
static KernelInfo *ParseKernelArray(const char *kernel_string)
{
KernelInfo
*kernel;
char
token[MagickPathExtent];
const char
*p,
*end;
register ssize_t
i;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
MagickStatusType
flags;
GeometryInfo
args;
kernel=(KernelInfo *) AcquireQuantumMemory(1,sizeof(*kernel));
if (kernel == (KernelInfo *) NULL)
return(kernel);
(void) memset(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = UserDefinedKernel;
kernel->next = (KernelInfo *) NULL;
kernel->signature=MagickCoreSignature;
if (kernel_string == (const char *) NULL)
return(kernel);
/* find end of this specific kernel definition string */
end = strchr(kernel_string, ';');
if ( end == (char *) NULL )
end = strchr(kernel_string, '\0');
/* clear flags - for Expanding kernel lists thorugh rotations */
flags = NoValue;
/* Has a ':' in argument - New user kernel specification
FUTURE: this split on ':' could be done by StringToken()
*/
p = strchr(kernel_string, ':');
if ( p != (char *) NULL && p < end)
{
/* ParseGeometry() needs the geometry separated! -- Arrgghh */
memcpy(token, kernel_string, (size_t) (p-kernel_string));
token[p-kernel_string] = '\0';
SetGeometryInfo(&args);
flags = ParseGeometry(token, &args);
/* Size handling and checks of geometry settings */
if ( (flags & WidthValue) == 0 ) /* if no width then */
args.rho = args.sigma; /* then width = height */
if ( args.rho < 1.0 ) /* if width too small */
args.rho = 1.0; /* then width = 1 */
if ( args.sigma < 1.0 ) /* if height too small */
args.sigma = args.rho; /* then height = width */
kernel->width = (size_t)args.rho;
kernel->height = (size_t)args.sigma;
/* Offset Handling and Checks */
if ( args.xi < 0.0 || args.psi < 0.0 )
return(DestroyKernelInfo(kernel));
kernel->x = ((flags & XValue)!=0) ? (ssize_t)args.xi
: (ssize_t) (kernel->width-1)/2;
kernel->y = ((flags & YValue)!=0) ? (ssize_t)args.psi
: (ssize_t) (kernel->height-1)/2;
if ( kernel->x >= (ssize_t) kernel->width ||
kernel->y >= (ssize_t) kernel->height )
return(DestroyKernelInfo(kernel));
p++; /* advance beyond the ':' */
}
else
{ /* ELSE - Old old specification, forming odd-square kernel */
/* count up number of values given */
p=(const char *) kernel_string;
while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
p++; /* ignore "'" chars for convolve filter usage - Cristy */
for (i=0; p < end; i++)
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
}
/* set the size of the kernel - old sized square */
kernel->width = kernel->height= (size_t) sqrt((double) i+1.0);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
p=(const char *) kernel_string;
while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
p++; /* ignore "'" chars for convolve filter usage - Cristy */
}
/* Read in the kernel values from rest of input string argument */
kernel->values=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory(
kernel->width,kernel->height*sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
kernel->minimum=MagickMaximumValue;
kernel->maximum=(-MagickMaximumValue);
kernel->negative_range = kernel->positive_range = 0.0;
for (i=0; (i < (ssize_t) (kernel->width*kernel->height)) && (p < end); i++)
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
if ( LocaleCompare("nan",token) == 0
|| LocaleCompare("-",token) == 0 ) {
kernel->values[i] = nan; /* this value is not part of neighbourhood */
}
else {
kernel->values[i] = StringToDouble(token,(char **) NULL);
( kernel->values[i] < 0)
? ( kernel->negative_range += kernel->values[i] )
: ( kernel->positive_range += kernel->values[i] );
Minimize(kernel->minimum, kernel->values[i]);
Maximize(kernel->maximum, kernel->values[i]);
}
}
/* sanity check -- no more values in kernel definition */
GetNextToken(p,&p,MagickPathExtent,token);
if ( *token != '\0' && *token != ';' && *token != '\'' )
return(DestroyKernelInfo(kernel));
#if 0
/* this was the old method of handling a incomplete kernel */
if ( i < (ssize_t) (kernel->width*kernel->height) ) {
Minimize(kernel->minimum, kernel->values[i]);
Maximize(kernel->maximum, kernel->values[i]);
for ( ; i < (ssize_t) (kernel->width*kernel->height); i++)
kernel->values[i]=0.0;
}
#else
/* Number of values for kernel was not enough - Report Error */
if ( i < (ssize_t) (kernel->width*kernel->height) )
return(DestroyKernelInfo(kernel));
#endif
/* check that we recieved at least one real (non-nan) value! */
if (kernel->minimum == MagickMaximumValue)
return(DestroyKernelInfo(kernel));
if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel size */
ExpandRotateKernelInfo(kernel, 45.0); /* cyclic rotate 3x3 kernels */
else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
ExpandRotateKernelInfo(kernel, 90.0); /* 90 degree rotate of kernel */
else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */
ExpandMirrorKernelInfo(kernel); /* 90 degree mirror rotate */
return(kernel);
}
static KernelInfo *ParseKernelName(const char *kernel_string,
ExceptionInfo *exception)
{
char
token[MagickPathExtent];
const char
*p,
*end;
GeometryInfo
args;
KernelInfo
*kernel;
MagickStatusType
flags;
ssize_t
type;
/* Parse special 'named' kernel */
GetNextToken(kernel_string,&p,MagickPathExtent,token);
type=ParseCommandOption(MagickKernelOptions,MagickFalse,token);
if ( type < 0 || type == UserDefinedKernel )
return((KernelInfo *) NULL); /* not a valid named kernel */
while (((isspace((int) ((unsigned char) *p)) != 0) ||
(*p == ',') || (*p == ':' )) && (*p != '\0') && (*p != ';'))
p++;
end = strchr(p, ';'); /* end of this kernel defintion */
if ( end == (char *) NULL )
end = strchr(p, '\0');
/* ParseGeometry() needs the geometry separated! -- Arrgghh */
memcpy(token, p, (size_t) (end-p));
token[end-p] = '\0';
SetGeometryInfo(&args);
flags = ParseGeometry(token, &args);
#if 0
/* For Debugging Geometry Input */
(void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
flags, args.rho, args.sigma, args.xi, args.psi );
#endif
/* special handling of missing values in input string */
switch( type ) {
/* Shape Kernel Defaults */
case UnityKernel:
if ( (flags & WidthValue) == 0 )
args.rho = 1.0; /* Default scale = 1.0, zero is valid */
break;
case SquareKernel:
case DiamondKernel:
case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
if ( (flags & HeightValue) == 0 )
args.sigma = 1.0; /* Default scale = 1.0, zero is valid */
break;
case RingKernel:
if ( (flags & XValue) == 0 )
args.xi = 1.0; /* Default scale = 1.0, zero is valid */
break;
case RectangleKernel: /* Rectangle - set size defaults */
if ( (flags & WidthValue) == 0 ) /* if no width then */
args.rho = args.sigma; /* then width = height */
if ( args.rho < 1.0 ) /* if width too small */
args.rho = 3; /* then width = 3 */
if ( args.sigma < 1.0 ) /* if height too small */
args.sigma = args.rho; /* then height = width */
if ( (flags & XValue) == 0 ) /* center offset if not defined */
args.xi = (double)(((ssize_t)args.rho-1)/2);
if ( (flags & YValue) == 0 )
args.psi = (double)(((ssize_t)args.sigma-1)/2);
break;
/* Distance Kernel Defaults */
case ChebyshevKernel:
case ManhattanKernel:
case OctagonalKernel:
case EuclideanKernel:
if ( (flags & HeightValue) == 0 ) /* no distance scale */
args.sigma = 100.0; /* default distance scaling */
else if ( (flags & AspectValue ) != 0 ) /* '!' flag */
args.sigma = QuantumRange/(args.sigma+1); /* maximum pixel distance */
else if ( (flags & PercentValue ) != 0 ) /* '%' flag */
args.sigma *= QuantumRange/100.0; /* percentage of color range */
break;
default:
break;
}
kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args, exception);
if ( kernel == (KernelInfo *) NULL )
return(kernel);
/* global expand to rotated kernel list - only for single kernels */
if ( kernel->next == (KernelInfo *) NULL ) {
if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel args */
ExpandRotateKernelInfo(kernel, 45.0);
else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
ExpandRotateKernelInfo(kernel, 90.0);
else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */
ExpandMirrorKernelInfo(kernel);
}
return(kernel);
}
MagickExport KernelInfo *AcquireKernelInfo(const char *kernel_string,
ExceptionInfo *exception)
{
KernelInfo
*kernel,
*new_kernel;
char
*kernel_cache,
token[MagickPathExtent];
const char
*p;
if (kernel_string == (const char *) NULL)
return(ParseKernelArray(kernel_string));
p=kernel_string;
kernel_cache=(char *) NULL;
if (*kernel_string == '@')
{
kernel_cache=FileToString(kernel_string+1,~0UL,exception);
if (kernel_cache == (char *) NULL)
return((KernelInfo *) NULL);
p=(const char *) kernel_cache;
}
kernel=NULL;
while (GetNextToken(p,(const char **) NULL,MagickPathExtent,token), *token != '\0')
{
/* ignore extra or multiple ';' kernel separators */
if (*token != ';')
{
/* tokens starting with alpha is a Named kernel */
if (isalpha((int) ((unsigned char) *token)) != 0)
new_kernel=ParseKernelName(p,exception);
else /* otherwise a user defined kernel array */
new_kernel=ParseKernelArray(p);
/* Error handling -- this is not proper error handling! */
if (new_kernel == (KernelInfo *) NULL)
{
if (kernel != (KernelInfo *) NULL)
kernel=DestroyKernelInfo(kernel);
return((KernelInfo *) NULL);
}
/* initialise or append the kernel list */
if (kernel == (KernelInfo *) NULL)
kernel=new_kernel;
else
LastKernelInfo(kernel)->next=new_kernel;
}
/* look for the next kernel in list */
p=strchr(p,';');
if (p == (char *) NULL)
break;
p++;
}
if (kernel_cache != (char *) NULL)
kernel_cache=DestroyString(kernel_cache);
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l B u i l t I n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelBuiltIn() returned one of the 'named' built-in types of
% kernels used for special purposes such as gaussian blurring, skeleton
% pruning, and edge distance determination.
%
% They take a KernelType, and a set of geometry style arguments, which were
% typically decoded from a user supplied string, or from a more complex
% Morphology Method that was requested.
%
% The format of the AcquireKernelBuiltIn method is:
%
% KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
% const GeometryInfo args)
%
% A description of each parameter follows:
%
% o type: the pre-defined type of kernel wanted
%
% o args: arguments defining or modifying the kernel
%
% Convolution Kernels
%
% Unity
% The a No-Op or Scaling single element kernel.
%
% Gaussian:{radius},{sigma}
% Generate a two-dimensional gaussian kernel, as used by -gaussian.
% The sigma for the curve is required. The resulting kernel is
% normalized,
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% NOTE: that the 'radius' is optional, but if provided can limit (clip)
% the final size of the resulting kernel to a square 2*radius+1 in size.
% The radius should be at least 2 times that of the sigma value, or
% severe clipping and aliasing may result. If not given or set to 0 the
% radius will be determined so as to produce the best minimal error
% result, which is usually much larger than is normally needed.
%
% LoG:{radius},{sigma}
% "Laplacian of a Gaussian" or "Mexican Hat" Kernel.
% The supposed ideal edge detection, zero-summing kernel.
%
% An alternative to this kernel is to use a "DoG" with a sigma ratio of
% approx 1.6 (according to wikipedia).
%
% DoG:{radius},{sigma1},{sigma2}
% "Difference of Gaussians" Kernel.
% As "Gaussian" but with a gaussian produced by 'sigma2' subtracted
% from the gaussian produced by 'sigma1'. Typically sigma2 > sigma1.
% The result is a zero-summing kernel.
%
% Blur:{radius},{sigma}[,{angle}]
% Generates a 1 dimensional or linear gaussian blur, at the angle given
% (current restricted to orthogonal angles). If a 'radius' is given the
% kernel is clipped to a width of 2*radius+1. Kernel can be rotated
% by a 90 degree angle.
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% Note that two convolutions with two "Blur" kernels perpendicular to
% each other, is equivalent to a far larger "Gaussian" kernel with the
% same sigma value, However it is much faster to apply. This is how the
% "-blur" operator actually works.
%
% Comet:{width},{sigma},{angle}
% Blur in one direction only, much like how a bright object leaves
% a comet like trail. The Kernel is actually half a gaussian curve,
% Adding two such blurs in opposite directions produces a Blur Kernel.
% Angle can be rotated in multiples of 90 degrees.
%
% Note that the first argument is the width of the kernel and not the
% radius of the kernel.
%
% Binomial:[{radius}]
% Generate a discrete kernel using a 2 dimensional Pascal's Triangle
% of values. Used for special forms of image filters.
%
% # Still to be implemented...
% #
% # Filter2D
% # Filter1D
% # Set kernel values using a resize filter, and given scale (sigma)
% # Cylindrical or Linear. Is this possible with an image?
% #
%
% Named Constant Convolution Kernels
%
% All these are unscaled, zero-summing kernels by default. As such for
% non-HDRI version of ImageMagick some form of normalization, user scaling,
% and biasing the results is recommended, to prevent the resulting image
% being 'clipped'.
%
% The 3x3 kernels (most of these) can be circularly rotated in multiples of
% 45 degrees to generate the 8 angled varients of each of the kernels.
%
% Laplacian:{type}
% Discrete Lapacian Kernels, (without normalization)
% Type 0 : 3x3 with center:8 surounded by -1 (8 neighbourhood)
% Type 1 : 3x3 with center:4 edge:-1 corner:0 (4 neighbourhood)
% Type 2 : 3x3 with center:4 edge:1 corner:-2
% Type 3 : 3x3 with center:4 edge:-2 corner:1
% Type 5 : 5x5 laplacian
% Type 7 : 7x7 laplacian
% Type 15 : 5x5 LoG (sigma approx 1.4)
% Type 19 : 9x9 LoG (sigma approx 1.4)
%
% Sobel:{angle}
% Sobel 'Edge' convolution kernel (3x3)
% | -1, 0, 1 |
% | -2, 0, 2 |
% | -1, 0, 1 |
%
% Roberts:{angle}
% Roberts convolution kernel (3x3)
% | 0, 0, 0 |
% | -1, 1, 0 |
% | 0, 0, 0 |
%
% Prewitt:{angle}
% Prewitt Edge convolution kernel (3x3)
% | -1, 0, 1 |
% | -1, 0, 1 |
% | -1, 0, 1 |
%
% Compass:{angle}
% Prewitt's "Compass" convolution kernel (3x3)
% | -1, 1, 1 |
% | -1,-2, 1 |
% | -1, 1, 1 |
%
% Kirsch:{angle}
% Kirsch's "Compass" convolution kernel (3x3)
% | -3,-3, 5 |
% | -3, 0, 5 |
% | -3,-3, 5 |
%
% FreiChen:{angle}
% Frei-Chen Edge Detector is based on a kernel that is similar to
% the Sobel Kernel, but is designed to be isotropic. That is it takes
% into account the distance of the diagonal in the kernel.
%
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) |
% | 1, 0, -1 |
%
% FreiChen:{type},{angle}
%
% Frei-Chen Pre-weighted kernels...
%
% Type 0: default un-nomalized version shown above.
%
% Type 1: Orthogonal Kernel (same as type 11 below)
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 2: Diagonal form of Kernel...
% | 1, sqrt(2), 0 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 0, -sqrt(2) -1 |
%
% However this kernel is als at the heart of the FreiChen Edge Detection
% Process which uses a set of 9 specially weighted kernel. These 9
% kernels not be normalized, but directly applied to the image. The
% results is then added together, to produce the intensity of an edge in
% a specific direction. The square root of the pixel value can then be
% taken as the cosine of the edge, and at least 2 such runs at 90 degrees
% from each other, both the direction and the strength of the edge can be
% determined.
%
% Type 10: All 9 of the following pre-weighted kernels...
%
% Type 11: | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 12: | 1, sqrt(2), 1 |
% | 0, 0, 0 | / 2*sqrt(2)
% | 1, sqrt(2), 1 |
%
% Type 13: | sqrt(2), -1, 0 |
% | -1, 0, 1 | / 2*sqrt(2)
% | 0, 1, -sqrt(2) |
%
% Type 14: | 0, 1, -sqrt(2) |
% | -1, 0, 1 | / 2*sqrt(2)
% | sqrt(2), -1, 0 |
%
% Type 15: | 0, -1, 0 |
% | 1, 0, 1 | / 2
% | 0, -1, 0 |
%
% Type 16: | 1, 0, -1 |
% | 0, 0, 0 | / 2
% | -1, 0, 1 |
%
% Type 17: | 1, -2, 1 |
% | -2, 4, -2 | / 6
% | -1, -2, 1 |
%
% Type 18: | -2, 1, -2 |
% | 1, 4, 1 | / 6
% | -2, 1, -2 |
%
% Type 19: | 1, 1, 1 |
% | 1, 1, 1 | / 3
% | 1, 1, 1 |
%
% The first 4 are for edge detection, the next 4 are for line detection
% and the last is to add a average component to the results.
%
% Using a special type of '-1' will return all 9 pre-weighted kernels
% as a multi-kernel list, so that you can use them directly (without
% normalization) with the special "-set option:morphology:compose Plus"
% setting to apply the full FreiChen Edge Detection Technique.
%
% If 'type' is large it will be taken to be an actual rotation angle for
% the default FreiChen (type 0) kernel. As such FreiChen:45 will look
% like a Sobel:45 but with 'sqrt(2)' instead of '2' values.
%
% WARNING: The above was layed out as per
% http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf
% But rotated 90 degrees so direction is from left rather than the top.
% I have yet to find any secondary confirmation of the above. The only
% other source found was actual source code at
% http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf
% Neither paper defines the kernels in a way that looks logical or
% correct when taken as a whole.
%
% Boolean Kernels
%
% Diamond:[{radius}[,{scale}]]
% Generate a diamond shaped kernel with given radius to the points.
% Kernel size will again be radius*2+1 square and defaults to radius 1,
% generating a 3x3 kernel that is slightly larger than a square.
%
% Square:[{radius}[,{scale}]]
% Generate a square shaped kernel of size radius*2+1, and defaulting
% to a 3x3 (radius 1).
%
% Octagon:[{radius}[,{scale}]]
% Generate octagonal shaped kernel of given radius and constant scale.
% Default radius is 3 producing a 7x7 kernel. A radius of 1 will result
% in "Diamond" kernel.
%
% Disk:[{radius}[,{scale}]]
% Generate a binary disk, thresholded at the radius given, the radius
% may be a float-point value. Final Kernel size is floor(radius)*2+1
% square. A radius of 5.3 is the default.
%
% NOTE: That a low radii Disk kernels produce the same results as
% many of the previously defined kernels, but differ greatly at larger
% radii. Here is a table of equivalences...
% "Disk:1" => "Diamond", "Octagon:1", or "Cross:1"
% "Disk:1.5" => "Square"
% "Disk:2" => "Diamond:2"
% "Disk:2.5" => "Octagon"
% "Disk:2.9" => "Square:2"
% "Disk:3.5" => "Octagon:3"
% "Disk:4.5" => "Octagon:4"
% "Disk:5.4" => "Octagon:5"
% "Disk:6.4" => "Octagon:6"
% All other Disk shapes are unique to this kernel, but because a "Disk"
% is more circular when using a larger radius, using a larger radius is
% preferred over iterating the morphological operation.
%
% Rectangle:{geometry}
% Simply generate a rectangle of 1's with the size given. You can also
% specify the location of the 'control point', otherwise the closest
% pixel to the center of the rectangle is selected.
%
% Properly centered and odd sized rectangles work the best.
%
% Symbol Dilation Kernels
%
% These kernel is not a good general morphological kernel, but is used
% more for highlighting and marking any single pixels in an image using,
% a "Dilate" method as appropriate.
%
% For the same reasons iterating these kernels does not produce the
% same result as using a larger radius for the symbol.
%
% Plus:[{radius}[,{scale}]]
% Cross:[{radius}[,{scale}]]
% Generate a kernel in the shape of a 'plus' or a 'cross' with
% a each arm the length of the given radius (default 2).
%
% NOTE: "plus:1" is equivalent to a "Diamond" kernel.
%
% Ring:{radius1},{radius2}[,{scale}]
% A ring of the values given that falls between the two radii.
% Defaults to a ring of approximataly 3 radius in a 7x7 kernel.
% This is the 'edge' pixels of the default "Disk" kernel,
% More specifically, "Ring" -> "Ring:2.5,3.5,1.0"
%
% Hit and Miss Kernels
%
% Peak:radius1,radius2
% Find any peak larger than the pixels the fall between the two radii.
% The default ring of pixels is as per "Ring".
% Edges
% Find flat orthogonal edges of a binary shape
% Corners
% Find 90 degree corners of a binary shape
% Diagonals:type
% A special kernel to thin the 'outside' of diagonals
% LineEnds:type
% Find end points of lines (for pruning a skeletion)
% Two types of lines ends (default to both) can be searched for
% Type 0: All line ends
% Type 1: single kernel for 4-conneected line ends
% Type 2: single kernel for simple line ends
% LineJunctions
% Find three line junctions (within a skeletion)
% Type 0: all line junctions
% Type 1: Y Junction kernel
% Type 2: Diagonal T Junction kernel
% Type 3: Orthogonal T Junction kernel
% Type 4: Diagonal X Junction kernel
% Type 5: Orthogonal + Junction kernel
% Ridges:type
% Find single pixel ridges or thin lines
% Type 1: Fine single pixel thick lines and ridges
% Type 2: Find two pixel thick lines and ridges
% ConvexHull
% Octagonal Thickening Kernel, to generate convex hulls of 45 degrees
% Skeleton:type
% Traditional skeleton generating kernels.
% Type 1: Tradional Skeleton kernel (4 connected skeleton)
% Type 2: HIPR2 Skeleton kernel (8 connected skeleton)
% Type 3: Thinning skeleton based on a ressearch paper by
% Dan S. Bloomberg (Default Type)
% ThinSE:type
% A huge variety of Thinning Kernels designed to preserve conectivity.
% many other kernel sets use these kernels as source definitions.
% Type numbers are 41-49, 81-89, 481, and 482 which are based on
% the super and sub notations used in the source research paper.
%
% Distance Measuring Kernels
%
% Different types of distance measuring methods, which are used with the
% a 'Distance' morphology method for generating a gradient based on
% distance from an edge of a binary shape, though there is a technique
% for handling a anti-aliased shape.
%
% See the 'Distance' Morphological Method, for information of how it is
% applied.
%
% Chebyshev:[{radius}][x{scale}[%!]]
% Chebyshev Distance (also known as Tchebychev or Chessboard distance)
% is a value of one to any neighbour, orthogonal or diagonal. One why
% of thinking of it is the number of squares a 'King' or 'Queen' in
% chess needs to traverse reach any other position on a chess board.
% It results in a 'square' like distance function, but one where
% diagonals are given a value that is closer than expected.
%
% Manhattan:[{radius}][x{scale}[%!]]
% Manhattan Distance (also known as Rectilinear, City Block, or the Taxi
% Cab distance metric), it is the distance needed when you can only
% travel in horizontal or vertical directions only. It is the
% distance a 'Rook' in chess would have to travel, and results in a
% diamond like distances, where diagonals are further than expected.
%
% Octagonal:[{radius}][x{scale}[%!]]
% An interleving of Manhatten and Chebyshev metrics producing an
% increasing octagonally shaped distance. Distances matches those of
% the "Octagon" shaped kernel of the same radius. The minimum radius
% and default is 2, producing a 5x5 kernel.
%
% Euclidean:[{radius}][x{scale}[%!]]
% Euclidean distance is the 'direct' or 'as the crow flys' distance.
% However by default the kernel size only has a radius of 1, which
% limits the distance to 'Knight' like moves, with only orthogonal and
% diagonal measurements being correct. As such for the default kernel
% you will get octagonal like distance function.
%
% However using a larger radius such as "Euclidean:4" you will get a
% much smoother distance gradient from the edge of the shape. Especially
% if the image is pre-processed to include any anti-aliasing pixels.
% Of course a larger kernel is slower to use, and not always needed.
%
% The first three Distance Measuring Kernels will only generate distances
% of exact multiples of {scale} in binary images. As such you can use a
% scale of 1 without loosing any information. However you also need some
% scaling when handling non-binary anti-aliased shapes.
%
% The "Euclidean" Distance Kernel however does generate a non-integer
% fractional results, and as such scaling is vital even for binary shapes.
%
*/
MagickExport KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
const GeometryInfo *args,ExceptionInfo *exception)
{
KernelInfo
*kernel;
register ssize_t
i;
register ssize_t
u,
v;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
/* Generate a new empty kernel if needed */
kernel=(KernelInfo *) NULL;
switch(type) {
case UndefinedKernel: /* These should not call this function */
case UserDefinedKernel:
assert("Should not call this function" != (char *) NULL);
break;
case LaplacianKernel: /* Named Descrete Convolution Kernels */
case SobelKernel: /* these are defined using other kernels */
case RobertsKernel:
case PrewittKernel:
case CompassKernel:
case KirschKernel:
case FreiChenKernel:
case EdgesKernel: /* Hit and Miss kernels */
case CornersKernel:
case DiagonalsKernel:
case LineEndsKernel:
case LineJunctionsKernel:
case RidgesKernel:
case ConvexHullKernel:
case SkeletonKernel:
case ThinSEKernel:
break; /* A pre-generated kernel is not needed */
#if 0
/* set to 1 to do a compile-time check that we haven't missed anything */
case UnityKernel:
case GaussianKernel:
case DoGKernel:
case LoGKernel:
case BlurKernel:
case CometKernel:
case BinomialKernel:
case DiamondKernel:
case SquareKernel:
case RectangleKernel:
case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
case RingKernel:
case PeaksKernel:
case ChebyshevKernel:
case ManhattanKernel:
case OctangonalKernel:
case EuclideanKernel:
#else
default:
#endif
/* Generate the base Kernel Structure */
kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (kernel == (KernelInfo *) NULL)
return(kernel);
(void) memset(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = type;
kernel->next = (KernelInfo *) NULL;
kernel->signature=MagickCoreSignature;
break;
}
switch(type) {
/*
Convolution Kernels
*/
case UnityKernel:
{
kernel->height = kernel->width = (size_t) 1;
kernel->x = kernel->y = (ssize_t) 0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(1,sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
kernel->maximum = kernel->values[0] = args->rho;
break;
}
break;
case GaussianKernel:
case DoGKernel:
case LoGKernel:
{ double
sigma = fabs(args->sigma),
sigma2 = fabs(args->xi),
A, B, R;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else if ( (type != DoGKernel) || (sigma >= sigma2) )
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma);
else
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2);
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* WARNING: The following generates a 'sampled gaussian' kernel.
* What we really want is a 'discrete gaussian' kernel.
*
* How to do this is I don't know, but appears to be basied on the
* Error Function 'erf()' (intergral of a gaussian)
*/
if ( type == GaussianKernel || type == DoGKernel )
{ /* Calculate a Gaussian, OR positive half of a DoG */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
if ( type == DoGKernel )
{ /* Subtract a Negative Gaussian for "Difference of Gaussian" */
if ( sigma2 > MagickEpsilon )
{ sigma = sigma2; /* simplify loop expressions */
A = 1.0/(2.0*sigma*sigma);
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0;
}
if ( type == LoGKernel )
{ /* Calculate a Laplacian of a Gaussian - Or Mexician Hat */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ R = ((double)(u*u+v*v))*A;
kernel->values[i] = (1-R)*exp(-R)*B;
}
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
/* Note the above kernels may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (> 0.1) the central value becomes larger than one, and thus
** producing a very bright kernel.
**
** Normalization will still be needed.
*/
/* Normalize the 2D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
break;
}
case BlurKernel:
{ double
sigma = fabs(args->sigma),
alpha, beta;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else
kernel->width = GetOptimalKernelWidth1D(args->rho,sigma);
kernel->height = 1;
kernel->x = (ssize_t) (kernel->width-1)/2;
kernel->y = 0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
#if 1
#define KernelRank 3
/* Formula derived from GetBlurKernel() in "effect.c" (plus bug fix).
** It generates a gaussian 3 times the width, and compresses it into
** the expected range. This produces a closer normalization of the
** resulting kernel, especially for very low sigma values.
** As such while wierd it is prefered.
**
** I am told this method originally came from Photoshop.
**
** A properly normalized curve is generated (apart from edge clipping)
** even though we later normalize the result (for edge clipping)
** to allow the correct generation of a "Difference of Blurs".
*/
/* initialize */
v = (ssize_t) (kernel->width*KernelRank-1)/2; /* start/end points to fit range */
(void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
/* Calculate a Positive 1D Gaussian */
if ( sigma > MagickEpsilon )
{ sigma *= KernelRank; /* simplify loop expressions */
alpha = 1.0/(2.0*sigma*sigma);
beta= (double) (1.0/(MagickSQ2PI*sigma ));
for ( u=-v; u <= v; u++) {
kernel->values[(u+v)/KernelRank] +=
exp(-((double)(u*u))*alpha)*beta;
}
}
else /* special case - generate a unity kernel */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
#else
/* Direct calculation without curve averaging
This is equivelent to a KernelRank of 1 */
/* Calculate a Positive Gaussian */
if ( sigma > MagickEpsilon )
{ alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
beta = 1.0/(MagickSQ2PI*sigma);
for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u))*alpha)*beta;
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
#endif
/* Note the above kernel may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigmas the central value becomes larger than one, as a
** result of not generating an actual 'discrete' kernel, and thus
** producing a very bright 'impulse'.
**
** Because of these two factors Normalization is required!
*/
/* Normalize the 1D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
/* rotate the 1D kernel by given angle */
RotateKernelInfo(kernel, args->xi );
break;
}
case CometKernel:
{ double
sigma = fabs(args->sigma),
A;
if ( args->rho < 1.0 )
kernel->width = (GetOptimalKernelWidth1D(args->rho,sigma)-1)/2+1;
else
kernel->width = (size_t)args->rho;
kernel->x = kernel->y = 0;
kernel->height = 1;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* A comet blur is half a 1D gaussian curve, so that the object is
** blurred in one direction only. This may not be quite the right
** curve to use so may change in the future. The function must be
** normalised after generation, which also resolves any clipping.
**
** As we are normalizing and not subtracting gaussians,
** there is no need for a divisor in the gaussian formula
**
** It is less complex
*/
if ( sigma > MagickEpsilon )
{
#if 1
#define KernelRank 3
v = (ssize_t) kernel->width*KernelRank; /* start/end points */
(void) memset(kernel->values,0, (size_t)
kernel->width*sizeof(*kernel->values));
sigma *= KernelRank; /* simplify the loop expression */
A = 1.0/(2.0*sigma*sigma);
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( u=0; u < v; u++) {
kernel->values[u/KernelRank] +=
exp(-((double)(u*u))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
}
for (i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range += kernel->values[i];
#else
A = 1.0/(2.0*sigma*sigma); /* simplify the loop expression */
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range +=
kernel->values[i] = exp(-((double)(i*i))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
#endif
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
}
kernel->minimum = 0.0;
kernel->maximum = kernel->values[0];
kernel->negative_range = 0.0;
ScaleKernelInfo(kernel, 1.0, NormalizeValue); /* Normalize */
RotateKernelInfo(kernel, args->xi); /* Rotate by angle */
break;
}
case BinomialKernel:
{
size_t
order_f;
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
order_f = fact(kernel->width-1);
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=0; v < (ssize_t)kernel->height; v++)
{ size_t
alpha = order_f / ( fact((size_t) v) * fact(kernel->height-v-1) );
for ( u=0; u < (ssize_t)kernel->width; u++, i++)
kernel->positive_range += kernel->values[i] = (double)
(alpha * order_f / ( fact((size_t) u) * fact(kernel->height-u-1) ));
}
kernel->minimum = 1.0;
kernel->maximum = kernel->values[kernel->x+kernel->y*kernel->width];
kernel->negative_range = 0.0;
break;
}
/*
Convolution Kernels - Well Known Named Constant Kernels
*/
case LaplacianKernel:
{ switch ( (int) args->rho ) {
case 0:
default: /* laplacian square filter -- default */
kernel=ParseKernelArray("3: -1,-1,-1 -1,8,-1 -1,-1,-1");
break;
case 1: /* laplacian diamond filter */
kernel=ParseKernelArray("3: 0,-1,0 -1,4,-1 0,-1,0");
break;
case 2:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
break;
case 3:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1");
break;
case 5: /* a 5x5 laplacian */
kernel=ParseKernelArray(
"5: -4,-1,0,-1,-4 -1,2,3,2,-1 0,3,4,3,0 -1,2,3,2,-1 -4,-1,0,-1,-4");
break;
case 7: /* a 7x7 laplacian */
kernel=ParseKernelArray(
"7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" );
break;
case 15: /* a 5x5 LoG (sigma approx 1.4) */
kernel=ParseKernelArray(
"5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0");
break;
case 19: /* a 9x9 LoG (sigma approx 1.4) */
/* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */
kernel=ParseKernelArray(
"9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
break;
}
case SobelKernel:
{ /* Simple Sobel Kernel */
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case RobertsKernel:
{
kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case PrewittKernel:
{
kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case CompassKernel:
{
kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case KirschKernel:
{
kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case FreiChenKernel:
/* Direction is set to be left to right positive */
/* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? */
/* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? */
{ switch ( (int) args->rho ) {
default:
case 0:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +(MagickRealType) MagickSQ2;
kernel->values[5] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
break;
case 2:
kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = kernel->values[3]= +(MagickRealType) MagickSQ2;
kernel->values[5] = kernel->values[7]= -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 10:
{
kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
break;
}
case 1:
case 11:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +(MagickRealType) MagickSQ2;
kernel->values[5] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 12:
kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = +(MagickRealType) MagickSQ2;
kernel->values[7] = +(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 13:
kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[0] = +(MagickRealType) MagickSQ2;
kernel->values[8] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 14:
kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[2] = -(MagickRealType) MagickSQ2;
kernel->values[6] = +(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 15:
kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 16:
kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 17:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 18:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 19:
kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/3.0, NoValue);
break;
}
if ( fabs(args->sigma) >= MagickEpsilon )
/* Rotate by correctly supplied 'angle' */
RotateKernelInfo(kernel, args->sigma);
else if ( args->rho > 30.0 || args->rho < -30.0 )
/* Rotate by out of bounds 'type' */
RotateKernelInfo(kernel, args->rho);
break;
}
/*
Boolean or Shaped Kernels
*/
case DiamondKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case SquareKernel:
case RectangleKernel:
{ double
scale;
if ( type == SquareKernel )
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = (size_t) (2*args->rho+1);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
scale = args->sigma;
}
else {
/* NOTE: user defaults set in "AcquireKernelInfo()" */
if ( args->rho < 1.0 || args->sigma < 1.0 )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->width = (size_t)args->rho;
kernel->height = (size_t)args->sigma;
if ( args->xi < 0.0 || args->xi > (double)kernel->width ||
args->psi < 0.0 || args->psi > (double)kernel->height )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->x = (ssize_t) args->xi;
kernel->y = (ssize_t) args->psi;
scale = 1.0;
}
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values to scale given */
u=(ssize_t) (kernel->width*kernel->height);
for ( i=0; i < u; i++)
kernel->values[i] = scale;
kernel->minimum = kernel->maximum = scale; /* a flat shape */
kernel->positive_range = scale*u;
break;
}
case OctagonKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <=
((long)kernel->x + (long)(kernel->x/2)) )
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case DiskKernel:
{
ssize_t
limit = (ssize_t)(args->rho*args->rho);
if (args->rho < 0.4) /* default radius approx 4.3 */
kernel->width = kernel->height = 9L, limit = 18L;
else
kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ((u*u+v*v) <= limit)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case PlusKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == 0 || v == 0) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
case CrossKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == v || u == -v) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
/*
HitAndMiss Kernels
*/
case RingKernel:
case PeaksKernel:
{
ssize_t
limit1,
limit2,
scale;
if (args->rho < args->sigma)
{
kernel->width = ((size_t)args->sigma)*2+1;
limit1 = (ssize_t)(args->rho*args->rho);
limit2 = (ssize_t)(args->sigma*args->sigma);
}
else
{
kernel->width = ((size_t)args->rho)*2+1;
limit1 = (ssize_t)(args->sigma*args->sigma);
limit2 = (ssize_t)(args->rho*args->rho);
}
if ( limit2 <= 0 )
kernel->width = 7L, limit1 = 7L, limit2 = 11L;
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */
scale = (ssize_t) (( type == PeaksKernel) ? 0.0 : args->xi);
for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ ssize_t radius=u*u+v*v;
if (limit1 < radius && radius <= limit2)
kernel->positive_range += kernel->values[i] = (double) scale;
else
kernel->values[i] = nan;
}
kernel->minimum = kernel->maximum = (double) scale;
if ( type == PeaksKernel ) {
/* set the central point in the middle */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
kernel->maximum = 1.0;
}
break;
}
case EdgesKernel:
{
kernel=AcquireKernelInfo("ThinSE:482",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */
break;
}
case CornersKernel:
{
kernel=AcquireKernelInfo("ThinSE:87",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */
break;
}
case DiagonalsKernel:
{
switch ( (int) args->rho ) {
case 0:
default:
{ KernelInfo
*new_kernel;
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
ExpandMirrorKernelInfo(kernel);
return(kernel);
}
case 1:
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
break;
case 2:
kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineEndsKernel:
{ /* Kernels for finding the end of thin lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all end of lines */
return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>",exception));
case 1:
/* kernel for 4-connected line ends - no rotation */
kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-");
break;
case 2:
/* kernel to add for 8-connected lines - no rotation */
kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1");
break;
case 3:
/* kernel to add for orthogonal line ends - does not find corners */
kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0");
break;
case 4:
/* traditional line end - fails on last T end */
kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineJunctionsKernel:
{ /* kernels for finding the junctions of multiple lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all line junctions */
return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>",exception));
case 1:
/* Y Junction */
kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-");
break;
case 2:
/* Diagonal T Junctions */
kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1");
break;
case 3:
/* Orthogonal T Junctions */
kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-");
break;
case 4:
/* Diagonal X Junctions */
kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1");
break;
case 5:
/* Orthogonal X Junctions - minimal diamond kernel */
kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case RidgesKernel:
{ /* Ridges - Ridge finding kernels */
KernelInfo
*new_kernel;
switch ( (int) args->rho ) {
case 1:
default:
kernel=ParseKernelArray("3x1:0,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */
break;
case 2:
kernel=ParseKernelArray("4x1:0,1,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */
/* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */
/* Unfortunately we can not yet rotate a non-square kernel */
/* But then we can't flip a non-symmetrical kernel either */
new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
break;
}
break;
}
case ConvexHullKernel:
{
KernelInfo
*new_kernel;
/* first set of 8 kernels */
kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0);
/* append the mirror versions too - no flip function yet */
new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
ExpandRotateKernelInfo(new_kernel, 90.0);
LastKernelInfo(kernel)->next = new_kernel;
break;
}
case SkeletonKernel:
{
switch ( (int) args->rho ) {
case 1:
default:
/* Traditional Skeleton...
** A cyclically rotated single kernel
*/
kernel=AcquireKernelInfo("ThinSE:482",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */
break;
case 2:
/* HIPR Variation of the cyclic skeleton
** Corners of the traditional method made more forgiving,
** but they retain the same cyclic order.
*/
kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
if (kernel->next == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
kernel->type = type;
kernel->next->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */
break;
case 3:
/* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's
** "Connectivity-Preserving Morphological Image Transformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
*/
kernel=AcquireKernelInfo("ThinSE:41; ThinSE:42; ThinSE:43",
exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->next->type = type;
kernel->next->next->type = type;
ExpandMirrorKernelInfo(kernel); /* 12 kernels total */
break;
}
break;
}
case ThinSEKernel:
{ /* Special kernels for general thinning, while preserving connections
** "Connectivity-Preserving Morphological Image Transformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
** And
** http://tpgit.github.com/Leptonica/ccthin_8c_source.html
**
** Note kernels do not specify the origin pixel, allowing them
** to be used for both thickening and thinning operations.
*/
switch ( (int) args->rho ) {
/* SE for 4-connected thinning */
case 41: /* SE_4_1 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1");
break;
case 42: /* SE_4_2 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-");
break;
case 43: /* SE_4_3 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1");
break;
case 44: /* SE_4_4 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-");
break;
case 45: /* SE_4_5 */
kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-");
break;
case 46: /* SE_4_6 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1");
break;
case 47: /* SE_4_7 */
kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-");
break;
case 48: /* SE_4_8 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1");
break;
case 49: /* SE_4_9 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1");
break;
/* SE for 8-connected thinning - negatives of the above */
case 81: /* SE_8_0 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-");
break;
case 82: /* SE_8_2 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-");
break;
case 83: /* SE_8_3 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-");
break;
case 84: /* SE_8_4 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-");
break;
case 85: /* SE_8_5 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-");
break;
case 86: /* SE_8_6 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1");
break;
case 87: /* SE_8_7 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-");
break;
case 88: /* SE_8_8 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-");
break;
case 89: /* SE_8_9 */
kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-");
break;
/* Special combined SE kernels */
case 423: /* SE_4_2 , SE_4_3 Combined Kernel */
kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-");
break;
case 823: /* SE_8_2 , SE_8_3 Combined Kernel */
kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-");
break;
case 481: /* SE_48_1 - General Connected Corner Kernel */
kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-");
break;
default:
case 482: /* SE_48_2 - General Edge Kernel */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
/*
Distance Measuring Kernels
*/
case ChebyshevKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*MagickMax(fabs((double)u),fabs((double)v)) );
kernel->maximum = kernel->values[0];
break;
}
case ManhattanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*(labs((long) u)+labs((long) v)) );
kernel->maximum = kernel->values[0];
break;
}
case OctagonalKernel:
{
if (args->rho < 2.0)
kernel->width = kernel->height = 5; /* default/minimum radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{
double
r1 = MagickMax(fabs((double)u),fabs((double)v)),
r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5);
kernel->positive_range += kernel->values[i] =
args->sigma*MagickMax(r1,r2);
}
kernel->maximum = kernel->values[0];
break;
}
case EuclideanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*sqrt((double)(u*u+v*v)) );
kernel->maximum = kernel->values[0];
break;
}
default:
{
/* No-Op Kernel - Basically just a single pixel on its own */
kernel=ParseKernelArray("1:1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = UndefinedKernel;
break;
}
break;
}
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneKernelInfo() creates a new clone of the given Kernel List so that it
% can be modified without affecting the original.  The cloned kernel should
% be destroyed using DestroyKernelInfo() when no longer needed.
%
% The format of the CloneKernelInfo method is:
%
% KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be cloned
%
*/
MagickExport KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
{
  KernelInfo
    *clone;

  register ssize_t
    n;

  assert(kernel != (KernelInfo *) NULL);
  /* Allocate a new structure and shallow-copy every field of the source. */
  clone=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
  if (clone == (KernelInfo *) NULL)
    return(clone);
  *clone=(*kernel);
  /* Give the clone its own aligned copy of the kernel value array, so the
  ** clone can be modified without touching the original's values. */
  clone->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel->width,kernel->height*sizeof(*kernel->values)));
  if (clone->values == (MagickRealType *) NULL)
    return(DestroyKernelInfo(clone));
  for (n=0; n < (ssize_t) (kernel->width*kernel->height); n++)
    clone->values[n]=kernel->values[n];
  /* Recursively clone the remainder of the kernel list, if any; on failure
  ** release everything cloned so far. */
  if (kernel->next != (KernelInfo *) NULL)
    {
      clone->next=CloneKernelInfo(kernel->next);
      if (clone->next == (KernelInfo *) NULL)
        return(DestroyKernelInfo(clone));
    }
  return(clone);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyKernelInfo() frees the memory used by a Convolution/Morphology
% kernel.
%
% The format of the DestroyKernelInfo method is:
%
% KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be destroyed
%
*/
MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
{
  assert(kernel != (KernelInfo *) NULL);
  /* Walk the kernel list iteratively, releasing each node's aligned value
  ** array first and then the node itself.  Always returns NULL so callers
  ** can write: kernel = DestroyKernelInfo(kernel); */
  while (kernel != (KernelInfo *) NULL)
  {
    KernelInfo
      *next;

    next=kernel->next;
    kernel->values=(MagickRealType *) RelinquishAlignedMemory(kernel->values);
    kernel=(KernelInfo *) RelinquishMagickMemory(kernel);
    kernel=next;
  }
  return((KernelInfo *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d M i r r o r K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandMirrorKernelInfo() takes a single kernel, and expands it into a
% sequence of 90-degree rotated kernels but providing a reflected 180
% rotation, before the -/+ 90-degree rotations.
%
% This special rotation order produces a better, more symmetrical thinning of
% objects.
%
% The format of the ExpandMirrorKernelInfo method is:
%
% void ExpandMirrorKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
#if 0
/*
  FlopKernelInfo() horizontally mirrors a single kernel in place by reversing
  each row of values and mirroring the origin x position.
  NOTE(review): this function is compiled out (#if 0) and is NOT buildable as
  written -- it references an undeclared variable 'angle' on its last line.
  Kept only as a sketch for future non-orthogonal rotation support.
*/
static void FlopKernelInfo(KernelInfo *kernel)
{ /* Do a Flop by reversing each row. */
  size_t
    y;
  register ssize_t
    x,r;
  register double
    *k,t;
  /* swap value pairs from the two ends of each row towards the middle */
  for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width)
    for ( x=0, r=kernel->width-1; x<kernel->width/2; x++, r--)
      t=k[x], k[x]=k[r], k[r]=t;
  /* mirror the kernel origin about the vertical centre line */
  kernel->x = kernel->width - kernel->x - 1;
  angle = fmod(angle+180.0, 360.0);
}
#endif
static void ExpandMirrorKernelInfo(KernelInfo *kernel)
{
  /*
    Expand a single kernel into a 4-way sequence by successive rotation of
    the previous kernel in the list: 180 (flip), then 90 (transpose), then
    180 again (flop).  Each rotated clone is appended to the end of the
    kernel list.  On a clone failure the list is simply left as-is.
  */
  static const double
    angles[3] = { 180.0, 90.0, 180.0 };  /* flip, transpose, flop */

  KernelInfo
    *rotated,
    *tail;

  ssize_t
    i;

  tail=kernel;
  for (i=0; i < 3; i++)
  {
    rotated=CloneKernelInfo(tail);
    if (rotated == (KernelInfo *) NULL)
      return;
    RotateKernelInfo(rotated,angles[i]);
    LastKernelInfo(tail)->next=rotated;
    tail=rotated;
  }
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating
% incrementally by the angle given, until the kernel repeats.
%
% WARNING: 45 degree rotations only work for 3x3 kernels.
% While 90 degree rotations only work for linear and square kernels
%
% The format of the ExpandRotateKernelInfo method is:
%
% void ExpandRotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is only internel to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
/* Internal Routine - Return true if two kernels are the same */
/* Internal Routine - Return true if two kernels are the same */
static MagickBooleanType SameKernelInfo(const KernelInfo *kernel1,
  const KernelInfo *kernel2)
{
  register size_t
    i;

  /* kernels differ if their geometry or origin location differ */
  if ((kernel1->width != kernel2->width) ||
      (kernel1->height != kernel2->height) ||
      (kernel1->x != kernel2->x) ||
      (kernel1->y != kernel2->y))
    return MagickFalse;
  /* compare the individual kernel values */
  for (i=0; i < (kernel1->width*kernel1->height); i++) {
    int
      nan1,
      nan2;

    /* a NaN only matches another NaN (two NaNs are considered equal) */
    nan1=IsNaN(kernel1->values[i]) ? 1 : 0;
    nan2=IsNaN(kernel2->values[i]) ? 1 : 0;
    if ( nan1 != nan2 )
      return MagickFalse;
    /* non-NaN values must agree to within MagickEpsilon */
    if ( fabs(kernel1->values[i] - kernel2->values[i]) >= MagickEpsilon )
      return MagickFalse;
  }
  return MagickTrue;
}
/*
  ExpandRotateKernelInfo() repeatedly clones the tail of the kernel list,
  rotates the clone by 'angle' degrees, and appends it -- stopping as soon
  as a rotation reproduces the original kernel (the rotation cycle is
  complete) or a clone fails.
*/
static void ExpandRotateKernelInfo(KernelInfo *kernel, const double angle)
{
  KernelInfo
    *clone_info,
    *last;

  last=kernel;
  DisableMSCWarning(4127)  /* MSC: "conditional expression is constant" */
  while (1) {
  RestoreMSCWarning
    clone_info=CloneKernelInfo(last);
    if (clone_info == (KernelInfo *) NULL)
      break;  /* clone failed; keep what we have so far */
    RotateKernelInfo(clone_info,angle);
    /* rotation has cycled back to the original kernel -- we are done */
    if (SameKernelInfo(kernel,clone_info) != MagickFalse)
      break;
    LastKernelInfo(last)->next=clone_info;
    last=clone_info;
  }
  /* the final (repeated or unused) clone is not part of the list */
  if (clone_info != (KernelInfo *) NULL)
    clone_info=DestroyKernelInfo(clone_info); /* kernel repeated - junk */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a l c M e t a K e r n a l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CalcKernelMetaData() recalculates the KernelInfo meta-data of this kernel
% only, using the kernel values. This should only be used if it is not
% possible to calculate that meta-data in some easier way.
%
% It is important that the meta-data is correct before ScaleKernelInfo() is
% used to perform kernel normalization.
%
% The format of the CalcKernelMetaData method is:
%
% void CalcKernelMetaData(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% WARNING: Minimum and Maximum values are assumed to include zero, even if
% zero is not part of the kernel (as in Gaussian Derived kernels). This
% however is not true for flat-shaped morphological kernels.
%
% WARNING: Only the specific kernel pointed to is modified, not a list of
% multiple kernels.
%
% This is an internal function and not expected to be useful outside this
% module. This could change however.
*/
static void CalcKernelMetaData(KernelInfo *kernel)
{
  /*
    Recompute this kernel's meta-data (minimum, maximum, negative_range,
    positive_range) directly from its values.  Values within MagickEpsilon
    of zero are snapped to exactly 0.0 as a side effect.  Minimum/maximum
    start from 0.0, so zero is always included in the reported range.
  */
  register size_t
    i;

  kernel->minimum=0.0;
  kernel->maximum=0.0;
  kernel->negative_range=0.0;
  kernel->positive_range=0.0;
  for (i=0; i < (kernel->width*kernel->height); i++)
  {
    double
      value;

    /* snap near-zero values to exactly zero */
    if ( fabs(kernel->values[i]) < MagickEpsilon )
      kernel->values[i] = 0.0;
    value=kernel->values[i];
    if (value < 0)
      kernel->negative_range += value;
    else
      kernel->positive_range += value;
    Minimize(kernel->minimum, value);
    Maximize(kernel->maximum, value);
  }
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y A p p l y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyApply() applies a morphological method, multiple times using
% a list of multiple kernels. This is the method that should be called by
% other 'operators' that internally use morphology operations as part of
% their processing.
%
% It is basically equivalent to as MorphologyImage() (see below) but without
% any user controls. This allows internel programs to use this method to
% perform a specific task without possible interference by any API user
% supplied settings.
%
% It is MorphologyImage() task to extract any such user controls, and
% pass them to this function for processing.
%
% More specifically all given kernels should already be scaled, normalised,
% and blended appropriately before being passed to this routine. The
% appropriate bias, and compose (typically 'UndefinedComposeOp') given.
%
% The format of the MorphologyApply method is:
%
% Image *MorphologyApply(const Image *image,MorphologyMethod method,
% const ssize_t iterations,const KernelInfo *kernel,
% const CompositeMethod compose,const double bias,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the source image
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o channel: the channel type.
%
% o kernel: An array of double representing the morphology kernel.
%
% o compose: How to handle or merge multi-kernel results.
% If 'UndefinedCompositeOp' use default for the Morphology method.
% If 'NoCompositeOp' force image to be re-iterated by each kernel.
% Otherwise merge the results using the compose method given.
%
% o bias: Convolution Output Bias.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  MorphologyPrimitive() applies a single low-level morphology primitive
  (convolve, erode, dilate, hit-and-miss, etc.) once over the whole image,
  reading from 'image' and writing the result into 'morphology_image'.

  Returns the number of pixels that changed, or -1 on error.  (Previously
  the vertical-kernel fast path returned 0 on error, masking failures as
  "no change" -- it now returns -1 like the general path.)
*/
static ssize_t MorphologyPrimitive(const Image *image,Image *morphology_image,
  const MorphologyMethod method,const KernelInfo *kernel,const double bias,
  ExceptionInfo *exception)
{
#define MorphologyTag "Morphology/Image"

  CacheView
    *image_view,
    *morphology_view;

  OffsetInfo
    offset;

  register ssize_t
    j,
    y;

  size_t
    *changes,
    changed,
    width;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(morphology_image != (Image *) NULL);
  assert(morphology_image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  morphology_view=AcquireAuthenticCacheView(morphology_image,exception);
  width=image->columns+kernel->width-1;
  offset.x=0;
  offset.y=0;
  switch (method)
  {
    case ConvolveMorphology:
    case DilateMorphology:
    case DilateIntensityMorphology:
    case IterativeDistanceMorphology:
    {
      /*
        Kernel needs to used with reflection about origin.
      */
      offset.x=(ssize_t) kernel->width-kernel->x-1;
      offset.y=(ssize_t) kernel->height-kernel->y-1;
      break;
    }
    case ErodeMorphology:
    case ErodeIntensityMorphology:
    case HitAndMissMorphology:
    case ThinningMorphology:
    case ThickenMorphology:
    {
      offset.x=kernel->x;
      offset.y=kernel->y;
      break;
    }
    default:
    {
      assert("Not a Primitive Morphology Method" != (char *) NULL);
      break;
    }
  }
  changed=0;
  /* per-thread change counters, merged after the parallel region */
  changes=(size_t *) AcquireQuantumMemory(GetOpenMPMaximumThreads(),
    sizeof(*changes));
  if (changes == (size_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
    changes[j]=0;
  if ((method == ConvolveMorphology) && (kernel->width == 1))
    {
      register ssize_t
        x;

      /*
        Special handling (for speed) of vertical (blur) kernels.  This performs
        its handling in columns rather than in rows.  This is only done
        for convolve as it is the only method that generates very large 1-D
        vertical kernels (such as a 'BlurKernel')
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,morphology_image,image->columns,1)
#endif
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        const int
          id = GetOpenMPThreadId();

        register const Quantum
          *magick_restrict p;

        register Quantum
          *magick_restrict q;

        register ssize_t
          r;

        ssize_t
          center;

        if (status == MagickFalse)
          continue;
        /* one full image column, padded vertically by the kernel extent */
        p=GetCacheViewVirtualPixels(image_view,x,-offset.y,1,image->rows+
          kernel->height-1,exception);
        q=GetCacheViewAuthenticPixels(morphology_view,x,0,1,
          morphology_image->rows,exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        center=(ssize_t) GetPixelChannels(image)*offset.y;
        for (r=0; r < (ssize_t) image->rows; r++)
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            double
              alpha,
              gamma,
              pixel;

            PixelChannel
              channel;

            PixelTrait
              morphology_traits,
              traits;

            register const MagickRealType
              *magick_restrict k;

            register const Quantum
              *magick_restrict pixels;

            register ssize_t
              v;

            size_t
              count;

            channel=GetPixelChannelChannel(image,i);
            traits=GetPixelChannelTraits(image,channel);
            morphology_traits=GetPixelChannelTraits(morphology_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (morphology_traits == UndefinedPixelTrait))
              continue;
            if ((traits & CopyPixelTrait) != 0)
              {
                /* channel is copied, not processed */
                SetPixelChannel(morphology_image,channel,p[center+i],q);
                continue;
              }
            /* walk the 1-D kernel backwards (reflected for convolution) */
            k=(&kernel->values[kernel->height-1]);
            pixels=p;
            pixel=bias;
            gamma=0.0;
            count=0;
            if ((morphology_traits & BlendPixelTrait) == 0)
              for (v=0; v < (ssize_t) kernel->height; v++)
              {
                if (!IsNaN(*k))
                  {
                    pixel+=(*k)*pixels[i];
                    gamma+=(*k);
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
            else
              for (v=0; v < (ssize_t) kernel->height; v++)
              {
                if (!IsNaN(*k))
                  {
                    /* alpha-weighted accumulation */
                    alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
                    pixel+=alpha*(*k)*pixels[i];
                    gamma+=alpha*(*k);
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
            if (fabs(pixel-p[center+i]) > MagickEpsilon)
              changes[id]++;
            gamma=PerceptibleReciprocal(gamma);
            if (count != 0)
              gamma*=(double) kernel->height/count;
            SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma*
              pixel),q);
          }
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(morphology_image);
        }
        if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (MagickCore_MorphologyPrimitive)
#endif
            /* NOTE(review): this loop iterates columns but reports progress
               against image->rows -- confirm intended denominator */
            proceed=SetImageProgress(image,MorphologyTag,progress++,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      morphology_image->type=image->type;
      morphology_view=DestroyCacheView(morphology_view);
      image_view=DestroyCacheView(image_view);
      for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
        changed+=changes[j];
      changes=(size_t *) RelinquishMagickMemory(changes);
      /* use the same -1 error sentinel as the general path below */
      return(status ? (ssize_t) changed : -1);
    }
  /*
    Normal handling of horizontal or rectangular kernels (row by row).
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,morphology_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      center;

    if (status == MagickFalse)
      continue;
    /* a band of kernel->height rows, padded horizontally by the kernel */
    p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width,
      kernel->height,exception);
    q=GetCacheViewAuthenticPixels(morphology_view,0,y,morphology_image->columns,
      1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    center=(ssize_t) (GetPixelChannels(image)*width*offset.y+
      GetPixelChannels(image)*offset.x);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          intensity,
          maximum,
          minimum,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          morphology_traits,
          traits;

        register const MagickRealType
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        size_t
          count;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        morphology_traits=GetPixelChannelTraits(morphology_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (morphology_traits == UndefinedPixelTrait))
          continue;
        if ((traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(morphology_image,channel,p[center+i],q);
            continue;
          }
        pixels=p;
        maximum=0.0;
        minimum=(double) QuantumRange;
        /* seed the accumulator per method (identity of min/max/sum) */
        switch (method)
        {
          case ConvolveMorphology:
          {
            pixel=bias;
            break;
          }
          case DilateMorphology:
          case ErodeIntensityMorphology:
          {
            pixel=0.0;
            break;
          }
          case HitAndMissMorphology:
          case ErodeMorphology:
          {
            pixel=QuantumRange;
            break;
          }
          default:
          {
            pixel=(double) p[center+i];
            break;
          }
        }
        count=0;
        gamma=1.0;
        switch (method)
        {
          case ConvolveMorphology:
          {
            /*
              Weighted Average of pixels using reflected kernel

              For correct working of this operation for asymetrical kernels,
              the kernel needs to be applied in its reflected form.  That is
              its values needs to be reversed.

              Correlation is actually the same as this but without reflecting
              the kernel, and thus 'lower-level' that Convolution.  However as
              Convolution is the more common method used, and it does not
              really cost us much in terms of processing to use a reflected
              kernel, so it is Convolution that is implemented.

              Correlation will have its kernel reflected before calling this
              function to do a Convolve.

              For more details of Correlation vs Convolution see
                http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf
            */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            if ((morphology_traits & BlendPixelTrait) == 0)
              {
                /*
                  No alpha blending.
                */
                for (v=0; v < (ssize_t) kernel->height; v++)
                {
                  for (u=0; u < (ssize_t) kernel->width; u++)
                  {
                    if (!IsNaN(*k))
                      {
                        pixel+=(*k)*pixels[i];
                        count++;
                      }
                    k--;
                    pixels+=GetPixelChannels(image);
                  }
                  pixels+=(image->columns-1)*GetPixelChannels(image);
                }
                break;
              }
            /*
              Alpha blending.
            */
            gamma=0.0;
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
                    pixel+=alpha*(*k)*pixels[i];
                    gamma+=alpha*(*k);
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case ErodeMorphology:
          {
            /*
              Minimum value within kernel neighbourhood.

              The kernel is not reflected for this operation.  In normal
              Greyscale Morphology, the kernel value should be added
              to the real value, this is currently not done, due to the
              nature of the boolean kernels being used.
            */
            k=kernel->values;
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k) && (*k >= 0.5))
                  {
                    if ((double) pixels[i] < pixel)
                      pixel=(double) pixels[i];
                  }
                k++;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case DilateMorphology:
          {
            /*
              Maximum value within kernel neighbourhood.

              For correct working of this operation for asymetrical kernels,
              the kernel needs to be applied in its reflected form.  That is
              its values needs to be reversed.

              In normal Greyscale Morphology, the kernel value should be
              added to the real value, this is currently not done, due to the
              nature of the boolean kernels being used.
            */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k) && (*k > 0.5))
                  {
                    if ((double) pixels[i] > pixel)
                      pixel=(double) pixels[i];
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case HitAndMissMorphology:
          case ThinningMorphology:
          case ThickenMorphology:
          {
            /*
              Minimum of foreground pixel minus maxumum of background pixels.

              The kernel is not reflected for this operation, and consists
              of both foreground and background pixel neighbourhoods, 0.0 for
              background, and 1.0 for foreground with either Nan or 0.5 values
              for don't care.

              This never produces a meaningless negative result.  Such results
              cause Thinning/Thicken to not work correctly when used against a
              greyscale image.
            */
            k=kernel->values;
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if (*k > 0.7)
                      {
                        /* foreground: track minimum */
                        if ((double) pixels[i] < pixel)
                          pixel=(double) pixels[i];
                      }
                    else
                      if (*k < 0.3)
                        {
                          /* background: track maximum */
                          if ((double) pixels[i] > maximum)
                            maximum=(double) pixels[i];
                        }
                    count++;
                  }
                k++;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            pixel-=maximum;
            if (pixel < 0.0)
              pixel=0.0;
            if (method == ThinningMorphology)
              pixel=(double) p[center+i]-pixel;
            else
              if (method == ThickenMorphology)
                pixel+=(double) p[center+i]+pixel;
            break;
          }
          case ErodeIntensityMorphology:
          {
            /*
              Select pixel with minimum intensity within kernel neighbourhood.

              The kernel is not reflected for this operation.
            */
            k=kernel->values;
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k) && (*k >= 0.5))
                  {
                    intensity=(double) GetPixelIntensity(image,pixels);
                    if (intensity < minimum)
                      {
                        pixel=(double) pixels[i];
                        minimum=intensity;
                      }
                    count++;
                  }
                k++;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case DilateIntensityMorphology:
          {
            /*
              Select pixel with maximum intensity within kernel neighbourhood.

              The kernel is not reflected for this operation.
            */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k) && (*k >= 0.5))
                  {
                    intensity=(double) GetPixelIntensity(image,pixels);
                    if (intensity > maximum)
                      {
                        pixel=(double) pixels[i];
                        maximum=intensity;
                      }
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case IterativeDistanceMorphology:
          {
            /*
              Compute th iterative distance from black edge of a white image
              shape.  Essentually white values are decreased to the smallest
              'distance from edge' it can find.

              It works by adding kernel values to the neighbourhood, and and
              select the minimum value found.  The kernel is rotated before
              use, so kernel distances match resulting distances, when a user
              provided asymmetric kernel is applied.

              This code is nearly identical to True GrayScale Morphology but
              not quite.

              GreyDilate  Kernel values added, maximum value found Kernel is
              rotated before use.

              GrayErode:  Kernel values subtracted and minimum value found No
              kernel rotation used.

              Note the the Iterative Distance method is essentially a
              GrayErode, but with negative kernel values, and kernel rotation
              applied.
            */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case UndefinedMorphology:
          default:
            break;
        }
        if (fabs(pixel-p[center+i]) > MagickEpsilon)
          changes[id]++;
        gamma=PerceptibleReciprocal(gamma);
        if (count != 0)
          gamma*=(double) kernel->height*kernel->width/count;
        SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(morphology_image);
    }
    if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_MorphologyPrimitive)
#endif
        proceed=SetImageProgress(image,MorphologyTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  morphology_view=DestroyCacheView(morphology_view);
  image_view=DestroyCacheView(image_view);
  for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
    changed+=changes[j];
  changes=(size_t *) RelinquishMagickMemory(changes);
  return(status ? (ssize_t) changed : -1);
}
/*
This is almost identical to the MorphologyPrimative() function above, but
applies the primitive directly to the actual image using two passes, once in
each direction, with the results of the previous (and current) row being
re-used.
That is after each row is 'Sync'ed' into the image, the next row makes use of
those values as part of the calculation of the next row. It repeats, but
going in the oppisite (bottom-up) direction.
Because of this 're-use of results' this function can not make use of multi-
threaded, parellel processing.
*/
/*
  MorphologyPrimitiveDirect() applies a distance-style morphology method
  (Distance, Voronoi) directly to 'image' in two passes -- top-down, then
  bottom-up -- re-using already-written results from the current and previous
  rows.  Because results feed forward within a pass, it must run
  single-threaded.  Returns the number of pixels changed, or -1 on error.
*/
static ssize_t MorphologyPrimitiveDirect(Image *image,
  const MorphologyMethod method,const KernelInfo *kernel,
  ExceptionInfo *exception)
{
  CacheView
    *morphology_view,
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  OffsetInfo
    offset;

  size_t
    width,
    changed;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=MagickTrue;
  changed=0;
  progress=0;
  switch(method)
  {
    case DistanceMorphology:
    case VoronoiMorphology:
    {
      /*
        Kernel reflected about origin.
      */
      offset.x=(ssize_t) kernel->width-kernel->x-1;
      offset.y=(ssize_t) kernel->height-kernel->y-1;
      break;
    }
    default:
    {
      offset.x=kernel->x;
      offset.y=kernel->y;
      break;
    }
  }
  /*
    Two views into same image, do not thread.
  */
  image_view=AcquireVirtualCacheView(image,exception);
  morphology_view=AcquireAuthenticCacheView(image,exception);
  width=image->columns+kernel->width-1;
  /* Pass 1: top-down, applying the top half of the kernel. */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /*
      Read virtual pixels, and authentic pixels, from the same image!  We read
      using virtual to get virtual pixel handling, but write back into the same
      image.

      Only top half of kernel is processed as we do a single pass downward
      through the image iterating the distance function as we go.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width,(size_t)
      offset.y+1,exception);
    q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelChannel
          channel;

        PixelTrait
          traits;

        register const MagickRealType
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & CopyPixelTrait) != 0)
          continue;
        pixels=p;
        pixel=(double) QuantumRange;
        switch (method)
        {
          case DistanceMorphology:
          {
            /* rows above (and including) the origin, from source pixels */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v <= offset.y; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            /* pixels to the left on the current row use already-written
               results (read through q) */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            pixels=q-offset.x*GetPixelChannels(image);
            for (u=0; u < offset.x; u++)
            {
              if (!IsNaN(*k) && ((x+u-offset.x) >= 0))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
              pixels+=GetPixelChannels(image);
            }
            break;
          }
          case VoronoiMorphology:
          {
            /* rows strictly above the origin, from source pixels */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v < offset.y; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            /* pixels to the left on the current row use already-written
               results (read through q) */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            pixels=q-offset.x*GetPixelChannels(image);
            for (u=0; u < offset.x; u++)
            {
              if (!IsNaN(*k) && ((x+u-offset.x) >= 0))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
              pixels+=GetPixelChannels(image);
            }
            break;
          }
          default:
            break;
        }
        if (fabs(pixel-q[i]) > MagickEpsilon)
          changed++;
        q[i]=ClampToQuantum(pixel);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,MorphologyTag,progress++,2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  morphology_view=DestroyCacheView(morphology_view);
  image_view=DestroyCacheView(image_view);
  /*
    Do the reverse pass through the image.
  */
  image_view=AcquireVirtualCacheView(image,exception);
  morphology_view=AcquireAuthenticCacheView(image,exception);
  for (y=(ssize_t) image->rows-1; y >= 0; y--)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /*
      Read virtual pixels, and authentic pixels, from the same image.  We
      read using virtual to get virtual pixel handling, but write back
      into the same image.

      Only the bottom half of the kernel is processed as we up the image.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-offset.x,y,width,(size_t)
      kernel->y+1,exception);
    q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* start at the right-hand end of the row and work leftwards */
    p+=(image->columns-1)*GetPixelChannels(image);
    q+=(image->columns-1)*GetPixelChannels(image);
    for (x=(ssize_t) image->columns-1; x >= 0; x--)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelChannel
          channel;

        PixelTrait
          traits;

        register const MagickRealType
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & CopyPixelTrait) != 0)
          continue;
        pixels=p;
        pixel=(double) QuantumRange;
        switch (method)
        {
          case DistanceMorphology:
          {
            /* rows at and below the origin, from source pixels */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            for (v=offset.y; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            /* pixels to the right on the current row use already-written
               results (read through q) */
            k=(&kernel->values[kernel->width*kernel->y+kernel->x-1]);
            pixels=q;
            for (u=offset.x+1; u < (ssize_t) kernel->width; u++)
            {
              pixels+=GetPixelChannels(image);
              if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
            }
            break;
          }
          case VoronoiMorphology:
          {
            /* rows at and below the origin, from source pixels */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            for (v=offset.y; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            /* pixels to the right on the current row use already-written
               results (read through q) */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            pixels=q;
            for (u=offset.x+1; u < (ssize_t) kernel->width; u++)
            {
              pixels+=GetPixelChannels(image);
              if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
            }
            break;
          }
          default:
            break;
        }
        if (fabs(pixel-q[i]) > MagickEpsilon)
          changed++;
        q[i]=ClampToQuantum(pixel);
      }
      p-=GetPixelChannels(image);
      q-=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,MorphologyTag,progress++,2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  morphology_view=DestroyCacheView(morphology_view);
  image_view=DestroyCacheView(image_view);
  return(status ? (ssize_t) changed : -1);
}
/*
Apply a Morphology by calling one of the above low level primitive
application functions. This function handles any iteration loops,
composition or re-iteration of results, and compound morphology methods that
is based on multiple low-level (staged) morphology methods.
Basically this provides the complex glue between the requested morphology
method and raw low-level implementation (above).
*/
/* MorphologyApply(): internal driver that applies a morphology method to an
 * image. It handles the iteration loops, compound (multi-stage) methods,
 * multi-kernel lists, and the composition or re-iteration of their results,
 * delegating the per-pixel work to the low-level primitive functions above.
 *
 * Returns a newly allocated image (caller frees), or NULL on error or when
 * iterations == 0 (a null operation).
 */
MagickPrivate Image *MorphologyApply(const Image *image,
  const MorphologyMethod method, const ssize_t iterations,
  const KernelInfo *kernel, const CompositeOperator compose,const double bias,
  ExceptionInfo *exception)
{
  CompositeOperator
    curr_compose;

  Image
    *curr_image,    /* Image we are working with or iterating */
    *work_image,    /* secondary image for primitive iteration */
    *save_image,    /* saved image - for 'edge' method only */
    *rslt_image;    /* resultant image - after multi-kernel handling */

  KernelInfo
    *reflected_kernel, /* A reflected copy of the kernel (if needed) */
    *norm_kernel,      /* the current normal un-reflected kernel */
    *rflt_kernel,      /* the current reflected kernel (if needed) */
    *this_kernel;      /* the kernel being applied */

  MorphologyMethod
    primitive;      /* the current morphology primitive being applied */

  CompositeOperator
    rslt_compose;   /* multi-kernel compose method for results to use */

  MagickBooleanType
    special,        /* do we use a direct modify function? */
    verbose;        /* verbose output of results */

  size_t
    method_loop,    /* Loop 1: number of compound method iterations (norm 1) */
    method_limit,   /* maximum number of compound method iterations */
    kernel_number,  /* Loop 2: the kernel number being applied */
    stage_loop,     /* Loop 3: primitive loop for compound morphology */
    stage_limit,    /* how many primitives are in this compound */
    kernel_loop,    /* Loop 4: iterate the kernel over image */
    kernel_limit,   /* number of times to iterate kernel */
    count,          /* total count of primitive steps applied */
    kernel_changed, /* total count of changed using iterated kernel */
    method_changed; /* total count of changed over method iteration */

  ssize_t
    changed;        /* number pixels changed by last primitive operation */

  char
    v_info[MagickPathExtent];

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);

  count = 0;      /* number of low-level morphology primitives performed */
  if ( iterations == 0 )
    return((Image *) NULL);   /* null operation - nothing to do! */

  kernel_limit = (size_t) iterations;
  if ( iterations < 0 )  /* negative iterations = infinite (well almost) */
    kernel_limit = image->columns>image->rows ? image->columns : image->rows;

  verbose = IsStringTrue(GetImageArtifact(image,"debug"));

  /* initialise for cleanup */
  curr_image = (Image *) image;
  curr_compose = image->compose;
  (void) curr_compose;
  work_image = save_image = rslt_image = (Image *) NULL;
  reflected_kernel = (KernelInfo *) NULL;

  /* Initialize specific methods
   * + which loop should use the given iterations
   * + how many primitives make up the compound morphology
   * + multi-kernel compose method to use (by default)
   */
  method_limit = 1;       /* just do method once, unless otherwise set */
  stage_limit = 1;        /* assume method is not a compound */
  special = MagickFalse;  /* assume it is NOT a direct modify primitive */
  rslt_compose = compose; /* and we are composing multi-kernels as given */
  switch( method ) {
    case SmoothMorphology:  /* 4 primitive compound morphology */
      stage_limit = 4;
      break;
    case OpenMorphology:    /* 2 primitive compound morphology */
    case OpenIntensityMorphology:
    case TopHatMorphology:
    case CloseMorphology:
    case CloseIntensityMorphology:
    case BottomHatMorphology:
    case EdgeMorphology:
      stage_limit = 2;
      break;
    case HitAndMissMorphology:
      rslt_compose = LightenCompositeOp;  /* Union of multi-kernel results */
      /* FALL THRU */
    case ThinningMorphology:
    case ThickenMorphology:
      method_limit = kernel_limit;  /* iterate the whole method */
      kernel_limit = 1;             /* do not do kernel iteration */
      break;
    case DistanceMorphology:
    case VoronoiMorphology:
      special = MagickTrue;         /* use special direct primitive */
      break;
    default:
      break;
  }

  /* Apply special methods with special requirements
  ** For example, single run only, or post-processing requirements
  */
  if ( special != MagickFalse )
    {
      rslt_image=CloneImage(image,0,0,MagickTrue,exception);
      if (rslt_image == (Image *) NULL)
        goto error_cleanup;
      if (SetImageStorageClass(rslt_image,DirectClass,exception) == MagickFalse)
        goto error_cleanup;
      changed=MorphologyPrimitiveDirect(rslt_image,method,kernel,exception);

      if (verbose != MagickFalse)
        (void) FormatLocaleFile(stderr,
          "%s:%.20g.%.20g #%.20g => Changed %.20g\n",
          CommandOptionToMnemonic(MagickMorphologyOptions, method),
          1.0,0.0,1.0, (double) changed);

      if ( changed < 0 )
        goto error_cleanup;

      if ( method == VoronoiMorphology ) {
        /* Preserve the alpha channel of input image - but turned it off */
        (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
          exception);
        (void) CompositeImage(rslt_image,image,CopyAlphaCompositeOp,
          MagickTrue,0,0,exception);
        (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
          exception);
      }
      goto exit_cleanup;
    }

  /* Handle user (caller) specified multi-kernel composition method */
  if ( compose != UndefinedCompositeOp )
    rslt_compose = compose;  /* override default composition for method */
  if ( rslt_compose == UndefinedCompositeOp )
    rslt_compose = NoCompositeOp; /* still not defined! Then re-iterate */

  /* Some methods require a reflected kernel to use with primitives.
   * Create the reflected kernel for those methods. */
  switch ( method ) {
    case CorrelateMorphology:
    case CloseMorphology:
    case CloseIntensityMorphology:
    case BottomHatMorphology:
    case SmoothMorphology:
      reflected_kernel = CloneKernelInfo(kernel);
      if (reflected_kernel == (KernelInfo *) NULL)
        goto error_cleanup;
      RotateKernelInfo(reflected_kernel,180);
      break;
    default:
      break;
  }

  /* Loops around more primitive morphology methods
  ** erode, dilate, open, close, smooth, edge, etc...
  */
  /* Loop 1:  iterate the compound method */
  method_loop = 0;
  method_changed = 1;
  while ( method_loop < method_limit && method_changed > 0 ) {
    method_loop++;
    method_changed = 0;

    /* Loop 2:  iterate over each kernel in a multi-kernel list */
    norm_kernel = (KernelInfo *) kernel;
    this_kernel = (KernelInfo *) kernel;
    rflt_kernel = reflected_kernel;

    kernel_number = 0;
    while ( norm_kernel != NULL ) {

      /* Loop 3: Compound Morphology Staging - Select Primitive to apply */
      stage_loop = 0;          /* the compound morphology stage number */
      while ( stage_loop < stage_limit ) {
        stage_loop++;   /* The stage of the compound morphology */

        /* Select primitive morphology for this stage of compound method */
        this_kernel = norm_kernel; /* default use unreflected kernel */
        primitive = method;        /* Assume method is a primitive */
        switch( method ) {
          case ErodeMorphology:      /* just erode */
          case EdgeInMorphology:     /* erode and image difference */
            primitive = ErodeMorphology;
            break;
          case DilateMorphology:     /* just dilate */
          case EdgeOutMorphology:    /* dilate and image difference */
            primitive = DilateMorphology;
            break;
          case OpenMorphology:       /* erode then dilate */
          case TopHatMorphology:     /* open and image difference */
            primitive = ErodeMorphology;
            if ( stage_loop == 2 )
              primitive = DilateMorphology;
            break;
          case OpenIntensityMorphology:
            primitive = ErodeIntensityMorphology;
            if ( stage_loop == 2 )
              primitive = DilateIntensityMorphology;
            break;
          case CloseMorphology:      /* dilate, then erode */
          case BottomHatMorphology:  /* close and image difference */
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = DilateMorphology;
            if ( stage_loop == 2 )
              primitive = ErodeMorphology;
            break;
          case CloseIntensityMorphology:
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = DilateIntensityMorphology;
            if ( stage_loop == 2 )
              primitive = ErodeIntensityMorphology;
            break;
          case SmoothMorphology:         /* open, close */
            switch ( stage_loop ) {
              case 1: /* start an open method, which starts with Erode */
                primitive = ErodeMorphology;
                break;
              case 2:  /* now Dilate the Erode */
                primitive = DilateMorphology;
                break;
              case 3:  /* Reflect kernel a close */
                this_kernel = rflt_kernel; /* use the reflected kernel */
                primitive = DilateMorphology;
                break;
              case 4:  /* Finish the Close */
                this_kernel = rflt_kernel; /* use the reflected kernel */
                primitive = ErodeMorphology;
                break;
            }
            break;
          case EdgeMorphology:        /* dilate and erode difference */
            primitive = DilateMorphology;
            if ( stage_loop == 2 ) {
              save_image = curr_image;      /* save the image difference */
              curr_image = (Image *) image;
              primitive = ErodeMorphology;
            }
            break;
          case CorrelateMorphology:
            /* A Correlation is a Convolution with a reflected kernel.
            ** However a Convolution is a weighted sum using a reflected
            ** kernel.  It may seem strange to convert a Correlation into a
            ** Convolution as the Correlation is the simpler method, but
            ** Convolution is much more commonly used, and it makes sense to
            ** implement it directly so as to avoid the need to duplicate the
            ** kernel when it is not required (which is typically the
            ** default).
            */
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = ConvolveMorphology;
            break;
          default:
            break;
        }
        assert( this_kernel != (KernelInfo *) NULL );

        /* Extra information for debugging compound operations */
        if (verbose != MagickFalse) {
          if ( stage_limit > 1 )
            (void) FormatLocaleString(v_info,MagickPathExtent,"%s:%.20g.%.20g -> ",
             CommandOptionToMnemonic(MagickMorphologyOptions,method),(double)
             method_loop,(double) stage_loop);
          else if ( primitive != method )
            (void) FormatLocaleString(v_info, MagickPathExtent, "%s:%.20g -> ",
              CommandOptionToMnemonic(MagickMorphologyOptions, method),(double)
              method_loop);
          else
            v_info[0] = '\0';
        }

        /* Loop 4: Iterate the kernel with primitive */
        kernel_loop = 0;
        kernel_changed = 0;
        changed = 1;
        while ( kernel_loop < kernel_limit && changed > 0 ) {
          kernel_loop++;     /* the iteration of this kernel */

          /* Create a clone as the destination image, if not yet defined */
          if ( work_image == (Image *) NULL )
            {
              work_image=CloneImage(image,0,0,MagickTrue,exception);
              if (work_image == (Image *) NULL)
                goto error_cleanup;
              if (SetImageStorageClass(work_image,DirectClass,exception) == MagickFalse)
                goto error_cleanup;
            }

          /* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */
          count++;
          changed = MorphologyPrimitive(curr_image, work_image, primitive,
                       this_kernel, bias, exception);

          if (verbose != MagickFalse) {
            if ( kernel_loop > 1 )
              (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line from previous */
            /* note: was a doubled "(void) (void)" cast; one suffices */
            (void) FormatLocaleFile(stderr,
              "%s%s%s:%.20g.%.20g #%.20g => Changed %.20g",
              v_info,CommandOptionToMnemonic(MagickMorphologyOptions,
              primitive),(this_kernel == rflt_kernel ) ? "*" : "",
              (double) (method_loop+kernel_loop-1),(double) kernel_number,
              (double) count,(double) changed);
          }
          if ( changed < 0 )
            goto error_cleanup;
          kernel_changed += changed;
          method_changed += changed;

          /* prepare next loop */
          { Image *tmp = work_image;   /* swap images for iteration */
            work_image = curr_image;
            curr_image = tmp;
          }
          if ( work_image == image )
            work_image = (Image *) NULL; /* replace input 'image' */

        } /* End Loop 4: Iterate the kernel with primitive */

        if (verbose != MagickFalse && kernel_changed != (size_t)changed)
          (void) FormatLocaleFile(stderr, "   Total %.20g",(double) kernel_changed);
        if (verbose != MagickFalse && stage_loop < stage_limit)
          (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line before looping */

#if 0
    (void) FormatLocaleFile(stderr, "--E-- image=0x%lx\n", (unsigned long)image);
    (void) FormatLocaleFile(stderr, "      curr =0x%lx\n", (unsigned long)curr_image);
    (void) FormatLocaleFile(stderr, "      work =0x%lx\n", (unsigned long)work_image);
    (void) FormatLocaleFile(stderr, "      save =0x%lx\n", (unsigned long)save_image);
    (void) FormatLocaleFile(stderr, "      union=0x%lx\n", (unsigned long)rslt_image);
#endif

      } /* End Loop 3: Primitive (staging) Loop for Compound Methods */

      /*  Final Post-processing for some Compound Methods
      **
      ** The removal of any 'Sync' channel flag in the Image Composition
      ** below ensures the mathematical compose method is applied in a
      ** purely mathematical way, and only to the selected channels.
      ** Turn off SVG composition 'alpha blending'.
      */
      switch( method ) {
        case EdgeOutMorphology:
        case EdgeInMorphology:
        case TopHatMorphology:
        case BottomHatMorphology:
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr,
              "\n%s: Difference with original image",CommandOptionToMnemonic(
              MagickMorphologyOptions, method) );
          (void) CompositeImage(curr_image,image,DifferenceCompositeOp,
            MagickTrue,0,0,exception);
          break;
        case EdgeMorphology:
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr,
              "\n%s: Difference of Dilate and Erode",CommandOptionToMnemonic(
              MagickMorphologyOptions, method) );
          (void) CompositeImage(curr_image,save_image,DifferenceCompositeOp,
            MagickTrue,0,0,exception);
          save_image = DestroyImage(save_image); /* finished with save image */
          break;
        default:
          break;
      }

      /* multi-kernel handling:  re-iterate, or compose results */
      if ( kernel->next == (KernelInfo *) NULL )
        rslt_image = curr_image;   /* just return the resulting image */
      else if ( rslt_compose == NoCompositeOp )
        { if (verbose != MagickFalse) {
            if ( this_kernel->next != (KernelInfo *) NULL )
              (void) FormatLocaleFile(stderr, " (re-iterate)");
            else
              (void) FormatLocaleFile(stderr, " (done)");
          }
          rslt_image = curr_image; /* return result, and re-iterate */
        }
      else if ( rslt_image == (Image *) NULL)
        { if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr, " (save for compose)");
          rslt_image = curr_image;
          curr_image = (Image *) image;  /* continue with original image */
        }
      else
        { /* Add the new 'current' result to the composition
          **
          ** The removal of any 'Sync' channel flag in the Image Composition
          ** below ensures the mathematical compose method is applied in a
          ** purely mathematical way, and only to the selected channels.
          ** IE: Turn off SVG composition 'alpha blending'.
          */
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr, " (compose \"%s\")",
              CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) );
          (void) CompositeImage(rslt_image,curr_image,rslt_compose,MagickTrue,
            0,0,exception);
          curr_image = DestroyImage(curr_image);
          curr_image = (Image *) image;  /* continue with original image */
        }
      if (verbose != MagickFalse)
        (void) FormatLocaleFile(stderr, "\n");

      /* loop to the next kernel in a multi-kernel list */
      norm_kernel = norm_kernel->next;
      if ( rflt_kernel != (KernelInfo *) NULL )
        rflt_kernel = rflt_kernel->next;
      kernel_number++;
    } /* End Loop 2: Loop over each kernel */

  } /* End Loop 1: compound method iteration */

  goto exit_cleanup;

  /* Yes goto's are bad, but it makes cleanup lot more efficient */
error_cleanup:
  if ( curr_image == rslt_image )
    curr_image = (Image *) NULL;
  if ( rslt_image != (Image *) NULL )
    rslt_image = DestroyImage(rslt_image);
exit_cleanup:
  if ( curr_image == rslt_image || curr_image == image )
    curr_image = (Image *) NULL;
  if ( curr_image != (Image *) NULL )
    curr_image = DestroyImage(curr_image);
  if ( work_image != (Image *) NULL )
    work_image = DestroyImage(work_image);
  if ( save_image != (Image *) NULL )
    save_image = DestroyImage(save_image);
  if ( reflected_kernel != (KernelInfo *) NULL )
    reflected_kernel = DestroyKernelInfo(reflected_kernel);
  return(rslt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyImage() applies a user supplied kernel to the image according to
% the given morphology method.
%
% This function applies any and all user defined settings before calling
% the above internal function MorphologyApply().
%
% User defined settings include...
% * Output Bias for Convolution and correlation ("-define convolve:bias=??")
% * Kernel Scale/normalize settings ("-define convolve:scale=??")
% This can also include the addition of a scaled unity kernel.
% * Show Kernel being applied ("-define morphology:showKernel=1")
%
% Other operators that do not want user supplied options interfering,
% especially "convolve:bias" and "morphology:showKernel" should use
% MorphologyApply() directly.
%
% The format of the MorphologyImage method is:
%
% Image *MorphologyImage(const Image *image,MorphologyMethod method,
% const ssize_t iterations,KernelInfo *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o kernel: An array of double representing the morphology kernel.
% Warning: kernel may be normalized for the Convolve method.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* MorphologyImage(): apply any user-defined artifact settings
 * (convolve:bias, convolve:scale, morphology:showKernel,
 * morphology:compose) and then invoke the internal MorphologyApply().
 *
 * Returns the morphologized image (caller frees), or NULL on failure.
 */
MagickExport Image *MorphologyImage(const Image *image,
  const MorphologyMethod method,const ssize_t iterations,
  const KernelInfo *kernel,ExceptionInfo *exception)
{
  CompositeOperator
    compose_op;

  const char
    *setting;

  double
    output_bias;

  Image
    *result;

  KernelInfo
    *active_kernel;

  active_kernel=(KernelInfo *) kernel;
  output_bias=0.0;
  compose_op=UndefinedCompositeOp;     /* use default for method */

  /* Apply Convolve/Correlate Normalization and Scaling Factors.
   * This happens BEFORE ShowKernelInfo() is called so that users can see
   * the results of the 'option:convolve:scale' option.
   */
  if ((method == ConvolveMorphology) || (method == CorrelateMorphology))
    {
      /* Get the bias value as it will be needed */
      setting=GetImageArtifact(image,"convolve:bias");
      if (setting != (const char *) NULL)
        {
          if (IsGeometry(setting) == MagickFalse)
            (void) ThrowMagickException(exception,GetMagickModule(),
              OptionWarning,"InvalidSetting","'%s' '%s'",
              "convolve:bias",setting);
          else
            output_bias=StringToDoubleInterval(setting,(double)
              QuantumRange+1.0);
        }
      /* Scale kernel according to user wishes */
      setting=GetImageArtifact(image,"convolve:scale");
      if (setting != (const char *) NULL)
        {
          if (IsGeometry(setting) == MagickFalse)
            (void) ThrowMagickException(exception,GetMagickModule(),
              OptionWarning,"InvalidSetting","'%s' '%s'",
              "convolve:scale",setting);
          else
            {
              /* clone before scaling - the caller's kernel is const */
              if (active_kernel == kernel)
                active_kernel=CloneKernelInfo(kernel);
              if (active_kernel == (KernelInfo *) NULL)
                return((Image *) NULL);
              ScaleGeometryKernelInfo(active_kernel,setting);
            }
        }
    }

  /* display the (normalized) kernel via stderr */
  setting=GetImageArtifact(image,"morphology:showKernel");
  if (IsStringTrue(setting) != MagickFalse)
    ShowKernelInfo(active_kernel);

  /* Override the default handling of multi-kernel morphology results
   * If 'Undefined' use the default method
   * If 'None' (default for 'Convolve') re-iterate previous result
   * Otherwise merge resulting images using compose method given.
   * Default for 'HitAndMiss' is 'Lighten'.
   */
  setting=GetImageArtifact(image,"morphology:compose");
  if (setting != (const char *) NULL)
    {
      ssize_t
        parse;

      parse=ParseCommandOption(MagickComposeOptions,MagickFalse,setting);
      if (parse < 0)
        (void) ThrowMagickException(exception,GetMagickModule(),
          OptionWarning,"UnrecognizedComposeOperator","'%s' '%s'",
          "morphology:compose",setting);
      else
        compose_op=(CompositeOperator) parse;
    }

  /* Apply the Morphology */
  result=MorphologyApply(image,method,iterations,active_kernel,compose_op,
    output_bias,exception);

  /* Cleanup and Exit */
  if (active_kernel != kernel)
    active_kernel=DestroyKernelInfo(active_kernel);
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateKernelInfo() rotates the kernel by the angle given.
%
% Currently it is restricted to 90 degree angles, of either 1D kernels
% or square kernels. And 'circular' rotations of 45 degrees for 3x3 kernels.
% It will ignore useless rotations for specific 'named' built-in kernels.
%
% The format of the RotateKernelInfo method is:
%
% void RotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is currently internal to this module only, but can be exported
% to other modules if needed.
*/
/* Rotate a kernel (and all kernels in its multi-kernel list) by the given
 * angle, in degrees.  Only multiples of 45 degrees (3x3 kernels) and 90
 * degrees (1-D or square kernels) are actually performed; see the prose
 * header above for details. */
static void RotateKernelInfo(KernelInfo *kernel, double angle)
{
  /* angle the lower kernels first (recurse down the multi-kernel list) */
  if ( kernel->next != (KernelInfo *) NULL)
    RotateKernelInfo(kernel->next, angle);
  /* WARNING: Currently assumes the kernel (rightly) is horizontally symmetrical
  **
  ** TODO: expand beyond simple 90 degree rotates, flips and flops
  */
  /* Modulus the angle into the range [0, 360) */
  angle = fmod(angle, 360.0);
  if ( angle < 0 )
    angle += 360.0;
  if ( 337.5 < angle || angle <= 22.5 )
    return;   /* Near zero angle - no change! - At least not at this time */
  /* Handle special cases */
  switch (kernel->type) {
    /* These built-in kernels are cylindrical kernels, rotating is useless */
    case GaussianKernel:
    case DoGKernel:
    case LoGKernel:
    case DiskKernel:
    case PeaksKernel:
    case LaplacianKernel:
    case ChebyshevKernel:
    case ManhattanKernel:
    case EuclideanKernel:
      return;
    /* These may be rotatable at non-90 angles in the future */
    /* but simply rotating them in multiples of 90 degrees is useless */
    case SquareKernel:
    case DiamondKernel:
    case PlusKernel:
    case CrossKernel:
      return;
    /* These only allows a +/-90 degree rotation (by transpose) */
    /* A 180 degree rotation is useless */
    case BlurKernel:
      if ( 135.0 < angle && angle <= 225.0 )
        return;
      if ( 225.0 < angle && angle <= 315.0 )
        angle -= 180;
      break;
    default:
      break;
  }
  /* Attempt rotations by 45 degrees  -- 3x3 kernels only
   * The eight outer cells of a 3x3 kernel form a ring; a 45-degree rotation
   * is a single cyclic shift of that ring (the center cell is untouched). */
  if ( 22.5 < fmod(angle,90.0) && fmod(angle,90.0) <= 67.5 )
    {
      if ( kernel->width == 3 && kernel->height == 3 )
        { /* Rotate a 3x3 square by 45 degree angle */
          double t  = kernel->values[0];
          kernel->values[0] = kernel->values[3];
          kernel->values[3] = kernel->values[6];
          kernel->values[6] = kernel->values[7];
          kernel->values[7] = kernel->values[8];
          kernel->values[8] = kernel->values[5];
          kernel->values[5] = kernel->values[2];
          kernel->values[2] = kernel->values[1];
          kernel->values[1] = t;
          /* rotate non-centered origin: step the origin around the same
           * ring (coordinates are taken relative to the center cell) */
          if ( kernel->x != 1 || kernel->y != 1 ) {
            ssize_t x,y;
            x = (ssize_t) kernel->x-1;
            y = (ssize_t) kernel->y-1;
                 if ( x == y  ) x = 0;
            else if ( x == 0  ) x = -y;
            else if ( x == -y ) y = 0;
            else if ( y == 0  ) y = x;
            kernel->x = (ssize_t) x+1;
            kernel->y = (ssize_t) y+1;
          }
          angle = fmod(angle+315.0, 360.0);  /* angle reduced 45 degrees */
          kernel->angle = fmod(kernel->angle+45.0, 360.0);
        }
      else
        /* NOTE(review): perror() appends strerror(errno), which is
         * unrelated here - looks like it should be fprintf(stderr,...) */
        perror("Unable to rotate non-3x3 kernel by 45 degrees");
    }
  /* 90-degree rotation: needed when the remaining angle is nearer to 90
   * than to 0 or 180 (i.e. in (45, 135] modulo 180) */
  if ( 45.0 < fmod(angle, 180.0) && fmod(angle,180.0) <= 135.0 )
    {
      if ( kernel->width == 1 || kernel->height == 1 )
        { /* Do a transpose of a 1 dimensional kernel,
          ** which results in a fast 90 degree rotation of some type.
          */
          ssize_t
            t;
          t = (ssize_t) kernel->width;
          kernel->width = kernel->height;
          kernel->height = (size_t) t;
          t = kernel->x;
          kernel->x = kernel->y;
          kernel->y = t;
          /* direction of the 90-degree turn depends on the new orientation */
          if ( kernel->width == 1 ) {
            angle = fmod(angle+270.0, 360.0);     /* angle reduced 90 degrees */
            kernel->angle = fmod(kernel->angle+90.0, 360.0);
          } else {
            angle = fmod(angle+90.0, 360.0);   /* angle increased 90 degrees */
            kernel->angle = fmod(kernel->angle+270.0, 360.0);
          }
        }
      else if ( kernel->width == kernel->height )
        { /* Rotate a square array of values by 90 degrees
           * in place, by cycling the four corresponding cells of each
           * concentric ring (a standard in-place matrix rotation). */
          { register ssize_t
              i,j,x,y;
            register MagickRealType
              *k,t;
            k=kernel->values;
            for( i=0, x=(ssize_t) kernel->width-1;  i<=x;   i++, x--)
              for( j=0, y=(ssize_t) kernel->height-1;  j<y;   j++, y--)
                { t                    = k[i+j*kernel->width];
                  k[i+j*kernel->width] = k[j+x*kernel->width];
                  k[j+x*kernel->width] = k[x+y*kernel->width];
                  k[x+y*kernel->width] = k[y+i*kernel->width];
                  k[y+i*kernel->width] = t;
                }
          }
          /* rotate the origin - relative to center of array */
          { register ssize_t x,y;
            x = (ssize_t) (kernel->x*2-kernel->width+1);
            y = (ssize_t) (kernel->y*2-kernel->height+1);
            kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2;
            kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2;
          }
          angle = fmod(angle+270.0, 360.0);     /* angle reduced 90 degrees */
          kernel->angle = fmod(kernel->angle+90.0, 360.0);
        }
      else
        perror("Unable to rotate a non-square, non-linear kernel 90 degrees");
    }
  if ( 135.0 < angle && angle <= 225.0 )
    {
      /* For a 180 degree rotation - also know as a reflection
       * This is actually a very very common operation!
       * Basically all that is needed is a reversal of the kernel data!
       * And a reflection of the origin
       */
      MagickRealType
        t;
      register MagickRealType
        *k;
      ssize_t
        i,
        j;
      k=kernel->values;
      /* reverse the flat value array in place */
      j=(ssize_t) (kernel->width*kernel->height-1);
      for (i=0;  i < j;  i++, j--)
        t=k[i],  k[i]=k[j],  k[j]=t;
      kernel->x = (ssize_t) kernel->width  - kernel->x - 1;
      kernel->y = (ssize_t) kernel->height - kernel->y - 1;
      angle = fmod(angle-180.0, 360.0);   /* angle+180 degrees */
      kernel->angle = fmod(kernel->angle+180.0, 360.0);
    }
  /* At this point angle should at least between -45 (315) and +45 degrees
   * In the future some form of non-orthogonal angled rotates could be
   * performed here, possibly with a linear kernel restriction.
   */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e G e o m e t r y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleGeometryKernelInfo() takes a geometry argument string, typically
% provided as a "-set option:convolve:scale {geometry}" user setting,
% and modifies the kernel according to the parsed arguments of that setting.
%
% The first argument (and any normalization flags) are passed to
% ScaleKernelInfo() to scale/normalize the kernel. The second argument
% is then passed to UnityAddKernelInfo() to add a scaled unity kernel
% into the scaled/normalized kernel.
%
% The format of the ScaleGeometryKernelInfo method is:
%
%       void ScaleGeometryKernelInfo(KernelInfo *kernel,
%         const char *geometry)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% o geometry:
% The geometry string to parse, typically from the user provided
% "-set option:convolve:scale {geometry}" setting.
%
*/
/* ScaleGeometryKernelInfo(): parse a user geometry string ("-set
 * option:convolve:scale {geometry}") and scale/normalize the kernel
 * accordingly; a second argument blends in a scaled unity kernel. */
MagickExport void ScaleGeometryKernelInfo (KernelInfo *kernel,
  const char *geometry)
{
  GeometryInfo
    geometry_info;

  MagickStatusType
    status_flags;

  /* first value = scale/normalize factor, second = unity-kernel amount */
  SetGeometryInfo(&geometry_info);
  status_flags=ParseGeometry(geometry,&geometry_info);
#if 0
  /* For Debugging Geometry Input */
  (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
    status_flags, geometry_info.rho, geometry_info.sigma, geometry_info.xi,
    geometry_info.psi );
#endif
  if ((status_flags & PercentValue) != 0)
    {
      /* '%' flag: the arguments were given as percentages */
      geometry_info.rho*=0.01;
      geometry_info.sigma*=0.01;
    }
  /* Set Defaults for missing args */
  if ((status_flags & RhoValue) == 0)
    geometry_info.rho=1.0;       /* default: no extra scaling */
  if ((status_flags & SigmaValue) == 0)
    geometry_info.sigma=0.0;     /* default: no unity kernel addition */
  /* Scale/Normalize the input kernel */
  ScaleKernelInfo(kernel,geometry_info.rho,(GeometryFlags) status_flags);
  /* Add Unity Kernel, for blending with original */
  if ((status_flags & SigmaValue) != 0)
    UnityAddKernelInfo(kernel,geometry_info.sigma);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleKernelInfo() scales the given kernel list by the given amount, with or
% without normalization of the sum of the kernel values (as per given flags).
%
% By default (no flags given) the values within the kernel is scaled
% directly using given scaling factor without change.
%
% If either of the two 'normalize_flags' are given the kernel will first be
% normalized and then further scaled by the scaling factor value given.
%
% Kernel normalization ('normalize_flags' given) is designed to ensure that
% any use of the kernel scaling factor with 'Convolve' or 'Correlate'
% morphology methods will fall into -1.0 to +1.0 range. Note that for
% non-HDRI versions of IM this may cause images to have any negative results
% clipped, unless some 'bias' is used.
%
% More specifically. Kernels which only contain positive values (such as a
% 'Gaussian' kernel) will be scaled so that those values sum to +1.0,
% ensuring a 0.0 to +1.0 output range for non-HDRI images.
%
% For Kernels that contain some negative values, (such as 'Sharpen' kernels)
% the kernel will be scaled by the absolute of the sum of kernel values, so
% that it will generally fall within the +/- 1.0 range.
%
% For kernels whose values sum to zero, (such as 'Laplacian' kernels) kernel
% will be scaled by just the sum of the positive values, so that its output
% range will again fall into the +/- 1.0 range.
%
% For special kernels designed for locating shapes using 'Correlate', (often
% only containing +1 and -1 values, representing foreground/background
% matching) a special normalization method is provided to scale the positive
% values separately to those of the negative values, so the kernel will be
% forced to become a zero-sum kernel better suited to such searches.
%
% WARNING: Correct normalization of the kernel assumes that the '*_range'
% attributes within the kernel structure have been correctly set during the
% kernels creation.
%
% NOTE: The values used for 'normalize_flags' have been selected specifically
% to match the use of geometry options, so that '!' means NormalizeValue, '^'
% means CorrelateNormalizeValue. All other GeometryFlags values are ignored.
%
% The format of the ScaleKernelInfo method is:
%
% void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,
% const MagickStatusType normalize_flags )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scaling_factor:
% multiply all values (after normalization) by this factor if not
% zero. If the kernel is normalized regardless of any flags.
%
% o normalize_flags:
% GeometryFlags defining normalization method to use.
% specifically: NormalizeValue, CorrelateNormalizeValue,
% and/or PercentValue
%
*/
/* ScaleKernelInfo(): scale (and optionally normalize) every kernel in the
 * given multi-kernel list by scaling_factor.  NormalizeValue normalizes
 * against the kernel's value sum; CorrelateNormalizeValue scales positive
 * and negative values independently to force a zero-summing kernel.
 * The kernel's cached range/extrema attributes are updated to match. */
MagickExport void ScaleKernelInfo(KernelInfo *kernel,
  const double scaling_factor,const GeometryFlags normalize_flags)
{
  register double
    pos_scale,
    neg_scale;

  register ssize_t
    i;

  /* do the other kernels in a multi-kernel list first */
  if ( kernel->next != (KernelInfo *) NULL)
    ScaleKernelInfo(kernel->next, scaling_factor, normalize_flags);

  /* Normalization of Kernel */
  pos_scale = 1.0;
  if ( (normalize_flags&NormalizeValue) != 0 ) {
    if ( fabs(kernel->positive_range + kernel->negative_range) >= MagickEpsilon )
      /* non-zero-summing kernel (generally positive) */
      pos_scale = fabs(kernel->positive_range + kernel->negative_range);
    else
      /* zero-summing kernel */
      pos_scale = kernel->positive_range;
  }
  /* Force kernel into a normalized zero-summing kernel */
  if ( (normalize_flags&CorrelateNormalizeValue) != 0 ) {
    pos_scale = ( fabs(kernel->positive_range) >= MagickEpsilon )
                 ? kernel->positive_range : 1.0;
    neg_scale = ( fabs(kernel->negative_range) >= MagickEpsilon )
                 ? -kernel->negative_range : 1.0;
  }
  else
    neg_scale = pos_scale;

  /* finalize scaling_factor for positive and negative components */
  pos_scale = scaling_factor/pos_scale;
  neg_scale = scaling_factor/neg_scale;

  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    if (!IsNaN(kernel->values[i]))
      kernel->values[i] *= (kernel->values[i] >= 0) ? pos_scale : neg_scale;

  /* convolution output range */
  kernel->positive_range *= pos_scale;
  kernel->negative_range *= neg_scale;
  /* maximum and minimum values in kernel */
  kernel->maximum *= (kernel->maximum >= 0.0) ? pos_scale : neg_scale;
  kernel->minimum *= (kernel->minimum >= 0.0) ? pos_scale : neg_scale;

  /* swap kernel settings if user's scaling factor is negative */
  if ( scaling_factor < MagickEpsilon ) {
    double t;
    t = kernel->positive_range;
    kernel->positive_range = kernel->negative_range;
    kernel->negative_range = t;
    t = kernel->maximum;
    kernel->maximum = kernel->minimum;
    kernel->minimum = t;  /* bug fix: was '= 1', discarding the saved maximum */
  }

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h o w K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShowKernelInfo() outputs the details of the given kernel definition to
% standard error, generally due to a users 'morphology:showKernel' option
% request.
%
% The format of the ShowKernel method is:
%
% void ShowKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
/* ShowKernelInfo(): dump every kernel in the given multi-kernel list to
 * standard error - type, size, origin, value ranges, and the value grid
 * (printing "nan" for undefined cells). */
MagickPrivate void ShowKernelInfo(const KernelInfo *kernel)
{
  const KernelInfo
    *current;

  size_t
    cell,
    column,
    number,
    row;

  number=0;
  current=kernel;
  while (current != (const KernelInfo *) NULL)
  {
    (void) FormatLocaleFile(stderr, "Kernel");
    /* only number the kernels when this is a multi-kernel list */
    if ( kernel->next != (KernelInfo *) NULL )
      (void) FormatLocaleFile(stderr, " #%lu", (unsigned long) number );
    (void) FormatLocaleFile(stderr, " \"%s",
      CommandOptionToMnemonic(MagickKernelOptions, current->type) );
    if ( fabs(current->angle) >= MagickEpsilon )
      (void) FormatLocaleFile(stderr, "@%lg", current->angle);
    (void) FormatLocaleFile(stderr, "\" of size %lux%lu%+ld%+ld",
      (unsigned long) current->width,(unsigned long) current->height,
      (long) current->x,(long) current->y);
    (void) FormatLocaleFile(stderr,
      " with values from %.*lg to %.*lg\n",
      GetMagickPrecision(), current->minimum,
      GetMagickPrecision(), current->maximum);
    (void) FormatLocaleFile(stderr, "Forming a output range from %.*lg to %.*lg",
      GetMagickPrecision(), current->negative_range,
      GetMagickPrecision(), current->positive_range);
    /* classify the kernel by the sum of its value ranges */
    if ( fabs(current->positive_range+current->negative_range) < MagickEpsilon )
      (void) FormatLocaleFile(stderr, " (Zero-Summing)\n");
    else if ( fabs(current->positive_range+current->negative_range-1.0) <
              MagickEpsilon )
      (void) FormatLocaleFile(stderr, " (Normalized)\n");
    else
      (void) FormatLocaleFile(stderr, " (Sum %.*lg)\n",
        GetMagickPrecision(), current->positive_range+current->negative_range);
    /* dump the value grid, one row per line */
    cell=0;
    for (row=0; row < current->height; row++)
    {
      (void) FormatLocaleFile(stderr, "%2lu:", (unsigned long) row );
      for (column=0; column < current->width; column++, cell++)
        if (IsNaN(current->values[cell]))
          (void) FormatLocaleFile(stderr," %*s", GetMagickPrecision()+3, "nan");
        else
          (void) FormatLocaleFile(stderr," %*.*lg", GetMagickPrecision()+3,
            GetMagickPrecision(), (double) current->values[cell]);
      (void) FormatLocaleFile(stderr,"\n");
    }
    number++;
    current=current->next;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n i t y A d d K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnityAddKernelInfo() Adds a given amount of the 'Unity' Convolution Kernel
% to the given pre-scaled and normalized Kernel. This in effect adds that
% amount of the original image into the resulting convolution kernel. This
% value is usually provided by the user as a percentage value in the
% 'convolve:scale' setting.
%
% The resulting effect is to convert the defined kernels into blended
% soft-blurs, unsharp kernels or into sharpening kernels.
%
% The format of the UnityAddKernelInfo method is:
%
% void UnityAddKernelInfo(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scale:
% scaling factor for the unity kernel to be added to
% the given kernel.
%
*/
MagickExport void UnityAddKernelInfo(KernelInfo *kernel,
  const double scale)
{
  KernelInfo
    *node;

  /*
    Walk the whole multi-kernel list, blending 'scale' times the Unity
    kernel into each member, then refresh that member's meta-data.
    (Each member is independent, so list order does not matter.)
  */
  for (node = kernel; node != (KernelInfo *) NULL; node = node->next) {
    /* The Unity kernel is 1 at the origin (x,y) and 0 elsewhere. */
    node->values[node->x+node->y*node->width] += scale;
    CalcKernelMetaData(node); /* recalculate the meta-data */
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Z e r o K e r n e l N a n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZeroKernelNans() replaces any special 'nan' value that may be present in
% the kernel with a zero value. This is typically done when the kernel will
% be used in special hardware (GPU) convolution processors, to simplify
% matters.
%
% The format of the ZeroKernelNans method is:
%
% void ZeroKernelNans (KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickPrivate void ZeroKernelNans(KernelInfo *kernel)
{
  KernelInfo
    *node;

  size_t
    n;

  /*
    Replace every special 'nan' value with zero, in every kernel of the
    multi-kernel list (iterative rather than recursive traversal).
  */
  for (node = kernel; node != (KernelInfo *) NULL; node = node->next)
    for (n = 0; n < (node->width*node->height); n++)
      if (IsNaN(node->values[n]))
        node->values[n] = 0.0;
}
|
omp_taskwait.c | // RUN: %libomp-compile-and-run
// This test is known to be fragile on NetBSD kernel at the moment,
// https://bugs.llvm.org/show_bug.cgi?id=42020.
// UNSUPPORTED: netbsd
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
#include "omp_my_sleep.h"
/*
 * Verifies '#pragma omp taskwait': after the taskwait, every task generated
 * so far must have completed (result1 counts violations).  A second batch of
 * tasks then overwrites the array; those complete at the implicit barrier
 * ending the single/parallel region (result2 counts violations).
 * Returns 1 on success, 0 on failure.
 */
int test_omp_taskwait()
{
int result1 = 0; /* Stores number of not finished tasks after the taskwait */
int result2 = 0; /* Stores number of wrong array elements at the end */
int array[NUM_TASKS];
int i;
/* fill array */
for (i = 0; i < NUM_TASKS; i++)
array[i] = 0;
#pragma omp parallel
{
/* one thread generates all tasks; the others execute them */
#pragma omp single
{
for (i = 0; i < NUM_TASKS; i++) {
/* First we have to store the value of the loop index in a new variable
* which will be private for each task because otherwise it will be overwritten
* if the execution of the task takes longer than the time which is needed to
* enter the next step of the loop!
*/
int myi;
myi = i;
#pragma omp task
{
/* sleep so the task outlives the generating loop iteration */
my_sleep (SLEEPTIME);
array[myi] = 1;
} /* end of omp task */
} /* end of for */
#pragma omp taskwait
/* check if all tasks were finished */
for (i = 0; i < NUM_TASKS; i++)
if (array[i] != 1)
result1++;
/* generate some more tasks which now shall overwrite
* the values in the tids array */
for (i = 0; i < NUM_TASKS; i++) {
int myi;
myi = i;
#pragma omp task
{
array[myi] = 2;
} /* end of omp task */
} /* end of for */
} /* end of single */
} /*end of parallel */
/* final check, if all array elements contain the right values: */
for (i = 0; i < NUM_TASKS; i++) {
if (array[i] != 2)
result2++;
}
return ((result1 == 0) && (result2 == 0));
}
int main()
{
  int rep;
  int failures = 0;

  /* Run the testcase REPETITIONS times; exit status is the failure count. */
  for (rep = 0; rep < REPETITIONS; rep++)
    if (!test_omp_taskwait())
      failures++;
  return failures;
}
|
parallel_for.h | /*
Copyright (c) 2013, Taiga Nomi and the respective contributors
All rights reserved.
Use of this source code is governed by a BSD-style license that can be found
in the LICENSE file.
*/
#pragma once
#include <cassert>
#include <cstdio>
#include <limits>
#include <string>
#include <type_traits>
#include <vector>
#include "aligned_allocator.h"
#include "nn_error.h"
#include "tiny_dnn/config.h"
#ifdef CNN_USE_TBB
#ifndef NOMINMAX
#define NOMINMAX // tbb includes windows.h in tbb/machine/windows_api.h
#endif
#include <tbb/task_group.h>
#include <tbb/tbb.h>
#endif
#if !defined(CNN_USE_OMP) && !defined(CNN_SINGLE_THREAD)
#include <future>
#include <thread>
#endif
#if defined(CNN_USE_GCD) && !defined(CNN_SINGLE_THREAD)
#include <dispatch/dispatch.h>
#endif
namespace tiny_dnn {
#ifdef CNN_USE_TBB
static tbb::task_scheduler_init tbbScheduler(
tbb::task_scheduler_init::automatic); // tbb::task_scheduler_init::deferred);
typedef tbb::blocked_range<size_t> blocked_range;
// Parallel loop over [begin, end) using TBB's blocked_range scheduler.
// NOTE(review): the grain selection looks inverted -- ranges LARGER than
// grainsize get the requested grain, smaller ranges get a grain of 1;
// confirm intent against callers before changing.
template <typename Func>
void parallel_for(size_t begin, size_t end, const Func &f, size_t grainsize) {
assert(end >= begin);
tbb::parallel_for(
blocked_range(begin, end, end - begin > grainsize ? grainsize : 1), f);
}
// Serial fallback (TBB build): invoke f once over the whole range.
template <typename Func>
void xparallel_for(size_t begin, size_t end, const Func &f) {
f(blocked_range(begin, end, 100));
}
#else
// Minimal stand-in for tbb::blocked_range: a half-open index interval
// [begin, end) handed to the loop-body functor.
struct blocked_range {
typedef size_t const_iterator;
blocked_range(size_t begin, size_t end) : begin_(begin), end_(end) {}
// Convenience overload for signed arguments; assumes begin, end >= 0.
blocked_range(int begin, int end)
: begin_(static_cast<size_t>(begin)), end_(static_cast<size_t>(end)) {}
const_iterator begin() const { return begin_; }
const_iterator end() const { return end_; }
private:
size_t begin_;
size_t end_;
};
template <typename Func>
void xparallel_for(size_t begin, size_t end, const Func &f) {
blocked_range r(begin, end);
f(r);
}
#if defined(CNN_USE_OMP)
// Parallel loop over [begin, end) using OpenMP: each index becomes a
// one-element blocked_range handed to f.  The grainsize hint is ignored;
// chunking is left to the OpenMP runtime.
template <typename Func>
void parallel_for(size_t begin,
size_t end,
const Func &f,
size_t /*grainsize*/) {
assert(end >= begin);
#pragma omp parallel for
for (size_t i = begin; i < end; ++i) f(blocked_range(i, i + 1));
}
#elif defined(CNN_USE_GCD)
// Parallel loop using Apple Grand Central Dispatch: split [begin, end) into
// blocks of at most 'grainsize' elements (minimum 1) and dispatch them to
// the default-priority global queue.
// NOTE(review): an empty range (end == begin) still trips assert(blockCount
// > 0) -- pre-existing behavior, kept.
template <typename Func>
void parallel_for(size_t begin, size_t end, const Func &f, size_t grainsize) {
  assert(end >= begin);
  size_t count = end - begin;
  size_t blockSize = grainsize;
  if (count < blockSize || blockSize == 0) {
    blockSize = 1;
  }
  size_t blockCount = (count + blockSize - 1) / blockSize;
  assert(blockCount > 0);
  dispatch_apply(blockCount, dispatch_get_global_queue(QOS_CLASS_DEFAULT, 0),
                 ^(size_t block) {
                   // bug fix: offset by 'begin' -- the old code computed
                   // block * blockSize from 0, so any call with begin != 0
                   // processed the wrong index range
                   size_t blockStart = begin + block * blockSize;
                   size_t blockEnd = blockStart + blockSize;
                   if (blockEnd > end) {
                     blockEnd = end;
                   }
                   assert(blockStart < blockEnd);
                   f(blocked_range(blockStart, blockEnd));
                 });
}
#elif defined(CNN_SINGLE_THREAD)
// CNN_SINGLE_THREAD build: "parallel" execution degrades to the serial path.
template <typename Func>
void parallel_for(size_t begin, size_t end, const Func &f,
                  size_t /*grainsize*/) {
  xparallel_for(begin, end, f);
}
#else
// Thread-pool-free parallel loop: split [begin, end) into one contiguous
// chunk per hardware thread and run each chunk in a std::async task.
// The grainsize hint is ignored; chunking follows hardware concurrency.
template <typename Func>
void parallel_for(size_t begin,
                  size_t end,
                  const Func &f,
                  size_t /*grainsize*/) {
  assert(end >= begin);
  if (end == begin) return;  // nothing to do; avoid launching an empty task
  // bug fix: hardware_concurrency() may legally return 0 ("unknown");
  // fall back to one worker instead of dividing by zero below.
  size_t nthreads = std::thread::hardware_concurrency();
  if (nthreads == 0) nthreads = 1;
  // ceil((end - begin) / nthreads) elements per chunk
  size_t blockSize = (end - begin) / nthreads;
  if (blockSize * nthreads < end - begin) blockSize++;
  std::vector<std::future<void> > futures;
  size_t blockBegin = begin;
  size_t blockEnd = blockBegin + blockSize;
  if (blockEnd > end) blockEnd = end;
  for (size_t i = 0; i < nthreads; i++) {
    futures.push_back(
      std::move(std::async(std::launch::async, [blockBegin, blockEnd, &f] {
        f(blocked_range(blockBegin, blockEnd));
      })));
    blockBegin += blockSize;
    blockEnd = blockBegin + blockSize;
    if (blockBegin >= end) break;
    if (blockEnd > end) blockEnd = end;
  }
  for (auto &future : futures) future.wait();
}
#endif
#endif // CNN_USE_TBB
// True when 'value' survives a round-trip through type T unchanged, i.e.
// T can exactly represent this U value.
template <typename T, typename U>
bool value_representation(U const &value) {
  U round_trip = static_cast<U>(static_cast<T>(value));
  return round_trip == value;
}
// Dispatcher for unsigned index types: run in parallel only when requested
// AND 'end' can be represented in size_t without truncation.
template <typename T, typename Func>
inline void for_(std::true_type,
bool parallelize,
size_t begin,
T end,
Func f,
int grainsize = 100) {
parallelize = parallelize && value_representation<size_t>(end);
parallelize ? parallel_for(begin, static_cast<size_t>(end), f, grainsize)
: xparallel_for(begin, static_cast<size_t>(end), f);
}
// Dispatcher for signed index types.  NOTE(review): unlike the unsigned
// overload there is no representability check, and the serial branch passes
// 'end' unconverted -- appears to assume end >= 0; confirm with callers.
template <typename T, typename Func>
inline void for_(std::false_type,
bool parallelize,
size_t begin,
T end,
Func f,
int grainsize = 100) {
parallelize ? parallel_for(begin, static_cast<size_t>(end), f, grainsize)
: xparallel_for(begin, end, f);
}
// Public entry: selects the signed/unsigned overload above by tag dispatch
// on std::is_unsigned<T>.  Note grainsize narrows from size_t to int here.
template <typename T, typename Func>
inline void for_(
bool parallelize, size_t begin, T end, Func f, size_t grainsize = 100) {
static_assert(std::is_integral<T>::value, "end must be integral type");
for_(typename std::is_unsigned<T>::type(), parallelize, begin, end, f,
grainsize);
}
// Invoke f(i) for every i in [0, size), optionally in parallel.  In the
// threaded builds each worker receives a blocked_range and iterates it.
template <typename T, typename Func>
inline void for_i(bool parallelize, T size, Func f, size_t grainsize = 100) {
#ifdef CNN_SINGLE_THREAD
// NOTE(review): size_t vs T comparison -- assumes size >= 0 for signed T
for (size_t i = 0; i < size; ++i) {
f(i);
}
#else // #ifdef CNN_SINGLE_THREAD
for_(parallelize, 0, size,
[&](const blocked_range &r) {
// OpenMP build: additionally parallelize within the received range
#ifdef CNN_USE_OMP
#pragma omp parallel for
#endif
for (size_t i = r.begin(); i < r.end(); i++) {
f(i);
}
},
grainsize);
#endif // #ifdef CNN_SINGLE_THREAD
}
// Convenience overload: parallel execution enabled by default.
template <typename T, typename Func>
inline void for_i(T size, Func f, size_t grainsize = 100) {
  const bool run_parallel = true;
  for_i(run_parallel, size, f, grainsize);
}
} // namespace tiny_dnn
|
meta_when_default.c | int main()
{
int n = 10;
/* OpenMP 5.0 metadirective: when the user condition n<5 holds, the 'when'
 * clause selects no directive (the loop runs sequentially); otherwise the
 * default variant 'parallel for' is applied to the empty loop below. */
#pragma omp metadirective when(user={condition(n<5)}:) default(parallel for)
for(int i=0; i<n; i++)
;
return 0;
}
|
camp.c | /* (c) 1996,1997 Peter Sanders, Ingo Boesnach */
/* simulate a cellular automaton (serial version)
* periodic boundaries
*
* #1: Number of lines
* #2: Number of iterations to be simulated
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <omp.h>
#include "random.h"
#include "md5tool.h"
/* horizontal size of the configuration */
#define XSIZE 1024
/* "ADT" State and line of states (plus border) */
typedef char State;
typedef State Line[XSIZE + 2];
/* determine random integer between 0 and n-1 */
#define randInt(n) ((int)(nextRandomLEcuyer() * n))
/* --------------------- CA simulation -------------------------------- */
int n_threads =1;
/* random starting configuration */
/* Seed the RNG with a fixed value and fill every interior cell of the field
 * with 0 or 1, each with equal probability. */
static void initConfig(Line *buf, int lines){
  int col, row;

  initRandomLEcuyer(424243);
  for (row = 1; row <= lines; row++)
    for (col = 1; col <= XSIZE; col++)
      buf[row][col] = randInt(100) >= 50;
}
/* annealing rule from ChoDro96 page 34
* the table is used to map the number of nonzero
* states in the neighborhood to the new state
*/
static State anneal[10] = {0, 0, 0, 0, 1, 0, 1, 1, 1, 1};
/* a: pointer to array; x,y: coordinates; result: n-th element of anneal,
where n is the number of neighbors */
#define transition(a, x, y) \
(anneal[(a)[(y)-1][(x)-1] + (a)[(y)][(x)-1] + (a)[(y)+1][(x)-1] +\
(a)[(y)-1][(x) ] + (a)[(y)][(x) ] + (a)[(y)+1][(x) ] +\
(a)[(y)-1][(x)+1] + (a)[(y)][(x)+1] + (a)[(y)+1][(x)+1]])
/* treat torus like boundary conditions */
/* Impose torus (periodic) boundary conditions by copying the opposite edge
 * rows/columns into the one-cell border of the buffer. */
static void boundary(Line *buf, int lines){
  int col, row;

  for (row = 0; row <= lines + 1; row++) {
    buf[row][0]       = buf[row][XSIZE]; /* rightmost column -> border col 0 */
    buf[row][XSIZE+1] = buf[row][1];     /* leftmost column -> border col XSIZE+1 */
  }
  for (col = 0; col <= XSIZE + 1; col++) {
    buf[0][col]       = buf[lines][col]; /* bottommost row -> border row 0 */
    buf[lines+1][col] = buf[1][col];     /* topmost row -> border row lines+1 */
  }
}
/* make one simulation iteration with lines lines.
* old configuration is in from, new one is written to to.
*/
/* Advance the automaton one generation: refresh the torus border of 'from',
 * then apply the annealing transition rule to every interior cell, writing
 * the next generation into 'to'. */
static void simulate(Line *from, Line *to, int lines){
  int col, row;

  boundary(from, lines);
#pragma omp parallel for num_threads(n_threads) shared(to) private(col,row)
  for (row = 1; row <= lines; row++)
    for (col = 1; col <= XSIZE; col++)
      to[row][col] = transition(from, col, row);
}
/* --------------------- measurement ---------------------------------- */
/* Driver: parse <lines> <iterations> <threads>, run the cellular automaton,
 * and print an MD5 digest of the final configuration for verification. */
int main(int argc, char** argv){
  int lines, its;
  int i;
  Line *from, *to, *temp;
  char* hash;

  assert(argc == 4);
  lines = atoi(argv[1]);
  its = atoi(argv[2]);
  n_threads = atoi(argv[3]);
  printf("%d\n", n_threads);

  /* one border row above and below the 'lines' interior rows */
  from = malloc((lines + 2) * sizeof(Line));
  to   = malloc((lines + 2) * sizeof(Line));
  if (from == NULL || to == NULL) { /* bug fix: mallocs were unchecked */
    fprintf(stderr, "out of memory\n");
    free(from);
    free(to);
    return EXIT_FAILURE;
  }

  initConfig(from, lines);
  /* double-buffered update: swap the roles of 'from' and 'to' each step */
  for (i = 0; i < its; i++) {
    simulate(from, to, lines);
    temp = from;
    from = to;
    to = temp;
  }

  /* digest covers the interior rows only (border row 0 is skipped) */
  hash = getMD5DigestStr(from[1], sizeof(Line) * (lines));
  printf("hash: %s\n", hash);
  free(from);
  free(to);
  free(hash);
  return EXIT_SUCCESS;
}
|
csr5_spmv_avx2.h | #ifndef CSR5_SPMV_AVX2_H
#define CSR5_SPMV_AVX2_H
#include "common_avx2.h"
#include "utils_avx2.h"
// Fast path for a partition whose nonzeros all belong to a single row
// (row_start == row_stop): a plain dot product over the whole partition.
// The result goes to the thread's calibrator slot when this row spans the
// thread's chunk boundary, otherwise directly into d_y[row_start] ('direct'
// means this partition owns the first element of the row).
// NOTE(review): 'par_id' and 'alpha' are currently unused here.
template<typename iT, typename vT>
inline void partition_fast_track(const vT *d_value_partition,
const vT *d_x,
const iT *d_column_index_partition,
vT *d_calibrator,
vT *d_y,
const iT row_start,
const iT par_id,
const int tid,
const iT start_row_start,
const vT alpha,
const int sigma,
const int stride_vT,
const bool direct)
{
__m256d sum256d = _mm256_setzero_pd();
__m256d value256d, x256d;
vT x256d0, x256d1, x256d2, x256d3;
#pragma unroll(ANONYMOUSLIB_CSR5_SIGMA)
for (int i = 0; i < ANONYMOUSLIB_CSR5_SIGMA; i++)
{
// multiply 4 matrix values by the 4 gathered x entries and accumulate
value256d = _mm256_load_pd(&d_value_partition[i * ANONYMOUSLIB_CSR5_OMEGA]);
x256d0 = d_x[d_column_index_partition[i * ANONYMOUSLIB_CSR5_OMEGA]];
x256d1 = d_x[d_column_index_partition[i * ANONYMOUSLIB_CSR5_OMEGA + 1]];
x256d2 = d_x[d_column_index_partition[i * ANONYMOUSLIB_CSR5_OMEGA + 2]];
x256d3 = d_x[d_column_index_partition[i * ANONYMOUSLIB_CSR5_OMEGA + 3]];
x256d = _mm256_set_pd(x256d3, x256d2, x256d1, x256d0);
sum256d = _mm256_fmadd_pd(value256d, x256d, sum256d);
}
// horizontal reduction of the 4 lanes into a scalar
vT sum = hsum_avx(sum256d);
if (row_start == start_row_start && !direct)
d_calibrator[tid * stride_vT] += sum; // cross-chunk row: defer to calibrate kernel
else{
if(direct)
d_y[row_start] = sum;
else
d_y[row_start] += sum;
}
}
// CSR5 SpMV main kernel (AVX2): each OpenMP thread processes a static chunk
// of partitions; a partial sum belonging to a row that crosses the chunk
// boundary is parked in d_calibrator and folded in later by
// spmv_csr5_calibrate_kernel.  NOTE(review): 'alpha' and 'd_row_pointer'
// are accepted but not used in this kernel.
template<typename iT, typename uiT, typename vT>
void spmv_csr5_compute_kernel(const iT *d_column_index,
const vT *d_value,
const iT *d_row_pointer,
const vT *d_x,
const uiT *d_partition_pointer,
const uiT *d_partition_descriptor,
const iT *d_partition_descriptor_offset_pointer,
const iT *d_partition_descriptor_offset,
vT *d_calibrator,
vT *d_y,
const iT p,
const int num_packet,
const int bit_y_offset,
const int bit_scansum_offset,
const vT alpha,
const int c_sigma)
{
const int num_thread = omp_get_max_threads();
const int chunk = ceil((double)(p-1) / (double)num_thread);
const int stride_vT = ANONYMOUSLIB_X86_CACHELINE / sizeof(vT);
const int num_thread_active = ceil((p-1.0)/chunk);
#pragma omp parallel
{
int tid = omp_get_thread_num();
// first row of this thread's first partition (bit 31 is a flag, masked off)
iT start_row_start = tid < num_thread_active ? d_partition_pointer[tid * chunk] & 0x7FFFFFFF : 0;
vT s_sum[8]; // allocate a cache line
vT s_first_sum[8]; // allocate a cache line
uint64_t s_cond[8]; // allocate a cache line
int s_y_idx[16]; // allocate a cache line
int inc0, inc1, inc2, inc3;
vT x256d0, x256d1, x256d2, x256d3;
__m128i *d_column_index_partition128i;
__m128i *d_partition_descriptor128i;
__m256d sum256d = _mm256_setzero_pd();
__m256d tmp_sum256d = _mm256_setzero_pd();
__m256d first_sum256d = _mm256_setzero_pd();
__m256d last_sum256d = _mm256_setzero_pd();
__m128i scansum_offset128i, y_offset128i, y_idx128i;
__m256i start256i;
__m256i stop256i = _mm256_setzero_si256();
__m256d value256d, x256d;
__m256i local_bit256i;
__m256i direct256i;
__m128i descriptor128i;
__m256i tmp256i;
#pragma omp for schedule(static, chunk)
for (int par_id = 0; par_id < p - 1; par_id++)
{
const iT *d_column_index_partition = &d_column_index[par_id * ANONYMOUSLIB_CSR5_OMEGA * c_sigma];
const vT *d_value_partition = &d_value[par_id * ANONYMOUSLIB_CSR5_OMEGA * c_sigma];
uiT row_start = d_partition_pointer[par_id];
const iT row_stop = d_partition_pointer[par_id + 1] & 0x7FFFFFFF;
if (row_start == row_stop) // fast track through reduction
{
// check whether the partition contains the first element of row "row_start"
// => we are the first writing data to d_y[row_start]
bool fast_direct = (d_partition_descriptor[par_id * ANONYMOUSLIB_CSR5_OMEGA * num_packet] >>
(31 - (bit_y_offset + bit_scansum_offset)) & 0x1);
partition_fast_track<iT, vT>
(d_value_partition, d_x, d_column_index_partition,
d_calibrator, d_y, row_start, par_id, tid, start_row_start, alpha, c_sigma, stride_vT, fast_direct);
}
else // normal track for all the other partitions
{
const bool empty_rows = (row_start >> 31) & 0x1;
row_start &= 0x7FFFFFFF;
vT *d_y_local = &d_y[row_start+1];
const int offset_pointer = empty_rows ? d_partition_descriptor_offset_pointer[par_id] : 0;
d_column_index_partition128i = (__m128i *)d_column_index_partition;
d_partition_descriptor128i = (__m128i *)&d_partition_descriptor[par_id * ANONYMOUSLIB_CSR5_OMEGA * num_packet];
first_sum256d = _mm256_setzero_pd();
stop256i = _mm256_setzero_si256();
// unpack the per-lane y_offset / scansum_offset bit fields from the descriptor
descriptor128i = _mm_load_si128(d_partition_descriptor128i);
y_offset128i = _mm_srli_epi32(descriptor128i, 32 - bit_y_offset);
scansum_offset128i = _mm_slli_epi32(descriptor128i, bit_y_offset);
scansum_offset128i = _mm_srli_epi32(scansum_offset128i, 32 - bit_scansum_offset);
descriptor128i = _mm_slli_epi32(descriptor128i, bit_y_offset + bit_scansum_offset);
// remember if the first element of this partition is the first element of a new row
local_bit256i = _mm256_cvtepu32_epi64(_mm_srli_epi32(descriptor128i, 31));
bool first_direct = false;
_mm256_store_si256((__m256i *)s_cond, local_bit256i);
if(s_cond[0])
first_direct = true;
// remember if the first element of the first partition of the current thread is the first element of a new row
bool first_all_direct = false;
if(par_id == tid * chunk)
first_all_direct = first_direct;
descriptor128i = _mm_or_si128(descriptor128i, _mm_set_epi32(0, 0, 0, 0x80000000));
local_bit256i = _mm256_cvtepu32_epi64(_mm_srli_epi32(descriptor128i, 31));
start256i = _mm256_sub_epi64(_mm256_set1_epi64x(0x1), local_bit256i);
direct256i = _mm256_and_si256(local_bit256i, _mm256_set_epi64x(0x1, 0x1, 0x1, 0));
// first SIMD row: gather 4 x entries and multiply by the 4 matrix values
value256d = _mm256_load_pd(d_value_partition);
x256d0 = d_x[d_column_index_partition[0]];
x256d1 = d_x[d_column_index_partition[1]];
x256d2 = d_x[d_column_index_partition[2]];
x256d3 = d_x[d_column_index_partition[3]];
x256d = _mm256_set_pd(x256d3, x256d2, x256d1, x256d0);
sum256d = _mm256_mul_pd(value256d, x256d);
// step 1. thread-level seg sum
#if ANONYMOUSLIB_CSR5_SIGMA > 23
int ly = 0;
#endif
for (int i = 1; i < ANONYMOUSLIB_CSR5_SIGMA; i++)
{
x256d0 = d_x[d_column_index_partition[i * ANONYMOUSLIB_CSR5_OMEGA]];
x256d1 = d_x[d_column_index_partition[i * ANONYMOUSLIB_CSR5_OMEGA + 1]];
x256d2 = d_x[d_column_index_partition[i * ANONYMOUSLIB_CSR5_OMEGA + 2]];
x256d3 = d_x[d_column_index_partition[i * ANONYMOUSLIB_CSR5_OMEGA + 3]];
x256d = _mm256_set_pd(x256d3, x256d2, x256d1, x256d0);
#if ANONYMOUSLIB_CSR5_SIGMA > 23
// descriptor bits span several 32-bit packets; reload when crossing one
int norm_i = i - (32 - bit_y_offset - bit_scansum_offset);
if (!(ly || norm_i) || (ly && !(norm_i % 32)))
{
ly++;
descriptor128i = _mm_load_si128(&d_partition_descriptor128i[ly]);
}
norm_i = !ly ? i : norm_i;
norm_i = 31 - norm_i % 32;
local_bit256i = _mm256_and_si256(_mm256_cvtepu32_epi64(_mm_srli_epi32(descriptor128i, norm_i)), _mm256_set1_epi64x(0x1));
#else
local_bit256i = _mm256_and_si256(_mm256_cvtepu32_epi64(_mm_srli_epi32(descriptor128i, 31-i)), _mm256_set1_epi64x(0x1));
#endif
// a set bit in any lane means at least one lane starts a new row here
int store_to_offchip = _mm256_testz_si256(local_bit256i, _mm256_set1_epi64x(0xFFFFFFFFFFFFFFFF));
if (!store_to_offchip)
{
y_idx128i = empty_rows ? _mm_i32gather_epi32 (&d_partition_descriptor_offset[offset_pointer], y_offset128i, 4) : y_offset128i;
// mask scatter store
_mm_store_si128((__m128i *)s_y_idx, y_idx128i);
_mm256_store_pd(s_sum, sum256d);
_mm256_store_si256((__m256i *)s_cond, _mm256_and_si256(direct256i, local_bit256i));
inc0 = 0, inc1 = 0, inc2 = 0, inc3 = 0;
if (s_cond[0]) {d_y_local[s_y_idx[0]] = s_sum[0]; inc0 = 1;}
if (s_cond[1]) {d_y_local[s_y_idx[1]] = s_sum[1]; inc1 = 1;}
if (s_cond[2]) {d_y_local[s_y_idx[2]] = s_sum[2]; inc2 = 1;}
if (s_cond[3]) {d_y_local[s_y_idx[3]] = s_sum[3]; inc3 = 1;}
y_offset128i = _mm_add_epi32(y_offset128i, _mm_set_epi32(inc3, inc2, inc1, inc0));
tmp256i = _mm256_andnot_si256(
_mm256_cmpeq_epi64(direct256i, _mm256_set1_epi64x(0x1)),
_mm256_cmpeq_epi64(local_bit256i, _mm256_set1_epi64x(0x1)));
first_sum256d = _mm256_add_pd(
_mm256_and_pd(_mm256_castsi256_pd(_mm256_cmpeq_epi64(tmp256i, _mm256_set1_epi64x(0))), first_sum256d),
_mm256_and_pd(_mm256_castsi256_pd(_mm256_cmpeq_epi64(tmp256i, _mm256_set1_epi64x(0xFFFFFFFFFFFFFFFF))), sum256d));
sum256d = _mm256_and_pd(_mm256_castsi256_pd(_mm256_cmpeq_epi64(local_bit256i, _mm256_set1_epi64x(0))), sum256d);
direct256i = _mm256_or_si256(direct256i, local_bit256i);
stop256i = _mm256_add_epi64(stop256i, local_bit256i);
}
value256d = _mm256_load_pd(&d_value_partition[i * ANONYMOUSLIB_CSR5_OMEGA]);
sum256d = _mm256_fmadd_pd(value256d, x256d, sum256d);
}
// fold the loop tail into the first/last per-lane partial sums
tmp256i = _mm256_cmpeq_epi64(direct256i, _mm256_set1_epi64x(0x1));
first_sum256d = _mm256_and_pd(_mm256_castsi256_pd(tmp256i), first_sum256d);
tmp256i = _mm256_cmpeq_epi64(tmp256i, _mm256_set1_epi64x(0));
first_sum256d = _mm256_add_pd(first_sum256d, _mm256_and_pd(_mm256_castsi256_pd(tmp256i), sum256d));
last_sum256d = sum256d;
tmp256i = _mm256_cmpeq_epi64(start256i, _mm256_set1_epi64x(0x1));
sum256d = _mm256_and_pd(_mm256_castsi256_pd(tmp256i), first_sum256d);
sum256d = _mm256_permute4x64_pd(sum256d, 0xFFFFFF39);
sum256d = _mm256_and_pd(_mm256_castsi256_pd(_mm256_setr_epi64x(0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0x0000000000000000)), sum256d);
tmp_sum256d = sum256d;
// step 2. segmented scan across the four SIMD lanes
sum256d = hscan_avx(sum256d);
scansum_offset128i = _mm_add_epi32(scansum_offset128i, _mm_set_epi32(3, 2, 1, 0));
tmp256i = _mm256_castsi128_si256(scansum_offset128i);
tmp256i = _mm256_permutevar8x32_epi32(tmp256i, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0));
tmp256i = _mm256_add_epi32(tmp256i, tmp256i);
tmp256i = _mm256_add_epi32(tmp256i, _mm256_set_epi32(1, 0, 1, 0, 1, 0, 1, 0));
sum256d = _mm256_sub_pd(_mm256_castsi256_pd(_mm256_permutevar8x32_epi32(_mm256_castpd_si256(sum256d), tmp256i)), sum256d);
sum256d = _mm256_add_pd(sum256d, tmp_sum256d);
tmp256i = _mm256_cmpgt_epi64(start256i, stop256i);
tmp256i = _mm256_cmpeq_epi64(tmp256i, _mm256_set1_epi64x(0));
last_sum256d = _mm256_add_pd(last_sum256d, _mm256_and_pd(_mm256_castsi256_pd(tmp256i), sum256d));
// step 3. scatter the per-lane results to y
y_idx128i = empty_rows ? _mm_i32gather_epi32 (&d_partition_descriptor_offset[offset_pointer], y_offset128i, 4) : y_offset128i;
_mm256_store_si256((__m256i *)s_cond, direct256i);
_mm_store_si128((__m128i *)s_y_idx, y_idx128i);
_mm256_store_pd(s_sum, last_sum256d);
if (s_cond[0]) {d_y_local[s_y_idx[0]] = s_sum[0]; _mm256_store_pd(s_first_sum, first_sum256d);}
if (s_cond[1]) d_y_local[s_y_idx[1]] = s_sum[1];
if (s_cond[2]) d_y_local[s_y_idx[2]] = s_sum[2];
if (s_cond[3]) d_y_local[s_y_idx[3]] = s_sum[3];
// only use calibrator if this partition does not contain the first element of the row "row_start"
if (row_start == start_row_start && !first_all_direct)
d_calibrator[tid * stride_vT] += s_cond[0] ? s_first_sum[0] : s_sum[0];
else{
if(first_direct)
d_y[row_start] = s_cond[0] ? s_first_sum[0] : s_sum[0];
else
d_y[row_start] += s_cond[0] ? s_first_sum[0] : s_sum[0];
}
}
}
}
}
// Fold each thread's deferred partial sum (kept in its calibrator slot)
// back into the y entry of the first row of that thread's first partition.
template<typename iT, typename uiT, typename vT>
void spmv_csr5_calibrate_kernel(const uiT *d_partition_pointer,
                                vT *d_calibrator,
                                vT *d_y,
                                const iT p)
{
    const int num_thread = omp_get_max_threads();
    const int chunk = ceil((double)(p - 1) / (double)num_thread);
    const int stride_vT = ANONYMOUSLIB_X86_CACHELINE / sizeof(vT);
    // number of threads that actually received work under static scheduling
    const int num_thread_active = ceil((p - 1.0) / chunk);
    const int num_cali = (num_thread_active < num_thread) ? num_thread_active
                                                          : num_thread;
    for (int i = 0; i < num_cali; i++)
    {
        // clear bit 31 (the "empty rows" flag) to recover the row index
        d_y[(d_partition_pointer[i * chunk] << 1) >> 1] += d_calibrator[i * stride_vT];
    }
}
// Finish the irregular tail partition with a plain row-by-row CSR SpMV.
// NOTE(review): 'alpha' is accepted but not applied (see the commented-out
// "* alpha" below) -- matches the other kernels.
template<typename iT, typename uiT, typename vT>
void spmv_csr5_tail_partition_kernel(const iT *d_row_pointer,
                                     const iT *d_column_index,
                                     const vT *d_value,
                                     const vT *d_x,
                                     vT *d_y,
                                     const iT tail_partition_start,
                                     const iT p,
                                     const iT m,
                                     const int sigma,
                                     const vT alpha)
{
    const iT index_first_element_tail = (p - 1) * ANONYMOUSLIB_CSR5_OMEGA * sigma;
    #pragma omp parallel for
    for (iT row = tail_partition_start; row < m; row++)
    {
        // the first tail row may be partially covered by the CSR5 body,
        // so its nonzeros start at the tail's first element, not at the
        // row pointer
        const iT start = (row == tail_partition_start)
                             ? (p - 1) * ANONYMOUSLIB_CSR5_OMEGA * sigma
                             : d_row_pointer[row];
        const iT stop = d_row_pointer[row + 1];
        vT sum = 0;
        for (iT idx = start; idx < stop; idx++)
            sum += d_value[idx] * d_x[d_column_index[idx]];// * alpha;
        if (row == tail_partition_start && d_row_pointer[row] != index_first_element_tail)
            d_y[row] += sum;   // accumulate onto the partial result already in y
        else
            d_y[row] = sum;
    }
}
// Top-level CSR5 SpMV driver: run the main compute kernel over the regular
// partitions, fold cross-thread calibrator sums into y, then finish the
// irregular tail partition.  NOTE(review): 'alpha' is passed through but
// not applied by the kernels (see "// * alpha" in the tail kernel).
template<typename ANONYMOUSLIB_IT, typename ANONYMOUSLIB_UIT, typename ANONYMOUSLIB_VT>
int csr5_spmv(const int sigma,
const ANONYMOUSLIB_IT p,
const ANONYMOUSLIB_IT m,
const int bit_y_offset,
const int bit_scansum_offset,
const int num_packet,
const ANONYMOUSLIB_IT *row_pointer,
const ANONYMOUSLIB_IT *column_index,
const ANONYMOUSLIB_VT *value,
const ANONYMOUSLIB_UIT *partition_pointer,
const ANONYMOUSLIB_UIT *partition_descriptor,
const ANONYMOUSLIB_IT *partition_descriptor_offset_pointer,
const ANONYMOUSLIB_IT *partition_descriptor_offset,
ANONYMOUSLIB_VT *calibrator,
const ANONYMOUSLIB_IT tail_partition_start,
const ANONYMOUSLIB_VT alpha,
const ANONYMOUSLIB_VT *x,
ANONYMOUSLIB_VT *y)
{
int err = ANONYMOUSLIB_SUCCESS;
// phase 1: SIMD/OpenMP kernel over the 2-D partitions
spmv_csr5_compute_kernel
<ANONYMOUSLIB_IT, ANONYMOUSLIB_UIT, ANONYMOUSLIB_VT>
(column_index, value, row_pointer, x,
partition_pointer, partition_descriptor,
partition_descriptor_offset_pointer, partition_descriptor_offset,
calibrator, y, p,
num_packet, bit_y_offset, bit_scansum_offset, alpha, sigma);
// phase 2: merge deferred cross-chunk partial sums
spmv_csr5_calibrate_kernel
<ANONYMOUSLIB_IT, ANONYMOUSLIB_UIT, ANONYMOUSLIB_VT>
(partition_pointer, calibrator, y, p);
// phase 3: plain CSR loop over the tail partition
spmv_csr5_tail_partition_kernel
<ANONYMOUSLIB_IT, ANONYMOUSLIB_UIT, ANONYMOUSLIB_VT>
(row_pointer, column_index, value, x, y,
tail_partition_start, p, m, sigma, alpha);
return err;
}
#endif // CSR5_SPMV_AVX2_H
|
twopoint_function_generic.h | #ifndef _TWOPOINT_FUNCTION_GENERIC_H
#define _TWOPOINT_FUNCTION_GENERIC_H
#include "prop_sitematrix_getter.h"
#include "spin_flav_op.h"
CPS_START_NAMESPACE
//Assume form
// coeff * \sum_y e^{i(p_psibar + p_psi)y} Tr{ ( prop1(y,-p_psi;tsrc) )^dag SinkOp prop2(y,p_psibar;tsrc) SrcOp }
//use_opposite_sink_mom optionally flips the sign of the sink momentum to the 'wrong' value - used in testing the flavor projection in the paper
//User is required to provide the propagators with the right momentum
// Computes the two-point function
//   coeff * sum_y e^{i(p_psibar+p_psi)y} Tr[ prop_dag^dag SinkOp prop_undag SrcOp ]
// for every sink time separation, writing the results into row 'tsrc' of
// 'into'.  Partial sums are accumulated per OpenMP thread and then reduced.
template<typename MatrixType>
void twoPointFunctionGeneric(fMatrix<Rcomplex> &into, const int tsrc, const Complex &coeff,
const SrcSnkOp<MatrixType> &sink_op, const SrcSnkOp<MatrixType> &src_op,
const ThreeMomentum &p_psibar, const ThreeMomentum &p_psi,
const PropSiteMatrixGetter &prop_dag, const PropSiteMatrixGetter &prop_undag,
const PropSplane splane = SPLANE_BOUNDARY,
bool use_opposite_sink_mom = false){
ThreeMomentum p_tot_src = p_psibar + p_psi;
ThreeMomentum p_tot_snk = -p_tot_src; //mom_phase computes exp(-p.x)
if(use_opposite_sink_mom) p_tot_snk = -p_tot_snk;
//if(!UniqueID()) printf("Computing 2pt LW with src momentum %s and snk momentum %s\n",p_tot_src.str().c_str(),p_tot_snk.str().c_str());
const int Lt = GJP.TnodeSites()*GJP.Tnodes();
//#define TWOPT_TEST
#ifndef TWOPT_TEST
// one accumulator column per thread to avoid a critical section
const int nthread = omp_get_max_threads();
basicComplexArray<Rcomplex> tmp(Lt,nthread); //defaults to zero for all elements
#else
basicComplexArray<Rcomplex> tmp(Lt,1);
#endif
int vol3d = GJP.VolNodeSites()/GJP.TnodeSites();
#pragma omp parallel for
for(int x=0;x<GJP.VolNodeSites();x++){
// decompose the linear site index into 4-d lattice coordinates
int pos[4];
int rem = x;
for(int i=0;i<4;i++){ pos[i] = rem % GJP.NodeSites(i); rem /= GJP.NodeSites(i); }
int x3d_lcl = x % vol3d;
int t_glb = pos[3] + GJP.TnodeCoor() * GJP.TnodeSites();
int tdis_glb = (t_glb - tsrc + Lt) % Lt; //t_glb = 0 .. Lt-1 -> tdis_glb = Lt-tsrc .. Lt-1, 0 .. Lt-tsrc-1
//Actually getting the prop with the chosen tdis_glb depends on the periodicity of the propagator in question: cf propwrapper.h
MatrixType prop1_site;
prop_dag.siteMatrix(prop1_site,x3d_lcl,tdis_glb,splane);
prop1_site.hconj();
sink_op.rightMultiply(prop1_site);
MatrixType prop2_site;
prop_undag.siteMatrix(prop2_site,x3d_lcl,tdis_glb,splane);
src_op.rightMultiply(prop2_site);
std::complex<double> phase = coeff * mom_phase(p_tot_snk, pos);
#ifdef TWOPT_TEST
# pragma omp critical
{
tmp[tdis_glb] += phase * Trace(prop1_site, prop2_site);
}
#else
tmp(tdis_glb, omp_get_thread_num()) += phase * Trace(prop1_site, prop2_site);
#endif
}
#ifndef TWOPT_TEST
// reduce the per-thread columns, then sum over MPI nodes
tmp.threadSum();
#endif
tmp.nodeSum();
for(int tdis=0;tdis<Lt;tdis++)
into(tsrc, tdis) = tmp[tdis];
}
// Wall-sink variant of the two-point function: wall-sink propagators are
// already volume-summed and available for every timeslice on every node,
// so the loop runs over time separations only and no nodeSum is needed.
template<typename MatrixType>
void twoPointFunctionWallSinkGeneric(fMatrix<Rcomplex> &into, const int tsrc, const Complex &coeff,
const SrcSnkOp<MatrixType> &sink_op, const SrcSnkOp<MatrixType> &src_op,
const WallSinkPropSiteMatrixGetter<MatrixType> &prop1W, const WallSinkPropSiteMatrixGetter<MatrixType> &prop2W){
  const int Lt = GJP.TnodeSites()*GJP.Tnodes();
  basicComplexArray<Rcomplex> tmp(Lt,1);
  //WallSinkProp are available for all times on every node, so no need to nodeSum
  //BUG FIX: the pragma was spelled "#pragma omp_parallel for", which
  //compilers ignore as an unknown pragma, so the loop silently ran serially.
  //Each iteration writes only its own tmp[t_dis], so the loop is safe to
  //parallelize.
#pragma omp parallel for
  for(int t_dis=0;t_dis<Lt;t_dis++){
    // tr( [prop1(t)]^dag * SinkOp * prop2(t) * SrcOp ), weighted by coeff
    MatrixType prop1_t;
    prop1W.siteMatrix(prop1_t,t_dis);
    prop1_t.hconj();
    sink_op.rightMultiply(prop1_t);
    MatrixType prop2_t;
    prop2W.siteMatrix(prop2_t,t_dis);
    src_op.rightMultiply(prop2_t);
    tmp[t_dis] = coeff * Trace(prop1_t, prop2_t);
  }
  for(int tdis=0;tdis<Lt;tdis++)
    into(tsrc, tdis) = tmp[tdis];
}
CPS_END_NAMESPACE
#endif
|
XSHA512_fmt_plug.c | /*
* This file is part of John the Ripper password cracker,
* Copyright (c) 2008,2011 by Solar Designer
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_XSHA512;
#elif FMT_REGISTERS_H
john_register_one(&fmt_XSHA512);
#else
#include "sha2.h"
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "simd-intrinsics.h"
#include "rawSHA512_common.h"
#ifdef _OPENMP
#include <omp.h>
#ifdef SIMD_COEF_64
#ifndef OMP_SCALE
#define OMP_SCALE 4096
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 8192
#endif
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "xsha512"
#define FORMAT_NAME "Mac OS X 10.7"
#define ALGORITHM_NAME "SHA512 " SHA512_ALGORITHM_NAME
#define PLAINTEXT_LENGTH 107
#define SALT_SIZE 4
#define SALT_ALIGN sizeof(ARCH_WORD_32)
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#if ARCH_BITS >= 64 || defined(__SSE2__)
/* 64-bitness happens to correlate with faster memcpy() */
#define PRECOMPUTE_CTX_FOR_SALT
#else
#undef PRECOMPUTE_CTX_FOR_SALT
#endif
#define BINARY_SIZE DIGEST_SIZE
#ifdef SIMD_COEF_64
#define GETPOS(i, index) ( (index&(SIMD_COEF_64-1))*8 + ((i)&(0xffffffff-7))*SIMD_COEF_64 + (7-((i)&7)) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64*8 )
static ARCH_WORD_64 (*saved_key)[SHA_BUF_SIZ*MAX_KEYS_PER_CRYPT];
static ARCH_WORD_64 (*crypt_out);
static int max_keys;
#else
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int (*saved_len);
static ARCH_WORD_32 (*crypt_out)[DIGEST_SIZE/sizeof(ARCH_WORD_32)];
#ifdef PRECOMPUTE_CTX_FOR_SALT
static SHA512_CTX ctx_salt;
#else
static ARCH_WORD_32 saved_salt;
#endif
#endif
/* Allocate the per-run buffers for this format. With OpenMP the key-range
 * limits are scaled by thread count (times OMP_SCALE for max) so each thread
 * gets a decent chunk of work per crypt_all() call. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
#ifdef SIMD_COEF_64
#ifndef _OPENMP
int omp_t = 1;
#endif
/* SIMD layout: one interleaved key block per thread, SIMD-aligned;
 * crypt_out holds 8 64-bit words (one SHA-512 digest) per candidate. */
saved_key = mem_calloc_align(omp_t, sizeof(*saved_key), MEM_ALIGN_SIMD);
crypt_out = mem_calloc_align(self->params.max_keys_per_crypt,
8 * sizeof(ARCH_WORD_64), MEM_ALIGN_SIMD);
max_keys = self->params.max_keys_per_crypt;
#else
/* Scalar layout: one plaintext buffer, length, and digest per candidate. */
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
saved_len = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_len));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
#endif
}
/* Release everything init() allocated (saved_len only exists in the
 * scalar build; MEM_FREE is a no-op on NULL). */
static void done(void)
{
MEM_FREE(crypt_out);
#ifndef SIMD_COEF_64
MEM_FREE(saved_len);
#endif
MEM_FREE(saved_key);
}
/* Decode the 4-byte hex salt that follows the XSHA512 tag in the
 * ciphertext into raw bytes. Returns a pointer to static storage
 * (standard JtR convention; valid until the next call). */
static void *get_salt(char *ciphertext)
{
	static union {
		unsigned char c[SALT_SIZE];
		ARCH_WORD_32 dummy; /* force 32-bit alignment for salt_hash() */
	} buf;
	const char *hex = ciphertext + XSHA512_TAG_LENGTH;
	int i;

	for (i = 0; i < SALT_SIZE; i++) {
		buf.c[i] = (atoi16[ARCH_INDEX(hex[2 * i])] << 4) |
		            atoi16[ARCH_INDEX(hex[2 * i + 1])];
	}
	return buf.c;
}
#ifdef SIMD_COEF_64
/* Index of the first 64-bit digest word for candidate `index` inside the
 * interleaved SIMD crypt_out layout. */
#define HASH_IDX (((unsigned int)index&(SIMD_COEF_64-1))+(unsigned int)index/SIMD_COEF_64*8*SIMD_COEF_64)
/* Progressively wider hash-table lookups over the first digest word. */
static int get_hash_0 (int index) { return crypt_out[HASH_IDX] & PH_MASK_0; }
static int get_hash_1 (int index) { return crypt_out[HASH_IDX] & PH_MASK_1; }
static int get_hash_2 (int index) { return crypt_out[HASH_IDX] & PH_MASK_2; }
static int get_hash_3 (int index) { return crypt_out[HASH_IDX] & PH_MASK_3; }
static int get_hash_4 (int index) { return crypt_out[HASH_IDX] & PH_MASK_4; }
static int get_hash_5 (int index) { return crypt_out[HASH_IDX] & PH_MASK_5; }
static int get_hash_6 (int index) { return crypt_out[HASH_IDX] & PH_MASK_6; }
#else
/* Scalar layout: crypt_out[index] is a flat digest; mask its first word. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
#endif
static int salt_hash(void *salt)
{
return *(ARCH_WORD_32 *)salt & (SALT_HASH_SIZE - 1);
}
/* Install the current 4-byte salt. Scalar build: either pre-hash it into
 * ctx_salt (so crypt_all can clone the context) or just remember it.
 * SIMD build: write the salt bytes into offsets 0..3 of every candidate's
 * interleaved key buffer; set_key() then appends plaintext from offset 4. */
static void set_salt(void *salt)
{
#ifndef SIMD_COEF_64
#ifdef PRECOMPUTE_CTX_FOR_SALT
SHA512_Init(&ctx_salt);
SHA512_Update(&ctx_salt, salt, SALT_SIZE);
#else
saved_salt = *(ARCH_WORD_32 *)salt;
#endif
#else
int i;
unsigned char *wucp = (unsigned char*)saved_key;
for (i = 0; i < max_keys; ++i) {
wucp[GETPOS(0, i)] = ((char*)salt)[0];
wucp[GETPOS(1, i)] = ((char*)salt)[1];
wucp[GETPOS(2, i)] = ((char*)salt)[2];
wucp[GETPOS(3, i)] = ((char*)salt)[3];
}
#endif
}
/* Store candidate plaintext `key` for slot `index`.
 * Scalar build: truncate to PLAINTEXT_LENGTH and copy.
 * SIMD build: append the key after the 4 salt bytes already loaded by
 * set_salt(), in the big-endian interleaved SHA-512 buffer, place the 0x80
 * padding byte right after the last key byte, zero the rest of the buffer,
 * and store the total bit length in word 15.
 * NOTE(review): the SIMD path assumes set_salt() ran first so bytes 0..3
 * hold the salt — the format call order appears to guarantee this; confirm. */
static void set_key(char *key, int index)
{
#ifndef SIMD_COEF_64
int length = strlen(key);
if (length > PLAINTEXT_LENGTH)
length = PLAINTEXT_LENGTH;
saved_len[index] = length;
memcpy(saved_key[index], key, length);
#else
/* keybuffer points at this candidate's first 64-bit lane word. */
ARCH_WORD_64 *keybuffer = &((ARCH_WORD_64 *)saved_key)[(index&(SIMD_COEF_64-1)) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64];
ARCH_WORD_64 *keybuf_word = keybuffer;
unsigned int len;
ARCH_WORD_64 temp;
unsigned char *wucp = (unsigned char*)saved_key;
// ok, first 4 bytes (if there are that many or more), we handle one offs.
// this is because we already have 4 byte salt loaded into our saved_key.
// IF there are more bytes of password, we drop into the multi loader.
#if ARCH_ALLOWS_UNALIGNED
const ARCH_WORD_64 *wkey = (ARCH_WORD_64*)&(key[4]);
#else
/* On alignment-strict CPUs, bounce the tail of the key through an
 * aligned scratch buffer before reading it 8 bytes at a time. */
char buf_aligned[PLAINTEXT_LENGTH + 1] JTR_ALIGN(sizeof(uint64_t));
const ARCH_WORD_64 *wkey = is_aligned(key + 4, sizeof(uint64_t)) ?
(ARCH_WORD_64*)(key + 4) : (ARCH_WORD_64*)buf_aligned;
if ((char *)wkey == buf_aligned && strlen(key) >= 4)
strcpy(buf_aligned, key + 4);
#endif
/* len starts at 4 to account for the salt; 0x80 is the SHA-2 padding
 * byte that must directly follow the message. */
len = 4;
if (key[0] == 0) {wucp[GETPOS(4, index)] = 0x80; wucp[GETPOS(5, index)] = wucp[GETPOS(6, index)] = wucp[GETPOS(7, index)] = 0; goto key_cleaning; }
wucp[GETPOS(4, index)] = key[0];
++len;
if (key[1] == 0) {wucp[GETPOS(5, index)] = 0x80; wucp[GETPOS(6, index)] = wucp[GETPOS(7, index)] = 0; goto key_cleaning; }
wucp[GETPOS(5, index)] = key[1];
++len;
if (key[2] == 0) {wucp[GETPOS(6, index)] = 0x80; wucp[GETPOS(7, index)] = 0; goto key_cleaning; }
wucp[GETPOS(6, index)] = key[2];
++len;
if (key[3] == 0) {wucp[GETPOS(7, index)] = 0x80; goto key_cleaning; }
wucp[GETPOS(7, index)] = key[3];
++len;
/* Bulk loader: read the key 8 bytes at a time, byte-swapping into the
 * big-endian lane word; on finding the terminating NUL inside a word,
 * merge the 0x80 padding into the same word and stop. */
keybuf_word += SIMD_COEF_64;
while((unsigned char)(temp = *wkey++)) {
if (!(temp & 0xff00))
{
*keybuf_word = JOHNSWAP64((temp & 0xff) | (0x80 << 8));
len++;
goto key_cleaning;
}
if (!(temp & 0xff0000))
{
*keybuf_word = JOHNSWAP64((temp & 0xffff) | (0x80 << 16));
len+=2;
goto key_cleaning;
}
if (!(temp & 0xff000000))
{
*keybuf_word = JOHNSWAP64((temp & 0xffffff) | (0x80ULL << 24));
len+=3;
goto key_cleaning;
}
if (!(temp & 0xff00000000ULL))
{
*keybuf_word = JOHNSWAP64((temp & 0xffffffff) | (0x80ULL << 32));
len+=4;
goto key_cleaning;
}
if (!(temp & 0xff0000000000ULL))
{
*keybuf_word = JOHNSWAP64((temp & 0xffffffffffULL) | (0x80ULL << 40));
len+=5;
goto key_cleaning;
}
if (!(temp & 0xff000000000000ULL))
{
*keybuf_word = JOHNSWAP64((temp & 0xffffffffffffULL) | (0x80ULL << 48));
len+=6;
goto key_cleaning;
}
if (!(temp & 0xff00000000000000ULL))
{
*keybuf_word = JOHNSWAP64((temp & 0xffffffffffffffULL) | (0x80ULL << 56));
len+=7;
goto key_cleaning;
}
*keybuf_word = JOHNSWAP64(temp);
len += 8;
keybuf_word += SIMD_COEF_64;
}
/* Key ended exactly on a word boundary: padding byte goes in alone. */
*keybuf_word = 0x8000000000000000ULL;
key_cleaning:
/* Zero the leftovers of any longer previous key in this slot. */
keybuf_word += SIMD_COEF_64;
while(*keybuf_word) {
*keybuf_word = 0;
keybuf_word += SIMD_COEF_64;
}
/* SHA-512 stores the message length in bits in the last buffer word. */
keybuffer[15*SIMD_COEF_64] = len << 3;
#endif
}
/* Return the plaintext for slot `index` (for display/logging).
 * SIMD build: reconstruct it from the interleaved buffer — the stored bit
 * length (word 15) minus the salt gives the key length, and bytes are read
 * back through GETPOS starting after the 4 salt bytes. Returns a pointer
 * to static storage, valid until the next call. */
static char *get_key(int index)
{
#ifndef SIMD_COEF_64
saved_key[index][saved_len[index]] = 0;
return saved_key[index];
#else
static unsigned char key[PLAINTEXT_LENGTH+1];
int i;
unsigned char *wucp = (unsigned char*)saved_key;
ARCH_WORD_64 *keybuffer = &((ARCH_WORD_64*)saved_key)[(index&(SIMD_COEF_64-1)) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64];
int len = (keybuffer[15*SIMD_COEF_64] >> 3) - SALT_SIZE;
for (i = 0; i < len; ++i)
key[i] = wucp[GETPOS(SALT_SIZE + i, index)];
key[i] = 0;
return (char*)key;
#endif
}
/* Hash all `*pcount` loaded candidates: SHA512(salt . key) into crypt_out.
 * SIMD build processes MAX_KEYS_PER_CRYPT candidates per iteration with the
 * interleaved SIMD body; scalar build hashes one at a time, cloning the
 * precomputed salted context when PRECOMPUTE_CTX_FOR_SALT is set. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index;
#ifdef _OPENMP
#ifndef SIMD_COEF_64
/* default(none) forces every shared global to be listed explicitly. */
#ifdef PRECOMPUTE_CTX_FOR_SALT
#pragma omp parallel for default(none) private(index) shared(ctx_salt, saved_key, saved_len, crypt_out)
#else
#pragma omp parallel for default(none) private(index) shared(saved_salt, saved_key, saved_len, crypt_out)
#endif
#else
#pragma omp parallel for
#endif
#endif
for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) {
#ifdef SIMD_COEF_64
SIMDSHA512body(&saved_key[index/MAX_KEYS_PER_CRYPT], &crypt_out[HASH_IDX], NULL, SSEi_MIXED_IN);
#else
SHA512_CTX ctx;
#ifdef PRECOMPUTE_CTX_FOR_SALT
/* Cheap clone of the already-salted context instead of re-hashing. */
memcpy(&ctx, &ctx_salt, sizeof(ctx));
#else
SHA512_Init(&ctx);
SHA512_Update(&ctx, &saved_salt, SALT_SIZE);
#endif
SHA512_Update(&ctx, saved_key[index], saved_len[index]);
SHA512_Final((unsigned char *)(crypt_out[index]), &ctx);
#endif
}
return count;
}
/* Quick scan: does ANY computed hash match the target binary's first word?
 * A full comparison is deferred to cmp_one()/cmp_exact().
 * FIX: `index` was `unsigned int`, causing a mixed signed/unsigned
 * comparison against the signed `count` (and inconsistency with the sibling
 * cmp_one/get_hash functions, which take a signed index). */
static int cmp_all(void *binary, int count)
{
	int index;

	for (index = 0; index < count; index++)
#ifdef SIMD_COEF_64
		if (((ARCH_WORD_64 *)binary)[0] == crypt_out[HASH_IDX])
#else
		if (((ARCH_WORD_32 *)binary)[0] == crypt_out[index][0])
#endif
			return 1;
	return 0;
}
/* Full digest comparison for one candidate. In the SIMD build the digest
 * words are strided by SIMD_COEF_64 in the interleaved layout; scalar build
 * is a flat memcmp. */
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_64
int i;
for (i = 0; i < BINARY_SIZE/sizeof(ARCH_WORD_64); i++)
if (((ARCH_WORD_64*) binary)[i] != crypt_out[HASH_IDX + i*SIMD_COEF_64])
return 0;
return 1;
#else
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
#endif
}
/* cmp_one() already compared the full digest, so nothing is left to
 * verify here; always report a confirmed match. */
static int cmp_exact(char *source, int index)
{
return 1;
}
/* Format registration record: parameters block first, then the method
 * table wiring this plugin's functions into the cracker core. */
struct fmt_main fmt_XSHA512 = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
XSHA512_BENCHMARK_LENGTH,
0, /* tunable cost count */
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{ NULL }, /* tunable cost names */
{ FORMAT_TAG },
sha512_common_tests_xsha512
}, {
init,
done,
fmt_default_reset,
/* parsing/normalization shared with the other SHA-512 formats */
sha512_common_prepare_xsha512,
sha512_common_valid_xsha512,
sha512_common_split_xsha512,
sha512_common_binary_xsha512,
get_salt,
{ NULL }, /* tunable cost extraction */
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
NULL, /* salt_compare */
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
vector.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Member functions for hypre_Vector class.
*
*****************************************************************************/
#include "seq_mv.h"
#include "_hypre_utilities.hpp" //RL: TODO vector_device.c, include cuda there
/*--------------------------------------------------------------------------
* hypre_SeqVectorCreate
*--------------------------------------------------------------------------*/
hypre_Vector *
hypre_SeqVectorCreate( HYPRE_Int size )
{
   /* Allocate the header on the host and fill in single-vector defaults;
    * the data array itself is allocated later by Initialize. */
   hypre_Vector *v = hypre_CTAlloc(hypre_Vector, 1, HYPRE_MEMORY_HOST);

   hypre_VectorData(v)                  = NULL;
   hypre_VectorSize(v)                  = size;
   hypre_VectorNumVectors(v)            = 1;
   hypre_VectorMultiVecStorageMethod(v) = 0;
   hypre_VectorOwnsData(v)              = 1;

   /* Inherit the global default memory location from the hypre handle. */
   hypre_VectorMemoryLocation(v) = hypre_HandleMemoryLocation(hypre_handle());

   return v;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultiVectorCreate
*--------------------------------------------------------------------------*/
hypre_Vector *
hypre_SeqMultiVectorCreate( HYPRE_Int size, HYPRE_Int num_vectors )
{
   /* A multivector is an ordinary vector header with the count overridden. */
   hypre_Vector *mv = hypre_SeqVectorCreate(size);

   hypre_VectorNumVectors(mv) = num_vectors;

   return mv;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorDestroy
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorDestroy( hypre_Vector *vector )
{
   /* NULL is tolerated: nothing to free. */
   if (!vector)
   {
      return 0;
   }

   /* Only free the data array if this vector owns it (shallow clones
    * share data and must not free it). */
   if (hypre_VectorOwnsData(vector))
   {
      hypre_TFree(hypre_VectorData(vector), hypre_VectorMemoryLocation(vector));
   }
   hypre_TFree(vector, HYPRE_MEMORY_HOST);

   return 0;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorInitialize
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorInitialize_v2( hypre_Vector *vector, HYPRE_MemoryLocation memory_location )
{
   /* Allocate (if needed) the data array in `memory_location` and set the
    * strides for the chosen multivector storage scheme.
    * Caveat: pre-existing data must already live in `memory_location`,
    * otherwise later use/free will mismatch. */
   HYPRE_Int num_vectors = hypre_VectorNumVectors(vector);
   HYPRE_Int size        = hypre_VectorSize(vector);
   HYPRE_Int ierr        = 0;

   hypre_VectorMemoryLocation(vector) = memory_location;

   if (hypre_VectorData(vector) == NULL)
   {
      hypre_VectorData(vector) =
         hypre_CTAlloc(HYPRE_Complex, num_vectors * size, memory_location);
   }

   switch (hypre_VectorMultiVecStorageMethod(vector))
   {
      case 0: /* contiguous: each component vector occupies a block */
         hypre_VectorVectorStride(vector) = size;
         hypre_VectorIndexStride(vector)  = 1;
         break;

      case 1: /* interleaved: components of one index are adjacent */
         hypre_VectorVectorStride(vector) = 1;
         hypre_VectorIndexStride(vector)  = num_vectors;
         break;

      default: /* unknown storage method */
         ++ierr;
         break;
   }

   return ierr;
}
HYPRE_Int
hypre_SeqVectorInitialize( hypre_Vector *vector )
{
   /* Initialize in the memory location already recorded on the vector. */
   return hypre_SeqVectorInitialize_v2(vector, hypre_VectorMemoryLocation(vector));
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorSetDataOwner
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorSetDataOwner( hypre_Vector *vector,
                             HYPRE_Int     owns_data )
{
   /* Flag whether Destroy should free the data array. */
   hypre_VectorOwnsData(vector) = owns_data;

   return 0;
}
/*--------------------------------------------------------------------------
* ReadVector
*--------------------------------------------------------------------------*/
hypre_Vector *
hypre_SeqVectorRead( char *file_name )
{
   /* Read an ASCII vector file: first line is the size, followed by one
    * value per line. Returns a new host vector, or NULL if the file cannot
    * be opened. Multivector files are not supported. */
   hypre_Vector  *vector;
   FILE          *fp;
   HYPRE_Complex *data;
   HYPRE_Int      size;
   HYPRE_Int      j;

   fp = fopen(file_name, "r");
   /* BUGFIX: fopen() was previously unchecked; a missing file caused a
    * NULL FILE* dereference in hypre_fscanf. */
   if (!fp)
   {
      return NULL;
   }

   hypre_fscanf(fp, "%d", &size);

   vector = hypre_SeqVectorCreate(size);

   hypre_VectorMemoryLocation(vector) = HYPRE_MEMORY_HOST;

   hypre_SeqVectorInitialize(vector);

   data = hypre_VectorData(vector);
   for (j = 0; j < size; j++)
   {
      hypre_fscanf(fp, "%le", &data[j]);
   }

   fclose(fp);

   /* multivector code not written yet */
   hypre_assert( hypre_VectorNumVectors(vector) == 1 );

   return vector;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorPrint
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorPrint( hypre_Vector *vector,
                      char         *file_name )
{
   /* Write the vector to an ASCII file: a size header, then one value per
    * line (real and imaginary parts when built with HYPRE_COMPLEX).
    * Multivectors are written one component vector at a time using the
    * stored strides. Returns nonzero if the file cannot be opened. */
   FILE          *fp;
   HYPRE_Complex *data;
   HYPRE_Int      size, num_vectors, vecstride, idxstride;
   HYPRE_Int      i, j;
   HYPRE_Complex  value;
   HYPRE_Int      ierr = 0;

   num_vectors = hypre_VectorNumVectors(vector);
   vecstride   = hypre_VectorVectorStride(vector);
   idxstride   = hypre_VectorIndexStride(vector);

   data = hypre_VectorData(vector);
   size = hypre_VectorSize(vector);

   fp = fopen(file_name, "w");
   /* BUGFIX: fopen() was previously unchecked; writing through a NULL
    * FILE* is undefined behavior. */
   if (!fp)
   {
      return ++ierr;
   }

   if ( hypre_VectorNumVectors(vector) == 1 )
   {
      hypre_fprintf(fp, "%d\n", size);
   }
   else
   {
      hypre_fprintf(fp, "%d vectors of size %d\n", num_vectors, size );
   }

   if ( num_vectors>1 )
   {
      for ( j=0; j<num_vectors; ++j )
      {
         hypre_fprintf(fp, "vector %d\n", j );
         for (i = 0; i < size; i++)
         {
            value = data[ j*vecstride + i*idxstride ];
#ifdef HYPRE_COMPLEX
            hypre_fprintf(fp, "%.14e , %.14e\n",
                          hypre_creal(value), hypre_cimag(value));
#else
            hypre_fprintf(fp, "%.14e\n", value);
#endif
         }
      }
   }
   else
   {
      for (i = 0; i < size; i++)
      {
#ifdef HYPRE_COMPLEX
         hypre_fprintf(fp, "%.14e , %.14e\n",
                       hypre_creal(data[i]), hypre_cimag(data[i]));
#else
         hypre_fprintf(fp, "%.14e\n", data[i]);
#endif
      }
   }

   fclose(fp);

   return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorSetConstantValues
*--------------------------------------------------------------------------*/
/* Set every entry of v (all component vectors) to `value`, on whichever
 * backend the library was built for: thrust fill on CUDA, OpenMP target
 * offload, host OpenMP, or a plain serial loop. */
HYPRE_Int
hypre_SeqVectorSetConstantValues( hypre_Vector *v,
HYPRE_Complex value )
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif
HYPRE_Complex *vector_data = hypre_VectorData(v);
HYPRE_Int size = hypre_VectorSize(v);
HYPRE_Int ierr = 0;
/* total entry count across all component vectors */
size *= hypre_VectorNumVectors(v);
//hypre_SeqVectorPrefetch(v, HYPRE_MEMORY_DEVICE);
#if defined(HYPRE_USING_CUDA)
HYPRE_THRUST_CALL( fill_n, vector_data, size, value );
#else
HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) is_device_ptr(vector_data)
#elif defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
vector_data[i] = value;
}
#endif /* defined(HYPRE_USING_CUDA) */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
hypre_SyncCudaComputeStream(hypre_handle());
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorSetRandomValues
*
* returns vector of values randomly distributed between -1.0 and +1.0
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorSetRandomValues( hypre_Vector *v,
                                HYPRE_Int     seed )
{
   /* Fill v with deterministic pseudo-random values in (-1, 1) derived
    * from `seed`. Host vectors are filled in place; device vectors are
    * generated on the host and copied over, so the value sequence is
    * identical regardless of memory location. */
   HYPRE_Complex *dest = hypre_VectorData(v);
   HYPRE_Int      n    = hypre_VectorSize(v) * hypre_VectorNumVectors(v);
   HYPRE_Int      i;

   hypre_SeedRand(seed);

   if (hypre_GetActualMemLocation(hypre_VectorMemoryLocation(v)) == hypre_MEMORY_HOST)
   {
      /* RDF: threading this loop may cause problems because of hypre_Rand() */
      for (i = 0; i < n; i++)
      {
         dest[i] = 2.0 * hypre_Rand() - 1.0;
      }
   }
   else
   {
      /* Stage on the host, then copy to the device location. */
      HYPRE_Complex *host_buf = hypre_TAlloc(HYPRE_Complex, n, HYPRE_MEMORY_HOST);

      for (i = 0; i < n; i++)
      {
         host_buf[i] = 2.0 * hypre_Rand() - 1.0;
      }
      hypre_TMemcpy(dest, host_buf, HYPRE_Complex, n,
                    hypre_VectorMemoryLocation(v), HYPRE_MEMORY_HOST);
      hypre_TFree(host_buf, HYPRE_MEMORY_HOST);
   }

   return 0;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorCopy
* copies data from x to y
* if size of x is larger than y only the first size_y elements of x are
* copied to y
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorCopy( hypre_Vector *x,
                     hypre_Vector *y )
{
   /* Copy x into y; if x is longer than y, only the first size(y)
    * entries (per component vector) are copied. Works across memory
    * locations via hypre_TMemcpy. */
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif

   size_t num_elem = (size_t) hypre_VectorNumVectors(x) *
                     hypre_min( hypre_VectorSize(x), hypre_VectorSize(y) );

   hypre_TMemcpy( hypre_VectorData(y), hypre_VectorData(x),
                  HYPRE_Complex, num_elem,
                  hypre_VectorMemoryLocation(y),
                  hypre_VectorMemoryLocation(x) );

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif

   return 0;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorCloneDeep
* Returns a complete copy of x - a deep copy, with its own copy of the data.
*--------------------------------------------------------------------------*/
hypre_Vector*
hypre_SeqVectorCloneDeep_v2( hypre_Vector *x, HYPRE_MemoryLocation memory_location )
{
   /* Deep copy of x into freshly allocated storage at `memory_location`:
    * same size/layout metadata, independent data array. */
   hypre_Vector *clone =
      hypre_SeqMultiVectorCreate( hypre_VectorSize(x), hypre_VectorNumVectors(x) );

   hypre_VectorMultiVecStorageMethod(clone) = hypre_VectorMultiVecStorageMethod(x);
   hypre_VectorVectorStride(clone)          = hypre_VectorVectorStride(x);
   hypre_VectorIndexStride(clone)           = hypre_VectorIndexStride(x);

   hypre_SeqVectorInitialize_v2(clone, memory_location);
   hypre_SeqVectorCopy(x, clone);

   return clone;
}
hypre_Vector*
hypre_SeqVectorCloneDeep( hypre_Vector *x )
{
   /* Deep copy in the same memory location as x. */
   return hypre_SeqVectorCloneDeep_v2( x, hypre_VectorMemoryLocation(x) );
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorCloneShallow
* Returns a complete copy of x - a shallow copy, pointing the data of x
*--------------------------------------------------------------------------*/
hypre_Vector *
hypre_SeqVectorCloneShallow( hypre_Vector *x )
{
   /* Shallow copy: new header sharing x's data array. The clone does NOT
    * own the data, so destroying it leaves x's data intact. */
   hypre_Vector *alias =
      hypre_SeqMultiVectorCreate( hypre_VectorSize(x), hypre_VectorNumVectors(x) );

   hypre_VectorMultiVecStorageMethod(alias) = hypre_VectorMultiVecStorageMethod(x);
   hypre_VectorVectorStride(alias)          = hypre_VectorVectorStride(x);
   hypre_VectorIndexStride(alias)           = hypre_VectorIndexStride(x);
   hypre_VectorMemoryLocation(alias)        = hypre_VectorMemoryLocation(x);

   hypre_VectorData(alias) = hypre_VectorData(x);
   hypre_SeqVectorSetDataOwner(alias, 0);

   /* Data pointer is already set, so this only fixes up the strides. */
   hypre_SeqVectorInitialize(alias);

   return alias;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorScale
*--------------------------------------------------------------------------*/
/* y := alpha * y over all component vectors, dispatched to cuBLAS/thrust,
 * OpenMP target offload, host OpenMP, or a serial loop depending on build. */
HYPRE_Int
hypre_SeqVectorScale( HYPRE_Complex alpha,
hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif
HYPRE_Complex *y_data = hypre_VectorData(y);
HYPRE_Int size = hypre_VectorSize(y);
HYPRE_Int ierr = 0;
/* total entry count across all component vectors */
size *= hypre_VectorNumVectors(y);
//hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);
#if defined(HYPRE_USING_CUDA)
#if defined(HYPRE_USING_CUBLAS)
HYPRE_CUBLAS_CALL( cublasDscal(hypre_HandleCublasHandle(hypre_handle()), size, &alpha, y_data, 1) );
#else
HYPRE_THRUST_CALL( transform, y_data, y_data + size, y_data, alpha * _1 );
#endif
#else
HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) is_device_ptr(y_data)
#elif defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
y_data[i] *= alpha;
}
#endif /* defined(HYPRE_USING_CUDA) */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
hypre_SyncCudaComputeStream(hypre_handle());
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorAxpy
*--------------------------------------------------------------------------*/
/* y := alpha * x + y over all component vectors; backend selected at build
 * time (cuBLAS/thrust, OpenMP offload, host OpenMP, or serial). Assumes x
 * and y have the same size/layout. */
HYPRE_Int
hypre_SeqVectorAxpy( HYPRE_Complex alpha,
hypre_Vector *x,
hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Complex *y_data = hypre_VectorData(y);
HYPRE_Int size = hypre_VectorSize(x);
HYPRE_Int ierr = 0;
/* total entry count across all component vectors */
size *= hypre_VectorNumVectors(x);
//hypre_SeqVectorPrefetch(x, HYPRE_MEMORY_DEVICE);
//hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);
#if defined(HYPRE_USING_CUDA)
#if defined(HYPRE_USING_CUBLAS)
HYPRE_CUBLAS_CALL( cublasDaxpy(hypre_HandleCublasHandle(hypre_handle()), size, &alpha, x_data, 1, y_data, 1) );
#else
HYPRE_THRUST_CALL( transform, x_data, x_data + size, y_data, y_data, alpha * _1 + _2 );
#endif
#else
HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) is_device_ptr(y_data, x_data)
#elif defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
y_data[i] += alpha * x_data[i];
}
#endif /* defined(HYPRE_USING_CUDA) */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
hypre_SyncCudaComputeStream(hypre_handle());
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorInnerProd
*--------------------------------------------------------------------------*/
/* Return <x, y> = sum_i conj(y_i) * x_i over all component vectors.
 * CUDA builds use cuBLAS ddot or thrust inner_product (real arithmetic
 * only; complex is unimplemented there); otherwise an OpenMP/serial
 * reduction on the host or offload device. */
HYPRE_Real
hypre_SeqVectorInnerProd( hypre_Vector *x,
hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Complex *y_data = hypre_VectorData(y);
HYPRE_Int size = hypre_VectorSize(x);
HYPRE_Real result = 0.0;
/* total entry count across all component vectors */
size *= hypre_VectorNumVectors(x);
//hypre_SeqVectorPrefetch(x, HYPRE_MEMORY_DEVICE);
//hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);
#if defined(HYPRE_USING_CUDA)
#ifndef HYPRE_COMPLEX
#if defined(HYPRE_USING_CUBLAS)
HYPRE_CUBLAS_CALL( cublasDdot(hypre_HandleCublasHandle(hypre_handle()), size, x_data, 1, y_data, 1, &result) );
#else
result = HYPRE_THRUST_CALL( inner_product, x_data, x_data + size, y_data, 0.0 );
#endif
#else
/* TODO */
#error "Complex inner product"
#endif
#else /* #if defined(HYPRE_USING_CUDA) */
HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) reduction(+:result) is_device_ptr(y_data,x_data) map(result)
#elif defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:result) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
result += hypre_conj(y_data[i]) * x_data[i];
}
#endif /* defined(HYPRE_USING_CUDA) */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
hypre_SyncCudaComputeStream(hypre_handle());
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif
return result;
}
//TODO
/*--------------------------------------------------------------------------
* hypre_VectorSumElts:
* Returns the sum of all vector elements.
*--------------------------------------------------------------------------*/
HYPRE_Complex hypre_SeqVectorSumElts( hypre_Vector *vector )
{
   /* Return the sum of all entries (host data; OpenMP reduction when
    * available). NOTE(review): unlike the BLAS-1 routines above, this does
    * not multiply by NumVectors — it sums only the first component vector;
    * confirm that is intended before relying on it for multivectors. */
   HYPRE_Complex *entries = hypre_VectorData(vector);
   HYPRE_Int      count   = hypre_VectorSize(vector);
   HYPRE_Complex  total   = 0;
   HYPRE_Int      i;

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) reduction(+:total) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < count; ++i)
   {
      total += entries[i];
   }

   return total;
}
/* Prefetch x's data toward `memory_location` when built with unified
 * memory; otherwise a no-op. Returns 1 (without error) if the vector is
 * not in device/unified memory. */
HYPRE_Int
hypre_SeqVectorPrefetch( hypre_Vector *x, HYPRE_MemoryLocation memory_location)
{
HYPRE_Int ierr = 0;
#ifdef HYPRE_USING_UNIFIED_MEMORY
if (hypre_VectorMemoryLocation(x) != HYPRE_MEMORY_DEVICE)
{
/* hypre_error_w_msg(HYPRE_ERROR_GENERIC," Error! CUDA Prefetch with non-unified momory\n");*/
return 1;
}
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Int size = hypre_VectorSize(x) * hypre_VectorNumVectors(x);
/* nothing to prefetch for an empty vector */
if (size == 0)
{
return ierr;
}
hypre_MemPrefetch(x_data, sizeof(HYPRE_Complex)*size, memory_location);
#endif
return ierr;
}
#if 0
/* y[i] = max(alpha*x[i], beta*y[i]) */
/* y[i] = max(alpha*x[i], beta*y[i]) elementwise (currently compiled out
 * by the enclosing #if 0). */
HYPRE_Int
hypre_SeqVectorMax( HYPRE_Complex alpha,
                    hypre_Vector *x,
                    HYPRE_Complex beta,
                    hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif

   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int      size   = hypre_VectorSize(x);
   HYPRE_Int      ierr   = 0;

   size *= hypre_VectorNumVectors(x);

   //hypre_SeqVectorPrefetch(x, HYPRE_MEMORY_DEVICE);
   //hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);

#if defined(HYPRE_USING_CUDA)
   /* FIX: thrust::maximum declaration moved inside the CUDA branch — it is
    * C++/thrust-only and previously sat outside the guard. */
   thrust::maximum<HYPRE_Complex> mx;
   HYPRE_THRUST_CALL( transform,
                      thrust::make_transform_iterator(x_data,        alpha * _1),
                      thrust::make_transform_iterator(x_data + size, alpha * _1),
                      thrust::make_transform_iterator(y_data,        beta  * _1),
                      y_data,
                      mx );
#else
   HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
#pragma omp target teams distribute parallel for private(i) is_device_ptr(y_data, x_data)
#elif defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < size; i++)
   {
      /* BUGFIX: was "y_data[i] += hypre_max(...)", which accumulated
       * instead of assigning and contradicted the documented contract
       * y[i] = max(alpha*x[i], beta*y[i]) (and the CUDA path above). */
      y_data[i] = hypre_max(alpha * x_data[i], beta * y_data[i]);
   }
#endif /* defined(HYPRE_USING_CUDA) */

   /* FIX: guard the sync like every other routine in this file; calling it
    * in pure-host builds would not compile/link. */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
   hypre_SyncCudaComputeStream(hypre_handle());
#endif

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif

   return ierr;
}
#endif
|
ccv_bbf.c | #include "ccv.h"
#include "ccv_internal.h"
#include <sys/time.h>
#ifdef HAVE_GSL
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#endif
#ifdef USE_OPENMP
#include <omp.h>
#endif
/* Default detection parameters: 5 scale steps per octave, require 2
 * neighboring detections to confirm, accurate (non-fast) scan mode, no
 * flags, 24x24 base window. */
const ccv_bbf_param_t ccv_bbf_default_params = {
.interval = 5,
.min_neighbors = 2,
.accurate = 1,
.flags = 0,
.size = {
24,
24,
},
};
#define _ccv_width_padding(x) (((x) + 3) & -4)
/* Evaluate one BBF feature: returns 1 iff every "positive" pixel is
 * brighter than every "negative" pixel. u8[0..2] point to the image at
 * three pyramid levels with row strides step[0..2]; pz/nz select the
 * level (negative pz/nz marks an unused slot). Tracks the running min of
 * positive pixels and max of negative pixels so it can bail out early. */
static inline int _ccv_run_bbf_feature(ccv_bbf_feature_t* feature, int* step, unsigned char** u8)
{
#define pf_at(i) (*(u8[feature->pz[i]] + feature->px[i] + feature->py[i] * step[feature->pz[i]]))
#define nf_at(i) (*(u8[feature->nz[i]] + feature->nx[i] + feature->ny[i] * step[feature->nz[i]]))
unsigned char pmin = pf_at(0), nmax = nf_at(0);
/* check if every point in P > every point in N, and take a shortcut */
if (pmin <= nmax)
return 0;
int i;
for (i = 1; i < feature->size; i++)
{
if (feature->pz[i] >= 0)
{
int p = pf_at(i);
if (p < pmin)
{
/* new positive minimum: only it can violate the invariant */
if (p <= nmax)
return 0;
pmin = p;
}
}
if (feature->nz[i] >= 0)
{
int n = nf_at(i);
if (n > nmax)
{
/* new negative maximum: re-check against the positive minimum */
if (pmin <= n)
return 0;
nmax = n;
}
}
}
#undef pf_at
#undef nf_at
return 1;
}
/* Load one stage classifier from a text file: feature count, threshold
 * (float stored as its raw int bits), then per-feature point lists and
 * the alpha pair for each feature. Returns 0 on success, -1 if the file
 * cannot be opened.
 * NOTE(review): fscanf return values are OR'd into `stat` only to consume
 * them — `stat` is never checked, so a truncated/garbled file yields
 * uninitialized fields rather than an error; confirm this is acceptable. */
static int _ccv_read_bbf_stage_classifier(const char* file, ccv_bbf_stage_classifier_t* classifier)
{
FILE* r = fopen(file, "r");
if (r == 0) return -1;
int stat = 0;
stat |= fscanf(r, "%d", &classifier->count);
/* floats are serialized as raw int bit patterns; reinterpret via union */
union { float fl; int i; } fli;
stat |= fscanf(r, "%d", &fli.i);
classifier->threshold = fli.fl;
classifier->feature = (ccv_bbf_feature_t*)ccmalloc(classifier->count * sizeof(ccv_bbf_feature_t));
classifier->alpha = (float*)ccmalloc(classifier->count * 2 * sizeof(float));
int i, j;
for (i = 0; i < classifier->count; i++)
{
stat |= fscanf(r, "%d", &classifier->feature[i].size);
for (j = 0; j < classifier->feature[i].size; j++)
{
stat |= fscanf(r, "%d %d %d", &classifier->feature[i].px[j], &classifier->feature[i].py[j], &classifier->feature[i].pz[j]);
stat |= fscanf(r, "%d %d %d", &classifier->feature[i].nx[j], &classifier->feature[i].ny[j], &classifier->feature[i].nz[j]);
}
union { float fl; int i; } flia, flib;
stat |= fscanf(r, "%d %d", &flia.i, &flib.i);
classifier->alpha[i * 2] = flia.fl;
classifier->alpha[i * 2 + 1] = flib.fl;
}
fclose(r);
return 0;
}
#ifdef HAVE_GSL
/* Wall-clock timestamp in microseconds (used for training progress and
 * as cheap entropy elsewhere). */
static unsigned int _ccv_bbf_time_measure()
{
	struct timeval now;
	gettimeofday(&now, 0);
	return (unsigned int)(now.tv_sec * 1000000 + now.tv_usec);
}
#define less_than(a, b, aux) ((a) < (b))
CCV_IMPLEMENT_QSORT(_ccv_sort_32f, float, less_than)
#undef less_than
/* Score one sample set with a stage classifier: for each sample, sum the
 * alpha weight selected by each feature's boolean response. `data[i]` is a
 * packed 3-level pyramid (level offsets isizs0 / isizs01). Helper factored
 * out of _ccv_bbf_eval_data, which previously duplicated this loop
 * verbatim for the positive and negative sets. */
static void _ccv_bbf_eval_set(ccv_bbf_stage_classifier_t* classifier, unsigned char** data, int num, int* steps, int isizs0, int isizs01, float* eval)
{
	int i, j;
	for (i = 0; i < num; i++)
	{
		unsigned char* u8[] = { data[i], data[i] + isizs0, data[i] + isizs01 };
		float sum = 0;
		float* alpha = classifier->alpha;
		ccv_bbf_feature_t* feature = classifier->feature;
		for (j = 0; j < classifier->count; ++j, alpha += 2, ++feature)
			sum += alpha[_ccv_run_bbf_feature(feature, steps, u8)];
		eval[i] = sum;
	}
}

/* Evaluate the stage classifier over `posnum` positive and `negnum`
 * negative samples of window `size`, writing scores to peval / neval
 * (either set may be empty with its count 0). */
static void _ccv_bbf_eval_data(ccv_bbf_stage_classifier_t* classifier, unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, ccv_size_t size, float* peval, float* neval)
{
	int steps[] = { _ccv_width_padding(size.width),
					_ccv_width_padding(size.width >> 1),
					_ccv_width_padding(size.width >> 2) };
	int isizs0 = steps[0] * size.height;
	int isizs01 = isizs0 + steps[1] * (size.height >> 1);
	_ccv_bbf_eval_set(classifier, posdata, posnum, steps, isizs0, isizs01, peval);
	_ccv_bbf_eval_set(classifier, negdata, negnum, steps, isizs0, isizs01, neval);
}
/* Run every cascade stage over the positive samples and keep only those
 * that pass all thresholds; rejected samples are freed and the surviving
 * ones compacted to the front of posdata. Returns the survivor count. */
static int _ccv_prune_positive_data(ccv_bbf_classifier_cascade_t* cascade, unsigned char** posdata, int posnum, ccv_size_t size)
{
	float* scores = (float*)ccmalloc(posnum * sizeof(float));
	int remaining = posnum;
	int stage, src, dst;
	for (stage = 0; stage < cascade->count; stage++)
	{
		_ccv_bbf_eval_data(cascade->stage_classifier + stage, posdata, remaining, 0, 0, size, scores, 0);
		dst = 0;
		for (src = 0; src < remaining; src++)
		{
			if (scores[src] >= cascade->stage_classifier[stage].threshold)
				posdata[dst++] = posdata[src];
			else
				ccfree(posdata[src]);
		}
		remaining = dst;
	}
	ccfree(scores);
	return remaining;
}
/* Mine hard negative samples: run the current cascade over the background
 * images (with flips for variety) and keep detections that pass every stage
 * (false positives). Each kept sample is packed as three concatenated
 * pyramid levels into negdata[]. Returns the number of negatives collected.
 * Fix: the failed-read check now happens BEFORE the assert that dereferences
 * image — previously a corrupt/unreadable file crashed on a null pointer. */
static int _ccv_prepare_background_data(ccv_bbf_classifier_cascade_t* cascade, char** bgfiles, int bgnum, unsigned char** negdata, int negnum)
{
	int t, i, j, k, q;
	int negperbg = negnum / bgnum + 1;
	int negtotal = 0;
	int steps[] = { _ccv_width_padding(cascade->size.width),
					_ccv_width_padding(cascade->size.width >> 1),
					_ccv_width_padding(cascade->size.width >> 2) };
	int isizs0 = steps[0] * cascade->size.height;
	int isizs1 = steps[1] * (cascade->size.height >> 1);
	int isizs2 = steps[2] * (cascade->size.height >> 2);
	int* idcheck = (int*)ccmalloc(negnum * sizeof(int));
	gsl_rng_env_setup();
	gsl_rng* rng = gsl_rng_alloc(gsl_rng_default);
	/* NOTE(review): seeds from the heap address of idcheck — intentionally
	 * non-deterministic across runs; confirm before making runs reproducible */
	gsl_rng_set(rng, (unsigned long int)idcheck);
	ccv_size_t imgsz = cascade->size;
	int rneg = negtotal;
	for (t = 0; negtotal < negnum; t++)
	{
		printf("preparing negative data ... 0%%");
		for (i = 0; i < bgnum; i++)
		{
			negperbg = (t < 2) ? (negnum - negtotal) / (bgnum - i) + 1 : negnum - negtotal;
			ccv_dense_matrix_t* image = 0;
			ccv_read(bgfiles[i], &image, CCV_IO_GRAY | CCV_IO_ANY_FILE);
			/* check for a failed read before touching image: the original
			 * asserted on image->type first, dereferencing a null pointer
			 * whenever ccv_read could not decode the file */
			if (image == 0)
			{
				printf("\n%s file corrupted\n", bgfiles[i]);
				continue;
			}
			assert((image->type & CCV_C1) && (image->type & CCV_8U));
			/* alternate flips per round so repeated passes see new content */
			if (t % 2 != 0)
				ccv_flip(image, 0, 0, CCV_FLIP_X);
			if (t % 4 >= 2)
				ccv_flip(image, 0, 0, CCV_FLIP_Y);
			ccv_bbf_param_t params = { .interval = 3, .min_neighbors = 0, .accurate = 1, .flags = 0, .size = cascade->size };
			ccv_array_t* detected = ccv_bbf_detect_objects(image, &cascade, 1, params);
			memset(idcheck, 0, ccv_min(detected->rnum, negperbg) * sizeof(int));
			for (j = 0; j < ccv_min(detected->rnum, negperbg); j++)
			{
				/* pick a random detection that is unused so far and fully inside the image */
				int r = gsl_rng_uniform_int(rng, detected->rnum);
				int flag = 1;
				ccv_rect_t* rect = (ccv_rect_t*)ccv_array_get(detected, r);
				while (flag) {
					flag = 0;
					for (k = 0; k < j; k++)
						if (r == idcheck[k])
						{
							flag = 1;
							r = gsl_rng_uniform_int(rng, detected->rnum);
							break;
						}
					rect = (ccv_rect_t*)ccv_array_get(detected, r);
					if ((rect->x < 0) || (rect->y < 0) || (rect->width + rect->x >= image->cols) || (rect->height + rect->y >= image->rows))
					{
						flag = 1;
						r = gsl_rng_uniform_int(rng, detected->rnum);
					}
				}
				idcheck[j] = r;
				/* crop the detection, build its 3-level pyramid, pack into one buffer */
				ccv_dense_matrix_t* temp = 0;
				ccv_dense_matrix_t* imgs0 = 0;
				ccv_dense_matrix_t* imgs1 = 0;
				ccv_dense_matrix_t* imgs2 = 0;
				ccv_slice(image, (ccv_matrix_t**)&temp, 0, rect->y, rect->x, rect->height, rect->width);
				ccv_resample(temp, &imgs0, 0, imgsz.height, imgsz.width, CCV_INTER_AREA);
				assert(imgs0->step == steps[0]);
				ccv_matrix_free(temp);
				ccv_sample_down(imgs0, &imgs1, 0, 0, 0);
				assert(imgs1->step == steps[1]);
				ccv_sample_down(imgs1, &imgs2, 0, 0, 0);
				assert(imgs2->step == steps[2]);
				negdata[negtotal] = (unsigned char*)ccmalloc(isizs0 + isizs1 + isizs2);
				unsigned char* u8s0 = negdata[negtotal];
				unsigned char* u8s1 = negdata[negtotal] + isizs0;
				unsigned char* u8s2 = negdata[negtotal] + isizs0 + isizs1;
				unsigned char* u8[] = { u8s0, u8s1, u8s2 };
				memcpy(u8s0, imgs0->data.u8, imgs0->rows * imgs0->step);
				ccv_matrix_free(imgs0);
				memcpy(u8s1, imgs1->data.u8, imgs1->rows * imgs1->step);
				ccv_matrix_free(imgs1);
				memcpy(u8s2, imgs2->data.u8, imgs2->rows * imgs2->step);
				ccv_matrix_free(imgs2);
				/* keep only samples that pass every stage (true false positives) */
				flag = 1;
				ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier;
				for (k = 0; k < cascade->count; ++k, ++classifier)
				{
					float sum = 0;
					float* alpha = classifier->alpha;
					ccv_bbf_feature_t* feature = classifier->feature;
					for (q = 0; q < classifier->count; ++q, alpha += 2, ++feature)
						sum += alpha[_ccv_run_bbf_feature(feature, steps, u8)];
					if (sum < classifier->threshold)
					{
						flag = 0;
						break;
					}
				}
				if (!flag)
					ccfree(negdata[negtotal]);
				else {
					++negtotal;
					if (negtotal >= negnum)
						break;
				}
			}
			ccv_array_free(detected);
			ccv_matrix_free(image);
			ccv_drain_cache();
			printf("\rpreparing negative data ... %2d%%", 100 * negtotal / negnum);
			fflush(0);
			if (negtotal >= negnum)
				break;
		}
		/* a full pass with no new negatives means the backgrounds are exhausted */
		if (rneg == negtotal)
			break;
		rneg = negtotal;
		printf("\nentering additional round %d\n", t + 1);
	}
	gsl_rng_free(rng);
	ccfree(idcheck);
	ccv_drain_cache();
	printf("\n");
	return negtotal;
}
/* Pack each positive image into one contiguous buffer holding its full,
 * half and quarter resolution pyramid levels back to back. Fills posdata
 * with freshly allocated buffers the caller owns. */
static void _ccv_prepare_positive_data(ccv_dense_matrix_t** posimg, unsigned char** posdata, ccv_size_t size, int posnum)
{
	printf("preparing positive data ... 0%%");
	int idx;
	for (idx = 0; idx < posnum; idx++)
	{
		ccv_dense_matrix_t* level0 = posimg[idx];
		ccv_dense_matrix_t* level1 = 0;
		ccv_dense_matrix_t* level2 = 0;
		assert((level0->type & CCV_C1) && (level0->type & CCV_8U) && level0->rows == size.height && level0->cols == size.width);
		ccv_sample_down(level0, &level1, 0, 0, 0);
		ccv_sample_down(level1, &level2, 0, 0, 0);
		int bytes0 = level0->rows * level0->step;
		int bytes1 = level1->rows * level1->step;
		int bytes2 = level2->rows * level2->step;
		unsigned char* packed = (unsigned char*)ccmalloc(bytes0 + bytes1 + bytes2);
		posdata[idx] = packed;
		memcpy(packed, level0->data.u8, bytes0);
		memcpy(packed + bytes0, level1->data.u8, bytes1);
		memcpy(packed + bytes0 + bytes1, level2->data.u8, bytes2);
		printf("\rpreparing positive data ... %2d%%", 100 * (idx + 1) / posnum);
		fflush(0);
		ccv_matrix_free(level1);
		ccv_matrix_free(level2);
	}
	ccv_drain_cache();
	printf("\n");
}
/* One candidate in the feature-search population. */
typedef struct {
	double fitness; /* selection score combining error, age and point count */
	int pk, nk; /* number of positive / negative points used by the feature */
	int age; /* generations survived without mutation */
	double error; /* weighted classification error on the training samples */
	ccv_bbf_feature_t feature;
} ccv_bbf_gene_t;
/* Fitness favors low error, decays with age, and rewards features that
 * examine more points (1.015^points bonus). */
static inline void _ccv_bbf_genetic_fitness(ccv_bbf_gene_t* gene)
{
	double accuracy = 1 - gene->error;
	double age_decay = exp(-0.01 * gene->age);
	double size_bonus = exp((gene->pk + gene->nk) * log(1.015));
	gene->fitness = accuracy * age_decay * size_bonus;
}
/* Returns 1 iff (x, y, z) already appears among the gene's positive or
 * negative feature points, 0 otherwise. Used to keep sampled points unique. */
static inline int _ccv_bbf_exist_gene_feature(ccv_bbf_gene_t* gene, int x, int y, int z)
{
	int k;
	for (k = 0; k < gene->pk; k++)
		if (gene->feature.pz[k] == z && gene->feature.px[k] == x && gene->feature.py[k] == y)
			return 1;
	for (k = 0; k < gene->nk; k++)
		if (gene->feature.nz[k] == z && gene->feature.nx[k] == x && gene->feature.ny[k] == y)
			return 1;
	return 0;
}
/* Draw a fresh random gene: pick positive/negative point counts, then sample
 * distinct (x, y, z) positions for both point sets. The sequence of RNG
 * draws is identical to the original implementation. */
static inline void _ccv_bbf_randomize_gene(gsl_rng* rng, ccv_bbf_gene_t* gene, int* rows, int* cols)
{
	int k;
	do {
		gene->pk = gsl_rng_uniform_int(rng, CCV_BBF_POINT_MAX - 1) + 1;
		gene->nk = gsl_rng_uniform_int(rng, CCV_BBF_POINT_MAX - 1) + 1;
	} while (gene->pk + gene->nk < CCV_BBF_POINT_MIN); /* a hard restriction of at least 3 points have to be examed */
	gene->feature.size = ccv_max(gene->pk, gene->nk);
	gene->age = 0;
	/* mark all slots unused before filling */
	for (k = 0; k < CCV_BBF_POINT_MAX; k++)
	{
		gene->feature.pz[k] = -1;
		gene->feature.nz[k] = -1;
	}
	int px, py, pz;
	for (k = 0; k < gene->pk; k++)
	{
		do {
			pz = gsl_rng_uniform_int(rng, 3);
			px = gsl_rng_uniform_int(rng, cols[pz]);
			py = gsl_rng_uniform_int(rng, rows[pz]);
		} while (_ccv_bbf_exist_gene_feature(gene, px, py, pz));
		gene->feature.pz[k] = pz;
		gene->feature.px[k] = px;
		gene->feature.py[k] = py;
	}
	for (k = 0; k < gene->nk; k++)
	{
		do {
			pz = gsl_rng_uniform_int(rng, 3);
			px = gsl_rng_uniform_int(rng, cols[pz]);
			py = gsl_rng_uniform_int(rng, rows[pz]);
		} while (_ccv_bbf_exist_gene_feature(gene, px, py, pz));
		gene->feature.nz[k] = pz;
		gene->feature.nx[k] = px;
		gene->feature.ny[k] = py;
	}
}
/* Weighted error of one feature: sum of pw over positives the feature fails
 * to fire on, plus nw over negatives it fires on. */
static inline double _ccv_bbf_error_rate(ccv_bbf_feature_t* feature, unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, ccv_size_t size, double* pw, double* nw)
{
	int steps[] = { _ccv_width_padding(size.width),
					_ccv_width_padding(size.width >> 1),
					_ccv_width_padding(size.width >> 2) };
	int off1 = steps[0] * size.height;               /* start of half-res level */
	int off2 = off1 + steps[1] * (size.height >> 1); /* start of quarter-res level */
	double err = 0;
	int k;
	for (k = 0; k < posnum; k++)
	{
		unsigned char* u8[] = { posdata[k], posdata[k] + off1, posdata[k] + off2 };
		if (!_ccv_run_bbf_feature(feature, steps, u8))
			err += pw[k];
	}
	for (k = 0; k < negnum; k++)
	{
		unsigned char* u8[] = { negdata[k], negdata[k] + off1, negdata[k] + off2 };
		if (_ccv_run_bbf_feature(feature, steps, u8))
			err += nw[k];
	}
	return err;
}
/* _ccv_bbf_genetic_qsort: sort genes by DESCENDING fitness (note the >=). */
#define less_than(fit1, fit2, aux) ((fit1).fitness >= (fit2).fitness)
static CCV_IMPLEMENT_QSORT(_ccv_bbf_genetic_qsort, ccv_bbf_gene_t, less_than)
#undef less_than
/* Genetic search for one discriminative BBF feature under the current sample
 * weights. A population of pnum = ftnum * 100 genes evolves per round as:
 * elite ftnum kept and aged, mnum mutants, hnum hybrids, rnum fresh randoms.
 * Terminates after 40 consecutive rounds with no improvement of the best
 * error. Returns the best feature found. */
static ccv_bbf_feature_t _ccv_bbf_genetic_optimize(unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, int ftnum, ccv_size_t size, double* pw, double* nw)
{
	ccv_bbf_feature_t best;
	/* seed (random method) */
	gsl_rng_env_setup();
	gsl_rng* rng = gsl_rng_alloc(gsl_rng_default);
	union { unsigned long int li; double db; } dbli;
	dbli.db = pw[0] + nw[0]; /* seed from the bit pattern of current weights: deterministic per boosting round */
	gsl_rng_set(rng, dbli.li);
	int i, j;
	int pnum = ftnum * 100;
	ccv_bbf_gene_t* gene = (ccv_bbf_gene_t*)ccmalloc(pnum * sizeof(ccv_bbf_gene_t));
	int rows[] = { size.height, size.height >> 1, size.height >> 2 };
	int cols[] = { size.width, size.width >> 1, size.width >> 2 };
	for (i = 0; i < pnum; i++)
		_ccv_bbf_randomize_gene(rng, &gene[i], rows, cols);
	unsigned int timer = _ccv_bbf_time_measure();
#ifdef USE_OPENMP
#pragma omp parallel for private(i) schedule(dynamic)
#endif
	for (i = 0; i < pnum; i++)
		gene[i].error = _ccv_bbf_error_rate(&gene[i].feature, posdata, posnum, negdata, negnum, size, pw, nw);
	timer = _ccv_bbf_time_measure() - timer;
	for (i = 0; i < pnum; i++)
		_ccv_bbf_genetic_fitness(&gene[i]);
	double best_err = 1;
	int rnum = ftnum * 39; /* number of randomize */
	int mnum = ftnum * 40; /* number of mutation */
	int hnum = ftnum * 20; /* number of hybrid */
	/* iteration stop crit : best no change in 40 iterations */
	int it = 0, t;
	for (t = 0 ; it < 40; ++it, ++t)
	{
		/* locate the current minimum-error gene */
		int min_id = 0;
		double min_err = gene[0].error;
		for (i = 1; i < pnum; i++)
			if (gene[i].error < min_err)
			{
				min_id = i;
				min_err = gene[i].error;
			}
		min_err = gene[min_id].error = _ccv_bbf_error_rate(&gene[min_id].feature, posdata, posnum, negdata, negnum, size, pw, nw);
		if (min_err < best_err)
		{
			best_err = min_err;
			memcpy(&best, &gene[min_id].feature, sizeof(best));
			printf("best bbf feature with error %f\n|-size: %d\n|-positive point: ", best_err, best.size);
			for (i = 0; i < best.size; i++)
				printf("(%d %d %d), ", best.px[i], best.py[i], best.pz[i]);
			printf("\n|-negative point: ");
			for (i = 0; i < best.size; i++)
				printf("(%d %d %d), ", best.nx[i], best.ny[i], best.nz[i]);
			printf("\n");
			it = 0; /* improvement found: reset the stagnation counter */
		}
		printf("minimum error achieved in round %d(%d) : %f with %d ms\n", t, it, min_err, timer / 1000);
		_ccv_bbf_genetic_qsort(gene, pnum, 0); /* descending fitness: elite genes move to the front */
		for (i = 0; i < ftnum; i++)
			++gene[i].age;
		/* mutation: copy a random elite parent, then apply add/remove/refine until one sticks */
		for (i = ftnum; i < ftnum + mnum; i++)
		{
			int parent = gsl_rng_uniform_int(rng, ftnum);
			memcpy(gene + i, gene + parent, sizeof(ccv_bbf_gene_t));
			/* three mutation strategy : 1. add, 2. remove, 3. refine */
			int pnm, pn = gsl_rng_uniform_int(rng, 2);
			int* pnk[] = { &gene[i].pk, &gene[i].nk };
			int* pnx[] = { gene[i].feature.px, gene[i].feature.nx };
			int* pny[] = { gene[i].feature.py, gene[i].feature.ny };
			int* pnz[] = { gene[i].feature.pz, gene[i].feature.nz };
			int x, y, z;
			int victim, decay = 1;
			do {
				switch (gsl_rng_uniform_int(rng, 3))
				{
					case 0: /* add */
						if (gene[i].pk == CCV_BBF_POINT_MAX && gene[i].nk == CCV_BBF_POINT_MAX)
							break;
						while (*pnk[pn] + 1 > CCV_BBF_POINT_MAX)
							pn = gsl_rng_uniform_int(rng, 2);
						do {
							z = gsl_rng_uniform_int(rng, 3);
							x = gsl_rng_uniform_int(rng, cols[z]);
							y = gsl_rng_uniform_int(rng, rows[z]);
						} while (_ccv_bbf_exist_gene_feature(&gene[i], x, y, z));
						pnz[pn][*pnk[pn]] = z;
						pnx[pn][*pnk[pn]] = x;
						pny[pn][*pnk[pn]] = y;
						++(*pnk[pn]);
						gene[i].feature.size = ccv_max(gene[i].pk, gene[i].nk);
						decay = gene[i].age = 0;
						break;
					case 1: /* remove */
						if (gene[i].pk + gene[i].nk <= CCV_BBF_POINT_MIN) /* at least 3 points have to be examed */
							break;
						while (*pnk[pn] - 1 <= 0) // || *pnk[pn] + *pnk[!pn] - 1 < CCV_BBF_POINT_MIN)
							pn = gsl_rng_uniform_int(rng, 2);
						victim = gsl_rng_uniform_int(rng, *pnk[pn]);
						/* shift the remaining points down over the removed slot */
						for (j = victim; j < *pnk[pn] - 1; j++)
						{
							pnz[pn][j] = pnz[pn][j + 1];
							pnx[pn][j] = pnx[pn][j + 1];
							pny[pn][j] = pny[pn][j + 1];
						}
						pnz[pn][*pnk[pn] - 1] = -1;
						--(*pnk[pn]);
						gene[i].feature.size = ccv_max(gene[i].pk, gene[i].nk);
						decay = gene[i].age = 0;
						break;
					case 2: /* refine */
						pnm = gsl_rng_uniform_int(rng, *pnk[pn]);
						do {
							z = gsl_rng_uniform_int(rng, 3);
							x = gsl_rng_uniform_int(rng, cols[z]);
							y = gsl_rng_uniform_int(rng, rows[z]);
						} while (_ccv_bbf_exist_gene_feature(&gene[i], x, y, z));
						pnz[pn][pnm] = z;
						pnx[pn][pnm] = x;
						pny[pn][pnm] = y;
						decay = gene[i].age = 0;
						break;
				}
			} while (decay);
		}
		for (i = ftnum + mnum; i < ftnum + mnum + hnum; i++)
		{
			/* hybrid strategy: taking positive points from dad, negative points from mum */
			int dad, mum;
			do {
				dad = gsl_rng_uniform_int(rng, ftnum);
				mum = gsl_rng_uniform_int(rng, ftnum);
			} while (dad == mum || gene[dad].pk + gene[mum].nk < CCV_BBF_POINT_MIN); /* at least 3 points have to be examed */
			for (j = 0; j < CCV_BBF_POINT_MAX; j++)
			{
				gene[i].feature.pz[j] = -1;
				gene[i].feature.nz[j] = -1;
			}
			gene[i].pk = gene[dad].pk;
			for (j = 0; j < gene[i].pk; j++)
			{
				gene[i].feature.pz[j] = gene[dad].feature.pz[j];
				gene[i].feature.px[j] = gene[dad].feature.px[j];
				gene[i].feature.py[j] = gene[dad].feature.py[j];
			}
			gene[i].nk = gene[mum].nk;
			for (j = 0; j < gene[i].nk; j++)
			{
				gene[i].feature.nz[j] = gene[mum].feature.nz[j];
				gene[i].feature.nx[j] = gene[mum].feature.nx[j];
				gene[i].feature.ny[j] = gene[mum].feature.ny[j];
			}
			gene[i].feature.size = ccv_max(gene[i].pk, gene[i].nk);
			gene[i].age = 0;
		}
		/* the rest of the population is re-randomized every round */
		for (i = ftnum + mnum + hnum; i < ftnum + mnum + hnum + rnum; i++)
			_ccv_bbf_randomize_gene(rng, &gene[i], rows, cols);
		timer = _ccv_bbf_time_measure();
#ifdef USE_OPENMP
#pragma omp parallel for private(i) schedule(dynamic)
#endif
		for (i = 0; i < pnum; i++)
			gene[i].error = _ccv_bbf_error_rate(&gene[i].feature, posdata, posnum, negdata, negnum, size, pw, nw);
		timer = _ccv_bbf_time_measure() - timer;
		for (i = 0; i < pnum; i++)
			_ccv_bbf_genetic_fitness(&gene[i]);
	}
	ccfree(gene);
	gsl_rng_free(rng);
	return best;
}
/* _ccv_bbf_best_qsort: sort genes by ascending error (best candidate first). */
#define less_than(fit1, fit2, aux) ((fit1).error < (fit2).error)
static CCV_IMPLEMENT_QSORT(_ccv_bbf_best_qsort, ccv_bbf_gene_t, less_than)
#undef less_than
/* Evaluate all pnum candidate genes against the weighted samples, sort by
 * ascending error, and return the lowest-error gene that uses at least
 * point_min total points. Prints the chosen feature and the evaluation time. */
static ccv_bbf_gene_t _ccv_bbf_best_gene(ccv_bbf_gene_t* gene, int pnum, int point_min, unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, ccv_size_t size, double* pw, double* nw)
{
	int i;
	unsigned int timer = _ccv_bbf_time_measure();
#ifdef USE_OPENMP
#pragma omp parallel for private(i) schedule(dynamic)
#endif
	for (i = 0; i < pnum; i++)
		gene[i].error = _ccv_bbf_error_rate(&gene[i].feature, posdata, posnum, negdata, negnum, size, pw, nw);
	timer = _ccv_bbf_time_measure() - timer;
	_ccv_bbf_best_qsort(gene, pnum, 0); /* ascending error: gene[0] has the lowest error overall */
	int min_id = 0;
	double min_err = gene[0].error;
	/* pick the first (i.e. lowest-error) gene that satisfies the size constraint */
	for (i = 0; i < pnum; i++)
		if (gene[i].nk + gene[i].pk >= point_min)
		{
			min_id = i;
			min_err = gene[i].error;
			break;
		}
	printf("local best bbf feature with error %f\n|-size: %d\n|-positive point: ", min_err, gene[min_id].feature.size);
	for (i = 0; i < gene[min_id].feature.size; i++)
		printf("(%d %d %d), ", gene[min_id].feature.px[i], gene[min_id].feature.py[i], gene[min_id].feature.pz[i]);
	printf("\n|-negative point: ");
	for (i = 0; i < gene[min_id].feature.size; i++)
		printf("(%d %d %d), ", gene[min_id].feature.nx[i], gene[min_id].feature.ny[i], gene[min_id].feature.nz[i]);
	printf("\nthe computation takes %d ms\n", timer / 1000);
	return gene[min_id];
}
/* Exhaustive "float search" optimization of a single BBF feature. If no
 * starting feature is given, a two-point seed is bootstrapped by alternately
 * fixing one point and enumerating every pixel for the other. Then, each
 * round enumerates every legal single edit (add / remove / move a positive or
 * negative point, and add-one-remove-one swaps) and keeps the best candidate;
 * it stops when the error no longer improves. Returns the optimized feature. */
static ccv_bbf_feature_t _ccv_bbf_convex_optimize(unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, ccv_bbf_feature_t* best_feature, ccv_size_t size, double* pw, double* nw)
{
	ccv_bbf_gene_t best_gene;
	/* seed (random method) */
	gsl_rng_env_setup();
	gsl_rng* rng = gsl_rng_alloc(gsl_rng_default);
	union { unsigned long int li; double db; } dbli;
	dbli.db = pw[0] + nw[0]; /* seed from the bit pattern of current weights */
	gsl_rng_set(rng, dbli.li);
	int i, j, k, q, p, g, t;
	int rows[] = { size.height, size.height >> 1, size.height >> 2 };
	int cols[] = { size.width, size.width >> 1, size.width >> 2 };
	int pnum = rows[0] * cols[0] + rows[1] * cols[1] + rows[2] * cols[2];
	/* worst-case candidate count for one enumeration round */
	ccv_bbf_gene_t* gene = (ccv_bbf_gene_t*)ccmalloc((pnum * (CCV_BBF_POINT_MAX * 2 + 1) * 2 + CCV_BBF_POINT_MAX * 2 + 1) * sizeof(ccv_bbf_gene_t));
	if (best_feature == 0)
	{
		/* bootstrapping the best feature, start from two pixels, one for positive, one for negative
		 * the bootstrapping process go like this: first, it will assign a random pixel as positive
		 * and enumerate every possible pixel as negative, and pick the best one. Then, enumerate every
		 * possible pixel as positive, and pick the best one, until it converges */
		memset(&best_gene, 0, sizeof(ccv_bbf_gene_t));
		for (i = 0; i < CCV_BBF_POINT_MAX; i++)
			best_gene.feature.pz[i] = best_gene.feature.nz[i] = -1;
		best_gene.pk = 1;
		best_gene.nk = 0;
		best_gene.feature.size = 1;
		best_gene.feature.pz[0] = gsl_rng_uniform_int(rng, 3);
		best_gene.feature.px[0] = gsl_rng_uniform_int(rng, cols[best_gene.feature.pz[0]]);
		best_gene.feature.py[0] = gsl_rng_uniform_int(rng, rows[best_gene.feature.pz[0]]);
		for (t = 0; ; ++t)
		{
			g = 0;
			/* even rounds enumerate the negative point, odd rounds the positive */
			if (t % 2 == 0)
			{
				for (i = 0; i < 3; i++)
					for (j = 0; j < cols[i]; j++)
						for (k = 0; k < rows[i]; k++)
							if (i != best_gene.feature.pz[0] || j != best_gene.feature.px[0] || k != best_gene.feature.py[0])
							{
								gene[g] = best_gene;
								gene[g].pk = gene[g].nk = 1;
								gene[g].feature.nz[0] = i;
								gene[g].feature.nx[0] = j;
								gene[g].feature.ny[0] = k;
								g++;
							}
			} else {
				for (i = 0; i < 3; i++)
					for (j = 0; j < cols[i]; j++)
						for (k = 0; k < rows[i]; k++)
							if (i != best_gene.feature.nz[0] || j != best_gene.feature.nx[0] || k != best_gene.feature.ny[0])
							{
								gene[g] = best_gene;
								gene[g].pk = gene[g].nk = 1;
								gene[g].feature.pz[0] = i;
								gene[g].feature.px[0] = j;
								gene[g].feature.py[0] = k;
								g++;
							}
			}
			printf("bootstrapping round : %d\n", t);
			ccv_bbf_gene_t local_gene = _ccv_bbf_best_gene(gene, g, 2, posdata, posnum, negdata, negnum, size, pw, nw);
			if (local_gene.error >= best_gene.error - 1e-10)
				break;
			best_gene = local_gene;
		}
	} else {
		/* resume from a given feature; recover pk/nk by scanning for the -1 sentinels */
		best_gene.feature = *best_feature;
		best_gene.pk = best_gene.nk = best_gene.feature.size;
		for (i = 0; i < CCV_BBF_POINT_MAX; i++)
			if (best_feature->pz[i] == -1)
			{
				best_gene.pk = i;
				break;
			}
		for (i = 0; i < CCV_BBF_POINT_MAX; i++)
			if (best_feature->nz[i] == -1)
			{
				best_gene.nk = i;
				break;
			}
	}
	/* after bootstrapping, the float search technique will do the following permutations:
	 * a). add a new point to positive or negative
	 * b). remove a point from positive or negative
	 * c). move an existing point in positive or negative to another position
	 * the three rules applied exhaustively, no heuristic used. */
	for (t = 0; ; ++t)
	{
		g = 0;
		for (i = 0; i < 3; i++)
			for (j = 0; j < cols[i]; j++)
				for (k = 0; k < rows[i]; k++)
					if (!_ccv_bbf_exist_gene_feature(&best_gene, j, k, i))
					{
						/* add positive point */
						if (best_gene.pk < CCV_BBF_POINT_MAX - 1)
						{
							gene[g] = best_gene;
							gene[g].feature.pz[gene[g].pk] = i;
							gene[g].feature.px[gene[g].pk] = j;
							gene[g].feature.py[gene[g].pk] = k;
							gene[g].pk++;
							gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
							g++;
						}
						/* add negative point */
						if (best_gene.nk < CCV_BBF_POINT_MAX - 1)
						{
							gene[g] = best_gene;
							gene[g].feature.nz[gene[g].nk] = i;
							gene[g].feature.nx[gene[g].nk] = j;
							gene[g].feature.ny[gene[g].nk] = k;
							gene[g].nk++;
							gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
							g++;
						}
						/* refine positive point */
						for (q = 0; q < best_gene.pk; q++)
						{
							gene[g] = best_gene;
							gene[g].feature.pz[q] = i;
							gene[g].feature.px[q] = j;
							gene[g].feature.py[q] = k;
							g++;
						}
						/* add positive point, remove negative point */
						if (best_gene.pk < CCV_BBF_POINT_MAX - 1 && best_gene.nk > 1)
						{
							for (q = 0; q < best_gene.nk; q++)
							{
								gene[g] = best_gene;
								gene[g].feature.pz[gene[g].pk] = i;
								gene[g].feature.px[gene[g].pk] = j;
								gene[g].feature.py[gene[g].pk] = k;
								gene[g].pk++;
								for (p = q; p < best_gene.nk - 1; p++)
								{
									gene[g].feature.nz[p] = gene[g].feature.nz[p + 1];
									gene[g].feature.nx[p] = gene[g].feature.nx[p + 1];
									gene[g].feature.ny[p] = gene[g].feature.ny[p + 1];
								}
								gene[g].feature.nz[gene[g].nk - 1] = -1;
								gene[g].nk--;
								gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
								g++;
							}
						}
						/* refine negative point */
						for (q = 0; q < best_gene.nk; q++)
						{
							gene[g] = best_gene;
							gene[g].feature.nz[q] = i;
							gene[g].feature.nx[q] = j;
							gene[g].feature.ny[q] = k;
							g++;
						}
						/* add negative point, remove positive point */
						if (best_gene.pk > 1 && best_gene.nk < CCV_BBF_POINT_MAX - 1)
						{
							for (q = 0; q < best_gene.pk; q++)
							{
								gene[g] = best_gene;
								gene[g].feature.nz[gene[g].nk] = i;
								gene[g].feature.nx[gene[g].nk] = j;
								gene[g].feature.ny[gene[g].nk] = k;
								gene[g].nk++;
								for (p = q; p < best_gene.pk - 1; p++)
								{
									gene[g].feature.pz[p] = gene[g].feature.pz[p + 1];
									gene[g].feature.px[p] = gene[g].feature.px[p + 1];
									gene[g].feature.py[p] = gene[g].feature.py[p + 1];
								}
								gene[g].feature.pz[gene[g].pk - 1] = -1;
								gene[g].pk--;
								gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
								g++;
							}
						}
					}
		/* plain removal candidates: drop one positive point */
		if (best_gene.pk > 1)
			for (q = 0; q < best_gene.pk; q++)
			{
				gene[g] = best_gene;
				for (i = q; i < best_gene.pk - 1; i++)
				{
					gene[g].feature.pz[i] = gene[g].feature.pz[i + 1];
					gene[g].feature.px[i] = gene[g].feature.px[i + 1];
					gene[g].feature.py[i] = gene[g].feature.py[i + 1];
				}
				gene[g].feature.pz[gene[g].pk - 1] = -1;
				gene[g].pk--;
				gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
				g++;
			}
		/* plain removal candidates: drop one negative point */
		if (best_gene.nk > 1)
			for (q = 0; q < best_gene.nk; q++)
			{
				gene[g] = best_gene;
				for (i = q; i < best_gene.nk - 1; i++)
				{
					gene[g].feature.nz[i] = gene[g].feature.nz[i + 1];
					gene[g].feature.nx[i] = gene[g].feature.nx[i + 1];
					gene[g].feature.ny[i] = gene[g].feature.ny[i + 1];
				}
				gene[g].feature.nz[gene[g].nk - 1] = -1;
				gene[g].nk--;
				gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
				g++;
			}
		/* include the unmodified current best so the round can be a no-op */
		gene[g] = best_gene;
		g++;
		printf("float search round : %d\n", t);
		ccv_bbf_gene_t local_gene = _ccv_bbf_best_gene(gene, g, CCV_BBF_POINT_MIN, posdata, posnum, negdata, negnum, size, pw, nw);
		if (local_gene.error >= best_gene.error - 1e-10)
			break;
		best_gene = local_gene;
	}
	ccfree(gene);
	gsl_rng_free(rng);
	return best_gene.feature;
}
/* Serialize one stage classifier as text. Floats are written through an
 * int-typed union so their bit patterns round-trip exactly through the text
 * file. Returns 0 on success, -1 if the file cannot be opened. */
static int _ccv_write_bbf_stage_classifier(const char* file, ccv_bbf_stage_classifier_t* classifier)
{
	FILE* w = fopen(file, "wb");
	if (!w)
		return -1;
	fprintf(w, "%d\n", classifier->count);
	union { float fl; int i; } bits;
	bits.fl = classifier->threshold;
	fprintf(w, "%d\n", bits.i);
	int f, p;
	for (f = 0; f < classifier->count; f++)
	{
		ccv_bbf_feature_t* feature = classifier->feature + f;
		fprintf(w, "%d\n", feature->size);
		for (p = 0; p < feature->size; p++)
		{
			fprintf(w, "%d %d %d\n", feature->px[p], feature->py[p], feature->pz[p]);
			fprintf(w, "%d %d %d\n", feature->nx[p], feature->ny[p], feature->nz[p]);
		}
		union { float fl; int i; } a0, a1;
		a0.fl = classifier->alpha[f * 2];
		a1.fl = classifier->alpha[f * 2 + 1];
		fprintf(w, "%d %d\n", a0.i, a1.i);
	}
	fclose(w);
	return 0;
}
/* Load the cached negative-sample buffers written by
 * _ccv_write_background_data. Returns 0 on success; -1 when the file cannot
 * be opened or is truncated/corrupt. On a short file, *negnum is set to the
 * number of complete samples actually loaded (the partial one is freed), so
 * callers never see uninitialized buffers.
 * Fix: the original ORed fread() results into an unused `stat` variable and
 * always returned 0, silently handing back garbage on a truncated cache. */
static int _ccv_read_background_data(const char* file, unsigned char** negdata, int* negnum, ccv_size_t size)
{
	FILE* r = fopen(file, "rb");
	if (r == 0) return -1;
	if (fread(negnum, sizeof(int), 1, r) != 1)
	{
		*negnum = 0;
		fclose(r);
		return -1;
	}
	int i;
	/* one sample = three concatenated pyramid levels (full, half, quarter) */
	int isizs012 = _ccv_width_padding(size.width) * size.height +
				   _ccv_width_padding(size.width >> 1) * (size.height >> 1) +
				   _ccv_width_padding(size.width >> 2) * (size.height >> 2);
	for (i = 0; i < *negnum; i++)
	{
		negdata[i] = (unsigned char*)ccmalloc(isizs012);
		if (fread(negdata[i], 1, isizs012, r) != (size_t)isizs012)
		{
			/* truncated file: drop the partial sample, report what was read */
			ccfree(negdata[i]);
			*negnum = i;
			fclose(r);
			return -1;
		}
	}
	fclose(r);
	return 0;
}
/* Persist the mined negative-sample buffers so a later run can resume
 * without re-mining. Returns 0 on success, -1 if the file cannot be opened.
 * Fix: open with "wb" — this is raw binary pixel data and the matching
 * reader uses "rb"; text mode ("w") corrupts the bytes on platforms that
 * translate line endings (e.g. Windows). */
static int _ccv_write_background_data(const char* file, unsigned char** negdata, int negnum, ccv_size_t size)
{
	FILE* w = fopen(file, "wb");
	if (w == 0) return -1;
	fwrite(&negnum, sizeof(int), 1, w);
	int i;
	/* one sample = three concatenated pyramid levels (full, half, quarter) */
	int isizs012 = _ccv_width_padding(size.width) * size.height +
				   _ccv_width_padding(size.width >> 1) * (size.height >> 1) +
				   _ccv_width_padding(size.width >> 2) * (size.height >> 2);
	for (i = 0; i < negnum; i++)
		fwrite(negdata[i], 1, isizs012, w);
	fclose(w);
	return 0;
}
/* Restore training progress (stage index i, feature index k, background flag
 * bg) and the per-sample boosting weights pw/nw from the text state file
 * written by _ccv_save_bbf_cacade_training_state. Doubles are round-tripped
 * through two ints holding their bit pattern (assumes sizeof(double) ==
 * 2 * sizeof(int), matching the writer). Returns 0 on success, -1 when the
 * file cannot be opened. */
static int _ccv_resume_bbf_cascade_training_state(const char* file, int* i, int* k, int* bg, double* pw, double* nw, int posnum, int negnum)
{
	FILE* r = fopen(file, "r");
	if (!r)
		return -1;
	int stat = 0;
	stat |= fscanf(r, "%d %d %d", i, k, bg);
	union { double db; int i[2]; } bits;
	int idx;
	for (idx = 0; idx < posnum; idx++)
	{
		stat |= fscanf(r, "%d %d", &bits.i[0], &bits.i[1]);
		pw[idx] = bits.db;
	}
	for (idx = 0; idx < negnum; idx++)
	{
		stat |= fscanf(r, "%d %d", &bits.i[0], &bits.i[1]);
		nw[idx] = bits.db;
	}
	fclose(r);
	return 0;
}
/* Persist training progress (stage index i, feature index k, background flag
 * bg) and the per-sample boosting weights pw/nw as text. Doubles are written
 * as two ints holding their bit pattern so they round-trip exactly through
 * the matching resume routine. Returns 0 on success, -1 when the file cannot
 * be opened. */
static int _ccv_save_bbf_cacade_training_state(const char* file, int i, int k, int bg, double* pw, double* nw, int posnum, int negnum)
{
	FILE* w = fopen(file, "w");
	if (!w)
		return -1;
	fprintf(w, "%d %d %d\n", i, k, bg);
	union { double db; int i[2]; } bits;
	int idx;
	for (idx = 0; idx < posnum; ++idx)
	{
		bits.db = pw[idx];
		fprintf(w, "%d %d ", bits.i[0], bits.i[1]);
	}
	fprintf(w, "\n");
	for (idx = 0; idx < negnum; ++idx)
	{
		bits.db = nw[idx];
		fprintf(w, "%d %d ", bits.i[0], bits.i[1]);
	}
	fprintf(w, "\n");
	fclose(w);
	return 0;
}
/* Train a new BBF cascade with AdaBoost. Each outer iteration builds one
 * stage: mine hard negatives, (re)initialize weights, then repeatedly pick
 * the best feature under the current weights and reweight until the stage
 * meets the true/false positive criteria. All progress (stage files, weight
 * state, negative cache) is written under dir so training can resume. */
void ccv_bbf_classifier_cascade_new(ccv_dense_matrix_t** posimg, int posnum, char** bgfiles, int bgnum, int negnum, ccv_size_t size, const char* dir, ccv_bbf_new_param_t params)
{
	int i, j, k;
	/* allocate memory for usage */
	ccv_bbf_classifier_cascade_t* cascade = (ccv_bbf_classifier_cascade_t*)ccmalloc(sizeof(ccv_bbf_classifier_cascade_t));
	cascade->count = 0;
	cascade->size = size;
	cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(sizeof(ccv_bbf_stage_classifier_t));
	unsigned char** posdata = (unsigned char**)ccmalloc(posnum * sizeof(unsigned char*));
	unsigned char** negdata = (unsigned char**)ccmalloc(negnum * sizeof(unsigned char*));
	double* pw = (double*)ccmalloc(posnum * sizeof(double));
	double* nw = (double*)ccmalloc(negnum * sizeof(double));
	float* peval = (float*)ccmalloc(posnum * sizeof(float));
	float* neval = (float*)ccmalloc(negnum * sizeof(float));
	double inv_balance_k = 1. / params.balance_k;
	/* balance factor k, and weighted with 0.01 */
	params.balance_k *= 0.01;
	inv_balance_k *= 0.01;
	int steps[] = { _ccv_width_padding(cascade->size.width),
					_ccv_width_padding(cascade->size.width >> 1),
					_ccv_width_padding(cascade->size.width >> 2) };
	int isizs0 = steps[0] * cascade->size.height;
	int isizs01 = isizs0 + steps[1] * (cascade->size.height >> 1);
	i = 0;
	k = 0;
	int bg = 0;
	int cacheK = 10; /* initial capacity of the per-stage feature/alpha arrays */
	/* state resume code */
	char buf[1024];
	sprintf(buf, "%s/stat.txt", dir);
	_ccv_resume_bbf_cascade_training_state(buf, &i, &k, &bg, pw, nw, posnum, negnum);
	if (i > 0)
	{
		/* stages 0..i-1 were trained in a previous run: reload them */
		cascade->count = i;
		ccfree(cascade->stage_classifier);
		cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(i * sizeof(ccv_bbf_stage_classifier_t));
		for (j = 0; j < i; j++)
		{
			sprintf(buf, "%s/stage-%d.txt", dir, j);
			_ccv_read_bbf_stage_classifier(buf, &cascade->stage_classifier[j]);
		}
	}
	if (k > 0)
		cacheK = k;
	int rpos, rneg = 0;
	if (bg)
	{
		/* reuse the cached negatives from the interrupted run */
		sprintf(buf, "%s/negs.txt", dir);
		_ccv_read_background_data(buf, negdata, &rneg, cascade->size);
	}
	for (; i < params.layer; i++)
	{
		if (!bg)
		{
			rneg = _ccv_prepare_background_data(cascade, bgfiles, bgnum, negdata, negnum);
			/* save state of background data */
			sprintf(buf, "%s/negs.txt", dir);
			_ccv_write_background_data(buf, negdata, rneg, cascade->size);
			bg = 1;
		}
		double totalw;
		/* save state of cascade : level, weight etc. */
		sprintf(buf, "%s/stat.txt", dir);
		_ccv_save_bbf_cacade_training_state(buf, i, k, bg, pw, nw, posnum, negnum);
		ccv_bbf_stage_classifier_t classifier;
		if (k > 0)
		{
			/* resume state of classifier */
			sprintf( buf, "%s/stage-%d.txt", dir, i );
			_ccv_read_bbf_stage_classifier(buf, &classifier);
		} else {
			/* initialize classifier */
			for (j = 0; j < posnum; j++)
				pw[j] = params.balance_k;
			for (j = 0; j < rneg; j++)
				nw[j] = inv_balance_k;
			classifier.count = k;
			classifier.threshold = 0;
			classifier.feature = (ccv_bbf_feature_t*)ccmalloc(cacheK * sizeof(ccv_bbf_feature_t));
			classifier.alpha = (float*)ccmalloc(cacheK * 2 * sizeof(float));
		}
		_ccv_prepare_positive_data(posimg, posdata, cascade->size, posnum);
		rpos = _ccv_prune_positive_data(cascade, posdata, posnum, cascade->size);
		printf("%d postivie data and %d negative data in training\n", rpos, rneg);
		/* reweight to 1.00 */
		totalw = 0;
		for (j = 0; j < rpos; j++)
			totalw += pw[j];
		for (j = 0; j < rneg; j++)
			totalw += nw[j];
		for (j = 0; j < rpos; j++)
			pw[j] = pw[j] / totalw;
		for (j = 0; j < rneg; j++)
			nw[j] = nw[j] / totalw;
		/* inner boosting loop: add one feature per iteration until the stage
		 * meets the detection/false-positive criteria */
		for (; ; k++)
		{
			/* get overall true-positive, false-positive rate and threshold */
			double tp = 0, fp = 0, etp = 0, efp = 0;
			_ccv_bbf_eval_data(&classifier, posdata, rpos, negdata, rneg, cascade->size, peval, neval);
			_ccv_sort_32f(peval, rpos, 0);
			/* pick the threshold that retains pos_crit of the positives */
			classifier.threshold = peval[(int)((1. - params.pos_crit) * rpos)] - 1e-6;
			for (j = 0; j < rpos; j++)
			{
				if (peval[j] >= 0)
					++tp;
				if (peval[j] >= classifier.threshold)
					++etp;
			}
			tp /= rpos; etp /= rpos;
			for (j = 0; j < rneg; j++)
			{
				if (neval[j] >= 0)
					++fp;
				if (neval[j] >= classifier.threshold)
					++efp;
			}
			fp /= rneg; efp /= rneg;
			printf("stage classifier real TP rate : %f, FP rate : %f\n", tp, fp);
			printf("stage classifier TP rate : %f, FP rate : %f at threshold : %f\n", etp, efp, classifier.threshold);
			if (k > 0)
			{
				/* save classifier state */
				sprintf(buf, "%s/stage-%d.txt", dir, i);
				_ccv_write_bbf_stage_classifier(buf, &classifier);
				sprintf(buf, "%s/stat.txt", dir);
				_ccv_save_bbf_cacade_training_state(buf, i, k, bg, pw, nw, posnum, negnum);
			}
			if (etp > params.pos_crit && efp < params.neg_crit)
				break;
			/* TODO: more post-process is needed in here */
			/* select the best feature in current distribution through genetic algorithm optimization */
			ccv_bbf_feature_t best;
			if (params.optimizer == CCV_BBF_GENETIC_OPT)
			{
				best = _ccv_bbf_genetic_optimize(posdata, rpos, negdata, rneg, params.feature_number, cascade->size, pw, nw);
			} else if (params.optimizer == CCV_BBF_FLOAT_OPT) {
				best = _ccv_bbf_convex_optimize(posdata, rpos, negdata, rneg, 0, cascade->size, pw, nw);
			} else {
				/* default: genetic search seeded into the float search refinement */
				best = _ccv_bbf_genetic_optimize(posdata, rpos, negdata, rneg, params.feature_number, cascade->size, pw, nw);
				best = _ccv_bbf_convex_optimize(posdata, rpos, negdata, rneg, &best, cascade->size, pw, nw);
			}
			double err = _ccv_bbf_error_rate(&best, posdata, rpos, negdata, rneg, cascade->size, pw, nw);
			double rw = (1 - err) / err; /* AdaBoost reweighting factor for misclassified samples */
			totalw = 0;
			/* reweight */
			for (j = 0; j < rpos; j++)
			{
				unsigned char* u8[] = { posdata[j], posdata[j] + isizs0, posdata[j] + isizs01 };
				if (!_ccv_run_bbf_feature(&best, steps, u8))
					pw[j] *= rw;
				pw[j] *= params.balance_k;
				totalw += pw[j];
			}
			for (j = 0; j < rneg; j++)
			{
				unsigned char* u8[] = { negdata[j], negdata[j] + isizs0, negdata[j] + isizs01 };
				if (_ccv_run_bbf_feature(&best, steps, u8))
					nw[j] *= rw;
				nw[j] *= inv_balance_k;
				totalw += nw[j];
			}
			for (j = 0; j < rpos; j++)
				pw[j] = pw[j] / totalw;
			for (j = 0; j < rneg; j++)
				nw[j] = nw[j] / totalw;
			double c = log(rw);
			printf("coefficient of feature %d: %f\n", k + 1, c);
			classifier.count = k + 1;
			/* resizing classifier */
			if (k >= cacheK)
			{
				/* grow the feature/alpha arrays geometrically */
				ccv_bbf_feature_t* feature = (ccv_bbf_feature_t*)ccmalloc(cacheK * 2 * sizeof(ccv_bbf_feature_t));
				memcpy(feature, classifier.feature, cacheK * sizeof(ccv_bbf_feature_t));
				ccfree(classifier.feature);
				float* alpha = (float*)ccmalloc(cacheK * 4 * sizeof(float));
				memcpy(alpha, classifier.alpha, cacheK * 2 * sizeof(float));
				ccfree(classifier.alpha);
				classifier.feature = feature;
				classifier.alpha = alpha;
				cacheK *= 2;
			}
			/* setup new feature */
			classifier.feature[k] = best;
			classifier.alpha[k * 2] = -c; /* alpha[0] when the feature does not fire, alpha[1] when it does */
			classifier.alpha[k * 2 + 1] = c;
		}
		/* append the finished stage to the cascade */
		cascade->count = i + 1;
		ccv_bbf_stage_classifier_t* stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(cascade->count * sizeof(ccv_bbf_stage_classifier_t));
		memcpy(stage_classifier, cascade->stage_classifier, i * sizeof(ccv_bbf_stage_classifier_t));
		ccfree(cascade->stage_classifier);
		stage_classifier[i] = classifier;
		cascade->stage_classifier = stage_classifier;
		k = 0;
		bg = 0;
		for (j = 0; j < rpos; j++)
			ccfree(posdata[j]);
		for (j = 0; j < rneg; j++)
			ccfree(negdata[j]);
	}
	/* NOTE(review): cascade->stage_classifier and its feature/alpha arrays are
	 * not freed here before ccfree(cascade) — looks like a leak; confirm
	 * whether the saved stage files make this intentional */
	ccfree(neval);
	ccfree(peval);
	ccfree(nw);
	ccfree(pw);
	ccfree(negdata);
	ccfree(posdata);
	ccfree(cascade);
}
#else
/* Stub used when ccv is built without libgsl: training is unavailable, so
 * just report the missing dependency on stderr. */
void ccv_bbf_classifier_cascade_new(ccv_dense_matrix_t** posimg, int posnum, char** bgfiles, int bgnum, int negnum, ccv_size_t size, const char* dir, ccv_bbf_new_param_t params)
{
	fprintf(stderr, " ccv_bbf_classifier_cascade_new requires libgsl support, please compile ccv with libgsl.\n");
}
#endif
/* Grouping predicate for detections: two rectangles are mergeable when their
 * top-left corners lie within 25% of r1's width of each other and their
 * widths are within a 1.5x factor in both directions. Returns 1/0. */
static int _ccv_is_equal(const void* _r1, const void* _r2, void* data)
{
	const ccv_comp_t* a = (const ccv_comp_t*)_r1;
	const ccv_comp_t* b = (const ccv_comp_t*)_r2;
	int tolerance = (int)(a->rect.width * 0.25 + 0.5);
	if (b->rect.x > a->rect.x + tolerance || b->rect.x < a->rect.x - tolerance)
		return 0;
	if (b->rect.y > a->rect.y + tolerance || b->rect.y < a->rect.y - tolerance)
		return 0;
	if (b->rect.width > (int)(a->rect.width * 1.5 + 0.5))
		return 0;
	if ((int)(b->rect.width * 1.5 + 0.5) < a->rect.width)
		return 0;
	return 1;
}
/* Same geometric grouping predicate as _ccv_is_equal(), but detections must
 * additionally come from the same cascade (matching id). */
static int _ccv_is_equal_same_class(const void* _r1, const void* _r2, void* data)
{
	const ccv_comp_t* a = (const ccv_comp_t*)_r1;
	const ccv_comp_t* b = (const ccv_comp_t*)_r2;
	if (b->id != a->id)
		return 0;
	int tolerance = (int)(a->rect.width * 0.25 + 0.5);
	return b->rect.x <= a->rect.x + tolerance
		&& b->rect.x >= a->rect.x - tolerance
		&& b->rect.y <= a->rect.y + tolerance
		&& b->rect.y >= a->rect.y - tolerance
		&& b->rect.width <= (int)(a->rect.width * 1.5 + 0.5)
		&& (int)(b->rect.width * 1.5 + 0.5) >= a->rect.width;
}
/* Multi-scale BBF object detection. Builds an image pyramid, slides every
 * cascade over each scale (at 4 half-pixel offsets when params.accurate is
 * set), then merges raw hits into averaged rectangles using OpenCV-style
 * neighbor grouping. Returns a ccv_array_t of ccv_comp_t the caller frees. */
ccv_array_t* ccv_bbf_detect_objects(ccv_dense_matrix_t* a, ccv_bbf_classifier_cascade_t** _cascade, int count, ccv_bbf_param_t params)
{
int hr = a->rows / params.size.height;
int wr = a->cols / params.size.width;
/* scale step between adjacent pyramid levels; `next' levels form one octave */
double scale = pow(2., 1. / (params.interval + 1.));
int next = params.interval + 1;
int scale_upto = (int)(log((double)ccv_min(hr, wr)) / log(scale));
/* 4 slots per level: the base image plus 3 shifted variants for accurate mode */
ccv_dense_matrix_t** pyr = (ccv_dense_matrix_t**)alloca((scale_upto + next * 2) * 4 * sizeof(ccv_dense_matrix_t*));
memset(pyr, 0, (scale_upto + next * 2) * 4 * sizeof(ccv_dense_matrix_t*));
if (params.size.height != _cascade[0]->size.height || params.size.width != _cascade[0]->size.width)
ccv_resample(a, &pyr[0], 0, a->rows * _cascade[0]->size.height / params.size.height, a->cols * _cascade[0]->size.width / params.size.width, CCV_INTER_AREA);
else
pyr[0] = a;
int i, j, k, t, x, y, q;
for (i = 1; i <= params.interval; i++)
ccv_resample(pyr[0], &pyr[i * 4], 0, (int)(pyr[0]->rows / pow(scale, i)), (int)(pyr[0]->cols / pow(scale, i)), CCV_INTER_AREA);
for (i = next; i < scale_upto + next * 2; i++)
ccv_sample_down(pyr[i * 4 - next * 4], &pyr[i * 4], 0, 0, 0);
if (params.accurate)
for (i = next * 2; i < scale_upto + next * 2; i++)
{
/* shifted downsamples: (dx,dy) = (1,0), (0,1), (1,1) */
ccv_sample_down(pyr[i * 4 - next * 4], &pyr[i * 4 + 1], 0, 1, 0);
ccv_sample_down(pyr[i * 4 - next * 4], &pyr[i * 4 + 2], 0, 0, 1);
ccv_sample_down(pyr[i * 4 - next * 4], &pyr[i * 4 + 3], 0, 1, 1);
}
ccv_array_t* idx_seq;
ccv_array_t* seq = ccv_array_new(sizeof(ccv_comp_t), 64, 0);
ccv_array_t* seq2 = ccv_array_new(sizeof(ccv_comp_t), 64, 0);
ccv_array_t* result_seq = ccv_array_new(sizeof(ccv_comp_t), 64, 0);
/* detect in multi scale */
for (t = 0; t < count; t++)
{
ccv_bbf_classifier_cascade_t* cascade = _cascade[t];
float scale_x = (float) params.size.width / (float) cascade->size.width;
float scale_y = (float) params.size.height / (float) cascade->size.height;
ccv_array_clear(seq);
for (i = 0; i < scale_upto; i++)
{
int dx[] = {0, 1, 0, 1};
int dy[] = {0, 0, 1, 1};
/* features are evaluated on three pyramid levels at once (level i and its
 * next two octaves), hence the three step/padding entries below */
int i_rows = pyr[i * 4 + next * 8]->rows - (cascade->size.height >> 2);
int steps[] = { pyr[i * 4]->step, pyr[i * 4 + next * 4]->step, pyr[i * 4 + next * 8]->step };
int i_cols = pyr[i * 4 + next * 8]->cols - (cascade->size.width >> 2);
int paddings[] = { pyr[i * 4]->step * 4 - i_cols * 4,
pyr[i * 4 + next * 4]->step * 2 - i_cols * 2,
pyr[i * 4 + next * 8]->step - i_cols };
for (q = 0; q < (params.accurate ? 4 : 1); q++)
{
unsigned char* u8[] = { pyr[i * 4]->data.u8 + dx[q] * 2 + dy[q] * pyr[i * 4]->step * 2, pyr[i * 4 + next * 4]->data.u8 + dx[q] + dy[q] * pyr[i * 4 + next * 4]->step, pyr[i * 4 + next * 8 + q]->data.u8 };
for (y = 0; y < i_rows; y++)
{
for (x = 0; x < i_cols; x++)
{
float sum;
int flag = 1;
ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier;
for (j = 0; j < cascade->count; ++j, ++classifier)
{
sum = 0;
/* each feature contributes one of two weights depending on its
 * binary response (alpha stores pairs, hence alpha += 2) */
float* alpha = classifier->alpha;
ccv_bbf_feature_t* feature = classifier->feature;
for (k = 0; k < classifier->count; ++k, alpha += 2, ++feature)
sum += alpha[_ccv_run_bbf_feature(feature, steps, u8)];
if (sum < classifier->threshold)
{
/* early reject: remaining stages are skipped */
flag = 0;
break;
}
}
if (flag)
{
ccv_comp_t comp;
comp.rect = ccv_rect((int)((x * 4 + dx[q] * 2) * scale_x + 0.5), (int)((y * 4 + dy[q] * 2) * scale_y + 0.5), (int)(cascade->size.width * scale_x + 0.5), (int)(cascade->size.height * scale_y + 0.5));
comp.id = t;
comp.neighbors = 1;
comp.confidence = sum;
ccv_array_push(seq, &comp);
}
u8[0] += 4;
u8[1] += 2;
u8[2] += 1;
}
u8[0] += paddings[0];
u8[1] += paddings[1];
u8[2] += paddings[2];
}
}
scale_x *= scale;
scale_y *= scale;
}
/* the following code from OpenCV's haar feature implementation */
if(params.min_neighbors == 0)
{
/* no grouping requested: emit every raw window as-is */
for (i = 0; i < seq->rnum; i++)
{
ccv_comp_t* comp = (ccv_comp_t*)ccv_array_get(seq, i);
ccv_array_push(result_seq, comp);
}
} else {
idx_seq = 0;
ccv_array_clear(seq2);
// group retrieved rectangles in order to filter out noise
int ncomp = ccv_array_group(seq, &idx_seq, _ccv_is_equal_same_class, 0);
ccv_comp_t* comps = (ccv_comp_t*)ccmalloc((ncomp + 1) * sizeof(ccv_comp_t));
memset(comps, 0, (ncomp + 1) * sizeof(ccv_comp_t));
// count number of neighbors
for(i = 0; i < seq->rnum; i++)
{
ccv_comp_t r1 = *(ccv_comp_t*)ccv_array_get(seq, i);
int idx = *(int*)ccv_array_get(idx_seq, i);
if (comps[idx].neighbors == 0)
comps[idx].confidence = r1.confidence;
++comps[idx].neighbors;
/* accumulate coordinates; averaged below once the group size is known */
comps[idx].rect.x += r1.rect.x;
comps[idx].rect.y += r1.rect.y;
comps[idx].rect.width += r1.rect.width;
comps[idx].rect.height += r1.rect.height;
comps[idx].id = r1.id;
comps[idx].confidence = ccv_max(comps[idx].confidence, r1.confidence);
}
// calculate average bounding box
for(i = 0; i < ncomp; i++)
{
int n = comps[i].neighbors;
if(n >= params.min_neighbors)
{
ccv_comp_t comp;
/* (sum * 2 + n) / (2 * n) divides with rounding to nearest */
comp.rect.x = (comps[i].rect.x * 2 + n) / (2 * n);
comp.rect.y = (comps[i].rect.y * 2 + n) / (2 * n);
comp.rect.width = (comps[i].rect.width * 2 + n) / (2 * n);
comp.rect.height = (comps[i].rect.height * 2 + n) / (2 * n);
comp.neighbors = comps[i].neighbors;
comp.id = comps[i].id;
comp.confidence = comps[i].confidence;
ccv_array_push(seq2, &comp);
}
}
// filter out small face rectangles inside large face rectangles
for(i = 0; i < seq2->rnum; i++)
{
ccv_comp_t r1 = *(ccv_comp_t*)ccv_array_get(seq2, i);
int flag = 1;
for(j = 0; j < seq2->rnum; j++)
{
ccv_comp_t r2 = *(ccv_comp_t*)ccv_array_get(seq2, j);
int distance = (int)(r2.rect.width * 0.25 + 0.5);
if(i != j &&
r1.id == r2.id &&
r1.rect.x >= r2.rect.x - distance &&
r1.rect.y >= r2.rect.y - distance &&
r1.rect.x + r1.rect.width <= r2.rect.x + r2.rect.width + distance &&
r1.rect.y + r1.rect.height <= r2.rect.y + r2.rect.height + distance &&
(r2.neighbors > ccv_max(3, r1.neighbors) || r1.neighbors < 3))
{
flag = 0;
break;
}
}
if(flag)
ccv_array_push(result_seq, &r1);
}
ccv_array_free(idx_seq);
ccfree(comps);
}
}
ccv_array_free(seq);
ccv_array_free(seq2);
ccv_array_t* result_seq2;
/* the following code from OpenCV's haar feature implementation */
if (params.flags & CCV_BBF_NO_NESTED)
{
result_seq2 = ccv_array_new(sizeof(ccv_comp_t), 64, 0);
idx_seq = 0;
// group retrieved rectangles in order to filter out noise
int ncomp = ccv_array_group(result_seq, &idx_seq, _ccv_is_equal, 0);
ccv_comp_t* comps = (ccv_comp_t*)ccmalloc((ncomp + 1) * sizeof(ccv_comp_t));
memset(comps, 0, (ncomp + 1) * sizeof(ccv_comp_t));
// count number of neighbors
for(i = 0; i < result_seq->rnum; i++)
{
ccv_comp_t r1 = *(ccv_comp_t*)ccv_array_get(result_seq, i);
int idx = *(int*)ccv_array_get(idx_seq, i);
/* keep only the most confident rectangle per cross-cascade group */
if (comps[idx].neighbors == 0 || comps[idx].confidence < r1.confidence)
{
comps[idx].confidence = r1.confidence;
comps[idx].neighbors = 1;
comps[idx].rect = r1.rect;
comps[idx].id = r1.id;
}
}
// calculate average bounding box
for(i = 0; i < ncomp; i++)
if(comps[i].neighbors)
ccv_array_push(result_seq2, &comps[i]);
ccv_array_free(result_seq);
ccfree(comps);
} else {
result_seq2 = result_seq;
}
/* release the pyramid; pyr[0] is freed only when it was resampled, never
 * when it aliases the caller's input image */
for (i = 1; i < scale_upto + next * 2; i++)
ccv_matrix_free(pyr[i * 4]);
if (params.accurate)
for (i = next * 2; i < scale_upto + next * 2; i++)
{
ccv_matrix_free(pyr[i * 4 + 1]);
ccv_matrix_free(pyr[i * 4 + 2]);
ccv_matrix_free(pyr[i * 4 + 3]);
}
if (params.size.height != _cascade[0]->size.height || params.size.width != _cascade[0]->size.width)
ccv_matrix_free(pyr[0]);
return result_seq2;
}
/* Load a BBF cascade from `directory': reads the count and window size from
 * cascade.txt, then one stage-%d.txt per stage. Returns NULL when the
 * directory or header is unusable; a stage read failure truncates the
 * cascade to the stages read so far. Caller owns the returned cascade. */
ccv_bbf_classifier_cascade_t* ccv_load_bbf_classifier_cascade(const char* directory)
{
	char buf[1024];
	/* snprintf guards against a directory path long enough to overflow buf */
	snprintf(buf, sizeof(buf), "%s/cascade.txt", directory);
	int s, i;
	FILE* r = fopen(buf, "r");
	if (r == 0)
		return 0;
	ccv_bbf_classifier_cascade_t* cascade = (ccv_bbf_classifier_cascade_t*)ccmalloc(sizeof(ccv_bbf_classifier_cascade_t));
	s = fscanf(r, "%d %d %d", &cascade->count, &cascade->size.width, &cascade->size.height);
	fclose(r);
	/* all three header fields must parse, otherwise count/size would be used
	 * uninitialized below; a negative count is likewise rejected */
	if (s != 3 || cascade->count < 0)
	{
		ccfree(cascade);
		return 0;
	}
	cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(cascade->count * sizeof(ccv_bbf_stage_classifier_t));
	for (i = 0; i < cascade->count; i++)
	{
		snprintf(buf, sizeof(buf), "%s/stage-%d.txt", directory, i);
		if (_ccv_read_bbf_stage_classifier(buf, &cascade->stage_classifier[i]) < 0)
		{
			/* keep only the stages that loaded successfully */
			cascade->count = i;
			break;
		}
	}
	return cascade;
}
/* Deserialize a BBF cascade from an in-memory blob laid out as: count,
 * size.width, size.height, then per stage: count, threshold, feature array,
 * alpha array (two floats per feature, indexed by the feature's binary
 * response). NOTE(review): no bounds checking is performed -- the caller
 * must guarantee `s' holds a complete, trusted serialization. */
ccv_bbf_classifier_cascade_t* ccv_bbf_classifier_cascade_read_binary(char* s)
{
int i;
ccv_bbf_classifier_cascade_t* cascade = (ccv_bbf_classifier_cascade_t*)ccmalloc(sizeof(ccv_bbf_classifier_cascade_t));
/* header: stage count and detection window size */
memcpy(&cascade->count, s, sizeof(cascade->count)); s += sizeof(cascade->count);
memcpy(&cascade->size.width, s, sizeof(cascade->size.width)); s += sizeof(cascade->size.width);
memcpy(&cascade->size.height, s, sizeof(cascade->size.height)); s += sizeof(cascade->size.height);
ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(cascade->count * sizeof(ccv_bbf_stage_classifier_t));
for (i = 0; i < cascade->count; i++, classifier++)
{
memcpy(&classifier->count, s, sizeof(classifier->count)); s += sizeof(classifier->count);
memcpy(&classifier->threshold, s, sizeof(classifier->threshold)); s += sizeof(classifier->threshold);
classifier->feature = (ccv_bbf_feature_t*)ccmalloc(classifier->count * sizeof(ccv_bbf_feature_t));
classifier->alpha = (float*)ccmalloc(classifier->count * 2 * sizeof(float));
memcpy(classifier->feature, s, classifier->count * sizeof(ccv_bbf_feature_t)); s += classifier->count * sizeof(ccv_bbf_feature_t);
memcpy(classifier->alpha, s, classifier->count * 2 * sizeof(float)); s += classifier->count * 2 * sizeof(float);
}
return cascade;
}
/* Serialize a BBF cascade into buffer `s'. Always returns the number of
 * bytes required; data is written only when slen is large enough, so callers
 * may invoke it with slen == 0 first to size the buffer. The layout matches
 * ccv_bbf_classifier_cascade_read_binary(). */
int ccv_bbf_classifier_cascade_write_binary(ccv_bbf_classifier_cascade_t* cascade, char* s, int slen)
{
int i;
/* first pass: compute the total serialized size */
int len = sizeof(cascade->count) + sizeof(cascade->size.width) + sizeof(cascade->size.height);
ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier;
for (i = 0; i < cascade->count; i++, classifier++)
len += sizeof(classifier->count) + sizeof(classifier->threshold) + classifier->count * sizeof(ccv_bbf_feature_t) + classifier->count * 2 * sizeof(float);
if (slen >= len)
{
/* second pass: emit the header, then each stage in order */
memcpy(s, &cascade->count, sizeof(cascade->count)); s += sizeof(cascade->count);
memcpy(s, &cascade->size.width, sizeof(cascade->size.width)); s += sizeof(cascade->size.width);
memcpy(s, &cascade->size.height, sizeof(cascade->size.height)); s += sizeof(cascade->size.height);
classifier = cascade->stage_classifier;
for (i = 0; i < cascade->count; i++, classifier++)
{
memcpy(s, &classifier->count, sizeof(classifier->count)); s += sizeof(classifier->count);
memcpy(s, &classifier->threshold, sizeof(classifier->threshold)); s += sizeof(classifier->threshold);
memcpy(s, classifier->feature, classifier->count * sizeof(ccv_bbf_feature_t)); s += classifier->count * sizeof(ccv_bbf_feature_t);
memcpy(s, classifier->alpha, classifier->count * 2 * sizeof(float)); s += classifier->count * 2 * sizeof(float);
}
}
return len;
}
/* Release a cascade and everything it owns: each stage's feature and alpha
 * buffers, the stage array, then the cascade struct itself. */
void ccv_bbf_classifier_cascade_free(ccv_bbf_classifier_cascade_t* cascade)
{
	int stage;
	for (stage = 0; stage < cascade->count; stage++)
	{
		ccfree(cascade->stage_classifier[stage].alpha);
		ccfree(cascade->stage_classifier[stage].feature);
	}
	ccfree(cascade->stage_classifier);
	ccfree(cascade);
}
|
rowwise_pick.h | /*!
* Copyright (c) 2020 by Contributors
* \file array/cpu/rowwise_pick.h
* \brief Template implementation for rowwise pick operators.
*/
#ifndef DGL_ARRAY_CPU_ROWWISE_PICK_H_
#define DGL_ARRAY_CPU_ROWWISE_PICK_H_
#include <dgl/array.h>
#include <functional>
namespace dgl {
namespace aten {
namespace impl {
// User-defined function for picking elements from one row.
//
// The column indices of the given row are stored in
// [col + off, col + off + len)
//
// Similarly, the data indices are stored in
// [data + off, data + off + len)
// Data index pointer could be NULL, which means data[i] == i
//
// *ATTENTION*: This function will be invoked concurrently. Please make sure
// it is thread-safe.
//
// \param rowid The row to pick from.
// \param off Starting offset of this row.
// \param len NNZ of the row.
// \param col Pointer of the column indices.
// \param data Pointer of the data indices.
// \param out_idx Picked indices in [off, off + len).
// Per-row picking callback type; the caller later reads num_picks entries
// from out_idx, so implementations must fill all of them.
template <typename IdxType>
using PickFn = std::function<void(
IdxType rowid, IdxType off, IdxType len,
const IdxType* col, const IdxType* data,
IdxType* out_idx)>;
// Template for picking non-zero values row-wise. The implementation utilizes
// OpenMP parallelization on rows because each row performs computation independently.
//
// Returns a COO matrix whose (row, col, data) triples are the picked entries;
// rows with fewer than num_picks nonzeros (and replace == false) contribute
// all of their entries.
template <typename IdxType>
COOMatrix CSRRowWisePick(CSRMatrix mat, IdArray rows,
int64_t num_picks, bool replace, PickFn<IdxType> pick_fn) {
using namespace aten;
const IdxType* indptr = static_cast<IdxType*>(mat.indptr->data);
const IdxType* indices = static_cast<IdxType*>(mat.indices->data);
// data may be absent, in which case the data index of entry i is i itself
const IdxType* data = CSRHasData(mat)? static_cast<IdxType*>(mat.data->data) : nullptr;
const IdxType* rows_data = static_cast<IdxType*>(rows->data);
const int64_t num_rows = rows->shape[0];
const auto& ctx = mat.indptr->ctx;
// To leverage OMP parallelization, we create two arrays to store
// picked src and dst indices. Each array is of length num_rows * num_picks.
// For rows whose nnz < num_picks, the indices are padded with -1.
//
// We check whether all the given rows
// have at least num_picks number of nnz when replace is false.
//
// If the check holds, remove -1 elements by remove_if operation, which simply
// moves valid elements to the head of arrays and create a view of the original
// array. The implementation consumes a little extra memory than the actual requirement.
//
// Otherwise, directly use the row and col arrays to construct the result COO matrix.
IdArray picked_row = Full(-1, num_rows * num_picks, sizeof(IdxType) * 8, ctx);
IdArray picked_col = Full(-1, num_rows * num_picks, sizeof(IdxType) * 8, ctx);
IdArray picked_idx = Full(-1, num_rows * num_picks, sizeof(IdxType) * 8, ctx);
IdxType* picked_rdata = static_cast<IdxType*>(picked_row->data);
IdxType* picked_cdata = static_cast<IdxType*>(picked_col->data);
IdxType* picked_idata = static_cast<IdxType*>(picked_idx->data);
bool all_has_fanout = true;
if (replace) {
// with replacement every row can always yield num_picks entries
all_has_fanout = true;
} else {
#pragma omp parallel for reduction(&&:all_has_fanout)
for (int64_t i = 0; i < num_rows; ++i) {
const IdxType rid = rows_data[i];
const IdxType len = indptr[rid + 1] - indptr[rid];
all_has_fanout = all_has_fanout && (len >= num_picks);
}
}
#pragma omp parallel for
for (int64_t i = 0; i < num_rows; ++i) {
const IdxType rid = rows_data[i];
CHECK_LT(rid, mat.num_rows);
const IdxType off = indptr[rid];
const IdxType len = indptr[rid + 1] - off;
if (len <= num_picks && !replace) {
// nnz <= num_picks and w/o replacement, take all nnz
for (int64_t j = 0; j < len; ++j) {
picked_rdata[i * num_picks + j] = rid;
picked_cdata[i * num_picks + j] = indices[off + j];
picked_idata[i * num_picks + j] = data? data[off + j] : off + j;
}
} else {
// delegate the choice to the user callback, which writes picked
// positions into picked_idata; they are resolved to indices below
pick_fn(rid, off, len,
indices, data,
picked_idata + i * num_picks);
for (int64_t j = 0; j < num_picks; ++j) {
const IdxType picked = picked_idata[i * num_picks + j];
picked_rdata[i * num_picks + j] = rid;
picked_cdata[i * num_picks + j] = indices[picked];
picked_idata[i * num_picks + j] = data? data[picked] : picked;
}
}
}
if (!all_has_fanout) {
// correct the array by remove_if
IdxType* new_row_end = std::remove_if(picked_rdata, picked_rdata + num_rows * num_picks,
[] (IdxType i) { return i == -1; });
IdxType* new_col_end = std::remove_if(picked_cdata, picked_cdata + num_rows * num_picks,
[] (IdxType i) { return i == -1; });
IdxType* new_idx_end = std::remove_if(picked_idata, picked_idata + num_rows * num_picks,
[] (IdxType i) { return i == -1; });
const int64_t new_len = (new_row_end - picked_rdata);
// the three arrays are padded at the same slots, so the lengths must agree
CHECK_EQ(new_col_end - picked_cdata, new_len);
CHECK_EQ(new_idx_end - picked_idata, new_len);
picked_row = picked_row.CreateView({new_len}, picked_row->dtype);
picked_col = picked_col.CreateView({new_len}, picked_col->dtype);
picked_idx = picked_idx.CreateView({new_len}, picked_idx->dtype);
}
return COOMatrix(mat.num_rows, mat.num_cols,
picked_row, picked_col, picked_idx);
}
// Row-wise pick on a COO matrix. Slices out the requested rows, converts the
// slice to CSR (renumbering its rows to 0..n-1), reuses CSRRowWisePick, and
// finally maps the renumbered row ids back to the originals.
template <typename IdxType>
COOMatrix COORowWisePick(COOMatrix mat, IdArray rows,
                         int64_t num_picks, bool replace, PickFn<IdxType> pick_fn) {
  using namespace aten;
  const auto& sliced_csr = COOToCSR(COOSliceRows(mat, rows));
  const IdArray local_rows = Range(0, rows->shape[0], rows->dtype.bits, rows->ctx);
  const auto& result = CSRRowWisePick<IdxType>(sliced_csr, local_rows, num_picks,
                                               replace, pick_fn);
  return COOMatrix(mat.num_rows, mat.num_cols,
                   IndexSelect(rows, result.row),  // restore original row ids
                   result.col,
                   result.data);
}
} // namespace impl
} // namespace aten
} // namespace dgl
#endif // DGL_ARRAY_CPU_ROWWISE_PICK_H_
|
channel.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC H H AAA N N N N EEEEE L %
% C H H A A NN N NN N E L %
% C HHHHH AAAAA N N N N N N EEE L %
% C H H A A N NN N NN E L %
% CCCC H H A A N N N N EEEEE LLLLL %
% %
% %
% MagickCore Image Channel Methods %
% %
% Software Design %
% Cristy %
% December 2003 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/channel.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/image.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a n n e l F x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ChannelFxImage() applies a channel expression to the specified image. The
% expression consists of one or more channels, either mnemonic or numeric (e.g.
% red, 1), separated by actions as follows:
%
% <=> exchange two channels (e.g. red<=>blue)
% => copy one channel to another channel (e.g. red=>green)
% = assign a constant value to a channel (e.g. red=50%)
% , write new image channels in the specified order (e.g. red, green)
% | add a new output image for the next set of channel operations
% ; move to the next input image for the source of channel data
%
% For example, to create 3 grayscale images from the red, green, and blue
% channels of an image, use:
%
% -channel-fx "red; green; blue"
%
% A channel without an operation symbol implies separate (i.e., semicolon).
%
% The format of the ChannelFxImage method is:
%
% Image *ChannelFxImage(const Image *image,const char *expression,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o expression: A channel expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
typedef enum
{
ExtractChannelOp,
AssignChannelOp,
ExchangeChannelOp,
TransferChannelOp
} ChannelFx;
/* Apply one channel operation: for AssignChannelOp write the constant `pixel'
 * into destination_channel of every pixel; otherwise transfer the value of
 * source_channel from source_image. Helper for ChannelFxImage(); returns
 * MagickTrue on success. */
static MagickBooleanType ChannelImage(Image *destination_image,
const PixelChannel destination_channel,const ChannelFx channel_op,
const Image *source_image,const PixelChannel source_channel,
const Quantum pixel,ExceptionInfo *exception)
{
CacheView
*source_view,
*destination_view;
MagickBooleanType
status;
size_t
height,
width;
ssize_t
y;
status=MagickTrue;
source_view=AcquireVirtualCacheView(source_image,exception);
destination_view=AcquireAuthenticCacheView(destination_image,exception);
/* operate on the overlapping region when the two images differ in size */
height=MagickMin(source_image->rows,destination_image->rows);
width=MagickMin(source_image->columns,destination_image->columns);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source_image,source_image,height,1)
#endif
for (y=0; y < (ssize_t) height; y++)
{
PixelTrait
destination_traits,
source_traits;
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(destination_view,0,y,
destination_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
destination_traits=GetPixelChannelTraits(destination_image,
destination_channel);
source_traits=GetPixelChannelTraits(source_image,source_channel);
/* silently skip when either channel is undefined for its image */
if ((destination_traits == UndefinedPixelTrait) ||
(source_traits == UndefinedPixelTrait))
continue;
for (x=0; x < (ssize_t) width; x++)
{
if (channel_op == AssignChannelOp)
SetPixelChannel(destination_image,destination_channel,pixel,q);
else
SetPixelChannel(destination_image,destination_channel,
GetPixelChannel(source_image,source_channel,p),q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(destination_image);
}
if (SyncCacheViewAuthenticPixels(destination_view,exception) == MagickFalse)
status=MagickFalse;
}
destination_view=DestroyCacheView(destination_view);
source_view=DestroyCacheView(source_view);
return(status);
}
/* ChannelFxImage() parses a channel expression (see the banner comment above)
 * and applies each operation via ChannelImage(), producing one or more output
 * images. Fixes over the previous revision: the image list is now destroyed
 * (not merely re-pointed) when SetImageStorageClass() fails, and the final
 * SetPixelChannelMask() is guarded against a NULL list after a mid-loop
 * failure. Returns the first image of the result list, or NULL on error. */
MagickExport Image *ChannelFxImage(const Image *image,const char *expression,
ExceptionInfo *exception)
{
#define ChannelFxImageTag "ChannelFx/Image"
ChannelFx
channel_op;
ChannelType
channel_mask;
char
token[MagickPathExtent];
const char
*p;
const Image
*source_image;
double
pixel;
Image
*destination_image;
MagickBooleanType
status;
PixelChannel
source_channel,
destination_channel;
ssize_t
channels;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
source_image=image;
destination_image=CloneImage(source_image,0,0,MagickTrue,exception);
if (destination_image == (Image *) NULL)
return((Image *) NULL);
if (expression == (const char *) NULL)
return(destination_image);
status=SetImageStorageClass(destination_image,DirectClass,exception);
if (status == MagickFalse)
{
/* destroy the clone on failure; the previous code leaked it */
destination_image=DestroyImageList(destination_image);
return((Image *) NULL);
}
destination_channel=RedPixelChannel;
channel_mask=UndefinedChannel;
pixel=0.0;
p=(char *) expression;
(void) GetNextToken(p,&p,MagickPathExtent,token);
channel_op=ExtractChannelOp;
/* one iteration per channel term; `channels' counts terms written to the
 * current output image */
for (channels=0; *token != '\0'; )
{
ssize_t
i;
/*
Interpret channel expression.
*/
switch (*token)
{
case ',':
{
(void) GetNextToken(p,&p,MagickPathExtent,token);
break;
}
case '|':
{
/* '|' advances to the next input image (wrapping around) */
if (GetNextImageInList(source_image) != (Image *) NULL)
source_image=GetNextImageInList(source_image);
else
source_image=GetFirstImageInList(source_image);
(void) GetNextToken(p,&p,MagickPathExtent,token);
break;
}
case ';':
{
/* ';' finalizes the current output image and starts a new one */
Image
*canvas;
(void) SetPixelChannelMask(destination_image,channel_mask);
if ((channel_op == ExtractChannelOp) && (channels == 1))
{
(void) SetPixelMetaChannels(destination_image,0,exception);
(void) SetImageColorspace(destination_image,GRAYColorspace,
exception);
}
canvas=CloneImage(source_image,0,0,MagickTrue,exception);
if (canvas == (Image *) NULL)
{
destination_image=DestroyImageList(destination_image);
return(destination_image);
}
AppendImageToList(&destination_image,canvas);
destination_image=GetLastImageInList(destination_image);
status=SetImageStorageClass(destination_image,DirectClass,exception);
if (status == MagickFalse)
{
/* destroy the whole list on failure; the previous code leaked it */
destination_image=DestroyImageList(destination_image);
return((Image *) NULL);
}
(void) GetNextToken(p,&p,MagickPathExtent,token);
channels=0;
destination_channel=RedPixelChannel;
channel_mask=UndefinedChannel;
break;
}
default:
break;
}
i=ParsePixelChannelOption(token);
if (i < 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UnrecognizedChannelType","`%s'",token);
destination_image=DestroyImageList(destination_image);
return(destination_image);
}
source_channel=(PixelChannel) i;
channel_op=ExtractChannelOp;
(void) GetNextToken(p,&p,MagickPathExtent,token);
/* recognize the operator following the channel name: <=> (exchange),
 * = (assign), => (transfer); no operator means extract */
if (*token == '<')
{
channel_op=ExchangeChannelOp;
(void) GetNextToken(p,&p,MagickPathExtent,token);
}
if (*token == '=')
{
if (channel_op != ExchangeChannelOp)
channel_op=AssignChannelOp;
(void) GetNextToken(p,&p,MagickPathExtent,token);
}
if (*token == '>')
{
if (channel_op != ExchangeChannelOp)
channel_op=TransferChannelOp;
(void) GetNextToken(p,&p,MagickPathExtent,token);
}
switch (channel_op)
{
case AssignChannelOp:
case ExchangeChannelOp:
case TransferChannelOp:
{
if (channel_op == AssignChannelOp)
pixel=StringToDoubleInterval(token,(double) QuantumRange+1.0);
else
{
i=ParsePixelChannelOption(token);
if (i < 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnrecognizedChannelType","`%s'",token);
destination_image=DestroyImageList(destination_image);
return(destination_image);
}
}
destination_channel=(PixelChannel) i;
if (i >= (ssize_t) GetPixelChannels(destination_image))
(void) SetPixelMetaChannels(destination_image,(size_t) (
destination_channel-GetPixelChannels(destination_image)+1),
exception);
if (image->colorspace != UndefinedColorspace)
switch (destination_channel)
{
case RedPixelChannel:
case GreenPixelChannel:
case BluePixelChannel:
case BlackPixelChannel:
case IndexPixelChannel:
break;
case AlphaPixelChannel:
{
destination_image->alpha_trait=BlendPixelTrait;
break;
}
case CompositeMaskPixelChannel:
{
destination_image->channels=(ChannelType)
(destination_image->channels | CompositeMaskChannel);
break;
}
case ReadMaskPixelChannel:
{
destination_image->channels=(ChannelType)
(destination_image->channels | ReadMaskChannel);
break;
}
case WriteMaskPixelChannel:
{
destination_image->channels=(ChannelType)
(destination_image->channels | WriteMaskChannel);
break;
}
case MetaPixelChannel:
default:
{
(void) SetPixelMetaChannels(destination_image,(size_t) (
destination_channel-GetPixelChannels(destination_image)+1),
exception);
break;
}
}
channel_mask=(ChannelType) (channel_mask | ParseChannelOption(token));
if (((channels >= 1) || (destination_channel >= 1)) &&
(IsGrayColorspace(destination_image->colorspace) != MagickFalse))
(void) SetImageColorspace(destination_image,sRGBColorspace,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
break;
}
default:
break;
}
status=ChannelImage(destination_image,destination_channel,channel_op,
source_image,source_channel,ClampToQuantum(pixel),exception);
if (status == MagickFalse)
{
destination_image=DestroyImageList(destination_image);
break;
}
channels++;
if (channel_op == ExchangeChannelOp)
{
/* second half of the swap: copy the destination back to the source */
status=ChannelImage(destination_image,source_channel,channel_op,
source_image,destination_channel,ClampToQuantum(pixel),exception);
if (status == MagickFalse)
{
destination_image=DestroyImageList(destination_image);
break;
}
channels++;
}
switch (channel_op)
{
case ExtractChannelOp:
{
/* extracted channels land in successive output channels */
channel_mask=(ChannelType) (channel_mask |
(1UL << destination_channel));
destination_channel=(PixelChannel) (destination_channel+1);
break;
}
default:
break;
}
status=SetImageProgress(source_image,ChannelFxImageTag,p-expression,
strlen(expression));
if (status == MagickFalse)
break;
}
/* the loop may have destroyed the list on a mid-expression failure */
if (destination_image == (Image *) NULL)
return((Image *) NULL);
(void) SetPixelChannelMask(destination_image,channel_mask);
if ((channel_op == ExtractChannelOp) && (channels == 1))
{
(void) SetPixelMetaChannels(destination_image,0,exception);
(void) SetImageColorspace(destination_image,GRAYColorspace,exception);
}
return(GetFirstImageInList(destination_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m b i n e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CombineImages() combines one or more images into a single image. The
% grayscale value of the pixels of each image in the sequence is assigned in
% order to the specified channels of the combined image. The typical
% ordering would be image 1 => Red, 2 => Green, 3 => Blue, etc.
%
% The format of the CombineImages method is:
%
% Image *CombineImages(const Image *images,const ColorspaceType colorspace,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o colorspace: the image colorspace.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* CombineImages() maps the grayscale intensity of each image in the sequence
 * onto successive channels of a single combined image (1 => red, 2 => green,
 * 3 => blue, ...). Fix over the previous revision: the per-channel cache view
 * is destroyed before `continue' when its pixels cannot be read, instead of
 * being leaked. Returns the combined image or NULL on failure. */
MagickExport Image *CombineImages(const Image *image,
const ColorspaceType colorspace,ExceptionInfo *exception)
{
#define CombineImageTag "Combine/Image"
CacheView
*combine_view;
Image
*combine_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Ensure the images are the same size.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
combine_image=CloneImage(image,0,0,MagickTrue,exception);
if (combine_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(combine_image,DirectClass,exception) == MagickFalse)
{
combine_image=DestroyImage(combine_image);
return((Image *) NULL);
}
if (colorspace != UndefinedColorspace)
(void) SetImageColorspace(combine_image,colorspace,exception);
else
if (fabs(image->gamma-1.0) <= MagickEpsilon)
(void) SetImageColorspace(combine_image,RGBColorspace,exception);
else
(void) SetImageColorspace(combine_image,sRGBColorspace,exception);
/* enable alpha when the sequence has more images than base channels */
switch (combine_image->colorspace)
{
case UndefinedColorspace:
case sRGBColorspace:
{
if (GetImageListLength(image) > 3)
combine_image->alpha_trait=BlendPixelTrait;
break;
}
case LinearGRAYColorspace:
case GRAYColorspace:
{
if (GetImageListLength(image) > 1)
combine_image->alpha_trait=BlendPixelTrait;
break;
}
case CMYKColorspace:
{
if (GetImageListLength(image) > 4)
combine_image->alpha_trait=BlendPixelTrait;
break;
}
default:
break;
}
/*
Combine images.
*/
status=MagickTrue;
progress=0;
combine_view=AcquireAuthenticCacheView(combine_image,exception);
for (y=0; y < (ssize_t) combine_image->rows; y++)
{
CacheView
*image_view;
const Image
*next;
Quantum
*pixels;
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
i;
if (status == MagickFalse)
continue;
pixels=GetCacheViewAuthenticPixels(combine_view,0,y,combine_image->columns,
1,exception);
if (pixels == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
next=image;
/* each defined output channel consumes the next image in the list */
for (i=0; i < (ssize_t) GetPixelChannels(combine_image); i++)
{
ssize_t
x;
PixelChannel channel = GetPixelChannelChannel(combine_image,i);
PixelTrait traits = GetPixelChannelTraits(combine_image,channel);
if (traits == UndefinedPixelTrait)
continue;
if (next == (Image *) NULL)
continue;
image_view=AcquireVirtualCacheView(next,exception);
p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
if (p == (const Quantum *) NULL)
{
/* release the view before skipping this channel (was leaked) */
image_view=DestroyCacheView(image_view);
continue;
}
q=pixels;
for (x=0; x < (ssize_t) combine_image->columns; x++)
{
if (x < (ssize_t) next->columns)
{
/* NOTE(review): intensity is stored without ClampToQuantum --
 * confirm against upstream whether clamping is expected here */
q[i]=GetPixelIntensity(next,p);
p+=GetPixelChannels(next);
}
q+=GetPixelChannels(combine_image);
}
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
if (SyncCacheViewAuthenticPixels(combine_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,CombineImageTag,progress,
combine_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
combine_view=DestroyCacheView(combine_view);
if (status == MagickFalse)
combine_image=DestroyImage(combine_image);
return(combine_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e A l p h a C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageAlphaChannel() returns MagickFalse if the image alpha channel is
% not activated. That is, the image is RGB rather than RGBA or CMYK rather
% than CMYKA.
%
% The format of the GetImageAlphaChannel method is:
%
% MagickBooleanType GetImageAlphaChannel(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType GetImageAlphaChannel(const Image *image)
{
  /*
    Report whether the image has an active alpha channel (e.g. RGBA or
    CMYKA rather than RGB/CMYK).  Validate the signature before touching
    any other field, matching the other entry points in this file.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  return(image->alpha_trait != UndefinedPixelTrait ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p a r a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SeparateImage() separates a channel from the image and returns it as a
% grayscale image.
%
% The format of the SeparateImage method is:
%
% Image *SeparateImage(const Image *image,const ChannelType channel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the image channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SeparateImage(const Image *image,
  const ChannelType channel_type,ExceptionInfo *exception)
{
#define GetChannelBit(mask,bit) (((size_t) (mask) >> (size_t) (bit)) & 0x01)
#define SeparateImageTag "Separate/Image"
  CacheView
    *image_view,
    *separate_view;
  Image
    *separate_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  ssize_t
    y;
  /*
    Initialize separate image attributes.  The result is a grayscale copy
    of the channel(s) selected by channel_type.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  separate_image=CloneImage(image,0,0,MagickTrue,exception);
  if (separate_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(separate_image,DirectClass,exception) == MagickFalse)
    {
      separate_image=DestroyImage(separate_image);
      return((Image *) NULL);
    }
  /* The separated image is grayscale without alpha, keeping the gamma. */
  separate_image->alpha_trait=UndefinedPixelTrait;
  (void) SetImageColorspace(separate_image,GRAYColorspace,exception);
  separate_image->gamma=image->gamma;
  /*
    Separate image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  separate_view=AcquireAuthenticCacheView(separate_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;
    Quantum
      *magick_restrict q;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(separate_view,0,y,separate_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;
      /* Default to black; the last selected channel wins below. */
      SetPixelChannel(separate_image,GrayPixelChannel,(Quantum) 0,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits == UndefinedPixelTrait) ||
          (GetChannelBit(channel_type,channel) == 0))
          continue;
        SetPixelChannel(separate_image,GrayPixelChannel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(separate_image);
    }
    if (SyncCacheViewAuthenticPixels(separate_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SeparateImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  separate_view=DestroyCacheView(separate_view);
  image_view=DestroyCacheView(image_view);
  (void) SetImageChannelMask(separate_image,DefaultChannels);
  if (status == MagickFalse)
    separate_image=DestroyImage(separate_image);
  return(separate_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p a r a t e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SeparateImages() returns a separate grayscale image for each channel
% specified.
%
% The format of the SeparateImages method is:
%
% Image *SeparateImages(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SeparateImages(const Image *image,ExceptionInfo *exception)
{
  Image
    *channel_image,
    *separate_images;
  ssize_t
    n;
  /*
    Produce one grayscale image per updatable channel of the source image;
    if no channel qualifies, fall back to a single UndefinedChannel
    separation.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  separate_images=NewImageList();
  for (n=0; n < (ssize_t) GetPixelChannels(image); n++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,n);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if (traits == UndefinedPixelTrait)
      continue;
    if ((traits & UpdatePixelTrait) == 0)
      continue;
    channel_image=SeparateImage(image,(ChannelType) (1UL << channel),
      exception);
    if (channel_image != (Image *) NULL)
      AppendImageToList(&separate_images,channel_image);
  }
  if (separate_images == (Image *) NULL)
    separate_images=SeparateImage(image,UndefinedChannel,exception);
  return(separate_images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e A l p h a C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageAlphaChannel() activates, deactivates, resets, or sets the alpha
% channel.
%
% The format of the SetImageAlphaChannel method is:
%
% MagickBooleanType SetImageAlphaChannel(Image *image,
% const AlphaChannelOption alpha_type,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o alpha_type: The alpha channel type: ActivateAlphaChannel,
% AssociateAlphaChannel, CopyAlphaChannel, DeactivateAlphaChannel,
% DisassociateAlphaChannel, ExtractAlphaChannel, OffAlphaChannel,
% OnAlphaChannel, OpaqueAlphaChannel, SetAlphaChannel, ShapeAlphaChannel,
% and TransparentAlphaChannel.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void FlattenPixelInfo(const Image *image,const PixelInfo *p,
  const double alpha,const Quantum *q,const double beta,
  Quantum *composite)
{
  double
    Da,
    gamma,
    Sa;
  ssize_t
    i;
  /*
    Compose pixel p (opacity alpha) over pixel q (opacity beta) and write
    the blended channels into composite[].
  */
  Sa=QuantumScale*alpha;
  Da=QuantumScale*beta;  /* was fused to the next statement by a stray comma */
  /* "Over" alpha: Sa + Da - Sa*Da; gamma normalizes the blended colors. */
  gamma=Sa*(-Da)+Sa+Da;
  gamma=PerceptibleReciprocal(gamma);
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if (traits == UndefinedPixelTrait)
      continue;
    switch (channel)
    {
      case RedPixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->red,alpha));
        break;
      }
      case GreenPixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->green,alpha));
        break;
      }
      case BluePixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->blue,alpha));
        break;
      }
      case BlackPixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->black,alpha));
        break;
      }
      case AlphaPixelChannel:
      {
        composite[i]=ClampToQuantum(QuantumRange*(Sa*(-Da)+Sa+Da));
        break;
      }
      default:
        break;
    }
  }
}
MagickExport MagickBooleanType SetImageAlphaChannel(Image *image,
  const AlphaChannelOption alpha_type,ExceptionInfo *exception)
{
  CacheView
    *image_view;
  MagickBooleanType
    status;
  ssize_t
    y;
  /*
    Activate, deactivate, reset, or set the image alpha channel as selected
    by alpha_type; only the variants that need it rewrite pixel data.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  status=MagickTrue;
  switch (alpha_type)
  {
    case ActivateAlphaChannel:
    {
      /* Mark alpha active; existing pixel values are left untouched. */
      if (image->alpha_trait == BlendPixelTrait)
        return(status);
      image->alpha_trait=BlendPixelTrait;
      break;
    }
    case AssociateAlphaChannel:
    {
      /*
        Associate alpha.
      */
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        Quantum
          *magick_restrict q;
        ssize_t
          x;
        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            gamma;
          ssize_t
            i;
          /* Premultiply each updatable channel by the pixel's alpha. */
          gamma=QuantumScale*GetPixelAlpha(image,q);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (channel == AlphaPixelChannel)
              continue;
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=ClampToQuantum(gamma*q[i]);
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->alpha_trait=CopyPixelTrait;
      return(status);
    }
    case BackgroundAlphaChannel:
    {
      /*
        Set transparent pixels to background color.
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        break;
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        Quantum
          *magick_restrict q;
        ssize_t
          x;
        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if (GetPixelAlpha(image,q) == TransparentAlpha)
            {
              SetPixelViaPixelInfo(image,&image->background_color,q);
              SetPixelChannel(image,AlphaPixelChannel,TransparentAlpha,q);
            }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      return(status);
    }
    case CopyAlphaChannel:
    {
      /* Replace alpha with the pixel intensity via a self-composite. */
      image->alpha_trait=UpdatePixelTrait;
      status=CompositeImage(image,image,IntensityCompositeOp,MagickTrue,0,0,
        exception);
      break;
    }
    case DeactivateAlphaChannel:
    {
      /* Keep the alpha data but stop blending with it. */
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      image->alpha_trait=CopyPixelTrait;
      break;
    }
    case DisassociateAlphaChannel:
    {
      /*
        Disassociate alpha.
      */
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image->alpha_trait=BlendPixelTrait;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        Quantum
          *magick_restrict q;
        ssize_t
          x;
        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            gamma,
            Sa;
          ssize_t
            i;
          /* Undo premultiplication: divide each channel by alpha. */
          Sa=QuantumScale*GetPixelAlpha(image,q);
          gamma=PerceptibleReciprocal(Sa);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (channel == AlphaPixelChannel)
              continue;
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=ClampToQuantum(gamma*q[i]);
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->alpha_trait=UndefinedPixelTrait;
      return(status);
    }
    case DiscreteAlphaChannel:
    {
      /* Enable alpha as a discrete channel (no blending semantics). */
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      image->alpha_trait=UpdatePixelTrait;
      break;
    }
    case ExtractAlphaChannel:
    {
      /* Copy alpha into the color channels, then drop the alpha channel. */
      status=CompositeImage(image,image,AlphaCompositeOp,MagickTrue,0,0,
        exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case OffAlphaChannel:
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        return(status);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case OnAlphaChannel:
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      image->alpha_trait=BlendPixelTrait;
      break;
    }
    case OpaqueAlphaChannel:
    {
      status=SetImageAlpha(image,OpaqueAlpha,exception);
      break;
    }
    case RemoveAlphaChannel:
    {
      /*
        Remove transparency.
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        break;
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        Quantum
          *magick_restrict q;
        ssize_t
          x;
        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          /* Composite the pixel over the background color in place. */
          FlattenPixelInfo(image,&image->background_color,
            image->background_color.alpha,q,(double) GetPixelAlpha(image,q),q);
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->alpha_trait=image->background_color.alpha_trait;
      break;
    }
    case SetAlphaChannel:
    {
      /* Only initialize alpha if the image does not already have one. */
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      break;
    }
    case ShapeAlphaChannel:
    {
      PixelInfo
        background;
      /*
        Remove transparency.
      */
      ConformPixelInfo(image,&image->background_color,&background,exception);
      background.alpha_trait=BlendPixelTrait;
      image->alpha_trait=BlendPixelTrait;
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        PixelInfo
          pixel;
        Quantum
          *magick_restrict q;
        ssize_t
          x;
        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        pixel=background;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          /* Background color shaped by the pixel's intensity as alpha. */
          pixel.alpha=GetPixelIntensity(image,q);
          SetPixelViaPixelInfo(image,&pixel,q);
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      break;
    }
    case TransparentAlphaChannel:
    {
      status=SetImageAlpha(image,TransparentAlpha,exception);
      break;
    }
    case UndefinedAlphaChannel:
      break;
  }
  if (status == MagickFalse)
    return(status);
  (void) SetPixelChannelMask(image,image->channel_mask);
  return(SyncImagePixelCache(image,exception));
}
|
pi_omp_lock_4.c | /*
This program will numerically compute the integral of
4/(1+x*x)
from 0 to 1. The value of this integral is pi -- which
is great since it gives us an easy way to check the answer.
The is the original sequential program. It uses the timer
from the OpenMP runtime library
History: Written by Tim Mattson, 11/99.
*/
#include <stdio.h>
#include <omp.h>
static long num_steps = 1024 * 1024 * 1024;
double step;
int main () {
  const int MAX_T = 16;
  int t;
  double pi;
  double start_time, run_time;
  omp_lock_t lock;
  /*
    Numerically integrate 4/(1+x*x) over [0,1] (= pi) with the midpoint
    rule, doubling the thread count each pass and timing each run.  The
    unused file-scope duplicates of the loop variables declared inside the
    parallel region have been removed, and main now returns 0.
  */
  omp_init_lock(&lock);
  step = 1.0/(double) num_steps;
  for(t = 1; t <= MAX_T; t*=2) {
    start_time = omp_get_wtime();
    omp_set_num_threads(t);
    pi = 0.0;
#pragma omp parallel
    {
      int i, nt;
      double x, sum = 0;
      i = omp_get_thread_num();
      nt = omp_get_num_threads();
      /* Cyclic distribution of the integration steps over the team. */
      for (; i < num_steps; i += nt){
        x = (i + 0.5) * step;
        sum += 4.0/(1.0+x*x);
      }
      /* Serialize the accumulation of the per-thread partial sums. */
      omp_set_lock(&lock);
      pi += sum;
      omp_unset_lock(&lock);
    }
    pi = pi * step;
    run_time = omp_get_wtime() - start_time;
    printf("pi with %d threads: %.16lf in %lf seconds\n",t , pi,run_time);
  }
  omp_destroy_lock(&lock);
  return 0;
}
|
omp_task_red_taskloop.c | // RUN: %libomp-compile-and-run
// Parsing error until gcc8:
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8
// Parsing error until clang11:
// UNSUPPORTED: clang-10, clang-9, clang-8, clang-7
// No icc compiler support yet
// XFAIL: icc
#include <stdio.h>
#include <omp.h>
int r;
int work(int k, int l)
{
  /* Synthetic per-task workload: combine the two indices as k + l + 1. */
  int contribution = k + l + 1;
  return contribution;
}
void bar(int i) {
// Spawn two child tasks that add work(i,0) and work(i,1) into the global
// r under a task_reduction taskgroup; each task logs which thread ran it
// and which thread generated it.
#pragma omp taskgroup task_reduction(+:r)
  { int th_gen = omp_get_thread_num();
#pragma omp task in_reduction(+:r) firstprivate(i, th_gen)
    {
      r += work(i, 0);
      printf("executing task (%d, 0), th %d (gen by th %d)\n", i, omp_get_thread_num(), th_gen);
    }
#pragma omp task in_reduction(+:r) firstprivate(i, th_gen)
    {
      r += work(i, 1);
      printf("executing task (%d, 1), th %d (gen by th %d)\n", i, omp_get_thread_num(), th_gen);
    }
  }
}
int foo() {
  int i;
  int th_gen = omp_get_thread_num();
  // Outer taskgroup reduction over r for the initial bar(0) call
  // (contributes work(0,0) + work(0,1) = 3).
#pragma omp taskgroup task_reduction(+:r)
  {
    bar(0);
  }
  printf("th %d passed bar0\n", th_gen);
  // Taskloop reduction: bar(i) for i = 1..3, plus one extra child task
  // per iteration adding i itself (see the res computation below main).
#pragma omp taskloop reduction(+:r) firstprivate(th_gen)
  for (i = 1; i < 4; ++i) {
    bar(i);
    printf("th %d (gen by th %d) passed bar%d in taskloop\n", omp_get_thread_num(), th_gen, i);
#pragma omp task in_reduction(+:r)
    r += i;
  }
  return 0;
}
// res = ((1+2)+(2+3)+(3+4)+(4+5)+1+2+3) = 30
#define res 30
int main()
{
  // Expect r == res (30) once all task reductions in foo() complete.
  r = 0;
#pragma omp parallel num_threads(2)
  { // barrier ensures threads have started before tasks creation
#pragma omp barrier
    // single ensures no race condition between taskgroup reductions
#pragma omp single nowait
    foo();
  }
  if (r == res) {
    return 0;
  } else {
    printf("error r = %d (!= %d)\n", r, res);
    return 1;
  }
}
|
omp50_taskwait_depend.c | // RUN: %libomp-compile-and-run
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8
// clang does not yet support taskwait with depend clause
// clang-12 introduced parsing, but no codegen
// TODO: update expected result when codegen in clang is added
// icc does not yet support taskwait with depend clause
// TODO: update expected result when support for icc is added
// XFAIL: clang, icc
// REQUIRES: !abt
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "omp_my_sleep.h"
int a = 0, b = 0;
int task_grabbed = 0, task_can_proceed = 0;
int task2_grabbed = 0, task2_can_proceed = 0;
static void wait_on_flag(int *flag) {
  /*
    Poll *flag (with atomic reads) until another thread sets it nonzero;
    abort the test after `timelimit` seconds to avoid hanging forever.
    The flag is now checked BEFORE sleeping, so a flag that is already set
    no longer costs a spurious one-second delay.
  */
  int flag_value;
  int timelimit = 30;
  int secs = 0;
  #pragma omp atomic read
  flag_value = *flag;
  while (flag_value == 0) {
    my_sleep(1.0);
    secs++;
    if (secs == timelimit) {
      fprintf(stderr, "error: timeout in wait_on_flag()\n");
      exit(EXIT_FAILURE);
    }
    #pragma omp atomic read
    flag_value = *flag;
  }
}
static void signal_flag(int *flag) {
  /* Atomically bump the flag to release any waiter in wait_on_flag(). */
  #pragma omp atomic
  *flag += 1;
}
int main(int argc, char** argv) {
  // Verify that `taskwait depend(...)` only waits for child tasks whose
  // dependences match: two decoy tasks (no deps / deps on b) must not be
  // waited on, while the final task with depend(inout: a) must complete
  // before the matching taskwait returns.
  // Ensure two threads are running
  int num_threads = omp_get_max_threads();
  if (num_threads < 2)
    omp_set_num_threads(2);
  #pragma omp parallel shared(a)
  {
    int a_value;
    // Let us be extra safe here
    if (omp_get_num_threads() > 1) {
      #pragma omp single nowait
      {
        // Schedule independent child task that
        // waits to be flagged after subsequent taskwait depend()
        #pragma omp task
        {
          signal_flag(&task_grabbed);
          wait_on_flag(&task_can_proceed);
        }
        // Let another worker thread grab the task to execute
        wait_on_flag(&task_grabbed);
        // This should be ignored since the task above has
        // no dependency information
        #pragma omp taskwait depend(inout: a)
        // Signal the independent task to proceed
        signal_flag(&task_can_proceed);
        // Schedule child task with dependencies that taskwait does
        // not care about
        #pragma omp task depend(inout: b)
        {
          signal_flag(&task2_grabbed);
          wait_on_flag(&task2_can_proceed);
          #pragma omp atomic
          b++;
        }
        // Let another worker thread grab the task to execute
        wait_on_flag(&task2_grabbed);
        // This should be ignored since the task above has
        // dependency information on b instead of a
        #pragma omp taskwait depend(inout: a)
        // Signal the task to proceed
        signal_flag(&task2_can_proceed);
        // Generate one child task for taskwait
        #pragma omp task shared(a) depend(inout: a)
        {
          my_sleep(1.0);
          #pragma omp atomic
          a++;
        }
        #pragma omp taskwait depend(inout: a)
        #pragma omp atomic read
        a_value = a;
        if (a_value != 1) {
          fprintf(stderr, "error: dependent task was not executed before "
                          "taskwait finished\n");
          exit(EXIT_FAILURE);
        }
      } // #pragma omp single
    } // if (num_threads > 1)
  } // #pragma omp parallel
  return EXIT_SUCCESS;
}
|
conv3x3s1_winograd23_sse_BdB.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
//
#include "option.h"
#include "mat.h"
namespace ncnn{
static void conv3x3s1_winograd23_sse_BdB(const Mat& bottom_blob, Mat& top_blob, const Option& opt,
    int outch, int outh, int outw)
{
    // Winograd F(2x2,3x3) input transform: for every 4x4 input tile d of
    // each channel, compute B_t * d * B_t^T (see the commented itm[]
    // matrix below) and store the 16 transformed values per tile.
    int w = bottom_blob.w;
    //int h = bottom_blob.h;
    int inch = bottom_blob.c;
    // BEGIN transform input
    // NOTE(review): bottom_blob_tm starts as an alias of top_blob but is
    // re-created below, so the transformed tiles live in their own buffer
    // — confirm the initial alias is intentional.
    Mat bottom_blob_tm = top_blob;
    Mat bottom_blob_bordered= bottom_blob;
    {
        int w_tm = outw / 2 * 4;
        int h_tm = outh / 2 * 4;
        int nColBlocks = h_tm/4; // may be the block num in Feathercnn
        int nRowBlocks = w_tm/4;
        const int tiles = nColBlocks * nRowBlocks;
        bottom_blob_tm.create(4*4, tiles, inch, 4u, opt.workspace_allocator);
        // BT
        // const float itm[4][4] = {
        //     {1.0f, 0.0f, -1.0f, 0.0f},
        //     {0.0f, 1.0f, 1.00f, 0.0f},
        //     {0.0f, -1.0f, 1.00f, 0.0f},
        //     {0.0f, -1.0f, 0.00f, 1.0f}
        // };
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q=0; q<inch; q++)
        {
            const float* img = bottom_blob_bordered.channel(q);
            float* out_tm0 = bottom_blob_tm.channel(q);
            for (int j = 0; j < nColBlocks; j++)
            {
                // Four consecutive input rows feeding one row of tiles;
                // tiles step by 2 pixels (output tile size is 2x2).
                const float* r0 = img + w * j * 2;
                const float* r1 = r0 + w;
                const float* r2 = r1 + w;
                const float* r3 = r2 + w;
                for (int i = 0; i < nRowBlocks; i++)
                {
#if __AVX__
                    __m128 _d0, _d1, _d2, _d3;
                    __m128 _w0, _w1, _w2, _w3;
                    // load
                    _d0 = _mm_loadu_ps(r0);
                    _d1 = _mm_loadu_ps(r1);
                    _d2 = _mm_loadu_ps(r2);
                    _d3 = _mm_loadu_ps(r3);
                    // w = B_t * d
                    _w0 = _mm_sub_ps(_d0, _d2);
                    _w1 = _mm_add_ps(_d1, _d2);
                    _w2 = _mm_sub_ps(_d2, _d1);
                    _w3 = _mm_sub_ps(_d3, _d1);
                    // transpose d to d_t
                    _MM_TRANSPOSE4_PS(_w0, _w1, _w2, _w3);
                    // d = B_t * d_t
                    _d0 = _mm_sub_ps(_w0, _w2);
                    _d1 = _mm_add_ps(_w1, _w2);
                    _d2 = _mm_sub_ps(_w2, _w1);
                    _d3 = _mm_sub_ps(_w3, _w1);
                    // save to out_tm
                    _mm_storeu_ps(out_tm0, _d0);
                    _mm_storeu_ps(out_tm0+4, _d1);
                    _mm_storeu_ps(out_tm0+8, _d2);
                    _mm_storeu_ps(out_tm0+12, _d3);
#else
                    // Scalar fallback: identical math, four floats at a time.
                    float d0[4],d1[4],d2[4],d3[4];
                    float w0[4],w1[4],w2[4],w3[4];
                    float t0[4],t1[4],t2[4],t3[4];
                    // load
                    for (int n = 0; n < 4; n++)
                    {
                        d0[n] = r0[n];
                        d1[n] = r1[n];
                        d2[n] = r2[n];
                        d3[n] = r3[n];
                    }
                    // w = B_t * d
                    for (int n = 0; n < 4; n++)
                    {
                        w0[n] = d0[n] - d2[n];
                        w1[n] = d1[n] + d2[n];
                        w2[n] = d2[n] - d1[n];
                        w3[n] = d3[n] - d1[n];
                    }
                    // transpose d to d_t
                    {
                        t0[0]=w0[0]; t1[0]=w0[1]; t2[0]=w0[2]; t3[0]=w0[3];
                        t0[1]=w1[0]; t1[1]=w1[1]; t2[1]=w1[2]; t3[1]=w1[3];
                        t0[2]=w2[0]; t1[2]=w2[1]; t2[2]=w2[2]; t3[2]=w2[3];
                        t0[3]=w3[0]; t1[3]=w3[1]; t2[3]=w3[2]; t3[3]=w3[3];
                    }
                    // d = B_t * d_t
                    for (int n = 0; n < 4; n++)
                    {
                        d0[n] = t0[n] - t2[n];
                        d1[n] = t1[n] + t2[n];
                        d2[n] = t2[n] - t1[n];
                        d3[n] = t3[n] - t1[n];
                    }
                    // save to out_tm
                    for (int n = 0; n < 4; n++)
                    {
                        out_tm0[n   ] = d0[n];
                        out_tm0[n+ 4] = d1[n];
                        out_tm0[n+ 8] = d2[n];
                        out_tm0[n+12] = d3[n];
                    }
#endif
                    // Advance to the next tile (2-pixel horizontal step).
                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    r3 += 2;
                    out_tm0 += 16;
                }
            }
        }
    }
}
}
|
conv1x1s1_sse.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "option.h"
#include "mat.h"
namespace ncnn{
static void conv1x1s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    // 1x1 stride-1 convolution: every output channel is a per-pixel
    // weighted sum of all input channels (plus optional bias), with the
    // input channels consumed four at a time for better locality.
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    const float* kernel = _kernel;
    const float* bias = _bias;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);
        // Seed the output channel with its bias (0 when no bias given).
        const float bias0 = bias ? bias[p] : 0.f;
        out.fill(bias0);
        int q = 0;
        // Main loop: accumulate four input channels per pass.
        for (; q+3<inch; q+=4)
        {
            float* outptr = out;
            const float* img0 = bottom_blob.channel(q);
            const float* img1 = bottom_blob.channel(q+1);
            const float* img2 = bottom_blob.channel(q+2);
            const float* img3 = bottom_blob.channel(q+3);
            // Kernel weights for output channel p, input channels q..q+3.
            const float* kernel0 = kernel + p*inch + q;
            const float k0 = kernel0[0];
            const float k1 = kernel0[1];
            const float k2 = kernel0[2];
            const float k3 = kernel0[3];
            const float* r0 = img0;
            const float* r1 = img1;
            const float* r2 = img2;
            const float* r3 = img3;
            int size = outw * outh;
            int remain = size;
            for (; remain>0; remain--)
            {
                float sum = *r0 * k0;
                float sum1 = *r1 * k1;
                float sum2 = *r2 * k2;
                float sum3 = *r3 * k3;
                *outptr += sum + sum1 + sum2 + sum3;
                r0++;
                r1++;
                r2++;
                r3++;
                outptr++;
            }
        }
        // Tail: remaining input channels one at a time.
        for (; q<inch; q++)
        {
            float* outptr = out;
            const float* img0 = bottom_blob.channel(q);
            const float* kernel0 = kernel + p*inch + q;
            const float k0 = kernel0[0];
            const float* r0 = img0;
            int size = outw * outh;
            int remain = size;
            for (; remain>0; remain--)
            {
                float sum = *r0 * k0;
                *outptr += sum;
                r0++;
                outptr++;
            }
        }
    }
}
}
|
phpassMD5_fmt_plug.c | /*
* This software was written by Jim Fougeron jfoug AT cox dot net in 2009.
* No copyright is claimed, and the software is hereby placed in the public
* domain. In case this attempt to disclaim copyright and place the software in
* the public domain is deemed null and void, then the software is Copyright
* (c) 2009 Jim Fougeron and it is hereby released to the general public under
* the following terms:
*
* This software may be modified, redistributed, and used for any purpose,
* in source and binary forms, with or without modification.
*
* Cracks phpass 'portable' hashes, and phpBBv3 hashes, which are simply phpass
* portable, with a slightly different signature. These are 8 byte salted
* hashes, with a 1 byte 'salt' that defines the number of loops to compute.
* Internally we work with 8 byte salt (the 'real' salt), but let john track
* it as 9 byte salts to also pass in the loop count. Code works even if
* multiple loop count values within the input. PHPv5 kicked up the loop
* count, Wordpress uses same format, but even higher loop count. The loop
* count can be used to 'tune' the format, by asking to process only
* only hashes of a specific count.
*
* uses openSSL's MD5 and SIMD MD5.
*
* Code was pretty much rewritten to re-enable this format, and to deprecate
* dynamic_17. It required ported to use the new intrisic SIMD code, including
* AVX2, AVX2-512, and others, and the overall starting point for this older
* code was pretty bad. This port done August 2015, Jim Fougeron.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_phpassmd5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_phpassmd5);
#else
#include <string.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "md5.h"
#include "phpass_common.h"
//#undef _OPENMP
//#undef SIMD_COEF_32
//#undef SIMD_PARA_MD5
#ifdef _OPENMP
#define OMP_SCALE 32
#include <omp.h>
#endif
#include "simd-intrinsics.h"
#include "memdbg.h"
#define FORMAT_LABEL "phpass"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "phpass ($P$ or $H$) " MD5_ALGORITHM_NAME
#ifdef SIMD_COEF_32
#define NBKEYS (SIMD_COEF_32 * SIMD_PARA_MD5)
#endif
#define BENCHMARK_COMMENT " ($P$9)"
#ifndef MD5_BUF_SIZ
#define MD5_BUF_SIZ 16
#endif
#define DIGEST_SIZE 16
#define SALT_SIZE 8
// NOTE salts are only 8 bytes, but we tell john they are 9.
// We then take the 8 bytes of salt, and append the 1 byte of
// loop count data, making it 9.
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*MD5_BUF_SIZ*4*SIMD_COEF_32 )
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#ifdef SIMD_COEF_32
// hash with key appended (used on all steps other than first)
static uint32_t (*hash_key)[MD5_BUF_SIZ*NBKEYS];
// salt with key appended (only used in 1st step).
static uint32_t (*cursalt)[MD5_BUF_SIZ*NBKEYS];
static uint32_t (*crypt_key)[DIGEST_SIZE/4*NBKEYS];
static unsigned max_keys;
#else
static char (*crypt_key)[PHPASS_CPU_PLAINTEXT_LENGTH+1+PHPASS_BINARY_SIZE];
static char (*saved_key)[PHPASS_CPU_PLAINTEXT_LENGTH + 1];
static unsigned (*saved_len);
static unsigned char cursalt[SALT_SIZE];
#endif
static unsigned loopCnt;
static void init(struct fmt_main *self) {
	/* One-time setup: scale key counts for OpenMP, allocate buffers. */
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
#ifdef SIMD_COEF_32
	/* SIMD path: SIMD-aligned buffers, NBKEYS interleaved candidates each. */
	crypt_key = mem_calloc_align(self->params.max_keys_per_crypt/NBKEYS,
	                             sizeof(*crypt_key), MEM_ALIGN_SIMD);
	hash_key  = mem_calloc_align(self->params.max_keys_per_crypt/NBKEYS,
	                             sizeof(*hash_key), MEM_ALIGN_SIMD);
	cursalt   = mem_calloc_align(self->params.max_keys_per_crypt/NBKEYS,
	                             sizeof(*cursalt), MEM_ALIGN_SIMD);
	max_keys = self->params.max_keys_per_crypt;
#else
	/* Scalar path: per-candidate plaintext, length, and hash arrays. */
	saved_len = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_len));
	crypt_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_key));
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
#endif
}
/* Release every buffer allocated by init(); free order is irrelevant. */
static void done(void)
{
#ifdef SIMD_COEF_32
	MEM_FREE(cursalt);
	MEM_FREE(hash_key);
#else
	MEM_FREE(saved_key);
	MEM_FREE(saved_len);
#endif
	MEM_FREE(crypt_key);
}
/* Install the 9-byte salt produced by salt(): 8 real salt bytes plus one
 * loop-count character.  In the SIMD build the 8 salt bytes are broadcast
 * into the front of every key lane of every cursalt block. */
static void set_salt(void *salt)
{
#ifdef SIMD_COEF_32
int i;
uint32_t *p;
p = cursalt[0];
/* Walk one lane per key.  Lane words are interleaved SIMD_COEF_32 apart;
 * after finishing a group of SIMD_COEF_32 keys, skip the remaining
 * 15 words of each lane to reach the next interleaved block. */
for (i = 0; i < max_keys; ++i) {
if (i && (i&(SIMD_COEF_32-1)) == 0)
p += 15*SIMD_COEF_32;
/* first two 32-bit words of the lane = the 8 salt bytes */
p[0] = ((uint32_t *)salt)[0];
p[SIMD_COEF_32] = ((uint32_t *)salt)[1];
++p;
}
#else // !SIMD_COEF_32
memcpy(cursalt, salt, 8);
#endif
// compute the loop count for this salt
/* byte 8 is a base-64 digit ('0'..'z'); the iteration count is 2^digit */
loopCnt = (1 << (atoi64[ARCH_INDEX(((char*)salt)[8])]));
}
/* Store one candidate password.  SIMD build: write it directly into the two
 * interleaved MD5 input buffers (after the 8-byte salt in cursalt, after the
 * 16-byte prior hash in hash_key), add MD5 padding (0x80 terminator, zero
 * fill, bit length at bytes 56/57).  Scalar build: just save the string. */
static void set_key(char *key, int index) {
#ifdef SIMD_COEF_32
// in SIMD, we put the key into the cursalt (at offset 8),
// and into hash_key (at offset 16). We also clean both
// buffers, and put the 0x80, and the length into them.
int len = strlen(key), i, j;
unsigned char *co1 = (unsigned char*)cursalt;
unsigned char *co2 = (unsigned char*)hash_key;
for (i = 0; i < len; ++i) {
// byte by byte. Slow but easy to follow, and the
// speed here does not really matter.
co1[GETPOS(i+8,index)] = key[i];
co2[GETPOS(i+16,index)] = key[i];
}
// Place the end of string marker
co1[GETPOS(i+8,index)] = 0x80;
co2[GETPOS(i+16,index)] = 0x80;
// clean out both buffers top parts.
/* clear up to byte 56, where the MD5 length field starts */
for (j = i+9; j < 56; ++j)
co1[GETPOS(j,index)] = 0;
for (j = i+17; j < 56; ++j)
co2[GETPOS(j,index)] = 0;
// set the length in bits of salt and hash
/* message length in bits, little-endian, low two bytes only (messages
 * here are always well under 8192 bits) */
co1[GETPOS(56,index)] = ((len+8)<<3)&0xFF;
co2[GETPOS(56,index)] = ((len+16)<<3)&0xFF;
co1[GETPOS(57,index)] = ((len+8)<<3)>>8;
co2[GETPOS(57,index)] = ((len+16)<<3)>>8;
#else
int len= strlen(key);
saved_len[index]=len;
strcpy(saved_key[index], key);
#endif
}
/* Return the candidate stored by set_key().  SIMD build reconstructs it from
 * the cursalt buffer: the MD5 bit-length field (bytes 56/57) gives the total
 * message length, minus the 8 salt bytes = password length. */
static char *get_key(int index) {
#ifdef SIMD_COEF_32
unsigned char *saltb8 = (unsigned char*)cursalt;
/* static: returned pointer stays valid until the next call */
static char out[PHPASS_CPU_PLAINTEXT_LENGTH+1];
int len, i;
// get salt length (in bits)
len = saltb8[GETPOS(57,index)];
len <<= 8;
len |= saltb8[GETPOS(56,index)];
// convert to bytes.
len >>= 3;
// we skip the 8 bytes of salt (to get to password).
len -= 8;
// now grab the password.
for (i = 0; i < len; ++i)
out[i] = saltb8[GETPOS(8+i,index)];
out[i] = 0;
return out;
#else
return saved_key[index];
#endif
}
/* Quick screen: does ANY of the `count` computed hashes match the first
 * 32 bits of the target binary?  Full verification is left to cmp_one(). */
static int cmp_all(void *binary, int count) {
unsigned i = 0;
#ifdef SIMD_COEF_32
uint32_t *p;
uint32_t bin = *(uint32_t *)binary;
p = crypt_key[0];
/* word 0 of each key's digest; lanes are interleaved, so after each group
 * of SIMD_COEF_32 keys skip the other 3 digest words of the block */
for (i = 0; i < count; ++i) {
if (i && (i&(SIMD_COEF_32-1)) == 0)
p += 3*SIMD_COEF_32;
if (bin == *p++)
return 1;
}
return 0;
#else
for (i = 0; i < count; i++)
if (!memcmp(binary, crypt_key[i], PHPASS_BINARY_SIZE))
return 1;
return 0;
#endif
}
/* cmp_one() already compares the full 16-byte digest, so there is nothing
 * further to check here; always report a confirmed match. */
static int cmp_exact(char *source, int index)
{
	(void) source;
	(void) index;
	return 1;
}
/* Full comparison of one candidate's 16-byte digest against the target. */
static int cmp_one(void * binary, int index)
{
#ifdef SIMD_COEF_32
/* lane within the SIMD group, and word offset of that group's 4-word
 * interleaved digest block */
int idx = index&(SIMD_COEF_32-1);
int off = (index/SIMD_COEF_32)*(4*SIMD_COEF_32);
return((((uint32_t *)binary)[0] == ((uint32_t *)crypt_key)[off+0*SIMD_COEF_32+idx]) &&
(((uint32_t *)binary)[1] == ((uint32_t *)crypt_key)[off+1*SIMD_COEF_32+idx]) &&
(((uint32_t *)binary)[2] == ((uint32_t *)crypt_key)[off+2*SIMD_COEF_32+idx]) &&
(((uint32_t *)binary)[3] == ((uint32_t *)crypt_key)[off+3*SIMD_COEF_32+idx]));
#else
return !memcmp(binary, crypt_key[index], PHPASS_BINARY_SIZE);
#endif
}
/* Compute phpass for all queued keys: md5(salt.pw), then loopCnt rounds of
 * md5(prev_hash.pw).  SIMD path processes NBKEYS keys per iteration using
 * the pre-built cursalt/hash_key message buffers; scalar path rebuilds the
 * hash+password buffer in crypt_key[index] each round. */
static int crypt_all(int *pcount, struct db_salt *salt) {
const int count = *pcount;
int loops = 1, index;
#ifdef _OPENMP
/* one loop iteration per SIMD block (or per key in the scalar build) */
loops = (count + MAX_KEYS_PER_CRYPT - 1) / MAX_KEYS_PER_CRYPT;
#pragma omp parallel for
#endif
for (index = 0; index < loops; index++)
{
unsigned Lcount;
#ifdef SIMD_COEF_32
/* first step: md5(salt.password) -> hash_key (input-format output) */
SIMDmd5body(cursalt[index], hash_key[index], NULL, SSEi_OUTPUT_AS_INP_FMT);
Lcount = loopCnt-1;
do {
SIMDmd5body(hash_key[index], hash_key[index], NULL, SSEi_OUTPUT_AS_INP_FMT);
} while (--Lcount);
// last hash goes into crypt_key
SIMDmd5body(hash_key[index], crypt_key[index], NULL, 0);
#else
MD5_CTX ctx;
MD5_Init( &ctx );
MD5_Update( &ctx, cursalt, 8 );
MD5_Update( &ctx, saved_key[index], saved_len[index] );
MD5_Final( (unsigned char *) crypt_key[index], &ctx);
/* keep "digest || password" contiguous so each round hashes it in one go */
strcpy(((char*)&(crypt_key[index]))+PHPASS_BINARY_SIZE, saved_key[index]);
Lcount = loopCnt;
do {
MD5_Init( &ctx );
MD5_Update( &ctx, crypt_key[index], PHPASS_BINARY_SIZE+saved_len[index]);
MD5_Final( (unsigned char *)&(crypt_key[index]), &ctx);
} while (--Lcount);
#endif
}
return count;
}
/* Extract the salt from a "$P$<count><8-byte-salt>..." ciphertext.
 * Returns a pointer to a static 9-byte value: the 8 raw salt bytes followed
 * by the loop-count character; the union forces word alignment so
 * salt_hash() can read it as an ARCH_WORD. */
static void * salt(char *ciphertext)
{
	static union {
		unsigned char salt[SALT_SIZE+2];
		uint32_t x;
	} x;
	unsigned char *out = x.salt;

	/* the 'real' 8 bytes of salt start right after "$P$<count>" */
	memcpy(out, ciphertext + 4, 8);
	/* append the 1 byte of loop-count information, then NUL-terminate */
	out[8] = ciphertext[3];
	out[9] = '\0';
	return out;
}
#ifdef SIMD_COEF_32
/* Locate word 0 of key `index`'s digest in the interleaved crypt_key layout:
 * lane within the SIMD group + start of that group's 4-word block. */
#define SIMD_INDEX (index&(SIMD_COEF_32-1))+(unsigned int)index/SIMD_COEF_32*SIMD_COEF_32*4
/* Hash-table lookup helpers: successive PH_MASK_n widths of digest word 0. */
static int get_hash_0(int index) { return ((uint32_t*)crypt_key)[SIMD_INDEX] & PH_MASK_0; }
static int get_hash_1(int index) { return ((uint32_t*)crypt_key)[SIMD_INDEX] & PH_MASK_1; }
static int get_hash_2(int index) { return ((uint32_t*)crypt_key)[SIMD_INDEX] & PH_MASK_2; }
static int get_hash_3(int index) { return ((uint32_t*)crypt_key)[SIMD_INDEX] & PH_MASK_3; }
static int get_hash_4(int index) { return ((uint32_t*)crypt_key)[SIMD_INDEX] & PH_MASK_4; }
static int get_hash_5(int index) { return ((uint32_t*)crypt_key)[SIMD_INDEX] & PH_MASK_5; }
static int get_hash_6(int index) { return ((uint32_t*)crypt_key)[SIMD_INDEX] & PH_MASK_6; }
#else
/* Scalar layout: digest word 0 is simply the first word of crypt_key[index]. */
static int get_hash_0(int index) { return ((uint32_t*)(crypt_key[index]))[0] & PH_MASK_0; }
static int get_hash_1(int index) { return ((uint32_t*)(crypt_key[index]))[0] & PH_MASK_1; }
static int get_hash_2(int index) { return ((uint32_t*)(crypt_key[index]))[0] & PH_MASK_2; }
static int get_hash_3(int index) { return ((uint32_t*)(crypt_key[index]))[0] & PH_MASK_3; }
static int get_hash_4(int index) { return ((uint32_t*)(crypt_key[index]))[0] & PH_MASK_4; }
static int get_hash_5(int index) { return ((uint32_t*)(crypt_key[index]))[0] & PH_MASK_5; }
static int get_hash_6(int index) { return ((uint32_t*)(crypt_key[index]))[0] & PH_MASK_6; }
#endif
static int salt_hash(void *salt)
{
return *((ARCH_WORD *)salt) & 0x3FF;
}
/* Format descriptor registered with john: static parameters first, then the
 * method table wiring the functions above into the cracking loop. */
struct fmt_main fmt_phpassmd5 = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PHPASS_CPU_PLAINTEXT_LENGTH,
PHPASS_BINARY_SIZE,
PHPASS_BINARY_ALIGN,
/* 8 salt bytes + 1 loop-count byte (see note at SALT_SIZE above) */
SALT_SIZE+1,
PHPASS_SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
FMT_OMP |
#endif
FMT_CASE | FMT_8_BIT,
{
/* tunable-cost name reported to the user */
"iteration count",
},
{ FORMAT_TAG, FORMAT_TAG2, FORMAT_TAG3 },
phpass_common_tests_39
}, {
init,
done,
fmt_default_reset,
phpass_common_prepare,
phpass_common_valid,
phpass_common_split,
phpass_common_binary,
salt,
{
phpass_common_iteration_count,
},
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
GB_unop__frexpe_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__frexpe_fp32_fp32)
// op(A') function: GB (_unop_tran__frexpe_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = GB_frexpef (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_frexpef (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = GB_frexpef (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FREXPE || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx = GB_frexpef (Ax): apply the FREXPE unary op elementwise to a float
 * array.  Auto-generated kernel; do not hand-edit the generated copy. */
GrB_Info GB (_unop_apply__frexpe_fp32_fp32)
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
/* full/sparse case: every one of the anz entries is present */
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = aij ;
Cx [p] = GB_frexpef (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
float aij = Ax [p] ;
float z = aij ;
Cx [p] = GB_frexpef (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = GB_frexpef (A'): transpose and apply the op.  The loop body is the
 * shared template GB_unop_transpose.c, driven by the GB_* macros above. */
GrB_Info GB (_unop_tran__frexpe_fp32_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
channel.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC H H AAA N N N N EEEEE L %
% C H H A A NN N NN N E L %
% C HHHHH AAAAA N N N N N N EEE L %
% C H H A A N NN N NN E L %
% CCCC H H A A N N N N EEEEE LLLLL %
% %
% %
% MagickCore Image Channel Methods %
% %
% Software Design %
% Cristy %
% December 2003 %
% %
% %
% Copyright @ 2003 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/channel.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/image.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a n n e l F x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ChannelFxImage() applies a channel expression to the specified image. The
% expression consists of one or more channels, either mnemonic or numeric (e.g.
% red, 1), separated by actions as follows:
%
% <=> exchange two channels (e.g. red<=>blue)
% => copy one channel to another channel (e.g. red=>green)
% = assign a constant value to a channel (e.g. red=50%)
% , write new image channels in the specified order (e.g. red, green)
% | add a new output image for the next set of channel operations
% ; move to the next input image for the source of channel data
%
% For example, to create 3 grayscale images from the red, green, and blue
% channels of an image, use:
%
% -channel-fx "red; green; blue"
%
% A channel without an operation symbol implies separate (i.e, semicolon).
%
% The format of the ChannelFxImage method is:
%
% Image *ChannelFxImage(const Image *image,const char *expression,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o expression: A channel expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
typedef enum
{
ExtractChannelOp,
AssignChannelOp,
ExchangeChannelOp,
TransferChannelOp
} ChannelFx;
/* Copy (or assign) one channel: for every pixel in the overlapping region,
 * write either the constant `pixel` (AssignChannelOp) or the value of
 * source_channel from source_image into destination_channel of
 * destination_image.  Row loop is OpenMP-parallel; errors set `status`
 * and remaining rows are skipped. */
static MagickBooleanType ChannelImage(Image *destination_image,
const PixelChannel destination_channel,const ChannelFx channel_op,
const Image *source_image,const PixelChannel source_channel,
const Quantum pixel,ExceptionInfo *exception)
{
CacheView
*source_view,
*destination_view;
MagickBooleanType
status;
size_t
height,
width;
ssize_t
y;
status=MagickTrue;
source_view=AcquireVirtualCacheView(source_image,exception);
destination_view=AcquireAuthenticCacheView(destination_image,exception);
/* operate only on the region both images share */
height=MagickMin(source_image->rows,destination_image->rows);
width=MagickMin(source_image->columns,destination_image->columns);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source_image,source_image,height,1)
#endif
for (y=0; y < (ssize_t) height; y++)
{
PixelTrait
destination_traits,
source_traits;
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
/* `continue` (not break) inside a parallel for: skip remaining rows */
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(destination_view,0,y,
destination_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
destination_traits=GetPixelChannelTraits(destination_image,
destination_channel);
source_traits=GetPixelChannelTraits(source_image,source_channel);
/* channels not present in either image: nothing to transfer */
if ((destination_traits == UndefinedPixelTrait) ||
(source_traits == UndefinedPixelTrait))
continue;
for (x=0; x < (ssize_t) width; x++)
{
if (channel_op == AssignChannelOp)
SetPixelChannel(destination_image,destination_channel,pixel,q);
else
SetPixelChannel(destination_image,destination_channel,
GetPixelChannel(source_image,source_channel,p),q);
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(destination_image);
}
if (SyncCacheViewAuthenticPixels(destination_view,exception) == MagickFalse)
status=MagickFalse;
}
destination_view=DestroyCacheView(destination_view);
source_view=DestroyCacheView(source_view);
return(status);
}
/* Parse a channel expression (see the method comment above) and apply each
 * channel operation in turn, producing one or more result images.  The
 * parser walks tokens: ','/'|'/';' control image flow, '<' '=' '>' select
 * the operation, anything else names a channel. */
MagickExport Image *ChannelFxImage(const Image *image,const char *expression,
ExceptionInfo *exception)
{
#define ChannelFxImageTag "ChannelFx/Image"
ChannelFx
channel_op;
ChannelType
channel_mask;
char
token[MagickPathExtent];
const char
*p;
const Image
*source_image;
double
pixel;
Image
*destination_image;
MagickBooleanType
status;
PixelChannel
source_channel,
destination_channel;
ssize_t
channels;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
source_image=image;
destination_image=CloneImage(source_image,0,0,MagickTrue,exception);
if (destination_image == (Image *) NULL)
return((Image *) NULL);
/* no expression: the clone is the result */
if (expression == (const char *) NULL)
return(destination_image);
status=SetImageStorageClass(destination_image,DirectClass,exception);
if (status == MagickFalse)
{
destination_image=GetLastImageInList(destination_image);
return((Image *) NULL);
}
destination_channel=RedPixelChannel;
channel_mask=UndefinedChannel;
pixel=0.0;
p=(char *) expression;
(void) GetNextToken(p,&p,MagickPathExtent,token);
channel_op=ExtractChannelOp;
/* token loop: one channel operation per iteration */
for (channels=0; *token != '\0'; )
{
ssize_t
i;
/*
Interpret channel expression.
*/
switch (*token)
{
case ',':
{
/* separator only: move on to the next channel token */
(void) GetNextToken(p,&p,MagickPathExtent,token);
break;
}
case '|':
{
/* advance to the next source image (wrapping to the first) */
if (GetNextImageInList(source_image) != (Image *) NULL)
source_image=GetNextImageInList(source_image);
else
source_image=GetFirstImageInList(source_image);
(void) GetNextToken(p,&p,MagickPathExtent,token);
break;
}
case ';':
{
/* finish the current output image and start a new one */
Image
*canvas;
(void) SetPixelChannelMask(destination_image,channel_mask);
if ((channel_op == ExtractChannelOp) && (channels == 1))
{
/* a single extracted channel becomes a grayscale image */
(void) SetPixelMetaChannels(destination_image,0,exception);
(void) SetImageColorspace(destination_image,GRAYColorspace,
exception);
}
canvas=CloneImage(source_image,0,0,MagickTrue,exception);
if (canvas == (Image *) NULL)
{
destination_image=DestroyImageList(destination_image);
return(destination_image);
}
AppendImageToList(&destination_image,canvas);
destination_image=GetLastImageInList(destination_image);
status=SetImageStorageClass(destination_image,DirectClass,exception);
if (status == MagickFalse)
{
destination_image=GetLastImageInList(destination_image);
return((Image *) NULL);
}
(void) GetNextToken(p,&p,MagickPathExtent,token);
channels=0;
destination_channel=RedPixelChannel;
channel_mask=UndefinedChannel;
break;
}
default:
break;
}
/* current token must now name the source channel */
i=ParsePixelChannelOption(token);
if (i < 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UnrecognizedChannelType","`%s'",token);
destination_image=DestroyImageList(destination_image);
return(destination_image);
}
source_channel=(PixelChannel) i;
channel_op=ExtractChannelOp;
(void) GetNextToken(p,&p,MagickPathExtent,token);
/* '<' '=' '>' combine into <=> (exchange), => (transfer), = (assign) */
if (*token == '<')
{
channel_op=ExchangeChannelOp;
(void) GetNextToken(p,&p,MagickPathExtent,token);
}
if (*token == '=')
{
if (channel_op != ExchangeChannelOp)
channel_op=AssignChannelOp;
(void) GetNextToken(p,&p,MagickPathExtent,token);
}
if (*token == '>')
{
if (channel_op != ExchangeChannelOp)
channel_op=TransferChannelOp;
(void) GetNextToken(p,&p,MagickPathExtent,token);
}
switch (channel_op)
{
case AssignChannelOp:
case ExchangeChannelOp:
case TransferChannelOp:
{
/* parse the right-hand side: a constant for assign, otherwise the
destination channel name */
if (channel_op == AssignChannelOp)
pixel=StringToDoubleInterval(token,(double) QuantumRange+1.0);
else
{
i=ParsePixelChannelOption(token);
if (i < 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnrecognizedChannelType","`%s'",token);
destination_image=DestroyImageList(destination_image);
return(destination_image);
}
}
destination_channel=(PixelChannel) i;
if (i >= (ssize_t) GetPixelChannels(destination_image))
(void) SetPixelMetaChannels(destination_image,(size_t) (
destination_channel-GetPixelChannels(destination_image)+1),
exception);
if (image->colorspace != UndefinedColorspace)
switch (destination_channel)
{
case RedPixelChannel:
case GreenPixelChannel:
case BluePixelChannel:
case BlackPixelChannel:
case IndexPixelChannel:
break;
case AlphaPixelChannel:
{
destination_image->alpha_trait=BlendPixelTrait;
break;
}
case CompositeMaskPixelChannel:
{
destination_image->channels=(ChannelType)
(destination_image->channels | CompositeMaskChannel);
break;
}
case ReadMaskPixelChannel:
{
destination_image->channels=(ChannelType)
(destination_image->channels | ReadMaskChannel);
break;
}
case WriteMaskPixelChannel:
{
destination_image->channels=(ChannelType)
(destination_image->channels | WriteMaskChannel);
break;
}
case MetaPixelChannel:
default:
{
/* target beyond the standard channels: grow meta channels */
(void) SetPixelMetaChannels(destination_image,(size_t) (
destination_channel-GetPixelChannels(destination_image)+1),
exception);
break;
}
}
channel_mask=(ChannelType) (channel_mask | ParseChannelOption(token));
if (((channels >= 1) || (destination_channel >= 1)) &&
(IsGrayColorspace(destination_image->colorspace) != MagickFalse))
(void) SetImageColorspace(destination_image,sRGBColorspace,exception);
(void) GetNextToken(p,&p,MagickPathExtent,token);
break;
}
default:
break;
}
status=ChannelImage(destination_image,destination_channel,channel_op,
source_image,source_channel,ClampToQuantum(pixel),exception);
if (status == MagickFalse)
{
destination_image=DestroyImageList(destination_image);
break;
}
channels++;
if (channel_op == ExchangeChannelOp)
{
/* exchange = two transfers, one in each direction */
status=ChannelImage(destination_image,source_channel,channel_op,
source_image,destination_channel,ClampToQuantum(pixel),exception);
if (status == MagickFalse)
{
destination_image=DestroyImageList(destination_image);
break;
}
channels++;
}
switch (channel_op)
{
case ExtractChannelOp:
{
/* bare channel names extract into successive output channels */
channel_mask=(ChannelType) (channel_mask |
(1UL << destination_channel));
destination_channel=(PixelChannel) (destination_channel+1);
break;
}
default:
break;
}
status=SetImageProgress(source_image,ChannelFxImageTag,p-expression,
strlen(expression));
if (status == MagickFalse)
break;
}
(void) SetPixelChannelMask(destination_image,channel_mask);
if ((channel_op == ExtractChannelOp) && (channels == 1))
{
(void) SetPixelMetaChannels(destination_image,0,exception);
(void) SetImageColorspace(destination_image,GRAYColorspace,exception);
}
return(GetFirstImageInList(destination_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m b i n e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CombineImages() combines one or more images into a single image. The
% grayscale value of the pixels of each image in the sequence is assigned in
% order to the specified channels of the combined image. The typical
% ordering would be image 1 => Red, 2 => Green, 3 => Blue, etc.
%
% The format of the CombineImages method is:
%
% Image *CombineImages(const Image *images,const ColorspaceType colorspace,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o colorspace: the image colorspace.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  CombineImages() combines the grayscale intensity of each image in the
  sequence into successive channels of a single output image (image 1 -> Red,
  image 2 -> Green, ...).  Returns the combined image, or NULL on failure.

  Fix: the per-channel virtual cache view was leaked when
  GetCacheViewVirtualPixels() failed — the `continue` skipped
  DestroyCacheView().  The view is now destroyed before continuing.
*/
MagickExport Image *CombineImages(const Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
#define CombineImageTag  "Combine/Image"

  CacheView
    *combine_view;

  Image
    *combine_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Ensure the image are the same size.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  combine_image=CloneImage(image,0,0,MagickTrue,exception);
  if (combine_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(combine_image,DirectClass,exception) == MagickFalse)
    {
      combine_image=DestroyImage(combine_image);
      return((Image *) NULL);
    }
  /*
    Choose the output colorspace: explicit request wins; otherwise linear
    gamma selects RGB, anything else sRGB.
  */
  if (colorspace != UndefinedColorspace)
    (void) SetImageColorspace(combine_image,colorspace,exception);
  else
    if (fabs(image->gamma-1.0) <= MagickEpsilon)
      (void) SetImageColorspace(combine_image,RGBColorspace,exception);
    else
      (void) SetImageColorspace(combine_image,sRGBColorspace,exception);
  /*
    If more images are supplied than the colorspace has color channels, the
    extra image feeds the alpha channel.
  */
  switch (combine_image->colorspace)
  {
    case UndefinedColorspace:
    case sRGBColorspace:
    {
      if (GetImageListLength(image) > 3)
        combine_image->alpha_trait=BlendPixelTrait;
      break;
    }
    case LinearGRAYColorspace:
    case GRAYColorspace:
    {
      if (GetImageListLength(image) > 1)
        combine_image->alpha_trait=BlendPixelTrait;
      break;
    }
    case CMYKColorspace:
    {
      if (GetImageListLength(image) > 4)
        combine_image->alpha_trait=BlendPixelTrait;
      break;
    }
    default:
      break;
  }
  /*
    Combine images.
  */
  status=MagickTrue;
  progress=0;
  combine_view=AcquireAuthenticCacheView(combine_image,exception);
  for (y=0; y < (ssize_t) combine_image->rows; y++)
  {
    CacheView
      *image_view;

    const Image
      *next;

    Quantum
      *pixels;

    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      i;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(combine_view,0,y,combine_image->columns,
      1,exception);
    if (pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    next=image;
    /*
      Channel i of the output row is filled from the intensity of image i in
      the list; extra output channels beyond the list length are left as
      cloned.
    */
    for (i=0; i < (ssize_t) GetPixelChannels(combine_image); i++)
    {
      ssize_t
        x;

      PixelChannel channel = GetPixelChannelChannel(combine_image,i);
      PixelTrait traits = GetPixelChannelTraits(combine_image,channel);
      if (traits == UndefinedPixelTrait)
        continue;
      if (next == (Image *) NULL)
        continue;
      image_view=AcquireVirtualCacheView(next,exception);
      p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          /* previously leaked: the view must be released before skipping */
          image_view=DestroyCacheView(image_view);
          continue;
        }
      q=pixels;
      for (x=0; x < (ssize_t) combine_image->columns; x++)
      {
        if (x < (ssize_t) next->columns)
          {
            q[i]=GetPixelIntensity(next,p);
            p+=GetPixelChannels(next);
          }
        q+=GetPixelChannels(combine_image);
      }
      image_view=DestroyCacheView(image_view);
      next=GetNextImageInList(next);
    }
    if (SyncCacheViewAuthenticPixels(combine_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,CombineImageTag,progress,
          combine_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  combine_view=DestroyCacheView(combine_view);
  if (status == MagickFalse)
    combine_image=DestroyImage(combine_image);
  return(combine_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e A l p h a C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageAlphaChannel() returns MagickFalse if the image alpha channel is
% not activated. That is, the image is RGB rather than RGBA or CMYK rather
% than CMYKA.
%
% The format of the GetImageAlphaChannel method is:
%
% MagickBooleanType GetImageAlphaChannel(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  GetImageAlphaChannel(): report whether the image's alpha channel is
  activated (alpha_trait set), i.e. RGBA/CMYKA rather than RGB/CMYK.
*/
MagickExport MagickBooleanType GetImageAlphaChannel(const Image *image)
{
  assert(image != (const Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (image->alpha_trait == UndefinedPixelTrait)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p a r a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SeparateImage() separates a channel from the image and returns it as a
% grayscale image.
%
% The format of the SeparateImage method is:
%
% Image *SeparateImage(const Image *image,const ChannelType channel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the image channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Extract the channels selected by channel_type into a new grayscale image:
 * for each pixel, the gray value is taken from the selected channel(s)
 * (when several bits are set, later channels overwrite earlier ones). */
MagickExport Image *SeparateImage(const Image *image,
const ChannelType channel_type,ExceptionInfo *exception)
{
#define GetChannelBit(mask,bit) (((size_t) (mask) >> (size_t) (bit)) & 0x01)
#define SeparateImageTag "Separate/Image"
CacheView
*image_view,
*separate_view;
Image
*separate_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Initialize separate image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
separate_image=CloneImage(image,0,0,MagickTrue,exception);
if (separate_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(separate_image,DirectClass,exception) == MagickFalse)
{
separate_image=DestroyImage(separate_image);
return((Image *) NULL);
}
/* result is single-channel gray with no alpha; keep the source gamma */
separate_image->alpha_trait=UndefinedPixelTrait;
(void) SetImageColorspace(separate_image,GRAYColorspace,exception);
separate_image->gamma=image->gamma;
/*
Separate image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
separate_view=AcquireAuthenticCacheView(separate_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
/* `continue` (not break) inside a parallel for: skip remaining rows */
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(separate_view,0,y,separate_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
/* default gray value 0 when no selected channel is present */
SetPixelChannel(separate_image,GrayPixelChannel,(Quantum) 0,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits == UndefinedPixelTrait) ||
(GetChannelBit(channel_type,channel) == 0))
continue;
SetPixelChannel(separate_image,GrayPixelChannel,p[i],q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(separate_image);
}
if (SyncCacheViewAuthenticPixels(separate_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SeparateImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
separate_view=DestroyCacheView(separate_view);
image_view=DestroyCacheView(image_view);
(void) SetImageChannelMask(separate_image,DefaultChannels);
if (status == MagickFalse)
separate_image=DestroyImage(separate_image);
return(separate_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p a r a t e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SeparateImages() returns a separate grayscale image for each channel
% specified.
%
% The format of the SeparateImages method is:
%
% Image *SeparateImages(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SeparateImages(const Image *image,ExceptionInfo *exception)
{
  Image
    *channel_image,
    *image_list;

  ssize_t
    n;

  /*
    Build a list with one grayscale image per channel that carries the
    update trait; fall back to a single all-channel separation when no
    channel qualifies.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image_list=NewImageList();
  for (n=0; n < (ssize_t) GetPixelChannels(image); n++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,n);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if ((traits != UndefinedPixelTrait) &&
        ((traits & UpdatePixelTrait) != 0))
      {
        channel_image=SeparateImage(image,(ChannelType) (1UL << channel),
          exception);
        if (channel_image != (Image *) NULL)
          AppendImageToList(&image_list,channel_image);
      }
  }
  if (image_list == (Image *) NULL)
    image_list=SeparateImage(image,UndefinedChannel,exception);
  return(image_list);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e A l p h a C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageAlphaChannel() activates, deactivates, resets, or sets the alpha
% channel.
%
% The format of the SetImageAlphaChannel method is:
%
% MagickBooleanType SetImageAlphaChannel(Image *image,
% const AlphaChannelOption alpha_type,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o alpha_type: The alpha channel type: ActivateAlphaChannel,
% AssociateAlphaChannel, CopyAlphaChannel, DeactivateAlphaChannel,
% DisassociateAlphaChannel, ExtractAlphaChannel, OffAlphaChannel,
% OnAlphaChannel, OpaqueAlphaChannel, SetAlphaChannel, ShapeAlphaChannel,
% and TransparentAlphaChannel.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void FlattenPixelInfo(const Image *image,const PixelInfo *p,
  const double alpha,const Quantum *q,const double beta,
  Quantum *composite)
{
  double
    Da,
    gamma,
    Sa;

  ssize_t
    i;

  /*
    Compose pixel p (with alpha `alpha`) over pixel q (with alpha `beta`),
    writing the result into `composite`.  Sa and Da are the source and
    destination alphas scaled to [0,1]; Sa+Da-Sa*Da is the composite alpha
    of the "over" operator and gamma is its reciprocal, used to normalize
    each composited channel.
  */
  Sa=QuantumScale*alpha;
  Da=QuantumScale*beta;  /* fixed: was a stray comma operator, not a semicolon */
  gamma=Sa*(-Da)+Sa+Da;
  gamma=PerceptibleReciprocal(gamma);  /* guards against a near-zero composite alpha */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if (traits == UndefinedPixelTrait)
      continue;
    switch (channel)
    {
      case RedPixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->red,alpha));
        break;
      }
      case GreenPixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->green,alpha));
        break;
      }
      case BluePixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->blue,alpha));
        break;
      }
      case BlackPixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->black,alpha));
        break;
      }
      case AlphaPixelChannel:
      {
        /* composite alpha: Sa+Da-Sa*Da, rescaled to the quantum range */
        composite[i]=ClampToQuantum(QuantumRange*(Sa*(-Da)+Sa+Da));
        break;
      }
      default:
        break;
    }
  }
}
MagickExport MagickBooleanType SetImageAlphaChannel(Image *image,
  const AlphaChannelOption alpha_type,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  status=MagickTrue;
  switch (alpha_type)
  {
    case ActivateAlphaChannel:
    {
      /*
        Enable the alpha channel; existing pixel data is left unchanged.
      */
      if (image->alpha_trait == BlendPixelTrait)
        return(status);
      image->alpha_trait=BlendPixelTrait;
      break;
    }
    case AssociateAlphaChannel:
    {
      /*
        Associate alpha: premultiply every updatable color channel by the
        pixel's alpha value.
      */
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        Quantum
          *magick_restrict q;

        ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            gamma;

          ssize_t
            i;

          /* scale factor: alpha normalized to [0,1] */
          gamma=QuantumScale*GetPixelAlpha(image,q);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (channel == AlphaPixelChannel)
              continue;
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=ClampToQuantum(gamma*q[i]);
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->alpha_trait=CopyPixelTrait;
      return(status);
    }
    case BackgroundAlphaChannel:
    {
      /*
        Set transparent pixels to background color.
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        break;
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        Quantum
          *magick_restrict q;

        ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          /* only fully transparent pixels are replaced; alpha stays 0 */
          if (GetPixelAlpha(image,q) == TransparentAlpha)
            {
              SetPixelViaPixelInfo(image,&image->background_color,q);
              SetPixelChannel(image,AlphaPixelChannel,TransparentAlpha,q);
            }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      return(status);
    }
    case CopyAlphaChannel:
    {
      /*
        Copy pixel intensity into the alpha channel.
      */
      image->alpha_trait=UpdatePixelTrait;
      status=CompositeImage(image,image,IntensityCompositeOp,MagickTrue,0,0,
        exception);
      break;
    }
    case DeactivateAlphaChannel:
    {
      /*
        Disable the alpha channel, materializing opaque alpha first if the
        channel was never defined.
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      image->alpha_trait=CopyPixelTrait;
      break;
    }
    case DisassociateAlphaChannel:
    {
      /*
        Disassociate alpha: divide every updatable color channel by the
        pixel's alpha value (inverse of AssociateAlphaChannel).
      */
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image->alpha_trait=BlendPixelTrait;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        Quantum
          *magick_restrict q;

        ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            gamma,
            Sa;

          ssize_t
            i;

          Sa=QuantumScale*GetPixelAlpha(image,q);
          /* PerceptibleReciprocal avoids dividing by a near-zero alpha */
          gamma=PerceptibleReciprocal(Sa);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (channel == AlphaPixelChannel)
              continue;
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=ClampToQuantum(gamma*q[i]);
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->alpha_trait=UndefinedPixelTrait;
      return(status);
    }
    case DiscreteAlphaChannel:
    {
      /*
        Enable alpha without blending (discrete updates only).
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      image->alpha_trait=UpdatePixelTrait;
      break;
    }
    case ExtractAlphaChannel:
    {
      /*
        Copy the alpha channel into the color channels, then disable alpha.
      */
      status=CompositeImage(image,image,AlphaCompositeOp,MagickTrue,0,0,
        exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case OffAlphaChannel:
    {
      /*
        Disable the alpha channel, leaving pixel data untouched.
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        return(status);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case OnAlphaChannel:
    {
      /*
        Enable the alpha channel, initializing it to opaque if undefined.
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      image->alpha_trait=BlendPixelTrait;
      break;
    }
    case OpaqueAlphaChannel:
    {
      /*
        Force every pixel fully opaque.
      */
      status=SetImageAlpha(image,OpaqueAlpha,exception);
      break;
    }
    case RemoveAlphaChannel:
    {
      /*
        Remove transparency: flatten each pixel over the background color.
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        break;
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        Quantum
          *magick_restrict q;

        ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          /* compose the background over the pixel, in place */
          FlattenPixelInfo(image,&image->background_color,
            image->background_color.alpha,q,(double) GetPixelAlpha(image,q),q);
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->alpha_trait=image->background_color.alpha_trait;
      break;
    }
    case SetAlphaChannel:
    {
      /*
        Initialize the alpha channel to opaque only if it is undefined.
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      break;
    }
    case ShapeAlphaChannel:
    {
      PixelInfo
        background;

      /*
        Shape the image: each pixel becomes the background color with its
        alpha set from the pixel's intensity.
      */
      ConformPixelInfo(image,&image->background_color,&background,exception);
      background.alpha_trait=BlendPixelTrait;
      image->alpha_trait=BlendPixelTrait;
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        PixelInfo
          pixel;

        Quantum
          *magick_restrict q;

        ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        pixel=background;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          pixel.alpha=GetPixelIntensity(image,q);
          SetPixelViaPixelInfo(image,&pixel,q);
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      break;
    }
    case TransparentAlphaChannel:
    {
      /*
        Force every pixel fully transparent.
      */
      status=SetImageAlpha(image,TransparentAlpha,exception);
      break;
    }
    case UndefinedAlphaChannel:
      break;
  }
  if (status == MagickFalse)
    return(status);
  (void) SetPixelChannelMask(image,image->channel_mask);
  /* flush the pixel cache so the alpha changes are visible to callers */
  return(SyncImagePixelCache(image,exception));
}
|
ImagePreviewUtils.h | #ifndef CAPTURE3_IMAGE_PREVIEW_UTILS_H
#define CAPTURE3_IMAGE_PREVIEW_UTILS_H
#include <cmath>
#include <omp.h>
#include <QtGui/QImage>
#include <QtGui/QPixmap>
#include "../engine/objects/image/ImageChannel.h"
#include "../engine/objects/image/ImageSize.h"
namespace Capture3
{
static QImage generatePreview(
        const ImageChannel &imageChannel,
        const unsigned int channel = 3,
        const bool showShadowClipping = true,
        const bool showHighlightClipping = true
)
{
    // Build an 8-bit ARGB preview of the double-precision channel data.
    // channel == 3 renders the three interleaved planes as RGB; channel
    // 0/1/2 replicates that single plane into R, G and B (grayscale view).
    // Shadow clipping (< 0) is painted blue, highlight clipping (> 1) red.

    // Get image data
    const unsigned int imageArea = imageChannel.getSize().getArea();
    const unsigned int imageWidth = imageChannel.getSize().getWidth();
    const unsigned int imageHeight = imageChannel.getSize().getHeight();
    const double *imageData = imageChannel.getData();

    // Create image. NOTE(review): values are stored with alpha == 255, so
    // premultiplied and straight alpha coincide and the premultiplied
    // format is safe here.
    QImage image(imageWidth, imageHeight, QImage::Format_ARGB32_Premultiplied);

    // Get reference to QImage raw pixel data
    unsigned char *output = image.bits();

    // Source plane offsets within each interleaved 3-value pixel
    const unsigned int indexInputX = channel != 3 ? channel : 0;
    const unsigned int indexInputY = channel != 3 ? channel : 1;
    const unsigned int indexInputZ = channel != 3 ? channel : 2;

    // Iterate over pixels. Use a signed loop counter: unsigned loop
    // variables are only valid in OpenMP 3.0+ canonical loops, and some
    // compilers (e.g. MSVC, OpenMP 2.0) reject them outright.
    const long long pixelCount = (long long) imageArea;
    #pragma omp parallel for schedule(static)
    for (long long i = 0; i < pixelCount; i++) {
        // Indexes: 3 doubles in, 4 bytes (BGRA) out per pixel
        const unsigned int indexInput = (unsigned int) i * 3;
        const unsigned int indexOutput = (unsigned int) i * 4;

        // Fetch color values
        const double valueX = imageData[indexInput + indexInputX];
        const double valueY = imageData[indexInput + indexInputY];
        const double valueZ = imageData[indexInput + indexInputZ];

        // Round to 8-bit and clamp to [0, 255]
        auto colorR = (int) lround(valueX * 255.0);
        auto colorG = (int) lround(valueY * 255.0);
        auto colorB = (int) lround(valueZ * 255.0);
        colorR = colorR < 0 ? 0 : colorR > 255 ? 255 : colorR;
        colorG = colorG < 0 ? 0 : colorG > 255 ? 255 : colorG;
        colorB = colorB < 0 ? 0 : colorB > 255 ? 255 : colorB;

        // Show shadow and highlight clipping overlays
        if (showShadowClipping) {
            if (valueX < 0 || valueY < 0 || valueZ < 0) {
                colorR = 0;
                colorG = 0;
                colorB = 255;
            }
        }
        if (showHighlightClipping) {
            if (valueX > 1 || valueY > 1 || valueZ > 1) {
                colorR = 255;
                colorG = 0;
                colorB = 0;
            }
        }

        // Store values as B,G,R,A bytes.
        // NOTE(review): this byte order matches ARGB32 only on
        // little-endian hosts — confirm if big-endian targets matter.
        output[indexOutput + 0] = (unsigned char) colorB;
        output[indexOutput + 1] = (unsigned char) colorG;
        output[indexOutput + 2] = (unsigned char) colorR;
        output[indexOutput + 3] = 255;
    }
    return image;
}
// Preview of all three planes rendered as RGB (channel index 3 = "all").
static QImage generatePreviewImage(
        const ImageChannel &imageChannel,
        const bool showShadowClipping = true,
        const bool showHighlightClipping = true
)
{
    return generatePreview(imageChannel, 3, showShadowClipping, showHighlightClipping);
}
// Grayscale preview of the first (X) plane only.
static QImage generatePreviewImageX(
        const ImageChannel &imageChannel,
        const bool showShadowClipping = true,
        const bool showHighlightClipping = true
)
{
    return generatePreview(imageChannel, 0, showShadowClipping, showHighlightClipping);
}
// Grayscale preview of the second (Y) plane only.
static QImage generatePreviewImageY(
        const ImageChannel &imageChannel,
        const bool showShadowClipping = true,
        const bool showHighlightClipping = true
)
{
    return generatePreview(imageChannel, 1, showShadowClipping, showHighlightClipping);
}
// Grayscale preview of the third (Z) plane only.
static QImage generatePreviewImageZ(
        const ImageChannel &imageChannel,
        const bool showShadowClipping = true,
        const bool showHighlightClipping = true
)
{
    return generatePreview(imageChannel, 2, showShadowClipping, showHighlightClipping);
}
}
#endif // CAPTURE3_IMAGE_PREVIEW_UTILS_H
|
GB_unop__lnot_bool_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__lnot_bool_bool)
// op(A') function: GB (_unop_tran__lnot_bool_bool)
// C type: bool
// A type: bool
// cast: bool cij = aij
// unaryop: cij = !aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !x ;
// casting
#define GB_CAST(z, aij) \
bool z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
bool aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
bool z = aij ; \
Cx [pC] = !z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = !Ax [p] for all entries.  Auto-generated code: changes belong in
// Generator/*, not here.
GrB_Info GB (_unop_apply__lnot_bool_bool)
(
    bool *Cx,                       // Cx and Ax may be aliased
    const bool *Ax,
    const int8_t *restrict Ab,      // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full or sparse case: every one of the anz entries is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            bool aij = Ax [p] ;
            bool z = aij ;
            Cx [p] = !z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            bool aij = Ax [p] ;
            bool z = aij ;
            Cx [p] = !z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = !(A'): transpose and apply logical NOT; the actual work is in the
// shared template GB_unop_transpose.c.  Auto-generated code: do not edit.
GrB_Info GB (_unop_tran__lnot_bool_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unaryop__lnot_fp32_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_fp32_uint32
// op(A') function: GB_tran__lnot_fp32_uint32
// C type: float
// A type: uint32_t
// cast: float cij = (float) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
float z = (float) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_FP32 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (float) !(Ax [p] != 0) for all anz entries.  Auto-generated
// code: changes belong in the Generator/* templates, not here.
GrB_Info GB_unop__lnot_fp32_uint32
(
    float *Cx,              // Cx and Ax may be aliased
    uint32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // expands to: cast Ax [p] to float, then Cx [p] = !(x != 0)
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = lnot (cast (A')): transpose, typecast uint32 to float, and apply
// logical NOT via the shared template GB_unaryop_transpose.c.
// Auto-generated code: do not edit.
GrB_Info GB_tran__lnot_fp32_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
resourcemanager.c | #include "utilities/resourcemanager.h"
/* Initialize audio and texture subsystems, then bulk-load every texture
   listed in texturelist.xml.  Returns 0 on success. */
int resource_manager_init(struct ResourceManager* resource_manager, struct GPUAPI* gpu_api) {
  /* Capacity of the fixed-size staging bucket below. */
  enum { MAX_TEXTURES = 64, TEXTURE_PATH_MAX = 2048 };
  audio_manager_init(&resource_manager->audio_manager);
  texture_cache_init(&resource_manager->texture_cache);
  struct XmlNode* texture_list_node = xml_parser_load_xml_file("./assets/textures/texturelist.xml");
  const char* texture_list_key = NULL;
  struct MapIter texture_list_iter = map_iter();
  struct TextureSettingsBucket {
    struct TextureSettings texture_settings[MAX_TEXTURES];
    char file_path[MAX_TEXTURES][TEXTURE_PATH_MAX];
    struct XmlNode* node[MAX_TEXTURES];
  } texture_settings_bucket = {0};
  int total_texture_num = 0;
  /* Collect the first child node of each texture entry.  The count is
     bounded so a long texture list cannot overflow the fixed arrays. */
  while ((texture_list_key = map_next(texture_list_node->child_nodes, &texture_list_iter))) {
    if (total_texture_num >= MAX_TEXTURES)
      break;
    texture_settings_bucket.node[total_texture_num++] = array_list_get(*((struct ArrayList**)map_get(texture_list_node->child_nodes, texture_list_key)), 0);
  }
  /* Resolve each texture's full path in parallel; entries are independent. */
#pragma omp parallel for schedule(dynamic)
  for (int texture_num = 0; texture_num < total_texture_num; texture_num++) {
    char* current_node_path = xml_node_get_data(xml_node_get_child(texture_settings_bucket.node[texture_num], "path"));
    /* snprintf bounds the write and always NUL-terminates, unlike the
       previous unchecked strcpy/strcat pair. */
    snprintf(texture_settings_bucket.file_path[texture_num], TEXTURE_PATH_MAX,
             "./assets/textures/%s", current_node_path);
    texture_settings_bucket.texture_settings[texture_num] = (struct TextureSettings){.path = texture_settings_bucket.file_path[texture_num], .filter_type = FILTER_LINEAR, .mode_type = MODE_CLAMP_TO_BORDER, .mip_maps_enabled = 1, .premultiplied_alpha = 0};
  }
  texture_cache_add_bulk(&resource_manager->texture_cache, gpu_api, total_texture_num, texture_settings_bucket.texture_settings);
  xml_parser_delete(texture_list_node);
  return 0;
}
/* Release resources in the reverse order of resource_manager_init. */
void resource_manager_delete(struct ResourceManager* resource_manager, struct GPUAPI* gpu_api) {
  texture_cache_delete(&resource_manager->texture_cache, gpu_api);
  audio_manager_delete(&resource_manager->audio_manager);
}
|
comm.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Copyright (c) 2015 by Contributors
*/
#ifndef MXNET_KVSTORE_COMM_H_
#define MXNET_KVSTORE_COMM_H_
#include <dmlc/omp.h>
#include <string>
#include <algorithm>
#include <utility>
#include <limits>
#include <vector>
#include <tuple>
#include <thread>
#include "mxnet/ndarray.h"
#include "gradient_compression.h"
#include "../ndarray/ndarray_function.h"
#include "../operator/tensor/sparse_retain-inl.h"
#include "./kvstore_utils.h"
namespace mxnet {
namespace kvstore {
/**
 * \brief Interface for multiple-device communication: reducing and
 * broadcasting NDArrays across devices for a kvstore.
 */
class Comm {
 public:
  Comm() {
    pinned_ctx_ = Context::CPUPinned(0);
  }
  virtual ~Comm() { }
  /**
   * \brief init key with the data shape and storage shape
   */
  virtual void Init(int key, const NDArrayStorageType stype,
                    const TShape& shape, int dtype = mshadow::kFloat32) = 0;
  /**
   * \brief returns src[0] + .. + src[src.size()-1]
   */
  virtual const NDArray& Reduce(
      int key, const std::vector<NDArray>& src, int priority) = 0;
  /**
   * \brief copy from src to dst[i] for every i
   */
  virtual void Broadcast(
      int key, const NDArray& src,
      const std::vector<NDArray*> dst, int priority) = 0;
  /**
   * \brief broadcast src to dst[i] with target row_ids for every i
   * \param key the identifier key for the stored ndarray
   * \param src the source row_sparse ndarray to broadcast
   * \param dst a list of destination row_sparse NDArray and its target row_ids to broadcast,
            where the row_ids are expected to be unique and sorted in row_id.data()
   * \param priority the priority of the operation
   */
  virtual void BroadcastRowSparse(int key, const NDArray& src,
                                  const std::vector<std::pair<NDArray*, NDArray>>& dst,
                                  const int priority) = 0;
  /**
   * \brief return a pinned context (CPU-pinned memory, set in the ctor)
   */
  Context pinned_ctx() const {
    return pinned_ctx_;
  }
  /**
   * \brief Sets gradient compression parameters to be able to
   * perform reduce with compressed gradients
   */
  void SetGradientCompression(std::shared_ptr<GradientCompression> gc) {
    gc_ = gc;
  }

 protected:
  Context pinned_ctx_;                       // CPU-pinned context used for staging buffers
  std::shared_ptr<GradientCompression> gc_;  // optional gradient-compression settings
};
/**
* \brief an implemention of Comm that first copy data to CPU memeory, and then
* reduce there
*/
class CommCPU : public Comm {
public:
  CommCPU() {
    // Thread count read from MXNET_KVSTORE_REDUCTION_NTHREADS (default 4);
    // presumably used by the reduction kernels — confirm in ReduceSumCPUImpl.
    nthread_reduction_ = dmlc::GetEnv("MXNET_KVSTORE_REDUCTION_NTHREADS", 4);
    // Size threshold from MXNET_KVSTORE_BIGARRAY_BOUND (default 1e6 elements).
    bigarray_bound_ = dmlc::GetEnv("MXNET_KVSTORE_BIGARRAY_BOUND", 1000 * 1000);
    // TODO(junwu) delete the following data member, now for benchmark only
    is_serial_push_ = dmlc::GetEnv("MXNET_KVSTORE_SERIAL_PUSH", 0);
  }
  virtual ~CommCPU() { }
  // Register `key` by creating its dense merge buffer on the pinned CPU
  // context.  Note: `stype` is not used here — the merged buffer is always
  // created dense, with allocation deferred.
  void Init(int key, const NDArrayStorageType stype, const TShape& shape,
            int type = mshadow::kFloat32) override {
    // Delayed allocation - the dense merged buffer might not be used at all if push()
    // only sees sparse arrays
    bool delay_alloc = true;
    merge_buf_[key].merged = NDArray(shape, pinned_ctx_, delay_alloc, type);
  }
  // Sum all arrays in `src` into the merge buffer registered for `key` and
  // return a reference to the merged result.  The actual summation is
  // scheduled asynchronously on the engine.
  const NDArray& Reduce(int key, const std::vector<NDArray>& src,
                        int priority) override {
    auto& buf = merge_buf_[key];
    const auto stype = src[0].storage_type();
    // avoid extra copy for single device, but it may bring problems for
    // abnormal usage of kvstore
    if (src.size() == 1) {
      if (stype == kDefaultStorage) {
        return src[0];
      } else {
        // With 'local' kvstore, we could store the weight on CPU while compute
        // the gradient on GPU when the weight is extremely large.
        // To avoiding copying the weight to the same context of the gradient,
        // we always copy the gradient to merged buf.
        NDArray& merged = buf.merged_buf(stype);
        CopyFromTo(src[0], &merged, priority);
        return merged;
      }
    }
    NDArray& buf_merged = buf.merged_buf(stype);
    // normal dense reduce
    if (stype == kDefaultStorage) {
      std::vector<Engine::VarHandle> const_vars(src.size() - 1);
      std::vector<NDArray> reduce(src.size());
      // src[0] goes directly into the merged buffer; the rest are staged
      CopyFromTo(src[0], &buf_merged, priority);
      reduce[0] = buf_merged;
      if (buf.copy_buf.empty()) {
        // lazily allocate one pinned staging buffer per additional source
        buf.copy_buf.resize(src.size()-1);
        for (size_t j = 0; j < src.size() - 1; ++j) {
          // allocate copy buffer
          buf.copy_buf[j] = NDArray(
            src[0].shape(), pinned_ctx_, false, src[0].dtype());
        }
      }
      CHECK(stype == buf.copy_buf[0].storage_type())
        << "Storage type mismatch detected. " << stype << "(src) vs. "
        << buf.copy_buf[0].storage_type() << "(buf.copy_buf)";
      for (size_t i = 1; i < src.size(); ++i) {
        CopyFromTo(src[i], &(buf.copy_buf[i-1]), priority);
        reduce[i] = buf.copy_buf[i-1];
        const_vars[i-1] = reduce[i].var();
      }
      // schedule the in-place sum into reduce[0] (== buf_merged)
      Engine::Get()->PushAsync(
        [reduce, this](RunContext rctx, Engine::CallbackOnComplete on_complete) {
          ReduceSumCPU(reduce);
          on_complete();
        }, Context::CPU(), const_vars, {reduce[0].var()},
        FnProperty::kCPUPrioritized, priority, "KVStoreReduce");
    } else {
      // sparse reduce
      std::vector<Engine::VarHandle> const_vars(src.size());
      std::vector<NDArray> reduce(src.size());
      if (buf.copy_buf.empty()) {
        // for sparse reduce every source, including src[0], is staged
        buf.copy_buf.resize(src.size());
        for (size_t j = 0; j < src.size(); ++j) {
          buf.copy_buf[j] = NDArray(
            src[0].storage_type(), src[0].shape(), pinned_ctx_, true, src[0].dtype());
        }
      }
      CHECK(stype == buf.copy_buf[0].storage_type())
        << "Storage type mismatch detected. " << stype << "(src) vs. "
        << buf.copy_buf[0].storage_type() << "(buf.copy_buf)";
      for (size_t i = 0; i < src.size(); ++i) {
        CopyFromTo(src[i], &(buf.copy_buf[i]), priority);
        reduce[i] = buf.copy_buf[i];
        const_vars[i] = reduce[i].var();
      }
      Resource rsc = ResourceManager::Get()->Request(buf_merged.ctx(),
        ResourceRequest(ResourceRequest::kTempSpace));
      // serial row-sparse sum (benchmark mode, MXNET_KVSTORE_SERIAL_PUSH)
      // or the regular elementwise sum
      Engine::Get()->PushAsync(
        [reduce, buf_merged, rsc, this](RunContext rctx, Engine::CallbackOnComplete on_complete) {
          NDArray out = buf_merged;
          is_serial_push_?
            ReduceSumCPUExSerial(reduce, &out)
            : mxnet::ndarray::ElementwiseSum(rctx.get_stream<cpu>(), rsc, reduce, &out);
          on_complete();
        }, Context::CPU(), const_vars, {buf_merged.var(), rsc.var},
        FnProperty::kCPUPrioritized, priority, "KVStoreReduce");
    }
    return buf_merged;
  }
void Broadcast(int key, const NDArray& src,
const std::vector<NDArray*> dst, int priority) override {
int mask = src.ctx().dev_mask();
if (mask == Context::kCPU) {
for (auto d : dst) CopyFromTo(src, d, priority);
} else {
// First copy data to pinned_ctx, then broadcast.
// Note that kv.init initializes the data on pinned_ctx.
// This branch indicates push() with ndarrays on gpus were called,
// and the source is copied to gpu ctx.
// Also indicates that buffers are already initialized during push().
auto& buf = merge_buf_[key].merged_buf(src.storage_type());
CopyFromTo(src, &buf, priority);
for (auto d : dst) CopyFromTo(buf, d, priority);
}
}
  // For every (out, row_id) pair in `dst`, retain from `src` only the rows
  // listed in row_id and deliver the retained result into *out.  Both the
  // source and the row indices must live on the CPU.
  void BroadcastRowSparse(int key, const NDArray& src,
                          const std::vector<std::pair<NDArray*, NDArray>>& dst,
                          const int priority) override {
    using namespace mshadow;
    CHECK_EQ(src.storage_type(), kRowSparseStorage)
      << "BroadcastRowSparse expects row-sparse src NDArray";
    CHECK_EQ(src.ctx().dev_mask(), Context::kCPU)
      << "BroadcastRowSparse with src on gpu context not supported";
    for (size_t i = 0; i < dst.size(); ++i) {
      NDArray* out = dst[i].first;
      NDArray row_id = dst[i].second;
      CHECK_EQ(out->storage_type(), kRowSparseStorage)
        << "BroadcastRowSparse expects row_sparse dst NDArray";
      CHECK_EQ(row_id.ctx().dev_mask(), Context::kCPU)
        << "BroadcastRowSparse with row_indices on gpu context not supported";
      // retain according to unique indices
      const bool is_same_ctx = out->ctx() == src.ctx();
      const bool is_diff_var = out->var() != src.var();
      // Retain directly into *out when it is a distinct variable on the
      // same context; otherwise retain into a fresh CPU buffer and copy.
      NDArray retained_cpu = (is_same_ctx && is_diff_var) ? *out :
          NDArray(kRowSparseStorage, src.shape(), src.ctx(), true,
                  src.dtype(), src.aux_types());
      if (!is_diff_var) {
        // Warn once: pulling into the stored array itself mutates the
        // kvstore's own copy.
        common::LogOnce("The output of row_sparse_pull() on key " + std::to_string(key) +
                        "refers to the same NDArray as the one stored in KVStore."
                        "Performing row_sparse_pull() with such output is going to change the "
                        "data stored in KVStore. Incorrect result may be generated "
                        "next time row_sparse_pull() is called. To avoid such an issue,"
                        "consider create a new NDArray buffer to store the output.");
      }
      Engine::Get()->PushAsync(
        [=](RunContext rctx, Engine::CallbackOnComplete on_complete) {
          const TBlob& indices = row_id.data();
          NDArray temp = retained_cpu;  // get rid the of const qualifier
          op::SparseRetainOpForwardRspImpl<cpu>(rctx.get_stream<cpu>(),
                                                src, indices, kWriteTo,
                                                &temp);
          on_complete();
        }, Context::CPU(), {src.var(), row_id.var()}, {retained_cpu.var()},
        FnProperty::kNormal, priority, "KVStoreSparseRetain");
      // if retained_cpu == out, CopyFromTo will ignore the copy operation
      CopyFromTo(retained_cpu, out, priority);
    }
  }
private:
  // reduce sum into val[0]: gathers a raw pointer to each input's
  // contiguous buffer and hands them to ReduceSumCPUImpl, which accumulates
  // every input into in_data[0].
  inline void ReduceSumCPU(const std::vector<NDArray> &in_data) {
    MSHADOW_TYPE_SWITCH(in_data[0].dtype(), DType, {
      std::vector<DType*> dptr(in_data.size());
      for (size_t i = 0; i < in_data.size(); ++i) {
        TBlob data = in_data[i].data();
        CHECK(data.CheckContiguous());  // raw-pointer math below requires contiguity
        dptr[i] = data.FlatTo2D<cpu, DType>().dptr_;
      }
      size_t total = in_data[0].shape().Size();
      ReduceSumCPUImpl(dptr, total);
    });
  }
// Serial reduce-sum for row_sparse NDArrays: performs a k-way merge of the
// inputs' (sorted) row indices, allocates the output for the union of
// non-zero rows, and sums matching value rows into *out.
// Inputs with uninitialized storage (all-zero) are skipped.
inline void ReduceSumCPUExSerial(const std::vector<NDArray> &in, NDArray *out) {
  using namespace rowsparse;
  using namespace mshadow;
  auto stype = out->storage_type();
  CHECK_EQ(stype, kRowSparseStorage) << "Unexpected storage type " << stype;
  size_t total_num_rows = 0;
  size_t num_in = in.size();
  // skip the ones with empty indices and values
  std::vector<bool> skip(num_in, false);
  // the values tensor of the inputs
  MSHADOW_TYPE_SWITCH(out->dtype(), DType, {
    MSHADOW_IDX_TYPE_SWITCH(out->aux_type(kIdx), IType, {
      std::vector<Tensor<cpu, 2, DType>> in_vals(num_in);
      std::vector<Tensor<cpu, 1, IType>> in_indices(num_in);
      // offset to the values tensor of all inputs
      std::vector<size_t> offsets(num_in, 0);
      std::vector<size_t> num_rows(num_in, 0);
      // gather per-input views and count rows contributed by each input
      for (size_t i = 0; i < num_in; i++) {
        if (!in[i].storage_initialized()) {
          skip[i] = true;
          continue;
        }
        auto size = in[i].aux_shape(kIdx).Size();
        num_rows[i] = size;
        total_num_rows += size;
        in_vals[i] = in[i].data().FlatTo2D<cpu, DType>();
        in_indices[i] = in[i].aux_data(kIdx).FlatTo1D<cpu, IType>();
      }
      std::vector<IType> indices;
      indices.reserve(total_num_rows);
      // gather indices from all inputs
      for (size_t i = 0; i < num_in; i++) {
        for (size_t j = 0; j < num_rows[i]; j++) {
          indices.emplace_back(in_indices[i][j]);
        }
      }
      CHECK_EQ(indices.size(), total_num_rows);
      // dedup indices
      std::sort(indices.begin(), indices.end());
      indices.resize(std::unique(indices.begin(), indices.end()) - indices.begin());
      // the one left are unique non-zero rows
      size_t nnr = indices.size();
      // allocate memory for output
      out->CheckAndAlloc({Shape1(nnr)});
      auto idx_data = out->aux_data(kIdx).FlatTo1D<cpu, IType>();
      auto val_data = out->data().FlatTo2D<cpu, DType>();
      // merge pass: for each unique row, sum the matching row from every
      // input whose cursor (offsets[j]) currently points at that index.
      // NOTE(review): this relies on each input's index array being sorted
      // ascending -- the row_sparse format invariant; confirm upstream.
      for (size_t i = 0; i < nnr; i++) {
        // copy indices back
        idx_data[i] = indices[i];
        // first contributor overwrites (Copy), later ones accumulate (+=)
        bool zeros = true;
        for (size_t j = 0; j < num_in; j++) {
          if (skip[j]) continue;
          size_t offset = offsets[j];
          if (offset < num_rows[j]) {
            if (indices[i] == in_indices[j][offset]) {
              if (zeros) {
                Copy(val_data[i], in_vals[j][offset], nullptr);
                zeros = false;
              } else {
                val_data[i] += in_vals[j][offset];
              }
              offsets[j] += 1;
            }
          }
        }
      }
    });
  });
}
// Sum the [offset, offset+size) slice of every buffer in dptr into the
// first buffer (dptr[0]), processing the remaining inputs in groups of up
// to four per iteration so each pass over in_0 folds in several addends.
template<typename DType>
inline static void ReduceSumCPU(
    const std::vector<DType*> &dptr, size_t offset, index_t size) {
  using namespace mshadow;  // NOLINT(*)
  // accumulator view over the destination slice
  Tensor<cpu, 1, DType> in_0(dptr[0] + offset, Shape1(size));
  for (size_t i = 1; i < dptr.size(); i+=4) {
    // switch on how many inputs remain; 4+ takes the default arm
    switch (dptr.size() - i) {
      case 1: {
        Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
        in_0 += in_1;
        break;
      }
      case 2: {
        Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
        Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size));
        in_0 += in_1 + in_2;
        break;
      }
      case 3: {
        Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
        Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size));
        Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size));
        in_0 += in_1 + in_2 + in_3;
        break;
      }
      default: {
        // four (or the first four of more) remaining inputs
        Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
        Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size));
        Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size));
        Tensor<cpu, 1, DType> in_4(dptr[i+3] + offset, Shape1(size));
        in_0 += in_1 + in_2 + in_3 + in_4;
        break;
      }
    }
  }
}
// Driver for ReduceSumCPU: small arrays are reduced serially; arrays at
// least bigarray_bound_ elements long are split into fixed-size chunks and
// reduced by an OpenMP parallel-for (each chunk is independent).
template<typename DType>
inline void ReduceSumCPUImpl(std::vector<DType*> dptr, size_t total) {
  // chunk length per task, capped at 4096 elements
  const size_t step = std::min(bigarray_bound_, static_cast<size_t>(4 << 10));
  long ntask = (total + step - 1) / step;  // NOLINT(*)
  if (total < bigarray_bound_ || nthread_reduction_ <= 1) {
    // serial path: one pass over the whole range
    ReduceSumCPU(dptr, 0, total);
  } else {
    #pragma omp parallel for schedule(static) num_threads(nthread_reduction_)
    for (long j = 0; j < ntask; ++j) {  // NOLINT(*)
      size_t k = static_cast<size_t>(j);
      size_t begin = std::min(k * step, total);
      size_t end = std::min((k + 1) * step, total);
      // the last task must land exactly on the end of the range
      if (j == ntask - 1) CHECK_EQ(end, total);
      ReduceSumCPU(dptr, begin, static_cast<index_t>(end - begin));
    }
  }
}
/// \brief Scratch buffers used while pushing and pulling a single key.
struct BufferEntry {
  /// \brief the dense merged value
  NDArray merged;
  /// \brief host-side staging buffers for device data
  std::vector<NDArray> copy_buf;

  /// \brief Return the merge buffer matching \p stype
  /// (dense, or a lazily created row_sparse one).
  inline NDArray& merged_buf(NDArrayStorageType stype) {
    if (stype != kDefaultStorage) {
      CHECK(stype == kRowSparseStorage) << "unexpected storage type " << stype;
      // create the row-sparse buffer on first use, reusing the dense
      // buffer's shape/context/dtype metadata
      if (sparse_merged.is_none()) {
        CHECK(!merged.is_none());
        sparse_merged = NDArray(kRowSparseStorage, merged.shape(), merged.ctx(),
                                true, merged.dtype());
      }
      return sparse_merged;
    }
    return merged;
  }

 private:
  /// \brief the lazily created sparse merged value
  NDArray sparse_merged;
};
std::unordered_map<int, BufferEntry> merge_buf_;
size_t bigarray_bound_;
int nthread_reduction_;
bool is_serial_push_;
};
/**
* \brief an implementation of Comm that performs reduction on device
* directly.
*
* It is faster if the total device-to-device bandwidths is larger than
* device-to-cpu, which is often true for 4 or 8 GPUs. But it uses more device
* memory.
*/
// Comm implementation that reduces directly on device. Gradients are copied
// device-to-device into per-key merge buffers chosen to balance memory
// across GPUs; CUDA peer access is enabled when available.
class CommDevice : public Comm {
 public:
  CommDevice() {
    inited_ = false;
  }

  virtual ~CommDevice() { }

  // Record the key's shape/dtype; buffers are allocated lazily on first use.
  // `stype` is unused here -- the storage type is resolved in merged_buf().
  void Init(int key, const NDArrayStorageType stype, const TShape& shape,
            int dtype = mshadow::kFloat32) override {
    sorted_key_attrs_.emplace_back(key, shape, dtype);
    inited_ = false;
  }

  // One-time setup: allocate merge buffers over the contexts of `src` and
  // (unless disabled by MXNET_ENABLE_GPU_P2P=0) enable GPU peer access.
  void InitBuffersAndComm(const std::vector<NDArray>& src) {
    if (!inited_) {
      std::vector<Context> devs;
      for (const auto& a : src) {
        devs.push_back(a.ctx());
      }
      InitMergeBuffer(devs);
      if (dmlc::GetEnv("MXNET_ENABLE_GPU_P2P", 1)) {
        EnableP2P(devs);
      }
    }
  }

  // Row-sparse reduce: stage every input into a per-input copy buffer on the
  // merge buffer's context, then element-wise sum them into buf_merged.
  const NDArray& ReduceRowSparse(int key, const std::vector<NDArray>& src,
                                 int priority) {
    auto& buf = merge_buf_[key];
    std::vector<NDArray> reduce(src.size());
    const NDArrayStorageType stype = src[0].storage_type();
    NDArray& buf_merged = buf.merged_buf(stype);
    if (buf.copy_buf.empty()) {
      // initialize buffer for copying during reduce
      buf.copy_buf.resize(src.size());
      for (size_t j = 0; j < src.size(); ++j) {
        buf.copy_buf[j] = NDArray(stype, src[0].shape(), buf_merged.ctx(), true, src[0].dtype());
      }
    }
    CHECK(src[0].storage_type() == buf.copy_buf[0].storage_type())
        << "Storage type mismatch detected. " << src[0].storage_type() << "(src) vs. "
        << buf.copy_buf[0].storage_type() << "(buf.copy_buf)";
    for (size_t i = 0; i < src.size(); ++i) {
      CopyFromTo(src[i], &(buf.copy_buf[i]), priority);
      reduce[i] = buf.copy_buf[i];
    }
    ElementwiseSum(reduce, &buf_merged, priority);
    return buf_merged;
  }

  // Reduce `src` for `key`. Dispatches to the compressed path when gradient
  // compression is configured, and to ReduceRowSparse for sparse inputs.
  const NDArray& Reduce(int key, const std::vector<NDArray>& src,
                        int priority) override {
    // when this reduce is called from kvstore_dist, gc is not set
    // we don't do compression twice in dist_sync_device
    if ((gc_ != nullptr) && (gc_->get_type() != CompressionType::kNone)) {
      return ReduceCompressed(key, src, priority);
    }
    // avoid extra copy for single device, but it may bring problems for
    // abnormal usage of kvstore
    if (src.size() == 1) {
      return src[0];
    }
    InitBuffersAndComm(src);
    auto& buf = merge_buf_[key];
    const NDArrayStorageType stype = src[0].storage_type();
    NDArray& buf_merged = buf.merged_buf(stype);
    // normal dense reduce
    if (stype == kDefaultStorage) {
      // src[0] is copied straight into the merge buffer; only the remaining
      // src.size()-1 inputs need staging copy buffers
      CopyFromTo(src[0], &buf_merged, priority);
      std::vector<NDArray> reduce(src.size());
      reduce[0] = buf_merged;
      if (buf.copy_buf.empty()) {
        // TODO(mli) this results in large device memory usage for huge ndarray,
        // such as the largest fullc in VGG. consider to do segment reduce with
        // NDArray.Slice or gpu direct memory access. for the latter, we need to
        // remove some ctx check, and also it reduces 20% perf
        buf.copy_buf.resize(src.size()-1);
        for (size_t i = 0; i < src.size()-1; ++i) {
          buf.copy_buf[i] = NDArray(
              buf_merged.shape(), buf_merged.ctx(), false, buf_merged.dtype());
        }
      }
      for (size_t i = 0; i < src.size()-1; ++i) {
        CopyFromTo(src[i+1], &(buf.copy_buf[i]), priority);
        reduce[i+1] = buf.copy_buf[i];
      }
      ElementwiseSum(reduce, &buf_merged, priority);
    } else {
      // sparse reduce
      buf_merged = ReduceRowSparse(key, src, priority);
    }
    return buf_merged;
  }

  // Reduce with gradient compression: each input is quantized on its own
  // device, the small compressed tensor is moved to the merge context,
  // dequantized there, and the dequantized copies are summed.
  const NDArray& ReduceCompressed(int key, const std::vector<NDArray>& src,
                                  int priority) {
    InitBuffersAndComm(src);
    auto& buf = merge_buf_[key];
    std::vector<NDArray> reduce(src.size());
    if (buf.copy_buf.empty()) {
      // one buf for each context
      buf.copy_buf.resize(src.size());
      buf.compressed_recv_buf.resize(src.size());
      buf.compressed_send_buf.resize(src.size());
      buf.residual.resize(src.size());
      for (size_t i = 0; i < src.size(); ++i) {
        buf.copy_buf[i] = NDArray(buf.merged.shape(), buf.merged.ctx(),
                                  false, buf.merged.dtype());
        // residual lives on the source's context and starts at zero
        buf.residual[i] = NDArray(buf.merged.shape(), src[i].ctx(),
                                  false, buf.merged.dtype());
        buf.residual[i] = 0;
        int64_t small_size = gc_->GetCompressedSize(buf.merged.shape().Size());
        buf.compressed_recv_buf[i] = NDArray(TShape{small_size}, buf.merged.ctx(),
                                             false, buf.merged.dtype());
        buf.compressed_send_buf[i] = NDArray(TShape{small_size}, src[i].ctx(),
                                             false, buf.merged.dtype());
      }
    }
    for (size_t i = 0; i < src.size(); ++i) {
      // compress before copy
      // this is done even if the data is on same context as copy_buf because
      // we don't want the training to be biased towards data on this GPU
      gc_->Quantize(src[i], &(buf.compressed_send_buf[i]), &(buf.residual[i]), priority);
      if (buf.compressed_send_buf[i].ctx() != buf.compressed_recv_buf[i].ctx()) {
        CopyFromTo(buf.compressed_send_buf[i], &(buf.compressed_recv_buf[i]), priority);
      } else {
        // avoid memory copy when they are on same context
        buf.compressed_recv_buf[i] = buf.compressed_send_buf[i];
      }
      gc_->Dequantize(buf.compressed_recv_buf[i], &(buf.copy_buf[i]), priority);
      reduce[i] = buf.copy_buf[i];
    }
    // NOTE(review): unlike Reduce()/ReduceRowSparse(), no priority is
    // forwarded to ElementwiseSum here -- confirm this is intentional.
    ElementwiseSum(reduce, &buf.merged);
    return buf.merged;
  }

  // Dense broadcast of src to every dst. Before buffers exist, fan out from
  // one destination chosen by key; afterwards go through the merge buffer.
  void Broadcast(int key, const NDArray& src,
                 const std::vector<NDArray*> dst, int priority) override {
    if (!inited_) {
      // copy to a random device first
      int dev_id = key % dst.size();
      CopyFromTo(src, dst[dev_id], priority);
      for (size_t i = 0; i < dst.size(); ++i) {
        if (i != static_cast<size_t>(dev_id)) {
          CopyFromTo(*dst[dev_id], dst[i], priority);
        }
      }
    } else {
      auto& buf_merged = merge_buf_[key].merged_buf(src.storage_type());
      CopyFromTo(src, &buf_merged, priority);
      for (auto d : dst) {
        CopyFromTo(buf_merged, d, priority);
      }
    }
  }

  // Row-sparse broadcast: for each (out, row_id) pair, retain only the
  // requested rows of src into a scratch (or directly into *out when safe),
  // on CPU or GPU depending on the target context, then copy to *out.
  void BroadcastRowSparse(int key, const NDArray& src,
                          const std::vector<std::pair<NDArray*, NDArray>>& dst,
                          const int priority) override {
    CHECK_EQ(src.storage_type(), kRowSparseStorage)
        << "BroadcastRowSparse expects row-sparse src NDArray";
    for (size_t i = 0; i < dst.size(); ++i) {
      NDArray* out = dst[i].first;
      NDArray row_id = dst[i].second;
      CHECK_EQ(out->storage_type(), kRowSparseStorage)
          << "BroadcastRowSparse expects row_sparse dst NDArray";
      CHECK_EQ(row_id.ctx(), src.ctx())
          << "row_id and src are expected to be on the same context";
      // retain according to indices
      // write into *out directly only when it shares src's context but is
      // not the very same array; otherwise use a temporary on src's context
      const bool is_same_ctx = out->ctx() == src.ctx();
      const bool is_diff_var = out->var() != src.var();
      NDArray retained_gpu = (is_same_ctx && is_diff_var) ? *out :
          NDArray(kRowSparseStorage, out->shape(), src.ctx(), true,
                  out->dtype(), out->aux_types());
      if (!is_diff_var) {
        common::LogOnce("The output of row_sparse_pull() on key " + std::to_string(key) +
                        "refers to the same NDArray as the one stored in KVStore."
                        "Performing row_sparse_pull() with such output is going to change the "
                        "data stored in KVStore. Incorrect result may be generated "
                        "next time row_sparse_pull() is called. To avoid such an issue,"
                        "consider create a new NDArray buffer to store the output.");
      }
      bool is_gpu = retained_gpu.ctx().dev_mask() == gpu::kDevMask;
      // the async engine op reads src/row_id and writes the retained array
      Engine::Get()->PushAsync([=](RunContext rctx, Engine::CallbackOnComplete on_complete) {
          const TBlob& indices = row_id.data();
          using namespace mxnet::common;
          NDArray temp = retained_gpu;
          switch (temp.ctx().dev_mask()) {
            case cpu::kDevMask: {
              SparseRetainOpForwardRspWrapper<cpu>(rctx.get_stream<cpu>(),
                  src, indices, kWriteTo, &temp);
              break;
            }
#if MXNET_USE_CUDA
            case gpu::kDevMask: {
              SparseRetainOpForwardRspWrapper<gpu>(rctx.get_stream<gpu>(),
                  src, indices, kWriteTo, &temp);
              // wait for GPU operations to complete
              rctx.get_stream<gpu>()->Wait();
              break;
            }
#endif
            default: LOG(FATAL) << MXNET_GPU_NOT_ENABLED_ERROR;
          }
          on_complete();
        }, retained_gpu.ctx(), {src.var(), row_id.var()}, {retained_gpu.var()},
        is_gpu ? FnProperty::kGPUPrioritized : FnProperty::kCPUPrioritized,
        priority, "KVStoreSparseRetain");
      // if retained_gpu aliases *out this copy is a no-op in CopyFromTo
      CopyFromTo(retained_gpu, out, priority);
    }
  }

  using KeyAttrs = std::tuple<int, TShape, int>;

  // try to allocate buff on device evenly
  // Greedy balancing: keys are processed largest-first and each is placed on
  // the context that currently holds the fewest buffered elements.
  void InitMergeBuffer(const std::vector<Context>& devs) {
    std::sort(sorted_key_attrs_.begin(), sorted_key_attrs_.end(), [](
              const KeyAttrs& a, const KeyAttrs& b) {
      return std::get<1>(a).Size() > std::get<1>(b).Size();
    });
    // dev_id -> (context, total elements already assigned to it)
    std::unordered_map<int, std::pair<Context, size_t>> ctx_info;
    for (auto d : devs) {
      ctx_info[d.dev_id] = std::make_pair(d, 0);
    }
    for (size_t i = 0; i < sorted_key_attrs_.size(); ++i) {
      const int key = std::get<0>(sorted_key_attrs_[i]);
      const TShape& shape = std::get<1>(sorted_key_attrs_[i]);
      const int type = std::get<2>(sorted_key_attrs_[i]);
      auto& buf = merge_buf_[key];
      Context ctx;
      size_t min_size = std::numeric_limits<size_t>::max();
      for (auto it = ctx_info.begin(); it != ctx_info.end(); ++it) {
        size_t size = it->second.second;
        if (size <= min_size) {
          ctx = it->second.first;
          min_size = size;
        }
      }
      // Delayed allocation - as the dense merged buffer might not be used at all if push()
      // only sees sparse arrays
      if (buf.merged.is_none()) {
        bool delay_alloc = true;
        buf.merged = NDArray(shape, ctx, delay_alloc, type);
      }
      ctx_info[ctx.dev_id].second += shape.Size();
    }
    inited_ = true;
  }

 private:
  // Enable CUDA peer-to-peer access between every pair of GPUs in devs;
  // logs a warning matrix when some pairs cannot be enabled.
  void EnableP2P(const std::vector<Context>& devs) {
#if MXNET_USE_CUDA
    std::vector<int> gpus;
    for (const auto& d : devs) {
      if (d.dev_mask() == gpu::kDevMask) {
        gpus.push_back(d.dev_id);
      }
    }
    int n = static_cast<int>(gpus.size());
    int enabled = 0;
    std::vector<int> p2p(n*n);
    for (int i = 0; i < n; ++i) {
      cudaSetDevice(gpus[i]);
      for (int j = 0; j < n; j++) {
        int access;
        cudaDeviceCanAccessPeer(&access, gpus[i], gpus[j]);
        if (access) {
          // already-enabled pairs still count as enabled
          cudaError_t e = cudaDeviceEnablePeerAccess(gpus[j], 0);
          if (e == cudaSuccess || e == cudaErrorPeerAccessAlreadyEnabled) {
            ++enabled;
            p2p[i*n+j] = 1;
          }
        }
      }
    }
    if (enabled != n*(n-1)) {
      // print warning info if not fully enabled
      LOG(WARNING) << "only " << enabled << " out of "
                   << n*(n-1) << " GPU pairs are enabled direct access. "
                   << "It may affect the performance. "
                   << "You can set MXNET_ENABLE_GPU_P2P=0 to turn it off";
      std::string access(n, '.');
      for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
          access[j] = p2p[i*n+j] ? 'v' : '.';
        }
        LOG(WARNING) << access;
      }
    }
#endif
  }

  /// \brief temporal space for pushing and pulling
  struct BufferEntry {
    /// \brief the dense merged value for reduce and broadcast operations
    NDArray merged;
    /// \brief the gpu buffer for copy during reduce operation
    std::vector<NDArray> copy_buf;
    /// \brief the residual buffer for gradient compression
    std::vector<NDArray> residual;
    /// \brief the small buffer for compressed data in sender
    std::vector<NDArray> compressed_send_buf;
    /// \brief the small buffer for compressed data in receiver
    std::vector<NDArray> compressed_recv_buf;

    /// \brief the merged buffer for the given storage type (could be either dense or row_sparse)
    inline NDArray& merged_buf(NDArrayStorageType stype) {
      if (stype == kDefaultStorage) {
        CHECK(!merged.is_none()) << "unintialized merge buffer detected";
        return merged;
      }
      CHECK(stype == kRowSparseStorage) << "unexpected storage type " << stype;
      // check if sparse_merged is initialized
      if (sparse_merged.is_none()) {
        CHECK(!merged.is_none());
        sparse_merged = NDArray(kRowSparseStorage, merged.shape(), merged.ctx(),
                                true, merged.dtype());
      }
      return sparse_merged;
    }

   private:
    /// \brief the sparse merged value for reduce and rowsparse broadcast operations
    NDArray sparse_merged;
  };

  /// \brief per-key merge buffers
  std::unordered_map<int, BufferEntry> merge_buf_;

 public:
  // whether merge buffers / P2P have been set up
  bool inited_;
  // (key, shape, dtype) records used by InitMergeBuffer
  std::vector<KeyAttrs> sorted_key_attrs_;
};
} // namespace kvstore
} // namespace mxnet
#endif // MXNET_KVSTORE_COMM_H_
|
core_histogram.h | #pragma once
#include "util/serialization/pretty_print.h"
#include "util.h"
#include <cassert>
#include <vector>
// Compute and log a histogram of the values core[0..n).
// First finds the max value in parallel, bins all values, optionally prints
// the raw histogram, then logs "bins" of non-zero buckets whose accumulated
// counts reach ~n/num_bins, in both forward and reversed index order.
// `core` is any type indexable by int yielding ints; `num_bins` must be >= 1.
template<typename T>
void core_val_histogram(int n, T &core, bool is_print = false, int num_bins = 5) {
    assert(num_bins >= 1);
    // core-value histogram
    int max_core_val = 0;
    vector<int32_t> histogram;
#pragma omp parallel
    {
#pragma omp for reduction(max:max_core_val)
        for (auto u = 0; u < n; u++) {
            max_core_val = max(max_core_val, core[u]);
        }
#pragma omp single
        {
            log_info("max value: %d", max_core_val);
            histogram = vector<int32_t>(max_core_val + 1, 0);
        }
#pragma omp for
        for (auto u = 0; u < n; u++) {
            auto core_val = core[u];
#pragma omp atomic
            histogram[core_val]++;
        }
    }
    if (is_print) {
        if (histogram.size() < 400) {
            stringstream ss;
            ss << pretty_print_array(&histogram.front(), histogram.size());
            log_info("values histogram: %s", ss.str().c_str());
        } else {
            // large histograms: only show the two ends
            {
                stringstream ss;
                ss << pretty_print_array(&histogram.front(), 100);
                log_info("first100 values histogram: %s", ss.str().c_str());
            }
            {
                stringstream ss;
                ss << pretty_print_array(&histogram.front() + histogram.size() - 100, 100);
                log_info("last100 values histogram: %s", ss.str().c_str());
            }
        }
    }
    // Forward pass: group non-zero buckets into bins of roughly n/num_bins
    // elements; `last` is the lower bound of the bin being accumulated.
    auto &bins = histogram;
    auto bin_cnt = 0;
    int64_t acc = 0;
    auto thresh = n / num_bins;
    auto last = 0;
    for (auto i = 0; i < histogram.size(); i++) {
        if (bins[i] > 0) {
            bin_cnt++;
            acc += bins[i];
            if (acc > thresh || i == histogram.size() - 1) {
                log_info("bin[%d - %d]: %s", last, i, FormatWithCommas(acc).c_str());
                last = i + 1;
                acc = 0;
            }
        }
    }
    log_info("Reversed Bins...");
    // Reversed pass: same binning from the top index downward; here `last`
    // tracks the bin's UPPER bound, so after emitting a bin ending at i the
    // next bin's upper bound is i - 1 (the original `last = i + 1` produced
    // overlapping / out-of-range bin labels).
    last = histogram.size() - 1;
    acc = 0;
    for (int32_t i = histogram.size() - 1; i > -1; i--) {
        if (bins[i] > 0) {
            // bin_cnt is NOT incremented again here: the reversed pass visits
            // the same non-zero buckets, and counting them twice doubled the
            // "total bin counts" report.
            acc += bins[i];
            if (acc > thresh || i == 0) {
                log_info("bin[%d - %d]: %s", i, last, FormatWithCommas(acc).c_str());
                last = i - 1;
                acc = 0;
            }
        }
    }
    log_info("total bin counts: %d", bin_cnt);
}
conv_direct_hcl_x86.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include "convolution_param.h"
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <math.h>
#include <string.h>
/* Border padding for one int8 image plane.
 * Places the in_h x in_w source block into an out_h x out_w destination,
 * shifted `top` rows down and `left` columns right, and fills every border
 * cell with the constant `v`. Caller guarantees out_h >= top + in_h and
 * out_w >= left + in_w. */
static void pad_int8(int8_t* input, int8_t* output, int in_h, int in_w, int out_h, int out_w, int top, int left, int8_t v)
{
    const int8_t* src = input;
    int8_t* dst = output;
    int row = 0;

    /* rows above the source block: border only */
    while (row < top)
    {
        memset(dst, v, (size_t)out_w);
        dst += out_w;
        row++;
    }

    /* rows containing source data: left border, payload, right border */
    while (row < top + in_h)
    {
        memset(dst, v, (size_t)left);
        memcpy(dst + left, src, (size_t)in_w * sizeof(int8_t));
        memset(dst + left + in_w, v, (size_t)(out_w - left - in_w));
        src += in_w;
        dst += out_w;
        row++;
    }

    /* rows below the source block: border only */
    while (row < out_h)
    {
        memset(dst, v, (size_t)out_w);
        dst += out_w;
        row++;
    }
}
/* 3x3 stride-1 int8 direct convolution.
 * Pipeline: optional zero-padding of the input -> per-channel int32
 * accumulation -> bias add + dequantize to fp32 -> optional ReLU / ReLU6
 * (param->activation == 0 / > 0) -> requantize and clamp to int8.
 * Returns 0 on success.
 * (Removed the unused local `in_hw` from the original.) */
static int conv3x3s1_int8_sse(struct tensor* input_tensor, struct tensor* weight_tensor, struct tensor* bias_tensor,
                              struct tensor* output_tensor, struct conv_param* param, int num_thread)
{
    int inch = input_tensor->dims[1];
    int inh = input_tensor->dims[2];
    int inw = input_tensor->dims[3];
    int outch = output_tensor->dims[1];
    int outh = output_tensor->dims[2];
    int outw = output_tensor->dims[3];
    int out_hw = outh * outw;
    int out_size = output_tensor->elem_num;
    int pad_w = param->pad_w0;
    int pad_h = param->pad_h0;

    /* int32 accumulator (zeroed) and fp32 staging buffer for the whole output */
    int32_t* output_int32 = (int32_t*)sys_malloc(out_size * sizeof(int32_t));
    memset(output_int32, 0, out_size * sizeof(int32_t));
    float* output_fp32 = (float*)sys_malloc(out_size * sizeof(float));

    int8_t* output_int8 = output_tensor->data;
    int8_t* input_int8 = input_tensor->data;
    int32_t* bias_int32 = NULL;
    if (bias_tensor)
        bias_int32 = bias_tensor->data;

    /* get scale value of quantizaiton */
    float input_scale = input_tensor->scale;
    float* kernel_scales = weight_tensor->scale_list;
    float output_scale = output_tensor->scale;
    const signed char* kernel = weight_tensor->data;

    /* pading: skip entirely when no padding is requested */
    int inh_tmp = inh + pad_h + pad_h;
    int inw_tmp = inw + pad_w + pad_w;
    int8_t* input_tmp = NULL;
    if (inh_tmp == inh && inw_tmp == inw)
        input_tmp = input_int8;
    else
    {
        input_tmp = ( int8_t* )sys_malloc(inh_tmp * inw_tmp * inch * sizeof(int8_t));
#pragma omp parallel for num_threads(num_thread)
        for (int g = 0; g < inch; g++)
        {
            int8_t* pad_in = input_int8 + g * inh * inw;
            int8_t* pad_out = input_tmp + g * inh_tmp * inw_tmp;
            pad_int8(pad_in, pad_out, inh, inw, inh_tmp, inw_tmp, pad_h, pad_w, 0);
        }
    }

    /* direct 3x3 convolution: one output channel per thread, accumulating
     * all input channels into the int32 output plane */
#pragma omp parallel for num_threads(num_thread)
    for (int p = 0; p < outch; p++)
    {
        int32_t* out0 = output_int32 + p * out_hw;
        int8_t* kernel0 = (int8_t* )kernel + p * inch * 9;
        for (int q = 0; q < inch; q++)
        {
            int* outptr0 = out0;
            int8_t* img0 = input_tmp + q * inw_tmp * inh_tmp;
            /* three consecutive input rows feeding one output row */
            int8_t* r0 = img0;
            int8_t* r1 = img0 + inw_tmp;
            int8_t* r2 = img0 + inw_tmp * 2;
            for (int i = 0; i < outh; i++)
            {
                int remain = outw;
                for (; remain > 0; remain--)
                {
                    int sum0 = 0;
                    sum0 += ( int )r0[0] * kernel0[0];
                    sum0 += ( int )r0[1] * kernel0[1];
                    sum0 += ( int )r0[2] * kernel0[2];
                    sum0 += ( int )r1[0] * kernel0[3];
                    sum0 += ( int )r1[1] * kernel0[4];
                    sum0 += ( int )r1[2] * kernel0[5];
                    sum0 += ( int )r2[0] * kernel0[6];
                    sum0 += ( int )r2[1] * kernel0[7];
                    sum0 += ( int )r2[2] * kernel0[8];
                    *outptr0 += sum0;
                    r0++;
                    r1++;
                    r2++;
                    outptr0++;
                }
                /* stride 1: skip the 2 columns consumed by the 3-wide window */
                r0 += 2;
                r1 += 2;
                r2 += 2;
            }
            kernel0 += 9;
        }
    }

    /* process bias and dequant output from int32 to fp32 */
#pragma omp parallel for num_threads(num_thread)
    for (int i = 0; i < outch; i++)
    {
        for (int j = 0; j < outh * outw; j++)
        {
            int output_off = i * (outh * outw) + j;
            if (bias_tensor)
                output_fp32[output_off] = (float )(output_int32[output_off] + bias_int32[i]) * input_scale * kernel_scales[i];
            else
                output_fp32[output_off] = (float )output_int32[output_off] * input_scale * kernel_scales[i];
        }
    }

    /* process activation relu */
    if (param->activation == 0)
    {
#pragma omp parallel for num_threads(num_thread)
        for (int i = 0; i < outch; i++)
        {
            for (int j = 0; j < outh * outw; j++)
            {
                int output_off = i * (outh * outw) + j;
                if (output_fp32[output_off] < 0)
                    output_fp32[output_off] = 0;
            }
        }
    }

    /* process activation relu6 */
    if (param->activation > 0)
    {
#pragma omp parallel for num_threads(num_thread)
        for (int i = 0; i < outch; i++)
        {
            for (int j = 0; j < outh * outw; j++)
            {
                int output_off = i * (outh * outw) + j;
                if (output_fp32[output_off] < 0)
                    output_fp32[output_off] = 0;
                if (output_fp32[output_off] > 6)
                    output_fp32[output_off] = 6;
            }
        }
    }

    /* quant from fp32 to int8, clamped to the symmetric range [-127, 127] */
#pragma omp parallel for num_threads(num_thread)
    for (int i = 0; i < outch; i++)
    {
        for (int j = 0; j < outh * outw; j++)
        {
            int output_off = i * (outh * outw) + j;
            int32_t data_i32 = ( int32_t )(round(output_fp32[output_off] / output_scale));
            if (data_i32 > 127)
                data_i32 = 127;
            else if (data_i32 < -127)
                data_i32 = -127;
            output_int8[output_off] = (int8_t)data_i32;
        }
    }

    sys_free(output_int32);
    sys_free(output_fp32);
    if (!(inh_tmp == inh && inw_tmp == inw))
        sys_free(input_tmp);

    return 0;
}
/* 3x3 stride-2 int8 direct convolution.
 * Same pipeline as conv3x3s1_int8_sse (pad -> int32 accumulate -> bias +
 * dequantize -> optional ReLU/ReLU6 -> requantize to int8); the inner loop
 * advances the input window by 2 columns and `tailstep` skips to the next
 * input row pair at the end of each output row.
 * Returns 0 on success.
 * (Removed the unused local `in_hw` from the original.) */
static int conv3x3s2_int8_sse(struct tensor* input_tensor, struct tensor* weight_tensor, struct tensor* bias_tensor,
                              struct tensor* output_tensor, struct conv_param* param, int num_thread)
{
    int inch = input_tensor->dims[1];
    int inh = input_tensor->dims[2];
    int inw = input_tensor->dims[3];
    int outch = output_tensor->dims[1];
    int outh = output_tensor->dims[2];
    int outw = output_tensor->dims[3];
    int out_hw = outh * outw;
    int out_size = output_tensor->elem_num;
    int pad_w = param->pad_w0;
    int pad_h = param->pad_h0;

    /* int32 accumulator (zeroed) and fp32 staging buffer for the whole output */
    int32_t* output_int32 = (int32_t*)sys_malloc(out_size * sizeof(int32_t));
    memset(output_int32, 0, out_size * sizeof(int32_t));
    float* output_fp32 = (float*)sys_malloc(out_size * sizeof(float));

    int8_t* output_int8 = output_tensor->data;
    int8_t* input_int8 = input_tensor->data;
    int32_t* bias_int32 = NULL;
    if (bias_tensor)
        bias_int32 = bias_tensor->data;

    /* get scale value of quantizaiton */
    float input_scale = input_tensor->scale;
    float* kernel_scales = weight_tensor->scale_list;
    float output_scale = output_tensor->scale;
    const signed char* kernel = weight_tensor->data;

    /* pading: skip entirely when no padding is requested */
    int inh_tmp = inh + pad_h + pad_h;
    int inw_tmp = inw + pad_w + pad_w;
    int8_t* input_tmp = NULL;
    if (inh_tmp == inh && inw_tmp == inw)
        input_tmp = input_int8;
    else
    {
        input_tmp = ( int8_t* )sys_malloc(inh_tmp * inw_tmp * inch * sizeof(int8_t));
#pragma omp parallel for num_threads(num_thread)
        for (int g = 0; g < inch; g++)
        {
            int8_t* pad_in = input_int8 + g * inh * inw;
            int8_t* pad_out = input_tmp + g * inh_tmp * inw_tmp;
            pad_int8(pad_in, pad_out, inh, inw, inh_tmp, inw_tmp, pad_h, pad_w, 0);
        }
    }

    /* stride 2 consumes 2*outw columns per row; tailstep jumps the row
     * pointers to the start of the next input row pair */
    int tailstep = inw_tmp - 2 * outw + inw_tmp;

#pragma omp parallel for num_threads(num_thread)
    for (int p = 0; p < outch; p++)
    {
        int32_t* out0 = output_int32 + p * out_hw;
        int8_t* kernel0 = (int8_t* )kernel + p * inch * 9;
        for (int q = 0; q < inch; q++)
        {
            int* outptr0 = out0;
            int8_t* img0 = input_tmp + q * inw_tmp * inh_tmp;
            /* three consecutive input rows feeding one output row */
            int8_t* r0 = img0;
            int8_t* r1 = img0 + inw_tmp;
            int8_t* r2 = img0 + inw_tmp * 2;
            for (int i = 0; i < outh; i++)
            {
                int remain = outw;
                for (; remain > 0; remain--)
                {
                    int sum0 = 0;
                    sum0 += ( int )r0[0] * kernel0[0];
                    sum0 += ( int )r0[1] * kernel0[1];
                    sum0 += ( int )r0[2] * kernel0[2];
                    sum0 += ( int )r1[0] * kernel0[3];
                    sum0 += ( int )r1[1] * kernel0[4];
                    sum0 += ( int )r1[2] * kernel0[5];
                    sum0 += ( int )r2[0] * kernel0[6];
                    sum0 += ( int )r2[1] * kernel0[7];
                    sum0 += ( int )r2[2] * kernel0[8];
                    *outptr0 += sum0;
                    /* horizontal stride of 2 */
                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    outptr0++;
                }
                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }
            kernel0 += 9;
        }
    }

    /* process bias and dequant output from int32 to fp32 */
#pragma omp parallel for num_threads(num_thread)
    for (int i = 0; i < outch; i++)
    {
        for (int j = 0; j < outh * outw; j++)
        {
            int output_off = i * (outh * outw) + j;
            if (bias_tensor)
                output_fp32[output_off] = (float )(output_int32[output_off] + bias_int32[i]) * input_scale * kernel_scales[i];
            else
                output_fp32[output_off] = (float )output_int32[output_off] * input_scale * kernel_scales[i];
        }
    }

    /* process activation relu */
    if (param->activation == 0)
    {
#pragma omp parallel for num_threads(num_thread)
        for (int i = 0; i < outch; i++)
        {
            for (int j = 0; j < outh * outw; j++)
            {
                int output_off = i * (outh * outw) + j;
                if (output_fp32[output_off] < 0)
                    output_fp32[output_off] = 0;
            }
        }
    }

    /* process activation relu6 */
    if (param->activation > 0)
    {
#pragma omp parallel for num_threads(num_thread)
        for (int i = 0; i < outch; i++)
        {
            for (int j = 0; j < outh * outw; j++)
            {
                int output_off = i * (outh * outw) + j;
                if (output_fp32[output_off] < 0)
                    output_fp32[output_off] = 0;
                if (output_fp32[output_off] > 6)
                    output_fp32[output_off] = 6;
            }
        }
    }

    /* quant from fp32 to int8, clamped to the symmetric range [-127, 127] */
#pragma omp parallel for num_threads(num_thread)
    for (int i = 0; i < outch; i++)
    {
        for (int j = 0; j < outh * outw; j++)
        {
            int output_off = i * (outh * outw) + j;
            int32_t data_i32 = ( int32_t )(round(output_fp32[output_off] / output_scale));
            if (data_i32 > 127)
                data_i32 = 127;
            else if (data_i32 < -127)
                data_i32 = -127;
            output_int8[output_off] = (int8_t)data_i32;
        }
    }

    sys_free(output_int32);
    sys_free(output_fp32);
    if (!(inh_tmp == inh && inw_tmp == inw))
        sys_free(input_tmp);

    return 0;
}
/* Execute the node: re-fetch all tensors (covers reshape / dynamic shape)
 * and dispatch to the stride-1 or stride-2 int8 3x3 kernel.
 * Returns the kernel's status, or -1 for an unsupported stride. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* node = exec_node->ir_node;
    struct graph* graph = node->graph;
    int thread_count = exec_graph->num_thread;

    /* set the input data and shape again, in case of reshape or dynamic shape */
    struct tensor* input = get_ir_graph_tensor(graph, node->input_tensors[0]);
    struct tensor* weight = get_ir_graph_tensor(graph, node->input_tensors[1]);
    struct tensor* bias = (node->input_num > 2) ? get_ir_graph_tensor(graph, node->input_tensors[2]) : NULL;
    struct tensor* output = get_ir_graph_tensor(graph, node->output_tensors[0]);

    struct conv_param* conv_param = ( struct conv_param* )node->op.param_mem;

    if (conv_param->stride_h == 1)
        return conv3x3s1_int8_sse(input, weight, bias, output, conv_param, thread_count);
    if (conv_param->stride_h == 2)
        return conv3x3s2_int8_sse(input, weight, bias, output, conv_param, thread_count);

    TLOG_ERR("Direct Convolution Int8 not support the stride %d\n", conv_param->stride_h);
    return -1;
}
/* Node-ops hook: this implementation keeps no per-node state, so there is
 * nothing to set up. Always succeeds. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
/* Node-ops hook: nothing was allocated in init_node, so there is nothing
 * to release. Always succeeds. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
/* Rate this implementation for the given conv node. Returns a high score
 * (OPS_SCORE_BEST * 2) only for int8, ungrouped, undilated 3x3 convolutions
 * with symmetric padding and uniform stride 1 or 2; otherwise 0 (not usable). */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    struct conv_param* param = ( struct conv_param* )exec_node->op.param_mem;
    struct tensor* input_tensor = get_ir_graph_tensor(exec_node->graph, exec_node->input_tensors[0]);

    /* only support int8 */
    if (input_tensor->data_type != TENGINE_DT_INT8)
        return 0;

    /* only plain (group 1), undilated 3x3 kernels */
    if (param->group != 1)
        return 0;
    if (param->kernel_h != 3 || param->kernel_w != 3)
        return 0;
    if (param->dilation_h != 1 || param->dilation_w != 1)
        return 0;

    /* padding must be symmetric in both dimensions */
    if (param->pad_h0 != param->pad_h1 || param->pad_w0 != param->pad_w1)
        return 0;

    /* uniform stride of 1 or 2 */
    int unit_stride = (param->stride_h == 1 && param->stride_w == 1);
    int double_stride = (param->stride_h == 2 && param->stride_w == 2);
    if (!unit_stride && !double_stride)
        return 0;

    return OPS_SCORE_BEST * 2;
}
/* Operator table for this direct int8 conv implementation; the unused
 * prerun/reshape/postrun hooks are left NULL. */
static struct node_ops hcl_node_ops = {.prerun = NULL,
                                       .run = run,
                                       .reshape = NULL,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};
/* Register this implementation as a candidate for OP_CONV nodes.
 * Returns the framework's registration status. */
int register_conv_direct_hcl_x86_op()
{
    return register_builtin_node_ops(OP_CONV, &hcl_node_ops);
}
/* Remove this implementation from the OP_CONV candidates. Always returns 0;
 * the framework's unregister status is ignored. */
int unregister_conv_direct_hcl_x86_op()
{
    unregister_builtin_node_ops(OP_CONV, &hcl_node_ops);
    return 0;
}
|
DRB012-minusminus-var-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
The -- operation is not protected, causing race condition.
Data race pair: numNodes2@75 vs. numNodes2@75
*/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/*
 * Fills x[] with alternating +5/-5 values in parallel, then counts the
 * non-positive entries.  numNodes2 is combined with reduction(+:numNodes2),
 * so the accumulation in the second loop is protected.
 * An optional command-line argument overrides the default array length.
 * Returns 0.
 */
int main(int argc, char *argv[])
{
    int i;
    int len = 100;

    if (argc > 1)
        len = atoi(argv[1]);
    /* atoi() reports errors as 0 and accepts negative input; a VLA with a
       non-positive length is undefined behavior, so keep the default. */
    if (len <= 0)
        len = 100;

    int numNodes = len;
    int numNodes2 = 0;
    int x[len];

    /* Initialize: even indices +5, odd indices -5. */
#pragma omp parallel for private (i) firstprivate (len)
    for (i = 0; i <= len - 1; i += 1) {
        if (i % 2 == 0)
            x[i] = 5;
        else
            x[i] = - 5;
    }

    /* Count (negatively) the elements <= 0; the reduction clause makes the
       concurrent updates of numNodes2 safe. */
#pragma omp parallel for private (i) reduction (+:numNodes2)
    for (i = numNodes - 1; i >= 0; i += -1) {
        if (x[i] <= 0) {
            numNodes2 += - 1;
        }
    }

    printf("%d\n", numNodes2);
    return 0;
}
|
task_depend_omp.c | /* --- File task_depend_omp.c --- */
#include <stdlib.h>
#include <stdio.h>
/*
 * Builds an N x N summed table twice -- serially into x and with OpenMP
 * tasks into y -- and prints both results for comparison.
 * Returns 0.
 */
int main(int argc, char **argv)
{
    int N = 8;
    int x[N][N], y[N][N];
    int i, j;

    /* Initialize the first row and column of x and y. */
    for (i = 0; i < N; i++)
    {
        x[0][i] = x[i][0] = y[0][i] = y[i][0] = i;
    }

    /* Serial computation */
    for (i = 1; i < N; i++)
    {
        for (j = 1; j < N; j++)
            x[i][j] = x[i - 1][j] + x[i][j - 1];
    }

    /* Parallel computation.  depend(out:y) names the whole array, so every
     * task depends on the previously generated one and they run in creation
     * order.  i and j are shared in the enclosing parallel region and would
     * default to shared inside the tasks as well; since tasks may execute
     * after the generating thread has advanced the loop counters, they must
     * be captured with firstprivate to avoid a data race / wrong indices. */
#pragma omp parallel
#pragma omp single
    for (i = 1; i < N; i++)
    {
        for (j = 1; j < N; j++)
#pragma omp task depend(out: y) firstprivate(i, j)
            y[i][j] = y[i - 1][j] + y[i][j - 1];
    }

    printf("Serial result:\n");
    for (i = 0; i < N; i++)
    {
        for (j = 0; j < N; j++)
            printf("%6d", x[i][j]);
        printf("\n");
    }
    printf("Parallel result:\n");
    for (i = 0; i < N; i++)
    {
        for (j = 0; j < N; j++)
            printf("%6d", y[i][j]);
        printf("\n");
    }
    return 0;
}
|
DRACC_OMP_005_Counter_no_lock_Inter_yes.c | /*
Concurrent access on a counter with no lock. Atomicity Violation. Data Race in line 14. Inter Region.
*/
#include <stdio.h>
#define N 100000
int countervar = 0;
/*
 * Increments the global countervar N times inside a "target teams distribute"
 * region on device 0.  countervar++ is a plain, non-atomic read-modify-write
 * executed concurrently by the distributed iterations, so increments can be
 * lost.  This atomicity violation is the data race this benchmark
 * intentionally demonstrates (see the file header); it must not be "fixed".
 * Returns 0 unconditionally.
 */
int count(){
#pragma omp target map(tofrom:countervar) device(0)
#pragma omp teams distribute
for (int i=0; i<N; i++){
countervar++; /* intentionally unprotected: the benchmark's data race */
}
return 0;
}
/*
 * Entry point: runs the intentionally racy counter kernel once and prints
 * the observed counter value next to the expected one.
 */
int main(){
    const int ignored = count();
    (void)ignored;
    printf("counter: %i expected: 100000\n ",countervar);
    return 0;
}
GB_unaryop__minv_bool_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_bool_uint32
// op(A') function: GB_tran__minv_bool_uint32
// C type: bool
// A type: uint32_t
// cast: ;
// unaryop: cij = true
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = true ;
// casting
#define GB_CASTING(z, x) \
; ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_BOOL || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary operator to every entry: Cx [p] = op (cast (Ax [p])).
// For MINV with a bool result the generated operator is simply "cij = true",
// so the values in Ax are never actually read (GB_GETA expands to ';').
// The loop is parallelized with a static schedule over nthreads threads.
// Returns GrB_NO_VALUE when this operator/type combination is disabled at
// compile time via GB_control.h, GrB_SUCCESS otherwise.
GrB_Info GB_unop__minv_bool_uint32
(
bool *restrict Cx,
const uint32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// Cx [p] = true (cast and input value are ignored for this operator)
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the unary operator.
// The actual loop lives in the shared template GB_unaryop_transpose.c
// (included below, phase 2 of 2); this wrapper only supplies the
// operator-specific macros defined earlier in this file.  Returns
// GrB_NO_VALUE when this operator/type combination is disabled at compile
// time, GrB_SUCCESS otherwise.
GrB_Info GB_tran__minv_bool_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
|
trmv_x_csc_n_lo_trans.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include <string.h>
#include <memory.h>
/*
 * Serial kernel for one slice of y := beta * y + alpha * op(A) * x, where A
 * is stored in CSC and only the entries of its lower triangle
 * (row index >= column index) participate.  Because the matrix is walked
 * column-by-column while the product is the transposed one, each output
 * element y[i] is the dot product of column i of A (restricted to rows >= i)
 * with x.  Columns [lrs, lre) are processed.
 *
 * The dot product is unrolled by four into independent accumulators
 * tmp0..tmp3, which are combined at the end.
 *
 * NOTE(review): the cascading "else if" chain assumes the row indices inside
 * a column are stored in ascending order, so that row_ind0 < i implies every
 * earlier entry is also above the diagonal -- TODO confirm this CSC ordering
 * invariant holds for ALPHA_SPMAT_CSC.
 *
 * (The unused local "ALPHA_INT m = A->cols;" from the original was removed.)
 */
static alphasparse_status_t
trmv_csc_n_lo_trans_unroll4(const ALPHA_Number alpha,
                            const ALPHA_SPMAT_CSC* A,
                            const ALPHA_Number* x,
                            const ALPHA_Number beta,
                            ALPHA_Number* y,
                            ALPHA_INT lrs,
                            ALPHA_INT lre)
{
    for (ALPHA_INT i = lrs; i < lre; i++)
    {
        /* Four independent partial sums for the unrolled dot product. */
        register ALPHA_Number tmp0;
        register ALPHA_Number tmp1;
        register ALPHA_Number tmp2;
        register ALPHA_Number tmp3;
        alpha_setzero(tmp0);
        alpha_setzero(tmp1);
        alpha_setzero(tmp2);
        alpha_setzero(tmp3);
        ALPHA_INT pks = A->cols_start[i];
        ALPHA_INT pke = A->cols_end[i];
        ALPHA_INT pkl = pke - pks;
        ALPHA_INT pkl4 = pkl - 4;      /* negative when fewer than 4 entries */
        ALPHA_INT row_ind0, row_ind1, row_ind2, row_ind3;
        ALPHA_Number *A_val = &A->values[pks];
        ALPHA_INT *A_row = &A->row_indx[pks];
        ALPHA_INT pi;
        /* Main unrolled loop: four entries per iteration; entries strictly
         * above the diagonal (row < i) are skipped. */
        for (pi = 0; pi < pkl4; pi += 4)
        {
            row_ind0 = A_row[pi];
            row_ind1 = A_row[pi + 1];
            row_ind2 = A_row[pi + 2];
            row_ind3 = A_row[pi + 3];
            if (row_ind0 >= i){
                alpha_madde(tmp0, A_val[pi], x[row_ind0]);
                alpha_madde(tmp1, A_val[pi+1], x[row_ind1]);
                alpha_madde(tmp2, A_val[pi+2], x[row_ind2]);
                alpha_madde(tmp3, A_val[pi+3], x[row_ind3]);
            }else if (row_ind1 >= i){
                alpha_madde(tmp1, A_val[pi+1], x[row_ind1]);
                alpha_madde(tmp2, A_val[pi+2], x[row_ind2]);
                alpha_madde(tmp3, A_val[pi+3], x[row_ind3]);
            }else if (row_ind2 >= i){
                alpha_madde(tmp2, A_val[pi+2], x[row_ind2]);
                alpha_madde(tmp3, A_val[pi+3], x[row_ind3]);
            }else if (row_ind3 >= i){
                alpha_madde(tmp3, A_val[pi+3], x[row_ind3]);
            }
        }
        /* Remainder loop: leftover entries accumulate into tmp0. */
        for (; pi < pkl; pi += 1)
        {
            if (A_row[pi] >= i)
            {
                alpha_madde(tmp0, A_val[pi], x[A_row[pi]]);
            }
        }
        /* Combine partial sums, scale by alpha and fold in beta * y[i]. */
        alpha_add(tmp0, tmp0, tmp1);
        alpha_add(tmp2, tmp2, tmp3);
        alpha_add(tmp0, tmp0, tmp2);
        alpha_mul(tmp0, tmp0, alpha);
        alpha_mul(tmp1, beta, y[i]);
        alpha_add(y[i], tmp0, tmp1);
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
/*
 * OpenMP driver for the transposed lower-triangular CSC product.  The n
 * columns are partitioned among the worker threads so that each partition
 * carries a roughly equal number of nonzeros (balanced_partition_row_by_nnz
 * over A->cols_end), and every thread then runs the serial unrolled kernel
 * on its own contiguous column range.  Each column i contributes only to
 * y[i], so threads write disjoint parts of y and no synchronization is
 * needed.  Without _OPENMP the block runs once on the single thread.
 */
static alphasparse_status_t
trmv_csc_n_lo_trans_omp(const ALPHA_Number alpha,
const ALPHA_SPMAT_CSC* A,
const ALPHA_Number* x,
const ALPHA_Number beta,
ALPHA_Number* y)
{
ALPHA_INT n = A->cols;
ALPHA_INT num_threads = alpha_get_thread_num();
/* partition[t] .. partition[t+1] is the column range owned by thread t. */
ALPHA_INT partition[num_threads + 1];
balanced_partition_row_by_nnz(A->cols_end, n, num_threads, partition);
#ifdef _OPENMP
#pragma omp parallel num_threads(num_threads)
#endif
{
ALPHA_INT tid = alpha_get_thread_id();
ALPHA_INT local_n_s = partition[tid];
ALPHA_INT local_n_e = partition[tid + 1];
trmv_csc_n_lo_trans_unroll4(alpha,A,x,beta,y,local_n_s,local_n_e);
}
return ALPHA_SPARSE_STATUS_SUCCESS;
}
/*
 * Exported entry point (the concrete symbol name is generated by the ONAME
 * macro at build time).  Computes y := beta * y + alpha * op(A) * x for a
 * CSC matrix using its lower triangle with a transposed access pattern, by
 * delegating to the OpenMP driver.
 */
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
const ALPHA_SPMAT_CSC *A,
const ALPHA_Number *x,
const ALPHA_Number beta,
ALPHA_Number *y)
{
return trmv_csc_n_lo_trans_omp(alpha, A, x, beta, y);
}
|
morph_library.h | #ifndef MORPH_LIBRARY_H
#define MORPH_LIBRARY_H
#include "CImgFloatWrapper.h"
#include "loewner_declaration.h"
#include "conversions.h"
#include "morph_color_matrix.h"
#include "morph_circle.h"
#include "morph_smallest_circle_mask.h"
#include "einstein_operations.h"
#include "omp.h"
#include <stdio.h>
#include <stdlib.h>
#include <cmath>
#include <errno.h>
#include <sys/time.h>
#include <string.h>
#define open_file(file_ptr, file_name, mode)\
do {\
if (((file_ptr) = fopen((file_name), (mode))) == NULL) {\
fprintf(stderr, "%s:%d: error while opening file %s: %s\n", __FILE__, __LINE__, (file_name), strerror(errno));\
exit(EXIT_FAILURE);\
}\
} while(0)
#define close_file(file_ptr, file_name)\
do {\
if (fclose((file_ptr)) != 0) {\
fprintf(stderr, "%s:%d: error while closing file %s: %s\n", __FILE__, __LINE__, (file_name), strerror(errno));\
exit(EXIT_FAILURE);\
}\
} while(0)
/*
*==============================================================================================================
* Class that contains morphological operations which are introduced in the paper of B. Burgeth and A. Kleefeld
*==============================================================================================================
*/
class LoewnerMorphology::Morph {
public:
/*
* Constructor of the class Morph. It takes the name of the file where the image is stored, the name of the file where a
* structuring element (a mask) is stored and dimension of the structuring elements as arguments.
*/
Morph(const char *imageFile, const char *maskFile, int maskDim, int numberOfThreads = 8);
/*
* Destructor of the class Morph.
*/
~Morph();
// MORPHOLOGICAL OPERATION
/*
* Performs morphological opration dilation on the input image.
*/
void dilation(int iter = 1);
/*
* Performs morphological operation erosion on the input image.
*/
void erosion(int iter = 1);
/*
* Performs morphological operation closing on the input image.
*/
void closing(int iter = 1);
/*
* Performs morphological operation opening on the input image.
*/
void opening(int iter = 1);
/*
* Performs morphological operation black top hat on the input image.
*/
void blackTopHat(int iter = 1);
/*
* Performs morphological operation white top hat on the input image.
*/
void whiteTopHat(int iter = 1);
/*
* Performs morphological operation self-dual top hat on the input image.
*/
void selfDualTopHat(int iter = 1);
/*
* Performs morphological operation beucher gradient on the input image.
*/
void beucherGradient(int iter = 1);
/*
* Performs morphological operation internal gradient on the input image.
*/
void externalGradient(int iter = 1);
/*
* Performs morphological operation internal gradient on the input image.
*/
void internalGradient(int iter = 1);
/*
* Performs morphological operation morphological laplacian on the input image.
*/
void laplacian(int iter = 1);
/*
* Performs morphological operation shock filter on the input image.
*/
void shockFilter(int iter = 1);
/*
* Displays the original image.
*/
void displayOriginalImage();
/*
* Displays the result of the morphological operation if the operation was called.
*/
void displayResultImage();
/*
* Returns the result image as an array of floats. It allocates memory equal to the size of the image times spectrum.
*/
float *returnResult();
/*
* Saves the result image to the file which name is provided as a filename argument.
*/
void saveResult(const char *fileName);
private:
using Circle = LoewnerMorphology::MorphCircle;
CImgFloatWrapper *inputImage; // input image
CImgFloatWrapper *outputImage; // output image - after morphological operation
int *mask; // mask array
int padding; // mask padding
LoewnerMorphology::MorphColorMatrix *matrices; // input image converted to array of MorphColorMatrix objects
LoewnerMorphology::MorphColorMatrix *result; // result image converted to array of MorphColorMatrix objects
int width; // width of the image
int height; // height of the image
int spectrum; // spectrum of the image
int size; // size of the image
// HANDLERS
/*
* Method that performs the modified commutative Einstein subtraction of two images that are given on memory locations image1 and
* image2. Both images have the size width * height, with respective leading dimensions, lda1 and lda2. Precisely, the operation
* imageResult = image1 - image2 is performed. The result is stored on memory location imageResult, with leading dimension lda3.
*/
template<typename T>
static void morph_einstein_launcher(T *image1, T *image2, T *imageResult, int width, int height, int lda1, int lda2, int lda3);
/*
* The method responsible for calculating morphological operations on a (2 * padding + 1)-dimensional squared matrix stored on
* memory location start. Argument pWidth is the appropriate lda matrix lda. The matrix contains MorphCircle objects as elements.
* The calculation is performed using the approach presented in the paper, based on Loewner order. The method returns the result
* of the wanted morphological operation in form of MorphColorMatrix.
*
* Argument type determines a morphological operation:
* 1) false -> DILATION
* 2) true -> EROSION
*/
static MorphColorMatrix morph_basic_operation(Circle *start, int pWidth, int *mask, int padding, bool type);
/*
* The method responsible for invoking calculations needed for performing a basic morphological operation on given image vector
* which has already beend prepared for calculations in the form of an array of Circle objects. The input vector inPrepared is
* expected to be size of (2 * padding + width) * (2 * padding + height). The original image is stored as an array of objects
* of type T on memory location in. Argument padding is a padding of the structuring element used in the morphological operation,
* which is pased as an integer matrix stored on memory location mask with both dimensions (2 * padding + 1). For example, if a
* mask has dimensions 5x5, the padding is 2. Output vector's size has to be width * height.
* Morphological operations are performed as explained in the paper, elements are compared using Loewner order, solving the
* smallest enclosing circle of circles problem. Type T should support the conversion to the type MorphCircle. In the other words, * a constructor MorphCircle(&T) must exist.
*
* Argument type determines a morphological operation:
* 1) false -> DILATION
* 2) true -> EROSION
*/
template<typename T>
static void morph_basic_handler(Circle *inPrepared, T* in, T *out, int width, int height, int padding, int *mask, bool type);
/*
* The method responsible for perfoming a wanted basic morphological operation on given image vector. The input vector in is
* expected to be an image matrix containing objects of type T as elements. The vector containing the image matrix must have size
* width * height. Argument padding is a padding of the structuring element used in the morphological operation, which is pased as * an integer matrix stored on memory location mask with both dimensions (2 * padding + 1). For example, if a mask has dimensions
* 5x5, the padding is 2. Output vector's size has to be width * height. Morphological operations are performed as explained in
* the paper, elements are compared using Loewner order, solving the smallest enclosing circle of circles problem.
* Type T should support the conversion to the type MorphCircle. In the other words, a constructor MorphCircle(&T) must exist.
*
* Argument type determines a morphological operation:
* 1) false -> DILATION
* 2) true -> EROSION
*/
template<typename T>
static void morph_basic_launcher(T *in, T *out, int width, int height, int padding, int *mask, bool type);
/*
* The method responsible for invoking calculations needed for performing a morphological operation on given image vector
* which has already beend prepared for calculations in the form of an array of Circle objects. The input vector inPrepared is
* expected to be size of (2 * padding + width) * (2 * padding + height). The original image is stored as an array of objects
* of type T on memory location in. Argument padding is a padding of the structuring element used in the morphological operation,
* which is pased as an integer matrix stored on memory location mask with both dimensions (2 * padding + 1). For example, if a
* mask has dimensions 5x5, the padding is 2. Output vector's size has to be width * height.
* Morphological operations are performed as explained in the paper, elements are compared using Loewner order, solving the
* smallest enclosing circle of circles problem. Type T should support the conversion to the type MorphCircle. In the other words, * a constructor MorphCircle(&T) must exist.
*
* Argument type determines a morphological operation:
* 1) false -> CLOSING
* 2) true -> OPENING
*/
template<typename T>
static void morph_second_order_handler(Circle *inPrepared, T *in, T *out, int width, int height, int padding, int *mask, bool type);
/*
* The method responsible for perfoming a wanted basic morphological operation on given image vector. The input vector in is
* expected to be an image matrix containing objects of type T as elements. The vector containing the image matrix must have size
* width * height. Argument padding is a padding of the structuring element used in the morphological operation, which is pased as * an integer matrix stored on memory location mask with both dimensions (2 * padding + 1). For example, if a mask has dimensions
* 5x5, the padding is 2. Output vector's size has to be width * height. Morphological operations are performed as explained in
* the paper, elements are compared using Loewner order, solving the smallest enclosing circle of circles problem.
* Type T should support the conversion to the type MorphCircle. In the other words, constructor MorphCircle(&T) must exist.
*
* Argument type determines a morphological operation:
* 1) false -> CLOSING
* 2) true -> OPENING
*/
template<typename T>
static void morph_second_order_launcher(T *in, T *out, int width, int height, int padding, int *mask, bool type);
/*
* The method responsible for invoking calculations needed for performing a morphological operation on given image vector
* which has already beend prepared for calculations in the form of an array of Circle objects. The input vector inPrepared is
* expected to be size of (2 * padding + width) * (2 * padding + height). The original image is stored as an array of objects
* of type T on memory location in. Argument padding is a padding of the structuring element used in the morphological operation,
* which is pased as an integer matrix stored on memory location mask with both dimensions (2 * padding + 1). For example, if a
* mask has dimensions 5x5, the padding is 2. Output vector's size has to be width * height.
* Morphological operations are performed as explained in the paper, elements are compared using Loewner order, solving the
* smallest enclosing circle of circles problem. Type T should support the conversion to the type MorphCircle. In the other words, * a constructor MorphCircle(&T) must exist.
*
* Argument type determines a morphological operation:
* 1) false -> BLACK TOP HAT
* 2) true -> WHITE TOP HAT
*/
template<typename T>
static void morph_hats_handler(Circle *inPrepared, T *in, T *out, int width, int height, int padding, int *mask, bool type);
/*
* The method responsible for perfoming a wanted morphological operation on given image vector. The input vector in is
* expected to be an image matrix containing objects of type T as elements. The vector containing the image matrix must have size
* width * height. Argument padding is a padding of the structuring element used in the morphological operation, which is pased as * an integer matrix stored on memory location mask with both dimensions (2 * padding + 1). For example, if a mask has dimensions
* 5x5, the padding is 2. Output vector's size has to be width * height. Morphological operations are performed as explained in
* the paper, elements are compared using Loewner order, solving the smallest enclosing circle of circles problem.
* Type T should support the conversion to the type MorphCircle. In the other words, constructor MorphCircle(&T) must exist.
*
* Argument type determines a morphological operation:
* 1) false -> BLACK TOP HAT
* 2) true -> WHITE TOP HAT
*/
template<typename T>
static void morph_hats_launcher(T *in, T *out, int width, int height, int padding, int *mask, bool type);
/*
* The method responsible for perfoming a morphological operation Beucher gradient on given image vector. The input vector in is
* expected to be an image matrix containing objects of type T as elements. The vector containing the image matrix must have size
* width * height. Argument padding is a padding of the structuring element used in the morphological operation, which is pased as * an integer matrix stored on memory location mask with both dimensions (2 * padding + 1). For example, if a mask has dimensions
* 5x5, the padding is 2. Output vector's size has to be width * height. Morphological operations are performed as explained in
* the paper, elements are compared using Loewner order, solving the smallest enclosing circle of circles problem.
* Type T should support the conversion to the type MorphCircle. In the other words, constructor MorphCircle(&T) must exist.
*/
template<typename T>
static void morph_beucher_launcher(T *in, T *out, int width, int height, int padding, int *mask);
/*
* The method responsible for perfoming a morphological operation self-dual top hat on given image vector. The input vector in is
* expected to be an image matrix containing objects of type T as elements. The vector containing the image matrix must have size
* width * height. Argument padding is a padding of the structuring element used in the morphological operation, which is pased as * an integer matrix stored on memory location mask with both dimensions (2 * padding + 1). For example, if a mask has dimensions
* 5x5, the padding is 2. Output vector's size has to be width * height. Morphological operations are performed as explained in
* the paper, elements are compared using Loewner order, solving the smallest enclosing circle of circles problem.
* Type T should support the conversion to the type MorphCircle. In the other words, constructor MorphCircle(&T) must exist.
*/
template<typename T>
static void morph_sdth_launcher(T *in, T *out, int width, int height, int padding, int *mask);
/*
* The method responsible for invoking calculations needed for performing a morphological operation on given image vector
* which has already beend prepared for calculations in the form of an array of Circle objects. The input vector inPrepared is
* expected to be size of (2 * padding + width) * (2 * padding + height). The original image is stored as an array of objects
* of type T on memory location in. Argument padding is a padding of the structuring element used in the morphological operation,
* which is pased as an integer matrix stored on memory location mask with both dimensions (2 * padding + 1). For example, if a
* mask has dimensions 5x5, the padding is 2. Output vector's size has to be width * height.
* Morphological operations are performed as explained in the paper, elements are compared using Loewner order, solving the
* smallest enclosing circle of circles problem. Type T should support the conversion to the type MorphCircle. In the other words, * a constructor MorphCircle(&T) must exist.
*
* Argument type determines a morphological operation:
* 1) false -> EXTERNAL GRADIENT
* 2) true -> INTERNAL GRADIENT
*/
template<typename T>
static void morph_gradients_handler(Circle *inPrepared, T *in, T *out, int width, int height, int padding, int *mask, bool type);
/*
* The method responsible for perfoming a wanted morphological operation on given image vector. The input vector in is
* expected to be an image matrix containing objects of type T as elements. The vector containing the image matrix must have size
* width * height. Argument padding is a padding of the structuring element used in the morphological operation, which is pased as * an integer matrix stored on memory location mask with both dimensions (2 * padding + 1). For example, if a mask has dimensions
* 5x5, the padding is 2. Output vector's size has to be width * height. Morphological operations are performed as explained in
* the paper, elements are compared using Loewner order, solving the smallest enclosing circle of circles problem.
* Type T should support the conversion to the type MorphCircle. In the other words, constructor MorphCircle(&T) must exist.
*
* Argument type determines a morphological operation:
* 1) false -> EXTERNAL GRADIENT
* 2) true -> INTERNAL GRADIENT
*/
template<typename T>
static void morph_gradients_launcher(T *in, T *out, int width, int height, int padding, int *mask, bool type);
/*
* The method responsible for perfoming a morphological operation laplacian on given image vector. The input vector in is
* expected to be an image matrix containing objects of type T as elements. The vector containing the image matrix must have size
* width * height. Argument padding is a padding of the structuring element used in the morphological operation, which is pased as * an integer matrix stored on memory location mask with both dimensions (2 * padding + 1). For example, if a mask has dimensions
* 5x5, the padding is 2. Output vector's size has to be width * height. Morphological operations are performed as explained in
* the paper, elements are compared using Loewner order, solving the smallest enclosing circle of circles problem.
* Type T should support the conversion to the type MorphCircle. In the other words, constructor MorphCircle(&T) must exist.
*/
template<typename T>
static void morph_laplacian_launcher(T *in, T *out, int width, int height, int padding, int *mask);
/*
* The method responsible for perfoming a morphological operation shockfilter on given image vector. The input vector in is
* expected to be an image matrix containing objects of type T as elements. The vector containing the image matrix must have size
* width * height. Argument padding is a padding of the structuring element used in the morphological operation, which is pased as * an integer matrix stored on memory location mask with both dimensions (2 * padding + 1). For example, if a mask has dimensions
* 5x5, the padding is 2. Output vector's size has to be width * height. Morphological operations are performed as explained in
* the paper, elements are compared using Loewner order, solving the smallest enclosing circle of circles problem.
* Type T should support the conversion to the type MorphCircle. In the other words, constructor MorphCircle(&T) must exist.
*/
template<typename T>
static void morph_shock_launcher(T *in, T *out, int width, int height, int padding, int *mask);
/*
* A method responsible for calculating the morphological shock filter. Arguments prepareDilation and prepareErosion
* represent memory locations holding two (2 * padding + width) * (2 * padding + height) matrices in which the original image
* has already been prepared for performing dilation and erosion operations, respectively. Both matrices contain MorphCircle
* objects as elements. Also, a morphological laplacian of the original image has been stored on memory location laplacian, as a
* width * height matrix of objects T. The morphological shock filter is performed as follows: if trace(laplacian[pixel]) < 0, a
* dilation of the selected pixel is performed; otherwise, an erosion of the selected pixel is performed. The result is stored on
* memory location out.
*/
template<typename T>
static void morph_shock_operation(Circle *prepareDilation, Circle *prepareErosion, T *laplacian, T *out, int width, int height, int padding, int *mask);
/*
* A basic handler method for invoking launcher methods for performing all morphological operations which are introduced in the
* paper of B. Burgeth and A. Kleefeld. Memory location in must contain the original image matrix with elements of type T. The
* result of the selected morphological operation will be stored on memory location out. This memory location should be
* preallocated to the size of width * height. Argument padding is a padding of the given structural element (maks). For example,
* if the structuring element has dimensions 5x5, the padding is 2.
* Argument iters defines number of iterations.
*
* The morphological operation is determined by morphType argument:
* 0) DILATION
* 1) EROSION
* 2) CLOSING
* 3) OPENING
* 4) BLACK TOP HAT
* 5) WHITE TOP HAT
* 6) SELF-DUAL TOP HAT
* 7) BEUCHER GRADIENT
* 8) EXTERNAL GRADIENT
* 9) INTERNAL GRADIENT
* 10) MORPHOLOGICAL LAPLACIAN
* 11) SHOCK FILTER
*/
template<typename T>
static void morph_handle(T *in, T *out, int width, int height, int padding, int *mask, int morphType, int iters = 0);
// HELPER METHODS
/*
* Helper method that creates the output image (CImgFloatWrapper object) after performing the morphological operation.
*/
void createOutputImage();
/*
* Helper method for filling the array in with the given size with the given element alpha.
*/
template<typename T>
static void fill(T *in, int size, T alpha);
/*
* Helper method that prepares the image vector for morphological operations. The image vector is stored on memory location in.
* Its length should be width * height. The result is stored on the memory location out. Memory allocation should be done before
* calling this method. Out should be allocated to the size of (width + (2 * padding)) * (height + (2 * padding)) * sizeof(T)
* because a vector used in morphological operations should have an appropriate padding.
*
* Argument type determines a type of morphological operation that the vector needs to be prepared for:
* 1) false -> DILATION
* 2) true -> EROSION
*/
template<typename T>
static void prepare_vector(T *in, Circle *out, int width, int height, int padding, bool type);
/*
* Helper method for copying one array to another.
*/
template<typename T>
static void copy(T *in, T *out, int size);
/*
* Reading a structuring element (a mask) from a file specified by the given file name. Also, a mask dimension needs to be
* provided. The Mask is expected to be a maskDim * maskDim matrix containing only 0s and 1s.
*/
static void readMaskFromFile(int *maskPointer, int maskDim, const char *fileName);
// DEBUGGING
/*
* Helper method for printing the given matrix of MorphCircle objects to the standard output.
* Used for debugging.
*/
static void print_vector(Circle *in, int width, int height, int lda);
/*
* Helper method for printing the given matrix of MorphColorMatrix objects to the standard output.
* Used for debugging.
*/
static void print_vector(LoewnerMorphology::MorphColorMatrix *in, int width, int height, int lda);
/*
* Helper method for printing the given matrix of floats to the standard output.
* Used for debugging.
*/
static void print_vector(float *in, int width, int height, int lda);
};
/*
 * Assigns the value alpha to every element of the array in of the given
 * size.  The assignment loop is distributed across threads with OpenMP.
 */
template<typename T>
void LoewnerMorphology::Morph::fill(T *in, int size, T alpha) {
    #pragma omp parallel for
    for (int idx = 0; idx < size; ++idx) {
        in[idx] = alpha;
    }
}
/*
 * Copies size elements from in to out.  The copy loop is distributed
 * across threads with OpenMP.
 */
template<typename T>
void LoewnerMorphology::Morph::copy(T *in, T *out, int size) {
    #pragma omp parallel for
    for (int idx = 0; idx < size; ++idx) {
        out[idx] = in[idx];
    }
}
/*
 * Converts the width x height image at in into a padded matrix of Circle
 * objects at out, sized (width + 2*padding) x (height + 2*padding).
 * The whole buffer is first filled with a neutral border element so that
 * mask positions falling outside the image cannot affect the result:
 * for erosion (type == true) the border is Circle(T::max()).prepareMin(),
 * for dilation (type == false) it is Circle(T::min()).prepareMax().
 * Interior pixels are converted with the matching prepareMin()/prepareMax().
 * out must be preallocated by the caller to the padded size.
 */
template<typename T>
void LoewnerMorphology::Morph::prepare_vector(T *in, Circle *out, int width, int height, int padding, bool type) {
Circle element = (type) ? Circle(T::max()).prepareMin() : Circle(T::min()).prepareMax();
fill<Circle>(out, (width + 2 * padding) * (height + 2 * padding), element);
int pWidth = width + 2 * padding;
#pragma omp parallel for
for (int i = 0; i < height; i++) {
for(int j = 0; j < width; j++) {
out[(i + padding) * pWidth + (j + padding)] = (type) ? Circle(in[i * width + j]).prepareMin() : Circle(in[i * width + j]).prepareMax();
}
}
}
/*
 * Computes one basic morphological operation (type == false -> dilation,
 * type == true -> erosion) of the width x height image in into out.
 * The image is first copied into the padded Circle buffer inPrepared
 * (sized (width + 2*padding) x (height + 2*padding)); each output pixel is
 * then the reduction of the structuring-element window anchored at the
 * corresponding padded position (morph_basic_operation).  Because the input
 * is fully copied into inPrepared before any output is written, out may
 * alias in (the second-order handler relies on this).
 */
template<typename T>
void LoewnerMorphology::Morph::morph_basic_handler(Circle *inPrepared, T* in, T* out, int width, int height, int padding, int *mask, bool type) {
int pWidth = width + 2 * padding;
prepare_vector<T>(in, inPrepared, width, height, padding, type);
#pragma omp parallel for
for (int i = 0; i < height; i++) {
for(int j = 0; j < width; j++) {
Circle *current = inPrepared + i * pWidth + j;
out[i * width + j] = morph_basic_operation(current, pWidth, mask, padding, type);
}
}
}
/*
 * Allocates the temporary padded Circle buffer, performs the basic
 * morphological operation (type == false -> dilation, true -> erosion)
 * via morph_basic_handler, and releases the buffer.
 */
template<typename T>
void LoewnerMorphology::Morph::morph_basic_launcher(T *in, T *out, int width, int height, int padding, int *mask, bool type) {
Circle *inPrepared = new Circle[(width + (2 * padding)) * (height + (2 * padding))];
morph_basic_handler(inPrepared, in, out, width, height, padding, mask, type);
// The buffer was allocated with new[]; plain "delete" here is undefined behavior.
delete[] inPrepared;
}
/*
 * Second-order operations built from two basic passes over the same padded
 * buffer: closing (type == false) is dilation followed by erosion; opening
 * (type == true) is erosion followed by dilation.  The second pass uses out
 * both as input and output, which is safe because morph_basic_handler
 * copies its input into inPrepared before writing any output.
 */
template<typename T>
void LoewnerMorphology::Morph::morph_second_order_handler(Circle *inPrepared, T* in, T* out, int width, int height, int padding, int *mask, bool type) {
morph_basic_handler(inPrepared, in, out, width, height, padding, mask, type);
morph_basic_handler(inPrepared, out, out, width, height, padding, mask, !type);
}
// Allocates scratch and runs the second-order (opening/closing) operation.
// Fix: new[] allocation must be released with delete[] (plain delete is UB).
template<typename T>
void LoewnerMorphology::Morph::morph_second_order_launcher(T *in, T *out, int width, int height, int padding, int *mask, bool type) {
    Circle *inPrepared = new Circle[(width + (2 * padding)) * (height + (2 * padding))];
    morph_second_order_handler(inPrepared, in, out, width, height, padding, mask, type);
    delete[] inPrepared;
}
template<typename T>
void LoewnerMorphology::Morph::morph_einstein_launcher(T *image1, T *image2, T *imageResult, int width, int height, int lda1, int lda2, int lda3) {
#pragma omp parallel for
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
LoewnerMorphology::MorphColorMatrix m1 = Circle(image1[i * lda1 + j]).toMorphColorMatrixSphere();
LoewnerMorphology::MorphColorMatrix m2 = Circle(image2[i * lda2 + j]).toMorphColorMatrixSphere().negate();
imageResult[i * lda3 + j] = Circle(EinsteinOperations::einsteinAdditionMod(m1, m2)).toMorphColorMatrixCone2Epsilon();
}
}
}
// Top-hat / bottom-hat: second-order result combined with the original image
// via Einstein addition. The argument order of the combination depends on
// `type` (white vs. black hat).
template<typename T>
void LoewnerMorphology::Morph::morph_hats_handler(Circle *inPrepared, T *in, T *out, int width, int height, int padding, int *mask, bool type) {
morph_second_order_handler(inPrepared, in, out, width, height, padding, mask, type);
if (type) {
// closing - original (operands: in, out)
morph_einstein_launcher(in, out, out, width, height, width, width, width);
} else {
// original - opening (operands: out, in)
morph_einstein_launcher(out, in, out, width, height, width, width, width);
}
}
// Allocates scratch and runs the top-hat/bottom-hat operation.
// Fix: new[] allocation must be released with delete[] (plain delete is UB).
template<typename T>
void LoewnerMorphology::Morph::morph_hats_launcher(T *in, T *out, int width, int height, int padding, int *mask, bool type) {
    Circle *inPrepared = new Circle[(width + (2 * padding)) * (height + (2 * padding))];
    morph_hats_handler(inPrepared, in, out, width, height, padding, mask, type);
    delete[] inPrepared;
}
// Beucher gradient: dilation (+) negate(erosion), combined pixel-wise via
// Einstein addition. Fix: both scratch buffers come from new[], so they must
// be released with delete[] — plain `delete` is undefined behavior.
template<typename T>
void LoewnerMorphology::Morph::morph_beucher_launcher(T *in, T *out, int width, int height, int padding, int *mask) {
    Circle *inPrepared = new Circle[(width + (2 * padding)) * (height + (2 * padding))];
    T *temp = new T[width * height];
    morph_basic_handler(inPrepared, in, out, width, height, padding, mask, false);  // dilation
    morph_basic_handler(inPrepared, in, temp, width, height, padding, mask, true);  // erosion
    morph_einstein_launcher(out, temp, out, width, height, width, width, width);
    delete[] inPrepared;
    delete[] temp;
}
// Self-dual top-hat: combines the two second-order results (closing and
// opening) via Einstein addition. Fix: new[] buffers must be released with
// delete[] — plain `delete` is undefined behavior.
template<typename T>
void LoewnerMorphology::Morph::morph_sdth_launcher(T *in, T *out, int width, int height, int padding, int *mask) {
    Circle *inPrepared = new Circle[(width + (2 * padding)) * (height + (2 * padding))];
    T *temp = new T[width * height];
    morph_second_order_handler(inPrepared, in, out, width, height, padding, mask, false);
    morph_second_order_handler(inPrepared, in, temp, width, height, padding, mask, true);
    morph_einstein_launcher(out, temp, out, width, height, width, width, width);
    delete[] inPrepared;
    delete[] temp;
}
// Morphological gradient (internal/external, selected by `type`): one basic
// pass combined with the original image via Einstein addition.
template<typename T>
void LoewnerMorphology::Morph::morph_gradients_handler(Circle *inPrepared, T *in, T *out, int width, int height, int padding, int *mask, bool type) {
morph_basic_handler(inPrepared, in, out, width, height, padding, mask, type);
if (type) {
// erosion branch: original (+) negate(result)
morph_einstein_launcher(in, out, out, width, height, width, width, width);
} else {
// dilation branch: result (+) negate(original)
morph_einstein_launcher(out, in, out, width, height, width, width, width);
}
}
// Allocates scratch and runs the gradient operation.
// Fix: new[] allocation must be released with delete[] (plain delete is UB).
template<typename T>
void LoewnerMorphology::Morph::morph_gradients_launcher(T *in, T *out, int width, int height, int padding, int *mask, bool type) {
    Circle *inPrepared = new Circle[(width + (2 * padding)) * (height + (2 * padding))];
    morph_gradients_handler(inPrepared, in, out, width, height, padding, mask, type);
    delete[] inPrepared;
}
// Morphological Laplacian: Einstein combination of the external and internal
// gradients. Fix: new[] buffers must be released with delete[] — plain
// `delete` is undefined behavior.
template<typename T>
void LoewnerMorphology::Morph::morph_laplacian_launcher(T *in, T *out, int width, int height, int padding, int *mask) {
    Circle *inPrepared = new Circle[(width + (2 * padding)) * (height + (2 * padding))];
    T *temp = new T[width * height];
    morph_gradients_handler(inPrepared, in, out, width, height, padding, mask, false);
    morph_gradients_handler(inPrepared, in, temp, width, height, padding, mask, true);
    morph_einstein_launcher(out, temp, out, width, height, width, width, width);
    delete[] inPrepared;
    delete[] temp;
}
// Shock filter: per pixel, apply dilation or erosion depending on the sign of
// the morphological Laplacian's trace. Fix: all three buffers come from
// new[], so they must be released with delete[] — plain `delete` is UB.
template<typename T>
void LoewnerMorphology::Morph::morph_shock_launcher(T *in, T *out, int width, int height, int padding, int *mask) {
    T *laplacian = new T[width * height];
    Circle *dilationPrepared = new Circle[(width + (2 * padding)) * (height + (2 * padding))];
    Circle *erosionPrepared = new Circle[(width + (2 * padding)) * (height + (2 * padding))];
    // Both prepared views of `in` are needed because the per-pixel choice
    // between dilation and erosion is only known inside the operation.
    prepare_vector<T>(in, dilationPrepared, width, height, padding, false);
    prepare_vector<T>(in, erosionPrepared, width, height, padding, true);
    morph_laplacian_launcher(in, laplacian, width, height, padding, mask);
    morph_shock_operation(dilationPrepared, erosionPrepared, laplacian, out, width, height, padding, mask);
    delete[] dilationPrepared;
    delete[] erosionPrepared;
    delete[] laplacian;
}
// Per-pixel shock step: dilate where the Laplacian trace is non-positive,
// erode elsewhere, reading from the matching prepared buffer.
template<typename T>
void LoewnerMorphology::Morph::morph_shock_operation(Circle *dilationPrepared, Circle *erosionPrepared, T *laplacian, T *out, int width, int height, int padding, int *mask) {
    const int pWidth = width + 2 * padding;
    #pragma omp parallel for
    for (int row = 0; row < height; row++) {
        for (int col = 0; col < width; col++) {
            const int paddedIdx = row * pWidth + col;
            const int pixelIdx = row * width + col;
            const bool dilate = (laplacian[pixelIdx].trace() <= 0);
            Circle *window = (dilate ? dilationPrepared : erosionPrepared) + paddedIdx;
            // type flag: false for dilation, true for erosion.
            out[pixelIdx] = morph_basic_operation(window, pWidth, mask, padding, !dilate);
        }
    }
}
template<typename T>
void LoewnerMorphology::Morph::morph_handle(T *in, T *out, int width, int height, int padding, int *mask, int morphType, int iters) {
if (iters < 1) {
printf("Operation cannot be executed. Number of iterations must be greater than 0, but %d provided.\n", iters);
exit(EXIT_FAILURE);
}
T *temp = nullptr;
if (iters > 1) {
temp = new T[width * height];
}
switch (morphType) {
case 0:
for (int i = 0; i < iters; i++) {
if (i == 0) {
morph_basic_launcher<T>(in, out, width, height, padding, mask, false);
} else {
copy(out, temp, width * height);
morph_basic_launcher<T>(temp, out, width, height, padding, mask, false);
}
}
break;
case 1:
for (int i = 0; i < iters; i++) {
if (i == 0) {
morph_basic_launcher<T>(in, out, width, height, padding, mask, true);
} else {
copy(out, temp, width * height);
morph_basic_launcher<T>(temp, out, width, height, padding, mask, true);
}
}
break;
case 2:
for (int i = 0; i < iters; i++) {
if (i == 0) {
morph_second_order_launcher<T>(in, out, width, height, padding, mask, false);
} else {
copy(out, temp, width * height);
morph_second_order_launcher<T>(temp, out, width, height, padding, mask, false);
}
}
break;
case 3:
for (int i = 0; i < iters; i++) {
if (i == 0) {
morph_second_order_launcher<T>(in, out, width, height, padding, mask, true);
} else {
copy(out, temp, width * height);
morph_second_order_launcher<T>(temp, out, width, height, padding, mask, true);
}
}
break;
case 4:
for (int i = 0; i < iters; i++) {
if (i == 0) {
morph_hats_launcher<T>(in, out, width, height, padding, mask, false);
} else {
copy(out, temp, width * height);
morph_hats_launcher<T>(temp, out, width, height, padding, mask, false);
}
}
break;
case 5:
for (int i = 0; i < iters; i++) {
if (i == 0) {
morph_hats_launcher<T>(in, out, width, height, padding, mask, true);
} else {
copy(out, temp, width * height);
morph_hats_launcher<T>(temp, out, width, height, padding, mask, true);
}
}
break;
case 6:
for (int i = 0; i < iters; i++) {
if (i == 0) {
morph_sdth_launcher<T>(in, out, width, height, padding, mask);
} else {
copy(out, temp, width * height);
morph_sdth_launcher<T>(temp, out, width, height, padding, mask);
}
}
break;
case 7:
for (int i = 0; i < iters; i++) {
if (i == 0) {
morph_beucher_launcher<T>(in, out, width, height, padding, mask);
} else {
copy(out, temp, width * height);
morph_beucher_launcher<T>(temp, out, width, height, padding, mask);
}
}
break;
case 8:
for (int i = 0; i < iters; i++) {
if (i == 0) {
morph_gradients_launcher<T>(in, out, width, height, padding, mask, false);
} else {
copy(out, temp, width * height);
morph_gradients_launcher<T>(temp, out, width, height, padding, mask, false);
}
}
break;
case 9:
for (int i = 0; i < iters; i++) {
if (i == 0) {
morph_gradients_launcher<T>(in, out, width, height, padding, mask, true);
} else {
copy(out, temp, width * height);
morph_gradients_launcher<T>(temp, out, width, height, padding, mask, true);
}
}
break;
case 10:
for (int i = 0; i < iters; i++) {
if (i == 0) {
morph_laplacian_launcher<T>(in, out, width, height, padding, mask);
} else {
copy(out, temp, width * height);
morph_laplacian_launcher<T>(temp, out, width, height, padding, mask);
}
}
break;
case 11:
for (int i = 0; i < iters; i++) {
if (i == 0) {
morph_shock_launcher<T>(in, out, width, height, padding, mask);
} else {
copy(out, temp, width * height);
morph_shock_launcher<T>(temp, out, width, height, padding, mask);
}
}
break;
}
if (temp != nullptr) {
delete temp;
}
}
#endif
|
GB_unop__round_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__round_fc64_fc64)
// op(A') function: GB (_unop_tran__round_fc64_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = GB_cround (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_cround (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = GB_cround (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ROUND || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx = GB_cround (Ax): apply the complex round operator element-wise.
   Auto-generated code — logic left untouched; comments only. */
GrB_Info GB (_unop_apply__round_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        /* dense/full case: every entry is present */
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_cround (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     /* skip entries absent from the bitmap */
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_cround (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = GB_cround (A'): transpose and apply the complex round operator.
   The entire implementation is supplied by the shared transpose template,
   specialized through the GB_* macros defined above. */
GrB_Info GB (_unop_tran__round_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
cones.c | /*
* The MIT License (MIT)
*
* Copyright (c) 2017 Pantelis Sopasakis (https://alphaville.github.io),
* Krina Menounou (https://www.linkedin.com/in/krinamenounou),
* Panagiotis Patrinos (http://homes.esat.kuleuven.be/~ppatrino)
* Copyright (c) 2012 Brendan O'Donoghue (bodonoghue85@gmail.com)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include "scs.h"
#include "scs_blas.h" /* contains BLAS(X) macros and type info */
#define CONE_RATE (2)
#define CONE_TOL (1e-8)
#define CONE_THRESH (1e-6)
#define EXP_CONE_MAX_ITERS (100)
#define POW_CONE_MAX_ITERS (20)
#ifdef LAPACK_LIB_FOUND
extern void BLAS(syevr)(const char *jobz, const char *range, const char *uplo,
blasint *n, scs_float *a, blasint *lda, scs_float *vl,
scs_float *vu, blasint *il, blasint *iu, scs_float *abstol,
blasint *m, scs_float *w, scs_float *z, blasint *ldz,
blasint *isuppz, scs_float *work, blasint *lwork,
blasint *iwork, blasint *liwork, blasint *info);
extern void BLAS(syr)(const char *uplo, const blasint *n, const scs_float *alpha,
const scs_float *x, const blasint *incx, scs_float *a,
const blasint *lda);
extern void BLAS(scal)(const blasint *n, const scs_float *sa, scs_float *sx,
const blasint *incx);
extern scs_float BLAS(nrm2)(const blasint *n, scs_float *x, const blasint *incx);
#endif
/* Number of packed lower-triangular entries of an s-by-s symmetric matrix. */
static scs_int getSdConeSize(scs_int s) {
    return s * (s + 1) / 2;
}
/*
* boundaries will contain array of indices of rows of A corresponding to
* cone boundaries, boundaries[0] is starting index for cones of size strictly
* larger than 1
* returns length of boundaries array, boundaries malloc-ed here so should be
* freed
*/
/* Fills *boundaries with the row-count of each cone block of A, in the order
 * zero+linear, SOC blocks, SD blocks, exp primal+dual, power; returns the
 * array length. The array is malloc'd here and owned by the caller.
 * NOTE(review): the scs_malloc result is used unchecked — on allocation
 * failure b[count] dereferences NULL; confirm whether scs_malloc aborts. */
scs_int scs_get_cone_boundaries(
    const ScsCone * RESTRICT k,
    scs_int * * RESTRICT boundaries) {
    scs_int i, count = 0;
    /* one slot for the combined zero+linear block, then one per cone block */
    scs_int len = 1 + k->qsize + k->ssize + k->ed + k->ep + k->psize;
    scs_int *RESTRICT b = scs_malloc(sizeof (scs_int) * len);
    b[count] = k->f + k->l;   /* zero cone and LP cone share one entry */
    count += 1;
    if (k->qsize > 0) {
        /* SOC sizes are copied verbatim */
        memcpy(&b[count], k->q, k->qsize * sizeof (scs_int));
    }
    count += k->qsize;
    for (i = 0; i < k->ssize; ++i) {
        /* SD cones are stored packed, so their row count is s(s+1)/2 */
        b[count + i] = getSdConeSize(k->s[i]);
    }
    count += k->ssize;
    for (i = 0; i < k->ep + k->ed; ++i) {
        b[count + i] = 3;     /* each exponential cone occupies 3 rows */
    }
    count += k->ep + k->ed;
    for (i = 0; i < k->psize; ++i) {
        b[count + i] = 3;     /* each power cone occupies 3 rows */
    }
    /* count += k->psize; */
    *boundaries = b;
    return len;
}
/* Total number of rows of A implied by the cone description k.
 * Fix: the power-cone term was guarded by `if (k->p)` (the exponent array
 * pointer) while every other cone here and in scs_validate_cones guards on
 * `size && array`; a non-null p with psize==0, or vice versa, was counted
 * inconsistently. Now guarded by `k->psize && k->p` like the rest. */
static scs_int getFullConeDims(const ScsCone *RESTRICT k) {
    scs_int i, c = 0;
    if (k->f)
        c += k->f;                      /* zero (free dual) cone */
    if (k->l)
        c += k->l;                      /* nonnegative orthant */
    if (k->qsize && k->q) {
        for (i = 0; i < k->qsize; ++i) {
            c += k->q[i];               /* second-order cones */
        }
    }
    if (k->ssize && k->s) {
        for (i = 0; i < k->ssize; ++i) {
            c += getSdConeSize(k->s[i]); /* packed SD cones */
        }
    }
    if (k->ed)
        c += 3 * k->ed;                 /* dual exponential cones */
    if (k->ep)
        c += 3 * k->ep;                 /* primal exponential cones */
    if (k->psize && k->p)
        c += 3 * k->psize;              /* power cones (3 rows each) */
    return c;
}
/* Validates the cone description against the data dimensions; returns 0 on
 * success, -1 on any error (a message is printed).
 * Fix: the "ep" and "ed" error messages were swapped — a bad dual
 * exponential count (ed) printed "ep cone error" and vice versa. */
scs_int scs_validate_cones(const ScsData * RESTRICT d, const ScsCone * RESTRICT k) {
    scs_int i;
    if (getFullConeDims(k) != d->m) {
        scs_printf("cone dimensions %li not equal to num rows in A = m = %li\n",
                   (long) getFullConeDims(k), (long) d->m);
        return -1;
    }
    if (k->f && k->f < 0) {
        scs_printf("free cone error\n");
        return -1;
    }
    if (k->l && k->l < 0) {
        scs_printf("lp cone error\n");
        return -1;
    }
    if (k->qsize && k->q) {
        if (k->qsize < 0) {
            scs_printf("soc cone error\n");
            return -1;
        }
        for (i = 0; i < k->qsize; ++i) {
            if (k->q[i] < 0) {
                scs_printf("soc cone error\n");
                return -1;
            }
        }
    }
    if (k->ssize && k->s) {
        if (k->ssize < 0) {
            scs_printf("sd cone error\n");
            return -1;
        }
        for (i = 0; i < k->ssize; ++i) {
            if (k->s[i] < 0) {
                scs_printf("sd cone error\n");
                return -1;
            }
        }
    }
    if (k->ed && k->ed < 0) {
        scs_printf("ed cone error\n");   /* was "ep cone error" (swapped) */
        return -1;
    }
    if (k->ep && k->ep < 0) {
        scs_printf("ep cone error\n");   /* was "ed cone error" (swapped) */
        return -1;
    }
    if (k->psize && k->p) {
        if (k->psize < 0) {
            scs_printf("power cone error\n");
            return -1;
        }
        for (i = 0; i < k->psize; ++i) {
            /* power cone exponents must lie in [-1, 1]; negative values
               denote dual power cones */
            if (k->p[i] < -1 || k->p[i] > 1) {
                scs_printf("power cone error, values must be in [-1,1]\n");
                return -1;
            }
        }
    }
    return 0;
}
/* Returns a heap-allocated one-line summary of the average cone-projection
 * time per iteration; caller frees. Resets the time accumulator. */
char *scs_get_cone_summary(const ScsInfo * RESTRICT info, ScsConeWork * RESTRICT c) {
    char *str = scs_malloc(sizeof (char) * 64);
    /* NOTE(review): the /1e3 suggests total_cone_time is accumulated in
     * milliseconds and reported in seconds — confirm against scs_toc_quiet. */
    sprintf(str, "\tCones: avg projection time: %1.2es\n",
            c->total_cone_time / (info->iter + 1) / 1e3);
    c->total_cone_time = 0.0;   /* reset for the next reporting interval */
    return str;
}
/* Frees the cone workspace and all LAPACK scratch buffers it owns.
 * Fix: guard against a NULL workspace — c was dereferenced unconditionally
 * (under LAPACK_LIB_FOUND), so calling this on a failed/absent workspace
 * crashed. scs_free on NULL members is assumed safe, matching free(). */
void scs_finish_cone(ScsConeWork * RESTRICT c) {
    if (c == SCS_NULL) {
        return;
    }
#ifdef LAPACK_LIB_FOUND
    scs_free(c->Xs);
    scs_free(c->Z);
    scs_free(c->e);
    scs_free(c->work);
    scs_free(c->iwork);
#endif
    scs_free(c);
}
/* Builds a heap-allocated multi-line description of the cone sizes for the
 * solver banner; caller frees. Each sprintf appends at strlen(tmp).
 * NOTE(review): the 512-byte buffer is assumed large enough for all lines;
 * confirm if cone counts can make the text longer. */
char *scs_get_cone_header(const ScsCone * RESTRICT k) {
    char *tmp = scs_malloc(sizeof (char) * 512);
    scs_int i, socVars, socBlks, sdVars, sdBlks;
    sprintf(tmp, "Cones:");
    if (k->f) {
        sprintf(tmp + strlen(tmp), "\tprimal zero / dual free vars: %li\n",
                (long) k->f);
    }
    if (k->l) {
        sprintf(tmp + strlen(tmp), "\tlinear vars: %li\n", (long) k->l);
    }
    socVars = 0;
    socBlks = 0;
    if (k->qsize && k->q) {
        socBlks = k->qsize;
        for (i = 0; i < k->qsize; i++) {
            socVars += k->q[i];   /* total rows across all SOC blocks */
        }
        sprintf(tmp + strlen(tmp), "\tsoc vars: %li, soc blks: %li\n",
                (long) socVars, (long) socBlks);
    }
    sdVars = 0;
    sdBlks = 0;
    if (k->ssize && k->s) {
        sdBlks = k->ssize;
        for (i = 0; i < k->ssize; i++) {
            sdVars += getSdConeSize(k->s[i]);   /* packed entries per block */
        }
        sprintf(tmp + strlen(tmp), "\tsd vars: %li, sd blks: %li\n",
                (long) sdVars, (long) sdBlks);
    }
    if (k->ep || k->ed) {
        sprintf(tmp + strlen(tmp), "\texp vars: %li, dual exp vars: %li\n",
                (long) 3 * k->ep, (long) 3 * k->ed);
    }
    if (k->psize && k->p) {
        sprintf(tmp + strlen(tmp), "\tprimal + dual power vars: %li\n",
                (long) 3 * k->psize);
    }
    return tmp;
}
/* Returns 1 when every SD block has size <= 2 (projectable without LAPACK),
 * 0 otherwise. */
static scs_int isSimpleSemiDefiniteCone(scs_int * RESTRICT s, scs_int ssize) {
    scs_int idx;
    for (idx = 0; idx < ssize; idx++) {
        if (s[idx] > 2) {
            return 0; /* needs the full eigendecomposition path */
        }
    }
    return 1;
}
/* 1-D Newton iteration used inside the exponential-cone projection: solves
 * for t given dual variable rho and shifted coordinates (y_hat, z_hat),
 * returning the corresponding x[2] value. Numerically delicate — the early
 * returns clamp t back into the valid region. */
static scs_float expNewtonOneD(scs_float rho, scs_float y_hat, scs_float z_hat) {
    scs_float t = MAX(-z_hat, 1e-6);   /* start strictly inside t > -z_hat */
    scs_float f, fp;
    scs_int i;
    for (i = 0; i < EXP_CONE_MAX_ITERS; ++i) {
        f = t * (t + z_hat) / rho / rho - y_hat / rho + log(t / rho) + 1;
        fp = (2 * t + z_hat) / rho / rho + 1 / t;   /* derivative wrt t */
        t = t - f / fp;   /* Newton step */
        if (t <= -z_hat) {
            /* stepped out of the domain on the low side */
            return 0;
        } else if (t <= 0) {
            return z_hat;
        } else if (ABS(f) < CONE_TOL) {
            break;   /* converged */
        }
    }
    return t + z_hat;
}
/* Given dual variable rho, computes the candidate projection x (length 3)
 * of v onto the exponential cone. Order matters: x[1] depends on x[2]. */
static void expSolveForXWithRho(scs_float * RESTRICT v, scs_float * RESTRICT x, scs_float rho) {
    x[2] = expNewtonOneD(rho, v[1], v[2]);
    x[1] = (x[2] - v[2]) * x[2] / rho;   /* uses x[2] computed above */
    x[0] = v[0] - rho;
}
/* Gradient of the projection objective with respect to the dual variable
 * rho; also fills x with the candidate projection as a side effect. */
static scs_float expCalcGrad(scs_float * RESTRICT v, scs_float * RESTRICT x, scs_float rho) {
    expSolveForXWithRho(v, x, rho);
    if (x[1] <= 1e-12) {
        /* avoid log of (near-)zero: drop the x[1]*log(x[1]/x[2]) term */
        return x[0];
    }
    return x[0] + x[1] * log(x[1] / x[2]);
}
/* Brackets the root of expCalcGrad in rho: doubles *ub until the gradient
 * becomes non-positive, keeping *lb as the last positive-gradient point.
 * x is scratch, overwritten by each gradient evaluation. */
static void expGetRhoUb(scs_float * RESTRICT v, scs_float * RESTRICT x, scs_float * RESTRICT ub, scs_float *lb) {
    *lb = 0;
    *ub = 0.125;
    while (expCalcGrad(v, x, *ub) > 0) {
        *lb = *ub;
        (*ub) *= 2;
    }
}
/* project onto the exponential cone, v has dimension *exactly* 3 */
/* Projects v (length exactly 3) onto the exponential cone, in place.
 * Fast paths: v already in cl(Kexp); -v in the dual cone (projection is 0);
 * the analytical r<0, s<0 case. Otherwise bisects on the dual variable rho.
 * `iter` is currently unused (see the commented-out adaptive tolerance). */
static scs_int projExpCone(scs_float * RESTRICT v, scs_int iter) {
    scs_int i;
    scs_float ub, lb, rho, g, x[3];
    scs_float r = v[0], s = v[1], t = v[2];
    scs_float tol = CONE_TOL; /* iter < 0 ? CONE_TOL : MAX(CONE_TOL, 1 /
                                 POWF((iter + 1), CONE_RATE)); */
    /* v in cl(Kexp): already feasible, nothing to do */
    if ((s > 0 && s * exp(r / s) - t <= CONE_THRESH) ||
        (r <= 0 && s == 0 && t >= 0)) {
        return 0;
    }
    /* -v in Kexp^*: projection onto Kexp is the origin */
    if ((-r < 0 && r * exp(s / r) + exp(1) * t <= CONE_THRESH) ||
        (-r == 0 && -s >= 0 && -t >= 0)) {
        memset(v, 0, 3 * sizeof (scs_float));
        return 0;
    }
    /* special case with analytical solution */
    if (r < 0 && s < 0) {
        v[1] = 0.0;
        v[2] = MAX(v[2], 0);
        return 0;
    }
    /* iterative procedure to find projection, bisects on dual variable: */
    expGetRhoUb(v, x, &ub, &lb); /* get starting upper and lower bounds */
    for (i = 0; i < EXP_CONE_MAX_ITERS; ++i) {
        rho = (ub + lb) / 2; /* halfway between upper and lower bounds */
        g = expCalcGrad(v, x, rho); /* calculates gradient wrt dual var */
        if (g > 0) {
            lb = rho;
        } else {
            ub = rho;
        }
        if (ub - lb < tol) {
            break;
        }
    }
    /* x holds the projection computed at the final rho */
    v[0] = x[0];
    v[1] = x[1];
    v[2] = x[2];
    return 0;
}
/* Allocates the eigendecomposition workspace sized for the largest SD block
 * and performs the LAPACK syevr workspace query. Returns 0 on success, -1 on
 * failure (caller frees via scs_finish_cone).
 * Fix: the calloc'd buffers (Xs, Z, e) were handed to BLAS(syevr) BEFORE the
 * NULL check at the bottom of the function — on allocation failure LAPACK
 * dereferenced NULL. The check now precedes the syevr call. */
static scs_int setUpSdScsConeWorkSpace(ScsConeWork * RESTRICT c, const ScsCone * RESTRICT k) {
#ifdef LAPACK_LIB_FOUND
    scs_int i;
    blasint nMax = 0;
    scs_float eigTol = 1e-8;
    blasint negOne = -1;      /* lwork/liwork = -1 triggers a workspace query */
    blasint m = 0;
    blasint info;
    scs_float wkopt;
    /* eigenvector decomp workspace: size for the largest SD block */
    for (i = 0; i < k->ssize; ++i) {
        if (k->s[i] > nMax) {
            nMax = (blasint) k->s[i];
        }
    }
    c->Xs = scs_calloc(nMax * nMax, sizeof (scs_float));
    c->Z = scs_calloc(nMax * nMax, sizeof (scs_float));
    c->e = scs_calloc(nMax, sizeof (scs_float));
    /* validate allocations BEFORE passing the buffers to LAPACK */
    if (c->Xs == SCS_NULL || c->Z == SCS_NULL || c->e == SCS_NULL) {
        return -1;
    }
    BLAS(syevr)("Vectors", "All", "Lower", &nMax, c->Xs, &nMax, SCS_NULL,
                SCS_NULL, SCS_NULL, SCS_NULL, &eigTol, &m, c->e, c->Z, &nMax,
                SCS_NULL, &wkopt, &negOne, &(c->liwork), &negOne, &info);
    if (info != 0) {
        scs_printf("FATAL: syevr failure, info = %li\n", (long) info);
        return -1;
    }
    c->lwork = (blasint) (wkopt + 0.01); /* 0.01 for int casting safety */
    c->work = scs_malloc(c->lwork * sizeof (scs_float));
    c->iwork = scs_malloc(c->liwork * sizeof (blasint));
    if (c->work == SCS_NULL || c->iwork == SCS_NULL) {
        return -1;
    }
    return 0;
#else
    scs_printf("FATAL: Cannot solve SDPs with > 2x2 matrices without linked "
               "blas+lapack libraries\n");
    scs_printf("Install blas+lapack and re-compile SCS with blas+lapack libray "
               "locations\n");
    return -1;
#endif
}
/* Allocates and initializes the cone workspace; sets up the LAPACK
 * eigen-workspace only when some SD block exceeds 2x2. Returns SCS_NULL on
 * failure. Fix: the scs_calloc result was dereferenced without a NULL check. */
ScsConeWork *scs_init_conework(const ScsCone * RESTRICT k) {
    ScsConeWork * RESTRICT coneWork = scs_calloc(1, sizeof (ScsConeWork));
    if (coneWork == SCS_NULL) {
        return SCS_NULL;
    }
    coneWork->total_cone_time = 0.0;
    if (k->ssize && k->s) {
        /* only blocks larger than 2x2 need the LAPACK workspace */
        if (isSimpleSemiDefiniteCone(k->s, k->ssize) == 0 &&
            setUpSdScsConeWorkSpace(coneWork, k) < 0) {
            scs_finish_cone(coneWork);
            return SCS_NULL;
        }
    }
    return coneWork;
}
/* Analytic projection of a packed 2x2 symmetric matrix X = [a b; b d]
 * (stored as {a, sqrt(2)*b, d}) onto the PSD cone, in place. Avoids the
 * LAPACK path entirely. Returns 0. */
scs_int project2By2Sdc(scs_float *X) {
    scs_float a, b, d, l1, l2, x1, x2, rad;
    scs_float sqrt2 = SQRTF(2.0);
    a = X[0];
    b = X[1] / sqrt2;   /* undo the packed off-diagonal scaling */
    d = X[2];
    if (ABS(b) < 1e-6) { /* diagonal matrix */
        X[0] = MAX(a, 0);
        X[1] = 0;
        X[2] = MAX(d, 0);
        return 0;
    }
    rad = SQRTF((a - d) * (a - d) + 4 * b * b);
    /* l1 >= l2 always, since rad >= 0 */
    l1 = 0.5 * (a + d + rad);
    l2 = 0.5 * (a + d - rad);
    if (l2 >= 0) { /* both eigs positive already */
        return 0;
    }
    if (l1 <= 0) { /* both eigs negative, set to 0 */
        X[0] = 0;
        X[1] = 0;
        X[2] = 0;
        return 0;
    }
    /* l1 pos, l2 neg: keep only the positive eigenpair l1 * x x^T */
    x1 = 1 / SQRTF(1 + (l1 - a) * (l1 - a) / b / b);   /* unit eigenvector */
    x2 = x1 * (l1 - a) / b;
    X[0] = l1 * x1 * x1;
    X[1] = (l1 * x1 * x2) * sqrt2;   /* re-apply packed scaling */
    X[2] = l1 * x2 * x2;
    return 0;
}
/* size of X is getSdConeSize(n) */
/* Projects a packed lower-triangular n-by-n symmetric matrix X (length
 * getSdConeSize(n)) onto the PSD cone, in place. Sizes 0/1/2 are handled
 * analytically; larger blocks use LAPACK syevr restricted to the positive
 * eigenvalues and rebuild X from those eigenpairs. Returns 0 on success,
 * -1 on LAPACK failure (or when LAPACK is not linked). */
static scs_int projSemiDefiniteCone(
    scs_float * RESTRICT X,
    const scs_int n,
    ScsConeWork * RESTRICT c,
    const scs_int iter) {
    /* project onto the positive semi-definite cone */
#ifdef LAPACK_LIB_FOUND
    scs_int i;
    blasint one = 1;
    blasint m = 0;                      /* number of eigenvalues found */
    blasint nb = (blasint) n;
    blasint nbPlusOne = (blasint) (n + 1);   /* stride hitting the diagonal */
    blasint coneSz = (blasint) (getSdConeSize(n));
    scs_float sqrt2 = SQRTF(2.0);
    scs_float sqrt2Inv = 1.0 / sqrt2;
    scs_float *RESTRICT Xs = c->Xs;
    scs_float *RESTRICT Z = c->Z;
    scs_float *RESTRICT e = c->e;
    scs_float *RESTRICT work = c->work;
    blasint *RESTRICT iwork = c->iwork;
    blasint lwork = c->lwork;
    blasint liwork = c->liwork;
    scs_float eigTol = CONE_TOL; /* iter < 0 ? CONE_TOL : MAX(CONE_TOL, 1 /
                                    POWF(iter + 1, CONE_RATE)); */
    scs_float zero = 0.0;
    blasint info;
    scs_float vupper;
#endif /* LAPACK_LIB_FOUND */
    if (n == 0) {
        return 0;
    }
    if (n == 1) {
        /* scalar case: clamp to nonnegative */
        if (X[0] < 0.0) {
            X[0] = 0.0;
        }
        return 0;
    }
    if (n == 2) {
        return project2By2Sdc(X);   /* closed-form 2x2 projection */
    }
#ifdef LAPACK_LIB_FOUND
    /* expand lower triangular matrix to full matrix */
    for (i = 0; i < n; ++i) {
        memcpy(&(Xs[i * (n + 1)]), &(X[i * n - ((i - 1) * i) / 2]),
               (n - i) * sizeof (scs_float));
    }
    /*
       rescale so projection works, and matrix norm preserved
       see http://www.seas.ucla.edu/~vandenbe/publications/mlbook.pdf pg 3
     */
    /* scale diags by sqrt(2) */
    BLAS(scal)(&nb, &sqrt2, Xs, &nbPlusOne); /* not nSquared */
    /* max-eig upper bounded by frobenius norm */
    vupper = 1.1 * sqrt2 *
             BLAS(nrm2)(&coneSz, X,
                        &one); /* mult by factor to make sure is upper bound */
    vupper = MAX(vupper, 0.01);
    /* Solve eigenproblem, reuse workspaces; only eigenvalues in (0, vupper]
       are requested, i.e. exactly the part kept by the projection */
    BLAS(syevr)("Vectors", "VInterval", "Lower", &nb, Xs, &nb, &zero, &vupper,
                SCS_NULL, SCS_NULL, &eigTol, &m, e, Z, &nb, SCS_NULL, work,
                &lwork, iwork, &liwork, &info);
    if (info < 0)
        return -1;
    /* rebuild X from the m positive eigenpairs: sum_i e[i] z_i z_i^T */
    memset(Xs, 0, n * n * sizeof (scs_float));
    for (i = 0; i < m; ++i) {
        scs_float a = e[i];
        BLAS(syr)("Lower", &nb, &a, &(Z[i * n]), &one, Xs, &nb);
    }
    /* scale diags by 1/sqrt(2) */
    BLAS(scal)(&nb, &sqrt2Inv, Xs, &nbPlusOne); /* not nSquared */
    /* extract just lower triangular matrix */
    for (i = 0; i < n; ++i) {
        memcpy(&(X[i * n - ((i - 1) * i) / 2]), &(Xs[i * (n + 1)]),
               (n - i) * sizeof (scs_float));
    }
#else /* LAPACK_LIB_FOUND */
    scs_printf("FAILURE: solving SDP with > 2x2 matrices, but no blas/lapack "
               "libraries were linked!\n");
    scs_printf("SCS will return nonsense!\n");
    scs_scale_array(X, NAN, n);
    return -1;
#endif /* LAPACK_LIB_FOUND */
    return 0;
}
/* Power-cone projection helper: x coordinate implied by dual variable r,
 * floored at 1e-12 to keep later divisions and POWF calls well-defined. */
static scs_float powCalcX(scs_float r, scs_float xh, scs_float rh, scs_float a) {
    scs_float x = 0.5 * (xh + SQRTF(xh * xh + 4 * a * (rh - r) * r));
    return MAX(x, 1e-12);
}
/* Power-cone projection helper: derivative dx/dr of powCalcX at (x, r). */
static scs_float powCalcdxdr(scs_float x, scs_float xh, scs_float rh, scs_float r,
                             scs_float a) {
    return a * (rh - 2 * r) / (2 * x - xh);
}
/* Power-cone residual: f = x^a * y^(1-a) - r; zero at the cone boundary. */
static scs_float powCalcF(scs_float x, scs_float y, scs_float r, scs_float a) {
    return POWF(x, a) * POWF(y, (1 - a)) - r;
}
/* Derivative of powCalcF with respect to r, via the chain rule through
 * dx/dr and dy/dr. */
static scs_float powCalcFp(scs_float x, scs_float y, scs_float dxdr, scs_float dydr,
                           scs_float a) {
    return POWF(x, a) * POWF(y, (1 - a)) * (a * dxdr / x + (1 - a) * dydr / y) -
           1;
}
/* Projects v (length 3) onto the power cone K_a = {(x,y,r): x^a y^(1-a) >= |r|,
 * x,y >= 0}, in place. Fast paths: v already in K_a, or -v in the dual cone
 * (projection is 0); otherwise a damped Newton iteration on the dual
 * variable r, clamped to [0, rh]. */
static void projPowerCone(scs_float *RESTRICT v, scs_float a) {
    scs_float xh = v[0], yh = v[1], rh = ABS(v[2]);
    scs_float x, y, r;
    scs_int i;
    /* v in K_a */
    if (xh >= 0 && yh >= 0 &&
        CONE_THRESH + POWF(xh, a) * POWF(yh, (1 - a)) >= rh)
        return;
    /* -v in K_a^* */
    if (xh <= 0 && yh <= 0 &&
        CONE_THRESH + POWF(-xh, a) * POWF(-yh, 1 - a) >=
            rh * POWF(a, a) * POWF(1 - a, 1 - a)) {
        v[0] = v[1] = v[2] = 0;
        return;
    }
    r = rh / 2;   /* initial guess midway in [0, rh] */
    for (i = 0; i < POW_CONE_MAX_ITERS; ++i) {
        scs_float f, fp, dxdr, dydr;
        x = powCalcX(r, xh, rh, a);
        y = powCalcX(r, yh, rh, 1 - a);   /* symmetric role with exponent 1-a */
        f = powCalcF(x, y, r, a);
        if (ABS(f) < CONE_TOL)
            break;
        dxdr = powCalcdxdr(x, xh, rh, r, a);
        dydr = powCalcdxdr(y, yh, rh, r, (1 - a));
        fp = powCalcFp(x, y, dxdr, dydr, a);
        r = MAX(r - f / fp, 0);   /* Newton step, clamped into [0, rh] */
        r = MIN(r, rh);
    }
    v[0] = x;
    v[1] = y;
    v[2] = (v[2] < 0) ? -(r) : (r);   /* restore the sign stripped by ABS */
}
/* outward facing cone projection routine, iter is outer algorithm iteration, if
iter < 0 then iter is ignored
warm_start contains guess of projection (can be set to SCS_NULL) */
/* Projects x onto the dual of the cone described by k, block by block and in
 * place: free/zero block is skipped, then LP, SOC, SD, exponential (primal
 * via Moreau, dual directly) and power cones. `count` tracks the running row
 * offset, so the block order must match scs_get_cone_boundaries. Returns 0
 * on success, -1 on SD-projection failure. `warm_start` is currently unused
 * (see header comment in the file). */
scs_int scs_project_dual_cone(
    scs_float * RESTRICT x,
    const ScsCone * RESTRICT k,
    ScsConeWork * RESTRICT c,
    const scs_float * RESTRICT warm_start,
    scs_int iter) {
    scs_int i;
    scs_int count = (k->f ? k->f : 0);   /* free dual block needs no work */
    ScsTimer coneTimer;
    scs_tic(&coneTimer);
    if (k->l) {
        /* project onto positive orthant */
        for (i = count; i < count + k->l; ++i) {
            if (x[i] < 0.0)
                x[i] = 0.0;
            /* x[i] = (x[i] < 0.0) ? 0.0 : x[i]; */
        }
        count += k->l;
    }
    if (k->qsize && k->q) {
        /* project onto SOC */
        for (i = 0; i < k->qsize; ++i) {
            if (k->q[i] == 0) {
                continue;
            }
            if (k->q[i] == 1) {
                /* 1-D SOC degenerates to the nonnegative ray */
                if (x[count] < 0.0)
                    x[count] = 0.0;
            } else {
                /* standard SOC projection via (t, ||z||) cases */
                scs_float v1 = x[count];
                scs_float s = scs_norm(&(x[count + 1]), k->q[i] - 1);
                scs_float alpha = (s + v1) / 2.0;
                if (s <= v1) { /* do nothing */
                } else if (s <= -v1) {
                    /* in the polar cone: projection is 0 */
                    memset(&(x[count]), 0, k->q[i] * sizeof (scs_float));
                } else {
                    x[count] = alpha;
                    scs_scale_array(&(x[count + 1]), alpha / s, k->q[i] - 1);
                }
            }
            count += k->q[i];
        }
    }
    if (k->ssize && k->s) {
        /* project onto PSD cone */
        for (i = 0; i < k->ssize; ++i) {
            if (k->s[i] == 0) {
                continue;
            }
            if (projSemiDefiniteCone(&(x[count]), k->s[i], c, iter) < 0)
                return -1;
            count += getSdConeSize(k->s[i]);
        }
    }
    if (k->ep) {
        scs_float r, s, t;
        scs_int idx;
        /*
         * exponential cone is not self dual, if s \in K
         * then y \in K^* and so if K is the primal cone
         * here we project onto K^*, via Moreau
         * \Pi_C^*(y) = y + \Pi_C(-y)
         */
        scs_scale_array(&(x[count]), -1, 3 * k->ep); /* x = -x; */
#ifdef _OPENMP
#pragma omp parallel for private(r, s, t, idx)
#endif
        for (i = 0; i < k->ep; ++i) {
            idx = count + 3 * i;
            r = x[idx];
            s = x[idx + 1];
            t = x[idx + 2];
            projExpCone(&(x[idx]), iter);
            /* Moreau: add back -y (x already holds -y, so subtract it) */
            x[idx] -= r;
            x[idx + 1] -= s;
            x[idx + 2] -= t;
        }
        count += 3 * k->ep;
    }
    if (k->ed) {
        /* exponential cone: dual side projects directly */
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for (i = 0; i < k->ed; ++i) {
            projExpCone(&(x[count + 3 * i]), iter);
        }
        count += 3 * k->ed;
    }
    if (k->psize && k->p) {
        scs_float v[3];
        scs_int idx;
        /* don't use openmp for power cone
        ifdef _OPENMP
        pragma omp parallel for private(v, idx)
        endif
        */
        for (i = 0; i < k->psize; ++i) {
            idx = count + 3 * i;
            if (k->p[i] <= 0) {
                /* dual power cone */
                projPowerCone(&(x[idx]), -k->p[i]);
            } else {
                /* primal power cone, using Moreau */
                v[0] = -x[idx];
                v[1] = -x[idx + 1];
                v[2] = -x[idx + 2];
                projPowerCone(v, k->p[i]);
                x[idx] += v[0];
                x[idx + 1] += v[1];
                x[idx + 2] += v[2];
            }
        }
        /* count += 3 * k->psize; */
    }
    /* project onto OTHER cones */
    if (c) {
        c->total_cone_time += scs_toc_quiet(&coneTimer);
    }
    return 0;
}
|
ft.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 2.3 OpenMP C versions - FT
This benchmark is an OpenMP C version of the NPB FT code.
The OpenMP C versions are developed by RWCP and derived from the serial
Fortran versions in "NPB 2.3-serial" developed by NAS.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Authors: D. Bailey
W. Saphir
OpenMP C version: S. Satoh
--------------------------------------------------------------------*/
#include "npb-C.h"
/* global variables */
#include "global.h"
/* function declarations */
static void compute_initial_conditions (float u0_r[NTOTAL],
float u0_i[NTOTAL]);
static void ipow46 (float a, int exponent, float *result);
static void setup (void);
static void print_timers (void);
static void fft (int dir, float x1_r[NTOTAL], float x1_i[NTOTAL],
float x2_r[NTOTAL], float x2_i[NTOTAL]);
static void fft_init (int n);
static void cfftz (int is, int m, int n, float x_r[NTOTAL],
float x_i[NTOTAL], float y_r[NTOTAL], float y_i[NTOTAL],
int di1, int di2);
static void fftz2 (int is, int l, int m, int n,
float u_r[NX], float u_i[NX],
float x_r[NTOTAL], float x_i[NTOTAL],
float y_r[NTOTAL], float y_i[NTOTAL],
int di1, int di2);
static int ilog2 (int n);
static void verify (int d1, int d2, int d3, int nt,
boolean * verified, char *classT);
/*--------------------------------------------------------------------
c FT benchmark
c-------------------------------------------------------------------*/
int
main (int argc, char **argv)
{
/*c-------------------------------------------------------------------
c-------------------------------------------------------------------*/
int i_main, ierr;
/*------------------------------------------------------------------
c u0, u1, u2 are the main arrays in the problem.
c Depending on the decomposition, these arrays will have different
c dimensions. To accommodate all possibilities, we allocate them as
c one-dimensional arrays and pass them to subroutines for different
c views
c - u0 contains the (transformed) initial condition
c - u1 and u2 are working arrays
c - indexmap maps i,j,k of u0 to the correct i^2+j^2+k^2 for the
c time evolution operator.
c-----------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Large arrays are in common so that they are allocated on the
c heap rather than the stack. This common block is not
c referenced directly anywhere else. Padding is to avoid accidental
c cache problems, since all array sizes are powers of two.
c-------------------------------------------------------------------*/
static float u0_r[NTOTAL]; //u0_r[NZ][NY][NX];
static float u0_i[NTOTAL]; //u0_i[NZ][NY][NX];
static float u1_r[NTOTAL]; //u1_r[NZ][NY][NX];
static float u1_i[NTOTAL]; //u1_i[NZ][NY][NX];
static float u2_r[NTOTAL]; //u2_r[NZ][NY][NX];
static float u2_i[NTOTAL]; //u2_i[NZ][NY][NX];
static int indexmap[NTOTAL]; //indexmap[NZ][NY][NX];
int iter;
int nthreads = 1;
double total_time, mflops;
boolean verified;
char classT;
//////////////////////////////////
// Used for compute_indexmap(). //
//////////////////////////////////
int i, j, k, ii, ii2, jj, ij2, kk;
int m;
float ap;
////////////////////////
// Used for evolve(). //
////////////////////////
//int i, j, k;
//////////////////////////
// Used for checksum(). //
//////////////////////////
//int m, j,
int q, r, s;
float chk_r, chk_i;
/////////////////////
// Used for fft(). //
/////////////////////
int dir;
static float y0_r[NTOTAL];
static float y0_i[NTOTAL];
static float y1_r[NTOTAL];
static float y1_i[NTOTAL];
int logNX, logNY, logNZ;
///////////////////////
//Used for cffts1(). //
///////////////////////
//int i, j, k, jj, m;
int id;
int is;
///////////////////////
// Used for cfftz(). //
///////////////////////
int l;
///////////////////////
// Used for fftz2(). //
///////////////////////
int k_fftz2, n1, li, lj, lk, ku, i_fftz2, i11, i12, i21, i22;
float u1_rf, x11_r, x21_r;
float u1_if, x11_i, x21_i;
int idx, p, nn;
float x11real, x11imag, x21real, x21imag;
/*--------------------------------------------------------------------
c Run the entire problem once to make sure all data is touched.
c This reduces variable startup costs, which is important for such a
c short benchmark. The other NPB 2 implementations are similar.
c-------------------------------------------------------------------*/
for (i_main = 0; i_main < T_MAX; i_main++)
{
timer_clear (i_main);
}
setup ();
{
//compute_indexmap (indexmap);
/*--------------------------------------------------------------------
c compute function from local (i,j,k) to ibar^2+jbar^2+kbar^2
c for time evolution exponent.
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c basically we want to convert the fortran indices
c 1 2 3 4 5 6 7 8
c to
c 0 1 2 3 -4 -3 -2 -1
c The following magic formula does the trick:
c mod(i-1+n/2, n) - n/2
c-------------------------------------------------------------------*/
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
ii = (i + NX / 2) % NX - NX / 2;
ii2 = ii * ii;
jj = (j + NY / 2) % NY - NY / 2;
ij2 = jj * jj + ii2;
kk = (k + NZ / 2) % NZ - NZ / 2;
indexmap[m] = kk * kk + ij2;
}
/*--------------------------------------------------------------------
c compute array of exponentials for time evolution.
c-------------------------------------------------------------------*/
{
ap = -4.0F * ALPHA * PI * PI;
ex[0] = 1.0F;
ex[1] = exp (ap);
for (i = 2; i <= EXPMAX; i++)
{
ex[i] = ex[i - 1] * ex[1];
}
} /* end single */
{
compute_initial_conditions (u1_r, u1_i);
fft_init (dims[0][0]);
}
fft (1, u1_r, u1_i, u0_r, u0_i);
} /* end parallel */
/*--------------------------------------------------------------------
c Start over from the beginning. Note that all operations must
c be timed, in contrast to other benchmarks.
c-------------------------------------------------------------------*/
for (i_main = 0; i_main < T_MAX; i_main++)
{
timer_clear (i_main);
}
timer_start (T_TOTAL);
if (TIMERS_ENABLED == TRUE)
timer_start (T_SETUP);
//#pragma omp parallel private(iter) firstprivate(niter)
#pragma acc data \
create(ex[0:EXPMAX+1]) \
create(indexmap[0:NTOTAL]) \
create(u_r[0:NX], u_i[0:NX]) \
create(u1_r[0:NTOTAL], u1_i[0:NTOTAL]) \
create(u0_r[0:NTOTAL], u0_i[0:NTOTAL]) \
create(u2_r[0:NTOTAL], u2_i[0:NTOTAL]) \
create(y0_r[0:NTOTAL], y0_i[0:NTOTAL]) \
create(y1_r[0:NTOTAL], y1_i[0:NTOTAL])
{
//compute_indexmap (indexmap);
/*--------------------------------------------------------------------
c compute function from local (i,j,k) to ibar^2+jbar^2+kbar^2
c for time evolution exponent.
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c basically we want to convert the fortran indices
c 1 2 3 4 5 6 7 8
c to
c 0 1 2 3 -4 -3 -2 -1
c The following magic formula does the trick:
c mod(i-1+n/2, n) - n/2
c-------------------------------------------------------------------*/
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
ii = (i + NX / 2) % NX - NX / 2;
ii2 = ii * ii;
jj = (j + NY / 2) % NY - NY / 2;
ij2 = jj * jj + ii2;
kk = (k + NZ / 2) % NZ - NZ / 2;
indexmap[m] = kk * kk + ij2;
}
/*--------------------------------------------------------------------
c compute array of exponentials for time evolution.
c-------------------------------------------------------------------*/
{
ap = -4.0F * ALPHA * PI * PI;
ex[0] = 1.0F;
ex[1] = exp (ap);
for (i = 2; i <= EXPMAX; i++)
{
ex[i] = ex[i - 1] * ex[1];
}
} /* end single */
#pragma acc update device(ex[0:EXPMAX+1])
{
compute_initial_conditions (u1_r, u1_i);
fft_init (dims[0][0]);
}
#pragma acc update device(u_r[0:NX], u_i[0:NX], \
u1_r[0:NTOTAL], u1_i[0:NTOTAL])
if (TIMERS_ENABLED == TRUE)
{
timer_stop (T_SETUP);
}
if (TIMERS_ENABLED == TRUE)
{
timer_start (T_FFT);
}
//fft (1, u1_r, u1_i, u0_r, u0_i);
//START_FFT//
dir = 1;
logNX = ilog2(NX);
logNY = ilog2(NY);
logNZ = ilog2(NZ);
/*--------------------------------------------------------------------
c note: args x1, x2 must be different arrays
c note: args for cfftsx are (direction, layout, xin, xout, scratch)
c xin/xout may be the same and it can be somewhat faster
c if they are
c-------------------------------------------------------------------*/
{
if (dir == 1)
{
//cffts1 (1, logNX, u1_r, u1_i, u1_r, u1_i, y0_r, y0_i, y1_r, y1_i); /* u1 -> u1 */
is = 1;
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = i * NZ * NY + j * NZ + k;
y0_r[id] = u1_r[m];
y0_i[id] = u1_i[m];
}
//cfftz (is, logNX, NX, y0_r, y0_i, y1_r, y1_i, NZ, NY);
/*--------------------------------------------------------------------
c Perform one variant of the Stockham FFT.
c-------------------------------------------------------------------*/
for (l = 1; l <= logNX; l += 2)
{
//fftz2 (is, l, logNX, NX, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NZ, NY);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NZ * NY; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NX; idx++)
{
n1 = NX / 2;
if (l - 1 == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l - 1) - 1);
}
if (logNX - l == 0)
{
li = 1;
}
else
{
li = 2 << ((logNX - l) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = y0_r[i11 + p];
x11imag = y0_i[i11 + p];
x21real = y0_r[i12 + p];
x21imag = y0_i[i12 + p];
y1_r[i21 + p] = x11real + x21real;
y1_i[i21 + p] = x11imag + x21imag;
y1_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
y1_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
if (l == logNX)
break;
//fftz2 (is, l + 1, logNX, NX, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NZ, NY);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NZ * NY; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NX; idx++)
{
n1 = NX / 2;
if (l == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l) - 1);
}
if (logNX - (l+1) == 0)
{
li = 1;
}
else
{
li = 2 << ((logNX - (l+1)) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = y1_r[i11 + p];
x11imag = y1_i[i11 + p];
x21real = y1_r[i12 + p];
x21imag = y1_i[i12 + p];
y0_r[i21 + p] = x11real + x21real;
y0_i[i21 + p] = x11imag + x21imag;
y0_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
y0_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
}
if (logNX % 2 == 1)
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = i * NZ * NY + j * NZ + k;
u1_r[m] = y1_r[id];
u1_i[m] = y1_i[id];
}
}
else
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = i * NZ * NY + j * NZ + k;
u1_r[m] = y0_r[id];
u1_i[m] = y0_i[id];
}
}
//cffts2 (1, logNY, u1_r, u1_i, u1_r, u1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */
is = 1;
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = j * NX * NZ + k * NX + i;
y0_r[id] = u1_r[m];
y0_i[id] = u1_i[m];
}
//cfftz (is, logNY, NY, y0_r, y0_i, y1_r, y1_i, NX, NZ);
/*--------------------------------------------------------------------
c Perform one variant of the Stockham FFT.
c-------------------------------------------------------------------*/
for (l = 1; l <= logNY; l += 2)
{
//fftz2 (is, l, logNY, NY, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NX, NZ);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NX * NZ; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NY; idx++)
{
n1 = NY / 2;
if (l - 1 == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l - 1) - 1);
}
if (logNY - l == 0)
{
li = 1;
}
else
{
li = 2 << ((logNY - l) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = y0_r[i11 + p];
x11imag = y0_i[i11 + p];
x21real = y0_r[i12 + p];
x21imag = y0_i[i12 + p];
y1_r[i21 + p] = x11real + x21real;
y1_i[i21 + p] = x11imag + x21imag;
y1_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
y1_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
if (l == logNY)
break;
//fftz2 (is, l + 1, logNY, NY, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NX, NZ);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NX * NZ; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NY; idx++)
{
n1 = NY / 2;
if (l == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l) - 1);
}
if (logNY - (l+1) == 0)
{
li = 1;
}
else
{
li = 2 << ((logNY - (l+1)) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = y1_r[i11 + p];
x11imag = y1_i[i11 + p];
x21real = y1_r[i12 + p];
x21imag = y1_i[i12 + p];
y0_r[i21 + p] = x11real + x21real;
y0_i[i21 + p] = x11imag + x21imag;
y0_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
y0_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
}
if (logNY % 2 == 1)
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = j * NX * NZ + k * NX + i;
u1_r[m] = y1_r[id];
u1_i[m] = y1_i[id];
}
}
else
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = j * NX * NZ + k * NX + i;
u1_r[m] = y0_r[id];
u1_i[m] = y0_i[id];
}
}
//cffts3 (1, logNZ, u1_r, u1_i, u0_r, u0_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x2 */
is = 1;
//cfftz (is, logNZ, NZ, u1_r, u1_i, y1_r, y1_i, NX, NY);
/*--------------------------------------------------------------------
c Perform one variant of the Stockham FFT.
c-------------------------------------------------------------------*/
for (l = 1; l <= logNZ; l += 2)
{
//fftz2 (is, l, logNZ, NZ, u_r, u_i, u1_r, u1_i, y1_r, y1_i, NX, NY);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NX * NY; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NZ; idx++)
{
n1 = NZ / 2;
if (l - 1 == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l - 1) - 1);
}
if (logNZ - l == 0)
{
li = 1;
}
else
{
li = 2 << ((logNZ - l) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = u1_r[i11 + p];
x11imag = u1_i[i11 + p];
x21real = u1_r[i12 + p];
x21imag = u1_i[i12 + p];
y1_r[i21 + p] = x11real + x21real;
y1_i[i21 + p] = x11imag + x21imag;
y1_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
y1_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
if (l == logNZ)
break;
//fftz2 (is, l + 1, logNZ, NZ, u_r, u_i, y1_r, y1_i, u1_r, u1_i, NX, NY);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NX * NY; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NZ; idx++)
{
n1 = NZ / 2;
if (l == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l) - 1);
}
if (logNZ - (l+1) == 0)
{
li = 1;
}
else
{
li = 2 << ((logNZ - (l+1)) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = y1_r[i11 + p];
x11imag = y1_i[i11 + p];
x21real = y1_r[i12 + p];
x21imag = y1_i[i12 + p];
u1_r[i21 + p] = x11real + x21real;
u1_i[i21 + p] = x11imag + x21imag;
u1_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
u1_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
}
if (logNZ % 2 == 1)
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
u0_r[m] = y1_r[m];
u0_i[m] = y1_i[m];
}
}
else
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
u0_r[m] = u1_r[m];
u0_i[m] = u1_i[m];
}
}
}
else
{
//cffts3 (-1, logNZ, u1_r, u1_i, u1_r, u1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */
is = -1;
//cfftz (is, logNZ, NZ, u1_r, u1_i, y1_r, y1_i, NX, NY);
/*--------------------------------------------------------------------
c Perform one variant of the Stockham FFT.
c-------------------------------------------------------------------*/
for (l = 1; l <= logNZ; l += 2)
{
//fftz2 (is, l, logNZ, NZ, u_r, u_i, u1_r, u1_i, y1_r, y1_i, NX, NY);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NX * NY; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NZ; idx++)
{
n1 = NZ / 2;
if (l - 1 == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l - 1) - 1);
}
if (logNZ - l == 0)
{
li = 1;
}
else
{
li = 2 << ((logNZ - l) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = u1_r[i11 + p];
x11imag = u1_i[i11 + p];
x21real = u1_r[i12 + p];
x21imag = u1_i[i12 + p];
y1_r[i21 + p] = x11real + x21real;
y1_i[i21 + p] = x11imag + x21imag;
y1_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
y1_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
if (l == logNZ)
break;
//fftz2 (is, l + 1, logNZ, NZ, u_r, u_i, y1_r, y1_i, u1_r, u1_i, NX, NY);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NX * NY; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NZ; idx++)
{
n1 = NZ / 2;
if (l == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l) - 1);
}
if (logNZ - (l+1) == 0)
{
li = 1;
}
else
{
li = 2 << ((logNZ - (l+1)) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = y1_r[i11 + p];
x11imag = y1_i[i11 + p];
x21real = y1_r[i12 + p];
x21imag = y1_i[i12 + p];
u1_r[i21 + p] = x11real + x21real;
u1_i[i21 + p] = x11imag + x21imag;
u1_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
u1_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
}
if (logNZ % 2 == 1)
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
u1_r[m] = y1_r[m];
u1_i[m] = y1_i[m];
}
}
else
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
u1_r[m] = u1_r[m];
u1_i[m] = u1_i[m];
}
}
//cffts2 (-1, logNY, u1_r, u1_i, u1_r, u1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */
is = -1;
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = j * NX * NZ + k * NX + i;
y0_r[id] = u1_r[m];
y0_i[id] = u1_i[m];
}
//cfftz (is, logNY, NY, y0_r, y0_i, y1_r, y1_i, NX, NZ);
/*--------------------------------------------------------------------
c Perform one variant of the Stockham FFT.
c-------------------------------------------------------------------*/
for (l = 1; l <= logNY; l += 2)
{
//fftz2 (is, l, logNY, NY, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NX, NZ);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NX * NZ; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NY; idx++)
{
n1 = NY / 2;
if (l - 1 == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l - 1) - 1);
}
if (logNY - l == 0)
{
li = 1;
}
else
{
li = 2 << ((logNY - l) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = y0_r[i11 + p];
x11imag = y0_i[i11 + p];
x21real = y0_r[i12 + p];
x21imag = y0_i[i12 + p];
y1_r[i21 + p] = x11real + x21real;
y1_i[i21 + p] = x11imag + x21imag;
y1_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
y1_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
if (l == logNY)
break;
//fftz2 (is, l + 1, logNY, NY, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NX, NZ);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NX * NZ; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NY; idx++)
{
n1 = NY / 2;
if (l == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l) - 1);
}
if (logNY - (l+1) == 0)
{
li = 1;
}
else
{
li = 2 << ((logNY - (l+1)) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = y1_r[i11 + p];
x11imag = y1_i[i11 + p];
x21real = y1_r[i12 + p];
x21imag = y1_i[i12 + p];
y0_r[i21 + p] = x11real + x21real;
y0_i[i21 + p] = x11imag + x21imag;
y0_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
y0_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
}
if (logNY % 2 == 1)
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = j * NX * NZ + k * NX + i;
u1_r[m] = y1_r[id];
u1_i[m] = y1_i[id];
}
}
else
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = j * NX * NZ + k * NX + i;
u1_r[m] = y0_r[id];
u1_i[m] = y0_i[id];
}
}
//cffts1 (-1, logNX, u1_r, u1_i, u0_r, u0_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x2 */
is = -1;
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = i * NZ * NY + j * NZ + k;
y0_r[id] = u1_r[m];
y0_i[id] = u1_i[m];
}
//cfftz (is, logNX, NX, y0_r, y0_i, y1_r, y1_i, NZ, NY);
/*--------------------------------------------------------------------
c Perform one variant of the Stockham FFT.
c-------------------------------------------------------------------*/
for (l = 1; l <= logNX; l += 2)
{
//fftz2 (is, l, logNX, NX, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NZ, NY);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NZ * NY; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NX; idx++)
{
n1 = NX / 2;
if (l - 1 == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l - 1) - 1);
}
if (logNX - l == 0)
{
li = 1;
}
else
{
li = 2 << ((logNX - l) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = y0_r[i11 + p];
x11imag = y0_i[i11 + p];
x21real = y0_r[i12 + p];
x21imag = y0_i[i12 + p];
y1_r[i21 + p] = x11real + x21real;
y1_i[i21 + p] = x11imag + x21imag;
y1_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
y1_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
if (l == logNX)
break;
//fftz2 (is, l + 1, logNX, NX, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NZ, NY);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NZ * NY; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NX; idx++)
{
n1 = NX / 2;
if (l == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l) - 1);
}
if (logNX - (l+1) == 0)
{
li = 1;
}
else
{
li = 2 << ((logNX - (l+1)) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = y1_r[i11 + p];
x11imag = y1_i[i11 + p];
x21real = y1_r[i12 + p];
x21imag = y1_i[i12 + p];
y0_r[i21 + p] = x11real + x21real;
y0_i[i21 + p] = x11imag + x21imag;
y0_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
y0_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
}
if (logNX % 2 == 1)
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = i * NZ * NY + j * NZ + k;
u0_r[m] = y1_r[id];
u0_i[m] = y1_i[id];
}
}
else
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = i * NZ * NY + j * NZ + k;
u0_r[m] = y0_r[id];
u0_i[m] = y0_i[id];
}
}
}
}
//END_FFT//
if (TIMERS_ENABLED == TRUE)
{
timer_stop (T_FFT);
}
for (iter = 1; iter <= niter; iter++)
{
if (TIMERS_ENABLED == TRUE)
{
timer_start (T_EVOLVE);
}
//evolve (u0_r, u0_i, u1_r, u1_i, iter, indexmap);
/*--------------------------------------------------------------------
c evolve u0 -> u1 (iter time steps) in fourier space
c-------------------------------------------------------------------*/
#pragma acc kernels loop gang worker independent
for (i = 0; i < NTOTAL; i++)
{
u1_r[i] = u0_r[i] * ex[iter * indexmap[i]];
u1_i[i] = u0_i[i] * ex[iter * indexmap[i]];
}
if (TIMERS_ENABLED == TRUE)
{
timer_stop (T_EVOLVE);
}
if (TIMERS_ENABLED == TRUE)
{
timer_start (T_FFT);
}
//fft (-1, u1_r, u1_i, u2_r, u2_i);
//START_FFT//
dir = -1;
logNX = ilog2(NX);
logNY = ilog2(NY);
logNZ = ilog2(NZ);
/*--------------------------------------------------------------------
c note: args x1, x2 must be different arrays
c note: args for cfftsx are (direction, layout, xin, xout, scratch)
c xin/xout may be the same and it can be somewhat faster
c if they are
c-------------------------------------------------------------------*/
{
if (dir == 1)
{
//cffts1 (1, logNX, u1_r, u1_i, u1_r, u1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */
is = 1;
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = i * NZ * NY + j * NZ + k;
y0_r[id] = u1_r[m];
y0_i[id] = u1_i[m];
}
//cfftz (is, logNX, NX, y0_r, y0_i, y1_r, y1_i, NZ, NY);
/*--------------------------------------------------------------------
c Perform one variant of the Stockham FFT.
c-------------------------------------------------------------------*/
for (l = 1; l <= logNX; l += 2)
{
//fftz2 (is, l, logNX, NX, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NZ, NY);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NZ * NY; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NX; idx++)
{
n1 = NX / 2;
if (l - 1 == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l - 1) - 1);
}
if (logNX - l == 0)
{
li = 1;
}
else
{
li = 2 << ((logNX - l) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = y0_r[i11 + p];
x11imag = y0_i[i11 + p];
x21real = y0_r[i12 + p];
x21imag = y0_i[i12 + p];
y1_r[i21 + p] = x11real + x21real;
y1_i[i21 + p] = x11imag + x21imag;
y1_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
y1_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
if (l == logNX)
break;
//fftz2 (is, l + 1, logNX, NX, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NZ, NY);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NZ * NY; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NX; idx++)
{
n1 = NX / 2;
if (l == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l) - 1);
}
if (logNX - (l+1) == 0)
{
li = 1;
}
else
{
li = 2 << ((logNX - (l+1)) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = y1_r[i11 + p];
x11imag = y1_i[i11 + p];
x21real = y1_r[i12 + p];
x21imag = y1_i[i12 + p];
y0_r[i21 + p] = x11real + x21real;
y0_i[i21 + p] = x11imag + x21imag;
y0_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
y0_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
}
if (logNX % 2 == 1)
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = i * NZ * NY + j * NZ + k;
u1_r[m] = y1_r[id];
u1_i[m] = y1_i[id];
}
}
else
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = i * NZ * NY + j * NZ + k;
u1_r[m] = y0_r[id];
u1_i[m] = y0_i[id];
}
}
//cffts2 (1, logNY, u1_r, u1_i, u1_r, u1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */
is = 1;
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = j * NX * NZ + k * NX + i;
y0_r[id] = u1_r[m];
y0_i[id] = u1_i[m];
}
//cfftz (is, logNY, NY, y0_r, y0_i, y1_r, y1_i, NX, NZ);
/*--------------------------------------------------------------------
c Perform one variant of the Stockham FFT.
c-------------------------------------------------------------------*/
for (l = 1; l <= logNY; l += 2)
{
//fftz2 (is, l, logNY, NY, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NX, NZ);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NX * NZ; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NY; idx++)
{
n1 = NY / 2;
if (l - 1 == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l - 1) - 1);
}
if (logNY - l == 0)
{
li = 1;
}
else
{
li = 2 << ((logNY - l) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = y0_r[i11 + p];
x11imag = y0_i[i11 + p];
x21real = y0_r[i12 + p];
x21imag = y0_i[i12 + p];
y1_r[i21 + p] = x11real + x21real;
y1_i[i21 + p] = x11imag + x21imag;
y1_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
y1_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
if (l == logNY)
break;
//fftz2 (is, l + 1, logNY, NY, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NX, NZ);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NX * NZ; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NY; idx++)
{
n1 = NY / 2;
if (l == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l) - 1);
}
if (logNY - (l+1) == 0)
{
li = 1;
}
else
{
li = 2 << ((logNY - (l+1)) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = y1_r[i11 + p];
x11imag = y1_i[i11 + p];
x21real = y1_r[i12 + p];
x21imag = y1_i[i12 + p];
y0_r[i21 + p] = x11real + x21real;
y0_i[i21 + p] = x11imag + x21imag;
y0_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
y0_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
}
if (logNY % 2 == 1)
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = j * NX * NZ + k * NX + i;
u1_r[m] = y1_r[id];
u1_i[m] = y1_i[id];
}
}
else
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = j * NX * NZ + k * NX + i;
u1_r[m] = y0_r[id];
u1_i[m] = y0_i[id];
}
}
//cffts3 (1, logNZ, u1_r, u1_i, u2_r, u2_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x2 */
is = 1;
//cfftz (is, logNZ, NZ, u1_r, u1_i, y1_r, y1_i, NX, NY);
/*--------------------------------------------------------------------
c Perform one variant of the Stockham FFT.
c-------------------------------------------------------------------*/
for (l = 1; l <= logNZ; l += 2)
{
//fftz2 (is, l, logNZ, NZ, u_r, u_i, u1_r, u1_i, y1_r, y1_i, NX, NY);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NX * NY; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NZ; idx++)
{
n1 = NZ / 2;
if (l - 1 == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l - 1) - 1);
}
if (logNZ - l == 0)
{
li = 1;
}
else
{
li = 2 << ((logNZ - l) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = u1_r[i11 + p];
x11imag = u1_i[i11 + p];
x21real = u1_r[i12 + p];
x21imag = u1_i[i12 + p];
y1_r[i21 + p] = x11real + x21real;
y1_i[i21 + p] = x11imag + x21imag;
y1_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
y1_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
if (l == logNZ)
break;
//fftz2 (is, l + 1, logNZ, NZ, u_r, u_i, y1_r, y1_i, u1_r, u1_i, NX, NY);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NX * NY; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NZ; idx++)
{
n1 = NZ / 2;
if (l == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l) - 1);
}
if (logNZ - (l+1) == 0)
{
li = 1;
}
else
{
li = 2 << ((logNZ - (l+1)) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = y1_r[i11 + p];
x11imag = y1_i[i11 + p];
x21real = y1_r[i12 + p];
x21imag = y1_i[i12 + p];
u1_r[i21 + p] = x11real + x21real;
u1_i[i21 + p] = x11imag + x21imag;
u1_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
u1_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
}
if (logNZ % 2 == 1)
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
u2_r[m] = y1_r[m];
u2_i[m] = y1_i[m];
}
}
else
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
u2_r[m] = u1_r[m];
u2_i[m] = u1_i[m];
}
}
}
else
{
//cffts3 (-1, logNZ, u1_r, u1_i, u1_r, u1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */
is = -1;
//cfftz (is, logNZ, NZ, u1_r, u1_i, y1_r, y1_i, NX, NY);
/*--------------------------------------------------------------------
c Perform one variant of the Stockham FFT.
c-------------------------------------------------------------------*/
for (l = 1; l <= logNZ; l += 2)
{
//fftz2 (is, l, logNZ, NZ, u_r, u_i, u1_r, u1_i, y1_r, y1_i, NX, NY);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NX * NY; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NZ; idx++)
{
n1 = NZ / 2;
if (l - 1 == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l - 1) - 1);
}
if (logNZ - l == 0)
{
li = 1;
}
else
{
li = 2 << ((logNZ - l) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = u1_r[i11 + p];
x11imag = u1_i[i11 + p];
x21real = u1_r[i12 + p];
x21imag = u1_i[i12 + p];
y1_r[i21 + p] = x11real + x21real;
y1_i[i21 + p] = x11imag + x21imag;
y1_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
y1_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
if (l == logNZ)
break;
//fftz2 (is, l + 1, logNZ, NZ, u_r, u_i, y1_r, y1_i, u1_r, u1_i, NX, NY);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NX * NY; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NZ; idx++)
{
n1 = NZ / 2;
if (l == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l) - 1);
}
if (logNZ - (l+1) == 0)
{
li = 1;
}
else
{
li = 2 << ((logNZ - (l+1)) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = y1_r[i11 + p];
x11imag = y1_i[i11 + p];
x21real = y1_r[i12 + p];
x21imag = y1_i[i12 + p];
u1_r[i21 + p] = x11real + x21real;
u1_i[i21 + p] = x11imag + x21imag;
u1_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
u1_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
}
if (logNZ % 2 == 1)
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
u1_r[m] = y1_r[m];
u1_i[m] = y1_i[m];
}
}
else
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
u1_r[m] = u1_r[m];
u1_i[m] = u1_i[m];
}
}
//cffts2 (-1, logNY, u1_r, u1_i, u1_r, u1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */
is = -1;
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = j * NX * NZ + k * NX + i;
y0_r[id] = u1_r[m];
y0_i[id] = u1_i[m];
}
//cfftz (is, logNY, NY, y0_r, y0_i, y1_r, y1_i, NX, NZ);
/*--------------------------------------------------------------------
c Perform one variant of the Stockham FFT.
c-------------------------------------------------------------------*/
for (l = 1; l <= logNY; l += 2)
{
//fftz2 (is, l, logNY, NY, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NX, NZ);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NX * NZ; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NY; idx++)
{
n1 = NY / 2;
if (l - 1 == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l - 1) - 1);
}
if (logNY - l == 0)
{
li = 1;
}
else
{
li = 2 << ((logNY - l) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = y0_r[i11 + p];
x11imag = y0_i[i11 + p];
x21real = y0_r[i12 + p];
x21imag = y0_i[i12 + p];
y1_r[i21 + p] = x11real + x21real;
y1_i[i21 + p] = x11imag + x21imag;
y1_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
y1_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
if (l == logNY)
break;
//fftz2 (is, l + 1, logNY, NY, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NX, NZ);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NX * NZ; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NY; idx++)
{
n1 = NY / 2;
if (l == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l) - 1);
}
if (logNY - (l+1) == 0)
{
li = 1;
}
else
{
li = 2 << ((logNY - (l+1)) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = y1_r[i11 + p];
x11imag = y1_i[i11 + p];
x21real = y1_r[i12 + p];
x21imag = y1_i[i12 + p];
y0_r[i21 + p] = x11real + x21real;
y0_i[i21 + p] = x11imag + x21imag;
y0_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
y0_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
}
if (logNY % 2 == 1)
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = j * NX * NZ + k * NX + i;
u1_r[m] = y1_r[id];
u1_i[m] = y1_i[id];
}
}
else
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = j * NX * NZ + k * NX + i;
u1_r[m] = y0_r[id];
u1_i[m] = y0_i[id];
}
}
//cffts1 (-1, logNX, u1_r, u1_i, u2_r, u2_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x2 */
is = -1;
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = i * NZ * NY + j * NZ + k;
y0_r[id] = u1_r[m];
y0_i[id] = u1_i[m];
}
//cfftz (is, logNX, NX, y0_r, y0_i, y1_r, y1_i, NZ, NY);
/*--------------------------------------------------------------------
c Perform one variant of the Stockham FFT.
c-------------------------------------------------------------------*/
for (l = 1; l <= logNX; l += 2)
{
//fftz2 (is, l, logNX, NX, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NZ, NY);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NZ * NY; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NX; idx++)
{
n1 = NX / 2;
if (l - 1 == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l - 1) - 1);
}
if (logNX - l == 0)
{
li = 1;
}
else
{
li = 2 << ((logNX - l) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = y0_r[i11 + p];
x11imag = y0_i[i11 + p];
x21real = y0_r[i12 + p];
x21imag = y0_i[i12 + p];
y1_r[i21 + p] = x11real + x21real;
y1_i[i21 + p] = x11imag + x21imag;
y1_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
y1_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
if (l == logNX)
break;
//fftz2 (is, l + 1, logNX, NX, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NZ, NY);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NZ * NY; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NX; idx++)
{
n1 = NX / 2;
if (l == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l) - 1);
}
if (logNX - (l+1) == 0)
{
li = 1;
}
else
{
li = 2 << ((logNX - (l+1)) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = y1_r[i11 + p];
x11imag = y1_i[i11 + p];
x21real = y1_r[i12 + p];
x21imag = y1_i[i12 + p];
y0_r[i21 + p] = x11real + x21real;
y0_i[i21 + p] = x11imag + x21imag;
y0_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
y0_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
}
if (logNX % 2 == 1)
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = i * NZ * NY + j * NZ + k;
u2_r[m] = y1_r[id];
u2_i[m] = y1_i[id];
}
}
else
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = i * NZ * NY + j * NZ + k;
u2_r[m] = y0_r[id];
u2_i[m] = y0_i[id];
}
}
}
}
//END_FFT//
if (TIMERS_ENABLED == TRUE)
{
timer_stop (T_FFT);
}
if (TIMERS_ENABLED == TRUE)
{
timer_start (T_CHECKSUM);
}
//checksum (iter, u2_r, u2_i, dims[0]);
chk_r = 0.0F;
chk_i = 0.0F;
#pragma acc kernels loop gang worker independent
for(m = 0; m < 1024; m++){
j = 1+m;
q = j%NX;
if (q >= 0 && q < NX) {
r = (3*j)%NY;
if (r >= 0 && r < NY) {
s = (5*j)%NZ;
if (s >= 0 && s < NZ) {
chk_r = chk_r + u2_r[s*NY*NX + r*NX + q];
chk_i = chk_i + u2_i[s*NY*NX + r*NX + q];
}
}
}
}
//printf("chk_r = %22.12e, chk_i =%22.12e\n", chk_r, chk_i);
{
sums_r[iter] += chk_r;
sums_i[iter] += chk_i;
}
{
sums_r[iter] = sums_r[iter] / (float) (NTOTAL);
sums_i[iter] = sums_i[iter] / (float) (NTOTAL);
printf ("T = %5d Checksum = %22.12e %22.12e\n",
iter, sums_r[iter], sums_i[iter]);
}
if (TIMERS_ENABLED == TRUE)
{
timer_stop (T_CHECKSUM);
}
}
verify (NX, NY, NZ, niter, &verified, &classT);
#if defined(_OPENMP)
nthreads = omp_get_num_threads ();
#endif /* _OPENMP */
} /* end parallel */
timer_stop (T_TOTAL);
total_time = timer_read (T_TOTAL);
if (total_time != 0.0)
{
mflops = 1.0e-6 * (double) (NTOTAL) *
(14.8157 + 7.19641 * log ((double) (NTOTAL))
+ (5.23518 + 7.21113 * log ((double) (NTOTAL))) * niter)
/ total_time;
}
else
{
mflops = 0.0;
}
c_print_results ("FT", classT, NX, NY, NZ, niter, nthreads,
total_time, mflops, " floating point", verified,
NPBVERSION, COMPILETIME,
CS1, CS2, CS3, CS4, CS5, CS6, CS7);
if (TIMERS_ENABLED == TRUE)
print_timers ();
return 0;
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void
compute_initial_conditions (float u0_r[NTOTAL], float u0_i[NTOTAL])
{
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Fill in array u0 with initial conditions from
c random number generator
c-------------------------------------------------------------------*/
  /* Populates the split real/imaginary grid u0_r/u0_i (NZ x NY x NX,
     row-major with x fastest) with pseudorandom values, one z-plane
     at a time. The random stream depends on the exact order of the
     randlc/vranlc calls below — do not reorder them. */
  int k;
  float x0, start, an, dummy;      /* dummy absorbs unused randlc returns */
  /* Scratch for one plane of 2*NX*NY random values; sized with the
     largest dimension (MAXDIM) so one buffer serves any plane, +1
     because the consumer below indexes from t = 1. */
  static float tmp[NX * 2 * MAXDIM + 1];
  int i, j, t;
  start = SEED;
/*--------------------------------------------------------------------
c Jump to the starting element for our first plane.
c-------------------------------------------------------------------*/
  /* an = A^(element offset of this rank's first plane); with the serial
     setup() values zstart[0] = ystart[0] = 1 the offset is 0. */
  ipow46 (A, (zstart[0] - 1) * 2 * NX * NY + (ystart[0] - 1) * 2 * NX, &an);
  dummy = randlc (&start, an);     /* advance seed to the first plane */
  ipow46 (A, 2 * NX * NY, &an);    /* an now skips exactly one plane */
/*--------------------------------------------------------------------
c Go through by z planes filling in one square at a time.
c-------------------------------------------------------------------*/
  for (k = 0; k < NZ; k++)
    {
      x0 = start;
      /* NOTE(review): consumer starts at t = 1, so this assumes vranlc
         writes tmp[1 .. 2*NX*NY] — confirm against vranlc's contract. */
      vranlc (2 * NX * NY, &x0, A, tmp);
      t = 1;
      for (j = 0; j < NY; j++)
	for (i = 0; i < NX; i++)
	  {
	    /* Consecutive random values become the (real, imag) pair
	       of one grid point. */
	    u0_r[k * NY * NX + j * NX + i] = tmp[t++];
	    u0_i[k * NY * NX + j * NX + i] = tmp[t++];
	  }
      /* Advance the seed past this plane. The guard is always true for
         k in [0, NZ) — kept as in the original source. */
      if (k != NZ)
	dummy = randlc (&start, an);
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void
ipow46 (float a, int exponent, float *result)
{
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c compute a^exponent mod 2^46
c-------------------------------------------------------------------*/
  /* Binary (square-and-multiply) exponentiation. randlc is used as
     the modular multiplier: NOTE(review) this assumes randlc(&x, y)
     replaces x with x*y reduced mod 2^46 as its side effect — confirm
     against randlc's definition. */
  float scratch;   /* swallows randlc's (unused) return value      */
  float base;      /* current repeated square of a                 */
  float acc;       /* product of the squares selected so far       */
  int e;           /* remaining exponent                           */

/*--------------------------------------------------------------------
c Use
c   a^n = a^(n/2)*a^(n/2) if n even else
c   a^n = a*a^(n-1) if n odd
c-------------------------------------------------------------------*/
  *result = 1;
  if (exponent == 0)
    return;

  base = a;
  acc = 1;
  e = exponent;

  while (e > 1)
    {
      if (e % 2 == 0)
        {
          scratch = randlc (&base, base);   /* base <- base^2        */
          e /= 2;
        }
      else
        {
          scratch = randlc (&acc, base);    /* acc <- acc * base     */
          e -= 1;
        }
    }
  /* Fold in the single remaining factor of base (e == 1 here). */
  scratch = randlc (&acc, base);
  *result = acc;
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void
setup (void)
{
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
  /* One-time benchmark initialization: prints the header, sets the
     iteration count, and fills the global dimension/extent tables
     used by the FFT and initial-condition routines. Removed the
     unused locals ierr, j, fstatus from the original. */
  int i;
  printf ("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"
	  " - FT Benchmark\n\n");
  niter = NITER_DEFAULT;
  printf (" Size                : %3dx%3dx%3d\n", NX, NY, NZ);
  printf (" Iterations          :     %7d\n", niter);
/* 1004 format(' Number of processes : ', i7)
 1005 format(' Processor array     : ', i3, 'x', i3)
 1006 format(' WARNING: compiled for ', i5, ' processes. ',
 >       ' Will not verify. ')*/
  /* All three layouts use the full (NX, NY, NZ) extents in this
     single-process version. */
  for (i = 0; i < 3; i++)
    {
      dims[i][0] = NX;
      dims[i][1] = NY;
      dims[i][2] = NZ;
    }
  /* 1-based inclusive slab extents; a single process owns everything. */
  for (i = 0; i < 3; i++)
    {
      xstart[i] = 1;
      xend[i] = NX;
      ystart[i] = 1;
      yend[i] = NY;
      zstart[i] = 1;
      zend[i] = NZ;
    }
/*--------------------------------------------------------------------
c Set up info for blocking of ffts and transposes.  This improves
c performance on cache-based systems. Blocking involves
c working on a chunk of the problem at a time, taking chunks
c along the first, second, or third dimension.
c
c - In cffts1 blocking is on 2nd dimension (with fft on 1st dim)
c - In cffts2/3 blocking is on 1st dimension (with fft on 2nd and 3rd dims)
c Since 1st dim is always in processor, we'll assume it's long enough
c (default blocking factor is 16 so min size for 1st dim is 16)
c The only case we have to worry about is cffts1 in a 2d decomposition.
c so the blocking factor should not be larger than the 2nd dimension.
c-------------------------------------------------------------------*/
  fftblock = FFTBLOCK_DEFAULT;
  fftblockpad = FFTBLOCKPAD_DEFAULT;
  /* Dead guard in this configuration (fftblock was just set to the
     default) — retained from the original for when the block size is
     tuned away from the default. */
  if (fftblock != FFTBLOCK_DEFAULT)
    fftblockpad = fftblock + 3;
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void
print_timers (void)
{
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
  /* Prints every timer in [0, T_MAX) that accumulated nonzero time,
     one line per timer: index, label, and elapsed seconds.
     Fix: the original format string was "timer %2d(%16s( :%10.6f\n" —
     the second '(' should be ')', which garbled every output line. */
  int i;
  static const char *tstrings[] = { "          total ",
    "          setup ",
    "            fft ",
    "         evolve ",
    "       checksum ",
    "         fftlow ",
    "        fftcopy "
  };
  for (i = 0; i < T_MAX; i++)
    {
      if (timer_read (i) != 0.0)
	{
	  printf ("timer %2d(%16s) :%10.6f\n", i, tstrings[i],
		  timer_read (i));
	}
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void
fft (int dir, float x1_r[NTOTAL], float x1_i[NTOTAL],
float x2_r[NTOTAL], float x2_i[NTOTAL])
{
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static float y0_r[NTOTAL];
static float y0_i[NTOTAL];
static float y1_r[NTOTAL];
static float y1_i[NTOTAL];
int logNX, logNY, logNZ;
///////////////////////
//Used for cffts1(). //
///////////////////////
int i, j, k, jj;
int m, id;
int is;
///////////////////////
// Used for cfftz(). //
///////////////////////
int l;
///////////////////////
// Used for fftz2(). //
///////////////////////
int k_fftz2, n1, li, lj, lk, ku, i_fftz2, i11, i12, i21, i22;
float u1_rf, x11_r, x21_r;
float u1_if, x11_i, x21_i;
int idx, p, nn;
float x11real, x11imag, x21real, x21imag;
//START_FFT//
logNX = ilog2(NX);
logNY = ilog2(NY);
logNZ = ilog2(NZ);
/*--------------------------------------------------------------------
c note: args x1, x2 must be different arrays
c note: args for cfftsx are (direction, layout, xin, xout, scratch)
c xin/xout may be the same and it can be somewhat faster
c if they are
c-------------------------------------------------------------------*/
#pragma acc data \
copyin(u_r[0:NX], u_i[0:NX]) \
copy(x1_r[0:NTOTAL], x1_i[0:NTOTAL]) \
copyout(x2_r[0:NTOTAL], x2_i[0:NTOTAL]) \
create(y0_r[0:NTOTAL], y0_i[0:NTOTAL]) \
create(y1_r[0:NTOTAL], y1_i[0:NTOTAL])
{
if (dir == 1)
{
//cffts1 (1, logNX, x1_r, x1_i, x1_r, x1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */
is = 1;
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = i * NZ * NY + j * NZ + k;
y0_r[id] = x1_r[m];
y0_i[id] = x1_i[m];
}
//cfftz (is, logNX, NX, y0_r, y0_i, y1_r, y1_i, NZ, NY);
/*--------------------------------------------------------------------
c Perform one variant of the Stockham FFT.
c-------------------------------------------------------------------*/
for (l = 1; l <= logNX; l += 2)
{
//fftz2 (is, l, logNX, NX, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NZ, NY);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NZ * NY; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NX; idx++)
{
n1 = NX / 2;
if (l - 1 == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l - 1) - 1);
}
if (logNX - l == 0)
{
li = 1;
}
else
{
li = 2 << ((logNX - l) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = y0_r[i11 + p];
x11imag = y0_i[i11 + p];
x21real = y0_r[i12 + p];
x21imag = y0_i[i12 + p];
y1_r[i21 + p] = x11real + x21real;
y1_i[i21 + p] = x11imag + x21imag;
y1_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
y1_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
if (l == logNX)
break;
//fftz2 (is, l + 1, logNX, NX, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NZ, NY);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NZ * NY; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NX; idx++)
{
n1 = NX / 2;
if (l == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l) - 1);
}
if (logNX - (l+1) == 0)
{
li = 1;
}
else
{
li = 2 << ((logNX - (l+1)) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = y1_r[i11 + p];
x11imag = y1_i[i11 + p];
x21real = y1_r[i12 + p];
x21imag = y1_i[i12 + p];
y0_r[i21 + p] = x11real + x21real;
y0_i[i21 + p] = x11imag + x21imag;
y0_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
y0_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
}
if (logNX % 2 == 1)
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = i * NZ * NY + j * NZ + k;
x1_r[m] = y1_r[id];
x1_i[m] = y1_i[id];
}
}
else
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = i * NZ * NY + j * NZ + k;
x1_r[m] = y0_r[id];
x1_i[m] = y0_i[id];
}
}
//cffts2 (1, logNY, x1_r, x1_i, x1_r, x1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */
is = 1;
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = j * NX * NZ + k * NX + i;
y0_r[id] = x1_r[m];
y0_i[id] = x1_i[m];
}
//cfftz (is, logNY, NY, y0_r, y0_i, y1_r, y1_i, NX, NZ);
/*--------------------------------------------------------------------
c Perform one variant of the Stockham FFT.
c-------------------------------------------------------------------*/
for (l = 1; l <= logNY; l += 2)
{
//fftz2 (is, l, logNY, NY, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NX, NZ);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NX * NZ; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NY; idx++)
{
n1 = NY / 2;
if (l - 1 == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l - 1) - 1);
}
if (logNY - l == 0)
{
li = 1;
}
else
{
li = 2 << ((logNY - l) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = y0_r[i11 + p];
x11imag = y0_i[i11 + p];
x21real = y0_r[i12 + p];
x21imag = y0_i[i12 + p];
y1_r[i21 + p] = x11real + x21real;
y1_i[i21 + p] = x11imag + x21imag;
y1_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
y1_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
if (l == logNY)
break;
//fftz2 (is, l + 1, logNY, NY, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NX, NZ);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NX * NZ; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NY; idx++)
{
n1 = NY / 2;
if (l == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l) - 1);
}
if (logNY - (l+1) == 0)
{
li = 1;
}
else
{
li = 2 << ((logNY - (l+1)) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = y1_r[i11 + p];
x11imag = y1_i[i11 + p];
x21real = y1_r[i12 + p];
x21imag = y1_i[i12 + p];
y0_r[i21 + p] = x11real + x21real;
y0_i[i21 + p] = x11imag + x21imag;
y0_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
y0_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
}
if (logNY % 2 == 1)
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = j * NX * NZ + k * NX + i;
x1_r[m] = y1_r[id];
x1_i[m] = y1_i[id];
}
}
else
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = j * NX * NZ + k * NX + i;
x1_r[m] = y0_r[id];
x1_i[m] = y0_i[id];
}
}
//cffts3 (1, logNZ, x1_r, x1_i, x2_r, x2_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x2 */
is = 1;
//cfftz (is, logNZ, NZ, x1_r, x1_i, y1_r, y1_i, NX, NY);
/*--------------------------------------------------------------------
c Perform one variant of the Stockham FFT.
c-------------------------------------------------------------------*/
for (l = 1; l <= logNZ; l += 2)
{
//fftz2 (is, l, logNZ, NZ, u_r, u_i, x1_r, x1_i, y1_r, y1_i, NX, NY);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NX * NY; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NZ; idx++)
{
n1 = NZ / 2;
if (l - 1 == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l - 1) - 1);
}
if (logNZ - l == 0)
{
li = 1;
}
else
{
li = 2 << ((logNZ - l) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = x1_r[i11 + p];
x11imag = x1_i[i11 + p];
x21real = x1_r[i12 + p];
x21imag = x1_i[i12 + p];
y1_r[i21 + p] = x11real + x21real;
y1_i[i21 + p] = x11imag + x21imag;
y1_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
y1_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
if (l == logNZ)
break;
//fftz2 (is, l + 1, logNZ, NZ, u_r, u_i, y1_r, y1_i, x1_r, x1_i, NX, NY);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NX * NY; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NZ; idx++)
{
n1 = NZ / 2;
if (l == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l) - 1);
}
if (logNZ - (l+1) == 0)
{
li = 1;
}
else
{
li = 2 << ((logNZ - (l+1)) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = y1_r[i11 + p];
x11imag = y1_i[i11 + p];
x21real = y1_r[i12 + p];
x21imag = y1_i[i12 + p];
x1_r[i21 + p] = x11real + x21real;
x1_i[i21 + p] = x11imag + x21imag;
x1_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
x1_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
}
if (logNZ % 2 == 1)
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
x2_r[m] = y1_r[m];
x2_i[m] = y1_i[m];
}
}
else
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
x2_r[m] = x1_r[m];
x2_i[m] = x1_i[m];
}
}
}
else
{
//cffts3 (-1, logNZ, x1_r, x1_i, x1_r, x1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */
is = -1;
//cfftz (is, logNZ, NZ, x1_r, x1_i, y1_r, y1_i, NX, NY);
/*--------------------------------------------------------------------
c Perform one variant of the Stockham FFT.
c-------------------------------------------------------------------*/
for (l = 1; l <= logNZ; l += 2)
{
//fftz2 (is, l, logNZ, NZ, u_r, u_i, x1_r, x1_i, y1_r, y1_i, NX, NY);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NX * NY; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NZ; idx++)
{
n1 = NZ / 2;
if (l - 1 == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l - 1) - 1);
}
if (logNZ - l == 0)
{
li = 1;
}
else
{
li = 2 << ((logNZ - l) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = x1_r[i11 + p];
x11imag = x1_i[i11 + p];
x21real = x1_r[i12 + p];
x21imag = x1_i[i12 + p];
y1_r[i21 + p] = x11real + x21real;
y1_i[i21 + p] = x11imag + x21imag;
y1_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
y1_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
if (l == logNZ)
break;
//fftz2 (is, l + 1, logNZ, NZ, u_r, u_i, y1_r, y1_i, x1_r, x1_i, NX, NY);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NX * NY; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NZ; idx++)
{
n1 = NZ / 2;
if (l == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l) - 1);
}
if (logNZ - (l+1) == 0)
{
li = 1;
}
else
{
li = 2 << ((logNZ - (l+1)) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = y1_r[i11 + p];
x11imag = y1_i[i11 + p];
x21real = y1_r[i12 + p];
x21imag = y1_i[i12 + p];
x1_r[i21 + p] = x11real + x21real;
x1_i[i21 + p] = x11imag + x21imag;
x1_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
x1_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
}
if (logNZ % 2 == 1)
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
x1_r[m] = y1_r[m];
x1_i[m] = y1_i[m];
}
}
else
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
x1_r[m] = x1_r[m];
x1_i[m] = x1_i[m];
}
}
//cffts2 (-1, logNY, x1_r, x1_i, x1_r, x1_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x1 */
is = -1;
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = j * NX * NZ + k * NX + i;
y0_r[id] = x1_r[m];
y0_i[id] = x1_i[m];
}
//cfftz (is, logNY, NY, y0_r, y0_i, y1_r, y1_i, NX, NZ);
/*--------------------------------------------------------------------
c Perform one variant of the Stockham FFT.
c-------------------------------------------------------------------*/
for (l = 1; l <= logNY; l += 2)
{
//fftz2 (is, l, logNY, NY, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NX, NZ);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NX * NZ; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NY; idx++)
{
n1 = NY / 2;
if (l - 1 == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l - 1) - 1);
}
if (logNY - l == 0)
{
li = 1;
}
else
{
li = 2 << ((logNY - l) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = y0_r[i11 + p];
x11imag = y0_i[i11 + p];
x21real = y0_r[i12 + p];
x21imag = y0_i[i12 + p];
y1_r[i21 + p] = x11real + x21real;
y1_i[i21 + p] = x11imag + x21imag;
y1_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
y1_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
if (l == logNY)
break;
//fftz2 (is, l + 1, logNY, NY, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NX, NZ);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NX * NZ; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NY; idx++)
{
n1 = NY / 2;
if (l == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l) - 1);
}
if (logNY - (l+1) == 0)
{
li = 1;
}
else
{
li = 2 << ((logNY - (l+1)) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = y1_r[i11 + p];
x11imag = y1_i[i11 + p];
x21real = y1_r[i12 + p];
x21imag = y1_i[i12 + p];
y0_r[i21 + p] = x11real + x21real;
y0_i[i21 + p] = x11imag + x21imag;
y0_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
y0_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
}
if (logNY % 2 == 1)
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = j * NX * NZ + k * NX + i;
x1_r[m] = y1_r[id];
x1_i[m] = y1_i[id];
}
}
else
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = j * NX * NZ + k * NX + i;
x1_r[m] = y0_r[id];
x1_i[m] = y0_i[id];
}
}
//cffts1 (-1, logNX, x1_r, x1_i, x2_r, x2_i, y0_r, y0_i, y1_r, y1_i); /* x1 -> x2 */
is = -1;
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = i * NZ * NY + j * NZ + k;
y0_r[id] = x1_r[m];
y0_i[id] = x1_i[m];
}
//cfftz (is, logNX, NX, y0_r, y0_i, y1_r, y1_i, NZ, NY);
/*--------------------------------------------------------------------
c Perform one variant of the Stockham FFT.
c-------------------------------------------------------------------*/
for (l = 1; l <= logNX; l += 2)
{
//fftz2 (is, l, logNX, NX, u_r, u_i, y0_r, y0_i, y1_r, y1_i, NZ, NY);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NZ * NY; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NX; idx++)
{
n1 = NX / 2;
if (l - 1 == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l - 1) - 1);
}
if (logNX - l == 0)
{
li = 1;
}
else
{
li = 2 << ((logNX - l) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = y0_r[i11 + p];
x11imag = y0_i[i11 + p];
x21real = y0_r[i12 + p];
x21imag = y0_i[i12 + p];
y1_r[i21 + p] = x11real + x21real;
y1_i[i21 + p] = x11imag + x21imag;
y1_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
y1_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
if (l == logNX)
break;
//fftz2 (is, l + 1, logNX, NX, u_r, u_i, y1_r, y1_i, y0_r, y0_i, NZ, NY);
/*--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------*/
nn = NZ * NY; //number of threads
#pragma acc kernels loop gang worker independent
for (idx = 0; idx < NTOTAL / NX; idx++)
{
n1 = NX / 2;
if (l == 0)
{
lk = 1;
}
else
{
lk = 2 << ((l) - 1);
}
if (logNX - (l+1) == 0)
{
li = 1;
}
else
{
li = 2 << ((logNX - (l+1)) - 1);
}
lj = 2 * lk;
ku = li;
for (i_fftz2 = 0; i_fftz2 < li; i_fftz2++)
{
i11 = idx + i_fftz2 * lk * nn;
i12 = i11 + n1 * nn;
i21 = idx + i_fftz2 * lj * nn;
i22 = i21 + lk * nn;
if (is >= 1)
{
u1_rf = u_r[ku + i_fftz2];
u1_if = u_i[ku + i_fftz2];
}
else
{
u1_rf = u_r[ku + i_fftz2];
u1_if = -u_i[ku + i_fftz2];
}
/*--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------*/
for (k_fftz2 = 0; k_fftz2 < lk; k_fftz2++)
{
p = k_fftz2 * nn;
x11real = y1_r[i11 + p];
x11imag = y1_i[i11 + p];
x21real = y1_r[i12 + p];
x21imag = y1_i[i12 + p];
y0_r[i21 + p] = x11real + x21real;
y0_i[i21 + p] = x11imag + x21imag;
y0_r[i22 + p] =
u1_rf * (x11real - x21real) - u1_if * (x11imag - x21imag);
y0_i[i22 + p] =
u1_rf * (x11imag - x21imag) + u1_if * (x11real - x21real);
}
}
}
}
if (logNX % 2 == 1)
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = i * NZ * NY + j * NZ + k;
x2_r[m] = y1_r[id];
x2_i[m] = y1_i[id];
}
}
else
{
#pragma acc kernels loop gang worker independent
for (m = 0; m < NTOTAL; m++)
{
i = m % NX;
k = m / NX;
j = k % NY;
k = k / NY;
id = i * NZ * NY + j * NZ + k;
x2_r[m] = y0_r[id];
x2_i[m] = y0_i[id];
}
}
}
}
//END_FFT//
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void
fft_init (int n)
{
/*--------------------------------------------------------------------
c Compute the roots-of-unity array u_r/u_i used by subsequent FFTs.
c Layout permits stride-one access at each FFT level: u_r[0] holds
c log2(n) as a float, and for level j (1..m) the ln = 2^(j-1) twiddle
c factors e^{i*k*PI/ln}, k = 0..ln-1, are stored contiguously starting
c at offset ku = 2^(j-1).
c-------------------------------------------------------------------*/
  int m, ku, i, j, ln;
  float t, ti;
  /* Number of FFT levels; stashed in u_r[0] by convention. */
  m = ilog2 (n);
  u_r[0] = (float) m;
  u_i[0] = 0.0;
  ku = 1;                       /* start offset of the current level's block */
  ln = 1;                       /* number of twiddle factors at this level   */
  for (j = 1; j <= m; j++)
    {
      t = PI / ln;
      for (i = 0; i <= ln - 1; i++)
        {
          ti = i * t;
          u_r[i + ku] = cos (ti);
          u_i[i + ku] = sin (ti);
        }
      ku = ku + ln;
      ln = 2 * ln;
    }
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Integer ceiling of log base 2: smallest lg >= 1 with (1 << lg) >= n,
c except that n == 1 maps to 0.
c-------------------------------------------------------------------*/
static int
ilog2 (int n)
{
  int lg;
  int reached;
  if (n == 1)
    return 0;
  lg = 1;
  for (reached = 2; reached < n; reached <<= 1)
    lg++;
  return lg;
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void
verify (int d1, int d2, int d3, int nt, boolean * verified, char *classT)
{
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
int ierr, size, i;
double err, epsilon;
/*--------------------------------------------------------------------
c Sample size reference checksums
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Class S size reference checksums
c-------------------------------------------------------------------*/
double vdata_real_s[6 + 1] = { 0.0,
5.546087004964e+02,
5.546385409189e+02,
5.546148406171e+02,
5.545423607415e+02,
5.544255039624e+02,
5.542683411902e+02
};
double vdata_imag_s[6 + 1] = { 0.0,
4.845363331978e+02,
4.865304269511e+02,
4.883910722336e+02,
4.901273169046e+02,
4.917475857993e+02,
4.932597244941e+02
};
/*--------------------------------------------------------------------
c Class W size reference checksums
c-------------------------------------------------------------------*/
double vdata_real_w[6 + 1] = { 0.0,
5.673612178944e+02,
5.631436885271e+02,
5.594024089970e+02,
5.560698047020e+02,
5.530898991250e+02,
5.504159734538e+02
};
double vdata_imag_w[6 + 1] = { 0.0,
5.293246849175e+02,
5.282149986629e+02,
5.270996558037e+02,
5.260027904925e+02,
5.249400845633e+02,
5.239212247086e+02
};
/*--------------------------------------------------------------------
c Class A size reference checksums
c-------------------------------------------------------------------*/
double vdata_real_a[6 + 1] = { 0.0,
5.046735008193e+02,
5.059412319734e+02,
5.069376896287e+02,
5.077892868474e+02,
5.085233095391e+02,
5.091487099959e+02
};
double vdata_imag_a[6 + 1] = { 0.0,
5.114047905510e+02,
5.098809666433e+02,
5.098144042213e+02,
5.101336130759e+02,
5.104914655194e+02,
5.107917842803e+02
};
/*--------------------------------------------------------------------
c Class B size reference checksums
c-------------------------------------------------------------------*/
double vdata_real_b[20 + 1] = { 0.0,
5.177643571579e+02,
5.154521291263e+02,
5.146409228649e+02,
5.142378756213e+02,
5.139626667737e+02,
5.137423460082e+02,
5.135547056878e+02,
5.133910925466e+02,
5.132470705390e+02,
5.131197729984e+02,
5.130070319283e+02,
5.129070537032e+02,
5.128182883502e+02,
5.127393733383e+02,
5.126691062020e+02,
5.126064276004e+02,
5.125504076570e+02,
5.125002331720e+02,
5.124551951846e+02,
5.124146770029e+02
};
double vdata_imag_b[20 + 1] = { 0.0,
5.077803458597e+02,
5.088249431599e+02,
5.096208912659e+02,
5.101023387619e+02,
5.103976610617e+02,
5.105948019802e+02,
5.107404165783e+02,
5.108576573661e+02,
5.109577278523e+02,
5.110460304483e+02,
5.111252433800e+02,
5.111968077718e+02,
5.112616233064e+02,
5.113203605551e+02,
5.113735928093e+02,
5.114218460548e+02,
5.114656139760e+02,
5.115053595966e+02,
5.115415130407e+02,
5.115744692211e+02
};
/*--------------------------------------------------------------------
c Class C size reference checksums
c-------------------------------------------------------------------*/
double vdata_real_c[20 + 1] = { 0.0,
5.195078707457e+02,
5.155422171134e+02,
5.144678022222e+02,
5.140150594328e+02,
5.137550426810e+02,
5.135811056728e+02,
5.134569343165e+02,
5.133651975661e+02,
5.132955192805e+02,
5.132410471738e+02,
5.131971141679e+02,
5.131605205716e+02,
5.131290734194e+02,
5.131012720314e+02,
5.130760908195e+02,
5.130528295923e+02,
5.130310107773e+02,
5.130103090133e+02,
5.129905029333e+02,
5.129714421109e+02
};
double vdata_imag_c[20 + 1] = { 0.0,
5.149019699238e+02,
5.127578201997e+02,
5.122251847514e+02,
5.121090289018e+02,
5.121143685824e+02,
5.121496764568e+02,
5.121870921893e+02,
5.122193250322e+02,
5.122454735794e+02,
5.122663649603e+02,
5.122830879827e+02,
5.122965869718e+02,
5.123075927445e+02,
5.123166486553e+02,
5.123241541685e+02,
5.123304037599e+02,
5.123356167976e+02,
5.123399592211e+02,
5.123435588985e+02,
5.123465164008e+02
};
epsilon = 1.0e-12;
*verified = TRUE;
*classT = 'U';
if (d1 == 64 && d2 == 64 && d3 == 64 && nt == 6)
{
*classT = 'S';
for (i = 1; i <= nt; i++)
{
err = (sums_r[i] - vdata_real_s[i]) / vdata_real_s[i];
if (fabs (err) > epsilon)
{
*verified = FALSE;
break;
}
err = (sums_i[i] - vdata_imag_s[i]) / vdata_imag_s[i];
if (fabs (err) > epsilon)
{
*verified = FALSE;
break;
}
}
}
else if (d1 == 128 && d2 == 128 && d3 == 32 && nt == 6)
{
*classT = 'W';
for (i = 1; i <= nt; i++)
{
err = (sums_r[i] - vdata_real_w[i]) / vdata_real_w[i];
if (fabs (err) > epsilon)
{
*verified = FALSE;
break;
}
err = (sums_i[i] - vdata_imag_w[i]) / vdata_imag_w[i];
if (fabs (err) > epsilon)
{
*verified = FALSE;
break;
}
}
}
else if (d1 == 256 && d2 == 256 && d3 == 128 && nt == 6)
{
*classT = 'A';
for (i = 1; i <= nt; i++)
{
err = (sums_r[i] - vdata_real_a[i]) / vdata_real_a[i];
if (fabs (err) > epsilon)
{
*verified = FALSE;
break;
}
err = (sums_i[i] - vdata_imag_a[i]) / vdata_imag_a[i];
if (fabs (err) > epsilon)
{
*verified = FALSE;
break;
}
}
}
else if (d1 == 512 && d2 == 256 && d3 == 256 && nt == 20)
{
*classT = 'B';
for (i = 1; i <= nt; i++)
{
err = (sums_r[i] - vdata_real_b[i]) / vdata_real_b[i];
if (fabs (err) > epsilon)
{
*verified = FALSE;
break;
}
err = (sums_i[i] - vdata_imag_b[i]) / vdata_imag_b[i];
if (fabs (err) > epsilon)
{
*verified = FALSE;
break;
}
}
}
else if (d1 == 512 && d2 == 512 && d3 == 512 && nt == 20)
{
*classT = 'C';
for (i = 1; i <= nt; i++)
{
err = (sums_r[i] - vdata_real_c[i]) / vdata_real_c[i];
if (fabs (err) > epsilon)
{
*verified = FALSE;
break;
}
err = (sums_i[i] - vdata_imag_c[i]) / vdata_imag_c[i];
if (fabs (err) > epsilon)
{
*verified = FALSE;
break;
}
}
}
if (*classT != 'U')
{
printf ("Result verification successful\n");
}
else
{
printf ("Result verification failed\n");
}
printf ("class = %1c\n", *classT);
}
|
critical.c | /*
* critical.c -- Archer testcase
*/
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
//
// See tools/archer/LICENSE.txt for details.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// RUN: %libarcher-compile-and-run | FileCheck %s
// REQUIRES: tsan
#include <omp.h>
#include <stdio.h>
int main(int argc, char *argv[]) {
  /* Eight threads each bump the shared counter inside a critical
     section, so the increments are serialized and race-free. */
  int count = 0;
#pragma omp parallel num_threads(8) shared(count)
  {
#pragma omp critical
    {
      count++;
    }
  }
  fprintf(stderr, "DONE\n");
  /* Exit status 0 only when all eight increments landed. */
  return count != 8;
}
// CHECK-NOT: ThreadSanitizer: data race
// CHECK-NOT: ThreadSanitizer: reported
// CHECK: DONE
|
dyn_pr.h | #ifndef DYN_PR_H_
#define DYN_PR_H_
#include "traversal.h"
#include "../common/timer.h"
#include "sliding_queue_dynamic.h"
#include "../common/pvector.h"
#include <cmath>
#include <iostream>
/* Algorithm: Incremental PageRank and PageRank starting from scratch */
#include <fstream>
extern std::ofstream algF;
typedef float Rank;
const float kDamp = 0.85;
const float PRThreshold = 0.0000001;
// PageRank "iteration 0" of the incremental algorithm: recompute the rank of
// every vertex flagged in ds->affected, and enqueue the out-neighbors of any
// vertex whose rank moved by more than PRThreshold so later iterations can
// propagate the change through the sliding queue.
template<typename T>
void PRIter0(T* ds, SlidingQueue<NodeID>& queue, Rank base_score)
{
// Per-vertex contribution: current rank divided by out-degree.
// NOTE(review): assumes out_degree(n) != 0 for every vertex — confirm upstream.
pvector<Rank> outgoing_contrib(ds->num_nodes, 0);
// Dedup flags: a vertex is pushed onto the queue at most once this round.
pvector<bool> visited(ds->num_nodes, false);
#pragma omp parallel for schedule(dynamic, 64)
for(NodeID n=0; n < ds->num_nodes; n++) {
outgoing_contrib[n] = ds->property[n] / (ds->out_degree(n));
}
#pragma omp parallel
{
// Thread-local buffer; flushed into the shared queue once per thread.
QueueBuffer<NodeID> lqueue(queue);
#pragma omp for schedule(dynamic, 64)
for (NodeID n=0; n < ds->num_nodes; n++) {
if (ds->affected[n]) {
Rank old_rank = ds->property[n];
Rank incoming_total = 0;
for(auto v: in_neigh(n, ds)){
incoming_total += outgoing_contrib[v];
}
// Standard PageRank update: base score plus damped in-neighbor sum.
ds->property[n] = base_score + kDamp * incoming_total;
// Only propagate when the rank moved by a noticeable amount.
bool trigger = fabs(ds->property[n] - old_rank) > PRThreshold;
if (trigger) {
//put the out-neighbors into active list
for (auto v: out_neigh(n, ds)) {
bool curr_val = visited[v];
if (!curr_val) {
// CAS guarantees exactly one thread enqueues each vertex.
if (compare_and_swap(visited[v], curr_val, true))
lqueue.push_back(v);
}
}
}
}
}
lqueue.flush();
}
}
// Incremental (dynamic) PageRank driver: initializes ranks for newly added
// vertices, runs a first pass over the affected vertices (PRIter0), then
// drains a worklist of vertices whose neighbors changed until the ranks
// stabilize within PRThreshold.  Elapsed time is appended to the global
// algF output stream.
template<typename T>
void dynPRAlg(T* ds)
{
std::cout << "Running dynamic PR" << std::endl;
Timer t;
t.Start();
SlidingQueue<NodeID> queue(ds->num_nodes);
const Rank base_score = (1.0f - kDamp)/(ds->num_nodes);
// set all new vertices' rank to 1/num_nodes, otherwise reuse old values
// (a property of -1 marks a vertex that has never been ranked).
#pragma omp parallel for schedule(dynamic, 64)
for (NodeID n = 0; n < ds->num_nodes; n++) {
if (ds->property[n] == -1) {
ds->property[n] = 1.0f/(ds->num_nodes);
}
}
// Iteration 0 only on affected vertices
PRIter0(ds, queue, base_score);
//cout << "Done iter 0" << endl;
// Expose the vertices PRIter0 enqueued to the consumer side of the queue.
queue.slide_window();
/*ofstream out("queueSizeParallel.csv", std::ios_base::app);
out << queue.size() << std::endl;
std::cout << "Queue Size: " << queue.size() << std::endl;
out.close();*/
// Iteration 1 onward, process vertices in the queue
while (!queue.empty()) {
//std::cout << "Not empty queue, Queue Size:" << queue.size() << std::endl;
// Fresh per-round contributions and dedup flags.
pvector<Rank> outgoing_contrib(ds->num_nodes, 0);
pvector<bool> visited(ds->num_nodes, false);
#pragma omp parallel for
for (NodeID n=0; n < ds->num_nodes; n++) {
outgoing_contrib[n] = ds->property[n]/(ds->out_degree(n));
}
#pragma omp parallel
{
// Thread-local buffer feeding the shared sliding queue.
QueueBuffer<NodeID> lqueue(queue);
#pragma omp for schedule(dynamic, 64)
for (auto q_iter = queue.begin(); q_iter < queue.end(); q_iter++) {
NodeID n = *q_iter;
Rank old_rank = ds->property[n];
Rank incoming_total = 0;
for(auto v: in_neigh(n, ds))
incoming_total += outgoing_contrib[v];
// Standard PageRank update: base score plus damped in-neighbor sum.
ds->property[n] = base_score + kDamp * incoming_total;
bool trigger = fabs(ds->property[n] - old_rank) > PRThreshold;
if (trigger) {
//put the out-neighbors into active list
for (auto v: out_neigh(n, ds)) {
bool curr_val = visited[v];
if (!curr_val) {
// CAS guarantees exactly one thread enqueues each vertex.
if (compare_and_swap(visited[v], curr_val, true))
lqueue.push_back(v);
}
}
}
}
lqueue.flush();
}
// Make this round's pushes visible as the next round's work.
queue.slide_window();
}
// clear affected array to get ready for the next update round
#pragma omp parallel for schedule(dynamic, 64)
for (NodeID i = 0; i < ds->num_nodes; i++) {
ds->affected[i] = false;
}
t.Stop();
algF << t.Seconds() << std::endl;
//cout << "Done" << endl;
}
template<typename T>
void PRStartFromScratch(T* ds)
{
std::cout << "Running PR from scratch" << std::endl;
Timer t;
t.Start();
const Rank base_score = (1.0f - kDamp)/(ds->num_nodes);
int max_iters = 10;
double epsilon = 0.0001;
// Reset ALL property values
#pragma omp parallel for
for (NodeID n = 0; n < ds->num_nodes; n++) {
ds->property[n] = 1.0f / (ds->num_nodes);
}
pvector<Rank> outgoing_contrib(ds->num_nodes, 0);
for (int iter = 0; iter < max_iters; iter++) {
double error = 0;
#pragma omp parallel for
for (NodeID n = 0; n < ds->num_nodes; n++) {
outgoing_contrib[n] = ds->property[n]/(ds->out_degree(n));
}
#pragma omp parallel for reduction(+ : error) schedule(dynamic, 64)
for (NodeID u = 0; u < ds->num_nodes; u++) {
Rank incoming_total = 0;
for (NodeID v : in_neigh(u, ds))
incoming_total += outgoing_contrib[v];
Rank old_rank = ds->property[u];
ds->property[u] = base_score + kDamp * incoming_total;
error += fabs(ds->property[u] - old_rank);
}
//std::cout << "Epsilon: "<< epsilon << std::endl;
//printf(" %2d %lf\n", iter, error);
if (error < epsilon)
break;
}
t.Stop();
algF << t.Seconds() << std::endl;
}
#endif // DYN_PR_H
|
par_interp.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildInterp
*--------------------------------------------------------------------------*/
/*
 * hypre_BoomerAMGBuildInterp: build the classical-AMG interpolation matrix P
 * from the fine-grid operator A and the strength-of-connection matrix S.
 *
 * Rows of P for C-points (CF_marker[i] >= 0) are identity rows mapping to the
 * coarse index; rows for F-points interpolate from the C-points that strongly
 * influence them (entries of S), with connections to strong F-neighbors
 * distributed onto shared strong C-points and weak connections lumped into
 * the diagonal.
 *
 * Parameters:
 *   A                fine-grid ParCSR operator
 *   CF_marker        per-row C/F splitting: >= 0 C-point, < 0 F-point;
 *                    -3 entries are skipped and restored to -1 on exit
 *   S                strength matrix (same row space as A)
 *   num_cpts_global  [0..1] global coarse-point range for this rank;
 *                    entry [1] on the last rank holds the global C count
 *   num_functions    number of PDE functions; > 1 enables dof_func filtering
 *   dof_func         per-row function index (used when num_functions > 1)
 *   debug_flag       4 enables timing prints; negative sets print_level = 1
 *   trunc_factor     relative truncation threshold for P (0 = none)
 *   max_elmts        max kept entries per row of P (0 = no limit)
 *   col_offd_S_to_A  optional map from S's offd columns to A's offd columns
 *   P_ptr            output: the assembled ParCSR interpolation matrix
 *
 * Returns hypre_error_flag.
 */
HYPRE_Int
hypre_BoomerAMGBuildInterp( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
hypre_ParCSRMatrix *P;
HYPRE_BigInt *col_map_offd_P;
HYPRE_Int *tmp_map_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data = NULL;
HYPRE_Int *A_ext_i = NULL;
HYPRE_BigInt *A_ext_j = NULL;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
HYPRE_Int jj_begin_row,jj_begin_row_offd;
HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int strong_f_marker;
HYPRE_Int *fine_to_coarse;
//HYPRE_Int *fine_to_coarse_offd;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_BigInt total_global_cpts;
//HYPRE_BigInt my_first_cpt;
HYPRE_Int num_cols_P_offd;
HYPRE_Int i,i1,i2;
HYPRE_Int j,jl,jj,jj1;
HYPRE_Int kc;
HYPRE_BigInt big_k;
HYPRE_Int start;
HYPRE_Int sgn;
HYPRE_Int c_num;
HYPRE_Real diagonal;
HYPRE_Real sum;
HYPRE_Real distribute;
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int print_level = 0;
HYPRE_Int *int_buf_data;
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
//my_first_cpt = num_cpts_global[0];
/* The last rank's num_cpts_global[1] is the global coarse count;
 * broadcast it so every rank can size P's column space. */
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
/*-------------------------------------------------------------------
 * Get the CF_marker data for the off-processor columns
 *-------------------------------------------------------------------*/
/* A negative debug_flag doubles as "enable warnings" (print_level). */
if (debug_flag < 0)
{
debug_flag = -debug_flag;
print_level = 1;
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1 && num_cols_A_offd)
{
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
}
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Exchange CF_marker (and dof_func, if multi-function) for ghost columns. */
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
{
int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (num_functions > 1)
{
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d     Interp: Comm 1 CF_marker =    %f\n",
my_id, wall_time);
fflush(NULL);
}
/*----------------------------------------------------------------------
 * Get the ghost rows of A
 *---------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_procs > 1)
{
A_ext = hypre_ParCSRMatrixExtractBExt(A,A,1);
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
}
/* Compress A_ext in place, remapping global column ids: columns owned
 * locally become local diag indices (>= 0); columns found in col_map_offd
 * are encoded as -kc-1 (< 0); all other columns are dropped. */
index = 0;
for (i=0; i < num_cols_A_offd; i++)
{
for (j=A_ext_i[i]; j < A_ext_i[i+1]; j++)
{
big_k = A_ext_j[j];
if (big_k >= col_1 && big_k < col_n)
{
A_ext_j[index] = big_k - col_1;
A_ext_data[index++] = A_ext_data[j];
}
else
{
kc = hypre_BigBinarySearch(col_map_offd,big_k,num_cols_A_offd);
if (kc > -1)
{
A_ext_j[index] = (HYPRE_BigInt)(-kc-1);
A_ext_data[index++] = A_ext_data[j];
}
}
}
A_ext_i[i] = index;
}
/* Shift the row pointers back into standard CSR form after compression. */
for (i = num_cols_A_offd; i > 0; i--)
A_ext_i[i] = A_ext_i[i-1];
if (num_procs > 1) A_ext_i[0] = 0;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d  Interp: Comm 2   Get A_ext =  %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
 * First Pass: Determine size of P and fill in fine_to_coarse mapping.
 *-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
 * Intialize counters and allocate mapping vector.
 *-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
 * Loop over fine grid.
 *-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
/* Manual row partition: each of num_threads chunks gets n_fine/num_threads
 * rows, with the first `rest` chunks taking one extra row. Per-thread
 * counters avoid synchronization; they are prefix-summed afterwards. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
 * If i is a C-point, interpolation is the identity. Also set up
 * mapping vector.
 *--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
 * If i is an F-point, interpolation is from the C-points that
 * strongly influence i.
 *--------------------------------------------------------------------*/
else
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
}
}
}
}
/*-----------------------------------------------------------------------
 * Allocate arrays.
 *-----------------------------------------------------------------------*/
/* Serial prefix sums turn per-thread counts into cumulative offsets;
 * the last entries are then the global nonzero / coarse totals. */
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE);
/*-----------------------------------------------------------------------
 * Intialize some stuff.
 *-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d     Interp: Internal work 1 =     %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
 * Send and receive fine_to_coarse info.
 *-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
//fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
/* Shift each thread's local coarse numbering by the coarse count of all
 * preceding threads, yielding process-local coarse indices. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) coarse_shift = coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
fine_to_coarse[i] += coarse_shift;
}
//fine_to_coarse[i] += my_first_cpt+coarse_shift;
}
/*index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d     Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/*#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; */
/*-----------------------------------------------------------------------
 * Loop over fine grid points.
 *-----------------------------------------------------------------------*/
/* Second pass: fill P row by row. P_marker / P_marker_offd map a column
 * index to its slot in the current row of P (>= jj_begin_row), to
 * strong_f_marker for strong F-neighbors, or to stale values from earlier
 * rows (which compare as "not in this row"). */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
if (num_cols_A_offd)
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
else
P_marker_offd = NULL;
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker_offd[i] = -1;
}
strong_f_marker = -2;
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
 * If i is a c-point, interpolation is the identity.
 *--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
 * If i is an F-point, build interpolation.
 *--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
jj_begin_row = jj_counter;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
 * If neighbor i1 is a C-point, set column number in P_diag_j
 * and initialize interpolation weight to zero.
 *--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
/*--------------------------------------------------------------
 * If neighbor i1 is an F-point, mark it as a strong F-point
 * whose connection needs to be distributed.
 *--------------------------------------------------------------*/
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
}
}
jj_end_row = jj_counter;
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
jj_begin_row_offd = jj_counter_offd;
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
/*-----------------------------------------------------------
 * If neighbor i1 is a C-point, set column number in P_offd_j
 * and initialize interpolation weight to zero.
 *-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
/*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
/*-----------------------------------------------------------
 * If neighbor i1 is an F-point, mark it as a strong F-point
 * whose connection needs to be distributed.
 *-----------------------------------------------------------*/
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
/*-----------------------------------------------------------
 * If neighbor i1 is a C-point, set column number in P_offd_j
 * and initialize interpolation weight to zero.
 *-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
/*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
/*-----------------------------------------------------------
 * If neighbor i1 is an F-point, mark it as a strong F-point
 * whose connection needs to be distributed.
 *-----------------------------------------------------------*/
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
}
}
}
}
jj_end_row_offd = jj_counter_offd;
/* First entry of row i of A_diag is taken as the diagonal a_{ii}
 * (hypre stores the diagonal first in each diag row). */
diagonal = A_diag_data[A_diag_i[i]];
/* Loop over ith row of A. First, the diagonal part of A */
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
/*--------------------------------------------------------------
 * Case 1: neighbor i1 is a C-point and strongly influences i,
 * accumulate a_{i,i1} into the interpolation weight.
 *--------------------------------------------------------------*/
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
/*--------------------------------------------------------------
 * Case 2: neighbor i1 is an F-point and strongly influences i,
 * distribute a_{i,i1} to C-points that strongly infuence i.
 * Note: currently no distribution to the diagonal in this case.
 *--------------------------------------------------------------*/
else if (P_marker[i1] == strong_f_marker)
{
sum = zero;
/*-----------------------------------------------------------
 * Loop over row of A for point i1 and calculate the sum
 * of the connections to c-points that strongly influence i.
 *-----------------------------------------------------------*/
/* sgn tracks the sign of i1's diagonal; only entries of the
 * opposite sign to the diagonal take part in the sum. */
sgn = 1;
if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row &&
(sgn*A_diag_data[jj1]) < 0)
{
sum += A_diag_data[jj1];
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
sum += A_offd_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_diag_data[jj] / sum;
/*-----------------------------------------------------------
 * Loop over row of A for point i1 and do the distribution.
 *-----------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_diag_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_diag_data[jj1];
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
P_offd_data[P_marker_offd[i2]]
+= distribute * A_offd_data[jj1];
}
}
}
}
else
{
/* Nothing to distribute to: lump into the diagonal
 * (same-function connections only). */
if (num_functions == 1 || dof_func[i] == dof_func[i1])
{
diagonal += A_diag_data[jj];
}
}
}
/*--------------------------------------------------------------
 * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
 * into the diagonal.
 *--------------------------------------------------------------*/
else if (CF_marker[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
{
diagonal += A_diag_data[jj];
}
}
}
/*----------------------------------------------------------------
 * Still looping over ith row of A. Next, loop over the
 * off-diagonal part of A
 *---------------------------------------------------------------*/
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
/*--------------------------------------------------------------
 * Case 1: neighbor i1 is a C-point and strongly influences i,
 * accumulate a_{i,i1} into the interpolation weight.
 *--------------------------------------------------------------*/
if (P_marker_offd[i1] >= jj_begin_row_offd)
{
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
}
/*------------------------------------------------------------
 * Case 2: neighbor i1 is an F-point and strongly influences i,
 * distribute a_{i,i1} to C-points that strongly infuence i.
 * Note: currently no distribution to the diagonal in this case.
 *-----------------------------------------------------------*/
else if (P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
/*---------------------------------------------------------
 * Loop over row of A_ext for point i1 and calculate the sum
 * of the connections to c-points that strongly influence i.
 *---------------------------------------------------------*/
/* find row number */
c_num = A_offd_j[jj];
sgn = 1;
if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1;
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
/* A_ext columns were remapped above: >= 0 means local diag
 * column, negative encodes offd column -i2-1. */
i2 = (HYPRE_Int)A_ext_j[jj1];
if (i2 > -1)
{
/* in the diagonal block */
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block  */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_offd_data[jj] / sum;
/*---------------------------------------------------------
 * Loop over row of A_ext for point i1 and do
 * the distribution.
 *--------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
i2 = (HYPRE_Int)A_ext_j[jj1];
if (i2 > -1) /* in the diagonal block */
{
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block  */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
P_offd_data[P_marker_offd[-i2-1]]
+= distribute * A_ext_data[jj1];
}
}
}
else
{
/* Nothing to distribute to: lump into the diagonal. */
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
{
diagonal += A_offd_data[jj];
}
}
}
/*-----------------------------------------------------------
 * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
 * into the diagonal.
 *-----------------------------------------------------------*/
else if (CF_marker_offd[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
{
diagonal += A_offd_data[jj];
}
}
}
}
/*-----------------------------------------------------------------
 * Set interpolation weight by dividing by the diagonal.
 *-----------------------------------------------------------------*/
if (diagonal == 0.0)
{
if (print_level)
{
hypre_printf(" Warning! zero diagonal! Proc id %d row %d\n", my_id,i);
}
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] = 0.0;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] = 0.0;
}
}
else
{
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] /= -diagonal;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] /= -diagonal;
}
}
}
/* Decrementing the marker invalidates this row's strong-F marks without
 * having to reset the (large) P_marker arrays for the next row. */
strong_f_marker--;
P_offd_i[i+1] = jj_counter_offd;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
/* Truncation may reallocate; re-fetch the CSR arrays. */
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
/* Build P's compressed offd column map: find the offd columns actually
 * used, then renumber P_offd_j into that compressed index space. */
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_cols_A_offd; i++)
{
P_marker[i] = 0;
}
num_cols_P_offd = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
{
while (P_marker[index]==0) index++;
tmp_map_offd[i] = index++;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
}
/* Restore the sentinel -3 marks to ordinary F-point marks. */
for (i=0; i < n_fine; i++)
{
if (CF_marker[i] == -3) CF_marker[i] = -1;
}
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse, tmp_map_offd);
*P_ptr = P;
hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
//hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
if (num_procs > 1) hypre_CSRMatrixDestroy(A_ext);
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildInterpHE
* interpolation routine for hyperbolic PDEs
* treats weak fine connections like strong fine connections
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildInterpHE( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
hypre_ParCSRMatrix *P;
HYPRE_BigInt *col_map_offd_P;
HYPRE_Int *tmp_map_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data = NULL;
HYPRE_Int *A_ext_i = NULL;
HYPRE_BigInt *A_ext_j = NULL;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
HYPRE_Int jj_begin_row,jj_begin_row_offd;
HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int *fine_to_coarse;
//HYPRE_Int *fine_to_coarse_offd;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_BigInt total_global_cpts;
//HYPRE_BigInt my_first_cpt;
HYPRE_Int num_cols_P_offd;
HYPRE_Int i,i1,i2;
HYPRE_Int j,jl,jj,jj1;
HYPRE_Int kc;
HYPRE_BigInt big_k;
HYPRE_Int start;
HYPRE_Int sgn;
HYPRE_Int c_num;
HYPRE_Real diagonal;
HYPRE_Real sum;
HYPRE_Real distribute;
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int *int_buf_data;
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + local_numrows;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
//my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1 && num_cols_A_offd)
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (num_functions > 1)
{
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*----------------------------------------------------------------------
* Get the ghost rows of A
*---------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_procs > 1)
{
A_ext = hypre_ParCSRMatrixExtractBExt(A,A,1);
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
}
index = 0;
for (i=0; i < num_cols_A_offd; i++)
{
for (j=A_ext_i[i]; j < A_ext_i[i+1]; j++)
{
big_k = A_ext_j[j];
if (big_k >= col_1 && big_k < col_n)
{
A_ext_j[index] = big_k - col_1;
A_ext_data[index++] = A_ext_data[j];
}
else
{
kc = hypre_BigBinarySearch(col_map_offd,big_k,num_cols_A_offd);
if (kc > -1)
{
A_ext_j[index] = (HYPRE_BigInt)(-kc-1);
A_ext_data[index++] = A_ext_data[j];
}
}
}
A_ext_i[i] = index;
}
for (i = num_cols_A_offd; i > 0; i--)
A_ext_i[i] = A_ext_i[i-1];
if (num_procs > 1) A_ext_i[0] = 0;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 2 Get A_ext = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i.
*--------------------------------------------------------------------*/
else
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
/*-----------------------------------------------------------------------
* Intialize some stuff.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
//fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) coarse_shift = coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
fine_to_coarse[i] += coarse_shift;
}
/*index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/*#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;*/
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
if (num_cols_A_offd)
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
else
P_marker_offd = NULL;
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker_offd[i] = -1;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
jj_begin_row = jj_counter;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
jj_end_row = jj_counter;
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
jj_begin_row_offd = jj_counter_offd;
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
/* Loop over ith row of A. First, the diagonal part of A */
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
/*--------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and influences i,
* distribute a_{i,i1} to C-points that strongly influence i.
* Note: currently no distribution to the diagonal in this case.
*--------------------------------------------------------------*/
else
{
sum = zero;
/*-----------------------------------------------------------
* Loop over row of A for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*-----------------------------------------------------------*/
sgn = 1;
if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row &&
(sgn*A_diag_data[jj1]) < 0)
{
sum += A_diag_data[jj1];
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
sum += A_offd_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_diag_data[jj] / sum;
/*-----------------------------------------------------------
* Loop over row of A for point i1 and do the distribution.
*-----------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_diag_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_diag_data[jj1];
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
P_offd_data[P_marker_offd[i2]]
+= distribute * A_offd_data[jj1];
}
}
}
}
else
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
}
/*----------------------------------------------------------------
* Still looping over ith row of A. Next, loop over the
* off-diagonal part of A
*---------------------------------------------------------------*/
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker_offd[i1] >= jj_begin_row_offd)
{
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
}
/*------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and influences i,
* distribute a_{i,i1} to C-points that strongly infuence i.
* Note: currently no distribution to the diagonal in this case.
*-----------------------------------------------------------*/
else
{
sum = zero;
/*---------------------------------------------------------
* Loop over row of A_ext for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*---------------------------------------------------------*/
/* find row number */
c_num = A_offd_j[jj];
sgn = 1;
if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1;
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
i2 = (HYPRE_Int)A_ext_j[jj1];
if (i2 > -1)
{
/* in the diagonal block */
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_offd_data[jj] / sum;
/*---------------------------------------------------------
* Loop over row of A_ext for point i1 and do
* the distribution.
*--------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
i2 = (HYPRE_Int)A_ext_j[jj1];
if (i2 > -1) /* in the diagonal block */
{
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
P_offd_data[P_marker_offd[-i2-1]]
+= distribute * A_ext_data[jj1];
}
}
}
else
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
}
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] /= -diagonal;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] /= -diagonal;
}
}
P_offd_i[i+1] = jj_counter_offd;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_cols_A_offd; i++)
P_marker[i] = 0;
num_cols_P_offd = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
{
while (P_marker[index]==0) index++;
tmp_map_offd[i] = index++;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
}
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P,A,fine_to_coarse, tmp_map_offd);
*P_ptr = P;
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(A_ext);
}
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildDirInterp
*--------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------
 * Host (CPU) implementation of direct interpolation.
 *
 * Builds the prolongation matrix P in which every F-point interpolates
 * directly from the C-points that strongly influence it (rows of S), with
 * two row scalings (alfa for negative, beta for positive couplings) chosen
 * so that row sums of the strong connections reproduce the full stencil
 * sums.  C-point rows of P are identity rows.
 *
 * A                 - fine-grid operator (ParCSR)
 * CF_marker         - C/F splitting per fine row (>= 0: C-point; < 0: F;
 *                     -3 entries are reset to -1 on exit)
 * S                 - strength-of-connection matrix
 * num_cpts_global   - global coarse-point partitioning
 * num_functions     - number of PDE functions (block systems)
 * dof_func          - function index per fine dof (used when num_functions>1)
 * debug_flag        - ==4 enables timing printouts
 * trunc_factor,
 * max_elmts         - interpolation truncation controls
 * col_offd_S_to_A   - optional map from S off-diag columns to A off-diag cols
 * P_ptr             - output: the newly created interpolation matrix
 *
 * Returns hypre_error_flag.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildDirInterpHost( hypre_ParCSRMatrix   *A,
                                   HYPRE_Int            *CF_marker,
                                   hypre_ParCSRMatrix   *S,
                                   HYPRE_BigInt         *num_cpts_global,
                                   HYPRE_Int             num_functions,
                                   HYPRE_Int            *dof_func,
                                   HYPRE_Int             debug_flag,
                                   HYPRE_Real            trunc_factor,
                                   HYPRE_Int             max_elmts,
                                   HYPRE_Int            *col_offd_S_to_A,
                                   hypre_ParCSRMatrix  **P_ptr)
{
   MPI_Comm                comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg    *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real      *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int        num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int       *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int       *S_diag_j = hypre_CSRMatrixJ(S_diag);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int       *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int       *S_offd_j = hypre_CSRMatrixJ(S_offd);
   hypre_ParCSRMatrix *P;
   HYPRE_BigInt    *col_map_offd_P;
   HYPRE_Int       *tmp_map_offd = NULL;
   HYPRE_Int       *CF_marker_offd = NULL;
   HYPRE_Int       *dof_func_offd = NULL;
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   HYPRE_Real      *P_diag_data;
   HYPRE_Int       *P_diag_i;
   HYPRE_Int       *P_diag_j;
   HYPRE_Real      *P_offd_data;
   HYPRE_Int       *P_offd_i;
   HYPRE_Int       *P_offd_j;
   HYPRE_Int        P_diag_size, P_offd_size;
   HYPRE_Int        jj_counter,jj_counter_offd;
   HYPRE_Int       *jj_count, *jj_count_offd;
   HYPRE_Int        jj_begin_row,jj_begin_row_offd;
   HYPRE_Int        jj_end_row,jj_end_row_offd;
   HYPRE_Int        start_indexing = 0; /* start indexing for P_data at 0 */
   HYPRE_Int        n_fine = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int       *fine_to_coarse;
   HYPRE_Int       *coarse_counter;
   HYPRE_Int        coarse_shift;
   HYPRE_BigInt     total_global_cpts;
   HYPRE_Int        num_cols_P_offd;
   //HYPRE_BigInt my_first_cpt;
   HYPRE_Int        i,i1;
   HYPRE_Int        j,jl,jj;
   HYPRE_Int        start;
   HYPRE_Real       diagonal;
   HYPRE_Real       sum_N_pos, sum_P_pos;
   HYPRE_Real       sum_N_neg, sum_P_neg;
   HYPRE_Real       alfa = 1.0;
   HYPRE_Real       beta = 1.0;
   HYPRE_Real       zero = 0.0;
   HYPRE_Real       one = 1.0;
   HYPRE_Int        my_id;
   HYPRE_Int        num_procs;
   HYPRE_Int        num_threads;
   HYPRE_Int        num_sends;
   HYPRE_Int        index;
   HYPRE_Int        ns, ne, size, rest;
   HYPRE_Int       *int_buf_data;
   HYPRE_Real       wall_time;  /* for debugging instrumentation  */
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   num_threads = hypre_NumThreads();
   //my_first_cpt = num_cpts_global[0];
   /* Last rank holds the global coarse-point count; broadcast it. */
   if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/
   if (debug_flag==4) wall_time = time_getWallclockSeconds();
   if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   if (num_functions > 1 && num_cols_A_offd)
      dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
                                                                           num_sends), HYPRE_MEMORY_HOST);
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++]
            = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }
   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                               CF_marker_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);
   /* For systems of PDEs, also exchange the function index per dof. */
   if (num_functions > 1)
   {
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            int_buf_data[index++]
               = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                                  dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
   }
   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Comm 1 CF_marker =    %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }
   /*-----------------------------------------------------------------------
    *  First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/
   /*-----------------------------------------------------------------------
    *  Intialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   /*-----------------------------------------------------------------------
    *  Loop over fine grid.
    *-----------------------------------------------------------------------*/
   /* RDF: this looks a little tricky, but doable */
   /* Each thread counts entries for its contiguous slice [ns, ne) of rows. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          *  If i is a C-point, interpolation is the identity. Also set up
          *  mapping vector.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            jj_count[j]++;
            fine_to_coarse[i] = coarse_counter[j];
            coarse_counter[j]++;
         }
         /*--------------------------------------------------------------------
          *  If i is an F-point, interpolation is from the C-points that
          *  strongly influence i.
          *--------------------------------------------------------------------*/
         else
         {
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];
               if (CF_marker[i1] > 0)
               {
                  jj_count[j]++;
               }
            }
            if (num_procs > 1)
            {
               if (col_offd_S_to_A)
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = col_offd_S_to_A[S_offd_j[jj]];
                     if (CF_marker_offd[i1] > 0)
                     {
                        jj_count_offd[j]++;
                     }
                  }
               }
               else
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = S_offd_j[jj];
                     if (CF_marker_offd[i1] > 0)
                     {
                        jj_count_offd[j]++;
                     }
                  }
               }
            }
         }
      }
   }
   /*-----------------------------------------------------------------------
    *  Allocate  arrays.
    *-----------------------------------------------------------------------*/
   /* Prefix-sum the per-thread counters so each thread knows its offsets. */
   for (i=0; i < num_threads-1; i++)
   {
      coarse_counter[i+1] += coarse_counter[i];
      jj_count[i+1] += jj_count[i];
      jj_count_offd[i+1] += jj_count_offd[i];
   }
   i = num_threads-1;
   jj_counter = jj_count[i];
   jj_counter_offd = jj_count_offd[i];
   P_diag_size = jj_counter;
   P_diag_i    = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
   P_diag_j    = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE);
   P_diag_i[n_fine] = jj_counter;
   P_offd_size = jj_counter_offd;
   P_offd_i    = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
   P_offd_j    = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE);
   /*-----------------------------------------------------------------------
    *  Intialize some stuff.
    *-----------------------------------------------------------------------*/
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Internal work 1 =     %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }
   /*-----------------------------------------------------------------------
    *  Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/
   if (debug_flag==4) wall_time = time_getWallclockSeconds();
   //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   /* Shift each thread's local coarse numbering to a global-per-rank one. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      coarse_shift = 0;
      if (j > 0) coarse_shift = coarse_counter[j-1];
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
      {
         fine_to_coarse[i] += coarse_shift;
      }
   }
   /*index = 0;
     for (i = 0; i < num_sends; i++)
     {
     start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
     for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
     int_buf_data[index++]
     = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
     }
     comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
     fine_to_coarse_offd);
     hypre_ParCSRCommHandleDestroy(comm_handle);
     if (debug_flag==4)
     {
     wall_time = time_getWallclockSeconds() - wall_time;
     hypre_printf("Proc = %d     Interp: Comm 4 FineToCoarse = %f\n",
     my_id, wall_time);
     fflush(NULL);
     }*/
   if (debug_flag==4) wall_time = time_getWallclockSeconds();
   /*#ifdef HYPRE_USING_OPENMP
     #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
     #endif
     for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;*/
   /*-----------------------------------------------------------------------
    *  Second Pass: fill P row by row.  Each thread resumes the entry
    *  counters at the offsets established by the counting pass.
    *-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,diagonal,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd,sum_P_pos,sum_P_neg,sum_N_pos,sum_N_neg,alfa,beta) HYPRE_SMP_SCHEDULE
#endif
   for (jl = 0; jl < num_threads; jl++)
   {
      HYPRE_Int       *P_marker, *P_marker_offd;
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (jl < rest)
      {
         ns = jl*size+jl;
         ne = (jl+1)*size+jl+1;
      }
      else
      {
         ns = jl*size+rest;
         ne = (jl+1)*size+rest;
      }
      jj_counter = 0;
      if (jl > 0) jj_counter = jj_count[jl-1];
      jj_counter_offd = 0;
      if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
      P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
      if (num_cols_A_offd)
         P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
      else
         P_marker_offd = NULL;
      for (i = 0; i < n_fine; i++)
      {
         P_marker[i] = -1;
      }
      for (i = 0; i < num_cols_A_offd; i++)
      {
         P_marker_offd[i] = -1;
      }
      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          *  If i is a c-point, interpolation is the identity.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            P_diag_i[i] = jj_counter;
            P_diag_j[jj_counter]    = fine_to_coarse[i];
            P_diag_data[jj_counter] = one;
            jj_counter++;
         }
         /*--------------------------------------------------------------------
          *  If i is an F-point, build interpolation.
          *--------------------------------------------------------------------*/
         else
         {
            /* Diagonal part of P */
            P_diag_i[i] = jj_counter;
            jj_begin_row = jj_counter;
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];
               /*--------------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_diag_j
                * and initialize interpolation weight to zero.
                * NOTE: this must use the same test (> 0) as the counting
                * pass above; a looser test here would write more entries
                * than were allocated for P_diag_j/P_diag_data.
                *--------------------------------------------------------------*/
               if (CF_marker[i1] > 0)
               {
                  P_marker[i1] = jj_counter;
                  P_diag_j[jj_counter]    = fine_to_coarse[i1];
                  P_diag_data[jj_counter] = zero;
                  jj_counter++;
               }
            }
            jj_end_row = jj_counter;
            /* Off-Diagonal part of P */
            P_offd_i[i] = jj_counter_offd;
            jj_begin_row_offd = jj_counter_offd;
            if (num_procs > 1)
            {
               if (col_offd_S_to_A)
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = col_offd_S_to_A[S_offd_j[jj]];
                     /*-----------------------------------------------------------
                      * If neighbor i1 is a C-point, set column number in P_offd_j
                      * and initialize interpolation weight to zero.
                      * Same (> 0) test as the counting pass -- see note above.
                      *-----------------------------------------------------------*/
                     if (CF_marker_offd[i1] > 0)
                     {
                        P_marker_offd[i1] = jj_counter_offd;
                        P_offd_j[jj_counter_offd]  = i1;
                        P_offd_data[jj_counter_offd] = zero;
                        jj_counter_offd++;
                     }
                  }
               }
               else
               {
                  for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
                  {
                     i1 = S_offd_j[jj];
                     /*-----------------------------------------------------------
                      * If neighbor i1 is a C-point, set column number in P_offd_j
                      * and initialize interpolation weight to zero.
                      * Same (> 0) test as the counting pass -- see note above.
                      *-----------------------------------------------------------*/
                     if (CF_marker_offd[i1] > 0)
                     {
                        P_marker_offd[i1] = jj_counter_offd;
                        P_offd_j[jj_counter_offd]  = i1;
                        P_offd_data[jj_counter_offd] = zero;
                        jj_counter_offd++;
                     }
                  }
               }
            }
            jj_end_row_offd = jj_counter_offd;
            /* First entry of each diag row is the diagonal coefficient. */
            diagonal = A_diag_data[A_diag_i[i]];
            /* Loop over ith row of A.  First, the diagonal part of A.
               Accumulate the strong C-point couplings into P and track the
               positive/negative sums over all (N) and strong (P) neighbors. */
            sum_N_pos = 0;
            sum_N_neg = 0;
            sum_P_pos = 0;
            sum_P_neg = 0;
            for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
            {
               i1 = A_diag_j[jj];
               if (num_functions == 1 || dof_func[i1] == dof_func[i])
               {
                  if (A_diag_data[jj] > 0)
                     sum_N_pos += A_diag_data[jj];
                  else
                     sum_N_neg += A_diag_data[jj];
               }
               /*--------------------------------------------------------------
                * Case 1: neighbor i1 is a C-point and strongly influences i,
                * accumulate a_{i,i1} into the interpolation weight.
                *--------------------------------------------------------------*/
               if (P_marker[i1] >= jj_begin_row)
               {
                  P_diag_data[P_marker[i1]] += A_diag_data[jj];
                  if (A_diag_data[jj] > 0)
                     sum_P_pos += A_diag_data[jj];
                  else
                     sum_P_neg += A_diag_data[jj];
               }
            }
            /*----------------------------------------------------------------
             * Still looping over ith row of A. Next, loop over the
             * off-diagonal part of A
             *---------------------------------------------------------------*/
            if (num_procs > 1)
            {
               for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
               {
                  i1 = A_offd_j[jj];
                  if (num_functions == 1 || dof_func_offd[i1] == dof_func[i])
                  {
                     if (A_offd_data[jj] > 0)
                        sum_N_pos += A_offd_data[jj];
                     else
                        sum_N_neg += A_offd_data[jj];
                  }
                  /*--------------------------------------------------------------
                   * Case 1: neighbor i1 is a C-point and strongly influences i,
                   * accumulate a_{i,i1} into the interpolation weight.
                   *--------------------------------------------------------------*/
                  if (P_marker_offd[i1] >= jj_begin_row_offd)
                  {
                     P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
                     if (A_offd_data[jj] > 0)
                        sum_P_pos += A_offd_data[jj];
                     else
                        sum_P_neg += A_offd_data[jj];
                  }
               }
            }
            /* Scale negative (alfa) and positive (beta) couplings so the
               strong-connection sums reproduce the full stencil sums. */
            if (sum_P_neg) alfa = sum_N_neg/sum_P_neg/diagonal;
            if (sum_P_pos) beta = sum_N_pos/sum_P_pos/diagonal;
            /*-----------------------------------------------------------------
             * Set interpolation weight by dividing by the diagonal.
             *-----------------------------------------------------------------*/
            for (jj = jj_begin_row; jj < jj_end_row; jj++)
            {
               if (P_diag_data[jj]> 0)
                  P_diag_data[jj] *= -beta;
               else
                  P_diag_data[jj] *= -alfa;
            }
            for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
            {
               if (P_offd_data[jj]> 0)
                  P_offd_data[jj] *= -beta;
               else
                  P_offd_data[jj] *= -alfa;
            }
         }
         P_offd_i[i+1] = jj_counter_offd;
      }
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
      hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
   }
   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);
   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }
   /* Compress the off-diagonal column space of P to only the columns
      actually referenced, and remap P_offd_j to the compressed indices. */
   num_cols_P_offd = 0;
   if (P_offd_size)
   {
      HYPRE_Int *P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < num_cols_A_offd; i++)
         P_marker[i] = 0;
      num_cols_P_offd = 0;
      for (i=0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker[index])
         {
            num_cols_P_offd++;
            P_marker[index] = 1;
         }
      }
      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
      index = 0;
      for (i=0; i < num_cols_P_offd; i++)
      {
         while (P_marker[index]==0) index++;
         tmp_map_offd[i] = index++;
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < P_offd_size; i++)
      {
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
                                          P_offd_j[i],
                                          num_cols_P_offd);
      }
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }
   /* Restore CF_marker: -3 (special F flag) back to plain F (-1). */
   for (i=0; i < n_fine; i++)
      if (CF_marker[i] == -3) CF_marker[i] = -1;
   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }
   hypre_GetCommPkgRTFromCommPkgA(P, A, fine_to_coarse, tmp_map_offd);
   *P_ptr = P;
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
   return hypre_error_flag;
}
HYPRE_Int
hypre_BoomerAMGBuildDirInterp( hypre_ParCSRMatrix *A,
                               HYPRE_Int *CF_marker,
                               hypre_ParCSRMatrix *S,
                               HYPRE_BigInt *num_cpts_global,
                               HYPRE_Int num_functions,
                               HYPRE_Int *dof_func,
                               HYPRE_Int debug_flag,
                               HYPRE_Real trunc_factor,
                               HYPRE_Int max_elmts,
                               HYPRE_Int *col_offd_S_to_A,
                               HYPRE_Int interp_type,
                               hypre_ParCSRMatrix **P_ptr)
{
   /* Dispatch wrapper for direct interpolation: picks the device kernel
    * when A lives in device memory (CUDA builds only), otherwise the
    * host implementation.  The NVTX range brackets the whole call for
    * profiling. */
   HYPRE_Int ret = 0;

#if defined(HYPRE_USING_CUDA)
   hypre_NvtxPushRange("DirInterp");
   if (hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) ) == HYPRE_EXEC_DEVICE)
   {
      ret = hypre_BoomerAMGBuildDirInterpDevice(A, CF_marker, S, num_cpts_global,
                                                num_functions, dof_func, debug_flag,
                                                trunc_factor, max_elmts, col_offd_S_to_A,
                                                interp_type, P_ptr);
      hypre_NvtxPopRange();
      return ret;
   }
#endif

   /* Host path; note the host routine does not take interp_type. */
   ret = hypre_BoomerAMGBuildDirInterpHost(A, CF_marker, S, num_cpts_global,
                                           num_functions, dof_func, debug_flag,
                                           trunc_factor, max_elmts, col_offd_S_to_A,
                                           P_ptr);
#if defined(HYPRE_USING_CUDA)
   hypre_NvtxPopRange();
#endif
   return ret;
}
/*------------------------------------------------
* Drop entries in interpolation matrix P
* max_elmts == 0 means no limit on rownnz
*------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGInterpTruncation( hypre_ParCSRMatrix *P,
                                 HYPRE_Real trunc_factor,
                                 HYPRE_Int max_elmts)
{
   /* Fast exit: nothing to drop when no threshold and no row-length cap. */
   if (trunc_factor <= 0.0 && max_elmts == 0)
   {
      return 0;
   }

#if defined(HYPRE_USING_CUDA)
   /* Device path when P resides in device memory. */
   if (hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(P) ) == HYPRE_EXEC_DEVICE)
   {
      return hypre_BoomerAMGInterpTruncationDevice(P, trunc_factor, max_elmts);
   }
#endif

   /* Host path: rescale the surviving entries of each row (arg 4 = 1)
    * and use the infinity norm of the row for threshold dropping
    * (arg 5 = 0). */
   return hypre_ParCSRMatrixTruncate(P, trunc_factor, max_elmts,
                                     /* rescale  */ 1,
                                     /* nrm_type */ 0);
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildInterpModUnk - this is a modified interpolation for the unknown approach.
* here we need to pass in a strength matrix built on the entire matrix.
*
*--------------------------------------------------------------------------*/
/*
 * Build classical interpolation P for the "unknown approach": S is built on
 * the whole matrix, and connection distribution/weak-connection lumping is
 * restricted to entries of the SAME function type (dof_func).
 *
 * Inputs:  A (fine matrix), CF_marker (>=0: C-point, <0: F-point, -3: special
 *          F-point excluded from interpolation), S (strength matrix),
 *          num_cpts_global (coarse partition bounds), num_functions/dof_func
 *          (system-of-PDEs block info), trunc_factor/max_elmts (truncation),
 *          col_offd_S_to_A (optional map from S off-diag columns to A's).
 * Output:  *P_ptr, the interpolation operator.  Returns hypre_error_flag.
 *
 * Structure: (1) exchange CF_marker/dof_func for ghost columns, (2) fetch
 * ghost rows of A (A_ext), (3) first pass counts nonzeros of P per thread,
 * (4) second pass fills P row by row, (5) truncate/compress and finalize.
 */
HYPRE_Int
hypre_BoomerAMGBuildInterpModUnk( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
hypre_ParCSRMatrix *P;
HYPRE_BigInt *col_map_offd_P;
HYPRE_Int *tmp_map_offd;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/* A_ext holds the ghost rows of A (rows owned by other ranks that this
 * rank's off-diagonal columns refer to). */
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data = NULL;
HYPRE_Int *A_ext_i = NULL;
HYPRE_BigInt *A_ext_j = NULL;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
HYPRE_Int jj_begin_row,jj_begin_row_offd;
HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int strong_f_marker;
HYPRE_Int *fine_to_coarse;
//HYPRE_Int *fine_to_coarse_offd;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_BigInt total_global_cpts;
HYPRE_Int num_cols_P_offd;
//HYPRE_BigInt my_first_cpt;
HYPRE_Int i,i1,i2;
HYPRE_Int j,jl,jj,jj1;
HYPRE_Int kc;
HYPRE_BigInt big_k;
HYPRE_Int start;
HYPRE_Int sgn;
HYPRE_Int c_num;
HYPRE_Real diagonal;
HYPRE_Real sum;
HYPRE_Real distribute;
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int print_level = 0;
HYPRE_Int *int_buf_data;
/* [col_1, col_n) is this rank's global column range; used below to decide
 * whether an A_ext column is local (diag) or ghost (offd). */
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + local_numrows;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
//my_first_cpt = num_cpts_global[0];
/* Last rank knows the total coarse size; broadcast it to everyone. */
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
/*-------------------------------------------------------------------
 * Get the CF_marker data for the off-processor columns
 *-------------------------------------------------------------------*/
/* Negative debug_flag means "also print warnings" (see zero-diagonal
 * warning in the second pass). */
if (debug_flag < 0)
{
debug_flag = -debug_flag;
print_level = 1;
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1 && num_cols_A_offd)
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends), HYPRE_MEMORY_HOST);
/* Pack CF_marker values for all send elements, then exchange so each rank
 * knows the C/F status of its ghost columns. */
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
/* Same exchange for dof_func when interpolating a system of PDEs. */
if (num_functions > 1)
{
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d     Interp: Comm 1 CF_marker =    %f\n",
my_id, wall_time);
fflush(NULL);
}
/*----------------------------------------------------------------------
 * Get the ghost rows of A
 *---------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_procs > 1)
{
A_ext      = hypre_ParCSRMatrixExtractBExt(A,A,1);
A_ext_i    = hypre_CSRMatrixI(A_ext);
A_ext_j    = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
}
/* Compress A_ext in place: global column indices are rewritten to local
 * encodings -- a column in [col_1, col_n) becomes its local diag index,
 * a column found in col_map_offd becomes -(kc+1) (negative => offd), and
 * columns on neither list are dropped.  A_ext_i[i] is temporarily
 * overwritten with the end-of-row position; the pointer array is shifted
 * back into CSR form right after the loop.  (If num_procs == 1,
 * num_cols_A_offd is 0 and this loop body never runs, so the NULL
 * A_ext_i is never dereferenced.) */
index = 0;
for (i=0; i < num_cols_A_offd; i++)
{
for (j=A_ext_i[i]; j < A_ext_i[i+1]; j++)
{
big_k = A_ext_j[j];
if (big_k >= col_1 && big_k < col_n)
{
A_ext_j[index] = big_k - col_1;
A_ext_data[index++] = A_ext_data[j];
}
else
{
kc = hypre_BigBinarySearch(col_map_offd,big_k,num_cols_A_offd);
if (kc > -1)
{
A_ext_j[index] = (HYPRE_BigInt)(-kc-1);
A_ext_data[index++] = A_ext_data[j];
}
}
}
A_ext_i[i] = index;
}
/* Shift row pointers down one slot to restore standard CSR layout. */
for (i = num_cols_A_offd; i > 0; i--)
A_ext_i[i] = A_ext_i[i-1];
if (num_procs > 1) A_ext_i[0] = 0;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d  Interp: Comm 2   Get A_ext =  %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
 *  First Pass: Determine size of P and fill in fine_to_coarse mapping.
 *-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
 *  Intialize counters and allocate mapping vector.
 *-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
 *  Loop over fine grid.
 *-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
/* Rows are statically partitioned into num_threads contiguous chunks
 * [ns, ne); the first `rest` chunks get one extra row.  Each thread
 * counts its own P nonzeros into jj_count[j]/jj_count_offd[j] and its
 * own C-points into coarse_counter[j]; prefix sums below turn these
 * into per-thread offsets. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
 *  If i is a C-point, interpolation is the identity. Also set up
 *  mapping vector.
 *--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
 *  If i is an F-point, interpolation is from the C-points that
 *  strongly influence i.
 *--------------------------------------------------------------------*/
else
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
}
}
}
}
/*-----------------------------------------------------------------------
 *  Allocate  arrays.
 *-----------------------------------------------------------------------*/
/* Inclusive prefix sums: after this, coarse_counter[j]/jj_count[j] hold
 * cumulative totals up to and including thread j. */
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i    = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
P_diag_j    = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i    = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
P_offd_j    = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
/*-----------------------------------------------------------------------
 *  Intialize some stuff.
 *-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d     Interp: Internal work 1 =     %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
 *  Send and receive fine_to_coarse info.
 *-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
//fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
/* Convert each thread's local C-point numbering into a global-on-rank
 * numbering by shifting with the prefix-summed coarse_counter. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) coarse_shift = coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
fine_to_coarse[i] += coarse_shift;
}
/*index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d     Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/*#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;*/
/*-----------------------------------------------------------------------
 *  Loop over fine grid points.
 *-----------------------------------------------------------------------*/
/* Second pass: each thread re-derives its [ns, ne) chunk and starts its
 * P_diag/P_offd write positions at the end of the previous thread's
 * counts, so threads write disjoint ranges of the shared arrays. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
if (num_cols_A_offd)
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
else
P_marker_offd = NULL;
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker_offd[i] = -1;
}
/* strong_f_marker tags strong F-neighbors of the current row; it is
 * decremented after every row so stale marks from earlier rows can
 * never be mistaken for marks of the current row. */
strong_f_marker = -2;
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
 *  If i is a c-point, interpolation is the identity.
 *--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter]    = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
 *  If i is an F-point, build interpolation.
 *--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
jj_begin_row = jj_counter;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
 * If neighbor i1 is a C-point, set column number in P_diag_j
 * and initialize interpolation weight to zero.
 *--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter]    = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
/*--------------------------------------------------------------
 * If neighbor i1 is an F-point, mark it as a strong F-point
 * whose connection needs to be distributed.
 *--------------------------------------------------------------*/
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
}
}
jj_end_row = jj_counter;
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
jj_begin_row_offd = jj_counter_offd;
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
/*-----------------------------------------------------------
 * If neighbor i1 is a C-point, set column number in P_offd_j
 * and initialize interpolation weight to zero.
 *-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
/*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
/*-----------------------------------------------------------
 * If neighbor i1 is an F-point, mark it as a strong F-point
 * whose connection needs to be distributed.
 *-----------------------------------------------------------*/
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
/*-----------------------------------------------------------
 * If neighbor i1 is a C-point, set column number in P_offd_j
 * and initialize interpolation weight to zero.
 *-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
/*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
/*-----------------------------------------------------------
 * If neighbor i1 is an F-point, mark it as a strong F-point
 * whose connection needs to be distributed.
 *-----------------------------------------------------------*/
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
}
}
}
}
jj_end_row_offd = jj_counter_offd;
/* Relies on hypre's CSR convention that the diagonal entry is stored
 * first in each row of A_diag. */
diagonal = A_diag_data[A_diag_i[i]];
/* Loop over ith row of A.  First, the diagonal part of A */
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
/*--------------------------------------------------------------
 * Case 1: neighbor i1 is a C-point and strongly influences i,
 * accumulate a_{i,i1} into the interpolation weight.
 *--------------------------------------------------------------*/
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
/*--------------------------------------------------------------
 * Case 2: neighbor i1 is an F-point and strongly influences i,
 * distribute a_{i,i1} to C-points that strongly infuence i.
 * Note: currently no distribution to the diagonal in this case.
 HERE, we only want to distribut to points of the SAME function type
 *--------------------------------------------------------------*/
else if (P_marker[i1] == strong_f_marker)
{
sum = zero;
/*-----------------------------------------------------------
 * Loop over row of A for point i1 and calculate the sum
 * of the connections to c-points that strongly influence i.
 *-----------------------------------------------------------*/
/* sgn captures the sign of i1's diagonal so only connections of
 * the opposite sign participate in the distribution. */
sgn = 1;
if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (num_functions == 1 || dof_func[i1] == dof_func[i2])
{
if (P_marker[i2] >= jj_begin_row &&
(sgn*A_diag_data[jj1]) < 0 )
{
sum += A_diag_data[jj1];
}
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (num_functions == 1 || dof_func[i1] == dof_func[i2])
{
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
sum += A_offd_data[jj1];
}
}
}
}
if (sum != 0)
{
distribute = A_diag_data[jj] / sum;
/*-----------------------------------------------------------
 * Loop over row of A for point i1 and do the distribution.
 *-----------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (num_functions == 1 || dof_func[i1] == dof_func[i2])
{
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_diag_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_diag_data[jj1];
}
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (num_functions == 1 || dof_func[i1] == dof_func[i2])
{
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
P_offd_data[P_marker_offd[i2]]
+= distribute * A_offd_data[jj1];
}
}
}
}
}
else /* sum = 0 - only add to diag if the same function type */
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
/*--------------------------------------------------------------
 * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
 * into the diagonal. (only if the same function type)
 *--------------------------------------------------------------*/
else if (CF_marker[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
/*----------------------------------------------------------------
 * Still looping over ith row of A. Next, loop over the
 * off-diagonal part of A
 *---------------------------------------------------------------*/
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
/*--------------------------------------------------------------
 * Case 1: neighbor i1 is a C-point and strongly influences i,
 * accumulate a_{i,i1} into the interpolation weight.
 *--------------------------------------------------------------*/
if (P_marker_offd[i1] >= jj_begin_row_offd)
{
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
}
/*------------------------------------------------------------
 * Case 2: neighbor i1 is an F-point and strongly influences i,
 * distribute a_{i,i1} to C-points that strongly infuence i.
 * Note: currently no distribution to the diagonal in this case.
 AGAIN, we only want to distribut to points of the SAME function type
 *-----------------------------------------------------------*/
else if (P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
/*---------------------------------------------------------
 * Loop over row of A_ext for point i1 and calculate the sum
 * of the connections to c-points that strongly influence i.
 *---------------------------------------------------------*/
/* find row number */
/* The offd column index i1 doubles as the A_ext row number for
 * that ghost point; negative A_ext_j entries encode offd
 * columns as -(index+1), per the compression loop above. */
c_num = A_offd_j[jj];
sgn = 1;
if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1;
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
i2 = (HYPRE_Int)A_ext_j[jj1];
if (num_functions == 1 || dof_func[i1] == dof_func[i2])
{
if (i2 > -1)
{
/* in the diagonal block */
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block  */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
}
}
if (sum != 0)
{
distribute = A_offd_data[jj] / sum;
/*---------------------------------------------------------
 * Loop over row of A_ext for point i1 and do
 * the distribution.
 *--------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
i2 = (HYPRE_Int)A_ext_j[jj1];
if (num_functions == 1 || dof_func[i1] == dof_func[i2])
{
if (i2 > -1) /* in the diagonal block */
{
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block  */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
P_offd_data[P_marker_offd[-i2-1]]
+= distribute * A_ext_data[jj1];
}
}
}
}
else /* sum = 0 */
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
/*-----------------------------------------------------------
 * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
 * into the diagonal.
 *-----------------------------------------------------------*/
else if (CF_marker_offd[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
/*-----------------------------------------------------------------
 * Set interpolation weight by dividing by the diagonal.
 *-----------------------------------------------------------------*/
/* A zero diagonal would divide by zero; zero the row instead and
 * warn when print_level was requested (debug_flag < 0 on entry). */
if (diagonal == 0.0)
{
if (print_level)
hypre_printf(" Warning! zero diagonal! Proc id %d row %d\n", my_id,i);
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] = 0.0;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] = 0.0;
}
}
else
{
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] /= -diagonal;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] /= -diagonal;
}
}
}
/* New sentinel per row; see comment at strong_f_marker init. */
strong_f_marker--;
P_offd_i[i+1] = jj_counter_offd;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
/* Compress P_offd's column space: find the columns actually used,
 * build tmp_map_offd (used-column list), and renumber P_offd_j into
 * the compressed numbering via binary search. */
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_cols_A_offd; i++)
P_marker[i] = 0;
num_cols_P_offd = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
/* NOTE(review): col_map_offd_P is allocated here but not filled in
 * this function -- presumably hypre_GetCommPkgRTFromCommPkgA fills it
 * from tmp_map_offd; confirm against that routine. */
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
{
while (P_marker[index]==0) index++;
tmp_map_offd[i] = index++;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
}
/* Restore special F-points (-3) to ordinary F-points (-1). */
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P, A, fine_to_coarse, tmp_map_offd);
*P_ptr = P;
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
/* A_ext only exists in the parallel case. */
if (num_procs > 1) hypre_CSRMatrixDestroy(A_ext);
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGTruncandBuild
*--------------------------------------------------------------------------*/
/*
 * Truncate an existing interpolation matrix P (drop small entries and/or cap
 * row length) and rebuild the pieces that truncation invalidates: compress
 * P_offd's column space, rebuild col_map_offd, and recreate P's matvec
 * communication package.  Returns hypre_error_flag.
 */
HYPRE_Int
hypre_BoomerAMGTruncandBuild( hypre_ParCSRMatrix *P,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts)
{
hypre_CSRMatrix *P_offd = hypre_ParCSRMatrixOffd(P);
hypre_ParCSRCommPkg *commpkg_P = hypre_ParCSRMatrixCommPkg(P);
HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(P);
HYPRE_Int *P_offd_i = hypre_CSRMatrixI(P_offd);
HYPRE_Int *P_offd_j = hypre_CSRMatrixJ(P_offd);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(P_offd);
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(P_offd);
HYPRE_BigInt *new_col_map_offd;
HYPRE_Int *tmp_map_offd = NULL;
HYPRE_Int P_offd_size=0, new_num_cols_offd;
HYPRE_Int *P_marker;
HYPRE_Int i;
HYPRE_Int index;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
/* Truncation may reallocate the CSR arrays; re-fetch the pointers. */
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_size = P_offd_i[n_fine];
}
/* Mark which offd columns survive truncation, count them, and renumber
 * P_offd_j into the compressed column space via binary search on
 * tmp_map_offd (the sorted list of surviving old indices). */
new_num_cols_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
/*#define HYPRE_SMP_PRIVATE i
#include "../utilities/hypre_smp_forloop.h"*/
for (i=0; i < num_cols_offd; i++)
P_marker[i] = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
new_num_cols_offd++;
P_marker[index] = 1;
}
}
tmp_map_offd = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd, HYPRE_MEMORY_HOST);
new_col_map_offd = hypre_CTAlloc(HYPRE_BigInt, new_num_cols_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i=0; i < new_num_cols_offd; i++)
{
while (P_marker[index]==0) index++;
tmp_map_offd[i] = index++;
}
/*#define HYPRE_SMP_PRIVATE i
#include "../utilities/hypre_smp_forloop.h"*/
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
P_offd_j[i],
new_num_cols_offd);
}
/* Translate surviving old offd indices to global column ids.
 * NOTE(review): P_marker and new_col_map_offd are only allocated when
 * P_offd_size > 0; this loop is safe only because new_num_cols_offd
 * stays 0 in that case -- confirm no path sets it otherwise. */
index = 0;
for (i = 0; i < new_num_cols_offd; i++)
{
while (P_marker[index] == 0) index++;
new_col_map_offd[i] = col_map_offd[index];
index++;
}
if (P_offd_size) hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
if (new_num_cols_offd)
{
hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
/* Replace the old column map with the compressed one. */
hypre_TFree(col_map_offd, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixColMapOffd(P) = new_col_map_offd;
hypre_CSRMatrixNumCols(P_offd) = new_num_cols_offd;
}
/* The old comm package refers to the old column map; rebuild it. */
if (commpkg_P != NULL) hypre_MatvecCommPkgDestroy(commpkg_P);
hypre_MatvecCommPkgCreate(P);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_CreateC
 *
 * Creates the weighted-Jacobi iteration matrix C = I - w*D^{-1}*A with the
 * same sparsity pattern (and row partitioning) as A.  When w == 0, a per-row
 * weight is used instead: w_local = sum_j |a_ij| (l1 norm of row i), giving
 * C = I - D_l1^{-1}*A.
 *
 * Assumes the diagonal entry is stored first in each row of A's diag block
 * (standard hypre convention).  The returned matrix does not own its
 * row/col starts (they belong to A); caller destroys C when done.
 *--------------------------------------------------------------------------*/
hypre_ParCSRMatrix *hypre_CreateC( hypre_ParCSRMatrix *A,
                                   HYPRE_Real w)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A);
   HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
   HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
   hypre_ParCSRMatrix *C;
   hypre_CSRMatrix *C_diag;
   hypre_CSRMatrix *C_offd;
   HYPRE_Real *C_diag_data;
   HYPRE_Int *C_diag_i;
   HYPRE_Int *C_diag_j;
   HYPRE_Real *C_offd_data;
   HYPRE_Int *C_offd_i;
   HYPRE_Int *C_offd_j;
   HYPRE_BigInt *col_map_offd_C;
   HYPRE_Int i, j, index;
   HYPRE_Real invdiag;         /* scale factor applied to off-diagonal entries */
   HYPRE_Real w_local = w;     /* per-row weight when w == 0 */
   /* C is square with the same partitioning and nnz structure as A */
   C = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_rows, row_starts,
                                row_starts, num_cols_offd, A_diag_i[num_rows], A_offd_i[num_rows]);
   hypre_ParCSRMatrixInitialize(C);
   C_diag = hypre_ParCSRMatrixDiag(C);
   C_offd = hypre_ParCSRMatrixOffd(C);
   C_diag_i = hypre_CSRMatrixI(C_diag);
   C_diag_j = hypre_CSRMatrixJ(C_diag);
   C_diag_data = hypre_CSRMatrixData(C_diag);
   C_offd_i = hypre_CSRMatrixI(C_offd);
   C_offd_j = hypre_CSRMatrixJ(C_offd);
   C_offd_data = hypre_CSRMatrixData(C_offd);
   col_map_offd_C = hypre_ParCSRMatrixColMapOffd(C);
   /* row/col starts are A's arrays; C must not free them */
   hypre_ParCSRMatrixOwnsRowStarts(C) = 0;
   hypre_ParCSRMatrixOwnsColStarts(C) = 0;
   for (i=0; i < num_cols_offd; i++)
      col_map_offd_C[i] = col_map_offd_A[i];
   for (i=0; i < num_rows; i++)
   {
      /* index points at the (assumed first-stored) diagonal entry of row i */
      index = A_diag_i[i];
      invdiag = -w/A_diag_data[index];
      C_diag_data[index] = 1.0-w;
      C_diag_j[index] = A_diag_j[index];
      if (w == 0)
      {
         /* w == 0 selects the l1-Jacobi variant: weight by the row's l1 norm */
         w_local = fabs(A_diag_data[index]);
         for (j = index+1; j < A_diag_i[i+1]; j++)
            w_local += fabs(A_diag_data[j]);
         for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
            w_local += fabs(A_offd_data[j]);
         invdiag = -1/w_local;
         C_diag_data[index] = 1.0-A_diag_data[index]/w_local;
      }
      C_diag_i[i] = index;
      C_offd_i[i] = A_offd_i[i];
      /* scale the remaining diag-block entries of the row */
      for (j = index+1; j < A_diag_i[i+1]; j++)
      {
         C_diag_data[j] = A_diag_data[j]*invdiag;
         C_diag_j[j] = A_diag_j[j];
      }
      /* scale the offd-block entries of the row */
      for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
      {
         C_offd_data[j] = A_offd_data[j]*invdiag;
         C_offd_j[j] = A_offd_j[j];
      }
   }
   C_diag_i[num_rows] = A_diag_i[num_rows];
   C_offd_i[num_rows] = A_offd_i[num_rows];
   return C;
}
/* RL */
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGBuildInterpOnePnt
 *
 * Builds a "one-point" interpolation operator P: each C-point interpolates
 * to itself (identity row, weight 1.0) and each F-point interpolates with
 * weight 1.0 from exactly one C-point -- the strongly connected (per S)
 * C-point whose entry |a_ij| in row i of A is largest.  F-points with no
 * strong C neighbor get an empty row of P.
 *
 * Output: *P_ptr, a newly created ParCSR matrix with 0/1 entries.
 * Returns hypre_error_flag.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildInterpOnePnt( hypre_ParCSRMatrix *A,
                                  HYPRE_Int *CF_marker,
                                  hypre_ParCSRMatrix *S,
                                  HYPRE_BigInt *num_cpts_global,
                                  HYPRE_Int num_functions,
                                  HYPRE_Int *dof_func,
                                  HYPRE_Int debug_flag,
                                  HYPRE_Int *col_offd_S_to_A,
                                  hypre_ParCSRMatrix **P_ptr)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   //HYPRE_Int *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
   /* Interpolation matrix P */
   hypre_ParCSRMatrix *P;
   /* csr's */
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   /* arrays */
   HYPRE_Real *P_diag_data;
   HYPRE_Int *P_diag_i;
   HYPRE_Int *P_diag_j;
   HYPRE_Real *P_offd_data;
   HYPRE_Int *P_offd_i;
   HYPRE_Int *P_offd_j;
   HYPRE_Int num_cols_offd_P;
   HYPRE_Int *tmp_map_offd = NULL;
   HYPRE_BigInt *col_map_offd_P = NULL;
   /* CF marker off-diag part */
   HYPRE_Int *CF_marker_offd = NULL;
   /* func type off-diag part */
   HYPRE_Int *dof_func_offd = NULL;
   /* nnz */
   HYPRE_Int nnz_diag, nnz_offd, cnt_diag, cnt_offd;
   HYPRE_Int *marker_diag, *marker_offd = NULL;
   /* local size */
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
   /* number of C-pts */
   HYPRE_Int n_cpts = 0;
   /* fine to coarse mapping: diag part and offd part */
   HYPRE_Int *fine_to_coarse;
   HYPRE_BigInt *fine_to_coarse_offd = NULL;
   HYPRE_BigInt total_global_cpts, my_first_cpt;
   HYPRE_Int my_id, num_procs;
   HYPRE_Int num_sends;
   HYPRE_Int *int_buf_data = NULL;
   HYPRE_BigInt *big_int_buf_data = NULL;
   //HYPRE_Int col_start = hypre_ParCSRMatrixFirstRowIndex(A);
   //HYPRE_Int col_end = col_start + n_fine;
   HYPRE_Int i, j, i1, j1, k1, index, start;
   /* per F-point: (local) index of the chosen C-point */
   HYPRE_Int *max_abs_cij;
   /* per F-point: 'd' chosen C-pt is local, 'o' it is off-proc, 'n' none found */
   char *max_abs_diag_offd;
   HYPRE_Real max_abs_aij, vv;
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   my_first_cpt = num_cpts_global[0];
   /* the last rank holds the global C-point count; broadcast it to all */
   if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/
   /* CF marker for the off-diag columns */
   if (num_cols_A_offd)
   {
      CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd,HYPRE_MEMORY_HOST);
   }
   /* function type indicator for the off-diag columns */
   if (num_functions > 1 && num_cols_A_offd)
   {
      dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd,HYPRE_MEMORY_HOST);
   }
   /* if CommPkg of A is not present, create it */
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }
   /* number of sends to do (number of procs) */
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   /* send buffer, of size send_map_starts[num_sends]),
    * i.e., number of entries to send */
   int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),HYPRE_MEMORY_HOST);
   /* copy CF markers of elements to send to buffer
    * RL: why copy them with two for loops? Why not just loop through all in one */
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      /* start pos of elements sent to send_proc[i] */
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      /* loop through all elems to send_proc[i] */
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
      {
         /* CF marker of send_map_elemts[j] */
         int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
   }
   /* create a handle to start communication. 11: for integer */
   comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd);
   /* destroy the handle to finish communication */
   hypre_ParCSRCommHandleDestroy(comm_handle);
   /* do a similar communication for dof_func */
   if (num_functions > 1)
   {
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         {
            int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
         }
      }
      comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
   }
   hypre_TFree(int_buf_data,HYPRE_MEMORY_HOST);
   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping,
    * and find the most strongly influencing C-pt for each F-pt
    *-----------------------------------------------------------------------*/
   /* nnz in diag and offd parts */
   cnt_diag = 0;
   cnt_offd = 0;
   max_abs_cij = hypre_CTAlloc(HYPRE_Int, n_fine,HYPRE_MEMORY_HOST);
   max_abs_diag_offd = hypre_CTAlloc(char, n_fine,HYPRE_MEMORY_HOST);
   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine,HYPRE_MEMORY_HOST);
   /* markers initialized as zeros */
   marker_diag = hypre_CTAlloc(HYPRE_Int, n_fine,HYPRE_MEMORY_HOST);
   marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd,HYPRE_MEMORY_HOST);
   for (i = 0; i < n_fine; i++)
   {
      /*--------------------------------------------------------------------
       * If i is a C-point, interpolation is the identity. Also set up
       * mapping vector.
       *--------------------------------------------------------------------*/
      if (CF_marker[i] >= 0)
      {
         //fine_to_coarse[i] = my_first_cpt + n_cpts;
         fine_to_coarse[i] = n_cpts;
         n_cpts++;
         continue;
      }
      /* mark all the strong connections: in S */
      /* MARK == i+1 is unique per row, so the marker arrays never need
       * clearing between iterations */
      HYPRE_Int MARK = i + 1;
      /* loop through row i of S, diag part */
      for (j = S_diag_i[i]; j < S_diag_i[i+1]; j++)
      {
         marker_diag[S_diag_j[j]] = MARK;
      }
      /* loop through row i of S, offd part */
      if (num_procs > 1)
      {
         for (j = S_offd_i[i]; j < S_offd_i[i+1]; j++)
         {
            /* translate S's offd numbering to A's when the map is given */
            j1 = col_offd_S_to_A ? col_offd_S_to_A[S_offd_j[j]] : S_offd_j[j];
            marker_offd[j1] = MARK;
         }
      }
      fine_to_coarse[i] = -1;
      /*---------------------------------------------------------------------------
       * If i is an F-pt, interpolation is from the most strongly influencing C-pt
       * Find this C-pt and save it
       *--------------------------------------------------------------------------*/
      /* if we failed to find any strong C-pt, mark this point as an 'n' */
      char marker = 'n';
      /* max abs val */
      max_abs_aij = -1.0;
      /* loop through row i of A, diag part */
      for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
      {
         i1 = A_diag_j[j];
         vv = fabs(A_diag_data[j]);
#if 0
         /* !!! this is a hack just for code verification purpose !!!
            it basically says:
            1. if we see |a_ij| < 1e-14, force it to be 1e-14
            2. if we see |a_ij| == the max(|a_ij|) so far exactly,
            replace it if the j idx is smaller
            Reasons:
            1. numerical round-off for eps-level values
            2. entries in CSR rows may be listed in different orders
          */
         vv = vv < 1e-14 ? 1e-14 : vv;
         if (CF_marker[i1] >= 0 && marker_diag[i1] == MARK &&
             vv == max_abs_aij && i1 < max_abs_cij[i])
         {
            /* mark it as a 'd' */
            marker = 'd';
            max_abs_cij[i] = i1;
            max_abs_aij = vv;
            continue;
         }
#endif
         /* it is a strong C-pt and has abs val larger than what have seen */
         if (CF_marker[i1] >= 0 && marker_diag[i1] == MARK && vv > max_abs_aij)
         {
            /* mark it as a 'd' */
            marker = 'd';
            max_abs_cij[i] = i1;
            max_abs_aij = vv;
         }
      }
      /* offd part */
      if (num_procs > 1)
      {
         for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
         {
            i1 = A_offd_j[j];
            vv = fabs(A_offd_data[j]);
            if (CF_marker_offd[i1] >= 0 && marker_offd[i1] == MARK && vv > max_abs_aij)
            {
               /* mark it as an 'o' */
               marker = 'o';
               max_abs_cij[i] = i1;
               max_abs_aij = vv;
            }
         }
      }
      max_abs_diag_offd[i] = marker;
      if (marker == 'd')
      {
         cnt_diag ++;
      }
      else if (marker == 'o')
      {
         cnt_offd ++;
      }
   }
   /* every C-pt contributes one identity entry to the diag part */
   nnz_diag = cnt_diag + n_cpts;
   nnz_offd = cnt_offd;
   /*------------- allocate arrays */
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1,HYPRE_MEMORY_HOST);
   P_diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag,HYPRE_MEMORY_HOST);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, nnz_diag,HYPRE_MEMORY_HOST);
   /* not in ``if num_procs > 1'',
    * allocation needed even for empty CSR */
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1,HYPRE_MEMORY_HOST);
   P_offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd,HYPRE_MEMORY_HOST);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, nnz_offd,HYPRE_MEMORY_HOST);
   /* redundant */
   P_diag_i[0] = 0;
   P_offd_i[0] = 0;
   /* reset counters */
   cnt_diag = 0;
   cnt_offd = 0;
   /*-----------------------------------------------------------------------
    * Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/
   fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd,HYPRE_MEMORY_HOST);
   big_int_buf_data = hypre_CTAlloc(HYPRE_BigInt, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),HYPRE_MEMORY_HOST);
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
      {
         /* send global coarse indices (local index shifted by my_first_cpt) */
         big_int_buf_data[index++] = my_first_cpt
            +(HYPRE_BigInt)fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
   }
   comm_handle = hypre_ParCSRCommHandleCreate(21, comm_pkg, big_int_buf_data, fine_to_coarse_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);
   /*-----------------------------------------------------------------------
    * Second Pass: Populate P
    *-----------------------------------------------------------------------*/
   for (i = 0; i < n_fine; i++)
   {
      if (CF_marker[i] >= 0)
      {
         /*--------------------------------------------------------------------
          * If i is a C-point, interpolation is the identity.
          *--------------------------------------------------------------------*/
         //P_diag_j[cnt_diag] = fine_to_coarse[i] - my_first_cpt;
         P_diag_j[cnt_diag] = fine_to_coarse[i];
         P_diag_data[cnt_diag++] = 1.0;
      }
      else
      {
         /*---------------------------------------------------------------------------
          * If i is an F-pt, interpolation is from the most strongly influencing C-pt
          *--------------------------------------------------------------------------*/
         if (max_abs_diag_offd[i] == 'd')
         {
            /* on diag part of P */
            j = max_abs_cij[i];
            //P_diag_j[cnt_diag] = fine_to_coarse[j] - my_first_cpt;
            P_diag_j[cnt_diag] = fine_to_coarse[j];
            P_diag_data[cnt_diag++] = 1.0;
         }
         else if (max_abs_diag_offd[i] == 'o')
         {
            /* on offd part of P; store A's offd index for now --
             * remapped to P's compressed offd numbering below */
            j = max_abs_cij[i];
            P_offd_j[cnt_offd] = j;
            P_offd_data[cnt_offd++] = 1.0;
         }
      }
      P_diag_i[i+1] = cnt_diag;
      P_offd_i[i+1] = cnt_offd;
   }
   hypre_assert(cnt_diag == nnz_diag);
   hypre_assert(cnt_offd == nnz_offd);
   /* num of cols in the offd part of P */
   num_cols_offd_P = 0;
   /* marker_offd: all -1 */
   for (i = 0; i < num_cols_A_offd; i++)
   {
      marker_offd[i] = -1;
   }
   for (i = 0; i < nnz_offd; i++)
   {
      i1 = P_offd_j[i];
      if (marker_offd[i1] == -1)
      {
         num_cols_offd_P++;
         marker_offd[i1] = 1;
      }
   }
   /* col_map_offd_P: the col indices of the offd of P
    * we first keep them be the offd-idx of A */
   col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_P,HYPRE_MEMORY_HOST);
   tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd_P,HYPRE_MEMORY_HOST);
   for (i = 0, i1 = 0; i < num_cols_A_offd; i++)
   {
      if (marker_offd[i] == 1)
      {
         tmp_map_offd[i1++] = i;
      }
   }
   hypre_assert(i1 == num_cols_offd_P);
   /* now, adjust P_offd_j to local idx w.r.t col_map_offd_R
    * by searching */
   for (i = 0; i < nnz_offd; i++)
   {
      i1 = P_offd_j[i];
      k1 = hypre_BinarySearch(tmp_map_offd, i1, num_cols_offd_P);
      /* search must succeed */
      hypre_assert(k1 >= 0 && k1 < num_cols_offd_P);
      P_offd_j[i] = k1;
   }
   /* change col_map_offd_P to global coarse ids */
   for (i = 0; i < num_cols_offd_P; i++)
   {
      col_map_offd_P[i] = fine_to_coarse_offd[tmp_map_offd[i]];
   }
   /* Now, we should have everything of Parcsr matrix P */
   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumCols(A), /* global num of rows */
                                total_global_cpts, /* global num of cols */
                                hypre_ParCSRMatrixColStarts(A), /* row_starts */
                                num_cpts_global, /* col_starts */
                                num_cols_offd_P, /* num cols offd */
                                nnz_diag,
                                nnz_offd);
   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   /* P's row_starts array is A's col_starts, owned by A; P must not free it */
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
   hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
   /* create CommPkg of P */
   hypre_MatvecCommPkgCreate(P);
   *P_ptr = P;
   /* free workspace */
   hypre_TFree(CF_marker_offd,HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd,HYPRE_MEMORY_HOST);
   hypre_TFree(tmp_map_offd,HYPRE_MEMORY_HOST);
   hypre_TFree(big_int_buf_data,HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse,HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse_offd,HYPRE_MEMORY_HOST);
   hypre_TFree(marker_diag,HYPRE_MEMORY_HOST);
   hypre_TFree(marker_offd,HYPRE_MEMORY_HOST);
   hypre_TFree(max_abs_cij,HYPRE_MEMORY_HOST);
   hypre_TFree(max_abs_diag_offd,HYPRE_MEMORY_HOST);
   return hypre_error_flag;
}
|
blocked.c | //
// blocked.c
//
// Created by Hussian Alamri on April 2013
//
#include "blocked.h"
/******* BCRS functions *******/
/* Count the dense blocks of my_m, where a block is a maximal run of
 * consecutive nonzeros within a single row.  The count sizes the BCRS
 * colInd/nnzPtr arrays built by CreateBCRS(). */
int countBlocks(MATRIX* my_m) {
    int nblocks = 0;
    int nrows = my_m->nrows;
    int ncols = my_m->ncols;
    double** mel = my_m->mel;
    int r, c;
    for (r = 0; r < nrows; ++r) {
        for (c = 0; c < ncols; ++c) {
            /* a block starts at any nonzero sitting in column 0 or whose
             * left neighbor is zero */
            if (mel[r][c] != 0 && (c == 0 || mel[r][c-1] == 0)) {
                ++nblocks;
            }
        }
    }
    return nblocks;
}
/* Build a Blocked CRS (BCRS) representation of the dense matrix my_m.
 * A "block" is a maximal run of consecutive nonzeros within one row:
 *   colInd[b]   - starting column of block b
 *   nnzPtr[b]   - offset of block b's first value in value[]; nnzPtr[nblocks] == nnz
 *   rowPtr[r]   - blocks rowPtr[r]..rowPtr[r+1]-1 belong to row r
 *   value[]     - all nonzero values, in row-major order
 * Returns a heap-allocated BCRS (caller releases it with DestroyBCRS),
 * or NULL if any allocation fails. */
BCRS* CreateBCRS(MATRIX* my_m) {
    int i, jj, k, index, block_number = 0;
    int nrows = my_m->nrows;
    int ncols = my_m->ncols;
    int nnz = my_m->nnz;
    double** mal = my_m->mel;
    BCRS* cc = malloc(sizeof *cc);
    int nblocks = countBlocks(my_m);
    int* colInd = malloc(nblocks * sizeof *colInd);
    int* rowPtr = malloc((nrows+1) * sizeof *rowPtr);
    int* nnzPtr = malloc((nblocks+1) * sizeof *nnzPtr);
    double* value = malloc(nnz * sizeof *value);
    /* fail cleanly instead of dereferencing NULL; free(NULL) is a no-op */
    if (!cc || !colInd || !rowPtr || !nnzPtr || !value) {
        free(cc);
        free(colInd);
        free(rowPtr);
        free(nnzPtr);
        free(value);
        return NULL;
    }
    for (i = 0; i < nrows+1; ++i) {
        rowPtr[i] = 0;
    }
    index = 0;
    for (k = 0; k < nrows; k++) {
        for (jj = 0; jj < ncols; ++jj) {
            if (mal[k][jj] != 0) {
                /* first element of a new block: column 0 or left neighbor zero */
                if ((jj == 0) || (jj != 0 && (mal[k][jj-1] == 0))) {
                    colInd[block_number] = jj;
                    nnzPtr[block_number] = index;
                    block_number++;
                }
                value[index] = mal[k][jj];
                index++;
            }
        }
        rowPtr[k+1] = block_number;
    }
    nnzPtr[nblocks] = nnz;  /* sentinel so nnzPtr[b+1] is valid for the last block */
    cc->colInd = colInd;
    cc->rowPtr = rowPtr;
    cc->value = value;
    cc->nnzPtr = nnzPtr;
    cc->nrows = nrows;
    cc->ncols = ncols;
    cc->nnz = nnz;
    cc->nblocks = nblocks;
    return cc;
}
/* Placeholder: printing a BCRS matrix is not implemented yet. */
void PrintBCRS(BCRS *c) {
    fputs("To-Do: Not implemented", stdout);
}
/* Sparse matrix-vector product r = c * x for a BCRS matrix.
 * x must have at least c->ncols entries and r at least c->nrows entries.
 * Parallelized over rows with OpenMP; each row is owned by one thread. */
void MultiplyBCRS(BCRS *c, double *x, double *r) {
    int i, j, startCol, k, tt;
    int *rowPtr = c->rowPtr;
    int *nnzPtr = c->nnzPtr;
    int *colInd = c->colInd;
    int nrows = c->nrows;
    double *value = c->value;
    double t;
    /* r has one entry per row.  The original cleared ncols entries, which
     * writes past the end of r whenever ncols > nrows (assumes r is sized
     * to nrows -- TODO confirm against callers).  The clear is also
     * defensive only: every r[i] is assigned below. */
    memset(r, 0, nrows*sizeof(double));
#pragma omp parallel for default(shared) private(i, j, k, startCol, t, tt)
    for (i = 0; i < nrows; ++i) {
        t = 0.0;
        /* accumulate all blocks of row i */
        for (j = rowPtr[i]; j < rowPtr[i+1]; ++j) {
            startCol = colInd[j];  /* block j covers columns startCol.. */
            tt = 0;
#pragma ivdep
            for (k = nnzPtr[j]; k < nnzPtr[j+1]; ++k) {
                t += value[k]*x[startCol+tt];
                tt++;
            }
        }
        r[i] = t;
    }
}
/* Release every array owned by a BCRS matrix, then the handle itself. */
void DestroyBCRS(BCRS *cc) {
    free(cc->value);
    free(cc->nnzPtr);
    free(cc->rowPtr);
    free(cc->colInd);
    free(cc);
}
/******* ELL functions *******/
/* Return the largest number of nonzero entries found in any single row
 * of m; this is the padded row width of the ELL format. */
int compute_max_entries_per_row(MATRIX* m)
{
    int widest = 0;
    int row, col;
    for (row = 0; row < m->nrows; ++row) {
        int count = 0;
        for (col = 0; col < m->ncols; ++col) {
            if (m->mel[row][col] != 0) {
                ++count;
            }
        }
        /* explicit comparison instead of the max() macro */
        if (count > widest) {
            widest = count;
        }
    }
    return widest;
}
/* Build an ELLPACK (ELL) representation of the dense matrix m.
 * Each row is padded to the width of the widest row: padded value slots
 * hold 0 and padded index slots hold the sentinel -1. */
ELL* CreateELL(MATRIX *m) {
    int nrows = m->nrows;
    int ncols = m->ncols;
    double** mal = m->mel;
    int width = compute_max_entries_per_row(m);
    ELL* ell = (ELL*) malloc(sizeof(ELL));
    double** values = (double**) malloc(nrows * sizeof(double*));
    int** indices = (int**) malloc(nrows * sizeof(int*));
    int r, c, slot;
    /* allocate and pad each row in one pass */
    for (r = 0; r < nrows; ++r) {
        values[r] = (double*) malloc(width * sizeof(double));
        indices[r] = (int*) malloc(width * sizeof(int));
        for (slot = 0; slot < width; ++slot) {
            values[r][slot] = 0;
            indices[r][slot] = -1;
        }
    }
    /* pack the nonzeros of each row to the left; at most `width` per row */
    for (r = 0; r < nrows; ++r) {
        slot = 0;
        for (c = 0; c < ncols; ++c) {
            if (mal[r][c] != 0) {
                values[r][slot] = mal[r][c];
                indices[r][slot] = c;
                slot++;
            }
        }
    }
    ell->values = values;
    ell->indices = indices;
    ell->nnz = m->nnz;
    ell->nrows = nrows;
    ell->ncols = ncols;
    ell->max_entries_per_row = width;
    return ell;
}
/* Placeholder: printing an ELL matrix is not implemented yet. */
void PrintELL(ELL *e) {
    fputs("To-Do: Not implemented", stdout);
}
/* Sparse matrix-vector product r = e * x for an ELL matrix.
 * x must have at least e->ncols entries and r at least e->nrows entries.
 * Parallelized over rows with OpenMP. */
void MultiplyELL(ELL* e, double* x, double* r) {
    int i, j, col;
    int **indices = e->indices;
    int nrows = e->nrows;
    int max_entries_per_row = e->max_entries_per_row;
    double **A = e->values;
    double t;
    /* r has one entry per row.  The original cleared ncols entries, which
     * over-runs r whenever ncols > nrows (assumes r is sized to nrows --
     * TODO confirm against callers). */
    memset(r, 0, nrows*sizeof(double));
#pragma omp parallel for default(shared) private(i, j, t, col)
    for (i = 0; i < nrows; ++i) {
        t = 0.0;
        for (j = 0; j < max_entries_per_row; ++j) {
            col = indices[i][j];
            /* CreateELL pads short rows with index -1 / value 0; the
             * original read x[-1] (out of bounds) for those slots.
             * Skipping them changes nothing numerically since the padded
             * value is 0. */
            if (col >= 0) {
                t += A[i][j] * x[col];
            }
        }
        r[i] = t;
    }
}
/* Release an ELL matrix c and the row storage of the source matrix m
 * (the original already freed m's rows here; kept for compatibility).
 * c must have been built from m, so both have m->nrows rows. */
void DestroyELL(ELL* c, MATRIX *m) {
    int i;
    for (i = 0; i < m->nrows; ++i) {
        free(m->mel[i]);
        /* leak fix: the per-row arrays allocated by CreateELL were never
         * freed; only the row-pointer arrays were. */
        free(c->values[i]);
        free(c->indices[i]);
    }
    free(c->values);
    free(c->indices);
    free(c);
}
|
phono3py.c | /* Copyright (C) 2021 Atsushi Togo */
/* All rights reserved. */
/* This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include "phono3py.h"
#include <stdio.h>
#include <stdlib.h>
#include "bzgrid.h"
#include "collision_matrix.h"
#include "fc3.h"
#include "grgrid.h"
#include "imag_self_energy_with_g.h"
#include "interaction.h"
#include "isotope.h"
#include "lagrid.h"
#include "lapack_wrapper.h"
#include "phonoc_array.h"
#include "pp_collision.h"
#include "real_self_energy.h"
#include "tetrahedron_method.h"
#include "triplet.h"
#include "triplet_iw.h"
/* C-API entry point: packs the grid description (D_diag, Q, PS=0, BZ grid
 * addresses) into a temporary ConstBZGrid and forwards all remaining
 * arguments unchanged to itr_get_interaction().
 * Returns 1 on success, 0 when the temporary grid cannot be allocated. */
long ph3py_get_interaction(
    Darray *fc3_normal_squared, const char *g_zero, const Darray *frequencies,
    const lapack_complex_double *eigenvectors, const long (*triplets)[3],
    const long num_triplets, const long (*bz_grid_addresses)[3],
    const long D_diag[3], const long Q[3][3], const double *fc3,
    const long is_compact_fc3, const double (*svecs)[3],
    const long multi_dims[2], const long (*multiplicity)[2],
    const double *masses, const long *p2s_map, const long *s2p_map,
    const long *band_indices, const long symmetrize_fc3_q,
    const double cutoff_frequency) {
    ConstBZGrid *grid;
    long row, col;

    grid = (ConstBZGrid *)malloc(sizeof(ConstBZGrid));
    if (grid == NULL) {
        warning_print("Memory could not be allocated.");
        return 0;
    }
    grid->addresses = bz_grid_addresses;
    for (row = 0; row < 3; row++) {
        grid->D_diag[row] = D_diag[row];
        grid->PS[row] = 0;
        for (col = 0; col < 3; col++) {
            grid->Q[row][col] = Q[row][col];
        }
    }
    itr_get_interaction(fc3_normal_squared, g_zero, frequencies, eigenvectors,
                        triplets, num_triplets, grid, fc3, is_compact_fc3,
                        svecs, multi_dims, multiplicity, masses, p2s_map,
                        s2p_map, band_indices, symmetrize_fc3_q,
                        cutoff_frequency);
    free(grid);
    return 1;
}
/* C-API entry point: builds a temporary ConstBZGrid (including gp_map and
 * grid type, which the tetrahedron-method path needs) and forwards all
 * remaining arguments unchanged to ppc_get_pp_collision().
 * Returns 1 on success, 0 when the temporary grid cannot be allocated. */
long ph3py_get_pp_collision(
    double *imag_self_energy,
    const long relative_grid_address[24][4][3], /* thm */
    const double *frequencies, const lapack_complex_double *eigenvectors,
    const long (*triplets)[3], const long num_triplets,
    const long *triplet_weights, const long (*bz_grid_addresses)[3], /* thm */
    const long *bz_map, /* thm */
    const long bz_grid_type, const long D_diag[3], const long Q[3][3],
    const double *fc3, const long is_compact_fc3, const double (*svecs)[3],
    const long multi_dims[2], const long (*multiplicity)[2],
    const double *masses, const long *p2s_map, const long *s2p_map,
    const Larray *band_indices, const Darray *temperatures, const long is_NU,
    const long symmetrize_fc3_q, const double cutoff_frequency) {
    ConstBZGrid *grid;
    long row, col;

    grid = (ConstBZGrid *)malloc(sizeof(ConstBZGrid));
    if (grid == NULL) {
        warning_print("Memory could not be allocated.");
        return 0;
    }
    grid->addresses = bz_grid_addresses;
    grid->gp_map = bz_map;
    grid->type = bz_grid_type;
    for (row = 0; row < 3; row++) {
        grid->D_diag[row] = D_diag[row];
        grid->PS[row] = 0;
        for (col = 0; col < 3; col++) {
            grid->Q[row][col] = Q[row][col];
        }
    }
    ppc_get_pp_collision(
        imag_self_energy, relative_grid_address, frequencies, eigenvectors,
        triplets, num_triplets, triplet_weights, grid, fc3, is_compact_fc3,
        svecs, multi_dims, multiplicity, masses, p2s_map, s2p_map, band_indices,
        temperatures, is_NU, symmetrize_fc3_q, cutoff_frequency);
    free(grid);
    return 1;
}
/* C-API entry point: builds a temporary ConstBZGrid and forwards all
 * remaining arguments unchanged to ppc_get_pp_collision_with_sigma().
 * NOTE(review): unlike ph3py_get_pp_collision(), gp_map and type are left
 * unset here -- presumably the smearing (sigma) path never consults them;
 * confirm in ppc_get_pp_collision_with_sigma().
 * Returns 1 on success, 0 when the temporary grid cannot be allocated. */
long ph3py_get_pp_collision_with_sigma(
    double *imag_self_energy, const double sigma, const double sigma_cutoff,
    const double *frequencies, const lapack_complex_double *eigenvectors,
    const long (*triplets)[3], const long num_triplets,
    const long *triplet_weights, const long (*bz_grid_addresses)[3],
    const long D_diag[3], const long Q[3][3], const double *fc3,
    const long is_compact_fc3, const double (*svecs)[3],
    const long multi_dims[2], const long (*multiplicity)[2],
    const double *masses, const long *p2s_map, const long *s2p_map,
    const Larray *band_indices, const Darray *temperatures, const long is_NU,
    const long symmetrize_fc3_q, const double cutoff_frequency) {
    ConstBZGrid *grid;
    long row, col;

    grid = (ConstBZGrid *)malloc(sizeof(ConstBZGrid));
    if (grid == NULL) {
        warning_print("Memory could not be allocated.");
        return 0;
    }
    grid->addresses = bz_grid_addresses;
    for (row = 0; row < 3; row++) {
        grid->D_diag[row] = D_diag[row];
        grid->PS[row] = 0;
        for (col = 0; col < 3; col++) {
            grid->Q[row][col] = Q[row][col];
        }
    }
    ppc_get_pp_collision_with_sigma(
        imag_self_energy, sigma, sigma_cutoff, frequencies, eigenvectors,
        triplets, num_triplets, triplet_weights, grid, fc3, is_compact_fc3,
        svecs, multi_dims, multiplicity, masses, p2s_map, s2p_map, band_indices,
        temperatures, is_NU, symmetrize_fc3_q, cutoff_frequency);
    free(grid);
    return 1;
}
/* Thin pass-through wrapper: forwards all arguments unchanged to
 * ise_get_imag_self_energy_at_bands_with_g(); see
 * imag_self_energy_with_g.h for the argument contract. */
void ph3py_get_imag_self_energy_at_bands_with_g(
    double *imag_self_energy, const Darray *fc3_normal_squared,
    const double *frequencies, const long (*triplets)[3],
    const long *triplet_weights, const double *g, const char *g_zero,
    const double temperature, const double cutoff_frequency,
    const long num_frequency_points, const long frequency_point_index) {
    ise_get_imag_self_energy_at_bands_with_g(
        imag_self_energy, fc3_normal_squared, frequencies, triplets,
        triplet_weights, g, g_zero, temperature, cutoff_frequency,
        num_frequency_points, frequency_point_index);
}
/* Thin pass-through wrapper: forwards all arguments unchanged to
 * ise_get_detailed_imag_self_energy_at_bands_with_g(). */
void ph3py_get_detailed_imag_self_energy_at_bands_with_g(
    double *detailed_imag_self_energy, double *imag_self_energy_N,
    double *imag_self_energy_U, const Darray *fc3_normal_squared,
    const double *frequencies, const long (*triplets)[3],
    const long *triplet_weights, const long (*bz_grid_addresses)[3],
    const double *g, const char *g_zero, const double temperature,
    const double cutoff_frequency) {
    ise_get_detailed_imag_self_energy_at_bands_with_g(
        detailed_imag_self_energy, imag_self_energy_N, imag_self_energy_U,
        fc3_normal_squared, frequencies, triplets, triplet_weights,
        bz_grid_addresses, g, g_zero, temperature, cutoff_frequency);
}
/* Thin pass-through wrapper: forwards all arguments unchanged to
 * rse_get_real_self_energy_at_bands(). */
void ph3py_get_real_self_energy_at_bands(
    double *real_self_energy, const Darray *fc3_normal_squared,
    const long *band_indices, const double *frequencies,
    const long (*triplets)[3], const long *triplet_weights,
    const double epsilon, const double temperature,
    const double unit_conversion_factor, const double cutoff_frequency) {
    rse_get_real_self_energy_at_bands(real_self_energy, fc3_normal_squared,
                                      band_indices, frequencies, triplets,
                                      triplet_weights, epsilon, temperature,
                                      unit_conversion_factor, cutoff_frequency);
}
/* Thin pass-through wrapper: forwards all arguments unchanged to
 * rse_get_real_self_energy_at_frequency_point(). */
void ph3py_get_real_self_energy_at_frequency_point(
    double *real_self_energy, const double frequency_point,
    const Darray *fc3_normal_squared, const long *band_indices,
    const double *frequencies, const long (*triplets)[3],
    const long *triplet_weights, const double epsilon, const double temperature,
    const double unit_conversion_factor, const double cutoff_frequency) {
    rse_get_real_self_energy_at_frequency_point(
        real_self_energy, frequency_point, fc3_normal_squared, band_indices,
        frequencies, triplets, triplet_weights, epsilon, temperature,
        unit_conversion_factor, cutoff_frequency);
}
/* Thin pass-through wrapper: forwards all arguments unchanged to
 * col_get_collision_matrix(). */
void ph3py_get_collision_matrix(
    double *collision_matrix, const Darray *fc3_normal_squared,
    const double *frequencies, const long (*triplets)[3],
    const long *triplets_map, const long *map_q,
    const long *rotated_grid_points, const double *rotations_cartesian,
    const double *g, const long num_ir_gp, const long num_gp,
    const long num_rot, const double temperature,
    const double unit_conversion_factor, const double cutoff_frequency) {
    col_get_collision_matrix(collision_matrix, fc3_normal_squared, frequencies,
                             triplets, triplets_map, map_q, rotated_grid_points,
                             rotations_cartesian, g, num_ir_gp, num_gp, num_rot,
                             temperature, unit_conversion_factor,
                             cutoff_frequency);
}
/* Thin pass-through wrapper: forwards all arguments unchanged to
 * col_get_reducible_collision_matrix(). */
void ph3py_get_reducible_collision_matrix(
    double *collision_matrix, const Darray *fc3_normal_squared,
    const double *frequencies, const long (*triplets)[3],
    const long *triplets_map, const long *map_q, const double *g,
    const long num_gp, const double temperature,
    const double unit_conversion_factor, const double cutoff_frequency) {
    col_get_reducible_collision_matrix(
        collision_matrix, fc3_normal_squared, frequencies, triplets,
        triplets_map, map_q, g, num_gp, temperature, unit_conversion_factor,
        cutoff_frequency);
}
/* Thin pass-through wrapper: forwards all arguments unchanged to
 * iso_get_isotope_scattering_strength(). */
void ph3py_get_isotope_scattering_strength(
    double *gamma, const long grid_point, const double *mass_variances,
    const double *frequencies, const lapack_complex_double *eigenvectors,
    const long num_grid_points, const long *band_indices, const long num_band,
    const long num_band0, const double sigma, const double cutoff_frequency) {
    iso_get_isotope_scattering_strength(gamma, grid_point, mass_variances,
                                        frequencies, eigenvectors,
                                        num_grid_points, band_indices, num_band,
                                        num_band0, sigma, cutoff_frequency);
}
/* Thin pass-through wrapper: forwards all arguments unchanged to
 * iso_get_thm_isotope_scattering_strength() (tetrahedron-method variant). */
void ph3py_get_thm_isotope_scattering_strength(
    double *gamma, const long grid_point, const long *ir_grid_points,
    const long *weights, const double *mass_variances,
    const double *frequencies, const lapack_complex_double *eigenvectors,
    const long num_ir_grid_points, const long *band_indices,
    const long num_band, const long num_band0,
    const double *integration_weights, const double cutoff_frequency) {
    iso_get_thm_isotope_scattering_strength(
        gamma, grid_point, ir_grid_points, weights, mass_variances, frequencies,
        eigenvectors, num_ir_grid_points, band_indices, num_band, num_band0,
        integration_weights, cutoff_frequency);
}
/* Thin pass-through wrapper: forwards all arguments unchanged to
 * fc3_distribute_fc3(). */
void ph3py_distribute_fc3(double *fc3, const long target, const long source,
                          const long *atom_mapping, const long num_atom,
                          const double *rot_cart) {
    fc3_distribute_fc3(fc3, target, source, atom_mapping, num_atom, rot_cart);
}
/* Thin pass-through wrapper: forwards all arguments unchanged to
 * fc3_rotate_delta_fc2(). */
void ph3py_rotate_delta_fc2(double (*fc3)[3][3][3],
                            const double (*delta_fc2s)[3][3],
                            const double *inv_U,
                            const double (*site_sym_cart)[3][3],
                            const long *rot_map_syms, const long num_atom,
                            const long num_site_sym, const long num_disp) {
    fc3_rotate_delta_fc2(fc3, delta_fc2s, inv_U, site_sym_cart, rot_map_syms,
                         num_atom, num_site_sym, num_disp);
}
/* Symmetrize fc3 in place over index permutations.
 * Thin wrapper over fc3_set_permutation_symmetry_fc3(). */
void ph3py_get_permutation_symmetry_fc3(double *fc3, const long num_atom) {
    fc3_set_permutation_symmetry_fc3(fc3, num_atom);
}
/* Permutation-symmetrize a compact (primitive-cell indexed) fc3 array.
 * Thin wrapper over fc3_set_permutation_symmetry_compact_fc3(). */
void ph3py_get_permutation_symmetry_compact_fc3(
    double *fc3, const long p2s[], const long s2pp[], const long nsym_list[],
    const long perms[], const long n_satom, const long n_patom) {
    fc3_set_permutation_symmetry_compact_fc3(fc3, p2s, s2pp, nsym_list, perms,
                                             n_satom, n_patom);
}
/* Transpose a compact fc3 array; t_type selects which index pair is
 * exchanged (see fc3_transpose_compact_fc3 for the encoding). */
void ph3py_transpose_compact_fc3(double *fc3, const long p2s[],
                                 const long s2pp[], const long nsym_list[],
                                 const long perms[], const long n_satom,
                                 const long n_patom, const long t_type) {
    fc3_transpose_compact_fc3(fc3, p2s, s2pp, nsym_list, perms, n_satom,
                              n_patom, t_type);
}
/* Find irreducible q-point triplets on the reciprocal mesh for a fixed
 * grid point.  Returns the value of tpl_get_triplets_reciprocal_mesh_at_q
 * (the number of irreducible triplets, per its contract). */
long ph3py_get_triplets_reciprocal_mesh_at_q(
    long *map_triplets, long *map_q, const long grid_point,
    const long D_diag[3], const long is_time_reversal, const long num_rot,
    const long (*rec_rotations)[3][3], const long swappable) {
    return tpl_get_triplets_reciprocal_mesh_at_q(
        map_triplets, map_q, grid_point, D_diag, is_time_reversal, num_rot,
        rec_rotations, swappable);
}
/* Collect the BZ-grid triplets for a given grid point.
 *
 * Returns the number of irreducible triplets found.
 *
 * The ConstBZGrid handed to tpl_get_BZ_triplets_at_q() is a small,
 * fixed-size descriptor, so it is built on the stack instead of the heap;
 * this removes the allocation-failure path and the matching free() that
 * the heap-based version needed. */
long ph3py_get_BZ_triplets_at_q(long (*triplets)[3], const long grid_point,
                                const long (*bz_grid_addresses)[3],
                                const long *bz_map, const long *map_triplets,
                                const long num_map_triplets,
                                const long D_diag[3], const long Q[3][3],
                                const long bz_grid_type) {
    ConstBZGrid bzgrid;
    long i, j;

    bzgrid.addresses = bz_grid_addresses;
    bzgrid.gp_map = bz_map;
    bzgrid.type = bz_grid_type;
    for (i = 0; i < 3; i++) {
        bzgrid.D_diag[i] = D_diag[i];
        bzgrid.PS[i] = 0; /* shift is always zero here, as before */
        for (j = 0; j < 3; j++) {
            bzgrid.Q[i][j] = Q[i][j];
        }
    }
    bzgrid.size = num_map_triplets;

    return tpl_get_BZ_triplets_at_q(triplets, grid_point, &bzgrid,
                                    map_triplets);
}
/* relative_grid_addresses are given as P multiplied with those from dataset,
 * i.e.,
 * np.dot(relative_grid_addresses, P.T) */
/* Tetrahedron-method integration weights for triplets.
 * A temporary ConstBZGrid descriptor is heap-allocated, filled, passed to
 * tpl_get_integration_weight(), then freed.
 * Returns 1 on success, 0 when the descriptor could not be allocated. */
long ph3py_get_integration_weight(
    double *iw, char *iw_zero, const double *frequency_points,
    const long num_band0, const long relative_grid_address[24][4][3],
    const long D_diag[3], const long (*triplets)[3], const long num_triplets,
    const long (*bz_grid_addresses)[3], const long *bz_map,
    const long bz_grid_type, const double *frequencies1, const long num_band1,
    const double *frequencies2, const long num_band2, const long tp_type,
    const long openmp_per_triplets, const long openmp_per_bands) {
    ConstBZGrid *bzgrid;
    long i;

    if ((bzgrid = (ConstBZGrid *)malloc(sizeof(ConstBZGrid))) == NULL) {
        warning_print("Memory could not be allocated.");
        return 0;
    }

    bzgrid->addresses = bz_grid_addresses;
    bzgrid->gp_map = bz_map;
    bzgrid->type = bz_grid_type;
    /* Only D_diag is filled in; PS and Q are left unset -- presumably not
     * read by tpl_get_integration_weight().  NOTE(review): confirm. */
    for (i = 0; i < 3; i++) {
        bzgrid->D_diag[i] = D_diag[i];
    }

    tpl_get_integration_weight(
        iw, iw_zero, frequency_points, num_band0, relative_grid_address,
        triplets, num_triplets, bzgrid, frequencies1, num_band1, frequencies2,
        num_band2, tp_type, openmp_per_triplets, openmp_per_bands);

    free(bzgrid);
    bzgrid = NULL;

    return 1;
}
/* Gaussian-smearing (sigma) integration weights for triplets.
 * Thin wrapper over tpl_get_integration_weight_with_sigma(). */
void ph3py_get_integration_weight_with_sigma(
    double *iw, char *iw_zero, const double sigma, const double sigma_cutoff,
    const double *frequency_points, const long num_band0,
    const long (*triplets)[3], const long num_triplets,
    const double *frequencies, const long num_band, const long tp_type) {
    tpl_get_integration_weight_with_sigma(
        iw, iw_zero, sigma, sigma_cutoff, frequency_points, num_band0, triplets,
        num_triplets, frequencies, num_band, tp_type);
}
/* From single address to grid index */
/* Thin wrapper over grg_get_grid_index(). */
long ph3py_get_grid_index_from_address(const long address[3],
                                       const long D_diag[3]) {
    return grg_get_grid_index(address, D_diag);
}
/* Fill gr_grid_addresses with all generalized-regular-grid addresses for
 * the mesh given by D_diag.  Thin wrapper over
 * grg_get_all_grid_addresses(). */
void ph3py_get_gr_grid_addresses(long gr_grid_addresses[][3],
                                 const long D_diag[3]) {
    grg_get_all_grid_addresses(gr_grid_addresses, D_diag);
}
/* Build the reciprocal-space point group (at most 48 operations) from the
 * given rotations; the trailing 1 requests the transformed convention used
 * by grg_get_reciprocal_point_group.  Returns the number of operations. */
long ph3py_get_reciprocal_rotations(long rec_rotations[48][3][3],
                                    const long (*rotations)[3][3],
                                    const long num_rot,
                                    const long is_time_reversal) {
    return grg_get_reciprocal_point_group(rec_rotations, rotations, num_rot,
                                          is_time_reversal, 1);
}
/* Rotation matrices with respect to reciprocal basis vectors are
 * transformed to those for GRGrid. This set of the rotations are
 * used always in GRGrid handling. */
/* Thin wrapper over grg_transform_rotations(); returns its status. */
long ph3py_transform_rotations(long (*transformed_rots)[3][3],
                               const long (*rotations)[3][3],
                               const long num_rot, const long D_diag[3],
                               const long Q[3][3]) {
    return grg_transform_rotations(transformed_rots, rotations, num_rot, D_diag,
                                   Q);
}
/* Smith normal form of a 3x3 integer matrix: A = P^-1 diag(D_diag) Q^-1.
 * Thin wrapper over grg_get_snf3x3(); returns its status. */
long ph3py_get_snf3x3(long D_diag[3], long P[3][3], long Q[3][3],
                      const long A[3][3]) {
    return grg_get_snf3x3(D_diag, P, Q, A);
}
/* The rotations are those after proper transformation in GRGrid. */
/* Fill ir_grid_map via grg_get_ir_grid_map() and return the number of
 * irreducible grid points.  A grid point is irreducible exactly when it
 * maps onto itself. */
long ph3py_get_ir_grid_map(long *ir_grid_map, const long D_diag[3],
                           const long PS[3], const long (*grg_rotations)[3][3],
                           const long num_rot) {
    long gp, num_grid, count_ir;

    grg_get_ir_grid_map(ir_grid_map, grg_rotations, num_rot, D_diag, PS);

    num_grid = D_diag[0] * D_diag[1] * D_diag[2];
    count_ir = 0;
    for (gp = 0; gp < num_grid; gp++) {
        count_ir += (ir_grid_map[gp] == gp);
    }
    return count_ir;
}
/* Build the Brillouin-zone grid addresses, the GR->BZ map, and the BZ->GR
 * map.  Returns the number of BZ grid points (bzgrid->size) on success and
 * 0 on failure (allocation failure or bzg_get_bz_grid_addresses error). */
long ph3py_get_bz_grid_addresses(long (*bz_grid_addresses)[3], long *bz_map,
                                 long *bzg2grg, const long D_diag[3],
                                 const long Q[3][3], const long PS[3],
                                 const double rec_lattice[3][3],
                                 const long type) {
    BZGrid *bzgrid;
    long i, j, size;

    if ((bzgrid = (BZGrid *)malloc(sizeof(BZGrid))) == NULL) {
        warning_print("Memory could not be allocated.");
        return 0;
    }

    /* The descriptor only borrows the caller's output arrays; results are
     * written through these pointers. */
    bzgrid->addresses = bz_grid_addresses;
    bzgrid->gp_map = bz_map;
    bzgrid->bzg2grg = bzg2grg;
    bzgrid->type = type;
    for (i = 0; i < 3; i++) {
        bzgrid->D_diag[i] = D_diag[i];
        bzgrid->PS[i] = PS[i];
        for (j = 0; j < 3; j++) {
            bzgrid->Q[i][j] = Q[i][j];
            bzgrid->reclat[i][j] = rec_lattice[i][j];
        }
    }

    if (bzg_get_bz_grid_addresses(bzgrid)) {
        size = bzgrid->size;
    } else {
        size = 0;
    }

    free(bzgrid);
    bzgrid = NULL;

    return size;
}
/* Apply a rotation to a BZ grid index and return the rotated index.
 *
 * The ConstBZGrid descriptor is small and fixed-size, so it lives on the
 * stack; this removes the heap allocation, its failure path, and the
 * matching free() of the previous implementation. */
long ph3py_rotate_bz_grid_index(const long bz_grid_index,
                                const long rotation[3][3],
                                const long (*bz_grid_addresses)[3],
                                const long *bz_map, const long D_diag[3],
                                const long PS[3], const long bz_grid_type) {
    ConstBZGrid bzgrid;
    long i;

    bzgrid.addresses = bz_grid_addresses;
    bzgrid.gp_map = bz_map;
    bzgrid.type = bz_grid_type;
    for (i = 0; i < 3; i++) {
        bzgrid.D_diag[i] = D_diag[i];
        /* PS is zeroed exactly as the original did; the PS argument is not
         * copied into the descriptor here -- NOTE(review): confirm that
         * bzg_rotate_grid_index() really expects a zero shift. */
        bzgrid.PS[i] = 0;
    }

    return bzg_rotate_grid_index(bz_grid_index, rotation, &bzgrid);
}
/* Symmetrize each (sigma, temperature) block of the collision matrix in
 * place: every off-diagonal pair (row, col)/(col, row) is replaced by its
 * arithmetic mean.  The flattened layout is
 * collision_matrix[sigma][temp][num_column][num_column]. */
void ph3py_symmetrize_collision_matrix(double *collision_matrix,
                                       const long num_column,
                                       const long num_temp,
                                       const long num_sigma) {
    long i_sigma, i_temp, row, col, block_shift;
    double mean;

    for (i_sigma = 0; i_sigma < num_sigma; i_sigma++) {
        for (i_temp = 0; i_temp < num_temp; i_temp++) {
            block_shift =
                num_column * num_column * (i_sigma * num_temp + i_temp);
#ifdef PHPYOPENMP
#pragma omp parallel for schedule(guided) private(col, mean)
#endif
            for (row = 0; row < num_column; row++) {
                for (col = row + 1; col < num_column; col++) {
                    mean = (collision_matrix[block_shift + row * num_column +
                                             col] +
                            collision_matrix[block_shift + col * num_column +
                                             row]) *
                           0.5;
                    collision_matrix[block_shift + row * num_column + col] =
                        mean;
                    collision_matrix[block_shift + col * num_column + row] =
                        mean;
                }
            }
        }
    }
}
/* Expand the collision matrix computed on the irreducible grid points to
 * all grid points by applying the rotation operations.
 *
 * Flattened layout (from the index arithmetic below):
 *   collision_matrix[sigma][temp][gp][band][gp][band]
 *   rot_grid_points[rotation][grid_point] -> rotated grid-point index */
void ph3py_expand_collision_matrix(double *collision_matrix,
                                   const long *rot_grid_points,
                                   const long *ir_grid_points,
                                   const long num_ir_gp,
                                   const long num_grid_points,
                                   const long num_rot, const long num_sigma,
                                   const long num_temp, const long num_band)
{
    long i, j, k, l, m, n, p, adrs_shift, adrs_shift_plus, ir_gp, gp_r;
    long num_column, num_bgb;
    long *multi;
    double *colmat_copy;

    /* NOTE(review): this malloc (and colmat_copy below) is used without a
     * NULL check; on allocation failure this dereferences NULL. */
    multi = (long *)malloc(sizeof(long) * num_ir_gp);
    colmat_copy = NULL;
    num_column = num_grid_points * num_band;
    num_bgb = num_band * num_grid_points * num_band;

    /* multi[i]: number of rotations that leave the i-th irreducible grid
     * point fixed (its multiplicity); used to normalize below. */
#ifdef PHPYOPENMP
#pragma omp parallel for schedule(guided) private(j, ir_gp)
#endif
    for (i = 0; i < num_ir_gp; i++) {
        ir_gp = ir_grid_points[i];
        multi[i] = 0;
        for (j = 0; j < num_rot; j++) {
            if (rot_grid_points[j * num_grid_points + ir_gp] == ir_gp) {
                multi[i]++;
            }
        }
    }

    for (i = 0; i < num_sigma; i++) {
        for (j = 0; j < num_temp; j++) {
            adrs_shift = (i * num_column * num_column * num_temp +
                          j * num_column * num_column);
#ifdef PHPYOPENMP
#pragma omp parallel for private(ir_gp, adrs_shift_plus, colmat_copy, l, gp_r, \
                                     m, n, p)
#endif
            for (k = 0; k < num_ir_gp; k++) {
                ir_gp = ir_grid_points[k];
                adrs_shift_plus = adrs_shift + ir_gp * num_bgb;
                /* Take a multiplicity-normalized copy of this irreducible
                 * grid point's block and zero the block, so the rotated
                 * contributions can be accumulated from scratch. */
                colmat_copy = (double *)malloc(sizeof(double) * num_bgb);
                for (l = 0; l < num_bgb; l++) {
                    colmat_copy[l] =
                        collision_matrix[adrs_shift_plus + l] / multi[k];
                    collision_matrix[adrs_shift_plus + l] = 0;
                }
                /* Scatter the copy to every rotated image of ir_gp, also
                 * rotating the second grid-point index (n). */
                for (l = 0; l < num_rot; l++) {
                    gp_r = rot_grid_points[l * num_grid_points + ir_gp];
                    for (m = 0; m < num_band; m++) {
                        for (n = 0; n < num_grid_points; n++) {
                            for (p = 0; p < num_band; p++) {
                                collision_matrix
                                    [adrs_shift + gp_r * num_bgb +
                                     m * num_grid_points * num_band +
                                     rot_grid_points[l * num_grid_points + n] *
                                         num_band +
                                     p] +=
                                    colmat_copy[m * num_grid_points * num_band +
                                                n * num_band + p];
                            }
                        }
                    }
                }
                free(colmat_copy);
                colmat_copy = NULL;
            }
        }
    }

    free(multi);
    multi = NULL;
}
/* tpi_get_neighboring_grid_points around multiple grid points for using openmp
 *
 * relative_grid_addresses are given as P multiplied with those from dataset,
 * i.e.,
 * np.dot(relative_grid_addresses, P.T) */
/* NOTE(review): "gird" in the function name is a typo for "grid"; the name
 * is part of the public interface, so it is kept for compatibility.
 * Returns 1 on success, 0 when the temporary descriptor cannot be
 * allocated. */
long ph3py_get_neighboring_gird_points(
    long *relative_grid_points, const long *grid_points,
    const long (*relative_grid_address)[3], const long D_diag[3],
    const long (*bz_grid_addresses)[3], const long *bz_map,
    const long bz_grid_type, const long num_grid_points,
    const long num_relative_grid_address) {
    long i;
    ConstBZGrid *bzgrid;

    if ((bzgrid = (ConstBZGrid *)malloc(sizeof(ConstBZGrid))) == NULL) {
        warning_print("Memory could not be allocated.");
        return 0;
    }

    bzgrid->addresses = bz_grid_addresses;
    bzgrid->gp_map = bz_map;
    bzgrid->type = bz_grid_type;
    for (i = 0; i < 3; i++) {
        bzgrid->D_diag[i] = D_diag[i];
    }

    /* one independent neighbor search per grid point */
#ifdef PHPYOPENMP
#pragma omp parallel for
#endif
    for (i = 0; i < num_grid_points; i++) {
        tpi_get_neighboring_grid_points(
            relative_grid_points + i * num_relative_grid_address,
            grid_points[i], relative_grid_address, num_relative_grid_address,
            bzgrid);
    }

    free(bzgrid);
    bzgrid = NULL;

    return 1;
}
/* thm_get_integration_weight at multiple grid points for using openmp
 *
 * relative_grid_addresses are given as P multiplied with those from dataset,
 * i.e.,
 * np.dot(relative_grid_addresses, P.T) */
/* For each grid point: find its 24 surrounding tetrahedra (4 vertices
 * each), gather per-band vertex frequencies, and evaluate the tetrahedron
 * integration weight at each frequency point.
 * iw layout (flattened): iw[grid_point][frequency_point][band].
 * Returns 1 on success, 0 when the temporary descriptor cannot be
 * allocated. */
long ph3py_get_thm_integration_weights_at_grid_points(
    double *iw, const double *frequency_points, const long num_frequency_points,
    const long num_band, const long num_gp,
    const long (*relative_grid_address)[4][3], const long D_diag[3],
    const long *grid_points, const long (*bz_grid_addresses)[3],
    const long *bz_map, const long bz_grid_type, const double *frequencies,
    const long *gp2irgp_map, const char function) {
    long i, j, k, bi;
    long vertices[24][4];
    double freq_vertices[24][4];
    ConstBZGrid *bzgrid;

    if ((bzgrid = (ConstBZGrid *)malloc(sizeof(ConstBZGrid))) == NULL) {
        warning_print("Memory could not be allocated.");
        return 0;
    }

    bzgrid->addresses = bz_grid_addresses;
    bzgrid->gp_map = bz_map;
    bzgrid->type = bz_grid_type;
    for (i = 0; i < 3; i++) {
        bzgrid->D_diag[i] = D_diag[i];
    }

#ifdef PHPYOPENMP
#pragma omp parallel for private(j, k, bi, vertices, freq_vertices)
#endif
    for (i = 0; i < num_gp; i++) {
        /* grid indices of the 4 vertices of each of the 24 tetrahedra */
        for (j = 0; j < 24; j++) {
            tpi_get_neighboring_grid_points(vertices[j], grid_points[i],
                                            relative_grid_address[j], 4,
                                            bzgrid);
        }
        for (bi = 0; bi < num_band; bi++) {
            /* frequencies are stored per irreducible grid point, hence the
             * gp2irgp_map lookup */
            for (j = 0; j < 24; j++) {
                for (k = 0; k < 4; k++) {
                    freq_vertices[j][k] =
                        frequencies[gp2irgp_map[vertices[j][k]] * num_band +
                                    bi];
                }
            }
            for (j = 0; j < num_frequency_points; j++) {
                iw[i * num_frequency_points * num_band + j * num_band + bi] =
                    thm_get_integration_weight(frequency_points[j],
                                               freq_vertices, function);
            }
        }
    }

    free(bzgrid);
    bzgrid = NULL;

    return 1;
}
|
GroupDegree.h | /*
* GroupDegree.h
*
* Created on: 20.04.2018
* Author: Eugenio Angriman
*/
#ifndef GROUPDEGREE_H_
#define GROUPDEGREE_H_
#include <omp.h>
#include "../auxiliary/BucketPQ.h"
#include "../base/Algorithm.h"
#include "../graph/Graph.h"
namespace NetworKit {
/**
* @ingroup centrality
*/
class GroupDegree : public Algorithm {
  public:
    /**
     * Finds the group with the highest group degree centrality according to the
     * definition proposed in 'The centrality of groups and classes' by Everett
     * et al. (The Journal of mathematical sociology, 1999). This is a
     * submodular but non-monotone function, so the algorithm can find a
     * solution that is at least 1/2 of the optimum. Worst-case running time is
     * quadratic, but usually faster in real-world networks.
     * The 'countGroupNodes' option also counts the nodes inside the group in
     * the score; this makes the group degree monotone and submodular, and the
     * algorithm is then guaranteed to return a (1 - 1/e)-approximation of the
     * optimal solution.
     *
     * @param G A graph.
     * @param k Size of the group of nodes
     * @param countGroupNodes if nodes inside the group should be counted in the
     * centrality score.
     */
    GroupDegree(const Graph &G, count k = 1, bool countGroupNodes = true);

    /**
     * Computes the group with maximum degree centrality of the graph passed in
     * the constructor.
     */
    void run() override;

    /**
     * Returns the group with maximum degree centrality.
     */
    std::vector<node> groupMaxDegree();

    /**
     * Returns the score of the group with maximum degree centrality (i.e. the
     * number of nodes outside the group that can be reached in one hop from at
     * least one node in the group).
     */
    count getScore();

    /**
     * Returns the score of the given group.
     */
    count scoreOfGroup(const std::vector<node> &group) const;

  protected:
    Graph G;                       // working copy of the input graph
    const count k;                 // requested group size
    const bool countGroupNodes;    // include group members in the score?
    count n;                       // node-id bound; set in init() -- TODO confirm
    std::vector<node> group;       // the selected group (result of run())
    std::vector<int64_t> gain;     // marginal gain per candidate node
    std::vector<bool> reachable;   // nodes reachable from the current group
    std::vector<bool> affected;    // nodes whose gain must be recomputed
    std::vector<bool> inGroup;     // membership flags for the current group
    Aux::BucketPQ queue;           // priority queue over candidate gains
    count groupScore;              // cached score of the selected group

    void init();
    void updateQueue();
    void updateGroup();
    void computeScore();
    void checkHasRun();
    void checkGroup(const std::vector<node> &group) const;
};
// Returns the group computed by run(); throws if run() was not called.
inline std::vector<node> GroupDegree::groupMaxDegree() {
    checkHasRun();
    return group;
}
// Returns the cached score of the computed group; throws if run() was not
// called.
inline count GroupDegree::getScore() {
    checkHasRun();
    return groupScore;
}
inline void GroupDegree::computeScore() {
groupScore = std::count(reachable.begin(), reachable.end(), true);
if (!countGroupNodes) {
groupScore -= k;
}
}
// Guard used by the accessors: rejects queries before run() completed.
inline void GroupDegree::checkHasRun() {
    if (!hasRun) {
        throw std::runtime_error("Run method has not been called.");
    }
}
inline void GroupDegree::checkGroup(const std::vector<node> &group) const {
const count z = G.upperNodeIdBound();
std::vector<bool> check(z, false);
#pragma omp parallel for
for (omp_index i = 0; i < static_cast<omp_index>(group.size()); ++i) {
node u = group[i];
if (u >= z) {
std::stringstream ss;
ss << "Error: node " << u << " is not in the graph.\n";
throw std::runtime_error(ss.str());
}
if (check[u]) {
std::stringstream ss;
ss << "Error: the group contains duplicates of node " << u << ".\n";
throw std::runtime_error(ss.str());
}
check[u] = true;
}
}
/**
 * Computes the group degree score of an arbitrary (validated) group: the
 * number of non-group nodes with at least one in-neighbor inside the
 * group, plus |group| itself when countGroupNodes is set.
 */
inline count GroupDegree::scoreOfGroup(const std::vector<node> &group) const {
    checkGroup(group);
    std::vector<bool> touched(n, false);
    // NOTE(review): this local 'inGroup' intentionally-or-not shadows the
    // class member of the same name.
    std::vector<bool> inGroup(n, false);
    for (count i = 0; i < group.size(); ++i) {
        inGroup[group[i]] = true;
    }

    // mark v as touched when a group member u points to it
    auto processNeighbor = [&](const node u, const node v) {
        if (inGroup[u]) {
            touched[v] = true;
        }
    };

    G.forNodes([&](node v) {
        if (!inGroup[v]) {
            G.forInNeighborsOf(v, [&](node u) { processNeighbor(u, v); });
        }
    });

    count result = std::count(touched.begin(), touched.end(), true);
    if (countGroupNodes) {
        result += group.size();
    }
    return result;
}
} // namespace NetworKit
#endif
|
syncs.c | //-------------------------------------------------------------------------//
// //
// This benchmark is an OpenMP C version of the NPB LU code. This OpenMP //
// C version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the OpenMP Fortran versions in //
// "NPB3.3-OMP" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this OpenMP C version to //
// cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include "applu.incl"
//---------------------------------------------------------------------
// Thread synchronization for pipeline operation
//---------------------------------------------------------------------
// Blocks the calling thread until its left neighbor (thread iam-1) has
// raised its ready flag, then consumes the flag.  iam, isync, and
// mthreadnum are globals from applu.incl (presumably thread id, flag
// array, and thread count -- confirm there).
void sync_left(int ldmx, int ldmy, int ldmz,
               double v[ldmz][ldmy/2*2+1][ldmx/2*2+1][5])
{
    int neigh;

    if (iam > 0 && iam <= mthreadnum) {
        neigh = iam - 1;
        // spin until the neighbor raises its flag; the flush forces a
        // fresh read of isync from memory each iteration
        while (isync[neigh] == 0) {
            #pragma omp flush(isync)
        }
        // consume the flag and make the neighbor's writes to v visible
        isync[neigh] = 0;
        #pragma omp flush(isync,v)
    }
}
//---------------------------------------------------------------------
// Thread synchronization for pipeline operation
//---------------------------------------------------------------------
// Publishes this thread's slice of v and signals the right neighbor by
// raising isync[iam].  Waits first until the neighbor has consumed the
// previous flag (isync[iam] back to 0).
void sync_right(int ldmx, int ldmy, int ldmz,
                double v[ldmz][ldmy/2*2+1][ldmx/2*2+1][5])
{
    if (iam < mthreadnum) {
        // make this thread's writes to v visible before signalling
        #pragma omp flush(isync,v)
        // wait until the neighbor has cleared the previous flag
        while (isync[iam] == 1) {
            #pragma omp flush(isync)
        }
        isync[iam] = 1;
        #pragma omp flush(isync)
    }
}
|
GB_unaryop__minv_uint64_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint64_bool
// op(A') function: GB_tran__minv_uint64_bool
// C type: uint64_t
// A type: bool
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 64)
#define GB_ATYPE \
bool
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 64) ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT64 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__minv_uint64_bool
(
    uint64_t *restrict Cx,      // output array, length anz
    const bool *restrict Ax,    // input array, length anz
    int64_t anz,                // number of entries
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // entries are independent, so a static schedule balances the work
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // Cx [p] = GB_IMINV_UNSIGNED ((uint64_t) Ax [p], 64)
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__minv_uint64_bool
(
    GrB_Matrix C,                       // output matrix
    const GrB_Matrix A,                 // input matrix (transposed on the fly)
    int64_t **Rowcounts,                // row counts from phase 1
    GBI_single_iterator Iter,           // iterator over A's vectors
    const int64_t *restrict A_slice,    // how A is split across threads
    int naslice                         // number of slices
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template applies GB_CAST_OP per entry
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
hermm_c_dia_u_hi_col_trans.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include <memory.h>
#include <stdlib.h>
/* y := alpha*op(A)*X + beta*Y for a Hermitian sparse matrix A in DIA
 * (diagonal) storage, applied to a dense multi-vector X (columns columns,
 * leading dimensions ldx/ldy).  From the file name this instantiation is:
 * unit diagonal ("u"), upper triangle stored ("hi"), column-major dense
 * operands ("col"), conjugate-transpose op ("trans") -- NOTE(review):
 * confirm against the dispatch table that defines ONAME. */
alphasparse_status_t ONAME(const ALPHA_Complex alpha, const ALPHA_SPMAT_DIA *mat, const ALPHA_Complex *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Complex beta, ALPHA_Complex *y, const ALPHA_INT ldy)
{
    ALPHA_INT num_threads = alpha_get_thread_num();

    /* Phase 1: y = beta*y + alpha*x; the alpha*x term is the contribution
     * of the implicit unit diagonal. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for(ALPHA_INT j = 0; j < columns; j++)
        for (ALPHA_INT i = 0; i < mat->rows; i++){
            alpha_mul(y[index2(j,i,ldy)],y[index2(j,i,ldy)],beta);
            alpha_madde(y[index2(j,i,ldy)],x[index2(j,i,ldx)],alpha);
        }

    /* Phase 2: accumulate the stored strictly-upper diagonals (d > 0) and
     * their Hermitian mirror images.  For each stored element A[ar][ac]:
     *   Y[ar] += alpha*conj(A[ar][ac]) * X[ac]
     *   Y[ac] += alpha*A[ar][ac]       * X[ar]
     */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT cc = 0; cc < columns; ++cc)
    {
        ALPHA_Complex* Y = &y[index2(cc,0,ldy)];
        const ALPHA_Complex* X = &x[index2(cc,0,ldx)];
        for(ALPHA_INT di = 0; di < mat->ndiag;++di){
            ALPHA_INT d = mat->distance[di];
            if(d > 0){
                /* row/column start of this diagonal and its length */
                ALPHA_INT ars = alpha_max(0,-d);
                ALPHA_INT acs = alpha_max(0,d);
                ALPHA_INT an = alpha_min(mat->rows - ars,mat->cols - acs);
                for(ALPHA_INT i = 0; i < an; ++i){
                    ALPHA_INT ar = ars + i;
                    ALPHA_INT ac = acs + i;
                    ALPHA_Complex val,val_c;
                    alpha_mul(val,mat->values[index2(di,ar,mat->lval)],alpha);
                    alpha_mul_2c(val_c,mat->values[index2(di,ar,mat->lval)],alpha);
                    alpha_madde(Y[ar],val_c,X[ac]);
                    alpha_madde(Y[ac],val,X[ar]);
                }
            }
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
fibonacci.c | #include "timing.h"
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/* Sequential Fibonacci used below the task-creation cutoff. */
static long int fibonacci_seq(int n) {
    if (n < 2) {
        return n;
    }
    return fibonacci_seq(n - 1) + fibonacci_seq(n - 2);
}

/* Naive task-parallel Fibonacci.
 *
 * Fix: the original spawned two OpenMP tasks at *every* recursion level,
 * so task-management overhead dwarfed the (tiny) per-node work.  Small
 * subproblems are now solved sequentially; the returned values are
 * unchanged. */
long int fibonacci(int n) {
    long int x, y;
    if (n < 2) {
        return n;
    }
    /* cutoff: below this, creating tasks costs more than it saves */
    if (n < 25) {
        return fibonacci_seq(n);
    }
    #pragma omp task shared(x)
    x = fibonacci(n - 1);
    #pragma omp task shared(y)
    y = fibonacci(n - 2);
    #pragma omp taskwait
    return (x + y);
}
/* Driver: times fibonacci(42) inside a parallel region; a single thread
 * starts the root call and the task runtime fans out the work. */
int main() {
    const int n = 42;
    long int fib = 0;
    double t_begin, t_end;

    t_begin = second();
#pragma omp parallel
    {
#pragma omp single nowait
        {
            fib = fibonacci(n);
        }
    }
    t_end = second();

    printf("fib(%d) = %ld (in %g [s])\n", n, fib, (t_end - t_begin));
    return 0;
}
|
thdat02.c | /*
* Redistribution and use in source and binary forms, with
* or without modification, are permitted provided that the
* following conditions are met:
*
* 1. Redistributions of source code must retain this list
* of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce this
* list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
#include <config.h>
#include <stdlib.h>
#include <thtk/thtk.h>
#include "thdat.h"
#include "thrle.h"
#include "util.h"
#include "dattypes.h"
/* Read a TH02 (version <= 2) or TH03 archive header plus entry table and
 * populate thdat->entries.  Returns 1 on success, 0 on failure.
 *
 * NOTE(review): the mallocs below are unchecked, and the early error
 * returns after them leak th02_entry_headers / th03_entry_headers (and
 * thdat->entries) -- worth hardening with goto-style cleanup. */
static int
th02_open(
    thdat_t* thdat,
    thtk_error_t** error)
{
    th03_archive_header_t th03_archive_header;
    th02_entry_header_t* th02_entry_headers = NULL;
    th03_entry_header_t* th03_entry_headers = NULL;

    if (thdat->version <= 2) {
        /* TH02 has no archive header: the entry count is derived from the
         * offset stored in the first entry header (the table holds
         * entry_count + 1 headers, terminator included). */
        th02_entry_header_t eh2;
        if (thtk_io_read(thdat->stream, &eh2, sizeof(eh2), error) != sizeof(eh2))
            return 0;
        if (thtk_io_seek(thdat->stream, 0, SEEK_SET, error) == -1)
            return 0;
        if (eh2.offset % sizeof(eh2)) {
            thtk_error_new(error, "first entry offset invalid");
            return 0;
        }
        thdat->entry_count = (eh2.offset / sizeof(eh2)) - 1;
    } else {
        if (thtk_io_read(thdat->stream, &th03_archive_header, sizeof(th03_archive_header), error) != sizeof(th03_archive_header))
            return 0;
        thdat->entry_count = th03_archive_header.count;
    }

    thdat->entries = malloc(thdat->entry_count * sizeof(thdat_entry_t));

    if (thdat->version <= 2) {
        th02_entry_headers = malloc(thdat->entry_count * sizeof(th02_entry_header_t));
        if (thtk_io_read(thdat->stream, th02_entry_headers, thdat->entry_count * sizeof(th02_entry_header_t), error) !=
            (ssize_t)(thdat->entry_count * sizeof(th02_entry_header_t)))
            return 0;
    } else {
        th03_entry_headers = malloc(thdat->entry_count * sizeof(th03_entry_header_t));
        if (thtk_io_read(thdat->stream, th03_entry_headers, thdat->entry_count * sizeof(th03_entry_header_t), error) !=
            (ssize_t)(thdat->entry_count * sizeof(th03_entry_header_t)))
            return 0;

        /* TH03 entry tables are obfuscated with a running byte key. */
        unsigned char* data = (unsigned char*)th03_entry_headers;
        for (size_t i = 0; i < thdat->entry_count * sizeof(th03_entry_header_t); ++i) {
            data[i] ^= th03_archive_header.key;
            th03_archive_header.key -= data[i];
        }
    }

    for (unsigned int e = 0; e < thdat->entry_count; ++e) {
        thdat_entry_t* entry = &thdat->entries[e];

        /* per-entry XOR key: fixed per-version table for TH02, stored in
         * the entry header for TH03 */
        entry->extra = thdat->version <= 2
            ? th02_keys[thdat->version - 1] /* th02_entry_headers[e].key */
            : th03_entry_headers[e].key;

        if (thdat->version <= 2) {
            /* TH02 filenames are stored bitwise-complemented */
            for (unsigned int i = 0; i < 13 && th02_entry_headers[e].name[i]; ++i)
                th02_entry_headers[e].name[i] ^= 0xff;
        }
        memcpy(entry->name, thdat->version <= 2
            ? th02_entry_headers[e].name
            : th03_entry_headers[e].name, 13);

        entry->zsize = thdat->version <= 2
            ? th02_entry_headers[e].zsize
            : th03_entry_headers[e].zsize;
        entry->size = thdat->version <= 2
            ? th02_entry_headers[e].size
            : th03_entry_headers[e].size;
        entry->offset = thdat->version <= 2
            ? th02_entry_headers[e].offset
            : th03_entry_headers[e].offset;
    }

    free(th02_entry_headers);
    free(th03_entry_headers);

    return 1;
}
/* Extract entry entry_index into output: undo the per-entry XOR, then
 * either copy verbatim (stored uncompressed, size == zsize) or
 * RLE-decompress.  Returns the number of bytes written, or -1 on error. */
static ssize_t
th02_read(
    thdat_t* thdat,
    int entry_index,
    thtk_io_t* output,
    thtk_error_t** error)
{
    thdat_entry_t* entry = &thdat->entries[entry_index];
    unsigned char* data;
    ssize_t ret;

    /* mapping the shared archive stream is serialized across threads */
#pragma omp critical
{
    data = thtk_io_map(thdat->stream, entry->offset, entry->zsize, error);
}
    if (!data)
        return -1;

    /* strip the per-entry XOR obfuscation in place */
    for (ssize_t i = 0; i < entry->zsize; ++i)
        data[i] ^= entry->extra;

    if (entry->size == entry->zsize) {
        ret = thtk_io_write(output, data, entry->zsize, error);
        thtk_io_unmap(thdat->stream, data);
    } else {
        /* NOTE(review): on this path 'data' is never passed to
         * thtk_io_unmap(); unless thtk_io_open_memory()/thtk_io_close()
         * takes ownership of the mapping, this leaks -- verify against
         * the thtk_io contract. */
        thtk_io_t* data_stream = thtk_io_open_memory(data, entry->zsize, error);
        if (!data_stream)
            return -1;
        ret = thtk_unrle(data_stream, entry->zsize, output, error);
        thtk_io_close(data_stream);
    }

    return ret;
}
/* Reserve room for the entry table at the start of the stream by seeking
 * past it; entry data is appended from this offset on.  Returns 1 on
 * success, 0 on failure. */
static int
th02_create(
    thdat_t* thdat,
    thtk_error_t** error)
{
    const size_t table_entries = thdat->entry_count + 1;

    if (thdat->version <= 2)
        thdat->offset = table_entries * sizeof(th02_entry_header_t);
    else
        thdat->offset = sizeof(th03_archive_header_t) +
                        table_entries * sizeof(th03_entry_header_t);

    return thtk_io_seek(thdat->stream, thdat->offset, SEEK_SET, error) != -1;
}
/* TODO: Find out if lowercase filenames are supported. */
/* RLE-compress and XOR-encrypt one entry, then append it to the archive
 * stream; stores the input verbatim when RLE would not shrink it.
 * Returns the number of bytes written, or -1 on error. */
static ssize_t
th02_write(
    thdat_t* thdat,
    int entry_index,
    thtk_io_t* input,
    size_t input_length,
    thtk_error_t** error)
{
    thdat_entry_t* entry = &thdat->entries[entry_index];
    entry->size = input_length;

    off_t input_offset = thtk_io_seek(input, 0, SEEK_CUR, error);
    if (input_offset == -1)
        return -1;

    if (thdat->version <= 2) {
        /* TH02 filenames are stored bitwise-complemented */
        for (unsigned int i = 0; i < 13; ++i)
            if (entry->name[i])
                entry->name[i] ^= 0xff;
    }

    thtk_io_t* output = thtk_io_open_growing_memory(error);
    if (!output)
        return -1;
    if ((entry->zsize = thtk_rle(input, entry->size, output, error)) == -1)
        return -1;
    if (entry->zsize >= entry->size) {
        /* compression did not help: rewind and store the raw input */
        entry->zsize = entry->size;
        thtk_io_close(output);
        if (thtk_io_seek(input, input_offset, SEEK_SET, error) == -1)
            return -1;
        output = input;
    }

    unsigned char* data = thtk_io_map(output, 0, entry->zsize, error);
    if (!data)
        return -1;
    /* apply the per-entry XOR obfuscation */
    for (ssize_t i = 0; i < entry->zsize; ++i)
        data[i] ^= thdat->version <= 2 ? th02_keys[thdat->version - 1] : entry_key;

    ssize_t ret = -1;
    /* the shared stream position/offset bookkeeping is updated atomically */
#pragma omp critical
{
    entry->offset = thtk_io_seek(thdat->stream, 0, SEEK_CUR, error);
    if (entry->offset != -1)
        ret = thtk_io_write(thdat->stream, data, entry->zsize, error);
    if (ret != -1)
        thdat->offset += ret;
}
    thtk_io_unmap(output, data);

    if (output != input)
        thtk_io_close(output);

    return ret;
}
/* Write the entry table (and, for version > 2, the archive header) back
 * at the beginning of the stream.  Returns 1 on success, 0 on failure.
 *
 * Fixes: the table-encryption loop used a uint16_t index, which never
 * terminates once buffer_size exceeds 65535 (roughly 2000+ entries); the
 * table buffer's malloc result was also used unchecked. */
static int
th02_close(
    thdat_t* thdat,
    thtk_error_t** error)
{
    if (thtk_io_seek(thdat->stream, 0, SEEK_SET, error) == -1)
        return 0;

    const th03_archive_header_t ah3 = {
        .size = (thdat->entry_count + 1) * sizeof(th03_entry_header_t),
        .unknown1 = 2,
        .count = thdat->entry_count,
        .key = archive_key
    };
    if (thdat->version > 2) {
        if (thtk_io_write(thdat->stream, &ah3, sizeof(ah3), error) == -1)
            return 0;
    }

    /* one header per entry plus a zeroed terminator header */
    size_t buffer_size = (thdat->entry_count + 1) * (thdat->version <= 2 ? sizeof(th02_entry_header_t) : sizeof(th03_entry_header_t));
    unsigned char* buffer = malloc(buffer_size);
    if (!buffer) {
        thtk_error_new(error, "allocation failure");
        return 0;
    }
    unsigned char* buffer_ptr = buffer;
    memset(buffer, 0, buffer_size);

    for (size_t i = 0; i < thdat->entry_count; ++i) {
        thdat_entry_t* entry = &thdat->entries[i];
        if (thdat->version <= 2) {
            th02_entry_header_t eh2 = {
                .magic = entry->zsize == entry->size ? magic1 : magic2,
                .key = 3,
                .zsize = entry->zsize,
                .size = entry->size,
                .offset = entry->offset
            };
            memcpy(eh2.name, entry->name, 13);
            buffer_ptr = mempcpy(buffer_ptr, &eh2, sizeof(eh2));
        } else {
            th03_entry_header_t eh3 = {
                .magic = entry->zsize == entry->size ? magic1 : magic2,
                .key = entry_key,
                .zsize = entry->zsize,
                .size = entry->size,
                .offset = entry->offset
            };
            memcpy(eh3.name, entry->name, 13);
            buffer_ptr = mempcpy(buffer_ptr, &eh3, sizeof(eh3));
        }
    }

    if (thdat->version > 2) {
        /* TH03 tables are obfuscated with a running byte key.  The loop
         * index was uint16_t here; use size_t so large tables terminate. */
        uint32_t data_key = archive_key;
        for (size_t i = 0; i < buffer_size; ++i) {
            unsigned char tmp = buffer[i];
            buffer[i] ^= data_key;
            data_key -= tmp;
        }
    }

    if (thtk_io_write(thdat->stream, buffer, buffer_size, error) == -1) {
        free(buffer);
        return 0;
    }
    free(buffer);

    return 1;
}
/* Module descriptor for TH02/TH03-era archives, dispatched through the
 * generic thdat interface; filenames are uppercase 8.3 basenames. */
const thdat_module_t archive_th02 = {
    THDAT_BASENAME | THDAT_UPPERCASE | THDAT_8_3,
    th02_open,
    th02_create,
    th02_close,
    th02_read,
    th02_write
};
|
schelude-clause-ejercicio5.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
/*
 * OpenMP schedule-clause exercise: print the initial dyn-var, nthreads-var
 * and run-sched-var ICVs, modify them, then run a loop with
 * firstprivate/lastprivate 'suma' under schedule(static, chunk) where the
 * chunk size comes from argv[1].
 *
 * Note: 'suma' is deliberately NOT a reduction — each thread accumulates a
 * private copy and lastprivate keeps only the copy of the thread that ran
 * the sequentially-last iteration; demonstrating that is the exercise.
 *
 * Fixes: implicit-int main (invalid since C99), unused variable 'x',
 * magic constant 2 replaced by omp_sched_dynamic, the second status line
 * wrongly claimed "Sin modificar", missing return.
 */
int main(int argc, char **argv) {
    int i, n = 16, chunk, a[n], suma = 0;

    if (argc < 2) {
        fprintf(stderr, "\nFalta chunk \n");
        exit(-1);
    }

    /* Query the ICVs before touching anything. */
    omp_sched_t kind;
    int modifier;
    omp_get_schedule(&kind, &modifier);
    printf("Sin modificar dyn-var %d, nthreads-var %d, run-sched-var %d %d\n",
           omp_get_dynamic(), omp_get_max_threads(), kind, modifier);

    /* Modify the ICVs: 5 threads, dynamic adjustment on,
     * run-sched-var = dynamic with chunk 4. */
    omp_set_num_threads(5);
    omp_set_dynamic(1);
    kind = omp_sched_dynamic;   /* was the magic number 2 */
    modifier = 4;
    omp_set_schedule(kind, modifier);
    omp_get_schedule(&kind, &modifier);
    printf("Tras modificar dyn-var %d, nthreads-var %d, run-sched-var %d %d\n",
           omp_get_dynamic(), omp_get_max_threads(), kind, modifier);

    chunk = atoi(argv[1]);
    for (i = 0; i < n; i++) a[i] = i;

#pragma omp parallel for firstprivate(suma) \
    lastprivate(suma) schedule(static, chunk)
    for (i = 0; i < n; i++)
    {
        suma = suma + a[i];
        printf(" thread %d suma a[%d] suma=%d \n",
               omp_get_thread_num(), i, suma);
    }
    printf("Fuera de 'parallel for' suma=%d\n", suma);
    return 0;
}
|
createWarmupPts.c | /* --------------------------------------------------------------------------
* File: createWarmupPts.c
* Version 1.0
* --------------------------------------------------------------------------
* Licence CC BY 4.0 : Free to share and modify
* Author : Marouen BEN GUEBILA - marouen.benguebila@uni.lu
* --------------------------------------------------------------------------
*/
/* createWarmupPts.c - A hybrid Open MP/MPI parallel optimization of fastFVA
Usage
createWarmupPts <datafile>
<datafile> : .mps file containing LP problem
*/
/*open mp declaration*/
#include <omp.h>
#include "mpi.h"
/* ILOG Cplex declaration*/
#include <ilcplex/cplex.h>
/* Bring in the declarations for the string functions */
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <ctype.h>
/*Forward declaration*/
static void
free_and_null (char **ptr),
usage (char *progname);
/* Copy the n-element vector x into column `ind` of the n-row matrix
 * fluxMat, i.e. fluxMat[row][ind] = x[row] for every row. */
void copyArrMat(double *x, double **fluxMat, int ind, int n){
    for (int row = 0; row < n; ++row)
        fluxMat[row][ind] = x[row];
}
/* Clamp the solution vector x (length n) into the LP's variable bounds
 * [lb, ub], component by component.
 * Fixes: lb/ub were leaked on every call and the mallocs were unchecked. */
void movePtsBds(CPXENVptr env, CPXLPptr lp, double *x, int n){
    double *lb = malloc(n * sizeof *lb);
    double *ub = malloc(n * sizeof *ub);
    if (lb == NULL || ub == NULL) {   /* leave x untouched on OOM */
        free(lb);
        free(ub);
        return;
    }
    CPXgetlb (env, lp, lb, 0, n-1);
    CPXgetub (env, lp, ub, 0, n-1);
    for (int i = 0; i < n; i++) {
        if (x[i] < lb[i])
            x[i] = lb[i];
        else if (x[i] > ub[i])
            x[i] = ub[i];
    }
    free(lb);   /* was leaked in the original */
    free(ub);
}
/* Compute the centroid of the nPts warmup points: centPt[i] becomes the
 * arithmetic mean of row i of fluxMat.
 * Bug fix: the accumulator was declared `int`, so every fractional flux
 * value was truncated before averaging; it must be `double`. */
void createCenterPt(double **fluxMat, int nPts, int n, double *centPt){
    for (int i = 0; i < n; i++) {
        double sum = 0.0;   /* was: int sum (truncating) */
        for (int j = 0; j < nPts; j++) {
            sum += fluxMat[i][j];
        }
        centPt[i] = sum / nPts;
    }
}
/* Contract every warmup point toward the centroid: each entry becomes the
 * convex combination 0.33 * point + 0.67 * center, in place. */
void movePtsCet(double **fluxMat, int nPts, int n, double *centPt){
    for (int row = 0; row < n; ++row) {
        const double pull = 0.67 * centPt[row];   /* invariant per row */
        for (int col = 0; col < nPts; ++col)
            fluxMat[row][col] = fluxMat[row][col] * 0.33 + pull;
    }
}
/* OpenMP-parallel creation of warmup points on this MPI rank.
 * For index i < n the objective is the single variable i; for i >= n a
 * random objective is drawn.  Each index is solved once minimizing
 * (j = +1) and once maximizing (j = -1); maximization results land in
 * even columns 2*i of fluxMat, minimization in odd columns 2*i+1.
 * The interface is unchanged.
 *
 * Fixes vs. the original:
 *  - `values`, `x`, `objval`, `curpreind` and the loop counter `k` were
 *    shared across threads (data races, cross-thread leaks); now local.
 *  - `solstat` was read uninitialized on the first pass of the while
 *    loop (undefined behavior); now initialized before every solve.
 *  - `rand()%1 - 0.5` is always exactly -0.5, so every "random"
 *    objective was the same constant vector; replaced by a uniform draw.
 *  - the zero vector used to clear the objective was the same array the
 *    random objectives were written into, so "clearing" could install
 *    stale random coefficients; a dedicated all-zero array is kept.
 *  - per-thread CPLEX env/problem and buffers are now released. */
void fva(CPXLPptr lp, int n, int scaling, double **fluxMat, int rank, int numprocs, int nPts){
    int status;
    const int cnt = 1;            /* one objective coefficient changed at a time */
    const double one = 1, mOne = -1;
    int chunk = 50;               /* dynamic-schedule chunk size */
    int nthreads;
    int indices[n];               /* column indices 0..n-1 for CPXchgobj */
    /* Fill once, before the region (was a shared-array race inside it). */
    for (int k = 0; k < n; k++)
        indices[k] = k;
    #pragma omp parallel private(status)
    {
        int iters = 0;
        double wTime = omp_get_wtime();
        int tid = omp_get_thread_num();
        if (tid == 0) {
            nthreads = omp_get_num_threads();
            if (rank == 0)
                printf("Number of threads = %d, Number of CPUs = %d\n\n", nthreads, numprocs);
        }
        /* Per-thread CPLEX instance and problem clone. */
        CPXENVptr env = CPXopenCPLEX (&status);
        CPXLPptr lpi = CPXcloneprob(env, lp, &status);
        int curpreind;
        status = CPXsetintparam (env, CPX_PARAM_PARALLELMODE, 1);
        status = CPXsetintparam (env, CPX_PARAM_THREADS, 1);
        status = CPXsetintparam (env, CPX_PARAM_AUXROOTTHREADS, 2);
        if (scaling) {
            /* Scaling change for coupled models (SCAIND = -1). */
            status = CPXsetintparam (env, CPX_PARAM_SCAIND, mOne);
            status = CPXgetintparam (env, CPX_PARAM_SCAIND, &curpreind);
        }
        /* Thread-local buffers; `zeros` is never written after calloc. */
        double *zeros = calloc(n, sizeof *zeros);    /* objective eraser */
        double *values = calloc(n, sizeof *values);  /* random objectives */
        double *x = malloc(n * sizeof *x);           /* primal solution */
        double objval;
        /* Per-thread seed.  NOTE(review): rand() is not guaranteed
         * thread-safe; rand_r() would be a stricter choice. */
        srand((unsigned int)(time(NULL)) ^ omp_get_thread_num());
        #pragma omp for schedule(dynamic,chunk) collapse(2) nowait
        for (int i = rank*nPts/numprocs; i < (rank+1)*nPts/numprocs; i++) {
            for (int j = +1; j > -2; j -= 2) {   /* j = +1: minimize, j = -1: maximize */
                int solstat = 0;                 /* was read uninitialized */
                /* NOTE(review): loops until CPLEX reports optimal
                 * (solstat == 1); never terminates otherwise, as before. */
                while (solstat != 1) {
                    status = CPXchgobj (env, lpi, n, indices, zeros);   /* clear objective */
                    if (i < n) {
                        /* Optimize single variable i. */
                        status = CPXchgobjsen (env, lpi, j);
                        status = CPXchgobj (env, lpi, cnt, &i, &one);
                        status = CPXlpopt (env, lpi);
                        status = CPXsolution (env, lpi, &solstat, &objval, x, NULL, NULL, NULL);
                    } else {
                        /* Random objective, coefficients in [-0.5, 0.5]. */
                        for (int k = 0; k < n; k++)
                            values[k] = (double)rand() / RAND_MAX - 0.5;
                        status = CPXchgobjsen (env, lpi, j);
                        status = CPXchgobj (env, lpi, n, indices, values);
                        status = CPXlpopt (env, lpi);
                        status = CPXsolution (env, lpi, &solstat, &objval, x, NULL, NULL, NULL);
                    }
                }
                iters++;
                movePtsBds(env, lpi, x, n);   /* clamp solution into bounds */
                /* Max (j == -1) -> even column, min -> odd column. */
                copyArrMat(x, fluxMat, (j == -1) ? 2*i : 2*i + 1, n);
            }
        }
        wTime = omp_get_wtime() - wTime;
        printf("Thread %d/%d of process %d/%d did %d iterations in %f s\n",
               omp_get_thread_num(), omp_get_num_threads(), rank+1, numprocs, iters, wTime);
        /* Release per-thread resources (leaked in the original). */
        free(zeros);
        free(values);
        free(x);
        CPXfreeprob(env, &lpi);
        CPXcloseCPLEX(&env);
    }
}
int main (int argc, char **argv){
int status = 0;
double elapsedTime;
struct timespec now, tmstart;
double *cost = NULL;
double *lb = NULL;
double *ub = NULL;
double zero=0;
int cnt=1;
CPXENVptr env = NULL;//CPLEX environment
CPXLPptr lp = NULL;//LP problem
int curpreind,i, j,m,n,mOne=-1,scaling=0, nPts;
double **fluxMat, **globalfluxMat;
int numprocs, rank, namelen;
char processor_name[MPI_MAX_PROCESSOR_NAME];
FILE *fp;
char fileName[100] = "warmup.csv";
char modelName[100], finalName[300], nPtsStr[10];
double *centPt = NULL; // initialize center point
/*Initialize MPI*/
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Get_processor_name(processor_name, &namelen);
/*Check arg number*/
if (rank==0){
if(( argc == 2 ) | ( argc == 3 )){
printf("\nThe model supplied is %s\n", argv[1]);
strcpy(modelName,argv[1]);
}else if( argc > 3 ) {
printf("Too many arguments supplied.\n");
goto TERMINATE;
}else {
printf("One argument expected.\n");
goto TERMINATE;
}
}
/* Initialize the CPLEX environment */
env = CPXopenCPLEX (&status);
if ( env == NULL ) {
char errmsg[CPXMESSAGEBUFSIZE];
fprintf (stderr, "Could not open CPLEX environment.\n");
CPXgeterrorstring (env, status, errmsg);
fprintf (stderr, "%s", errmsg);
goto TERMINATE;
}
/* Turn off output to the screen */
status = CPXsetintparam (env, CPXPARAM_ScreenOutput, CPX_OFF);
if ( status ) {
fprintf (stderr,
"Failure to turn on screen indicator, error %d.\n", status);
goto TERMINATE;
}
/* Turn on data checking */
/*status = CPXsetintparam (env, CPXPARAM_Read_DataCheck, CPX_ON);
if ( status ) {
fprintf (stderr,
"Failure to turn on data checking, error %d.\n", status);
goto TERMINATE;
}*/
/* Create the problem. */
lp = CPXcreateprob (env, &status, "Problem");
if ( lp == NULL ) {
fprintf (stderr, "Failed to create LP.\n");
goto TERMINATE;
}
/*Read problem */
status = CPXreadcopyprob (env, lp, argv[1], NULL);
/*Change problem type*/
status = CPXchgprobtype(env,lp,CPXPROB_LP);
/*Scaling parameter if coupled model*/
if ( argc == 3 ) {
if (atoi(argv[2])==-1){
/*Change of scaling parameter*/
scaling = 1;
status = CPXsetintparam (env, CPX_PARAM_SCAIND, mOne);//1034 is index scaling parameter
status = CPXgetintparam (env, CPX_PARAM_SCAIND, &curpreind);
printf("SCAIND parameter is %d\n",curpreind);
}
}
/* tic. */
clock_gettime(CLOCK_REALTIME, &tmstart);
/*Problem size */
m = CPXgetnumrows (env, lp);
n = CPXgetnumcols (env, lp);
/*Ask for number of warmup points*/
if(rank==0){
printf("How many warmup points should I generate? It should be larger than %d. \n", n*2);
scanf("%d", &nPts);
/* Write the output to the screen. */
printf ("Creating %d warmup points! \n", nPts);
}
/*Dynamically allocate result vector*/
globalfluxMat =(double**)calloc(n , sizeof(double*));//dimension of lines
fluxMat =(double**)calloc(n , sizeof(double*));
for(i=0;i<n;i++){//dimension of columns
fluxMat[i]=(double*)calloc(nPts , sizeof(double));
globalfluxMat[i]=(double*)calloc(nPts , sizeof(double));
}
/*Disable dynamic teams*/
omp_set_dynamic(0);
/*Allocate space for center point*/
centPt =(double*)calloc(n, sizeof(double));
/* Create warmup points */
fva(lp, n, scaling, fluxMat, rank, numprocs, nPts/2);
/*Reduce results*/
MPI_Barrier(MPI_COMM_WORLD);
MPI_Allreduce(fluxMat, globalfluxMat, n, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
/*Create center point*/
createCenterPt(globalfluxMat, nPts, n, centPt);
//for(int k=0;k<n;k++){
// printf("%f\n",centPt[k]);
//}
/*Move points to the center*/
movePtsCet(globalfluxMat, nPts, n, centPt);
/* Print results*/
/*if(rank==0){
for(i=0;i<n;i++){//print results and status
printf("Min %d is %.2f status is %.1f \n",i,globalminFlux[i],globalminsolStat[i]);
printf("Max %d is %.2f status is %.1f \n",i,globalmaxFlux[i],globalmaxsolStat[i]);
}
}*/
/*Save to csv file*/
//itoa(nPts, nPtsStr, 10);
//strcat(nPtsStr, ".csv");
//strcat(modelName, fileName);
modelName[strlen(modelName)-4] = '\0';//remove extension
sprintf(finalName,"%s%d%s",modelName,nPts,fileName);
fp=fopen(finalName,"w+");
if(rank==0){
for(i=0;i<n;i++){
for(j=0;j<nPts-1;j++){
fprintf(fp,"%f,",globalfluxMat[i][j]);
}
fprintf(fp,"%f",globalfluxMat[i][nPts-1]);//print last value
fprintf(fp,"\n");
}
}
fclose(fp);
/*Finalize*/
clock_gettime(CLOCK_REALTIME, &now);
elapsedTime = (double)((now.tv_sec+now.tv_nsec*1e-9) - (double)(tmstart.tv_sec+tmstart.tv_nsec*1e-9));
if (rank==0){
printf("Warmup points created in %.5f seconds.\n", elapsedTime);
}
MPI_Finalize();
TERMINATE:
/* Free up the problem as allocated by CPXcreateprob, if necessary */
if ( lp != NULL ) {
status = CPXfreeprob (env, &lp);
if ( status ) {
fprintf (stderr, "CPXfreeprob failed, error code %d.\n", status);
}
}
/* Free up the CPLEX environment, if necessary */
if ( env != NULL ) {
status = CPXcloseCPLEX (&env);
if ( status > 0 ) {
char errmsg[CPXMESSAGEBUFSIZE];
fprintf (stderr, "Could not close CPLEX environment.\n");
CPXgeterrorstring (env, status, errmsg);
fprintf (stderr, "%s", errmsg);
}
}
free_and_null ((char **) &cost);
free_and_null ((char **) &lb);
free_and_null ((char **) &ub);
return (status);
} /* END main */
/* Function to free up the pointer *ptr, and sets *ptr to NULL */
static void free_and_null (char **ptr){
if ( *ptr != NULL ) {
free (*ptr);
*ptr = NULL;
}
} /* END free_and_null */
/* Print the command-line help for this program to stderr. */
static void usage (char *progname){
    fprintf (stderr, "Usage: %s -X <datafile>\n", progname);
    fputs (" where X is one of the following options: \n"
           " r generate problem by row\n"
           " c generate problem by column\n"
           " Exiting...\n", stderr);
} /* END usage */
|
two_step_v_p_strategy.h | //
// Project Name: KratosPFEMFluidDynamicsApplication $
// Last modified by: $Author: AFranci $
// Date: $Date: January 2016 $
// Revision: $Revision: 0.0 $
//
//
#ifndef KRATOS_TWO_STEP_V_P_STRATEGY_H
#define KRATOS_TWO_STEP_V_P_STRATEGY_H
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/deprecated_variables.h"
#include "includes/cfd_variables.h"
#include "utilities/openmp_utils.h"
#include "processes/process.h"
#include "solving_strategies/schemes/scheme.h"
#include "solving_strategies/strategies/solving_strategy.h"
#include "custom_utilities/mesher_utilities.hpp"
#include "custom_utilities/boundary_normals_calculation_utilities.hpp"
#include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h"
/* #include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme_slip.h" */
#include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver.h"
#include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver_componentwise.h"
#include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h"
#include "custom_utilities/solver_settings.h"
#include "custom_strategies/strategies/gauss_seidel_linear_strategy.h"
#include "pfem_fluid_dynamics_application_variables.h"
#include <stdio.h>
#include <math.h>
namespace Kratos
{
///@addtogroup PFEMFluidDynamicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
template <class TSparseSpace,
class TDenseSpace,
class TLinearSolver>
class TwoStepVPStrategy : public SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(TwoStepVPStrategy);
/// Counted pointer of TwoStepVPStrategy
//typedef boost::shared_ptr< TwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver> > Pointer;
typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TDataType TDataType;
//typedef typename BaseType::DofSetType DofSetType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer StrategyPointerType;
typedef TwoStepVPSolverSettings<TSparseSpace, TDenseSpace, TLinearSolver> SolverSettingsType;
///@}
///@name Life Cycle
///@{
/// Constructor from a solver-settings object: all strategy wiring
/// (sub-solvers, tolerances, echo level) is delegated to InitializeStrategy.
TwoStepVPStrategy(ModelPart &rModelPart,
SolverSettingsType &rSolverConfig) : BaseType(rModelPart)
{
InitializeStrategy(rSolverConfig);
}
/// Full constructor: stores the tolerances/iteration limits and builds the
/// two internal Gauss-Seidel linear strategies — one for the momentum
/// (velocity) system and one for the continuity (pressure) system.
TwoStepVPStrategy(ModelPart &rModelPart,
/*SolverConfiguration<TSparseSpace, TDenseSpace, TLinearSolver>& rSolverConfig,*/
typename TLinearSolver::Pointer pVelocityLinearSolver,
typename TLinearSolver::Pointer pPressureLinearSolver,
bool ReformDofSet = true,
double VelTol = 0.0001,
double PresTol = 0.0001,
int MaxPressureIterations = 1, // Only for predictor-corrector
unsigned int TimeOrder = 2,
unsigned int DomainSize = 2) : BaseType(rModelPart), // Move Mesh flag, pass as input?
mVelocityTolerance(VelTol),
mPressureTolerance(PresTol),
mMaxPressureIter(MaxPressureIterations),
mDomainSize(DomainSize),
mTimeOrder(TimeOrder),
mReformDofSet(ReformDofSet)
{
KRATOS_TRY;
BaseType::SetEchoLevel(1);
// Check that input parameters are reasonable and sufficient.
this->Check();
bool CalculateNormDxFlag = true;
bool ReformDofAtEachIteration = false; // DofSet modification is managed by the fractional step strategy; auxiliary strategies should not modify the DofSet directly.
// Additional Typedefs
typedef typename BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer BuilderSolverTypePointer;
typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
//initializing fractional velocity solution step
typedef Scheme<TSparseSpace, TDenseSpace> SchemeType;
typename SchemeType::Pointer pScheme;
typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new ResidualBasedIncrementalUpdateStaticScheme<TSparseSpace, TDenseSpace>());
pScheme.swap(Temp);
//CONSTRUCTION OF VELOCITY: elimination builder keeps fixed velocity DOFs out of the system.
BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pVelocityLinearSolver));
/* BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver > (pVelocityLinearSolver)); */
this->mpMomentumStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pVelocityLinearSolver, vel_build, ReformDofAtEachIteration, CalculateNormDxFlag));
this->mpMomentumStrategy->SetEchoLevel(BaseType::GetEchoLevel());
vel_build->SetCalculateReactionsFlag(false);
// Pressure system uses a block builder-and-solver (the componentwise
// elimination variant is kept commented out as an alternative).
/* BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolverComponentwise<TSparseSpace, TDenseSpace, TLinearSolver, Variable<double> >(pPressureLinearSolver, PRESSURE)); */
BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pPressureLinearSolver));
this->mpPressureStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pPressureLinearSolver, pressure_build, ReformDofAtEachIteration, CalculateNormDxFlag));
this->mpPressureStrategy->SetEchoLevel(BaseType::GetEchoLevel());
pressure_build->SetCalculateReactionsFlag(false);
KRATOS_CATCH("");
}
/// Destructor.
virtual ~TwoStepVPStrategy() {}
/// Verify that the required variables are registered, the model part buffer
/// is deep enough for the selected time order, and every element passes its
/// own Check(). Returns 0 on success, the first non-zero element error code
/// otherwise; throws on missing variables or insufficient buffer size.
int Check() override
{
KRATOS_TRY;
// Check elements and conditions in the model part
int ierr = BaseType::Check();
if (ierr != 0)
return ierr;
// Variable keys are 0 only when the application was not registered.
if (DELTA_TIME.Key() == 0)
KRATOS_THROW_ERROR(std::runtime_error, "DELTA_TIME Key is 0. Check that the application was correctly registered.", "");
if (BDF_COEFFICIENTS.Key() == 0)
KRATOS_THROW_ERROR(std::runtime_error, "BDF_COEFFICIENTS Key is 0. Check that the application was correctly registered.", "");
// BDF2 needs 3 buffered steps, backward Euler needs 2.
ModelPart &rModelPart = BaseType::GetModelPart();
if (mTimeOrder == 2 && rModelPart.GetBufferSize() < 3)
KRATOS_THROW_ERROR(std::invalid_argument, "Buffer size too small for fractional step strategy (BDF2), needed 3, got ", rModelPart.GetBufferSize());
if (mTimeOrder == 1 && rModelPart.GetBufferSize() < 2)
KRATOS_THROW_ERROR(std::invalid_argument, "Buffer size too small for fractional step strategy (Backward Euler), needed 2, got ", rModelPart.GetBufferSize());
const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
// Stop at the first failing element.
for (ModelPart::ElementIterator itEl = rModelPart.ElementsBegin(); itEl != rModelPart.ElementsEnd(); ++itEl)
{
ierr = itEl->Check(rCurrentProcessInfo);
if (ierr != 0)
break;
}
/* for ( ModelPart::ConditionIterator itCond = rModelPart.ConditionsBegin(); itCond != rModelPart.ConditionsEnd(); ++itCond) */
/* { */
/* ierr = itCond->Check(rCurrentProcessInfo); */
/* if (ierr != 0) break; */
/* } */
return ierr;
KRATOS_CATCH("");
}
/// Solve one time step with the iterative two-step (fractional)
/// velocity-pressure scheme: alternate momentum and continuity solves until
/// both converge (and at least 3 iterations ran) or the iteration cap is
/// reached. The cap is enlarged x3/x2 during the first/second 10 steps and
/// x2 after a time-step change. Always returns 0.0 (NormDp is never updated).
double Solve() override
{
// Initialize BDF2 coefficients
ModelPart &rModelPart = BaseType::GetModelPart();
this->SetTimeCoefficients(rModelPart.GetProcessInfo());
double NormDp = 0.0;
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
double currentTime = rCurrentProcessInfo[TIME];
double timeInterval = rCurrentProcessInfo[DELTA_TIME];
bool timeIntervalChanged = rCurrentProcessInfo[TIME_INTERVAL_CHANGED];
unsigned int stepsWithChangedDt = rCurrentProcessInfo[STEPS_WITH_CHANGED_DT];
unsigned int maxNonLinearIterations = mMaxPressureIter;
KRATOS_INFO("TwoStepVPStrategy") << "\n Solve with two_step_vp strategy at t=" << currentTime << "s" << std::endl;
// Allow extra iterations right after a time-step change ...
if ((timeIntervalChanged == true && currentTime > 10 * timeInterval) || stepsWithChangedDt > 0)
{
maxNonLinearIterations *= 2;
}
// ... and during the start-up phase of the simulation.
if (currentTime < 10 * timeInterval)
{
if (BaseType::GetEchoLevel() > 1)
std::cout << "within the first 10 time steps, I consider the given iteration number x3" << std::endl;
maxNonLinearIterations *= 3;
}
if (currentTime < 20 * timeInterval && currentTime >= 10 * timeInterval)
{
if (BaseType::GetEchoLevel() > 1)
std::cout << "within the second 10 time steps, I consider the given iteration number x2" << std::endl;
maxNonLinearIterations *= 2;
}
bool momentumConverged = true;
bool continuityConverged = false;
bool fixedTimeStep = false;
// Book-keeping flags used only by the (commented) convergence logging.
bool momentumAlreadyConverged = false;
bool continuityAlreadyConverged = false;
/* boost::timer solve_step_time; */
// Iterative solution for pressure
/* unsigned int timeStep = rCurrentProcessInfo[STEP]; */
/* if(timeStep==1){ */
/* unsigned int iter=0; */
/* continuityConverged = this->SolveContinuityIteration(iter,maxNonLinearIterations); */
/* }else if(timeStep==2){ */
/* unsigned int iter=0; */
/* momentumConverged = this->SolveMomentumIteration(iter,maxNonLinearIterations,fixedTimeStep); */
/* }else{ */
// this->UnactiveSliverElements(); //this is done in set_active_flag_mesher_process which is activated from fluid_pre_refining_mesher.py
for (unsigned int it = 0; it < maxNonLinearIterations; ++it)
{
if (BaseType::GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0)
std::cout << "----- > iteration: " << it << std::endl;
// Momentum solve, then mesh/topology update before the pressure solve.
momentumConverged = this->SolveMomentumIteration(it, maxNonLinearIterations, fixedTimeStep);
this->UpdateTopology(rModelPart, BaseType::GetEchoLevel());
if ((momentumConverged == true || it == maxNonLinearIterations - 1) && momentumAlreadyConverged == false)
{
// std::ofstream myfile;
// myfile.open ("momentumConvergedIteration.txt",std::ios::app);
// myfile << currentTime << "\t" << it << "\n";
// myfile.close();
momentumAlreadyConverged = true;
}
if ((continuityConverged == true || it == maxNonLinearIterations - 1) && continuityAlreadyConverged == false)
{
// std::ofstream myfile;
// myfile.open ("continuityConvergedIteration.txt",std::ios::app);
// myfile << currentTime << "\t" << it << "\n";
// myfile.close();
continuityAlreadyConverged = true;
}
if (fixedTimeStep == false)
{
continuityConverged = this->SolveContinuityIteration(it, maxNonLinearIterations);
}
// Last iteration (or joint convergence after >2 iterations): update
// the stress/strain state once per step.
if (it == maxNonLinearIterations - 1 || ((continuityConverged && momentumConverged) && it > 2))
{
//this->ComputeErrorL2Norm();
//this->ComputeErrorL2NormCasePoiseuille();
this->UpdateStressStrain();
// std::ofstream myfile;
// myfile.open ("maxConvergedIteration.txt",std::ios::app);
// myfile << currentTime << "\t" << it << "\n";
// myfile.close();
}
if ((continuityConverged && momentumConverged) && it > 2)
{
rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false);
rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false);
KRATOS_INFO("TwoStepVPStrategy") << "V-P strategy converged in " << it + 1 << " iterations." << std::endl;
break;
}
if (fixedTimeStep == true)
{
break;
}
}
/* } */
if (!continuityConverged && !momentumConverged && BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
std::cout << "Convergence tolerance not reached." << std::endl;
/* std::cout << "solve_step_time : " << solve_step_time.elapsed() << std::endl; */
if (mReformDofSet)
this->Clear();
return NormDp;
}
/// End-of-step hook: intentionally empty — the stress/strain update is
/// already triggered from inside Solve().
void FinalizeSolutionStep() override
{
/* this->UpdateStressStrain(); */
}
/// Start-of-step hook: intentionally empty for this strategy.
void InitializeSolutionStep() override
{
}
/// Recompute nodal displacements from the new velocities and move the mesh
/// accordingly. Note: both parameters are currently unused (the normals
/// recalculation that needed them is commented out).
void UpdateTopology(ModelPart &rModelPart, unsigned int echoLevel)
{
KRATOS_TRY;
this->CalculateDisplacementsAndPorosity();
BaseType::MoveMesh();
/* BoundaryNormalsCalculationUtilities BoundaryComputation; */
/* BoundaryComputation.CalculateWeightedBoundaryNormals(rModelPart, echoLevel); */
KRATOS_CATCH("");
}
/// Deactivate "sliver" simplex elements whose area/volume is below 0.001 of
/// the mean elemental volume; all other simplices are (re)activated.
/// Parallelized over OpenMP element partitions; each element is touched by
/// exactly one thread, so the ACTIVE flag writes do not race.
void UnactiveSliverElements()
{
KRATOS_TRY;
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
MesherUtilities MesherUtils;
double ModelPartVolume = MesherUtils.ComputeModelPartVolume(rModelPart);
// Threshold: 0.1% of the average element volume.
double CriticalVolume = 0.001 * ModelPartVolume / double(rModelPart.Elements().size());
double ElementalVolume = 0;
#pragma omp parallel
{
ModelPart::ElementIterator ElemBegin;
ModelPart::ElementIterator ElemEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
{
unsigned int numNodes = itElem->GetGeometry().size();
// Only simplex elements (triangle in 2D, tetrahedron in 3D) are tested.
if (numNodes == (dimension + 1))
{
if (dimension == 2)
{
ElementalVolume = (itElem)->GetGeometry().Area();
}
else if (dimension == 3)
{
ElementalVolume = (itElem)->GetGeometry().Volume();
}
if (ElementalVolume < CriticalVolume)
{
// std::cout << "sliver element: it has Volume: " << ElementalVolume << " vs CriticalVolume(meanVol/1000): " << CriticalVolume<< std::endl;
(itElem)->Set(ACTIVE, false);
}
else
{
(itElem)->Set(ACTIVE, true);
}
}
}
}
KRATOS_CATCH("");
}
void CalculatePressureVelocity()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
unsigned int timeStep = rCurrentProcessInfo[STEP];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
if (timeStep == 1)
{
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0;
}
else
{
double &CurrentPressure = (i)->FastGetSolutionStepValue(PRESSURE, 0);
double &PreviousPressure = (i)->FastGetSolutionStepValue(PRESSURE, 1);
double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
CurrentPressureVelocity = (CurrentPressure - PreviousPressure) / timeInterval;
}
}
}
void CalculatePressureAcceleration()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
unsigned int timeStep = rCurrentProcessInfo[STEP];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
if (timeStep == 1)
{
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0;
}
else
{
double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
double &PreviousPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1);
double &CurrentPressureAcceleration = (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0);
CurrentPressureAcceleration = (CurrentPressureVelocity - PreviousPressureVelocity) / timeInterval;
}
}
}
/// Per-node update of the temporal variables after a solve: accelerations
/// via the BDF formula for regular fluid/solid nodes, zeroed kinematics for
/// rigid nodes, reset pressure state (plus gravity kick) for isolated
/// nodes; finally the pressure velocity/acceleration finite differences.
virtual void CalculateTemporalVariables()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3> &CurrentAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 0);
array_1d<double, 3> &PreviousAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 1);
/* if((i)->IsNot(ISOLATED) || (i)->Is(SOLID)){ */
// Regular node (not isolated, and either non-rigid or a solid): BDF update.
if ((i)->IsNot(ISOLATED) && ((i)->IsNot(RIGID) || (i)->Is(SOLID)))
{
UpdateAccelerations(CurrentAcceleration, CurrentVelocity, PreviousAcceleration, PreviousVelocity, BDFcoeffs);
}
else if ((i)->Is(RIGID))
{
// Rigid nodes carry no acceleration.
array_1d<double, 3> Zeros(3, 0.0);
(i)->FastGetSolutionStepValue(ACCELERATION, 0) = Zeros;
(i)->FastGetSolutionStepValue(ACCELERATION, 1) = Zeros;
}
else
{
// Isolated node: wipe its pressure state and let it fall freely
// under the body force (ballistic update of velocity).
(i)->FastGetSolutionStepValue(PRESSURE, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE, 1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0.0;
if ((i)->SolutionStepsDataHas(VOLUME_ACCELERATION))
{
array_1d<double, 3> &VolumeAcceleration = (i)->FastGetSolutionStepValue(VOLUME_ACCELERATION);
(i)->FastGetSolutionStepValue(ACCELERATION, 0) = VolumeAcceleration;
(i)->FastGetSolutionStepValue(VELOCITY, 0) += VolumeAcceleration * rCurrentProcessInfo[DELTA_TIME];
}
}
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
unsigned int timeStep = rCurrentProcessInfo[STEP];
if (timeStep == 1)
{
// First step: no history, start pressure derivatives from rest.
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0;
}
else
{
// Pressure acceleration is assembled from the OLD pressure velocity
// (still stored in buffer 0) and the NEW one computed in between:
// result = (oldPV - newPV) / dt.
// NOTE(review): this is the NEGATIVE of the usual (new-old)/dt
// difference used in CalculatePressureAcceleration — confirm the
// intended sign.
double &CurrentPressure = (i)->FastGetSolutionStepValue(PRESSURE, 0);
double &PreviousPressure = (i)->FastGetSolutionStepValue(PRESSURE, 1);
double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
double &CurrentPressureAcceleration = (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0);
CurrentPressureAcceleration = CurrentPressureVelocity / timeInterval;
CurrentPressureVelocity = (CurrentPressure - PreviousPressure) / timeInterval;
CurrentPressureAcceleration += -CurrentPressureVelocity / timeInterval;
}
}
}
/// Per-node acceleration update only (same node classification as
/// CalculateTemporalVariables, without the pressure-derivative part):
/// BDF update for regular nodes, zeroed accelerations for rigid nodes,
/// pressure reset plus ballistic velocity update for isolated nodes.
void CalculateAccelerations()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3> &CurrentAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 0);
array_1d<double, 3> &PreviousAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 1);
/* if((i)->IsNot(ISOLATED) || (i)->Is(SOLID)){ */
// Regular node (not isolated, and either non-rigid or a solid): BDF update.
if ((i)->IsNot(ISOLATED) && ((i)->IsNot(RIGID) || (i)->Is(SOLID)))
{
UpdateAccelerations(CurrentAcceleration, CurrentVelocity, PreviousAcceleration, PreviousVelocity, BDFcoeffs);
}
else if ((i)->Is(RIGID))
{
// Rigid nodes carry no acceleration.
array_1d<double, 3> Zeros(3, 0.0);
(i)->FastGetSolutionStepValue(ACCELERATION, 0) = Zeros;
(i)->FastGetSolutionStepValue(ACCELERATION, 1) = Zeros;
}
else
{
// Isolated node: wipe its pressure state and let it fall freely
// under the body force (ballistic update of velocity).
(i)->FastGetSolutionStepValue(PRESSURE, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE, 1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0.0;
if ((i)->SolutionStepsDataHas(VOLUME_ACCELERATION))
{
array_1d<double, 3> &VolumeAcceleration = (i)->FastGetSolutionStepValue(VOLUME_ACCELERATION);
(i)->FastGetSolutionStepValue(ACCELERATION, 0) = VolumeAcceleration;
(i)->FastGetSolutionStepValue(VELOCITY, 0) += VolumeAcceleration * rCurrentProcessInfo[DELTA_TIME];
}
}
}
}
/// BDF-style acceleration update:
///   a_new = -BDFcoeffs[1] * (v_new - v_old) - a_old
/// With BDFcoeffs[1] = -2/dt (see the notes below) this amounts to
/// a_new = (2/dt)*(v_new - v_old) - a_old, i.e. a trapezoidal-rule
/// inversion.  Only CurrentAcceleration is written; the other arguments
/// are read-only here despite the non-const references.
inline void UpdateAccelerations(array_1d<double, 3> &CurrentAcceleration,
const array_1d<double, 3> &CurrentVelocity,
array_1d<double, 3> &PreviousAcceleration,
const array_1d<double, 3> &PreviousVelocity,
Vector &BDFcoeffs)
{
/* noalias(PreviousAcceleration)=CurrentAcceleration; */
noalias(CurrentAcceleration) = -BDFcoeffs[1] * (CurrentVelocity - PreviousVelocity) - PreviousAcceleration;
// std::cout<<"rBDFCoeffs[0] is "<<rBDFCoeffs[0]<<std::endl;//3/(2*delta_t)
// std::cout<<"rBDFCoeffs[1] is "<<rBDFCoeffs[1]<<std::endl;//-2/(delta_t)
// std::cout<<"rBDFCoeffs[2] is "<<rBDFCoeffs[2]<<std::endl;//1/(2*delta_t)
}
virtual void CalculateDisplacementsAndPorosity()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double TimeStep = rCurrentProcessInfo[DELTA_TIME];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3> &CurrentDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 0);
array_1d<double, 3> &PreviousDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 1);
/* if( i->IsFixed(DISPLACEMENT_X) == false ) */
CurrentDisplacement[0] = 0.5 * TimeStep * (CurrentVelocity[0] + PreviousVelocity[0]) + PreviousDisplacement[0];
/* if( i->IsFixed(DISPLACEMENT_Y) == false ) */
CurrentDisplacement[1] = 0.5 * TimeStep * (CurrentVelocity[1] + PreviousVelocity[1]) + PreviousDisplacement[1];
/* if( i->IsFixed(DISPLACEMENT_Z) == false ) */
CurrentDisplacement[2] = 0.5 * TimeStep * (CurrentVelocity[2] + PreviousVelocity[2]) + PreviousDisplacement[2];
// currentFluidFractionRate = (currentFluidFraction - previousFluidFraction)/TimeStep;
}
}
/// Refresh the constitutive state: re-run InitializeSolutionStep on every
/// element (OpenMP-partitioned, one thread per partition), then recompute
/// the nodal temporal variables.
void UpdateStressStrain()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
#pragma omp parallel
{
ModelPart::ElementIterator ElemBegin;
ModelPart::ElementIterator ElemEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
{
/* itElem-> InitializeElementStrainStressState(); */
itElem->InitializeSolutionStep(rCurrentProcessInfo);
}
}
/* this->CalculateAccelerations(); */
/* this->CalculatePressureVelocity(); */
/* this->CalculatePressureAcceleration(); */
this->CalculateTemporalVariables();
}
// Release the internal data (system matrices, vectors, ...) of both
// inner linear strategies.
void Clear() override
{
mpMomentumStrategy->Clear();
mpPressureStrategy->Clear();
}
///@}
///@name Access
///@{
/// Set the verbosity of this strategy; the two inner strategies receive one
/// level less, clamped at zero.
void SetEchoLevel(int Level) override
{
    BaseType::SetEchoLevel(Level);
    const int innerLevel = (Level > 0) ? (Level - 1) : 0;
    mpMomentumStrategy->SetEchoLevel(innerLevel);
    mpPressureStrategy->SetEchoLevel(innerLevel);
}
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
/// Turn back information as a string.
std::string Info() const override
{
    return std::string("TwoStepVPStrategy");
}
/// Print information about this object.
// Write the class name to the given stream (no trailing newline).
void PrintInfo(std::ostream &rOStream) const override
{
rOStream << "TwoStepVPStrategy";
}
/// Print object's data.
void PrintData(std::ostream &rOStream) const override
{
// Intentionally empty: this strategy exposes no additional data to print.
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected Life Cycle
///@{
///@}
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/// Calculate the coefficients for time iteration.
/**
* @param rCurrentProcessInfo ProcessInfo instance from the fluid ModelPart. Must contain DELTA_TIME and BDF_COEFFICIENTS variables.
*/
/// Compute the BDF time-integration coefficients and store them in
/// rCurrentProcessInfo[BDF_COEFFICIENTS].
/// mTimeOrder == 2 uses the variable-step BDF2 formula (Rho = OldDt/Dt accounts
/// for a change of step size between the last two steps); mTimeOrder == 1
/// reduces to backward Euler. Any other value leaves BDF_COEFFICIENTS untouched.
void SetTimeCoefficients(ProcessInfo &rCurrentProcessInfo)
{
KRATOS_TRY;
if (mTimeOrder == 2)
{
//calculate the BDF coefficients
double Dt = rCurrentProcessInfo[DELTA_TIME];
double OldDt = rCurrentProcessInfo.GetPreviousTimeStepInfo(1)[DELTA_TIME];
// Step-size ratio and the common scaling factor of the variable-step formula.
double Rho = OldDt / Dt;
double TimeCoeff = 1.0 / (Dt * Rho * Rho + Dt * Rho);
Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
BDFcoeffs.resize(3, false);
BDFcoeffs[0] = TimeCoeff * (Rho * Rho + 2.0 * Rho);        //coefficient for step n+1 (3/2Dt if Dt is constant)
BDFcoeffs[1] = -TimeCoeff * (Rho * Rho + 2.0 * Rho + 1.0); //coefficient for step n (-4/2Dt if Dt is constant)
BDFcoeffs[2] = TimeCoeff;                                  //coefficient for step n-1 (1/2Dt if Dt is constant)
}
else if (mTimeOrder == 1)
{
// First-order (backward Euler) coefficients: [1/Dt, -1/Dt].
double Dt = rCurrentProcessInfo[DELTA_TIME];
double TimeCoeff = 1.0 / Dt;
Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
BDFcoeffs.resize(2, false);
BDFcoeffs[0] = TimeCoeff;  //coefficient for step n+1 (1/Dt)
BDFcoeffs[1] = -TimeCoeff; //coefficient for step n (-1/Dt)
}
KRATOS_CATCH("");
}
bool SolveMomentumIteration(unsigned int it, unsigned int maxIt, bool &fixedTimeStep)
{
ModelPart &rModelPart = BaseType::GetModelPart();
int Rank = rModelPart.GetCommunicator().MyPID();
bool ConvergedMomentum = false;
double NormDv = 0;
fixedTimeStep = false;
// build momentum system and solve for fractional step velocity increment
rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP, 1);
/* std::cout<<"---- m o m e n t u m e q u a t i o n s ----"<<std::endl; */
if (it == 0)
{
mpMomentumStrategy->InitializeSolutionStep();
}
/* else{ */
/* NormDv = mpMomentumStrategy->Solve(); */
/* } */
NormDv = mpMomentumStrategy->Solve();
if (BaseType::GetEchoLevel() > 1 && Rank == 0)
std::cout << "-------------- s o l v e d ! ------------------" << std::endl;
double DvErrorNorm = 0;
ConvergedMomentum = this->CheckVelocityConvergence(NormDv, DvErrorNorm);
unsigned int iterationForCheck = 2;
KRATOS_INFO("TwoStepVPStrategy") << "iteration(" << it << ") Velocity error: " << DvErrorNorm << " velTol: " << mVelocityTolerance << std::endl;
// Check convergence
if (it == maxIt - 1)
{
KRATOS_INFO("TwoStepVPStrategy") << "iteration(" << it << ") Final Velocity error: " << DvErrorNorm << " velTol: " << mVelocityTolerance << std::endl;
fixedTimeStep = this->FixTimeStepMomentum(DvErrorNorm);
}
else if (it > iterationForCheck)
{
fixedTimeStep = this->CheckMomentumConvergence(DvErrorNorm);
}
// ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();
// double currentTime = rCurrentProcessInfo[TIME];
// double tolerance=0.0000000001;
// if(currentTime>(0.25-tolerance) && currentTime<(0.25+tolerance)){
// std::ofstream myfile;
// myfile.open ("velocityConvergenceAt025s.txt",std::ios::app);
// myfile << it << "\t" << DvErrorNorm << "\n";
// myfile.close();
// }
// else if(currentTime>(0.5-tolerance) && currentTime<(0.5+tolerance)){
// std::ofstream myfile;
// myfile.open ("velocityConvergenceAt05s.txt",std::ios::app);
// myfile << it << "\t" << DvErrorNorm << "\n";
// myfile.close();
// }
// else if(currentTime>(0.75-tolerance) && currentTime<(0.75+tolerance)){
// std::ofstream myfile;
// myfile.open ("velocityConvergenceAt075s.txt",std::ios::app);
// myfile << it << "\t" << DvErrorNorm << "\n";
// myfile.close();
// }
// else if(currentTime>(1.0-tolerance) && currentTime<(1.0+tolerance)){
// std::ofstream myfile;
// myfile.open ("velocityConvergenceAt100s.txt",std::ios::app);
// myfile << it << "\t" << DvErrorNorm << "\n";
// myfile.close();
// }
if (!ConvergedMomentum && BaseType::GetEchoLevel() > 0 && Rank == 0)
std::cout << "Momentum equations did not reach the convergence tolerance." << std::endl;
return ConvergedMomentum;
}
bool SolveContinuityIteration(unsigned int it, unsigned int maxIt)
{
ModelPart &rModelPart = BaseType::GetModelPart();
int Rank = rModelPart.GetCommunicator().MyPID();
bool ConvergedContinuity = false;
double NormDp = 0;
// 2. Pressure solution
rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP, 5);
/* std::cout<<" ---- c o n t i n u i t y e q u a t i o n ----"<<std::endl; */
if (it == 0)
{
mpPressureStrategy->InitializeSolutionStep();
}
/* else{ */
/* NormDp = mpPressureStrategy->Solve(); */
/* } */
NormDp = mpPressureStrategy->Solve();
if (BaseType::GetEchoLevel() > 0 && Rank == 0)
std::cout << "The norm of pressure is: " << NormDp << std::endl;
double DpErrorNorm = 0;
ConvergedContinuity = this->CheckPressureConvergence(NormDp, DpErrorNorm);
KRATOS_INFO("TwoStepVPStrategy") << " iteration(" << it << ") Pressure error: " << DpErrorNorm << " presTol: " << mPressureTolerance << std::endl;
// Check convergence
if (it == maxIt - 1)
{
KRATOS_INFO("TwoStepVPStrategy") << " iteration(" << it << ") Final Pressure error: " << DpErrorNorm << " presTol: " << mPressureTolerance << std::endl;
ConvergedContinuity = this->FixTimeStepContinuity(DpErrorNorm);
}
// ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();
// double currentTime = rCurrentProcessInfo[TIME];
// double tolerance=0.0000000001;
// if(currentTime>(0.25-tolerance) && currentTime<(0.25+tolerance)){
// std::ofstream myfile;
// myfile.open ("pressureConvergenceAt025s.txt",std::ios::app);
// myfile << it << "\t" << DpErrorNorm << "\n";
// myfile.close();
// }
// else if(currentTime>(0.5-tolerance) && currentTime<(0.5+tolerance)){
// std::ofstream myfile;
// myfile.open ("pressureConvergenceAt05s.txt",std::ios::app);
// myfile << it << "\t" << DpErrorNorm << "\n";
// myfile.close();
// }
// else if(currentTime>(0.75-tolerance) && currentTime<(0.75+tolerance)){
// std::ofstream myfile;
// myfile.open ("pressureConvergenceAt075s.txt",std::ios::app);
// myfile << it << "\t" << DpErrorNorm << "\n";
// myfile.close();
// }
// else if(currentTime>(1.0-tolerance) && currentTime<(1.0+tolerance)){
// std::ofstream myfile;
// myfile.open ("pressureConvergenceAt100s.txt",std::ios::app);
// myfile << it << "\t" << DpErrorNorm << "\n";
// myfile.close();
// }
if (!ConvergedContinuity && BaseType::GetEchoLevel() > 0 && Rank == 0)
std::cout << "Continuity equation did not reach the convergence tolerance." << std::endl;
return ConvergedContinuity;
}
/// Compute element-wise L2 error norms of velocity, pressure and deviatoric
/// stresses against a hard-coded 2D manufactured solution, and append one
/// line per time step to a set of text files.
/// NOTE(review): the barycenter weights (1/3 per node) assume triangles and
/// the analytic fields are 2D — confirm intended use before 3D runs.
void ComputeErrorL2Norm()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double currentTime = rCurrentProcessInfo[TIME];
    long double sumErrorL2Velocity = 0;
    long double sumErrorL2VelocityX = 0;
    long double sumErrorL2VelocityY = 0;
    long double sumErrorL2Pressure = 0;
    long double sumErrorL2TauXX = 0;
    long double sumErrorL2TauYY = 0;
    long double sumErrorL2TauXY = 0;
    // The accumulators are shared between threads: a reduction is required.
    // The original code used plain "+=" inside the parallel region, which is
    // a data race and could corrupt the reported norms.
#pragma omp parallel reduction(+ : sumErrorL2Velocity, sumErrorL2VelocityX, sumErrorL2VelocityY, sumErrorL2Pressure, sumErrorL2TauXX, sumErrorL2TauYY, sumErrorL2TauXY)
    {
        ModelPart::ElementIterator ElemBegin;
        ModelPart::ElementIterator ElemEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
        for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
        {
            Element::GeometryType &geometry = itElem->GetGeometry();
            const double area = geometry.Area(); // hoisted: reused by every contribution below
            // Interpolate pressure and velocity at the first Gauss point.
            Matrix NContainer = geometry.ShapeFunctionsValues(GeometryData::GI_GAUSS_1);
            const Vector &N = row(NContainer, 0);
            const unsigned int NumNodes = geometry.size();
            double elementalPressure = N[0] * geometry(0)->FastGetSolutionStepValue(PRESSURE);
            double elementalVelocityX = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_X);
            double elementalVelocityY = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_Y);
            for (unsigned int i = 1; i < NumNodes; i++)
            {
                elementalPressure += N[i] * geometry(i)->FastGetSolutionStepValue(PRESSURE);
                elementalVelocityX += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_X);
                elementalVelocityY += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_Y);
            }
            // Element barycenter (1/3 weight per node: triangle assumption).
            long double bariPosX = 0;
            long double bariPosY = 0;
            for (unsigned int i = 0; i < NumNodes; i++)
            {
                bariPosX += geometry(i)->X() / 3.0;
                bariPosY += geometry(i)->Y() / 3.0;
            }
            const long double posX = bariPosX;
            const long double posY = bariPosY;
            // Manufactured solution evaluated at the barycenter.
            const long double expectedVelocityX = pow(posX, 2) * (1.0 - posX) * (1.0 - posX) * (2.0 * posY - 6.0 * pow(posY, 2) + 4.0 * pow(posY, 3));
            const long double expectedVelocityY = -pow(posY, 2) * (1.0 - posY) * (1.0 - posY) * (2.0 * posX - 6.0 * pow(posX, 2) + 4.0 * pow(posX, 3));
            const long double expectedPressure = -posX * (1.0 - posX);
            sumErrorL2VelocityX += pow(elementalVelocityX - expectedVelocityX, 2) * area;
            sumErrorL2VelocityY += pow(elementalVelocityY - expectedVelocityY, 2) * area;
            sumErrorL2Pressure += pow(elementalPressure - expectedPressure, 2) * area;
            // The computed deviatoric stresses are currently disabled (the
            // ELEMENTAL_DEVIATORIC_STRESS_* reads are commented out), so the
            // "computed" stress is taken as zero.
            const long double tauXX = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XX);
            const long double tauYY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_YY);
            const long double tauXY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XY);
            const long double expectedTauXX = 2.0 * (-4.0 * (1.0 - bariPosX) * bariPosX * (-1.0 + 2.0 * bariPosX) * bariPosY * (1.0 - 3.0 * bariPosY + 2.0 * pow(bariPosY, 2)));
            const long double expectedTauYY = 2.0 * (4.0 * bariPosX * (1.0 - 3.0 * bariPosX + 2.0 * pow(bariPosX, 2)) * (1.0 - bariPosY) * bariPosY * (-1.0 + 2.0 * bariPosY));
            const long double expectedTauXY = (2.0 * (1.0 - 6.0 * bariPosY + 6.0 * pow(bariPosY, 2)) * (1.0 - bariPosX) * (1.0 - bariPosX) * pow(bariPosX, 2) - 2.0 * (1.0 - 6.0 * bariPosX + 6.0 * pow(bariPosX, 2)) * (1.0 - bariPosY) * (1 - bariPosY) * pow(bariPosY, 2));
            sumErrorL2TauXX += pow(tauXX - expectedTauXX, 2) * area;
            sumErrorL2TauYY += pow(tauYY - expectedTauYY, 2) * area;
            sumErrorL2TauXY += pow(tauXY - expectedTauXY, 2) * area;
        }
    }
    // NOTE(review): sumErrorL2Velocity is never accumulated (the combined
    // velocity error was disabled upstream), so errorL2Velocity is always 0;
    // it is kept to preserve the existing output-file format.
    const long double errorL2Velocity = sqrt(sumErrorL2Velocity);
    const long double errorL2VelocityX = sqrt(sumErrorL2VelocityX);
    const long double errorL2VelocityY = sqrt(sumErrorL2VelocityY);
    const long double errorL2Pressure = sqrt(sumErrorL2Pressure);
    const long double errorL2TauXX = sqrt(sumErrorL2TauXX);
    const long double errorL2TauYY = sqrt(sumErrorL2TauYY);
    const long double errorL2TauXY = sqrt(sumErrorL2TauXY);
    // Append "<time>\t<error>" to one file per quantity.
    std::ofstream myfileVelocity;
    myfileVelocity.open("errorL2VelocityFile.txt", std::ios::app);
    myfileVelocity << currentTime << "\t" << errorL2Velocity << "\n";
    myfileVelocity.close();
    std::ofstream myfileVelocityX;
    myfileVelocityX.open("errorL2VelocityXFile.txt", std::ios::app);
    myfileVelocityX << currentTime << "\t" << errorL2VelocityX << "\n";
    myfileVelocityX.close();
    std::ofstream myfileVelocityY;
    myfileVelocityY.open("errorL2VelocityYFile.txt", std::ios::app);
    myfileVelocityY << currentTime << "\t" << errorL2VelocityY << "\n";
    myfileVelocityY.close();
    std::ofstream myfilePressure;
    myfilePressure.open("errorL2PressureFile.txt", std::ios::app);
    myfilePressure << currentTime << "\t" << errorL2Pressure << "\n";
    myfilePressure.close();
    std::ofstream myfileTauXX;
    myfileTauXX.open("errorL2TauXXFile.txt", std::ios::app);
    myfileTauXX << currentTime << "\t" << errorL2TauXX << "\n";
    myfileTauXX.close();
    std::ofstream myfileTauYY;
    myfileTauYY.open("errorL2TauYYFile.txt", std::ios::app);
    myfileTauYY << currentTime << "\t" << errorL2TauYY << "\n";
    myfileTauYY.close();
    std::ofstream myfileTauXY;
    myfileTauXY.open("errorL2TauXYFile.txt", std::ios::app);
    myfileTauXY << currentTime << "\t" << errorL2TauXY << "\n";
    myfileTauXY.close();
}
/// Compute L2 error norms of the tangential velocity and tangential shear
/// stress against the analytic solution and append them to
/// "errorL2Poiseuille.txt".
/// NOTE(review): despite the name, the analytic fields match circular Couette
/// flow between concentric cylinders (kappa = r_in/R_out, inner angular
/// velocity omega) — confirm the benchmark naming.
void ComputeErrorL2NormCasePoiseuille()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double currentTime = rCurrentProcessInfo[TIME];
    double sumErrorL2VelocityTheta = 0;
    double sumErrorL2TauTheta = 0;
    // Benchmark parameters (hard-coded).
    const double r_in = 0.2;      // inner radius
    const double R_out = 0.5;     // outer radius
    const double kappa = r_in / R_out;
    const double omega = 0.5;     // angular velocity
    const double viscosity = 100.0;
    // Reduction is required: the original code accumulated into shared
    // variables inside the parallel region without synchronization (data race).
#pragma omp parallel reduction(+ : sumErrorL2VelocityTheta, sumErrorL2TauTheta)
    {
        ModelPart::ElementIterator ElemBegin;
        ModelPart::ElementIterator ElemEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
        for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
        {
            Element::GeometryType &geometry = itElem->GetGeometry();
            const double area = geometry.Area(); // hoisted: reused below
            // Interpolate velocity at the first Gauss point.
            Matrix NContainer = geometry.ShapeFunctionsValues(GeometryData::GI_GAUSS_1);
            const Vector &N = row(NContainer, 0);
            const unsigned int NumNodes = geometry.size();
            double elementalVelocityX = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_X);
            double elementalVelocityY = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_Y);
            for (unsigned int i = 1; i < NumNodes; i++)
            {
                elementalVelocityX += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_X);
                elementalVelocityY += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_Y);
            }
            // Element barycenter (1/3 weight per node: triangle assumption).
            long double bariPosX = 0;
            long double bariPosY = 0;
            for (unsigned int i = 0; i < NumNodes; i++)
            {
                bariPosX += geometry(i)->X() / 3.0;
                bariPosY += geometry(i)->Y() / 3.0;
            }
            const long double posX = bariPosX;
            const long double posY = bariPosY;
            // Polar decomposition of the barycenter position.
            const double rPos = sqrt(pow(posX, 2) + pow(posY, 2));
            const double cosalfa = posX / rPos;
            const double sinalfa = posY / rPos;
            const double sin2alfa = 2.0 * cosalfa * sinalfa;
            const double cos2alfa = 1.0 - 2.0 * pow(sinalfa, 2);
            // Analytic tangential velocity and its computed counterpart.
            const double expectedVelocityTheta = pow(kappa, 2) * omega * R_out / (1.0 - pow(kappa, 2)) * (R_out / rPos - rPos / R_out);
            const double computedVelocityTheta = sqrt(pow(elementalVelocityX, 2) + pow(elementalVelocityY, 2));
            const double nodalErrorVelocityTheta = computedVelocityTheta - expectedVelocityTheta;
            // Computed deviatoric stresses are currently disabled (reads
            // commented out), so the rotated shear stress evaluates from zeros.
            const long double tauXX = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XX);
            const long double tauYY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_YY);
            const long double tauXY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XY);
            const double expectedTauTheta = (2.0 * viscosity * pow(kappa, 2) * omega * pow(R_out, 2)) / (1.0 - pow(kappa, 2)) / pow(rPos, 2);
            const double computedTauTheta = (tauXX - tauYY) * sin2alfa / 2.0 - tauXY * cos2alfa;
            const double nodalErrorTauTheta = computedTauTheta - expectedTauTheta;
            sumErrorL2VelocityTheta += pow(nodalErrorVelocityTheta, 2) * area;
            sumErrorL2TauTheta += pow(nodalErrorTauTheta, 2) * area;
        }
    }
    const double errorL2VelocityTheta = sqrt(sumErrorL2VelocityTheta);
    const double errorL2TauTheta = sqrt(sumErrorL2TauTheta);
    std::ofstream myfileVelocity;
    myfileVelocity.open("errorL2Poiseuille.txt", std::ios::app);
    myfileVelocity << currentTime << "\t" << errorL2VelocityTheta << "\t" << errorL2TauTheta << "\n";
    myfileVelocity.close();
}
bool CheckVelocityConvergence(const double NormDv, double &errorNormDv)
{
ModelPart &rModelPart = BaseType::GetModelPart();
double NormV = 0.00;
errorNormDv = 0;
#pragma omp parallel reduction(+ \
: NormV)
{
ModelPart::NodeIterator NodeBegin;
ModelPart::NodeIterator NodeEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
{
const array_1d<double, 3> &Vel = itNode->FastGetSolutionStepValue(VELOCITY);
double NormVelNode = 0;
for (unsigned int d = 0; d < 3; ++d)
{
NormVelNode += Vel[d] * Vel[d];
NormV += Vel[d] * Vel[d];
}
}
}
BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormV);
NormV = sqrt(NormV);
if (NormV == 0.0)
NormV = 1.00;
errorNormDv = NormDv / NormV;
if (BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
{
std::cout << "The norm of velocity increment is: " << NormDv << std::endl;
std::cout << "The norm of velocity is: " << NormV << std::endl;
std::cout << "Velocity error: " << errorNormDv << "mVelocityTolerance: " << mVelocityTolerance << std::endl;
}
/* else{ */
/* std::cout<<"Velocity error: "<< errorNormDv <<" velTol: " << mVelocityTolerance<< std::endl; */
/* } */
if (errorNormDv < mVelocityTolerance)
{
return true;
}
else
{
return false;
}
}
/// Compute the relative pressure error errorNormDp = NormDp / ||p|| over all
/// nodes (summed across MPI ranks) and compare it against mPressureTolerance.
bool CheckPressureConvergence(const double NormDp, double &errorNormDp)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    double pressureNorm = 0.00;
    errorNormDp = 0;
#pragma omp parallel reduction(+ \
                               : pressureNorm)
    {
        ModelPart::NodeIterator NodeBegin;
        ModelPart::NodeIterator NodeEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
        for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
        {
            const double nodalPressure = itNode->FastGetSolutionStepValue(PRESSURE);
            pressureNorm += nodalPressure * nodalPressure;
        }
    }
    // Sum the squared norm across MPI ranks before taking the square root.
    BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(pressureNorm);
    pressureNorm = sqrt(pressureNorm);
    if (pressureNorm == 0.0)
        pressureNorm = 1.00; // avoid division by zero for a zero pressure field
    errorNormDp = NormDp / pressureNorm;
    if (BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
    {
        std::cout << "         The norm of pressure increment is: " << NormDp << std::endl;
        std::cout << "         The norm of pressure is: " << pressureNorm << std::endl;
        std::cout << "         Pressure error: " << errorNormDp << std::endl;
    }
    return errorNormDp < mPressureTolerance;
}
/// Decide, after the final momentum iteration, whether convergence is bad
/// enough to freeze the step: flags BAD_VELOCITY_CONVERGENCE and, for severe
/// errors, rolls velocity/pressure/acceleration back to the previous step.
/// @return true when the current step results were discarded (rolled back).
bool FixTimeStepMomentum(const double DvErrorNorm)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double currentTime = rCurrentProcessInfo[TIME];
    const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
    double minTolerance = 0.005;
    bool fixedTimeStep = false;
    // Be lenient during the first few steps while the solution settles.
    if (currentTime < 10 * timeInterval)
    {
        minTolerance = 10;
    }
    // Bad convergence: error above tolerance, or NaN (self-comparison test).
    // An error of exactly 1 is tolerated during the very first step.
    // (The original also tested "DvErrorNorm < 0 && DvErrorNorm > 0", which is
    // always false and has been removed.)
    if ((DvErrorNorm > minTolerance || DvErrorNorm != DvErrorNorm) &&
        DvErrorNorm != 0 &&
        (DvErrorNorm != 1 || currentTime > timeInterval))
    {
        rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, true);
        std::cout << "NOT GOOD CONVERGENCE!!! I'll reduce the next time interval" << DvErrorNorm << std::endl;
        minTolerance = 0.05;
        if (DvErrorNorm > minTolerance)
        {
            std::cout << "BAD CONVERGENCE!!! I GO AHEAD WITH THE PREVIOUS VELOCITY AND PRESSURE FIELDS" << DvErrorNorm << std::endl;
            fixedTimeStep = true;
            // Roll the solution back to the previous time step on every node.
#pragma omp parallel
            {
                ModelPart::NodeIterator NodeBegin;
                ModelPart::NodeIterator NodeEnd;
                OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
                for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
                {
                    itNode->FastGetSolutionStepValue(VELOCITY, 0) = itNode->FastGetSolutionStepValue(VELOCITY, 1);
                    itNode->FastGetSolutionStepValue(PRESSURE, 0) = itNode->FastGetSolutionStepValue(PRESSURE, 1);
                    itNode->FastGetSolutionStepValue(ACCELERATION, 0) = itNode->FastGetSolutionStepValue(ACCELERATION, 1);
                }
            }
        }
    }
    else
    {
        rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false);
    }
    return fixedTimeStep;
}
/// Mid-loop momentum safeguard: when the error is above 0.99999 or NaN, flag
/// BAD_VELOCITY_CONVERGENCE and roll velocity/pressure/acceleration back to
/// the previous time step.
/// @return true when the current step results were discarded (rolled back).
bool CheckMomentumConvergence(const double DvErrorNorm)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double currentTime = rCurrentProcessInfo[TIME];
    const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
    const double minTolerance = 0.99999;
    bool fixedTimeStep = false;
    // Bad convergence: error above tolerance, or NaN (self-comparison test).
    // An error of exactly 1 is tolerated during the very first step.
    // (The original also tested "DvErrorNorm < 0 && DvErrorNorm > 0", which is
    // always false and has been removed.)
    if ((DvErrorNorm > minTolerance || DvErrorNorm != DvErrorNorm) &&
        DvErrorNorm != 0 &&
        (DvErrorNorm != 1 || currentTime > timeInterval))
    {
        rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, true);
        std::cout << "           BAD CONVERGENCE DETECTED DURING THE ITERATIVE LOOP!!! error: " << DvErrorNorm << " higher than 0.9999" << std::endl;
        std::cout << "      I GO AHEAD WITH THE PREVIOUS VELOCITY AND PRESSURE FIELDS" << std::endl;
        fixedTimeStep = true;
        // Roll the solution back to the previous time step on every node.
#pragma omp parallel
        {
            ModelPart::NodeIterator NodeBegin;
            ModelPart::NodeIterator NodeEnd;
            OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
            for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
            {
                itNode->FastGetSolutionStepValue(VELOCITY, 0) = itNode->FastGetSolutionStepValue(VELOCITY, 1);
                itNode->FastGetSolutionStepValue(PRESSURE, 0) = itNode->FastGetSolutionStepValue(PRESSURE, 1);
                itNode->FastGetSolutionStepValue(ACCELERATION, 0) = itNode->FastGetSolutionStepValue(ACCELERATION, 1);
            }
        }
    }
    else
    {
        rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false);
    }
    return fixedTimeStep;
}
/// Decide, after the final continuity iteration, whether convergence is bad
/// enough to freeze the step: flags BAD_PRESSURE_CONVERGENCE and, for errors
/// above ten times the tolerance, also flags BAD_VELOCITY_CONVERGENCE and
/// rolls velocity/pressure/acceleration back to the previous step.
/// @return true when the step was marked as fixed (results discarded or kept
/// with the bad-convergence flag set).
bool FixTimeStepContinuity(const double DvErrorNorm)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double currentTime = rCurrentProcessInfo[TIME];
    const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
    double minTolerance = 0.01;
    bool fixedTimeStep = false;
    // Be lenient during the first few steps while the solution settles.
    if (currentTime < 10 * timeInterval)
    {
        minTolerance = 10;
    }
    // Bad convergence: error above tolerance, or NaN (self-comparison test).
    // An error of exactly 1 is tolerated during the very first step.
    // (The original also tested "DvErrorNorm < 0 && DvErrorNorm > 0", which is
    // always false, and re-assigned fixedTimeStep=true redundantly; both removed.)
    if ((DvErrorNorm > minTolerance || DvErrorNorm != DvErrorNorm) &&
        DvErrorNorm != 0 &&
        (DvErrorNorm != 1 || currentTime > timeInterval))
    {
        fixedTimeStep = true;
        rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, true);
        if (DvErrorNorm > 10 * minTolerance)
        {
            rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, true);
            std::cout << "           BAD CONVERGENCE DETECTED DURING THE ITERATIVE LOOP!!! error: " << DvErrorNorm << " higher than 0.9999" << std::endl;
            std::cout << "      I GO AHEAD WITH THE PREVIOUS VELOCITY AND PRESSURE FIELDS" << std::endl;
            // Roll the solution back to the previous time step on every node.
#pragma omp parallel
            {
                ModelPart::NodeIterator NodeBegin;
                ModelPart::NodeIterator NodeEnd;
                OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
                for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
                {
                    itNode->FastGetSolutionStepValue(VELOCITY, 0) = itNode->FastGetSolutionStepValue(VELOCITY, 1);
                    itNode->FastGetSolutionStepValue(PRESSURE, 0) = itNode->FastGetSolutionStepValue(PRESSURE, 1);
                    itNode->FastGetSolutionStepValue(ACCELERATION, 0) = itNode->FastGetSolutionStepValue(ACCELERATION, 1);
                }
            }
        }
    }
    else
    {
        rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false);
    }
    return fixedTimeStep;
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
double mVelocityTolerance;     // relative convergence tolerance of the momentum (velocity) solve
double mPressureTolerance;     // relative convergence tolerance of the continuity (pressure) solve
unsigned int mMaxPressureIter; // maximum number of pressure iterations per step
unsigned int mDomainSize;      // spatial dimension of the problem, read from the solver settings
unsigned int mTimeOrder;       // order of the BDF time scheme (1 or 2), see SetTimeCoefficients
bool mReformDofSet;            // whether the DOF set is rebuilt, read from the solver settings
// Fractional step index.
/* 1 : Momentum step (calculate fractional step velocity)
* 2-3 : Unused (reserved for componentwise calculation of frac step velocity)
* 4 : Pressure step
* 5 : Computation of projections
* 6 : End of step velocity
*/
// unsigned int mStepId;
/// Scheme for the solution of the momentum equation
StrategyPointerType mpMomentumStrategy;
/// Scheme for the solution of the mass equation
StrategyPointerType mpPressureStrategy;
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/// Read the solver configuration: time order, domain size, echo level, and
/// the velocity/pressure sub-strategies with their tolerances. Throws if
/// either sub-strategy is missing from the configuration.
virtual void InitializeStrategy(SolverSettingsType &rSolverConfig)
{
KRATOS_TRY;
mTimeOrder = rSolverConfig.GetTimeOrder();
// Check that input parameters are reasonable and sufficient.
this->Check();
//ModelPart& rModelPart = this->GetModelPart();
mDomainSize = rSolverConfig.GetDomainSize();
mReformDofSet = rSolverConfig.GetReformDofSet();
BaseType::SetEchoLevel(rSolverConfig.GetEchoLevel());
// Initialize strategies for each step
bool HaveVelStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Velocity, mpMomentumStrategy);
if (HaveVelStrategy)
{
rSolverConfig.FindTolerance(SolverSettingsType::Velocity, mVelocityTolerance);
/* rSolverConfig.FindMaxIter(SolverSettingsType::Velocity,mMaxVelocityIter); */
}
else
{
KRATOS_THROW_ERROR(std::runtime_error, "TwoStepVPStrategy error: No Velocity strategy defined in FractionalStepSettings", "");
}
bool HavePressStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Pressure, mpPressureStrategy);
if (HavePressStrategy)
{
rSolverConfig.FindTolerance(SolverSettingsType::Pressure, mPressureTolerance);
rSolverConfig.FindMaxIter(SolverSettingsType::Pressure, mMaxPressureIter);
}
else
{
KRATOS_THROW_ERROR(std::runtime_error, "TwoStepVPStrategy error: No Pressure strategy defined in FractionalStepSettings", "");
}
// Check input parameters
// NOTE(review): Check() was already called above; this second call is
// redundant unless Check() depends on the strategies configured in between.
this->Check();
KRATOS_CATCH("");
}
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator — private and empty to forbid assignment.
// NOTE(review): the body returns nothing despite the reference return type;
// invoking it would be undefined behavior. Safe only while it stays
// uncallable; consider "= delete" if the codebase allows C++11.
TwoStepVPStrategy &operator=(TwoStepVPStrategy const &rOther) {}
/// Copy constructor — private and empty to forbid copying.
TwoStepVPStrategy(TwoStepVPStrategy const &rOther) {}
///@}
}; /// Class TwoStepVPStrategy
///@}
///@name Type Definitions
///@{
///@}
///@} // addtogroup
} // namespace Kratos.
#endif // KRATOS_TWO_STEP_V_P_STRATEGY_H
|
parallel_if0.c | // RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7
#include "callback.h"
int main()
{
// print_frame(0);
#pragma omp parallel if(0)
{
// print_frame(1);
print_ids(0);
print_ids(1);
// print_frame(0);
#pragma omp parallel if(0)
{
// print_frame(1);
print_ids(0);
print_ids(1);
print_ids(2);
// print_frame(0);
#pragma omp task
{
// print_frame(1);
print_ids(0);
print_ids(1);
print_ids(2);
print_ids(3);
}
}
print_fuzzy_address(1);
}
print_fuzzy_address(2);
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_begin'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_end'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_implicit_task_begin'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_implicit_task_end'
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// make sure initial data pointers are null
// CHECK-NOT: 0: parallel_data initially not null
// CHECK-NOT: 0: task_data initially not null
// CHECK-NOT: 0: thread_data initially not null
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=1, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, invoker=[[PARALLEL_INVOKER:[0-9]+]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
// CHECK: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID:[0-9]+]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[NESTED_PARALLEL_ID:[0-9]+]], requested_team_size=1, codeptr_ra=[[NESTED_RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, invoker=[[PARALLEL_INVOKER]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID:[0-9]+]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
// CHECK: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: task level 2: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create: parent_task_id=[[NESTED_IMPLICIT_TASK_ID]], parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, new_task_id=[[EXPLICIT_TASK_ID:[0-9]+]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_schedule: first_task_id=[[NESTED_IMPLICIT_TASK_ID]], second_task_id=[[EXPLICIT_TASK_ID]], prior_task_status=ompt_task_others=4
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[EXPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame=[[NULL]]
// CHECK: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[NESTED_IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: task level 2: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame={{0x[0-f]+}}, reenter_frame={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_schedule: first_task_id=[[EXPLICIT_TASK_ID]], second_task_id=[[NESTED_IMPLICIT_TASK_ID]], prior_task_status=ompt_task_complete=1
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_end: task_id=[[EXPLICIT_TASK_ID]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id=0, task_id=[[NESTED_IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[NESTED_PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], invoker=[[PARALLEL_INVOKER]]
// CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[NESTED_RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], invoker=[[PARALLEL_INVOKER]]
// CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
return 0;
}
|
rnn_impl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file rnn_impl.h
* \brief
* \author Shu Zhang
*/
#ifndef MXNET_OPERATOR_RNN_IMPL_H_
#define MXNET_OPERATOR_RNN_IMPL_H_
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <algorithm>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include "./math.h"
#include "./math_functions-inl.h"
#include "./operator_common.h"
#include "./mshadow_op.h"
#include "./linalg.h"
// Logistic sigmoid: 1 / (1 + e^(-x)).
// Note: the unit constant is exactly representable in every floating type,
// so computing it from 1.0f is bit-identical for float and double inputs.
template<typename DType>
inline DType sigmoid(DType x) {
  const DType one = DType(1.0f);
  return one / (one + exp(-x));
}
// Forward pass (training mode) of ONE direction of ONE LSTM layer.
// In addition to producing the output sequence y, it writes the per-step
// cell states and gate activations (i, f, g, o) into the reserve space
// `rs` so that LstmBackwardSingleLayer can reuse them.
//
//   ws            workspace (scratch): gate pre-activations + hidden state
//   rs            reserve space, persists until the backward pass
//   state_outputs when true, write final hidden/cell state to hy_ptr/cy_ptr
//   bid           true for the reverse (backward-in-time) direction
//   T, N, I, H    sequence length, batch size, input size, hidden size
//   x, hx, cx     input sequence, initial hidden state, initial cell state
//   y             output sequence (both directions share it; see `offset`)
//   w_ptr, b_ptr  weights (wx, wh) and biases (bx, bh)
template<typename DType>
void LstmForwardTrainingSingleLayer(DType* ws,
                                    DType* rs,
                                    bool state_outputs,
                                    bool bid,
                                    const int T,
                                    const int N,
                                    const int I,
                                    const int H,
                                    const Tensor<cpu, 2, DType> &x,
                                    const Tensor<cpu, 2, DType> &hx,
                                    const Tensor<cpu, 2, DType> &cx,
                                    const Tensor<cpu, 3, DType> &y,
                                    DType* w_ptr,
                                    DType* b_ptr,
                                    DType* hy_ptr,
                                    DType* cy_ptr) {
  using namespace mshadow;
  // Weight views: wx is (4H x I), wh is (4H x H); biases are (4 x H) each.
  const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I));
  const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, H));
  const Tensor<cpu, 2, DType> bx(b_ptr, Shape2(4, H));
  const Tensor<cpu, 2, DType> bh(b_ptr + H * 4, Shape2(4, H));
  // yx: input-to-gate pre-activations for ALL timesteps (one big GEMM);
  // yh: hidden-to-gate pre-activations for the current step; h: current h_t.
  const Tensor<cpu, 2, DType> yx_flat(ws, Shape2(T * N, 4 * H));
  const Tensor<cpu, 2, DType> yh_flat(ws + T * N * H * 4, Shape2(N, 4 * H));
  const Tensor<cpu, 4, DType> yx(yx_flat.dptr_, Shape4(T, N, 4, H));
  const Tensor<cpu, 3, DType> yh(yh_flat.dptr_, Shape3(N, 4, H));
  Tensor<cpu, 2, DType> h(yh_flat.dptr_ + N * H * 4, Shape2(N, H));
  // The reverse direction stores its reserve data after the forward one:
  // 5*T*N*H (c + ifgo) + 2*T*N*H (bidirectional y) = 7*T*N*H; this matches
  // r_size = D*T*N*H*6 and y_offset = T*N*H*5 in LstmForwardTraining.
  DType *c_ptr = bid ? rs + T * N * H * 7 : rs;
  Tensor<cpu, 3, DType> c(c_ptr, Shape3(T, N, H));
  Tensor<cpu, 4, DType> ifgo(c_ptr + T * N * H, Shape4(T, N, H, 4));
  const int offset = bid ? H : 0;  // reverse direction fills the second half of y
  const DType alpha = 1.0;
  const DType beta = 0.0;
  const int cell_size = N * H;
  // x * wx^T for all timesteps at once.
  linalg_gemm(x, wx, yx_flat, alpha, beta, false, true);
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  for (int i = 0; i < T; ++i) {
    int t = bid ? T - 1 - i : i;  // walk time backwards for the reverse pass
    // h_{t-1} * wh^T (initial state hx on the first step).
    linalg_gemm(i ? h : hx, wh, yh_flat, alpha, beta, false, true);
    #pragma omp parallel for num_threads(omp_threads)
    for (int jk = 0; jk < cell_size; ++jk) {
      int j = jk / H;  // batch index
      int k = jk % H;  // hidden-unit index
      // Standard LSTM cell: input, forget, candidate, output gates.
      DType it = sigmoid<DType>(yx[t][j][0][k] + yh[j][0][k] + bx[0][k] + bh[0][k]);
      DType ft = sigmoid<DType>(yx[t][j][1][k] + yh[j][1][k] + bx[1][k] + bh[1][k]);
      DType gt = tanh(yx[t][j][2][k] + yh[j][2][k] + bx[2][k] + bh[2][k]);
      DType ot = sigmoid<DType>(yx[t][j][3][k] + yh[j][3][k] + bx[3][k] + bh[3][k]);
      DType ct = (i ? c[i-1][j][k] : cx[j][k]) * ft + it * gt;
      DType ht = ot * tanh(ct);
      h[j][k] = ht;
      // reserve: keep y, cell state and gate activations for the backward pass.
      // Note c/ifgo are indexed by loop step i, not by time t.
      y[t][j][k + offset] = ht;
      c[i][j][k] = ct;
      ifgo[i][j][k][0] = it;
      ifgo[i][j][k][1] = ft;
      ifgo[i][j][k][2] = gt;
      ifgo[i][j][k][3] = ot;
      if (i == T - 1 && state_outputs) {
        hy_ptr[jk] = ht;
        cy_ptr[jk] = ct;
      }
    }
  }
}
// Forward pass (training mode) of a stacked, optionally bidirectional LSTM.
// Layers are processed bottom-up; each layer's output (stored in its slice
// of the reserve space rs) becomes the next layer's input. The final
// layer's output is copied to y_ptr at the end.
//
//   L, D          number of layers, number of directions (1 or 2)
//   T, N, I, H    sequence length, batch size, input size, hidden size
template <typename DType>
void LstmForwardTraining(DType* ws,
                         DType* rs,
                         bool state_outputs,
                         const int L,
                         const int D,
                         const int T,
                         const int N,
                         const int I,
                         const int H,
                         DType* x_ptr,
                         DType* hx_ptr,
                         DType* cx_ptr,
                         DType* w_ptr,
                         DType* b_ptr,
                         DType* y_ptr,
                         DType* hy_ptr,
                         DType* cy_ptr) {
  const int total_layers = D * L;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
  const int b_size = 2 * H * 4;          // bx + bh per direction
  const int r_size = D * T * N * H * 6;  // reserve elements per layer
  const int y_offset = T * N * H * 5;    // y lives after c (1x) + ifgo (4x)
  const int cell_size = N * H;
  int idx = 0;  // state & cell state's idx;
  for (int i = 0; i < L; ++i) {
    const int input_size = i ? H * D : I;  // upper layers consume D*H features
    const int w_size = (input_size + H) * H * 4;  // per-direction weight count
    Tensor<cpu, 2, DType> x(x_ptr, Shape2(T * N, input_size));
    Tensor<cpu, 3, DType> y(rs + y_offset, Shape3(T, N, H * D));
    LstmForwardTrainingSingleLayer<DType>(ws, rs, state_outputs, false, T, N, input_size, H, x,
                                          hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
    if (D == 2) {
      // Advance to the reverse direction's parameters/states and run it;
      // it writes into the second half of y (see `bid` offset).
      w_ptr += w_size;
      b_ptr += b_size;
      ++idx;
      if (state_outputs) {
        hy_ptr += cell_size;
        cy_ptr += cell_size;
      }
      LstmForwardTrainingSingleLayer<DType>(ws, rs, state_outputs, true, T, N, input_size, H, x,
                                            hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
    }
    if (i != L - 1) {
      // Move on to the next layer: its input is this layer's output y.
      w_ptr += w_size;
      b_ptr += b_size;
      x_ptr = y.dptr_;
      rs += r_size;
      ++idx;
      if (state_outputs) {
        hy_ptr += cell_size;
        cy_ptr += cell_size;
      }
    }
  }
  // Copy the top layer's output sequence out of the reserve space.
  memcpy(y_ptr, rs + y_offset, T * N * H * D * sizeof(DType));
}
// Forward pass (inference mode) of ONE direction of ONE LSTM layer.
// Unlike the training variant, no reserve space is kept: only the current
// hidden/cell state (one timestep deep) lives in the workspace.
// Parameter meanings match LstmForwardTrainingSingleLayer.
template<typename DType>
void LstmForwardInferenceSingleLayer(DType* ws,
                                     bool state_outputs,
                                     bool bid,
                                     const int T,
                                     const int N,
                                     const int I,
                                     const int H,
                                     const Tensor<cpu, 2, DType> &x,
                                     const Tensor<cpu, 2, DType> &hx,
                                     const Tensor<cpu, 2, DType> &cx,
                                     const Tensor<cpu, 3, DType> &y,
                                     DType* w_ptr,
                                     DType* b_ptr,
                                     DType* hy_ptr,
                                     DType* cy_ptr) {
  using namespace mshadow;
  // Weight/bias views: wx (4H x I), wh (4H x H), bx/bh (4 x H).
  const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I));
  const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, H));
  const Tensor<cpu, 2, DType> bx(b_ptr, Shape2(4, H));
  const Tensor<cpu, 2, DType> bh(b_ptr + H * 4, Shape2(4, H));
  Tensor<cpu, 2, DType> yx_flat(ws, Shape2(T * N, H * 4));
  Tensor<cpu, 2, DType> yh_flat(ws + T * N * H * 4, Shape2(N, H * 4));
  const Tensor<cpu, 4, DType> yx(yx_flat.dptr_, Shape4(T, N, 4, H));
  const Tensor<cpu, 3, DType> yh(yh_flat.dptr_, Shape3(N, 4, H));
  // Rolling hidden and cell state buffers (single timestep each).
  Tensor<cpu, 2, DType> h(yh_flat.dptr_ + N * H * 4, Shape2(N, H));
  Tensor<cpu, 2, DType> c(h.dptr_ + N * H, Shape2(N, H));
  const int offset = bid ? H : 0;  // reverse direction writes second half of y
  const DType alpha = 1.0;
  const DType beta = 0.0;
  const int cell_size = N * H;
  // x * wx^T for all timesteps at once.
  linalg_gemm(x, wx, yx_flat, alpha, beta, false, true);
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  for (int i = 0; i < T; ++i) {
    int t = bid ? T - 1 - i : i;  // walk time backwards for the reverse pass
    linalg_gemm(i ? h : hx, wh, yh_flat, alpha, beta, false, true);
    #pragma omp parallel for num_threads(omp_threads)
    for (int jk = 0; jk < cell_size; ++jk) {
      int j = jk / H;  // batch index
      int k = jk % H;  // hidden-unit index
      DType it = sigmoid<DType>(yx[t][j][0][k] + yh[j][0][k] + bx[0][k] + bh[0][k]);
      DType ft = sigmoid<DType>(yx[t][j][1][k] + yh[j][1][k] + bx[1][k] + bh[1][k]);
      DType gt = tanh(yx[t][j][2][k] + yh[j][2][k] + bx[2][k] + bh[2][k]);
      DType ot = sigmoid<DType>(yx[t][j][3][k] + yh[j][3][k] + bx[3][k] + bh[3][k]);
      DType ct = (i ? c[j][k] : cx[j][k]) * ft + it * gt;
      DType ht = ot * tanh(ct);
      y[t][j][k + offset] = ht;
      if (i == T - 1 && state_outputs) {
        // Final step: emit states instead of updating the rolling buffers.
        hy_ptr[jk] = ht;
        cy_ptr[jk] = ct;
      } else {
        h[j][k] = ht;
        c[j][k] = ct;
      }
    }
  }
}
// Forward pass (inference mode) of a stacked, optionally bidirectional LSTM.
// For bidirectional stacks, layer outputs ping-pong between a temporary
// buffer inside ws and the final destination y_ptr, arranged (via `flag`'s
// parity-based initialization) so the LAST layer always writes to y_ptr.
template <typename DType>
void LstmForwardInference(DType* ws,
                          bool state_outputs,
                          const int L,
                          const int D,
                          const int T,
                          const int N,
                          const int I,
                          const int H,
                          DType* x_ptr,
                          DType* hx_ptr,
                          DType* cx_ptr,
                          DType* w_ptr,
                          DType* b_ptr,
                          DType* y_ptr,
                          DType* hy_ptr,
                          DType* cy_ptr) {
  const int total_layers = D * L;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
  const int b_size = 2 * H * 4;  // bx + bh per direction
  const int cell_size = N * H;
  // Scratch output buffer placed after the single-layer workspace region.
  DType* y_tmp_ptr = ws + (T + 1) * cell_size * 4 + cell_size * 2;
  DType* y_cur_ptr = y_ptr;
  int idx = 0;  // state & cell state's idx;
  // Parity trick: start with tmp iff L is even, so layer L-1 lands on y_ptr.
  bool flag = L % 2 ? false : true;
  for (int i = 0; i < L; ++i) {
    const int input_size = i ? H * D : I;
    const int w_size = (input_size + H) * H * 4;
    // If bidirectional, need space to save current layer output y.
    if (D == 2) {
      y_cur_ptr = flag ? y_tmp_ptr : y_ptr;
      flag = !flag;
    }
    Tensor<cpu, 2, DType> x(x_ptr, Shape2(T * N, input_size));
    Tensor<cpu, 3, DType> y(y_cur_ptr, Shape3(T, N, H * D));
    LstmForwardInferenceSingleLayer<DType>(ws, state_outputs, false, T, N, input_size, H,
                                           x, hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
    // If bidirectional, then calculate the reverse direction's forward result.
    if (D == 2) {
      w_ptr += w_size;
      b_ptr += b_size;
      ++idx;
      if (state_outputs) {
        hy_ptr += cell_size;
        cy_ptr += cell_size;
      }
      LstmForwardInferenceSingleLayer<DType>(ws, state_outputs, true, T, N, input_size, H,
                                             x, hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
    }
    // Don't need to move pointer in the last layer.
    if (i != L - 1) {
      w_ptr += w_size;
      b_ptr += b_size;
      x_ptr = y_cur_ptr;  // this layer's output feeds the next layer
      ++idx;
      if (state_outputs) {
        hy_ptr += cell_size;
        cy_ptr += cell_size;
      }
    }
  }
}
// Backward pass of ONE direction of ONE LSTM layer.
// Consumes the cell states and gate activations saved in `rs` by
// LstmForwardTrainingSingleLayer and produces input gradients (dx),
// initial-state gradients (dhx, dcx), and parameter gradients (dw, db).
//
//   dhy_ptr/dcy_ptr  incoming gradients w.r.t. the final states (may be NULL)
//   dw_ptr/db_ptr    output weight/bias gradients (dwh/dbx/dbh accumulate
//                    over timesteps; dwx is written by one GEMM at the end)
template <typename DType>
void LstmBackwardSingleLayer(DType* ws,
                             DType* rs,
                             bool bid,
                             const int T,
                             const int N,
                             const int I,
                             const int H,
                             const Tensor<cpu, 2, DType> &x,
                             const Tensor<cpu, 2, DType> &hx,
                             const Tensor<cpu, 2, DType> &cx,
                             const Tensor<cpu, 3, DType> &y,
                             const Tensor<cpu, 3, DType> &dy,
                             const Tensor<cpu, 2, DType> &dx,
                             const Tensor<cpu, 2, DType> &dhx,
                             const Tensor<cpu, 2, DType> &dcx,
                             DType* dhy_ptr,
                             DType* dcy_ptr,
                             DType* w_ptr,
                             DType* dw_ptr,
                             DType* db_ptr) {
  using namespace mshadow;
  const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I));
  const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, H));
  Tensor<cpu, 2, DType> dwx(dw_ptr, Shape2(H * 4, I));
  Tensor<cpu, 2, DType> dwh(dw_ptr + I * H * 4, Shape2(H * 4, H));
  Tensor<cpu, 1, DType> dbx(db_ptr, Shape1(H * 4));
  Tensor<cpu, 1, DType> dbh(dbx.dptr_ + H * 4, Shape1(H * 4));
  // Reserve layout mirrors the forward pass (reverse direction at offset 7*T*N*H).
  DType *c_ptr = bid ? rs + T * N * H * 7 : rs;
  const Tensor<cpu, 3, DType> c(c_ptr, Shape3(T, N, H));
  const Tensor<cpu, 4, DType> ifgo(c_ptr + T * N * H, Shape4(T, N, H, 4));
  // dwh/dbx/dbh accumulate across timesteps, so zero them first.
  memset(dwh.dptr_, 0, H * H * 4 * sizeof(DType));
  memset(dbx.dptr_, 0, H * 4 * sizeof(DType));
  memset(dbh.dptr_, 0, H * 4 * sizeof(DType));
  // difgo: gate-preactivation gradients; dh/dc: running state gradients;
  // htmp: previous-step hidden state reconstructed from y.
  Tensor<cpu, 4, DType> difgo(ws, Shape4(T, N, 4, H));
  Tensor<cpu, 2, DType> dh(ws + T * N * H * 4, Shape2(N, H));
  Tensor<cpu, 2, DType> dc(dh.dptr_ + N * H, Shape2(N, H));
  Tensor<cpu, 2, DType> htmp(dc.dptr_ + N * H, Shape2(N, H));
  const int offset = bid ? H : 0;  // reverse direction reads second half of y/dy
  const DType alpha = 1.0;
  const DType beta0 = 0.0;
  const DType beta1 = 1.0;
  const int cell_size = N * H;
  // Seed the state gradients with the incoming final-state gradients, if any.
  if (dhy_ptr != NULL) {
    memcpy(dh.dptr_, dhy_ptr, cell_size * sizeof(DType));
  }
  if (dcy_ptr != NULL) {
    memcpy(dc.dptr_, dcy_ptr, cell_size * sizeof(DType));
  }
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // Walk the forward loop steps in reverse (i is the forward step index;
  // t is the corresponding time index, flipped when bid).
  for (int i = T - 1; i >= 0; --i) {
    int t = bid ? T - 1 - i : i;
    int tnext = bid ? t + 1 : t - 1;  // time index of the previous forward step
    // At i == 0 the "next" targets are the initial-state gradients.
    const Tensor<cpu, 2, DType>& dhnext = i ? dh : dhx;
    const Tensor<cpu, 2, DType>& dcnext = i ? dc : dcx;
    const Tensor<cpu, 2, DType>& hnext = i ? htmp : hx;
    const Tensor<cpu, 2, DType>& cnext = i ? c[i - 1] : cx;
    #pragma omp parallel for num_threads(omp_threads)
    for (int jk = 0; jk < cell_size; ++jk) {
      int j = jk / H;  // batch index
      int k = jk % H;  // hidden-unit index
      DType tc = tanh(c[i][j][k]);
      DType it = ifgo[i][j][k][0];
      DType ft = ifgo[i][j][k][1];
      DType gt = ifgo[i][j][k][2];
      DType ot = ifgo[i][j][k][3];
      dh[j][k] += dy[t][j][k + offset];
      dc[j][k] += dh[j][k] * ot * (1 - tc * tc);
      // Gate pre-activation gradients (sigmoid'/tanh' folded in).
      difgo[t][j][0][k] = dc[j][k] * gt * it * (1 - it);
      difgo[t][j][1][k] = dc[j][k] * cnext[j][k] * ft * (1 - ft);
      difgo[t][j][2][k] = dc[j][k] * it * (1 - gt * gt);
      difgo[t][j][3][k] = dh[j][k] * tc * ot * (1 - ot);
      dcnext[j][k] = dc[j][k] * ft;  // propagate cell gradient through time
      if (i) {
        // Recover h_{t-1} from the saved output sequence for the dwh GEMM.
        htmp[j][k] = y[tnext][j][k + offset];
      }
    }
    Tensor<cpu, 2, DType> dyh(difgo[t].dptr_, Shape2(N, H * 4));
    linalg_gemm(dyh, wh, dhnext, alpha, beta0, false, false);   // dh_{t-1}
    linalg_gemm(dyh, hnext, dwh, alpha, beta1, true, false);    // accumulate dwh
  }
  Tensor<cpu, 2, DType> dyx(difgo.dptr_, Shape2(T * N, H * 4));
  // For bid, dx already holds the forward direction's contribution: accumulate.
  linalg_gemm(dyx, wx, dx, alpha, bid ? beta1 : beta0, false, false);
  linalg_gemm(dyx, x, dwx, alpha, beta0, true, false);
  const int row = T * N;
  const int col = H * 4;
  // Bias gradients: both bx and bh receive the same column sums of dyx.
  for (int i = 0; i < row; ++i) {
    for (int j = 0; j < col; ++j) {
      dbx[j] += dyx[i][j];
      dbh[j] = dbx[j];
    }
  }
}
// Backward pass of a stacked, optionally bidirectional LSTM.
// Layers are processed top-down; each layer's input gradient (dx) becomes
// the next (lower) layer's output gradient (dy) via dy_ptr = dx.dptr_.
template <typename DType>
void LstmBackward(DType* ws,
                  DType* rs,
                  const int L,
                  const int D,
                  const int T,
                  const int N,
                  const int I,
                  const int H,
                  DType* x_ptr,
                  DType* hx_ptr,
                  DType* cx_ptr,
                  DType* w_ptr,
                  DType* y_ptr,
                  DType* dy_ptr,
                  DType* dhy_ptr,
                  DType* dcy_ptr,
                  DType* dx_ptr,
                  DType* dhx_ptr,
                  DType* dcx_ptr,
                  DType* dw_ptr,
                  DType* db_ptr) {
  const int total_layers = D * L;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> dhx(dhx_ptr, Shape3(total_layers, N, H));
  Tensor<cpu, 3, DType> dcx(dcx_ptr, Shape3(total_layers, N, H));
  const int b_size = 2 * H * 4;            // bx + bh per direction
  const int r_size = D * T * N * H * 6;    // reserve elements per layer
  const int y_offset = T * N * H * 5;      // y lives after c (1x) + ifgo (4x)
  const int w_size1 = (I + H) * H * 4;     // first layer
  const int w_size2 = (D * H + H) * H * 4; // other layers
  const int cell_size = N * H;
  // Scratch buffer for intermediate dx of non-bottom layers.
  DType* dy_tmp_ptr = ws + T * cell_size * 4 + cell_size * 3;
  for (int i = L - 1; i >= 0; --i) {
    const int input_size = i ? H * D : I;
    const int w_size = i ? w_size2 : w_size1;
    int idx = i * D;  // index of this layer's first direction in hx/cx/dhx/dcx
    // Per-layer offsets into the packed weight/bias/reserve/state buffers.
    DType* w_cur_ptr = i ? w_ptr + (w_size1 + (i - 1) * w_size2) * D : w_ptr;
    DType* dw_cur_ptr = i ? dw_ptr + (w_size1 + (i - 1) * w_size2) * D : dw_ptr;
    DType* db_cur_ptr = db_ptr + i * b_size * D;
    DType* rs_cur_ptr = rs + i * r_size;
    DType* dhy_cur_ptr = dhy_ptr ? dhy_ptr + i * cell_size * D : NULL;
    DType* dcy_cur_ptr = dcy_ptr ? dcy_ptr + i * cell_size * D : NULL;
    Tensor<cpu, 3, DType> y(rs_cur_ptr + y_offset, Shape3(T, N, H * D));
    Tensor<cpu, 3, DType> dy(dy_ptr, Shape3(T, N, H * D));
    // Layer input: the previous layer's saved output (or the network input).
    Tensor<cpu, 2, DType> x(i ? y.dptr_ - r_size : x_ptr, Shape2(T * N, input_size));
    // Bottom layer writes the real dx; upper layers write to scratch.
    Tensor<cpu, 2, DType> dx(i ? dy_tmp_ptr : dx_ptr, Shape2(T * N, input_size));
    LstmBackwardSingleLayer<DType>(ws, rs_cur_ptr, false, T, N, input_size, H,
                                   x, hx[idx], cx[idx], y, dy, dx, dhx[idx], dcx[idx],
                                   dhy_cur_ptr, dcy_cur_ptr, w_cur_ptr, dw_cur_ptr, db_cur_ptr);
    if (D == 2) {
      // Reverse direction accumulates into the same dx (bid => beta1 GEMM).
      w_cur_ptr += w_size;
      dw_cur_ptr += w_size;
      db_cur_ptr += b_size;
      ++idx;
      dhy_cur_ptr = dhy_ptr ? dhy_cur_ptr + cell_size : NULL;
      dcy_cur_ptr = dcy_ptr ? dcy_cur_ptr + cell_size : NULL;
      LstmBackwardSingleLayer<DType>(ws, rs_cur_ptr, true, T, N, input_size, H,
                                     x, hx[idx], cx[idx], y, dy, dx, dhx[idx], dcx[idx],
                                     dhy_cur_ptr, dcy_cur_ptr, w_cur_ptr, dw_cur_ptr, db_cur_ptr);
    }
    dy_ptr = dx.dptr_;  // this layer's dx is the next lower layer's dy
  }
}
#endif // MXNET_OPERATOR_RNN_IMPL_H_
|
labels.h | /*
An Experimental Study on Hub Labeling based Shortest Path Algorithms [Experiments and Analyses]
Authors: Ye Li, Leong Hou U, Man Lung Yiu, Ngai Meng Kou
Contact: yb47438@umac.mo
Affiliation: University of Macau
The MIT License (MIT)
Copyright (c) 2016 University of Macau
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#pragma once
#ifndef LABELS_H
#define LABELS_H
#include <limits>
#include <climits>
#include <stdlib.h>
#include <iostream>
#include <sys/time.h>
#include "graph.h"
#include "paras.h"
#include <malloc.h>
#include <xmmintrin.h>
//typedef unsigned __int64 BPSeed;
#include <omp.h>
#include<bitset>
#define numOfVertices SP_Constants::numOfVertices
#define numOfEdges SP_Constants::numOfEdges
#define INF_WEIGHT SP_Constants::INF_WEIGHT
// Hub label of one vertex: parallel arrays of hub vertex ids and the
// distance from this vertex to each hub.
struct index_t {
    vector<NodeID> spt_v;      // hub vertex ids
    vector<EdgeWeight> spt_d;  // distance to the corresponding hub
    // Number of label entries.
    NodeID size() {
        return spt_v.size();
    }
};
// Raw-pointer (packed) variant of index_t used at query time.
struct index_t_p {
    NodeID* spt_v;      // hub vertex ids
    EdgeWeight* spt_d;  // distance to the corresponding hub
}__attribute__((aligned(64))); // Aligned for cache lines;
// Two-level label: full entries plus a compact second set.
// NOTE(review): spt_lv/spt_ld appear to hold a byte-sized hub id and its
// distance for the second level -- confirm against the construction code.
struct two_index_t_p {
    NodeID* spt_v;
    EdgeWeight* spt_d;
    uint8_t* spt_lv;
    EdgeWeight* spt_ld;
}__attribute__((aligned(64))); // Aligned for cache lines;
// Hub label with parent pointers, enabling path retrieval in addition
// to distance queries.
struct index_t_path {
    vector<NodeID> spt_v;
    vector<NodeID> spt_p;//parent nodes
    vector<EdgeWeight> spt_d;
    // Number of label entries.
    NodeID size() {
        return spt_v.size();
    }
};
// Raw-pointer (packed) variant of index_t_path.
struct index_t_path_p {
    NodeID* spt_v;      // hub vertex ids
    NodeID* spt_p;      // parent of this vertex on the path to each hub
    EdgeWeight* spt_d;  // distance to the corresponding hub
};
// Diagnostics returned by an instrumented distance query.
struct query_info {
    NodeID meet_node;    // hub where the two label scans met
    NodeID search_len;   // number of label entries scanned
    double time_cost;    // elapsed query time
    EdgeWeight distance; // resulting shortest-path distance
};
// Label with bit-parallel root data: per-root distances plus two 64-bit
// sets per root (presumably the -1/0 distance-offset vertex sets of
// bit-parallel BFS -- confirm against the construction code).
template<int kNumBitParallelRoots = 50>
struct index_t_bp {
    NodeID* spt_v;
    EdgeWeight* spt_d;
    EdgeWeight bpspt_d[kNumBitParallelRoots];
    uint64_t bpspt_s[kNumBitParallelRoots][2];
}__attribute__((aligned(64))); // Aligned for cache lines;
// A token groups a root vertex with its children; tokens form the
// compressed label representation expanded at query time (see query_p).
struct token_t {
    NodeID* sptc_v; // sptc_v[0] is the root
    EdgeWeight* sptc_d; // |*| = k + 1, sptc_d[0] is the number of children - k
    unsigned char* sptc_fbv; // first-level bit vector
    unsigned char* sptc_sbv; // second-level bit vector
    NodeID* sptc_pathv; // intermediate point for a path
}__attribute__((aligned(64)));
// Token-based hub-label index with optional reverse-direction (r_*)
// structures for directed graphs.
class CLabel {
public:
    token_t* supertokenindex_p;  // per-vertex supertokens (two-level scheme)
    token_t* tokenindex_p;       // flat token array
    // anchor_p[v] is either a vertex id (< numOfVertices) or a token id
    // offset by numOfVertices (see the tid - numOfVertices lookups below).
    NodeID* anchor_p;
    NodeID numOfTokens;
    long total_children;         // sum of (children + 1) over all tokens
    // Reverse-direction counterparts used by the *_d query/save/load paths.
    token_t* r_supertokenindex_p;
    token_t* r_tokenindex_p;
    NodeID* r_anchor_p;
    NodeID r_numOfTokens;
    long r_total_children;
// Serialize anchors and tokens to a binary file.
// Format: numOfVertices, one anchor per vertex, numOfTokens, then per
// token: root id, child count, and (child id, child distance) pairs.
void save_labels(const char* save_filename) {
    ofstream ofs(save_filename, ios::binary | ios::out);
    ofs.write((const char*)&numOfVertices, sizeof(numOfVertices));
    for (NodeID v = 0; v < numOfVertices; ++v) {
        ofs.write((const char*)&anchor_p[v], sizeof(anchor_p[v]));
        // ofs.write((const char*)&index_[v].spt_d[i], sizeof(index_[v].spt_d[i]));
    }
    ofs.write((const char*)&numOfTokens, sizeof(numOfTokens));
    for (NodeID t = 0; t < numOfTokens; ++t) {
        token_t& tt = tokenindex_p[t];
        EdgeWeight tsize = tt.sptc_d[0];  // slot 0 holds the child count
        ofs.write((const char*)&tt.sptc_v[0], sizeof(tt.sptc_v[0]));
        ofs.write((const char*)&tsize, sizeof(tsize));
        for (NodeID c = 0; c < tsize; ++c) {
            ofs.write((const char*)&tt.sptc_v[1 + c], sizeof(tt.sptc_v[1 + c]));
            ofs.write((const char*)&tt.sptc_d[1 + c], sizeof(tt.sptc_d[1 + c]));
        }
    }
    ofs.close();
}
// Serialize anchors and tokens INCLUDING path midpoints (sptc_pathv).
// Same format as save_labels, with a third NodeID written per child.
void save_labels_path(const char* save_filename) {
    ofstream ofs(save_filename, ios::binary | ios::out);
    ofs.write((const char*)&numOfVertices, sizeof(numOfVertices));
    for (NodeID v = 0; v < numOfVertices; ++v) {
        ofs.write((const char*)&anchor_p[v], sizeof(anchor_p[v]));
        // ofs.write((const char*)&index_[v].spt_d[i], sizeof(index_[v].spt_d[i]));
    }
    ofs.write((const char*)&numOfTokens, sizeof(numOfTokens));
    for (NodeID t = 0; t < numOfTokens; ++t) {
        token_t& tt = tokenindex_p[t];
        EdgeWeight tsize = tt.sptc_d[0];  // slot 0 holds the child count
        ofs.write((const char*)&tt.sptc_v[0], sizeof(tt.sptc_v[0]));
        ofs.write((const char*)&tsize, sizeof(tsize));
        for (NodeID c = 0; c < tsize; ++c) {
            ofs.write((const char*)&tt.sptc_v[1 + c], sizeof(tt.sptc_v[1 + c]));
            ofs.write((const char*)&tt.sptc_d[1 + c], sizeof(tt.sptc_d[1 + c]));
            ofs.write((const char*)&tt.sptc_pathv[1 + c], sizeof(tt.sptc_pathv[1 + c]));
        }
    }
    ofs.close();
}
// Serialize both directions (forward and reverse) for directed graphs.
// Format: numOfVertices, forward anchors, reverse anchors, then the
// forward token section followed by the reverse token section.
void save_labels_d(const char* save_filename) {
    ofstream ofs(save_filename, ios::binary | ios::out);
    ofs.write((const char*)&numOfVertices, sizeof(numOfVertices));
    for (NodeID v = 0; v < numOfVertices; ++v) {
        ofs.write((const char*)&anchor_p[v], sizeof(anchor_p[v]));
        // ofs.write((const char*)&index_[v].spt_d[i], sizeof(index_[v].spt_d[i]));
    }
    for (NodeID v = 0; v < numOfVertices; ++v) {
        ofs.write((const char*)&r_anchor_p[v], sizeof(r_anchor_p[v]));
        // ofs.write((const char*)&index_[v].spt_d[i], sizeof(index_[v].spt_d[i]));
    }
    ofs.write((const char*)&numOfTokens, sizeof(numOfTokens));
    for (NodeID t = 0; t < numOfTokens; ++t) {
        token_t& tt = tokenindex_p[t];
        EdgeWeight tsize = tt.sptc_d[0];  // slot 0 holds the child count
        ofs.write((const char*)&tt.sptc_v[0], sizeof(tt.sptc_v[0]));
        ofs.write((const char*)&tsize, sizeof(tsize));
        for (NodeID c = 0; c < tsize; ++c) {
            ofs.write((const char*)&tt.sptc_v[1 + c], sizeof(tt.sptc_v[1 + c]));
            ofs.write((const char*)&tt.sptc_d[1 + c], sizeof(tt.sptc_d[1 + c]));
        }
    }
    ofs.write((const char*)&r_numOfTokens, sizeof(r_numOfTokens));
    for (NodeID t = 0; t < r_numOfTokens; ++t) {
        token_t& tt = r_tokenindex_p[t];
        EdgeWeight tsize = tt.sptc_d[0];
        ofs.write((const char*)&tt.sptc_v[0], sizeof(tt.sptc_v[0]));
        ofs.write((const char*)&tsize, sizeof(tsize));
        for (NodeID c = 0; c < tsize; ++c) {
            ofs.write((const char*)&tt.sptc_v[1 + c], sizeof(tt.sptc_v[1 + c]));
            ofs.write((const char*)&tt.sptc_d[1 + c], sizeof(tt.sptc_d[1 + c]));
        }
    }
    ofs.close();
}
void load_labels_path(const char* load_filename) {
total_children = 0;
tokenindex_p = NULL;
anchor_p = NULL;
ifstream ifs(load_filename);
NodeID isize = 0;
ifs.read((char*)&isize, sizeof(isize));
numOfVertices = isize;
anchor_p = (NodeID*)memalign(64, numOfVertices * sizeof(NodeID));
NodeID anchor_id;
for (NodeID v = 0; v < numOfVertices; ++v) {
ifs.read((char*)&anchor_id, sizeof(anchor_id));
anchor_p[v] = anchor_id;
}
ifs.read((char*)&isize, sizeof(isize));
numOfTokens = isize;
tokenindex_p = (token_t*)memalign(64, numOfTokens * sizeof(token_t));
EdgeWeight csize;
NodeID cid;
EdgeWeight cd;
for (NodeID v = 0; v < numOfTokens; ++v) {
token_t& tt = tokenindex_p[v];
ifs.read((char*)&cid, sizeof(cid));
ifs.read((char*)&csize, sizeof(csize));
tt.sptc_v = (NodeID*)memalign(64, (csize + 1) * sizeof(NodeID));
tt.sptc_d = (EdgeWeight*)memalign(64, (csize + 1 ) * sizeof(EdgeWeight));
total_children += (csize + 1);
tt.sptc_v[0] = cid;
tt.sptc_d[0] = csize;
for (NodeID i = 0; i < csize; ++i) {
ifs.read((char*)&cid, sizeof(cid));
ifs.read((char*)&cd, sizeof(cd));
tt.sptc_v[i + 1] = cid;
tt.sptc_d[i + 1] = cd;
}
}
ifs.close();
}
// Load anchors and tokens written by save_labels().
// Fix: the stream is now opened in binary mode to match the writer
// (save_labels uses ios::binary); text-mode reads would corrupt the data
// on platforms that translate line endings.
void load_labels(const char* load_filename) {
    total_children = 0;
    tokenindex_p = NULL;
    anchor_p = NULL;
    ifstream ifs(load_filename, ios::binary | ios::in);
    NodeID isize = 0;
    ifs.read((char*)&isize, sizeof(isize));
    numOfVertices = isize;
    anchor_p = (NodeID*)memalign(64, numOfVertices * sizeof(NodeID));
    NodeID anchor_id;
    for (NodeID v = 0; v < numOfVertices; ++v) {
        ifs.read((char*)&anchor_id, sizeof(anchor_id));
        anchor_p[v] = anchor_id;
    }
    ifs.read((char*)&isize, sizeof(isize));
    numOfTokens = isize;
    tokenindex_p = (token_t*)memalign(64, numOfTokens * sizeof(token_t));
    EdgeWeight csize;
    NodeID cid;
    EdgeWeight cd;
    for (NodeID v = 0; v < numOfTokens; ++v) {
        token_t& tt = tokenindex_p[v];
        ifs.read((char*)&cid, sizeof(cid));
        ifs.read((char*)&csize, sizeof(csize));
        // NOTE(review): memalign results are unchecked, as elsewhere in
        // this class; a failed allocation would crash below.
        tt.sptc_v = (NodeID*)memalign(64, (csize + 1) * sizeof(NodeID));
        tt.sptc_d = (EdgeWeight*)memalign(64, (csize + 1) * sizeof(EdgeWeight));
        total_children += (csize + 1);
        tt.sptc_v[0] = cid;    // root vertex of the token
        tt.sptc_d[0] = csize;  // slot 0 stores the child count
        for (NodeID i = 0; i < csize; ++i) {
            ifs.read((char*)&cid, sizeof(cid));
            ifs.read((char*)&cd, sizeof(cd));
            tt.sptc_v[i + 1] = cid;
            tt.sptc_d[i + 1] = cd;
        }
    }
    ifs.close();
}
// Load both directions (forward and reverse) written by save_labels_d().
// Fix: the stream is now opened in binary mode to match the writer
// (save_labels_d uses ios::binary); text-mode reads would corrupt the
// data on platforms that translate line endings.
void load_labels_d(const char* load_filename) {
    total_children = 0;
    r_total_children = 0;
    tokenindex_p = NULL;
    anchor_p = NULL;
    r_tokenindex_p = NULL;
    r_anchor_p = NULL;
    ifstream ifs(load_filename, ios::binary | ios::in);
    NodeID isize = 0;
    ifs.read((char*)&isize, sizeof(isize));
    numOfVertices = isize;
    anchor_p = (NodeID*)memalign(64, numOfVertices * sizeof(NodeID));
    r_anchor_p = (NodeID*)memalign(64, numOfVertices * sizeof(NodeID));
    NodeID anchor_id;
    for (NodeID v = 0; v < numOfVertices; ++v) {
        ifs.read((char*)&anchor_id, sizeof(anchor_id));
        anchor_p[v] = anchor_id;
    }
    for (NodeID v = 0; v < numOfVertices; ++v) {
        ifs.read((char*)&anchor_id, sizeof(anchor_id));
        r_anchor_p[v] = anchor_id;
    }
    // Forward token section.
    ifs.read((char*)&isize, sizeof(isize));
    numOfTokens = isize;
    tokenindex_p = (token_t*)memalign(64, numOfTokens * sizeof(token_t));
    EdgeWeight csize;
    NodeID cid;
    EdgeWeight cd;
    for (NodeID v = 0; v < numOfTokens; ++v) {
        token_t& tt = tokenindex_p[v];
        ifs.read((char*)&cid, sizeof(cid));
        ifs.read((char*)&csize, sizeof(csize));
        tt.sptc_v = (NodeID*)memalign(64, (csize + 1) * sizeof(NodeID));
        tt.sptc_d = (EdgeWeight*)memalign(64, (csize + 1) * sizeof(EdgeWeight));
        total_children += (csize + 1);
        tt.sptc_v[0] = cid;    // root vertex of the token
        tt.sptc_d[0] = csize;  // slot 0 stores the child count
        for (NodeID i = 0; i < csize; ++i) {
            ifs.read((char*)&cid, sizeof(cid));
            ifs.read((char*)&cd, sizeof(cd));
            tt.sptc_v[i + 1] = cid;
            tt.sptc_d[i + 1] = cd;
        }
    }
    // Reverse token section.
    ifs.read((char*)&isize, sizeof(isize));
    r_numOfTokens = isize;
    r_tokenindex_p = (token_t*)memalign(64, r_numOfTokens * sizeof(token_t));
    for (NodeID v = 0; v < r_numOfTokens; ++v) {
        token_t& tt = r_tokenindex_p[v];
        ifs.read((char*)&cid, sizeof(cid));
        ifs.read((char*)&csize, sizeof(csize));
        tt.sptc_v = (NodeID*)memalign(64, (csize + 1) * sizeof(NodeID));
        tt.sptc_d = (EdgeWeight*)memalign(64, (csize + 1) * sizeof(EdgeWeight));
        r_total_children += (csize + 1);
        tt.sptc_v[0] = cid;
        tt.sptc_d[0] = csize;
        for (NodeID i = 0; i < csize; ++i) {
            ifs.read((char*)&cid, sizeof(cid));
            ifs.read((char*)&cd, sizeof(cd));
            tt.sptc_v[i + 1] = cid;
            tt.sptc_d[i + 1] = cd;
        }
    }
    cout << "finish loading" << endl;
    ifs.close();
}
// Print token-count statistics for the (undirected/forward) label index.
void print_stat() {
    cout << "Total Token #: " << numOfTokens << endl;
    cout << "Average Children (Super) Token #: " << (double)total_children/(double)numOfTokens << endl;
    //cout << "Maximum Label Size: " << max_size() << endl;
}
// Print token-count statistics for both label directions.
// Fix: the second average line reported the reverse-direction numbers
// (r_total_children / r_numOfTokens) under the forward-direction label,
// making the output ambiguous; it now says "r_Token" like the total line.
void print_stat_d() {
    cout << "Total Token #: " << numOfTokens << endl;
    cout << "Total r_Token #: " << r_numOfTokens << endl;
    cout << "Average Children (Super) Token #: " << (double)total_children/(double)numOfTokens << endl;
    cout << "Average Children (Super) r_Token #: " << (double)r_total_children/(double)r_numOfTokens << endl;
    // cout << "Maximum Label Size: " << max_size() << endl;
}
// Point-to-point distance query over the token-compressed labels.
// `ts` is a fresh timestamp; ts_vec/dis_vec form a timestamped scratch
// table (an entry is valid only while ts_vec[x] == ts), so no clearing
// between queries is needed. que/que_d are a preallocated BFS frontier
// over token ids (ids >= numOfVertices index into tokenindex_p).
// Phase 1 expands s's label into the scratch table; phase 2 expands t's
// label and minimizes dis_vec[hub] + dist(t, hub) over common hubs.
EdgeWeight query_p(NodeID s, NodeID t, long ts, vector<NodeID>& dis_vec, vector<long>& ts_vec, vector<NodeID>& que, vector<EdgeWeight>& que_d) {
    if (s == t) return 0;
    EdgeWeight distance = INF_WEIGHT;
    NodeID anchor_s = anchor_p[s];
    NodeID anchor_t = anchor_p[t];
    NodeID que_t0 = 0, que_t1 = 0, que_h = 0;
    que_d[que_h] = 0;
    que[que_h++] = anchor_s;
    que_t1 = que_h;
    if (anchor_s < numOfVertices) {
        // Anchor is a plain vertex: its only hub is itself at distance 0.
        if (ts_vec[anchor_s] != ts) {
            ts_vec[anchor_s] = ts;
            dis_vec[anchor_s] = 0;
        }
    }
    else {
        // Anchor is a token: level-by-level expansion of nested tokens.
        for (; que_t0 < que_h;) {
            for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) {
                NodeID tid = que[que_i];
                EdgeWeight tdis = que_d[que_i];
                const token_t& token_v = tokenindex_p[tid - numOfVertices];
                _mm_prefetch(&token_v.sptc_v[0], _MM_HINT_T0);
                _mm_prefetch(&token_v.sptc_d[0], _MM_HINT_T0);
                NodeID r = token_v.sptc_v[0];
                EdgeWeight csize = token_v.sptc_d[0];
                // hashing, can be replaced by 1024 linear probing for efficiency.
                if (ts_vec[r] != ts) {
                    ts_vec[r] = ts;
                    dis_vec[r] = tdis;
                }
                for (EdgeWeight i = 0; i < csize; ++i) {
                    NodeID w = token_v.sptc_v[i+1];
                    EdgeWeight w_d = token_v.sptc_d[i+1] + tdis;
                    if (w < numOfVertices) {  // plain hub: record its distance
                        // hashing, can be replaced by 1024 linear probing for efficiency.
                        if (ts_vec[w] != ts) {
                            ts_vec[w] = ts;
                            dis_vec[w] = w_d;
                        }
                    } else {  // nested token: enqueue for the next level
                        que_d[que_h] = w_d;
                        que[que_h++] = w;
                    }
                }
            }
            que_t0 = que_t1;
            que_t1 = que_h;
        }
    }
    // Phase 2: expand t's label, combining with the table built from s.
    que_t0 = 0, que_t1 = 0, que_h = 0;
    que_d[que_h] = 0;
    que[que_h++] = anchor_t;
    if (anchor_t < numOfVertices) {
        if (ts_vec[anchor_t] == ts) {
            EdgeWeight current_dis = dis_vec[anchor_t] + 0;
            if (current_dis < distance)
                distance = current_dis;
        }
    } else {
        que_t1 = que_h;
        for (; que_t0 < que_h;) {
            for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) {
                NodeID tid = que[que_i];
                EdgeWeight tdis = que_d[que_i];
                const token_t& token_v = tokenindex_p[tid - numOfVertices];
                _mm_prefetch(&token_v.sptc_v[0], _MM_HINT_T0);
                _mm_prefetch(&token_v.sptc_d[0], _MM_HINT_T0);
                NodeID r = token_v.sptc_v[0];
                EdgeWeight csize = token_v.sptc_d[0];
                // hashing, can be replaced by 1024 linear probing for efficiency.
                if (ts_vec[r] == ts) {
                    EdgeWeight current_dis = dis_vec[r] + tdis;
                    if (current_dis < distance)
                        distance = current_dis;
                }
                for (EdgeWeight i = 0; i < csize; ++i) {
                    NodeID w = token_v.sptc_v[i+1];
                    EdgeWeight w_d = token_v.sptc_d[i+1] + tdis;
                    if (w < numOfVertices) {
                        // hashing, can be replaced by 1024 linear probing for efficiency.
                        if (ts_vec[w] == ts) {  // common hub: candidate distance
                            EdgeWeight current_dis = dis_vec[w] + w_d;
                            if (current_dis < distance)
                                distance = current_dis;
                        }
                    } else {
                        que_d[que_h] = w_d;
                        que[que_h++] = w;
                    }
                }
            }
            que_t0 = que_t1;
            que_t1 = que_h;
        }
    }
    return distance;
}
// Directed-graph variant of query_p: expands s's FORWARD label
// (anchor_p / tokenindex_p) and t's REVERSE label
// (r_anchor_p / r_tokenindex_p). See query_p for the scratch-table and
// frontier conventions.
EdgeWeight query_p_d(NodeID s, NodeID t, long ts, vector<NodeID>& dis_vec, vector<long>& ts_vec, vector<NodeID>& que, vector<EdgeWeight>& que_d) {
    if (s == t) return 0;
    EdgeWeight distance = INF_WEIGHT;
    NodeID anchor_s = anchor_p[s];
    NodeID anchor_t = r_anchor_p[t];  // reverse-direction anchor for the target
    NodeID que_t0 = 0, que_t1 = 0, que_h = 0;
    que_d[que_h] = 0;
    que[que_h++] = anchor_s;
    que_t1 = que_h;
    if (anchor_s < numOfVertices) {
        // Anchor is a plain vertex: its only hub is itself at distance 0.
        if (ts_vec[anchor_s] != ts) {
            ts_vec[anchor_s] = ts;
            dis_vec[anchor_s] = 0;
        }
    }
    else {
        // Expand s's forward token anchor level by level.
        for (; que_t0 < que_h;) {
            for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) {
                NodeID tid = que[que_i];
                EdgeWeight tdis = que_d[que_i];
                const token_t& token_v = tokenindex_p[tid - numOfVertices];
                _mm_prefetch(&token_v.sptc_v[0], _MM_HINT_T0);
                _mm_prefetch(&token_v.sptc_d[0], _MM_HINT_T0);
                NodeID r = token_v.sptc_v[0];
                EdgeWeight csize = token_v.sptc_d[0];
                // hashing, can be replaced by 1024 linear probing for efficiency.
                if (ts_vec[r] != ts) {
                    ts_vec[r] = ts;
                    dis_vec[r] = tdis;
                }
                for (EdgeWeight i = 0; i < csize; ++i) {
                    NodeID w = token_v.sptc_v[i+1];
                    EdgeWeight w_d = token_v.sptc_d[i+1] + tdis;
                    if (w < numOfVertices) {  // plain hub: record its distance
                        // hashing, can be replaced by 1024 linear probing for efficiency.
                        if (ts_vec[w] != ts) {
                            ts_vec[w] = ts;
                            dis_vec[w] = w_d;
                        }
                    } else {  // nested token: enqueue for the next level
                        que_d[que_h] = w_d;
                        que[que_h++] = w;
                    }
                }
            }
            que_t0 = que_t1;
            que_t1 = que_h;
        }
    }
    // Phase 2: expand t's reverse label and combine.
    que_t0 = 0, que_t1 = 0, que_h = 0;
    que_d[que_h] = 0;
    que[que_h++] = anchor_t;
    if (anchor_t < numOfVertices) {
        if (ts_vec[anchor_t] == ts) {
            EdgeWeight current_dis = dis_vec[anchor_t] + 0;
            if (current_dis < distance)
                distance = current_dis;
        }
    } else {
        que_t1 = que_h;
        for (; que_t0 < que_h;) {
            for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) {
                NodeID tid = que[que_i];
                EdgeWeight tdis = que_d[que_i];
                const token_t& token_v = r_tokenindex_p[tid - numOfVertices];
                _mm_prefetch(&token_v.sptc_v[0], _MM_HINT_T0);
                _mm_prefetch(&token_v.sptc_d[0], _MM_HINT_T0);
                NodeID r = token_v.sptc_v[0];
                EdgeWeight csize = token_v.sptc_d[0];
                // hashing, can be replaced by 1024 linear probing for efficiency.
                if (ts_vec[r] == ts) {
                    EdgeWeight current_dis = dis_vec[r] + tdis;
                    if (current_dis < distance)
                        distance = current_dis;
                }
                for (EdgeWeight i = 0; i < csize; ++i) {
                    NodeID w = token_v.sptc_v[i+1];
                    EdgeWeight w_d = token_v.sptc_d[i+1] + tdis;
                    if (w < numOfVertices) {
                        // hashing, can be replaced by 1024 linear probing for efficiency.
                        if (ts_vec[w] == ts) {  // common hub: candidate distance
                            EdgeWeight current_dis = dis_vec[w] + w_d;
                            if (current_dis < distance)
                                distance = current_dis;
                        }
                    } else {
                        que_d[que_h] = w_d;
                        que[que_h++] = w;
                    }
                }
            }
            que_t0 = que_t1;
            que_t1 = que_h;
        }
    }
    return distance;
}
// Serialise the single-direction two-level token index to a binary file.
// Layout: numOfVertices; per-vertex anchor ids; per-vertex supertokens
// (child count, then child id / weight pairs); numOfTokens; per-token
// records (parent supertoken id, second-level byte count ssize, then -
// unless ssize == 0 - the first-level bitmap of fsize bytes and the
// second-level bitmap of ssize bytes).  fsize is recomputed from the
// parent supertoken on load, so it is not written.
// Read back by load_two_level_labels().
void save_two_level_labels(const char* save_filename) {
ofstream ofs(save_filename, ios::binary | ios::out);
ofs.write((const char*)&numOfVertices, sizeof(numOfVertices));
for (NodeID v = 0; v < numOfVertices; ++v) {
ofs.write((const char*)&anchor_p[v], sizeof(anchor_p[v]));
// ofs.write((const char*)&index_[v].spt_d[i], sizeof(index_[v].spt_d[i]));
}
// Store supertokens
for (NodeID v = 0; v < numOfVertices; ++v) {
token_t& supertoken_v = supertokenindex_p[v];
// sptc_v[0] holds the child count for this supertoken.
NodeID isize = supertoken_v.sptc_v[0];
ofs.write((const char*)&isize, sizeof(isize));
for(NodeID i = 0; i < isize; ++i){
NodeID tid = supertoken_v.sptc_v[i + 1];
EdgeWeight ew = supertoken_v.sptc_d[i + 1];
ofs.write((const char*)&tid, sizeof(tid));
ofs.write((const char*)&ew, sizeof(ew));
}
// ofs.write((const char*)&index_[v].spt_d[i], sizeof(index_[v].spt_d[i]));
}
// Store normal tokens
ofs.write((const char*)&numOfTokens, sizeof(numOfTokens));
for (NodeID t = 0; t < numOfTokens; ++t) {
token_t& tt = tokenindex_p[t];
NodeID sid = tt.sptc_v[0];
EdgeWeight ssize = tt.sptc_d[0];
// First-level bitmap size comes from the parent supertoken's slot 0.
EdgeWeight fsize = supertokenindex_p[sid].sptc_d[0];
ofs.write((const char*)&sid, sizeof(sid));
ofs.write((const char*)&ssize, sizeof(ssize));
if(ssize == 0) continue;
//ofs.write((const char*)&fsize, sizeof(fsize));
//if(t < 10)
// cout << sid << "vs" << fsize << "vs" << ssize << endl;
for(NodeID c = 0; c < fsize; ++c){
//char a = tt.sptc_fbv[c];
//ofs.write((const char*)&a, sizeof(a));
ofs.write((const char*)&tt.sptc_fbv[c], sizeof(tt.sptc_fbv[c]));
// if(t < 10){
// bitset<8> s(tt.sptc_fbv[c]);
// cout << s;
// }
}
//if(t < 10)
// cout << endl;
for(NodeID c = 0; c < ssize; ++c){
//char a = tt.sptc_sbv[c];
//ofs.write((const char*)&a, sizeof(a));
ofs.write((const char*)&tt.sptc_sbv[c], sizeof(tt.sptc_sbv[c]));
// if(t < 10){
// bitset<8> s(tt.sptc_sbv[c]);
// cout << s;
// }
}
//if(t < 10)
// cout << endl;
}
ofs.close();
}
// Load the single-direction two-level token index written by
// save_two_level_labels().
// Layout: vertex count; per-vertex anchor ids; per-vertex supertokens
// (child count + id/weight pairs); token count; per-token records
// (parent supertoken id, second-level byte count, first-level bitmap,
// second-level bitmap).  All arrays are allocated with 64-byte alignment;
// any previous allocations are not freed here.
// Fixed vs. original: the stream is now opened in binary mode to match the
// writer (ios::binary), and unused local byte buffers were removed.
void load_two_level_labels(const char* load_filename) {
    total_children = 0;
    tokenindex_p = NULL;
    anchor_p = NULL;
    supertokenindex_p = NULL;
    ifstream ifs(load_filename, ios::binary | ios::in);
    NodeID isize = 0;
    ifs.read((char*)&isize, sizeof(isize));
    numOfVertices = isize;
    // Per-vertex anchor: a vertex id (< numOfVertices) or a token id.
    anchor_p = (NodeID*)memalign(64, numOfVertices * sizeof(NodeID));
    NodeID anchor_id;
    for (NodeID v = 0; v < numOfVertices; ++v) {
        ifs.read((char*)&anchor_id, sizeof(anchor_id));
        anchor_p[v] = anchor_id;
    }
    // Load supertokens.  Slot 0 of sptc_v holds the child count; slot 0 of
    // sptc_d holds the first-level bitmap size in bytes (one bit per group
    // of 8 children, packed into bytes: ceil(ceil(csize/8)/8)).
    NodeID cid;
    EdgeWeight cd;
    supertokenindex_p = (token_t*)memalign(64, numOfVertices * sizeof(token_t));
    for (NodeID v = 0; v < numOfVertices; ++v) {
        token_t& supertoken_v = supertokenindex_p[v];
        NodeID csize;
        ifs.read((char*)&csize, sizeof(csize));
        supertoken_v.sptc_v = (NodeID*)memalign(64, (csize + 1) * sizeof(NodeID));
        supertoken_v.sptc_d = (EdgeWeight*)memalign(64, (csize + 1) * sizeof(EdgeWeight));
        supertoken_v.sptc_v[0] = csize;
        NodeID intsize = ceil((double)ceil((double)csize / (double)8) / (double)8);
        supertoken_v.sptc_d[0] = intsize;
        total_children += csize;
        for (EdgeWeight i = 0; i < csize; ++i) {
            ifs.read((char*)&cid, sizeof(cid));
            ifs.read((char*)&cd, sizeof(cd));
            supertoken_v.sptc_v[i + 1] = cid;
            supertoken_v.sptc_d[i + 1] = cd;
        }
    }
    cout << "loaded supertokens" << endl;
    cout << "Average Children Super Token #: " << (double)total_children/(double)numOfVertices << endl;
    ifs.read((char*)&isize, sizeof(isize));
    numOfTokens = isize;
    NodeID sid;
    EdgeWeight ssize;
    EdgeWeight fsize;
    cout<< numOfTokens << " tokens in total." << endl;
    // Load normal tokens.  Slot 0 of a token: parent supertoken id and
    // second-level bitmap byte count; the first-level bitmap size is taken
    // from the parent supertoken rather than the file.
    tokenindex_p = (token_t*)memalign(64, numOfTokens * sizeof(token_t));
    for (NodeID v = 0; v < numOfTokens; ++v) {
        token_t& tt = tokenindex_p[v];
        ifs.read((char*)&sid, sizeof(sid));
        ifs.read((char*)&ssize, sizeof(ssize));
        tt.sptc_v = (NodeID*)memalign(64, 1 * sizeof(NodeID));
        tt.sptc_d = (EdgeWeight*)memalign(64, 1 * sizeof(EdgeWeight));
        tt.sptc_v[0] = sid;
        tt.sptc_d[0] = ssize;
        fsize = supertokenindex_p[sid].sptc_d[0];
        if (ssize == 0) continue; // empty token: no bitmaps were stored
        tt.sptc_fbv = (unsigned char*)memalign(64, fsize * sizeof(unsigned char));
        for (NodeID i = 0; i < fsize; ++i) {
            ifs.read((char*)&(tt.sptc_fbv[i]), sizeof(tt.sptc_fbv[i]));
        }
        tt.sptc_sbv = (unsigned char*)memalign(64, ssize * sizeof(unsigned char));
        for (NodeID i = 0; i < ssize; ++i) {
            ifs.read((char*)&(tt.sptc_sbv[i]), sizeof(tt.sptc_sbv[i]));
        }
    }
    cout << "loaded standard tokens" << endl;
    ifs.close();
}
// Serialise the two-level token index together with path information.
// Identical layout to save_two_level_labels() except that each supertoken
// child record additionally carries its sptc_pathv entry (tid, ew, pid).
// Read back by load_two_level_labels_path().
void save_two_level_labels_path(const char* save_filename) {
ofstream ofs(save_filename, ios::binary | ios::out);
ofs.write((const char*)&numOfVertices, sizeof(numOfVertices));
for (NodeID v = 0; v < numOfVertices; ++v) {
ofs.write((const char*)&anchor_p[v], sizeof(anchor_p[v]));
// ofs.write((const char*)&index_[v].spt_d[i], sizeof(index_[v].spt_d[i]));
}
// Store supertokens
for (NodeID v = 0; v < numOfVertices; ++v) {
token_t& supertoken_v = supertokenindex_p[v];
// sptc_v[0] holds the child count for this supertoken.
NodeID isize = supertoken_v.sptc_v[0];
ofs.write((const char*)&isize, sizeof(isize));
for(NodeID i = 0; i < isize; ++i){
NodeID tid = supertoken_v.sptc_v[i + 1];
EdgeWeight ew = supertoken_v.sptc_d[i + 1];
NodeID pid = supertoken_v.sptc_pathv[i + 1];
ofs.write((const char*)&tid, sizeof(tid));
ofs.write((const char*)&ew, sizeof(ew));
ofs.write((const char*)&pid, sizeof(pid));
}
// ofs.write((const char*)&index_[v].spt_d[i], sizeof(index_[v].spt_d[i]));
}
// Store normal tokens
ofs.write((const char*)&numOfTokens, sizeof(numOfTokens));
for (NodeID t = 0; t < numOfTokens; ++t) {
token_t& tt = tokenindex_p[t];
NodeID sid = tt.sptc_v[0];
EdgeWeight ssize = tt.sptc_d[0];
// First-level bitmap size comes from the parent supertoken's slot 0.
EdgeWeight fsize = supertokenindex_p[sid].sptc_d[0];
ofs.write((const char*)&sid, sizeof(sid));
ofs.write((const char*)&ssize, sizeof(ssize));
if(ssize == 0) continue;
//ofs.write((const char*)&fsize, sizeof(fsize));
//if(t < 10)
// cout << sid << "vs" << fsize << "vs" << ssize << endl;
for(NodeID c = 0; c < fsize; ++c){
//char a = tt.sptc_fbv[c];
//ofs.write((const char*)&a, sizeof(a));
ofs.write((const char*)&tt.sptc_fbv[c], sizeof(tt.sptc_fbv[c]));
// if(t < 10){
// bitset<8> s(tt.sptc_fbv[c]);
// cout << s;
// }
}
//if(t < 10)
// cout << endl;
for(NodeID c = 0; c < ssize; ++c){
//char a = tt.sptc_sbv[c];
//ofs.write((const char*)&a, sizeof(a));
ofs.write((const char*)&tt.sptc_sbv[c], sizeof(tt.sptc_sbv[c]));
// if(t < 10){
// bitset<8> s(tt.sptc_sbv[c]);
// cout << s;
// }
}
//if(t < 10)
// cout << endl;
}
ofs.close();
}
// Load the two-level token index with path information, as written by
// save_two_level_labels_path().  Same layout as load_two_level_labels()
// except each supertoken child record also carries a path entry that is
// stored into sptc_pathv; slot 0 of sptc_pathv is set to numOfVertices
// as a sentinel.  All arrays are 64-byte aligned; prior contents are not
// freed here.
// Fixed vs. original: the stream is now opened in binary mode to match the
// writer (ios::binary), and unused local byte buffers were removed.
void load_two_level_labels_path(const char* load_filename) {
    total_children = 0;
    tokenindex_p = NULL;
    anchor_p = NULL;
    supertokenindex_p = NULL;
    ifstream ifs(load_filename, ios::binary | ios::in);
    NodeID isize = 0;
    ifs.read((char*)&isize, sizeof(isize));
    numOfVertices = isize;
    anchor_p = (NodeID*)memalign(64, numOfVertices * sizeof(NodeID));
    NodeID anchor_id;
    for (NodeID v = 0; v < numOfVertices; ++v) {
        ifs.read((char*)&anchor_id, sizeof(anchor_id));
        anchor_p[v] = anchor_id;
    }
    // Load supertokens: slot 0 of sptc_v = child count, slot 0 of sptc_d =
    // first-level bitmap size in bytes (ceil(ceil(csize/8)/8)).
    NodeID cid;
    EdgeWeight cd;
    supertokenindex_p = (token_t*)memalign(64, numOfVertices * sizeof(token_t));
    for (NodeID v = 0; v < numOfVertices; ++v) {
        token_t& supertoken_v = supertokenindex_p[v];
        NodeID csize;
        ifs.read((char*)&csize, sizeof(csize));
        supertoken_v.sptc_v = (NodeID*)memalign(64, (csize + 1) * sizeof(NodeID));
        supertoken_v.sptc_d = (EdgeWeight*)memalign(64, (csize + 1) * sizeof(EdgeWeight));
        supertoken_v.sptc_pathv = (EdgeWeight*)memalign(64, (csize + 1) * sizeof(EdgeWeight));
        supertoken_v.sptc_v[0] = csize;
        NodeID intsize = ceil((double)ceil((double)csize / (double)8) / (double)8);
        supertoken_v.sptc_d[0] = intsize;
        supertoken_v.sptc_pathv[0] = numOfVertices; // sentinel: no path parent
        total_children += csize;
        for (EdgeWeight i = 0; i < csize; ++i) {
            ifs.read((char*)&cid, sizeof(cid));
            ifs.read((char*)&cd, sizeof(cd));
            supertoken_v.sptc_v[i + 1] = cid;
            supertoken_v.sptc_d[i + 1] = cd;
            ifs.read((char*)&cid, sizeof(cid)); // path entry reuses cid buffer
            supertoken_v.sptc_pathv[i + 1] = cid;
        }
    }
    cout << "loaded supertokens" << endl;
    cout << "Average Children Super Token #: " << (double)total_children/(double)numOfVertices << endl;
    ifs.read((char*)&isize, sizeof(isize));
    numOfTokens = isize;
    NodeID sid;
    EdgeWeight ssize;
    EdgeWeight fsize;
    cout<< numOfTokens << " tokens in total." << endl;
    // Load normal tokens; slot 0 stores parent supertoken id and the
    // second-level bitmap byte count.
    tokenindex_p = (token_t*)memalign(64, numOfTokens * sizeof(token_t));
    for (NodeID v = 0; v < numOfTokens; ++v) {
        token_t& tt = tokenindex_p[v];
        ifs.read((char*)&sid, sizeof(sid));
        ifs.read((char*)&ssize, sizeof(ssize));
        tt.sptc_v = (NodeID*)memalign(64, 1 * sizeof(NodeID));
        tt.sptc_d = (EdgeWeight*)memalign(64, 1 * sizeof(EdgeWeight));
        tt.sptc_v[0] = sid;
        tt.sptc_d[0] = ssize;
        fsize = supertokenindex_p[sid].sptc_d[0];
        if (ssize == 0) continue; // empty token: no bitmaps were stored
        tt.sptc_fbv = (unsigned char*)memalign(64, fsize * sizeof(unsigned char));
        for (NodeID i = 0; i < fsize; ++i) {
            ifs.read((char*)&(tt.sptc_fbv[i]), sizeof(tt.sptc_fbv[i]));
        }
        tt.sptc_sbv = (unsigned char*)memalign(64, ssize * sizeof(unsigned char));
        for (NodeID i = 0; i < ssize; ++i) {
            ifs.read((char*)&(tt.sptc_sbv[i]), sizeof(tt.sptc_sbv[i]));
        }
    }
    cout << "loaded standard tokens" << endl;
    ifs.close();
}
// Serialise the directed (forward + reverse) two-level token index.
// Layout: numOfVertices; forward anchors; reverse anchors; forward
// supertokens; reverse supertokens; numOfTokens + forward tokens;
// r_numOfTokens + reverse tokens.  Token records are as in
// save_two_level_labels().  Read back by load_two_level_labels_d().
void save_two_level_labels_d(const char* save_filename) {
ofstream ofs(save_filename, ios::binary | ios::out);
//cout << "1" << endl;
ofs.write((const char*)&numOfVertices, sizeof(numOfVertices));
for (NodeID v = 0; v < numOfVertices; ++v) {
ofs.write((const char*)&anchor_p[v], sizeof(anchor_p[v]));
// ofs.write((const char*)&index_[v].spt_d[i], sizeof(index_[v].spt_d[i]));
}
for (NodeID v = 0; v < numOfVertices; ++v) {
ofs.write((const char*)&r_anchor_p[v], sizeof(r_anchor_p[v]));
// ofs.write((const char*)&index_[v].spt_d[i], sizeof(index_[v].spt_d[i]));
}
// Store supertokens
// cout << "2" << endl;
for (NodeID v = 0; v < numOfVertices; ++v) {
token_t& supertoken_v = supertokenindex_p[v];
// sptc_v[0] holds the child count.
NodeID isize = supertoken_v.sptc_v[0];
ofs.write((const char*)&isize, sizeof(isize));
for(NodeID i = 0; i < isize; ++i){
NodeID tid = supertoken_v.sptc_v[i + 1];
EdgeWeight ew = supertoken_v.sptc_d[i + 1];
ofs.write((const char*)&tid, sizeof(tid));
ofs.write((const char*)&ew, sizeof(ew));
}
}
// Reverse-direction supertokens, same record shape.
for (NodeID v = 0; v < numOfVertices; ++v) {
token_t& supertoken_v = r_supertokenindex_p[v];
NodeID isize = supertoken_v.sptc_v[0];
ofs.write((const char*)&isize, sizeof(isize));
for(NodeID i = 0; i < isize; ++i){
NodeID tid = supertoken_v.sptc_v[i + 1];
EdgeWeight ew = supertoken_v.sptc_d[i + 1];
ofs.write((const char*)&tid, sizeof(tid));
ofs.write((const char*)&ew, sizeof(ew));
}
}
// Store normal tokens
//cout << "3" << endl;
ofs.write((const char*)&numOfTokens, sizeof(numOfTokens));
for (NodeID t = 0; t < numOfTokens; ++t) {
// cout << "31:" << t << endl;
token_t& tt = tokenindex_p[t];
NodeID sid = tt.sptc_v[0];
EdgeWeight ssize = tt.sptc_d[0];
// First-level bitmap size is derived from the parent supertoken, not stored.
EdgeWeight fsize = supertokenindex_p[sid].sptc_d[0];
ofs.write((const char*)&sid, sizeof(sid));
ofs.write((const char*)&ssize, sizeof(ssize));
// cout << "32:" << t << endl;
if(ssize == 0) continue;
//ofs.write((const char*)&fsize, sizeof(fsize));
//if(t < 10)
// cout << sid << "vs" << fsize << "vs" << ssize << endl;
// cout << "33:" << t << endl;
for(NodeID c = 0; c < fsize; ++c){
//char a = tt.sptc_fbv[c];
//ofs.write((const char*)&a, sizeof(a));
ofs.write((const char*)&tt.sptc_fbv[c], sizeof(tt.sptc_fbv[c]));
// if(t < 10){
// bitset<8> s(tt.sptc_fbv[c]);
// cout << s;
// }
}
//if(t < 10)
// cout << endl;
// cout << "34:" << t << endl;
for(NodeID c = 0; c < ssize; ++c){
//char a = tt.sptc_sbv[c];
//ofs.write((const char*)&a, sizeof(a));
ofs.write((const char*)&tt.sptc_sbv[c], sizeof(tt.sptc_sbv[c]));
// if(t < 10){
// bitset<8> s(tt.sptc_sbv[c]);
// cout << s;
// }
}
//if(t < 10)
// cout << endl;
}
//cout << "4" << endl;
// Reverse-direction tokens, same record shape as above.
ofs.write((const char*)&r_numOfTokens, sizeof(r_numOfTokens));
for (NodeID t = 0; t < r_numOfTokens; ++t) {
//cout << "41:" << t << endl;
token_t& tt = r_tokenindex_p[t];
NodeID sid = tt.sptc_v[0];
EdgeWeight ssize = tt.sptc_d[0];
EdgeWeight fsize = r_supertokenindex_p[sid].sptc_d[0];
ofs.write((const char*)&sid, sizeof(sid));
ofs.write((const char*)&ssize, sizeof(ssize));
if(ssize == 0) continue;
//ofs.write((const char*)&fsize, sizeof(fsize));
//if(t < 10)
// cout << sid << "vs" << fsize << "vs" << ssize << endl;
//cout << "42:" << t << "," << fsize << endl;
for(NodeID c = 0; c < fsize; ++c){
ofs.write((const char*)&tt.sptc_fbv[c], sizeof(tt.sptc_fbv[c]));
}
//cout << "43:" << t << "," << ssize << endl;
for(NodeID c = 0; c < ssize; ++c){
ofs.write((const char*)&tt.sptc_sbv[c], sizeof(tt.sptc_sbv[c]));
}
}
ofs.close();
}
// Load the directed (forward + reverse) two-level token index produced by
// save_two_level_labels_d().  Layout mirrors the save routine: vertex
// count, forward anchors, reverse anchors, forward supertokens, reverse
// supertokens, forward token count + tokens, reverse token count + tokens.
// All arrays are allocated with 64-byte alignment; prior contents are not
// freed here.
// Fixed vs. original: the stream is now opened in binary mode to match the
// writer (ios::binary), and unused local byte buffers were removed.
void load_two_level_labels_d(const char* load_filename) {
    total_children = 0;
    tokenindex_p = NULL;
    anchor_p = NULL;
    supertokenindex_p = NULL;
    r_total_children = 0;
    r_tokenindex_p = NULL;
    r_anchor_p = NULL;
    r_supertokenindex_p = NULL;
    ifstream ifs(load_filename, ios::binary | ios::in);
    NodeID isize = 0;
    ifs.read((char*)&isize, sizeof(isize));
    numOfVertices = isize;
    anchor_p = (NodeID*)memalign(64, numOfVertices * sizeof(NodeID));
    r_anchor_p = (NodeID*)memalign(64, numOfVertices * sizeof(NodeID));
    NodeID anchor_id;
    for (NodeID v = 0; v < numOfVertices; ++v) {
        ifs.read((char*)&anchor_id, sizeof(anchor_id));
        anchor_p[v] = anchor_id;
    }
    for (NodeID v = 0; v < numOfVertices; ++v) {
        ifs.read((char*)&anchor_id, sizeof(anchor_id));
        r_anchor_p[v] = anchor_id;
    }
    // Load forward supertokens: slot 0 of sptc_v = child count, slot 0 of
    // sptc_d = first-level bitmap size in bytes (ceil(ceil(csize/8)/8)).
    NodeID cid;
    EdgeWeight cd;
    supertokenindex_p = (token_t*)memalign(64, numOfVertices * sizeof(token_t));
    for (NodeID v = 0; v < numOfVertices; ++v) {
        token_t& supertoken_v = supertokenindex_p[v];
        NodeID csize;
        ifs.read((char*)&csize, sizeof(csize));
        supertoken_v.sptc_v = (NodeID*)memalign(64, (csize + 1) * sizeof(NodeID));
        supertoken_v.sptc_d = (EdgeWeight*)memalign(64, (csize + 1) * sizeof(EdgeWeight));
        supertoken_v.sptc_v[0] = csize;
        NodeID intsize = ceil((double)ceil((double)csize / (double)8) / (double)8);
        supertoken_v.sptc_d[0] = intsize;
        total_children += csize;
        for (EdgeWeight i = 0; i < csize; ++i) {
            ifs.read((char*)&cid, sizeof(cid));
            ifs.read((char*)&cd, sizeof(cd));
            supertoken_v.sptc_v[i + 1] = cid;
            supertoken_v.sptc_d[i + 1] = cd;
        }
    }
    // Load reverse supertokens (same record layout).
    r_supertokenindex_p = (token_t*)memalign(64, numOfVertices * sizeof(token_t));
    for (NodeID v = 0; v < numOfVertices; ++v) {
        token_t& supertoken_v = r_supertokenindex_p[v];
        NodeID csize;
        ifs.read((char*)&csize, sizeof(csize));
        supertoken_v.sptc_v = (NodeID*)memalign(64, (csize + 1) * sizeof(NodeID));
        supertoken_v.sptc_d = (EdgeWeight*)memalign(64, (csize + 1) * sizeof(EdgeWeight));
        supertoken_v.sptc_v[0] = csize;
        NodeID intsize = ceil((double)ceil((double)csize / (double)8) / (double)8);
        supertoken_v.sptc_d[0] = intsize;
        r_total_children += csize;
        for (EdgeWeight i = 0; i < csize; ++i) {
            ifs.read((char*)&cid, sizeof(cid));
            ifs.read((char*)&cd, sizeof(cd));
            supertoken_v.sptc_v[i + 1] = cid;
            supertoken_v.sptc_d[i + 1] = cd;
        }
    }
    cout << "loaded supertokens" << endl;
    cout << "Average Children Super Token #: " << (double)total_children/(double)numOfVertices << endl;
    cout << "Average Children Super Token #: " << (double)r_total_children/(double)numOfVertices << endl;
    ifs.read((char*)&isize, sizeof(isize));
    numOfTokens = isize;
    NodeID sid;
    EdgeWeight ssize;
    EdgeWeight fsize;
    cout<< numOfTokens << " tokens in total." << endl;
    // Load forward tokens: slot 0 stores the parent supertoken id and the
    // second-level bitmap byte count; the first-level bitmap size comes
    // from the parent supertoken rather than the file.
    tokenindex_p = (token_t*)memalign(64, numOfTokens * sizeof(token_t));
    for (NodeID v = 0; v < numOfTokens; ++v) {
        token_t& tt = tokenindex_p[v];
        ifs.read((char*)&sid, sizeof(sid));
        ifs.read((char*)&ssize, sizeof(ssize));
        tt.sptc_v = (NodeID*)memalign(64, 1 * sizeof(NodeID));
        tt.sptc_d = (EdgeWeight*)memalign(64, 1 * sizeof(EdgeWeight));
        tt.sptc_v[0] = sid;
        tt.sptc_d[0] = ssize;
        fsize = supertokenindex_p[sid].sptc_d[0];
        if (ssize == 0) continue; // empty token: no bitmaps were stored
        tt.sptc_fbv = (unsigned char*)memalign(64, fsize * sizeof(unsigned char));
        for (NodeID i = 0; i < fsize; ++i) {
            ifs.read((char*)&(tt.sptc_fbv[i]), sizeof(tt.sptc_fbv[i]));
        }
        tt.sptc_sbv = (unsigned char*)memalign(64, ssize * sizeof(unsigned char));
        for (NodeID i = 0; i < ssize; ++i) {
            ifs.read((char*)&(tt.sptc_sbv[i]), sizeof(tt.sptc_sbv[i]));
        }
    }
    // Load reverse tokens (same record layout).
    ifs.read((char*)&isize, sizeof(isize));
    r_numOfTokens = isize;
    cout<< r_numOfTokens << " tokens in total." << endl;
    r_tokenindex_p = (token_t*)memalign(64, r_numOfTokens * sizeof(token_t));
    for (NodeID v = 0; v < r_numOfTokens; ++v) {
        token_t& tt = r_tokenindex_p[v];
        ifs.read((char*)&sid, sizeof(sid));
        ifs.read((char*)&ssize, sizeof(ssize));
        tt.sptc_v = (NodeID*)memalign(64, 1 * sizeof(NodeID));
        tt.sptc_d = (EdgeWeight*)memalign(64, 1 * sizeof(EdgeWeight));
        tt.sptc_v[0] = sid;
        tt.sptc_d[0] = ssize;
        fsize = r_supertokenindex_p[sid].sptc_d[0];
        if (ssize == 0) continue;
        tt.sptc_fbv = (unsigned char*)memalign(64, fsize * sizeof(unsigned char));
        for (NodeID i = 0; i < fsize; ++i) {
            ifs.read((char*)&(tt.sptc_fbv[i]), sizeof(tt.sptc_fbv[i]));
        }
        tt.sptc_sbv = (unsigned char*)memalign(64, ssize * sizeof(unsigned char));
        for (NodeID i = 0; i < ssize; ++i) {
            ifs.read((char*)&(tt.sptc_sbv[i]), sizeof(tt.sptc_sbv[i]));
        }
    }
    cout << "loaded standard tokens" << endl;
    ifs.close();
}
// Undirected two-hop query over the two-level (supertoken + bitmap) index.
// Each token stores its parent supertoken id, a first-level bitmap with one
// bit per group of 8 supertoken children, and for every set first-level bit
// one second-level byte whose bits select individual children.  A set bit
// triple (i, j, k) addresses supertoken child (i*8 + j)*8 + k.
// Forward phase stamps reachable vertices from s's anchor in ts_vec/dis_vec
// (per-query timestamp ts); backward phase from t's anchor combines the
// stamps to minimise d(s,r)+d(r,t).  Returns INF_WEIGHT if no hub is shared.
EdgeWeight query_p_two_level(NodeID s, NodeID t, long ts, vector<NodeID>& dis_vec, vector<long>& ts_vec, vector<NodeID>& que, vector<EdgeWeight>& que_d) {
if(s==t) return 0;
EdgeWeight distance = INF_WEIGHT;
NodeID anchor_s = anchor_p[s];
NodeID anchor_t = anchor_p[t];
// que_t0..que_t1 delimits the current BFS level of token ids.
NodeID que_t0 = 0, que_t1 = 0, que_h = 0;
que_d[que_h] = 0;
que[que_h++] = anchor_s;
que_t1 = que_h;
if(anchor_s < numOfVertices){
// Anchor is a plain vertex: stamp it directly at distance 0.
if(ts_vec[anchor_s] != ts){
ts_vec[anchor_s] = ts;
dis_vec[anchor_s] = 0;
}
}
else{
for (; que_t0 < que_h;) {
for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) {
NodeID tid = que[que_i];
EdgeWeight tdis = que_d[que_i];
const token_t& token_v = tokenindex_p[tid - numOfVertices];
_mm_prefetch(&token_v.sptc_v[0], _MM_HINT_T0);
_mm_prefetch(&token_v.sptc_d[0], _MM_HINT_T0);
// Token slot 0: hub id r and second-level byte count ssize; the parent
// supertoken's slot 0 gives the first-level bitmap byte count fsize.
NodeID r = token_v.sptc_v[0];
EdgeWeight ssize = token_v.sptc_d[0];
token_t& supertoken_r = supertokenindex_p[r];
EdgeWeight fsize = supertoken_r.sptc_d[0];
// hashing, can be replaced by 1024 linear probing for efficiency.
if(ts_vec[r] != ts){
ts_vec[r] = ts;
dis_vec[r] = tdis;
}
// Decode the two-level bitmap; spos walks the second-level bytes.
EdgeWeight spos = 0;
for(EdgeWeight i = 0; i < fsize; ++i){
unsigned char fmask = token_v.sptc_fbv[i];
bitset<8> fbs(fmask);
for(NodeID j = 0; j < 8; ++j){
if(fbs[ 7 - j]){
unsigned char smask = token_v.sptc_sbv[spos++];
bitset<8> sbs(smask);
for(NodeID k = 0; k < 8; ++k){
if(sbs[7 - k]){
NodeID w = supertoken_r.sptc_v[ (i * 8 + j) * 8 + k + 1];
EdgeWeight w_d = supertoken_r.sptc_d[(i * 8 + j) * 8 + k + 1] + tdis;
if( w < numOfVertices){// hashing, can be replaced by 1024 linear probing for efficiency.
if(ts_vec[w] != ts){
ts_vec[w] = ts;
dis_vec[w] = w_d;
}
}else{
// Child is itself a token: expand on the next level.
que_d[que_h] = w_d;
que[que_h++] = w;
}
}
}
//if(spos == ssize) break;
}
}
//if(spos == ssize) break;
}
}
que_t0 = que_t1;
que_t1 = que_h;
}
}
// Backward phase from t's anchor; combine with the forward stamps.
que_t0 = 0, que_t1 = 0, que_h = 0;
que_d[que_h] = 0;
que[que_h++] = anchor_t;
if(anchor_t < numOfVertices){
if(ts_vec[anchor_t] == ts){
EdgeWeight current_dis = dis_vec[anchor_t] + 0;
if(current_dis < distance)
distance = current_dis;
}
}else{
que_t1 = que_h;
for (; que_t0 < que_h;) {
for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) {
NodeID tid = que[que_i];
EdgeWeight tdis = que_d[que_i];
const token_t& token_v = tokenindex_p[tid - numOfVertices];
_mm_prefetch(&token_v.sptc_v[0], _MM_HINT_T0);
_mm_prefetch(&token_v.sptc_d[0], _MM_HINT_T0);
NodeID r = token_v.sptc_v[0];
EdgeWeight ssize = token_v.sptc_d[0];
token_t& supertoken_r = supertokenindex_p[r];
EdgeWeight fsize = supertoken_r.sptc_d[0];
// hashing, can be replaced by 1024 linear probing for efficiency.
if(ts_vec[r] == ts){
EdgeWeight current_dis = dis_vec[r] + tdis;
if(current_dis < distance)
distance = current_dis;
}
EdgeWeight spos = 0;
for(EdgeWeight i = 0; i < fsize; ++i){
unsigned char fmask = token_v.sptc_fbv[i];
bitset<8> fbs(fmask);
for(NodeID j = 0; j < 8; ++j){
if(fbs[7 - j]){
unsigned char smask = token_v.sptc_sbv[spos++];
bitset<8> sbs(smask);
for(NodeID k = 0; k < 8; ++k){
if(sbs[7 - k]){
NodeID w = supertoken_r.sptc_v[ (i * 8 + j) * 8 + k + 1];
EdgeWeight w_d = supertoken_r.sptc_d[(i * 8 + j) * 8 + k + 1] + tdis;
if( w < numOfVertices){// hashing, can be replaced by 1024 linear probing for efficiency.
if(ts_vec[w] == ts){
EdgeWeight current_dis = dis_vec[w] + w_d;
if(current_dis < distance)
distance = current_dis;
}
}else{
que_d[que_h] = w_d;
que[que_h++] = w;
}
}
}
//if(spos == ssize) break;
}
}
//if(spos == ssize) break;
}
}
que_t0 = que_t1;
que_t1 = que_h;
}
}
return distance;
}
// Directed variant of query_p_two_level(): the forward phase uses the
// forward structures (anchor_p / tokenindex_p / supertokenindex_p) while
// the backward phase uses the reverse structures (r_anchor_p /
// r_tokenindex_p / r_supertokenindex_p).  Bitmap decoding is identical:
// a set bit triple (i, j, k) addresses supertoken child (i*8 + j)*8 + k.
// ts is a per-query timestamp over ts_vec/dis_vec; que/que_d are scratch
// queues.  Returns INF_WEIGHT if no hub is shared.
EdgeWeight query_p_two_level_d(NodeID s, NodeID t, long ts, vector<NodeID>& dis_vec, vector<long>& ts_vec, vector<NodeID>& que, vector<EdgeWeight>& que_d) {
if(s==t) return 0;
EdgeWeight distance = INF_WEIGHT;
NodeID anchor_s = anchor_p[s];
NodeID anchor_t = r_anchor_p[t];
NodeID que_t0 = 0, que_t1 = 0, que_h = 0;
que_d[que_h] = 0;
que[que_h++] = anchor_s;
que_t1 = que_h;
if(anchor_s < numOfVertices){
// Anchor is a plain vertex: stamp it directly at distance 0.
if(ts_vec[anchor_s] != ts){
ts_vec[anchor_s] = ts;
dis_vec[anchor_s] = 0;
}
}
else{
// Forward expansion over forward tokens/supertokens.
for (; que_t0 < que_h;) {
for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) {
NodeID tid = que[que_i];
EdgeWeight tdis = que_d[que_i];
const token_t& token_v = tokenindex_p[tid - numOfVertices];
_mm_prefetch(&token_v.sptc_v[0], _MM_HINT_T0);
_mm_prefetch(&token_v.sptc_d[0], _MM_HINT_T0);
NodeID r = token_v.sptc_v[0];
EdgeWeight ssize = token_v.sptc_d[0];
token_t& supertoken_r = supertokenindex_p[r];
EdgeWeight fsize = supertoken_r.sptc_d[0];
// hashing, can be replaced by 1024 linear probing for efficiency.
if(ts_vec[r] != ts){
ts_vec[r] = ts;
dis_vec[r] = tdis;
}
// spos walks the second-level bytes, one per set first-level bit.
EdgeWeight spos = 0;
for(EdgeWeight i = 0; i < fsize; ++i){
unsigned char fmask = token_v.sptc_fbv[i];
bitset<8> fbs(fmask);
for(NodeID j = 0; j < 8; ++j){
if(fbs[ 7 - j]){
unsigned char smask = token_v.sptc_sbv[spos++];
bitset<8> sbs(smask);
for(NodeID k = 0; k < 8; ++k){
if(sbs[7 - k]){
NodeID w = supertoken_r.sptc_v[ (i * 8 + j) * 8 + k + 1];
EdgeWeight w_d = supertoken_r.sptc_d[(i * 8 + j) * 8 + k + 1] + tdis;
if( w < numOfVertices){// hashing, can be replaced by 1024 linear probing for efficiency.
if(ts_vec[w] != ts){
ts_vec[w] = ts;
dis_vec[w] = w_d;
}
}else{
que_d[que_h] = w_d;
que[que_h++] = w;
}
}
}
//if(spos == ssize) break;
}
}
//if(spos == ssize) break;
}
}
que_t0 = que_t1;
que_t1 = que_h;
}
}
// Backward phase over reverse tokens/supertokens from t's reverse anchor.
que_t0 = 0, que_t1 = 0, que_h = 0;
que_d[que_h] = 0;
que[que_h++] = anchor_t;
if(anchor_t < numOfVertices){
if(ts_vec[anchor_t] == ts){
EdgeWeight current_dis = dis_vec[anchor_t] + 0;
if(current_dis < distance)
distance = current_dis;
}
}else{
que_t1 = que_h;
for (; que_t0 < que_h;) {
for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) {
NodeID tid = que[que_i];
EdgeWeight tdis = que_d[que_i];
const token_t& token_v = r_tokenindex_p[tid - numOfVertices];
_mm_prefetch(&token_v.sptc_v[0], _MM_HINT_T0);
_mm_prefetch(&token_v.sptc_d[0], _MM_HINT_T0);
NodeID r = token_v.sptc_v[0];
EdgeWeight ssize = token_v.sptc_d[0];
token_t& supertoken_r = r_supertokenindex_p[r];
EdgeWeight fsize = supertoken_r.sptc_d[0];
// hashing, can be replaced by 1024 linear probing for efficiency.
if(ts_vec[r] == ts){
EdgeWeight current_dis = dis_vec[r] + tdis;
if(current_dis < distance)
distance = current_dis;
}
EdgeWeight spos = 0;
for(EdgeWeight i = 0; i < fsize; ++i){
unsigned char fmask = token_v.sptc_fbv[i];
bitset<8> fbs(fmask);
for(NodeID j = 0; j < 8; ++j){
if(fbs[7 - j]){
unsigned char smask = token_v.sptc_sbv[spos++];
bitset<8> sbs(smask);
for(NodeID k = 0; k < 8; ++k){
if(sbs[7 - k]){
NodeID w = supertoken_r.sptc_v[ (i * 8 + j) * 8 + k + 1];
EdgeWeight w_d = supertoken_r.sptc_d[(i * 8 + j) * 8 + k + 1] + tdis;
if( w < numOfVertices){// hashing, can be replaced by 1024 linear probing for efficiency.
if(ts_vec[w] == ts){
EdgeWeight current_dis = dis_vec[w] + w_d;
if(current_dis < distance)
distance = current_dis;
}
}else{
que_d[que_h] = w_d;
que[que_h++] = w;
}
}
}
//if(spos == ssize) break;
}
}
//if(spos == ssize) break;
}
}
que_t0 = que_t1;
que_t1 = que_h;
}
}
return distance;
}
};
class Label {
public:
vector<index_t> index_;
index_t_p* index_p;
two_index_t_p* two_index_p;
// Wall-clock time in seconds with microsecond resolution (gettimeofday).
double GetCurrentTimeSec() {
    struct timeval now;
    gettimeofday(&now, NULL);
    double seconds = now.tv_sec;
    seconds += now.tv_usec * 1e-6;
    return seconds;
}
// Size the per-vertex label table to the current global vertex count.
Label() {
index_.resize(numOfVertices);
}
// Release all label storage (delegates to Free()).
~Label() {
Free();
}
// s-t distance query over the flat pointer-array labels (index_p).
// Both label lists are sorted by hub id and terminated by the sentinel
// value numOfVertices; a standard sorted-merge intersection returns
// min over common hubs r of d(s,r) + d(r,t), or INF_WEIGHT when the
// lists share no hub.  (A stale commented-out duplicate of this loop
// was removed.)
EdgeWeight query_p(NodeID s, NodeID t) {
    EdgeWeight distance = INF_WEIGHT;
    const index_t_p &idx_s = index_p[s];
    const index_t_p &idx_t = index_p[t];
    _mm_prefetch(&idx_s.spt_v[0], _MM_HINT_T0);
    _mm_prefetch(&idx_t.spt_v[0], _MM_HINT_T0);
    _mm_prefetch(&idx_s.spt_d[0], _MM_HINT_T0);
    _mm_prefetch(&idx_t.spt_d[0], _MM_HINT_T0);
    for (int i = 0, j = 0; ; ) {
        NodeID v1 = idx_s.spt_v[i], v2 = idx_t.spt_v[j];
        if (v1 == numOfVertices) break; // Sentinel
        if (v1 == v2) {
            EdgeWeight td = idx_s.spt_d[i] + idx_t.spt_d[j];
            if (td < distance) distance = td;
            ++i;
            ++j;
        }
        else {
            // Advance whichever list currently has the smaller hub id.
            i += v1 < v2 ? 1 : 0;
            j += v1 > v2 ? 1 : 0;
        }
    }
    return distance;
}
// s-t distance over the two-part labels: first merge the byte-compressed
// "local" label lists (spt_lv/spt_ld, uint8_t hub ids, sentinel UCHAR_MAX),
// then the full label lists (spt_v/spt_d, sentinel numOfVertices), and
// return the smaller of the two minima.  Returns INF_WEIGHT if neither
// pass finds a common hub.  The two passes run sequentially; see
// two_query_p_parallel for the OpenMP version.
EdgeWeight two_query_p_sequential(NodeID s, NodeID t) {
EdgeWeight distance = INF_WEIGHT;
EdgeWeight ldistance = INF_WEIGHT;
const two_index_t_p &idx_s = two_index_p[s];
const two_index_t_p &idx_t = two_index_p[t];
_mm_prefetch(&idx_s.spt_lv[0], _MM_HINT_T0);
_mm_prefetch(&idx_t.spt_lv[0], _MM_HINT_T0);
_mm_prefetch(&idx_s.spt_ld[0], _MM_HINT_T0);
_mm_prefetch(&idx_t.spt_ld[0], _MM_HINT_T0);
// Pass 1: compressed local labels (uint8_t ids, UCHAR_MAX sentinel).
for (uint8_t i = 0, j = 0; ; ) {
uint8_t uv8_1 = idx_s.spt_lv[i], uv8_2 = idx_t.spt_lv[j];
if (uv8_1 == UCHAR_MAX) break; // Sentinel
if (uv8_1 == uv8_2) {
EdgeWeight td = idx_s.spt_ld[i] + idx_t.spt_ld[j];
if (td < ldistance) ldistance = td;
++i;
++j;
}
else {
i += uv8_1 < uv8_2 ? 1 : 0;
j += uv8_1 > uv8_2 ? 1 : 0;
}
}
_mm_prefetch(&idx_s.spt_v[0], _MM_HINT_T0);
_mm_prefetch(&idx_t.spt_v[0], _MM_HINT_T0);
_mm_prefetch(&idx_s.spt_d[0], _MM_HINT_T0);
_mm_prefetch(&idx_t.spt_d[0], _MM_HINT_T0);
// Pass 2: full labels (NodeID ids, numOfVertices sentinel).
for (int i = 0, j = 0; ; ) {
NodeID v1 = idx_s.spt_v[i], v2 = idx_t.spt_v[j];
if (v1 == numOfVertices) break; // Sentinel
if (v1 == v2) {
EdgeWeight td = idx_s.spt_d[i] + idx_t.spt_d[j];
if (td < distance) distance = td;
++i;
++j;
}
else {
i += v1 < v2 ? 1 : 0;
j += v1 > v2 ? 1 : 0;
}
}
if(distance < ldistance)
return distance;
else
return ldistance;
}
// Same computation as two_query_p_sequential(), but the two label merges
// run as concurrent OpenMP sections.  Each section writes only its own
// accumulator (distance vs. ldistance), so the sections share no mutable
// state; the minimum of the two is taken after the parallel region joins.
EdgeWeight two_query_p_parallel(NodeID s, NodeID t) {
EdgeWeight distance = INF_WEIGHT;
EdgeWeight ldistance = INF_WEIGHT;
const two_index_t_p &idx_s = two_index_p[s];
const two_index_t_p &idx_t = two_index_p[t];
#pragma omp parallel sections
{
#pragma omp section
{
// Full labels: NodeID hub ids with numOfVertices as the sentinel.
_mm_prefetch(&idx_s.spt_v[0], _MM_HINT_T0);
_mm_prefetch(&idx_t.spt_v[0], _MM_HINT_T0);
_mm_prefetch(&idx_s.spt_d[0], _MM_HINT_T0);
_mm_prefetch(&idx_t.spt_d[0], _MM_HINT_T0);
for (int i = 0, j = 0; ; ) {
NodeID v1 = idx_s.spt_v[i], v2 = idx_t.spt_v[j];
if (v1 == numOfVertices) break; // Sentinel
if (v1 == v2) {
EdgeWeight td = idx_s.spt_d[i] + idx_t.spt_d[j];
if (td < distance) distance = td;
++i;
++j;
}
else {
i += v1 < v2 ? 1 : 0;
j += v1 > v2 ? 1 : 0;
}
}
}
#pragma omp section
{
// Compressed local labels: uint8_t hub ids with UCHAR_MAX sentinel.
_mm_prefetch(&idx_s.spt_lv[0], _MM_HINT_T0);
_mm_prefetch(&idx_t.spt_lv[0], _MM_HINT_T0);
_mm_prefetch(&idx_s.spt_ld[0], _MM_HINT_T0);
_mm_prefetch(&idx_t.spt_ld[0], _MM_HINT_T0);
for (uint8_t i = 0, j = 0; ; ) {
uint8_t uv8_1 = idx_s.spt_lv[i], uv8_2 = idx_t.spt_lv[j];
if (uv8_1 == UCHAR_MAX) break; // Sentinel
if (uv8_1 == uv8_2) {
EdgeWeight td = idx_s.spt_ld[i] + idx_t.spt_ld[j];
if (td < ldistance) ldistance = td;
++i;
++j;
}
else {
i += uv8_1 < uv8_2 ? 1 : 0;
j += uv8_1 > uv8_2 ? 1 : 0;
}
}
}
}
if(distance < ldistance)
return distance;
else
return ldistance;
}
// Same sorted-merge query as query_p(), but inspects at most the first
// k+1 entries of each label list (early cut-off via k1/k2).  When either
// list is longer than the budget, the returned value is an upper bound on
// the true distance rather than the exact distance.  (A stale
// commented-out duplicate of this loop was removed.)
EdgeWeight query_p_with_nums(NodeID s, NodeID t, int k) {
    EdgeWeight distance = INF_WEIGHT;
    const index_t_p &idx_s = index_p[s];
    const index_t_p &idx_t = index_p[t];
    _mm_prefetch(&idx_s.spt_v[0], _MM_HINT_T0);
    _mm_prefetch(&idx_t.spt_v[0], _MM_HINT_T0);
    _mm_prefetch(&idx_s.spt_d[0], _MM_HINT_T0);
    _mm_prefetch(&idx_t.spt_d[0], _MM_HINT_T0);
    int k1 = k, k2 = k;
    for (int i = 0, j = 0; ; ) {
        NodeID v1 = idx_s.spt_v[i], v2 = idx_t.spt_v[j];
        if (v1 == numOfVertices) break; // Sentinel
        if (v1 == v2) {
            EdgeWeight td = idx_s.spt_d[i] + idx_t.spt_d[j];
            if (td < distance) distance = td;
            ++i;
            ++j;
        }
        else {
            i += v1 < v2 ? 1 : 0;
            j += v1 > v2 ? 1 : 0;
        }
        if (i > k1 || j > k2) break; // label budget exhausted
    }
    return distance;
}
// 2-hop distance query on vector labels: intersect the sorted hub lists of
// s and t and return the minimum common-hub distance (INF_WEIGHT if none).
// BUG FIX: the loop condition used the comma operator
// (i < index_s.size(), j < index_t.size()), which discards the first test and
// lets i run past index_s — out-of-bounds read. Both bounds now use &&.
EdgeWeight query(NodeID s, NodeID t) {
	EdgeWeight distance = INF_WEIGHT;
	vector<NodeID>& index_s = index_[s].spt_v;
	vector<EdgeWeight>& index_s_d = index_[s].spt_d;
	vector<NodeID>& index_t = index_[t].spt_v;
	vector<EdgeWeight>& index_t_d = index_[t].spt_d;
	for (size_t i = 0, j = 0; i < index_s.size() && j < index_t.size(); ) {
		if (index_s[i] == index_t[j])
			distance = min(distance, (EdgeWeight)(index_s_d[i++] + index_t_d[j++]));
		else {
			if (index_s[i] < index_t[j])
				++i;
			else
				++j;
		}
	}
	return distance;
}
// Distance query that also reports the best meeting hub (`meet`) and the two
// leg distances through it (`dis1` from s, `dis2` from t). Outputs are set to
// max() sentinels when no common hub exists.
// BUG FIX: the loop condition used the comma operator, so the i-bound was
// never checked (out-of-bounds read); both bounds now use &&.
EdgeWeight query(NodeID s, NodeID t, NodeID& meet, EdgeWeight& dis1, EdgeWeight& dis2) {
	EdgeWeight distance = INF_WEIGHT;
	vector<NodeID>& index_s = index_[s].spt_v;
	vector<EdgeWeight>& index_s_d = index_[s].spt_d;
	vector<NodeID>& index_t = index_[t].spt_v;
	vector<EdgeWeight>& index_t_d = index_[t].spt_d;
	meet = numeric_limits<NodeID>::max();
	dis1 = numeric_limits<EdgeWeight>::max();
	dis2 = numeric_limits<EdgeWeight>::max();
	for (size_t i = 0, j = 0; i < index_s.size() && j < index_t.size(); ) {
		if (index_s[i] == index_t[j]) {
			EdgeWeight td = (EdgeWeight)(index_s_d[i] + index_t_d[j]);
			if (distance > td) {
				distance = td;
				meet = index_s[i];
				dis1 = index_s_d[i];
				dis2 = index_t_d[j];
			}
			++i; ++j;
		}
		else {
			if (index_s[i] < index_t[j])
				++i;
			else
				++j;
		}
	}
	return distance;
}
/*EdgeWeight query_new(NodeID s, NodeID t, Ordering& ordering) {
EdgeWeight distance = INF_WEIGHT;
vector<NodeID>& index_s = index_[s].spt_v;
vector<EdgeWeight>& index_s_d = index_[s].spt_d;
vector<NodeID>& index_t = index_[t].spt_v;
vector<EdgeWeight>& index_t_d = index_[t].spt_d;
for (int i = 0, j = 0; i < index_s.size(), j < index_t.size(); ) {
if (index_s[i] == index_t[j])
distance = min(distance, (EdgeWeight)(index_s_d[i++] + index_t_d[j++]));
else {
if (index_s[i] < index_t[j])
++i;
else
++j;
}
}
return distance;
}
*/
double avg_size() {
double total = 0;
if(index_.size()!=0){
for (int i = 0; i < numOfVertices; ++i) total += index_[i].spt_v.size();
double avg = total / numOfVertices - 1; // We do not count the trivial label (V, INF_WEIGHT).
return avg;
}
total = 0;
for (int i = 0; i < numOfVertices; ++i) {
int unit_count = 0;
const index_t_p &idx_s = index_p[i];
for(int j = 0; ;){
NodeID v = idx_s.spt_v[j++];
++unit_count;
if( v == numOfVertices) break;
}
total += unit_count;
}
double avg = total / numOfVertices - 1; // We do not count the trivial label (V, INF_WEIGHT).
return avg;
}
/*
NodeID max_size() {
NodeID maxsize = numeric_limits<NodeID>::min();
for (int i = 0; i < V; ++i) maxsize = max(maxsize, index_[i].spt_v.size());
return maxsize;
}*/
// Append the label entry (root, distance) to vertex v's label; the two
// parallel arrays stay index-aligned.
void append(NodeID v, NodeID root, EdgeWeight distance) {
	index_[v].spt_d.push_back(distance);
	index_[v].spt_v.push_back(root);
}
// Print summary statistics for the index (currently only the average label size).
void print_stat() {
	cout << "Average Label Size: " << avg_size() << endl;
	//cout << "Maximum Label Size: " << max_size() << endl;
}
// Release all vector-based labels; no-op when nothing was built.
void Free() {
	if (index_.empty()) return;
	for (int v = 0; v < numOfVertices; ++v) {
		index_[v].spt_v.clear();
		index_[v].spt_d.clear();
	}
	index_.clear();
}
// Serialize the vector labels in binary: vertex count, then per vertex the
// entry count followed by (hub, distance) pairs.
void save_labels(const char* save_filename) {
	ofstream ofs(save_filename, ios::binary | ios::out);
	ofs.write((const char*)&numOfVertices, sizeof(numOfVertices));
	for (NodeID v = 0; v < numOfVertices; ++v) {
		NodeID isize = index_[v].size();
		ofs.write((const char*)&isize, sizeof(isize));
		for (NodeID i = 0; i < isize; ++i) {
			ofs.write((const char*)&index_[v].spt_v[i], sizeof(index_[v].spt_v[i]));
			ofs.write((const char*)&index_[v].spt_d[i], sizeof(index_[v].spt_d[i]));
		}
	}
	ofs.close();
}
// Load flat-array labels written by save_labels(): vertex count, then per
// vertex the entry count followed by (hub, distance) pairs. Overwrites
// numOfVertices and repopulates index_p with 64-byte-aligned arrays.
// BUG FIX: the stream is now opened with ios::binary to match the binary
// writer; without it reads are corrupted on platforms that translate
// line endings. (Dead commented-out code removed.)
// NOTE(review): memalign() results are not checked; on allocation failure
// the subsequent writes would dereference NULL — TODO confirm policy.
void load_labels(const char* load_filename) {
	index_p = NULL;
	ifstream ifs(load_filename, ios::binary | ios::in);
	NodeID isize = 0;
	ifs.read((char*)&isize, sizeof(isize));
	numOfVertices = isize;
	index_p = (index_t_p*)memalign(64, numOfVertices * sizeof(index_t_p));
	for (NodeID v = 0; v < numOfVertices; ++v) {
		index_t_p &idx = index_p[v];
		ifs.read((char*)&isize, sizeof(isize));
		idx.spt_v = (NodeID*)memalign(64, isize * sizeof(NodeID));
		idx.spt_d = (EdgeWeight*)memalign(64, isize * sizeof(EdgeWeight));
		for (NodeID i = 0; i < isize; ++i) {
			NodeID hub;
			EdgeWeight hub_weight;
			ifs.read((char*)&hub, sizeof(hub));
			ifs.read((char*)&hub_weight, sizeof(hub_weight));
			idx.spt_v[i] = hub;
			idx.spt_d[i] = hub_weight;
		}
	}
	ifs.close();
}
// Split each flat label into a "small hub" prefix (hub ids < UCHAR_MAX,
// stored as uint8_t in spt_lv/spt_ld) and a remainder section (spt_v/spt_d),
// then report the space saved. Assumes hub ids within each label are sorted
// ascending (so all ids < UCHAR_MAX form a prefix) and that each label ends
// with the numOfVertices sentinel — TODO confirm against the index builder.
void convert_to_fewerbit(){
	two_index_p = NULL;
	two_index_p = (two_index_t_p*)memalign(64, numOfVertices * sizeof(two_index_t_p));
	double compressed_size = 0;
	double total_size = 0;
	for (NodeID v = 0; v < numOfVertices; ++v) {
		two_index_t_p &idx = two_index_p[v];
		index_t_p &idx_original = index_p[v];
		// Count the prefix of hubs whose id fits in 8 bits.
		NodeID isize = 0;
		for(NodeID i = 0; idx_original.spt_v[i] < UCHAR_MAX; ++i){
			++isize;
		}
		// +1 slot for the small-section sentinel (UCHAR_MAX / INF_WEIGHT).
		idx.spt_lv = (uint8_t*)memalign(64, (isize + 1) * sizeof(uint8_t));
		idx.spt_ld = (EdgeWeight*)memalign(64, (isize + 1) * sizeof(EdgeWeight));
		// index_[v].spt_v.resize(isize);
		// index_[v].spt_d.resize(isize);
		for (NodeID i = 0; i < isize; ++i) {
			// NOTE(review): hub/hub_weight are declared but never used here.
			uint8_t hub;
			EdgeWeight hub_weight;
			//index_[v].spt_v[i] = hub;
			//index_[v].spt_d[i] = hub_weight;
			idx.spt_lv[i] = idx_original.spt_v[i];
			idx.spt_ld[i] = idx_original.spt_d[i];
		}
		// Bytes saved: ids shrink from 4 bytes to 1 in the small section.
		compressed_size += 4 * (isize - 1) - isize;
		// Terminate the small section with its own sentinel.
		idx.spt_lv[isize] = UCHAR_MAX;
		idx.spt_ld[isize] = INF_WEIGHT;
		// Count the remaining large-id entries up to the main sentinel,
		// then +1 so the sentinel itself is copied too.
		NodeID larger_size = 0;
		for(NodeID i = isize; idx_original.spt_v[i] != numOfVertices; ++i){
			++larger_size;
		}
		larger_size++;
		idx.spt_v = (NodeID*)memalign(64, larger_size * sizeof(NodeID));
		idx.spt_d = (EdgeWeight*)memalign(64, larger_size * sizeof(EdgeWeight));
		for (NodeID i = 0; i < larger_size; ++i) {
			// NOTE(review): hub/hub_weight are declared but never used here.
			uint8_t hub;
			EdgeWeight hub_weight;
			//index_[v].spt_v[i] = hub;
			//index_[v].spt_d[i] = hub_weight;
			idx.spt_v[i] = idx_original.spt_v[i + isize];
			idx.spt_d[i] = idx_original.spt_d[i + isize];
		}
		total_size += 4 * (isize - 1 + larger_size) * 2;
	}
	cout << "reduce size :" << compressed_size << " out of " << total_size << " saving " << int(compressed_size * 100 / total_size) << "%" << endl;
}
// Load labels but keep at most k entries per vertex; the last kept slot is
// overwritten with the (numOfVertices, INF_WEIGHT) sentinel so queries still
// terminate. Prints total vs. kept entry counts.
// BUG FIX: the skip test was `i > actual_isize`, which let i == actual_isize
// fall through and write one element past the allocated arrays; it is now >=.
// The file is also opened in binary mode to match save_labels().
void load_labels_with_k(const char* load_filename, int k) {
	long total_amount = 0;
	long actual_amount = 0;
	index_p = NULL;
	ifstream ifs(load_filename, ios::binary | ios::in);
	NodeID isize = 0;
	ifs.read((char*)&isize, sizeof(isize));
	numOfVertices = isize;
	index_p = (index_t_p*)memalign(64, numOfVertices * sizeof(index_t_p));
	for (NodeID v = 0; v < numOfVertices; ++v) {
		index_t_p &idx = index_p[v];
		ifs.read((char*)&isize, sizeof(isize));
		int actual_isize = (isize > k) ? k : (int)isize; // min(isize, k)
		total_amount += isize;
		actual_amount += actual_isize;
		idx.spt_v = (NodeID*)memalign(64, actual_isize * sizeof(NodeID));
		idx.spt_d = (EdgeWeight*)memalign(64, actual_isize * sizeof(EdgeWeight));
		for (NodeID i = 0; i < isize; ++i) {
			NodeID hub;
			EdgeWeight hub_weight;
			// Always consume the pair to keep the stream position correct.
			ifs.read((char*)&hub, sizeof(hub));
			ifs.read((char*)&hub_weight, sizeof(hub_weight));
			if (i >= actual_isize) continue; // truncated entry, not stored
			if (i == actual_isize - 1) {
				// Last kept slot always holds the sentinel.
				idx.spt_v[i] = numOfVertices;
				idx.spt_d[i] = INF_WEIGHT;
			} else {
				idx.spt_v[i] = hub;
				idx.spt_d[i] = hub_weight;
			}
		}
	}
	ifs.close();
	cout << "Total Labels:" << total_amount << endl;
	cout << "Actual Labels:" << actual_amount << endl;
}
// For each hub h, count how many labels contain h, and dump one count per
// line (indexed by vertex id) to save_filename.
void save_labels_iteration_stats(const char* save_filename) {
	vector<NodeID> stat(numOfVertices);
	for (NodeID v = 0; v < numOfVertices; ++v)
		for (NodeID i = 0; i < index_[v].size(); ++i)
			++stat[index_[v].spt_v[i]];
	ofstream ofs(save_filename);
	for (NodeID v = 0; v < numOfVertices; ++v)
		ofs << stat[v] << endl;
	ofs.close();
}
// Distance query that also records the elapsed time, the best meeting hub,
// and the scan length (shorter label size) in q_info.
// BUG FIXES: (1) the loop condition used the comma operator, so the i-bound
// was never checked (out-of-bounds read); (2) meet_node was read at
// index_s[i] AFTER i had been incremented, recording the wrong hub.
EdgeWeight query_with_info(NodeID s, NodeID t, query_info& q_info) {
	double stime = GetCurrentTimeSec();
	EdgeWeight distance = INF_WEIGHT;
	vector<NodeID>& index_s = index_[s].spt_v;
	vector<EdgeWeight>& index_s_d = index_[s].spt_d;
	vector<NodeID>& index_t = index_[t].spt_v;
	vector<EdgeWeight>& index_t_d = index_[t].spt_d;
	q_info.meet_node = numOfVertices;
	for (size_t i = 0, j = 0; i < index_s.size() && j < index_t.size(); ) {
		if (index_s[i] == index_t[j]) {
			EdgeWeight meet_distance = (EdgeWeight)(index_s_d[i] + index_t_d[j]);
			if (distance > meet_distance) {
				distance = meet_distance;
				q_info.meet_node = index_s[i]; // hub where the labels meet
			}
			++i;
			++j;
		}
		else {
			if (index_s[i] < index_t[j])
				++i;
			else
				++j;
		}
	}
	stime = GetCurrentTimeSec() - stime;
	q_info.time_cost = stime;
	q_info.search_len = index_s.size() < index_t.size() ? index_s.size() : index_t.size();
	return distance;
}
};
class PLabel {
public:
vector<index_t_path> index_;
index_t_path_p* index_p;
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double GetCurrentTimeSec() {
	struct timeval tv;
	gettimeofday(&tv, NULL);
	return tv.tv_sec + tv.tv_usec * 1e-6;
}
// Allocate one (initially empty) path-aware label per vertex.
PLabel() {
	index_.resize(numOfVertices);
}
// Release all labels.
~PLabel() {
	Free();
}
// Flat-array 2-hop distance query over the path-aware labels: merge-scan
// both sorted hub arrays until the s-side sentinel (numOfVertices) and
// return the minimum common-hub distance, or INF_WEIGHT if none.
// Fixes: removed the unused local `NodeID meet;` and the dead commented-out
// pointer-walk duplicate.
EdgeWeight query_p(NodeID s, NodeID t) {
	EdgeWeight distance = INF_WEIGHT;
	const index_t_path_p &idx_s = index_p[s];
	const index_t_path_p &idx_t = index_p[t];
	_mm_prefetch(&idx_s.spt_v[0], _MM_HINT_T0);
	_mm_prefetch(&idx_t.spt_v[0], _MM_HINT_T0);
	_mm_prefetch(&idx_s.spt_d[0], _MM_HINT_T0);
	_mm_prefetch(&idx_t.spt_d[0], _MM_HINT_T0);
	for (int i = 0, j = 0; ; ) {
		NodeID v1 = idx_s.spt_v[i], v2 = idx_t.spt_v[j];
		if (v1 == numOfVertices) break; // Sentinel
		if (v1 == v2) {
			EdgeWeight td = idx_s.spt_d[i] + idx_t.spt_d[j];
			if (td < distance) {
				distance = td;
			}
			++i;
			++j;
		}
		else {
			// Advance only the cursor holding the smaller hub id.
			i += v1 < v2 ? 1 : 0;
			j += v1 > v2 ? 1 : 0;
		}
	}
	return distance;
}
// 2-hop distance query on vector labels: intersect the sorted hub lists of
// s and t and return the minimum common-hub distance (INF_WEIGHT if none).
// BUG FIX: the loop condition used the comma operator, which discards the
// i-bound check and permits an out-of-bounds read; both bounds now use &&.
EdgeWeight query(NodeID s, NodeID t) {
	EdgeWeight distance = INF_WEIGHT;
	vector<NodeID>& index_s = index_[s].spt_v;
	vector<EdgeWeight>& index_s_d = index_[s].spt_d;
	vector<NodeID>& index_t = index_[t].spt_v;
	vector<EdgeWeight>& index_t_d = index_[t].spt_d;
	for (size_t i = 0, j = 0; i < index_s.size() && j < index_t.size(); ) {
		if (index_s[i] == index_t[j])
			distance = min(distance, (EdgeWeight)(index_s_d[i++] + index_t_d[j++]));
		else {
			if (index_s[i] < index_t[j])
				++i;
			else
				++j;
		}
	}
	return distance;
}
// Distance query that also reports the best meeting hub (`meet`) and the two
// leg distances through it (`dis1` from s, `dis2` from t). Outputs stay at
// max() sentinels when no common hub exists.
// BUG FIX: the loop condition used the comma operator, so the i-bound was
// never checked (out-of-bounds read); both bounds now use &&.
EdgeWeight query(NodeID s, NodeID t, NodeID& meet, EdgeWeight& dis1, EdgeWeight& dis2) {
	EdgeWeight distance = INF_WEIGHT;
	vector<NodeID>& index_s = index_[s].spt_v;
	vector<EdgeWeight>& index_s_d = index_[s].spt_d;
	vector<NodeID>& index_t = index_[t].spt_v;
	vector<EdgeWeight>& index_t_d = index_[t].spt_d;
	meet = numeric_limits<NodeID>::max();
	dis1 = numeric_limits<EdgeWeight>::max();
	dis2 = numeric_limits<EdgeWeight>::max();
	for (size_t i = 0, j = 0; i < index_s.size() && j < index_t.size(); ) {
		if (index_s[i] == index_t[j]) {
			EdgeWeight td = (EdgeWeight)(index_s_d[i] + index_t_d[j]);
			if (distance > td) {
				distance = td;
				meet = index_s[i];
				dis1 = index_s_d[i];
				dis2 = index_t_d[j];
			}
			++i; ++j;
		}
		else {
			if (index_s[i] < index_t[j])
				++i;
			else
				++j;
		}
	}
	return distance;
}
// Reconstruct the hub-labeling path between s and t via their best meeting
// hub, by repeatedly following the stored parent pointers (spt_p) toward the
// meeting hub from both endpoints.
// NOTE(review): despite the return type, this returns the number of
// recovered path vertices (path_from_s.size() + path_to_t.size()), not the
// distance — the original overwrote `distance` the same way, so this is
// preserved for callers.
// BUG FIX: s_parent/t_parent were read uninitialized and inv[meetnode] was
// indexed out of bounds when the two labels share no hub; that case now
// returns INF_WEIGHT before reconstruction. The unused `operation` counter
// and the large dead commented-out duplicate were removed.
EdgeWeight query_path(NodeID s, NodeID t, vector<NodeID>& rank, vector<NodeID>& inv) {
	EdgeWeight distance = INF_WEIGHT;
	NodeID meetnode = numOfVertices;
	NodeID s_parent = numOfVertices;
	NodeID t_parent = numOfVertices;
	const index_t_path_p &idx_s = index_p[s];
	const index_t_path_p &idx_t = index_p[t];
	_mm_prefetch(&idx_s.spt_v[0], _MM_HINT_T0);
	_mm_prefetch(&idx_t.spt_v[0], _MM_HINT_T0);
	_mm_prefetch(&idx_s.spt_d[0], _MM_HINT_T0);
	_mm_prefetch(&idx_t.spt_d[0], _MM_HINT_T0);
	_mm_prefetch(&idx_s.spt_p[0], _MM_HINT_T0);
	_mm_prefetch(&idx_t.spt_p[0], _MM_HINT_T0);
	for (int i = 0, j = 0; ; ) {
		NodeID v1 = idx_s.spt_v[i], v2 = idx_t.spt_v[j];
		if (v1 == numOfVertices) break; // Sentinel
		if (v1 == v2) {
			EdgeWeight td = idx_s.spt_d[i] + idx_t.spt_d[j];
			if (td < distance) {
				distance = td;
				meetnode = v1;
				s_parent = idx_s.spt_p[i];
				t_parent = idx_t.spt_p[j];
			}
			++i;
			++j;
		}
		else {
			i += v1 < v2 ? 1 : 0;
			j += v1 > v2 ? 1 : 0;
		}
	}
	if (meetnode == numOfVertices) return INF_WEIGHT; // no common hub
	// Walk parent pointers from both endpoints toward the meeting hub.
	vector<NodeID> path_from_s;
	vector<NodeID> path_to_t;
	path_from_s.push_back(s_parent);
	path_to_t.push_back(t_parent);
	NodeID inv_meetnode = inv[meetnode];
	while (path_from_s.back() != inv_meetnode) {
		const index_t_path_p &idx_from_s = index_p[path_from_s.back()];
		_mm_prefetch(&idx_from_s.spt_v[0], _MM_HINT_T0);
		_mm_prefetch(&idx_from_s.spt_p[0], _MM_HINT_T0);
		// Find the meeting hub in the current vertex's label and step to
		// its recorded parent.
		for (int i = 0; ; ++i) {
			if (idx_from_s.spt_v[i] == numOfVertices) break;
			if (idx_from_s.spt_v[i] == meetnode) {
				path_from_s.push_back(idx_from_s.spt_p[i]);
				break;
			}
		}
	}
	while (path_to_t.back() != inv_meetnode) {
		const index_t_path_p &idx_to_t = index_p[path_to_t.back()];
		_mm_prefetch(&idx_to_t.spt_v[0], _MM_HINT_T0);
		_mm_prefetch(&idx_to_t.spt_p[0], _MM_HINT_T0);
		for (int i = 0; ; ++i) {
			if (idx_to_t.spt_v[i] == numOfVertices) break;
			if (idx_to_t.spt_v[i] == meetnode) {
				path_to_t.push_back(idx_to_t.spt_p[i]);
				break;
			}
		}
	}
	distance = path_from_s.size() + path_to_t.size();
	return distance;
}
// Like query_path(), but also re-derives the distance edge by edge through
// query_p() into `alldis` as a consistency check (the check value itself is
// not returned). Returns the label distance between s and t.
// BUG FIX: s_parent/t_parent were read uninitialized and inv[meetnode] was
// indexed out of bounds when the labels share no hub; that case now returns
// INF_WEIGHT before reconstruction. The inner loops over path vectors use
// `i + 1 < size()` to avoid unsigned wrap-around on empty/singleton paths.
EdgeWeight query_path_check(NodeID s, NodeID t, vector<NodeID>& rank, vector<NodeID>& inv) {
	EdgeWeight distance = INF_WEIGHT;
	NodeID meetnode = numOfVertices;
	NodeID s_parent = numOfVertices;
	NodeID t_parent = numOfVertices;
	const index_t_path_p &idx_s = index_p[s];
	const index_t_path_p &idx_t = index_p[t];
	_mm_prefetch(&idx_s.spt_v[0], _MM_HINT_T0);
	_mm_prefetch(&idx_t.spt_v[0], _MM_HINT_T0);
	_mm_prefetch(&idx_s.spt_d[0], _MM_HINT_T0);
	_mm_prefetch(&idx_t.spt_d[0], _MM_HINT_T0);
	_mm_prefetch(&idx_s.spt_p[0], _MM_HINT_T0);
	_mm_prefetch(&idx_t.spt_p[0], _MM_HINT_T0);
	for (int i = 0, j = 0; ; ) {
		NodeID v1 = idx_s.spt_v[i], v2 = idx_t.spt_v[j];
		if (v1 == numOfVertices) break; // Sentinel
		if (v1 == v2) {
			EdgeWeight td = idx_s.spt_d[i] + idx_t.spt_d[j];
			if (td < distance) {
				distance = td;
				meetnode = v1;
				s_parent = idx_s.spt_p[i];
				t_parent = idx_t.spt_p[j];
			}
			++i;
			++j;
		}
		else {
			i += v1 < v2 ? 1 : 0;
			j += v1 > v2 ? 1 : 0;
		}
	}
	if (meetnode == numOfVertices) return INF_WEIGHT; // no common hub
	NodeID inv_meetnode = inv[meetnode];
	// Reconstruct both half-paths, seeding them with the endpoints so the
	// per-edge re-check below can start from s and t themselves.
	vector<NodeID> path_from_s;
	vector<NodeID> path_to_t;
	if (s != inv_meetnode)
		path_from_s.push_back(s);
	path_from_s.push_back(s_parent);
	if (t != inv_meetnode)
		path_to_t.push_back(t);
	path_to_t.push_back(t_parent);
	while (path_from_s.back() != inv_meetnode) {
		const index_t_path_p &idx_from_s = index_p[path_from_s.back()];
		_mm_prefetch(&idx_from_s.spt_v[0], _MM_HINT_T0);
		_mm_prefetch(&idx_from_s.spt_p[0], _MM_HINT_T0);
		for (int i = 0; ; ++i) {
			if (idx_from_s.spt_v[i] == numOfVertices) break;
			if (idx_from_s.spt_v[i] == meetnode) {
				path_from_s.push_back(idx_from_s.spt_p[i]);
				break;
			}
		}
	}
	while (path_to_t.back() != inv_meetnode) {
		const index_t_path_p &idx_to_t = index_p[path_to_t.back()];
		_mm_prefetch(&idx_to_t.spt_v[0], _MM_HINT_T0);
		_mm_prefetch(&idx_to_t.spt_p[0], _MM_HINT_T0);
		for (int i = 0; ; ++i) {
			if (idx_to_t.spt_v[i] == numOfVertices) break;
			if (idx_to_t.spt_v[i] == meetnode) {
				path_to_t.push_back(idx_to_t.spt_p[i]);
				break;
			}
		}
	}
	// Re-sum the distance edge by edge (sanity check; result unused).
	EdgeWeight alldis = 0;
	if (path_from_s.size() == 1 && s != inv_meetnode)
		alldis += query_p(s, inv_meetnode);
	if (path_to_t.size() == 1 && t != inv_meetnode)
		alldis += query_p(t, inv_meetnode);
	for (size_t i = 0; i + 1 < path_from_s.size(); ++i)
		alldis += query_p(path_from_s[i], path_from_s[i + 1]);
	for (size_t i = 0; i + 1 < path_to_t.size(); ++i)
		alldis += query_p(path_to_t[i], path_to_t[i + 1]);
	return distance;
}
//EdgeWeight query_path_check(NodeID s, NodeID t, vector<NodeID>& rank, vector<NodeID>& inv) {
// EdgeWeight distance = INF_WEIGHT;
// NodeID meetnode = numOfVertices;
// NodeID s_parent;
// NodeID t_parent;
// const index_t_path_p &idx_s = index_p[s];
// const index_t_path_p &idx_t = index_p[t];
// _mm_prefetch(&idx_s.spt_v[0], _MM_HINT_T0);
// _mm_prefetch(&idx_t.spt_v[0], _MM_HINT_T0);
// _mm_prefetch(&idx_s.spt_d[0], _MM_HINT_T0);
// _mm_prefetch(&idx_t.spt_d[0], _MM_HINT_T0);
// _mm_prefetch(&idx_s.spt_p[0], _MM_HINT_T0);
// _mm_prefetch(&idx_t.spt_p[0], _MM_HINT_T0);
// for (int i = 0, j = 0; ; ) {
// NodeID v1 = idx_s.spt_v[i], v2 = idx_t.spt_v[j];
// if (v1 == numOfVertices) break; // Sentinel
// if (v1 == v2) {
// EdgeWeight td = idx_s.spt_d[i] + idx_t.spt_d[j];
// if (td < distance) {
// distance = td;
// if (v1 < meetnode) {
// meetnode = v1;
// s_parent = idx_s.spt_p[i];
// t_parent = idx_t.spt_p[j];
// }
// }
// ++i;
// ++j;
// }
// else {
// i += v1 < v2 ? 1 : 0;
// j += v1 > v2 ? 1 : 0;
// }
// }
// //Next, retrieve path from s - meetnode and meetnode - t.
// vector<NodeID> path_from_s;
// vector<NodeID> path_to_t;
// path_from_s.push_back(s_parent);
// path_to_t.push_back(t_parent);
// /* if (s == 194569 && t == 20072)
// cout << "debug." << " meet: " << meetnode << " sparent:" << s_parent << " tparent:" << t_parent << endl;*/
// NodeID inv_meetnode = inv[meetnode];
// while (path_from_s.back() != inv_meetnode) {
// /*if (s == 194569 && t == 20072)
// cout << "s meet:" << path_from_s.back() << endl;*/
// const index_t_path_p &idx_from_s = index_p[path_from_s.back()];
// _mm_prefetch(&idx_from_s.spt_v[0], _MM_HINT_T0);
// _mm_prefetch(&idx_from_s.spt_p[0], _MM_HINT_T0);
// // vector<NodeID>& index_from_s = index_[path_from_s.back()].spt_v;
// for (int i = 0; ; ++i) {
// if (idx_from_s.spt_v[i] == numOfVertices) break;
// if (idx_from_s.spt_v[i] == meetnode) {
// path_from_s.push_back(idx_from_s.spt_p[i]);
// break;
// }
// }
// }
// while (path_to_t.back() != inv_meetnode) {
// /*if (s == 194569 && t == 20072)
// cout << "t meet:" << path_to_t.back() << endl;*/
// // vector<NodeID>& index_to_t = index_[path_to_t.back()].spt_v;
// const index_t_path_p &idx_to_t = index_p[path_to_t.back()];
// _mm_prefetch(&idx_to_t.spt_v[0], _MM_HINT_T0);
// _mm_prefetch(&idx_to_t.spt_p[0], _MM_HINT_T0);
// for (int i = 0; ; ++i) {
// if (idx_to_t.spt_v[i] == numOfVertices) break;
// if (idx_to_t.spt_v[i] == meetnode) {
// path_to_t.push_back(idx_to_t.spt_p[i]);
// break;
// }
// }
// }
// EdgeWeight path_from_s = 0;
// for (int i = 0; i < path_from_s.size(); ++i) {
// }
//
// return distance;
//
//}
/*EdgeWeight query_new(NodeID s, NodeID t, Ordering& ordering) {
EdgeWeight distance = INF_WEIGHT;
vector<NodeID>& index_s = index_[s].spt_v;
vector<EdgeWeight>& index_s_d = index_[s].spt_d;
vector<NodeID>& index_t = index_[t].spt_v;
vector<EdgeWeight>& index_t_d = index_[t].spt_d;
for (int i = 0, j = 0; i < index_s.size(), j < index_t.size(); ) {
if (index_s[i] == index_t[j])
distance = min(distance, (EdgeWeight)(index_s_d[i++] + index_t_d[j++]));
else {
if (index_s[i] < index_t[j])
++i;
else
++j;
}
}
return distance;
}
*/
// Mean label length per vertex, excluding the trivial sentinel entry
// (V, INF_WEIGHT) that every label carries.
double avg_size() {
	double total = 0;
	for (int v = 0; v < numOfVertices; ++v)
		total += index_[v].spt_v.size();
	return total / numOfVertices - 1; // drop the trivial label
}
/*
NodeID max_size() {
NodeID maxsize = numeric_limits<NodeID>::min();
for (int i = 0; i < V; ++i) maxsize = max(maxsize, index_[i].spt_v.size());
return maxsize;
}*/
// Append the label entry (root, distance) to vertex v's label; the two
// parallel arrays stay index-aligned.
void append(NodeID v, NodeID root, EdgeWeight distance) {
	index_[v].spt_d.push_back(distance);
	index_[v].spt_v.push_back(root);
}
// Print summary statistics for the index (currently only the average label size).
void print_stat() {
	cout << "Average Label Size: " << avg_size() << endl;
	//cout << "Maximum Label Size: " << max_size() << endl;
}
// Release all vector-based labels; no-op when nothing was built.
void Free() {
	if (index_.empty()) return;
	for (int v = 0; v < numOfVertices; ++v) {
		index_[v].spt_v.clear();
		index_[v].spt_d.clear();
	}
	index_.clear();
}
// Serialize the path-aware labels in binary: vertex count, then per vertex
// the entry count followed by (hub, parent, distance) triples.
void save_labels(const char* save_filename) {
	ofstream ofs(save_filename, ios::binary | ios::out);
	ofs.write((const char*)&numOfVertices, sizeof(numOfVertices));
	for (NodeID v = 0; v < numOfVertices; ++v) {
		NodeID isize = index_[v].size();
		ofs.write((const char*)&isize, sizeof(isize));
		for (NodeID i = 0; i < isize; ++i) {
			ofs.write((const char*)&index_[v].spt_v[i], sizeof(index_[v].spt_v[i]));
			ofs.write((const char*)&index_[v].spt_p[i], sizeof(index_[v].spt_p[i]));
			ofs.write((const char*)&index_[v].spt_d[i], sizeof(index_[v].spt_d[i]));
		}
	}
	ofs.close();
}
// Load path-aware flat labels written by save_labels(): vertex count, then
// per vertex the entry count followed by (hub, parent, distance) triples.
// Overwrites numOfVertices and repopulates index_p.
// BUG FIX: the stream is now opened with ios::binary to match the binary
// writer. (Dead commented-out code removed.)
// NOTE(review): memalign() results are not checked — TODO confirm policy.
void load_labels(const char* load_filename) {
	index_p = NULL;
	ifstream ifs(load_filename, ios::binary | ios::in);
	NodeID isize = 0;
	ifs.read((char*)&isize, sizeof(isize));
	numOfVertices = isize;
	index_p = (index_t_path_p*)memalign(64, numOfVertices * sizeof(index_t_path_p));
	for (NodeID v = 0; v < numOfVertices; ++v) {
		index_t_path_p &idx = index_p[v];
		ifs.read((char*)&isize, sizeof(isize));
		idx.spt_v = (NodeID*)memalign(64, isize * sizeof(NodeID));
		idx.spt_p = (NodeID*)memalign(64, isize * sizeof(NodeID));
		idx.spt_d = (EdgeWeight*)memalign(64, isize * sizeof(EdgeWeight));
		for (NodeID i = 0; i < isize; ++i) {
			NodeID hub;
			NodeID hub_parent;
			EdgeWeight hub_weight;
			ifs.read((char*)&hub, sizeof(hub));
			ifs.read((char*)&hub_parent, sizeof(hub_parent));
			ifs.read((char*)&hub_weight, sizeof(hub_weight));
			idx.spt_v[i] = hub;
			idx.spt_p[i] = hub_parent;
			idx.spt_d[i] = hub_weight;
		}
	}
	ifs.close();
}
// For each hub h, count how many labels contain h, and dump one count per
// line (indexed by vertex id) to save_filename.
void save_labels_iteration_stats(const char* save_filename) {
	vector<NodeID> stat(numOfVertices);
	for (NodeID v = 0; v < numOfVertices; ++v)
		for (NodeID i = 0; i < index_[v].size(); ++i)
			++stat[index_[v].spt_v[i]];
	ofstream ofs(save_filename);
	for (NodeID v = 0; v < numOfVertices; ++v)
		ofs << stat[v] << endl;
	ofs.close();
}
// Distance query that also records the elapsed time, the best meeting hub,
// and the scan length (shorter label size) in q_info.
// BUG FIXES: (1) the loop condition used the comma operator, so the i-bound
// was never checked (out-of-bounds read); (2) meet_node was read at
// index_s[i] AFTER i had been incremented, recording the wrong hub.
EdgeWeight query_with_info(NodeID s, NodeID t, query_info& q_info) {
	double stime = GetCurrentTimeSec();
	EdgeWeight distance = INF_WEIGHT;
	vector<NodeID>& index_s = index_[s].spt_v;
	vector<EdgeWeight>& index_s_d = index_[s].spt_d;
	vector<NodeID>& index_t = index_[t].spt_v;
	vector<EdgeWeight>& index_t_d = index_[t].spt_d;
	q_info.meet_node = numOfVertices;
	for (size_t i = 0, j = 0; i < index_s.size() && j < index_t.size(); ) {
		if (index_s[i] == index_t[j]) {
			EdgeWeight meet_distance = (EdgeWeight)(index_s_d[i] + index_t_d[j]);
			if (distance > meet_distance) {
				distance = meet_distance;
				q_info.meet_node = index_s[i]; // hub where the labels meet
			}
			++i;
			++j;
		}
		else {
			if (index_s[i] < index_t[j])
				++i;
			else
				++j;
		}
	}
	stime = GetCurrentTimeSec() - stime;
	q_info.time_cost = stime;
	q_info.search_len = index_s.size() < index_t.size() ? index_s.size() : index_t.size();
	return distance;
}
};
class DLabel : public Label {
public:
vector<index_t> bindex_; // Backward labels.
index_t_p* bindex_p;
two_index_t_p* b_two_index_p;
// Allocate forward (index_) and backward (bindex_) label sets, one per vertex.
DLabel() {
	index_.resize(numOfVertices);
	bindex_.resize(numOfVertices);
}
// Release all labels.
~DLabel() {
	Free();
}
// Directed distance query on vector labels: intersect the forward label of s
// with the backward label of t; returns INF_WEIGHT when no common hub exists.
// BUG FIX: the loop condition used the comma operator, which discards the
// i-bound check and permits an out-of-bounds read; both bounds now use &&.
EdgeWeight query(NodeID s, NodeID t) {
	EdgeWeight distance = INF_WEIGHT;
	vector<NodeID>& index_s = index_[s].spt_v;
	vector<EdgeWeight>& index_s_d = index_[s].spt_d;
	vector<NodeID>& bindex_t = bindex_[t].spt_v;
	vector<EdgeWeight>& bindex_t_d = bindex_[t].spt_d;
	for (size_t i = 0, j = 0; i < index_s.size() && j < bindex_t.size(); ) {
		if (index_s[i] == bindex_t[j]) {
			distance = min(distance, (EdgeWeight)(index_s_d[i] + bindex_t_d[j]));
			++i;
			++j;
		}
		else {
			if (index_s[i] < bindex_t[j])
				++i;
			else
				++j;
		}
	}
	return distance;
}
// Directed distance query that also reports the best meeting hub (`meet`)
// and the two leg distances (`dis1` forward from s, `dis2` backward to t).
// Outputs stay at max() sentinels when no common hub exists.
// BUG FIX: the loop condition used the comma operator, so the i-bound was
// never checked (out-of-bounds read); both bounds now use &&.
EdgeWeight query(NodeID s, NodeID t, NodeID& meet, EdgeWeight& dis1, EdgeWeight& dis2) {
	EdgeWeight distance = INF_WEIGHT;
	vector<NodeID>& index_s = index_[s].spt_v;
	vector<EdgeWeight>& index_s_d = index_[s].spt_d;
	vector<NodeID>& bindex_t = bindex_[t].spt_v;
	vector<EdgeWeight>& bindex_t_d = bindex_[t].spt_d;
	meet = numeric_limits<NodeID>::max();
	dis1 = numeric_limits<EdgeWeight>::max();
	dis2 = numeric_limits<EdgeWeight>::max();
	for (size_t i = 0, j = 0; i < index_s.size() && j < bindex_t.size(); ) {
		if (index_s[i] == bindex_t[j]) {
			EdgeWeight td = (EdgeWeight)(index_s_d[i] + bindex_t_d[j]);
			if (distance > td) {
				distance = td;
				meet = index_s[i];
				dis1 = index_s_d[i];
				dis2 = bindex_t_d[j];
			}
			++i;
			++j;
		}
		else {
			if (index_s[i] < bindex_t[j])
				++i;
			else
				++j;
		}
	}
	return distance;
}
// Directed flat-array 2-hop query: merge-scan the forward label of s against
// the backward label of t; the loop ends when both cursors sit on the
// numOfVertices sentinel (sentinels compare equal). Returns the minimum
// common-hub distance, or INF_WEIGHT if none.
inline EdgeWeight query_p(NodeID s, NodeID t) {
	EdgeWeight best = INF_WEIGHT;
	const index_t_p &fwd = index_p[s];
	const index_t_p &bwd = bindex_p[t];
	_mm_prefetch(&fwd.spt_v[0], _MM_HINT_T0);
	_mm_prefetch(&bwd.spt_v[0], _MM_HINT_T0);
	_mm_prefetch(&fwd.spt_d[0], _MM_HINT_T0);
	_mm_prefetch(&bwd.spt_d[0], _MM_HINT_T0);
	for (int i = 0, j = 0; ; ) {
		NodeID hs = fwd.spt_v[i], ht = bwd.spt_v[j];
		if (hs == ht) {
			if (hs == numOfVertices) break; // both cursors at sentinel
			EdgeWeight cand = fwd.spt_d[i] + bwd.spt_d[j];
			if (cand < best) best = cand;
			++i;
			++j;
		}
		else if (hs < ht) {
			++i;
		}
		else {
			++j;
		}
	}
	return best;
}
// Directed query with timing / meet-node / scan-length instrumentation.
// BUG FIXES: (1) the meeting distance was computed from bindex_t[j] (a hub
// id) instead of bindex_t_d[j] (the distance), producing garbage distances;
// (2) the loop condition used the comma operator so the i-bound was never
// checked; (3) meet_node was read after i had already been incremented.
EdgeWeight query_with_info(NodeID s, NodeID t, query_info& q_info) {
	double stime = GetCurrentTimeSec();
	EdgeWeight distance = INF_WEIGHT;
	vector<NodeID>& index_s = index_[s].spt_v;
	vector<EdgeWeight>& index_s_d = index_[s].spt_d;
	vector<NodeID>& bindex_t = bindex_[t].spt_v;
	vector<EdgeWeight>& bindex_t_d = bindex_[t].spt_d;
	q_info.meet_node = numOfVertices;
	for (size_t i = 0, j = 0; i < index_s.size() && j < bindex_t.size(); ) {
		if (index_s[i] == bindex_t[j]) {
			EdgeWeight meet_distance = (EdgeWeight)(index_s_d[i] + bindex_t_d[j]);
			if (distance > meet_distance) {
				distance = meet_distance;
				q_info.meet_node = index_s[i]; // hub where the labels meet
			}
			++i;
			++j;
		}
		else {
			if (index_s[i] < bindex_t[j])
				++i;
			else
				++j;
		}
	}
	stime = GetCurrentTimeSec() - stime;
	q_info.time_cost = stime;
	q_info.search_len = index_s.size() < bindex_t.size() ? index_s.size() : bindex_t.size();
	return distance;
}
// Record one label for vertex v produced by a search rooted at `root`.
// A forward search from root reaches v, so (root, distance) goes into
// v's BACKWARD index; a backward search goes into v's FORWARD index.
void append(NodeID v, NodeID root, EdgeWeight distance, bool forward) {
	auto& labels = forward ? bindex_[v] : index_[v];
	labels.spt_v.push_back(root);
	labels.spt_d.push_back(distance);
}
// Release the vector-based label storage in both directions.
void Free() {
	if (index_.empty() || bindex_.empty()) return;
	for (int u = 0; u < numOfVertices; ++u) {
		index_[u].spt_v.clear();
		index_[u].spt_d.clear();
		if (DIRECTED_FLAG == true) {
			bindex_[u].spt_v.clear();
			bindex_[u].spt_d.clear();
		}
	}
	index_.clear();
	bindex_.clear();
}
// Mean number of label entries per vertex per direction, excluding the
// trivial sentinel label (V, INF_WEIGHT) that every list carries.
double avg_size() {
	double total = 0;
	for (int u = 0; u < numOfVertices; ++u)
		total += index_[u].spt_v.size() + bindex_[u].spt_v.size();
	return total / numOfVertices / 2 - 1;
}
// Dump label-size statistics to stdout.
void print_stat() {
	cout << "Average Label Size: " << avg_size() << endl;
}
void save_labels(const char* save_filename) {
ofstream ofs(save_filename, ios::binary | ios::out);
ofs.write((const char*)&numOfVertices, sizeof(numOfVertices));
for (NodeID v = 0; v < numOfVertices; ++v) {
int isize = index_[v].size();
ofs.write((const char*)&isize, sizeof(isize));
for (NodeID i = 0; i < index_[v].size(); ++i) {
ofs.write((const char*)&index_[v].spt_v[i], sizeof(index_[v].spt_v[i]));
ofs.write((const char*)&index_[v].spt_d[i], sizeof(index_[v].spt_d[i]));
}
int bisize = bindex_[v].size();
ofs.write((const char*)&bisize, sizeof(bisize));
for (NodeID i = 0; i < bindex_[v].size(); ++i) {
ofs.write((const char*)&bindex_[v].spt_v[i], sizeof(bindex_[v].spt_v[i]));
ofs.write((const char*)&bindex_[v].spt_d[i], sizeof(bindex_[v].spt_d[i]));
}
}
ofs.close();
}
// Deserialize labels written by save_labels() into the flat,
// 64-byte-aligned index_p / bindex_p arrays used by the pointer-based
// query path.  Sets numOfVertices from the file header.
// NOTE(review): memalign results are not checked; a failed allocation
// would crash on the first write — confirm OOM policy for this code base.
void load_labels(const char* load_filename) {
	cout << "Loading Labels" << endl;
	index_p = NULL;
	bindex_p = NULL;
	// BUG FIX: save_labels() writes with ios::binary, but the original
	// opened the stream in text mode; on platforms that translate line
	// endings this silently corrupts the data.
	ifstream ifs(load_filename, ios::binary | ios::in);
	NodeID isize = 0;
	ifs.read((char*)&isize, sizeof(isize));
	numOfVertices = isize;
	index_p = (index_t_p*)memalign(64, numOfVertices * sizeof(index_t_p));
	bindex_p = (index_t_p*)memalign(64, numOfVertices * sizeof(index_t_p));
	cout << numOfVertices << " vertices." << endl;
	for (NodeID v = 0; v < numOfVertices; ++v) {
		// Forward labels of v: (count, [hub, dist]...).
		index_t_p &idx = index_p[v];
		ifs.read((char*)&isize, sizeof(isize));
		idx.spt_v = (NodeID*)memalign(64, isize * sizeof(NodeID));
		idx.spt_d = (EdgeWeight*)memalign(64, isize * sizeof(EdgeWeight));
		for (NodeID i = 0; i < isize; ++i) {
			NodeID hub;
			EdgeWeight hub_weight;
			ifs.read((char*)&hub, sizeof(hub));
			ifs.read((char*)&hub_weight, sizeof(hub_weight));
			idx.spt_v[i] = hub;
			idx.spt_d[i] = hub_weight;
		}
		// Backward labels of v, same layout.
		index_t_p &bidx = bindex_p[v];
		ifs.read((char*)&isize, sizeof(isize));
		bidx.spt_v = (NodeID*)memalign(64, isize * sizeof(NodeID));
		bidx.spt_d = (EdgeWeight*)memalign(64, isize * sizeof(EdgeWeight));
		for (NodeID i = 0; i < isize; ++i) {
			NodeID hub;
			EdgeWeight hub_weight;
			ifs.read((char*)&hub, sizeof(hub));
			ifs.read((char*)&hub_weight, sizeof(hub_weight));
			bidx.spt_v[i] = hub;
			bidx.spt_d[i] = hub_weight;
		}
	}
	ifs.close();
}
// Repack the flat labels into a two-level "fewer-bit" layout: hubs with
// id < UCHAR_MAX (label lists are sorted by hub id, so these form a
// prefix) are stored as uint8_t in spt_lv/spt_ld, terminated by a
// (UCHAR_MAX, INF_WEIGHT) sentinel; the remaining hubs keep full
// NodeID width in spt_v/spt_d.
// NOTE(review): the full-width level is copied WITHOUT its terminating
// numOfVertices sentinel — confirm the fewer-bit query loop bounds the
// second level by count rather than by sentinel.
void convert_to_fewerbit(){
	two_index_p = NULL;
	b_two_index_p = NULL;
	two_index_p = (two_index_t_p*)memalign(64, numOfVertices * sizeof(two_index_t_p));
	b_two_index_p = (two_index_t_p*)memalign(64, numOfVertices * sizeof(two_index_t_p));
	for (NodeID v = 0; v < numOfVertices; ++v) {
		// ---- Forward labels ----
		two_index_t_p &idx = two_index_p[v];
		index_t_p &idx_original = index_p[v];
		// Length of the small-hub prefix.
		NodeID isize = 0;
		for(NodeID i = 0; idx_original.spt_v[i] < UCHAR_MAX; ++i){
			++isize;
		}
		idx.spt_lv = (uint8_t*)memalign(64, (isize + 1) * sizeof(uint8_t));
		idx.spt_ld = (EdgeWeight*)memalign(64, (isize + 1) * sizeof(EdgeWeight));
		for (NodeID i = 0; i < isize; ++i) {
			idx.spt_lv[i] = idx_original.spt_v[i];
			idx.spt_ld[i] = idx_original.spt_d[i];
		}
		idx.spt_lv[isize] = UCHAR_MAX;   // sentinel closing the small level
		idx.spt_ld[isize] = INF_WEIGHT;
		// Remaining full-width hubs up to the numOfVertices sentinel.
		NodeID larger_size = 0;
		for(NodeID i = isize; idx_original.spt_v[i] != numOfVertices; ++i){
			++larger_size;
		}
		idx.spt_v = (NodeID*)memalign(64, larger_size * sizeof(NodeID));
		idx.spt_d = (EdgeWeight*)memalign(64, larger_size * sizeof(EdgeWeight));
		for (NodeID i = 0; i < larger_size; ++i) {
			idx.spt_v[i] = idx_original.spt_v[i + isize];
			idx.spt_d[i] = idx_original.spt_d[i + isize];
		}
		// ---- Backward labels: identical transformation ----
		two_index_t_p &b_idx = b_two_index_p[v];
		index_t_p &b_idx_original = bindex_p[v];
		isize = 0;
		for(NodeID i = 0; b_idx_original.spt_v[i] < UCHAR_MAX; ++i){
			++isize;
		}
		b_idx.spt_lv = (uint8_t*)memalign(64, (isize + 1) * sizeof(uint8_t));
		b_idx.spt_ld = (EdgeWeight*)memalign(64, (isize + 1) * sizeof(EdgeWeight));
		for (NodeID i = 0; i < isize; ++i) {
			b_idx.spt_lv[i] = b_idx_original.spt_v[i];
			b_idx.spt_ld[i] = b_idx_original.spt_d[i];
		}
		b_idx.spt_lv[isize] = UCHAR_MAX;
		b_idx.spt_ld[isize] = INF_WEIGHT;
		larger_size = 0;
		for(NodeID i = isize; b_idx_original.spt_v[i] != numOfVertices; ++i){
			++larger_size;
		}
		b_idx.spt_v = (NodeID*)memalign(64, larger_size * sizeof(NodeID));
		b_idx.spt_d = (EdgeWeight*)memalign(64, larger_size * sizeof(EdgeWeight));
		for (NodeID i = 0; i < larger_size; ++i) {
			b_idx.spt_v[i] = b_idx_original.spt_v[i + isize];
			b_idx.spt_d[i] = b_idx_original.spt_d[i + isize];
		}
	}
}
// For every vertex r, count how many label entries across all vertices
// (both directions) use r as their hub, and write one count per line.
// BUG FIX: label lists end with a sentinel hub equal to numOfVertices
// (see avg_size), so the original stat(numOfVertices) indexed one past
// the end of the vector; the histogram now has an extra slot for the
// sentinel, and only the first numOfVertices counts are written.
void save_labels_iteration_stats(const char* save_filename) {
	vector<NodeID> stat(numOfVertices + 1, 0);
	for (NodeID v = 0; v < numOfVertices; ++v) {
		for (NodeID i = 0; i < index_[v].size(); ++i)
			stat[index_[v].spt_v[i]]++;
		for (NodeID i = 0; i < bindex_[v].size(); ++i)
			stat[bindex_[v].spt_v[i]]++;
	}
	ofstream ofs(save_filename);
	for (NodeID v = 0; v < numOfVertices; ++v) {
		ofs << stat[v] << endl;
	}
	ofs.close();
}
};
class DPLabel{
public:
vector<index_t_path> index_;
vector<index_t_path> bindex_; // Backward labels.
index_t_path_p* index_p;
index_t_path_p* bindex_p;
DPLabel() {
index_.resize(numOfVertices);
bindex_.resize(numOfVertices);
}
~DPLabel() {
Free();
}
inline EdgeWeight query_path(NodeID s, NodeID t, vector<NodeID>& rank, vector<NodeID>& inv) {
EdgeWeight distance = INF_WEIGHT;
NodeID meetnode = numOfVertices;
NodeID s_parent;
NodeID t_parent;
const index_t_path_p &idx_s = index_p[s];
const index_t_path_p &idx_t = bindex_p[t];
_mm_prefetch(&idx_s.spt_v[0], _MM_HINT_T0);
_mm_prefetch(&idx_t.spt_v[0], _MM_HINT_T0);
_mm_prefetch(&idx_s.spt_d[0], _MM_HINT_T0);
_mm_prefetch(&idx_t.spt_d[0], _MM_HINT_T0);
_mm_prefetch(&idx_s.spt_p[0], _MM_HINT_T0);
_mm_prefetch(&idx_t.spt_p[0], _MM_HINT_T0);
for (int i = 0, j = 0; ; ) {
NodeID v1 = idx_s.spt_v[i], v2 = idx_t.spt_v[j];
if (v1 == numOfVertices) break; // Sentinel
if (v1 == v2) {
EdgeWeight td = idx_s.spt_d[i] + idx_t.spt_d[j];
if (td < distance) {
distance = td;
//if (v1 < meetnode) {
meetnode = v1;
s_parent = idx_s.spt_p[i];
t_parent = idx_t.spt_p[j];
// }
}
++i;
++j;
}
else {
i += v1 < v2 ? 1 : 0;
j += v1 > v2 ? 1 : 0;
}
}
//Next, retrieve path from s - meetnode and meetnode - t.
vector<NodeID> path_from_s;
vector<NodeID> path_to_t;
path_from_s.push_back(s_parent);
path_to_t.push_back(t_parent);
/* if (s == 194569 && t == 20072)
cout << "debug." << " meet: " << meetnode << " sparent:" << s_parent << " tparent:" << t_parent << endl;*/
NodeID inv_meetnode = inv[meetnode];
while (path_from_s.back() != inv_meetnode) {
/*if (s == 194569 && t == 20072)
cout << "s meet:" << path_from_s.back() << endl;*/
const index_t_path_p &idx_from_s = index_p[path_from_s.back()];
_mm_prefetch(&idx_from_s.spt_v[0], _MM_HINT_T0);
_mm_prefetch(&idx_from_s.spt_p[0], _MM_HINT_T0);
// vector<NodeID>& index_from_s = index_[path_from_s.back()].spt_v;
for (int i = 0; ; ++i) {
if (idx_from_s.spt_v[i] == numOfVertices) break;
if (idx_from_s.spt_v[i] == meetnode) {
path_from_s.push_back(idx_from_s.spt_p[i]);
break;
}
}
}
while (path_to_t.back() != inv_meetnode) {
/*if (s == 194569 && t == 20072)
cout << "t meet:" << path_to_t.back() << endl;*/
// vector<NodeID>& index_to_t = index_[path_to_t.back()].spt_v;
const index_t_path_p &idx_to_t = bindex_p[path_to_t.back()];
_mm_prefetch(&idx_to_t.spt_v[0], _MM_HINT_T0);
_mm_prefetch(&idx_to_t.spt_p[0], _MM_HINT_T0);
for (int i = 0; ; ++i) {
if (idx_to_t.spt_v[i] == numOfVertices) break;
if (idx_to_t.spt_v[i] == meetnode) {
path_to_t.push_back(idx_to_t.spt_p[i]);
break;
}
}
}
return distance;
}
EdgeWeight query_path_p(NodeID s, NodeID t, vector<NodeID>& rank, vector<NodeID>& inv) {
EdgeWeight distance = INF_WEIGHT;
vector<NodeID>& index_s = index_[s].spt_v;
vector<EdgeWeight>& index_s_d = index_[s].spt_d;
vector<NodeID>& bindex_t = bindex_[t].spt_v;
vector<EdgeWeight>& bindex_t_d = bindex_[t].spt_d;
NodeID meetnode = numOfVertices;
int s_parent;
int t_parent;
for (int i = 0, j = 0; i < index_s.size(), j < bindex_t.size(); ) {
if (index_s[i] == bindex_t[j]) {
if (distance >(EdgeWeight)(index_s_d[i] + bindex_t_d[j])) {
distance = (EdgeWeight)(index_s_d[i] + bindex_t_d[j]);
// if (index_s[i] < meetnode) {
meetnode = index_s[i];
s_parent = index_[s].spt_p[i];
t_parent = index_[t].spt_p[j];
// }
}
//distance = min(distance, (EdgeWeight)(index_s_d[i] + bindex_t_d[j]));
++i;
++j;
}
else {
if (index_s[i] < bindex_t[j])
++i;
else
++j;
}
}
//Next, retrieve path from s - meetnode and meetnode - t.
vector<NodeID> path_from_s;
vector<NodeID> path_to_t;
path_from_s.push_back(s_parent);
path_to_t.push_back(t_parent);
/* if (s == 194569 && t == 20072)
cout << "debug." << " meet: " << meetnode << " sparent:" << s_parent << " tparent:" << t_parent << endl;*/
while (path_from_s.back() != inv[meetnode]) {
/*if (s == 194569 && t == 20072)
cout << "s meet:" << path_from_s.back() << endl;*/
vector<NodeID>& index_from_s = index_[path_from_s.back()].spt_v;
for (int i = 0; i < index_from_s.size(); ++i) {
if (index_from_s[i] == meetnode) {
path_from_s.push_back(index_[path_from_s.back()].spt_p[i]);
break;
}
}
}
while (path_to_t.back() != inv[meetnode]) {
/*if (s == 194569 && t == 20072)
cout << "t meet:" << path_to_t.back() << endl;*/
vector<NodeID>& index_to_t = bindex_[path_to_t.back()].spt_v;
for (int i = 0; i < index_to_t.size(); ++i) {
if (index_to_t[i] == meetnode) {
path_to_t.push_back(bindex_[path_to_t.back()].spt_p[i]);
break;
}
}
}
//for (int i = 0; i < path_from_s.size(); ++i)
// path_from_s[i] = inv[path_from_s[i]];
//for (int i = 0; i < path_to_t.size(); ++i)
// path_to_t[i] = inv[path_to_t[i]];
return path_from_s.size() + path_to_t.size();
}
void Free() {
if (index_.size() == 0 || bindex_.size() == 0) return;
for (int v = 0; v < numOfVertices; ++v) {
index_[v].spt_v.clear();
index_[v].spt_d.clear();
if (DIRECTED_FLAG == true) {
bindex_[v].spt_v.clear();
bindex_[v].spt_d.clear();
}
}
index_.clear();
bindex_.clear();
}
double avg_size() {
double total = 0;
for (int i = 0; i < numOfVertices; ++i) {
total += index_[i].spt_v.size();
total += bindex_[i].spt_v.size();
}
double avg = total / numOfVertices / 2 - 1; // We do not count the trivial labels (V, INF_WEIGHT).
return avg;
}
void print_stat() {
cout << "Average Label Size: " << avg_size() << endl;
//cout << "Maximum Label Size: " << max_size() << endl;
}
void save_labels(const char* save_filename) {
ofstream ofs(save_filename, ios::binary | ios::out);
ofs.write((const char*)&numOfVertices, sizeof(numOfVertices));
for (NodeID v = 0; v < numOfVertices; ++v) {
int isize = index_[v].size();
ofs.write((const char*)&isize, sizeof(isize));
for (NodeID i = 0; i < index_[v].size(); ++i) {
ofs.write((const char*)&index_[v].spt_v[i], sizeof(index_[v].spt_v[i]));
ofs.write((const char*)&index_[v].spt_p[i], sizeof(index_[v].spt_p[i]));
ofs.write((const char*)&index_[v].spt_d[i], sizeof(index_[v].spt_d[i]));
}
int bisize = bindex_[v].size();
ofs.write((const char*)&bisize, sizeof(bisize));
for (NodeID i = 0; i < bindex_[v].size(); ++i) {
ofs.write((const char*)&bindex_[v].spt_v[i], sizeof(bindex_[v].spt_v[i]));
ofs.write((const char*)&bindex_[v].spt_p[i], sizeof(bindex_[v].spt_p[i]));
ofs.write((const char*)&bindex_[v].spt_d[i], sizeof(bindex_[v].spt_d[i]));
}
}
ofs.close();
}
void load_labels(const char* load_filename) {
index_p = NULL;
bindex_p = NULL;
ifstream ifs(load_filename, ios::binary | ios::in);
NodeID isize;
ifs.read((char*)&isize, sizeof(isize));
numOfVertices = isize;
index_p = (index_t_path_p*)memalign(64, numOfVertices * sizeof(index_t_path_p));
bindex_p = (index_t_path_p*)memalign(64, numOfVertices * sizeof(index_t_path_p));
cout << numOfVertices << " vertices." << endl;
for (NodeID v = 0; v < numOfVertices; ++v) {
index_t_path_p &idx = index_p[v];
ifs.read((char*)&isize, sizeof(isize));
idx.spt_v = (NodeID*)memalign(64, isize * sizeof(NodeID));
idx.spt_p = (NodeID*)memalign(64, isize * sizeof(NodeID));
idx.spt_d = (EdgeWeight*)memalign(64, isize * sizeof(EdgeWeight));
for (NodeID i = 0; i < isize; ++i) {
NodeID hub;
EdgeWeight hub_weight;
NodeID hub_parent;
ifs.read((char*)&hub, sizeof(hub));
ifs.read((char*)&hub_parent, sizeof(hub_parent));
ifs.read((char*)&hub_weight, sizeof(hub_weight));
//index_[v].spt_v[i] = hub;
//index_[v].spt_d[i] = hub_weight;
idx.spt_v[i] = hub;
idx.spt_d[i] = hub_weight;
idx.spt_p[i] = hub_parent;
}
// index_[v].spt_v.resize(isize);
// index_[v].spt_d.resize(isize);
index_t_path_p &bidx = bindex_p[v];
ifs.read((char*)&isize, sizeof(isize));
bidx.spt_v = (NodeID*)memalign(64, isize * sizeof(NodeID));
bidx.spt_d = (EdgeWeight*)memalign(64, isize * sizeof(EdgeWeight));
bidx.spt_p = (NodeID*)memalign(64, isize * sizeof(NodeID));
for (NodeID i = 0; i < isize; ++i) {
NodeID hub;
EdgeWeight hub_weight;
NodeID hub_parent;
ifs.read((char*)&hub, sizeof(hub));
ifs.read((char*)&hub_parent, sizeof(hub_parent));
ifs.read((char*)&hub_weight, sizeof(hub_weight));
//index_[v].spt_v[i] = hub;
//index_[v].spt_d[i] = hub_weight;
bidx.spt_v[i] = hub;
bidx.spt_d[i] = hub_weight;
bidx.spt_p[i] = hub_parent;
}
}
ifs.close();
/*index_.clear();
bindex_.clear();
ifstream ifs(load_filename, ios::binary | ios::in);
NodeID isize;
ifs.read((char*)&isize, sizeof(isize));
numOfVertices = isize;
index_.resize(numOfVertices);
bindex_.resize(numOfVertices);
for (NodeID v = 0; v < numOfVertices; ++v) {
ifs.read((char*)&isize, sizeof(isize));
index_[v].spt_v.resize(isize);
index_[v].spt_p.resize(isize);
index_[v].spt_d.resize(isize);
for (NodeID i = 0; i < index_[v].size(); ++i) {
NodeID hub;
NodeID parent;
EdgeWeight hub_weight;
ifs.read((char*)&hub, sizeof(hub));
ifs.read((char*)&parent, sizeof(parent));
ifs.read((char*)&hub_weight, sizeof(hub_weight));
index_[v].spt_v[i] = hub;
index_[v].spt_p[i] = parent;
index_[v].spt_d[i] = hub_weight;
}
ifs.read((char*)&isize, sizeof(isize));
bindex_[v].spt_v.resize(isize);
bindex_[v].spt_d.resize(isize);
for (NodeID i = 0; i < bindex_[v].size(); ++i) {
NodeID hub;
NodeID parent;
EdgeWeight hub_weight;
ifs.read((char*)&hub, sizeof(hub));
ifs.read((char*)&parent, sizeof(parent));
ifs.read((char*)&hub_weight, sizeof(hub_weight));
bindex_[v].spt_v[i] = hub;
bindex_[v].spt_p[i] = parent;
bindex_[v].spt_d[i] = hub_weight;
}
}
ifs.close();*/
}
inline EdgeWeight query_p(NodeID s, NodeID t) {
//EdgeWeight distance = INF_WEIGHT;
//
////const index_t_p &idx_s = index_p[s];
////const index_t_p &idx_t = bindex_p[t];
//NodeID *vs = index_p[s].spt_v;
//NodeID *vt = bindex_p[t].spt_v;
//EdgeWeight* ws = index_p[s].spt_d;
//EdgeWeight* wt = bindex_p[t].spt_d;
//_mm_prefetch(vs, _MM_HINT_T0);
//_mm_prefetch(vt, _MM_HINT_T0);
//_mm_prefetch(ws, _MM_HINT_T0);
//_mm_prefetch(wt, _MM_HINT_T0);
//for (unsigned i = 0, j = 0; ; ) {
// if (*(vs + i) == *(vt + j)) {
// if (*(vs + i) == numOfVertices) break; // Sentinel
// EdgeWeight td = *(ws + i) + *(wt + j);
// if (td < distance) distance = td;
// ++i;
// ++j;
// }
// else {
// i += *(vs + i) < *(vt + j) ? 1 : 0;
// j += *(vs + i) > *(vt + j) ? 1 : 0;
// }
//}
//return distance;
EdgeWeight distance = INF_WEIGHT;
const index_t_path_p &idx_s = index_p[s];
const index_t_path_p &idx_t = bindex_p[t];
_mm_prefetch(&idx_s.spt_v[0], _MM_HINT_T0);
_mm_prefetch(&idx_t.spt_v[0], _MM_HINT_T0);
_mm_prefetch(&idx_s.spt_d[0], _MM_HINT_T0);
_mm_prefetch(&idx_t.spt_d[0], _MM_HINT_T0);
for (int i = 0, j = 0; ; ) {
NodeID v1 = idx_s.spt_v[i], v2 = idx_t.spt_v[j];
if (v1 == v2) {
if (v1 == numOfVertices) break; // Sentinel
EdgeWeight td = idx_s.spt_d[i] + idx_t.spt_d[j];
if (td < distance) distance = td;
++i;
++j;
}
else {
i += v1 < v2 ? 1 : 0;
j += v1 > v2 ? 1 : 0;
}
}
return distance;
}
};
// Undirected bit-parallel hub labels (pruned-landmark style): each
// vertex carries kNumBitParallelRoots bit-parallel root entries
// (bpspt_d distance, bpspt_s[0]/[1] neighbor-set bitmasks) plus an
// ordinary sentinel-terminated hub list (spt_v/spt_d).
template<int kNumBitParallelRoots = 50>
class BPLabel {
public:
	index_t_bp<kNumBitParallelRoots>* index_bp;

	BPLabel() {
	}
	~BPLabel() {
		//Free();
	}

	// Distance query: probe the bit-parallel roots first, then merge the
	// sorted hub lists of s and t.
	EdgeWeight query_p(NodeID s, NodeID t) {
		EdgeWeight distance = INF_WEIGHT;
		NodeID *vs = index_bp[s].spt_v;
		NodeID *vt = index_bp[t].spt_v;
		EdgeWeight* ws = index_bp[s].spt_d;
		EdgeWeight* wt = index_bp[t].spt_d;
		_mm_prefetch(vs, _MM_HINT_T0);
		_mm_prefetch(vt, _MM_HINT_T0);
		_mm_prefetch(ws, _MM_HINT_T0);
		_mm_prefetch(wt, _MM_HINT_T0);
		for (int i = 0; i < kNumBitParallelRoots; ++i) {
			// d(s,r)+d(r,t) through root r; overlapping neighbor-set bits
			// tighten the estimate by 1 or 2.
			EdgeWeight td = index_bp[s].bpspt_d[i] + index_bp[t].bpspt_d[i];
			if (td - 2 <= distance) {
				td +=
					(index_bp[s].bpspt_s[i][0] & index_bp[t].bpspt_s[i][0]) ? -2 :
					((index_bp[s].bpspt_s[i][0] & index_bp[t].bpspt_s[i][1]) | (index_bp[s].bpspt_s[i][1] & index_bp[t].bpspt_s[i][0]))
					? -1 : 0;
				if (td < distance) distance = td;
			}
		}
		for (unsigned i = 0, j = 0; ; ) {
			if (*(vs + i) == *(vt + j)) {
				if (*(vs + i) == numOfVertices) break; // Sentinel
				EdgeWeight td = *(ws + i) + *(wt + j);
				if (td < distance) distance = td;
				++i;
				++j;
			}
			else {
				i += *(vs + i) < *(vt + j) ? 1 : 0;
				j += *(vs + i) > *(vt + j) ? 1 : 0;
			}
		}
		return distance;
	}

	// As query_p, also reporting whether the final distance came from a
	// bit-parallel root (isBP).
	EdgeWeight query_p(NodeID s, NodeID t, bool& isBP) {
		EdgeWeight distance = INF_WEIGHT;
		const index_t_bp<kNumBitParallelRoots> &idx_s = index_bp[s];
		const index_t_bp<kNumBitParallelRoots> &idx_t = index_bp[t];
		_mm_prefetch(&idx_s.spt_v[0], _MM_HINT_T0);
		_mm_prefetch(&idx_t.spt_v[0], _MM_HINT_T0);
		_mm_prefetch(&idx_s.spt_d[0], _MM_HINT_T0);
		_mm_prefetch(&idx_t.spt_d[0], _MM_HINT_T0);
		isBP = false;
		for (int i = 0; i < kNumBitParallelRoots; ++i) {
			EdgeWeight td = index_bp[s].bpspt_d[i] + index_bp[t].bpspt_d[i];
			if (td - 2 <= distance) {
				td +=
					(index_bp[s].bpspt_s[i][0] & index_bp[t].bpspt_s[i][0]) ? -2 :
					((index_bp[s].bpspt_s[i][0] & index_bp[t].bpspt_s[i][1]) | (index_bp[s].bpspt_s[i][1] & index_bp[t].bpspt_s[i][0]))
					? -1 : 0;
				if (td < distance) {
					distance = td;
					isBP = true;
				}
			}
		}
		for (int i = 0, j = 0; ; ) {
			NodeID v1 = idx_s.spt_v[i], v2 = idx_t.spt_v[j];
			if (v1 == numOfVertices) break; // Sentinel
			if (v1 == v2) {
				EdgeWeight td = idx_s.spt_d[i] + idx_t.spt_d[j];
				if (td < distance) {
					distance = td;
					isBP = false;
				}
				++i;
				++j;
			}
			else {
				i += v1 < v2 ? 1 : 0;
				j += v1 > v2 ? 1 : 0;
			}
		}
		return distance;
	}

	void print_stat() {
		cout << "Average Label Size: " << avg_size() << endl;
	}

	// Mean hub-list length per vertex (sentinel excluded).
	double avg_size() {
		double lab_count = 0;
		for (NodeID v = 0; v < numOfVertices; ++v) {
			NodeID isize;
			for (isize = 1; index_bp[v].spt_v[isize - 1] != numOfVertices; ++isize) continue;
			lab_count += isize;
		}
		lab_count = (double)lab_count / (double)numOfVertices - 1;
		return lab_count;
	}

	// Release the per-vertex hub lists and the label array itself.
	// (bpspt_d / bpspt_s are inline members of index_t_bp and need no
	// separate free.)
	void Free() {
		for (int v = 0; v < numOfVertices; ++v) {
			free(index_bp[v].spt_v);
			free(index_bp[v].spt_d);
		}
		free(index_bp);
		index_bp = NULL;
	}

	// Serialize: vertex count, root count, then per vertex the
	// bit-parallel entries followed by (count, [hub, dist]...).
	void save_labels(const char* save_filename) {
		ofstream ofs(save_filename, ios::binary | ios::out);
		ofs.write((const char*)&numOfVertices, sizeof(numOfVertices));
		int knumbit = kNumBitParallelRoots;
		ofs.write((const char*)&knumbit, sizeof(knumbit));
		for (NodeID v = 0; v < numOfVertices; ++v) {
			index_t_bp<kNumBitParallelRoots> &idx = index_bp[v];
			for (int i = 0; i < kNumBitParallelRoots; ++i) {
				EdgeWeight d = idx.bpspt_d[i];
				uint64_t a = idx.bpspt_s[i][0];
				uint64_t b = idx.bpspt_s[i][1];
				ofs.write((const char*)&d, sizeof(d));
				ofs.write((const char*)&a, sizeof(a));
				ofs.write((const char*)&b, sizeof(b));
			}
			NodeID isize;
			for (isize = 1; idx.spt_v[isize - 1] != numOfVertices; ++isize) continue; // Find the sentinel
			ofs.write((const char*)&isize, sizeof(isize));
			for (NodeID i = 0; i < isize; ++i) {
				ofs.write((const char*)&idx.spt_v[i], sizeof(idx.spt_v[i]));
				ofs.write((const char*)&idx.spt_d[i], sizeof(idx.spt_d[i]));
			}
		}
		ofs.close();
	}

	// Deserialize labels written by save_labels(); rejects files whose
	// root count does not match the template parameter.
	void load_labels(const char* load_filename){
		index_bp = NULL;
		int knumbit;
		// BUG FIX: the writer uses ios::binary; the original opened the
		// stream in text mode, corrupting data on translating platforms.
		ifstream ifs(load_filename, ios::binary | ios::in);
		NodeID isize = 0;
		ifs.read((char*)&isize, sizeof(isize));
		numOfVertices = isize;
		// BUG FIX: knumbit was read with sizeof(isize) (a NodeID) instead
		// of its own size; the writer emitted sizeof(int) bytes.
		ifs.read((char*)&knumbit, sizeof(knumbit));
		if (knumbit != kNumBitParallelRoots) {
			cout << knumbit << "!=" << kNumBitParallelRoots << endl;
			return;
		}
		index_bp = (index_t_bp<kNumBitParallelRoots>*)memalign(64, numOfVertices * sizeof(index_t_bp<kNumBitParallelRoots>));
		for (NodeID v = 0; v < numOfVertices; ++v) {
			index_t_bp<kNumBitParallelRoots> &idx = index_bp[v];
			for (int i = 0; i < kNumBitParallelRoots; ++i) {
				EdgeWeight d;
				uint64_t a, b;
				ifs.read((char*)&d, sizeof(EdgeWeight));
				ifs.read((char*)&a, sizeof(uint64_t));
				ifs.read((char*)&b, sizeof(uint64_t));
				idx.bpspt_d[i] = d;
				idx.bpspt_s[i][0] = a;
				idx.bpspt_s[i][1] = b;
			}
			ifs.read((char*)&isize, sizeof(isize));
			idx.spt_v = (NodeID*)memalign(64, isize * sizeof(NodeID));
			idx.spt_d = (EdgeWeight*)memalign(64, isize * sizeof(EdgeWeight));
			for (NodeID i = 0; i < isize; ++i) {
				NodeID hub;
				EdgeWeight hub_weight;
				ifs.read((char*)&hub, sizeof(hub));
				ifs.read((char*)&hub_weight, sizeof(hub_weight));
				idx.spt_v[i] = hub;
				idx.spt_d[i] = hub_weight;
			}
		}
		ifs.close();
	}
};
// Directed bit-parallel hub labels: forward (index_bp) and backward
// (bindex_bp) per-vertex labels, each with kNumBitParallelRoots
// bit-parallel root entries plus a sentinel-terminated hub list.
template<int kNumBitParallelRoots = 50>
class DBPLabel {
public:
	index_t_bp<kNumBitParallelRoots>* index_bp;
	index_t_bp<kNumBitParallelRoots>* bindex_bp;

	DBPLabel() {
	}
	~DBPLabel() {
	}

	// Distance query: probe the bit-parallel roots, then merge the
	// forward hub list of s with the backward hub list of t.
	EdgeWeight query_p(NodeID s, NodeID t) {
		EdgeWeight distance = INF_WEIGHT;
		NodeID *vs = index_bp[s].spt_v;
		NodeID *vt = bindex_bp[t].spt_v;
		EdgeWeight* ws = index_bp[s].spt_d;
		EdgeWeight* wt = bindex_bp[t].spt_d;
		_mm_prefetch(vs, _MM_HINT_T0);
		_mm_prefetch(vt, _MM_HINT_T0);
		_mm_prefetch(ws, _MM_HINT_T0);
		_mm_prefetch(wt, _MM_HINT_T0);
		for (int i = 0; i < kNumBitParallelRoots; ++i) {
			// d(s,r)+d(r,t) through root r; overlapping neighbor-set bits
			// tighten the estimate by 1 or 2.
			EdgeWeight td = index_bp[s].bpspt_d[i] + bindex_bp[t].bpspt_d[i];
			if (td - 2 <= distance) {
				td +=
					(index_bp[s].bpspt_s[i][0] & bindex_bp[t].bpspt_s[i][0]) ? -2 :
					((index_bp[s].bpspt_s[i][0] & bindex_bp[t].bpspt_s[i][1]) | (index_bp[s].bpspt_s[i][1] & bindex_bp[t].bpspt_s[i][0]))
					? -1 : 0;
				if (td < distance) distance = td;
			}
		}
		for (unsigned i = 0, j = 0; ; ) {
			if (*(vs + i) == *(vt + j)) {
				if (*(vs + i) == numOfVertices) break; // Sentinel
				EdgeWeight td = *(ws + i) + *(wt + j);
				if (td < distance) distance = td;
				++i;
				++j;
			}
			else {
				i += *(vs + i) < *(vt + j) ? 1 : 0;
				j += *(vs + i) > *(vt + j) ? 1 : 0;
			}
		}
		return distance;
	}

	// As query_p, also reporting whether the final distance came from a
	// bit-parallel root (isBP).
	EdgeWeight query_p(NodeID s, NodeID t, bool& isBP) {
		isBP = false;
		EdgeWeight distance = INF_WEIGHT;
		NodeID *vs = index_bp[s].spt_v;
		NodeID *vt = bindex_bp[t].spt_v;
		EdgeWeight* ws = index_bp[s].spt_d;
		EdgeWeight* wt = bindex_bp[t].spt_d;
		_mm_prefetch(vs, _MM_HINT_T0);
		_mm_prefetch(vt, _MM_HINT_T0);
		_mm_prefetch(ws, _MM_HINT_T0);
		_mm_prefetch(wt, _MM_HINT_T0);
		for (int i = 0; i < kNumBitParallelRoots; ++i) {
			EdgeWeight td = index_bp[s].bpspt_d[i] + bindex_bp[t].bpspt_d[i];
			if (td - 2 <= distance) {
				td +=
					(index_bp[s].bpspt_s[i][0] & bindex_bp[t].bpspt_s[i][0]) ? -2 :
					((index_bp[s].bpspt_s[i][0] & bindex_bp[t].bpspt_s[i][1]) | (index_bp[s].bpspt_s[i][1] & bindex_bp[t].bpspt_s[i][0]))
					? -1 : 0;
				if (td < distance) {
					distance = td;
					isBP = true;
				}
			}
		}
		for (unsigned i = 0, j = 0; ; ) {
			if (*(vs + i) == *(vt + j)) {
				if (*(vs + i) == numOfVertices) break; // Sentinel
				EdgeWeight td = *(ws + i) + *(wt + j);
				if (td < distance) {
					distance = td;
					isBP = false;
				}
				++i;
				++j;
			}
			else {
				i += *(vs + i) < *(vt + j) ? 1 : 0;
				j += *(vs + i) > *(vt + j) ? 1 : 0;
			}
		}
		return distance;
	}

	void print_stat() {
		cout << "Average Label Size: " << avg_size() << endl;
	}

	// Mean hub-list length per vertex per direction (sentinel excluded).
	double avg_size() {
		double lab_count = 0;
		for (NodeID v = 0; v < numOfVertices; ++v) {
			NodeID isize;
			for (isize = 1; index_bp[v].spt_v[isize - 1] != numOfVertices; ++isize) continue;
			lab_count += isize;
			// BUG FIX: the original scanned the backward list but never
			// accumulated its length, so backward labels were ignored.
			for (isize = 1; bindex_bp[v].spt_v[isize - 1] != numOfVertices; ++isize) continue;
			lab_count += isize;
		}
		lab_count = (double)lab_count / (double)numOfVertices / 2 - 1;
		return lab_count;
	}

	// Release the per-vertex hub lists and both label arrays.
	// BUG FIX: the original also free()d bpspt_d and bpspt_s, but
	// load_labels() never allocates them separately (they are inline
	// members of index_t_bp, cf. BPLabel::Free) — freeing interior
	// pointers corrupts the heap.
	void Free() {
		for (int v = 0; v < numOfVertices; ++v) {
			free(index_bp[v].spt_v);
			free(index_bp[v].spt_d);
			free(bindex_bp[v].spt_v);
			free(bindex_bp[v].spt_d);
		}
		free(index_bp);
		free(bindex_bp);
		index_bp = NULL;
		bindex_bp = NULL;
	}

	// Serialize: vertex count, root count, then per vertex the forward
	// and backward bit-parallel entries followed by the forward and
	// backward hub lists as (count, [hub, dist]...).
	void save_labels(const char* save_filename) {
		ofstream ofs(save_filename, ios::binary | ios::out);
		ofs.write((const char*)&numOfVertices, sizeof(numOfVertices));
		int knumbit = kNumBitParallelRoots;
		ofs.write((const char*)&knumbit, sizeof(knumbit));
		for (NodeID v = 0; v < numOfVertices; ++v) {
			index_t_bp<kNumBitParallelRoots> &idx = index_bp[v];
			index_t_bp<kNumBitParallelRoots> &r_idx = bindex_bp[v];
			for (int i = 0; i < kNumBitParallelRoots; ++i) {
				EdgeWeight d = idx.bpspt_d[i];
				uint64_t a = idx.bpspt_s[i][0];
				uint64_t b = idx.bpspt_s[i][1];
				ofs.write((const char*)&d, sizeof(d));
				ofs.write((const char*)&a, sizeof(a));
				ofs.write((const char*)&b, sizeof(b));
			}
			for (int i = 0; i < kNumBitParallelRoots; ++i) {
				EdgeWeight d = r_idx.bpspt_d[i];
				uint64_t a = r_idx.bpspt_s[i][0];
				uint64_t b = r_idx.bpspt_s[i][1];
				ofs.write((const char*)&d, sizeof(d));
				ofs.write((const char*)&a, sizeof(a));
				ofs.write((const char*)&b, sizeof(b));
			}
			NodeID isize;
			for (isize = 1; idx.spt_v[isize - 1] != numOfVertices; ++isize) continue; // Find the sentinel
			ofs.write((const char*)&isize, sizeof(isize));
			for (NodeID i = 0; i < isize; ++i) {
				ofs.write((const char*)&idx.spt_v[i], sizeof(idx.spt_v[i]));
				ofs.write((const char*)&idx.spt_d[i], sizeof(idx.spt_d[i]));
			}
			for (isize = 1; r_idx.spt_v[isize - 1] != numOfVertices; ++isize) continue; // Find the sentinel
			ofs.write((const char*)&isize, sizeof(isize));
			for (NodeID i = 0; i < isize; ++i) {
				ofs.write((const char*)&r_idx.spt_v[i], sizeof(r_idx.spt_v[i]));
				ofs.write((const char*)&r_idx.spt_d[i], sizeof(r_idx.spt_d[i]));
			}
		}
		ofs.close();
	}

	// Deserialize labels written by save_labels(); rejects files whose
	// root count does not match the template parameter.
	void load_labels(const char* load_filename) {
		index_bp = NULL;
		bindex_bp = NULL;
		int knumbit;
		// BUG FIX: the writer uses ios::binary; the original opened the
		// stream in text mode, corrupting data on translating platforms.
		ifstream ifs(load_filename, ios::binary | ios::in);
		NodeID isize = 0;
		ifs.read((char*)&isize, sizeof(isize));
		numOfVertices = isize;
		// BUG FIX: knumbit was read with sizeof(isize) (a NodeID) instead
		// of its own size; the writer emitted sizeof(int) bytes.
		ifs.read((char*)&knumbit, sizeof(knumbit));
		if (knumbit != kNumBitParallelRoots) {
			cout << knumbit << "!=" << kNumBitParallelRoots << endl;
			return;
		}
		index_bp = (index_t_bp<kNumBitParallelRoots>*)memalign(64, numOfVertices * sizeof(index_t_bp<kNumBitParallelRoots>));
		bindex_bp = (index_t_bp<kNumBitParallelRoots>*)memalign(64, numOfVertices * sizeof(index_t_bp<kNumBitParallelRoots>));
		for (NodeID v = 0; v < numOfVertices; ++v) {
			index_t_bp<kNumBitParallelRoots> &idx = index_bp[v];
			index_t_bp<kNumBitParallelRoots> &r_idx = bindex_bp[v];
			for (int i = 0; i < kNumBitParallelRoots; ++i) {
				EdgeWeight d;
				uint64_t a, b;
				ifs.read((char*)&d, sizeof(EdgeWeight));
				ifs.read((char*)&a, sizeof(uint64_t));
				ifs.read((char*)&b, sizeof(uint64_t));
				idx.bpspt_d[i] = d;
				idx.bpspt_s[i][0] = a;
				idx.bpspt_s[i][1] = b;
			}
			for (int i = 0; i < kNumBitParallelRoots; ++i) {
				EdgeWeight d;
				uint64_t a, b;
				ifs.read((char*)&d, sizeof(EdgeWeight));
				ifs.read((char*)&a, sizeof(uint64_t));
				ifs.read((char*)&b, sizeof(uint64_t));
				r_idx.bpspt_d[i] = d;
				r_idx.bpspt_s[i][0] = a;
				r_idx.bpspt_s[i][1] = b;
			}
			ifs.read((char*)&isize, sizeof(isize));
			idx.spt_v = (NodeID*)memalign(64, isize * sizeof(NodeID));
			idx.spt_d = (EdgeWeight*)memalign(64, isize * sizeof(EdgeWeight));
			for (NodeID i = 0; i < isize; ++i) {
				NodeID hub;
				EdgeWeight hub_weight;
				ifs.read((char*)&hub, sizeof(hub));
				ifs.read((char*)&hub_weight, sizeof(hub_weight));
				idx.spt_v[i] = hub;
				idx.spt_d[i] = hub_weight;
			}
			ifs.read((char*)&isize, sizeof(isize));
			r_idx.spt_v = (NodeID*)memalign(64, isize * sizeof(NodeID));
			r_idx.spt_d = (EdgeWeight*)memalign(64, isize * sizeof(EdgeWeight));
			for (NodeID i = 0; i < isize; ++i) {
				NodeID hub;
				EdgeWeight hub_weight;
				ifs.read((char*)&hub, sizeof(hub));
				ifs.read((char*)&hub_weight, sizeof(hub_weight));
				r_idx.spt_v[i] = hub;
				r_idx.spt_d[i] = hub_weight;
			}
		}
		ifs.close();
	}
};
#endif |
simd-9.c | /* { dg-do run } */
/* { dg-additional-options "-msse2" { target sse2_runtime } } */
/* { dg-additional-options "-mavx" { target avx_runtime } } */
extern void abort ();
int a[32][32] __attribute__((aligned (32))) = { { 1 } };
struct S { int s; };
#pragma omp declare reduction (+:struct S:omp_out.s += omp_in.s)
#pragma omp declare reduction (foo:struct S:omp_out.s += omp_in.s)
#pragma omp declare reduction (foo:int:omp_out += omp_in)
/* Exercises "#pragma omp simd" with collapse(2), aligned, lastprivate and
   both a user-declared and the built-in '+' reduction on a struct type.
   Returns the sum of all a[i][j]; aborts if any reduction disagrees.  */
__attribute__((noinline, noclone)) int
foo (void)
{
  int i, j, u = 0;
  struct S s, t;
  s.s = 0; t.s = 0;
  /* s uses the '+' UDR declared for struct S, t the named 'foo' UDR,
     u the scalar 'foo' UDR; i and j are lastprivate so their final
     values survive the loop.  */
#pragma omp simd aligned(a : 32) lastprivate (i, j) reduction(+:s) reduction(foo:t, u) collapse(2)
  for (i = 0; i < 32; i++)
    for (j = 0; j < 32; j++)
      {
        /* NOTE(review): taking the addresses of the collapsed loop
           counters presumably forces them to be addressable inside the
           simd region — confirm against the testsuite intent.  */
        int *q = &i;
        int *r = &j;
        int x = a[i][j];
        s.s += x;
        t.s += x;
        u += x;
      }
  /* All three reductions must agree, and the lastprivate counters must
     hold their post-loop values.  */
  if (t.s != s.s || u != s.s || i != 32 || j != 32)
    abort ();
  return s.s;
}
/* Same computation as foo, but the simd clauses are written without
   separating whitespace and without lastprivate — presumably to check
   the compact clause spelling parses identically.  */
__attribute__((noinline, noclone)) int
bar (void)
{
  int i, j, u = 0;
  struct S s, t;
  s.s = 0; t.s = 0;
  /* Same reductions as foo: '+' UDR on s, named 'foo' UDR on t and u.  */
#pragma omp simd aligned(a:32)reduction(+:s)reduction(foo:t,u)collapse(2)
  for (i = 0; i < 32; i++)
    for (j = 0; j < 32; j++)
      {
        int *q = &i;
        int *r = &j;
        int x = a[i][j];
        s.s += x;
        t.s += x;
        u += x;
      }
  /* Reductions must agree and the counters must have run to completion.  */
  if (t.s != s.s || u != s.s || i != 32 || j != 32)
    abort ();
  return s.s;
}
/* Fill the matrix with a deterministic pattern a[r][c] = c + r/4, then
   require both reduction variants to produce the known total 19456.  */
int
main ()
{
  int r, c;
  for (r = 0; r < 32; r++)
    for (c = 0; c < 32; c++)
      a[r][c] = c + (r / 4);
  if (foo () != 19456)
    abort ();
  if (bar () != 19456)
    abort ();
  return 0;
}
|
matrixio.c | /// \file
/// Matrix I/O.
#include "matrixio.h"
#include <stdio.h>
#include <math.h>
#include "sparseMatrix.h"
#include "constants.h"
/// \details
/// Write the sparsity pattern of a sparse matrix as an ASCII grid:
/// '*' marks entries whose magnitude exceeds hthresh, '.' the rest.
/// One output line per matrix row, emitted in row order.
void writeSparsePattern(char* fname, struct SparseMatrixSt* spmatrix, real_t hthresh)
{
   char hrow[spmatrix->hsize];
   FILE* sFile = fopen(fname, "w");
   if (sFile == NULL)
   {
      fprintf(stderr, "writeSparsePattern: cannot open %s for writing\n", fname);
      return;
   }
   // BUG FIX: this loop previously ran under "#pragma omp parallel for",
   // which raced on the single shared hrow buffer and interleaved rows in
   // the output file. The output must be sequential and ordered.
   for (int i = 0; i < spmatrix->hsize; i++)
   {
      // Start with an empty row, then mark significant entries.
      for (int j = 0; j < spmatrix->hsize; j++)
      {
         hrow[j] = '.';
      }
      for (int j = 0; j < spmatrix->iia[i]; j++)
      {
         if (ABS(spmatrix->val[i][j]) > hthresh)
         {
            hrow[spmatrix->jja[i][j]] = '*';
         }
      }
      for (int j = 0; j < spmatrix->hsize; j++)
      {
         fprintf(sFile, "%c", hrow[j]);
      }
      fprintf(sFile, "\n");
   }
   fclose(sFile);
}
/// \details
/// Read a matrix from a file in Matrix Market coordinate format.
/// Entries in the file are 1-based; they are stored 0-based.
/// Assumes hmatrix->iia[] starts zeroed and each jja/val row is large
/// enough for the entries of that row — TODO confirm against callers.
void readMTX(char* fname, struct SparseMatrixSt* hmatrix)
{
   int hvalue, msum, irow, icol, ind;
   char header1[20], header2[20], header3[20], header4[20], header5[20];
   double value;
   FILE* hFile = fopen(fname, "r");
   if (hFile == NULL)
   {
      fprintf(stderr, "readMTX: cannot open %s\n", fname);
      return;
   }
   // Banner line (e.g. "%%MatrixMarket matrix coordinate real general"),
   // then dimensions and the non-zero count. Width limits prevent the
   // previous unbounded %s overflow of the 20-byte header buffers.
   if (fscanf(hFile, "%19s %19s %19s %19s %19s", header1, header2, header3, header4, header5) != 5 ||
       fscanf(hFile, "%d %d %d", &hvalue, &hvalue, &msum) != 3)
   {
      fprintf(stderr, "readMTX: malformed header in %s\n", fname);
      fclose(hFile);
      return;
   }
   // Read the sparse entries, converting 1-based indices to 0-based.
   for (int i = 0; i < msum; i++)
   {
      if (fscanf(hFile, "%d %d %lg", &irow, &icol, &value) != 3)
      {
         fprintf(stderr, "readMTX: truncated entry list in %s\n", fname);
         break;
      }
      irow--; icol--;
      ind = hmatrix->iia[irow];
      hmatrix->jja[irow][ind] = icol;
      hmatrix->val[irow][ind] = value;
      hmatrix->iia[irow]++;
   }
   fclose(hFile);
}
/// \details
/// Write out sparse matrix in Matrix Market coordinate format.
void writeMTX(char* fname, struct SparseMatrixSt* spmatrix)
{
   FILE* mFile;
   int msum;
   mFile = fopen(fname, "w");
   if (mFile == NULL)
   {
      fprintf(stderr, "writeMTX: cannot open %s for writing\n", fname);
      return;
   }
   // Banner line. BUG FIX: the previous format string "\%\%\%MatrixMarket..."
   // used an invalid escape sequence and handed printf an undefined "%M"
   // conversion; "%%%%" correctly emits the required "%%" prefix.
   fprintf(mFile, "%%%%MatrixMarket matrix coordinate real general\n");
   // Count non-zero elements across all rows.
   msum = 0;
   for (int i = 0; i < spmatrix->hsize; i++)
   {
      msum += spmatrix->iia[i];
   }
   // Dense dimensions followed by the number of non-zero elements.
   fprintf(mFile, "%d %d %d\n", spmatrix->hsize, spmatrix->hsize, msum);
   // Emit the entries 1-based, as the format requires.
   for (int i = 0; i < spmatrix->hsize; i++)
   {
      for (int j = 0; j < spmatrix->iia[i]; j++)
      {
         fprintf(mFile, "%d %d %lg\n", i+1, spmatrix->jja[i][j]+1, spmatrix->val[i][j]);
      }
   }
   fclose(mFile);
}
|
schedule-openmp.c | /*****************************************************************************
About: OpenMP program to see scheduling(static,dynamic,guided) of loop iterations
among threads.
export OMP_SCHEDULE=dynamic,4 //change chunksize to different values
export OMP_NUM_THREADS=4 //change no. of threads to different values
*****************************************************************************/
#include<stdio.h>
#include<omp.h>
#include<unistd.h>
/* Demonstrates runtime-selected loop scheduling: the policy and chunk
 * size come from OMP_SCHEDULE, the thread count from OMP_NUM_THREADS.
 * Each iteration reports which thread executed it. */
int main()
{
    #pragma omp parallel
    {
        /* Declaring these inside the region makes them private without
         * needing explicit clauses. */
        int thread_id = omp_get_thread_num();
        #pragma omp for schedule(runtime)
        for (int iter = 0; iter < 20; iter++)
        {
            printf("i=%3d tid=%d\n", iter, thread_id);
            sleep(4);   /* slow iterations make the schedule visible */
        }
    }
    return 0;
}
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Compute RESULT = X - Y for two `struct timeval` values.
 *
 * Y is normalized in place so the microsecond subtraction cannot go out
 * of range; callers should not rely on Y's contents afterwards.
 *
 * Returns 1 when the difference is negative, 0 otherwise.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y's microseconds when x has fewer usecs. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry surplus microseconds in the other direction. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* tv_usec is now guaranteed non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/* Driver for the tiled order-1 3D 7-point variable-coefficient stencil.
 * Usage: prog Nx Ny Nz Nt
 * Allocates a double-buffered grid A[2][Nz][Ny][Nx] plus seven coefficient
 * arrays, runs the tiled sweep TESTS times, and reports per-run times
 * (PRINT_RESULTS summarizes the best one). */
int main(int argc, char *argv[])
{
  int i, j, k, m, test;
  int Nx = 0, Ny = 0, Nz = 0, Nt = 0;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;   /* +2 accounts for the boundary layer */
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  /* BUG FIX: Nx..Nt were previously used uninitialized (undefined
   * behavior) whenever fewer than four arguments were supplied. */
  if (Nx < 3 || Ny < 3 || Nz < 3 || Nt < 1) {
    fprintf(stderr, "usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
  }
  // allocate the arrays
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 24;
  tile_size[3] = 128;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  // initialize variables; fixed seed keeps runs reproducible
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    // (A glibc header comment inlined here by the code generator was
    //  removed as preprocessing residue.)
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lbp, ubp;
    register int lbv, ubv;
    /* Start of CLooG code: time-tiled sweep over the t5 time steps with
     * 16x24x128 space tiles; t6/t7/t8 are the shifted z/y/x indices. */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,8);t1++) {
        lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16));
        ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-2,3)),ceild(16*t2-Nz-20,24));t3<=min(min(min(floord(Nt+Ny-4,24),floord(8*t1+Ny+13,24)),floord(16*t2+Ny+12,24)),floord(16*t1-16*t2+Nz+Ny+11,24));t3++) {
            for (t4=max(max(max(0,ceild(t1-15,16)),ceild(16*t2-Nz-124,128)),ceild(24*t3-Ny-124,128));t4<=min(min(min(min(floord(Nt+Nx-4,128),floord(8*t1+Nx+13,128)),floord(16*t2+Nx+12,128)),floord(24*t3+Nx+20,128)),floord(16*t1-16*t2+Nz+Nx+11,128));t4++) {
              for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),24*t3-Ny+2),128*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),24*t3+22),128*t4+126),16*t1-16*t2+Nz+13);t5++) {
                for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) {
                    lbv=max(128*t4,t5+1);
                    ubv=min(128*t4+127,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays (innermost first)
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  /* Also release the top-level pointer arrays, which previously leaked. */
  free(A);
  free(coef);
  free(tile_size);
  return 0;
}
|
Merge.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#define SWAP(a,b) {tt=(a); (a)=(b); (b)=tt;}
/* Merge two sorted int runs a1[0..n1) and a2[0..n2) into r[0..n1+n2).
 * On ties the element from a2 is emitted first (strict < comparison). */
void Merge(int *a1,int n1, int *a2,int n2, int *r)
{
    int p = 0, q = 0, out = 0;
    /* Take the smaller head element until one run is exhausted. */
    while (p < n1 && q < n2)
    {
        if (a1[p] < a2[q])
            r[out++] = a1[p++];
        else
            r[out++] = a2[q++];
    }
    /* Copy whatever remains of either run. */
    while (p < n1)
        r[out++] = a1[p++];
    while (q < n2)
        r[out++] = a2[q++];
}
/* Recursive top-down merge sort of m[0..n); t is scratch of size >= n. */
void MSort(int *m,int n,int *t)
{
    int left, right;
    if (n <= 1)
        return;                 /* zero or one element: already sorted */
    left = n / 2;
    right = n - left;
    MSort(m, left, t);
    MSort(m + left, right, t);
    Merge(m, left, m + left, right, t);
    memcpy(m, t, n * sizeof(int));
}
/* Merge sort parallelized with OpenMP sections: the array is split into
 * four quarters (lengths n1 n2 n3 n4), each sorted independently, then
 * the quarter pairs are merged, then one final merge joins the halves.
 * t is scratch of size >= n. */
void MSort2(int *m,int n,int *t)
{
 int n1,n2,n3,n4;
 if (n<=1)return;
 /* Split n into halves, then each half into two quarters. */
 n1=n/2; n3=n-n1;
 n2=n1/2; n1=n1-n2;
 n4=n3/2; n3=n3-n4;
 /* Sort the four disjoint quarters concurrently. */
 #pragma omp parallel sections
 {
 #pragma omp section
 {MSort(m,n1,t);}
 #pragma omp section
 {MSort (m+n1,n2,t+n1);}
 #pragma omp section
 {MSort (m+n1+n2,n3,t+n1+n2);}
 #pragma omp section
 {MSort (m+n1+n2+n3,n4,t+n1+n2+n3);}
 }
 /* Merge quarter pairs concurrently; scratch regions are disjoint. */
 #pragma omp parallel sections
 {
 #pragma omp section
 {Merge(m,n1, m+n1,n2, t); memcpy(m,t, (n1+n2)*sizeof(int));}
 #pragma omp section
 {Merge(m+n1+n2,n3,m+n1+n2+n3,n4,t+n1+n2); memcpy(m+n1+n2, t+n1+n2, (n3+n4)*sizeof(int));}
 }
 /* Final merge of the two sorted halves. */
 Merge(m,n1+n2, m+n1+n2,n3+n4,t);
 memcpy(m,t,n*sizeof(int));
}
/* Bottom-up (iterative) merge sort with OpenMP-parallel passes.
 * Pass k merges adjacent sorted runs of length k; run length doubles
 * each pass.  t is scratch of size >= n. */
void MSort3(int *m,int n,int *t)
{
 int k,k2,i,tt;
 //for (k=1;k<n;k*=2)
 /* First pass (k == 1) specialized as a parallel compare-swap of pairs;
  * an odd trailing element is left in place. */
 k=1;
 {
 k2=k;   /* NOTE(review): k2 is unused in this pass; looks vestigial
            from the commented-out unified loop above. */
 #pragma omp parallel for private(tt)
 for(i=0;i<n-1;i+=2)
 if(m[i]>m[i+1]) SWAP(m[i],m[i+1]);
 }
 /* Remaining passes: merge run pairs m[i..i+k) and m[i+k..i+k+k2),
  * where the second run may be shorter at the array tail.  Each chunk
  * writes a disjoint slice of t, so iterations are independent. */
 for(k=2;k<n;k*=2)
 {
 k2=k;
 #pragma omp parallel for private(k2)
 for(i=0;i<n-k;i+=2*k)
 {k2=k;
 /* Clamp the second run to what remains of the array. */
 if(n-i-k<k2)k2=n-i-k;
 Merge(m+i,k, m+i+k,k2,t+i);
 memcpy(m+i,t+i,(k+k2)*sizeof(int));
 }
 }
}
/* Driver: generate one random array, sort copies of it with each variant,
 * time each sort, and cross-check the results element by element. */
int main(void)
{
    int i, *work, *original, *scratch, *reference, n = 100000;
    time_t t0, t1;
    original = (int*)malloc(n*sizeof(int));
    work = (int*)malloc(n*sizeof(int));
    scratch = (int*)malloc(n*sizeof(int));
    reference = (int*)malloc(n*sizeof(int));
    for (i = 0; i < n; i++)
        original[i] = rand()%n;
    /* Baseline: sequential top-down merge sort. */
    memcpy(reference, original, n*sizeof(int));
    time(&t0); MSort(reference, n, scratch); time(&t1);
    printf("MSort:%d\n", (int)(t1-t0));
    for (i = 1; i < n; i++)
        if (reference[i] < reference[i-1]) printf ("Err1:i=%d\n",i);
    /* Bottom-up parallel variant must match the baseline exactly. */
    memcpy(work, original, n*sizeof(int));
    time(&t0); MSort3(work, n, scratch); time(&t1);
    printf("MSort3:%d\n", (int)(t1-t0));
    for (i = 0; i < n; i++)
        if (work[i] != reference[i]) printf ("Err2:i=%d\n",i);
    /* Four-way sections variant must match as well. */
    memcpy(work, original, n*sizeof(int));
    time(&t0); MSort2(work, n, scratch); time(&t1);
    printf("MSort2:%d\n", (int)(t1-t0));
    for (i = 0; i < n; i++)
        if (work[i] != reference[i]) printf ("Err3:i=%d\n",i);
    free(original); original = NULL;
    free(work); work = NULL;
    free(scratch); scratch = NULL;
    free(reference); reference = NULL;
    printf("done\n");
    return 0;
}
|
debug_task_shared.c | // This testcase checks emission of debug info for variables
// inside shared clause of task construct.
// REQUIRES: x86_64-linux
// RUN: %clang_cc1 -debug-info-kind=constructor -DSHARED -x c -verify -triple x86_64-pc-linux-gnu -fopenmp -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK
// RUN: %clang_cc1 -debug-info-kind=constructor -x c -verify -triple x86_64-pc-linux-gnu -fopenmp -emit-llvm %s -o - | FileCheck %s --check-prefix=NEG
// RUN: %clang_cc1 -debug-info-kind=line-directives-only -DSHARED -x c -verify -triple x86_64-pc-linux-gnu -fopenmp -emit-llvm %s -o - | FileCheck %s --check-prefix=NEG
// RUN: %clang_cc1 -debug-info-kind=line-tables-only -DSHARED -x c -verify -triple x86_64-pc-linux-gnu -fopenmp -emit-llvm %s -o - | FileCheck %s --check-prefix=NEG
// RUN: %clang_cc1 -debug-info-kind=limited -DSHARED -x c -verify -triple x86_64-pc-linux-gnu -fopenmp -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK
// expected-no-diagnostics
// CHECK-LABEL: define internal i32 @.omp_task_entry.
// CHECK-DAG: [[CONTEXT:%[0-9]+]] = load %struct.anon*, %struct.anon** %__context.addr.i, align 8
// CHECK-DAG: call void @llvm.dbg.declare(metadata %struct.anon* [[CONTEXT]], metadata [[SHARE2:![0-9]+]], metadata !DIExpression(DW_OP_deref))
// CHECK-DAG: call void @llvm.dbg.declare(metadata %struct.anon* [[CONTEXT]], metadata [[SHARE3:![0-9]+]], metadata !DIExpression(DW_OP_plus_uconst, 8, DW_OP_deref))
// CHECK-DAG: call void @llvm.dbg.declare(metadata %struct.anon* [[CONTEXT]], metadata [[SHARE1:![0-9]+]], metadata !DIExpression(DW_OP_plus_uconst, 16, DW_OP_deref))
// CHECK-DAG: [[SHARE2]] = !DILocalVariable(name: "share2"
// CHECK-DAG: [[SHARE3]] = !DILocalVariable(name: "share3"
// CHECK-DAG: [[SHARE1]] = !DILocalVariable(name: "share1"
// NEG-LABEL: define internal i32 @.omp_task_entry.
// NEG: [[CONTEXT:%[0-9]+]] = load %struct.anon*, %struct.anon** %__context.addr.i, align 8
// NEG-NOT: call void @llvm.dbg.declare(metadata %struct.anon* [[CONTEXT]], metadata {{![0-9]+}}, metadata !DIExpression(DW_OP_deref))
extern int printf(const char *, ...);
// Recursive helper exercising an OpenMP task with shared, private and
// firstprivate clauses; the FileCheck patterns above verify the debug
// metadata emitted for the shared captures (share1/share2/share3).
int foo(int n) {
  int share1 = 9, share2 = 11, share3 = 13, priv1, priv2, fpriv;
  fpriv = n + 4;
  if (n < 2)
    return n;
  else {
#if SHARED
#pragma omp task shared(share1, share2) private(priv1, priv2) firstprivate(fpriv) shared(share3)
#else
#pragma omp task private(priv1, priv2) firstprivate(fpriv)
#endif
    {
      priv1 = n;
      priv2 = n + 2;
      share2 += share3;
      printf("share1 = %d, share2 = %d, share3 = %d\n", share1, share2, share3);
      share1 = priv1 + priv2 + fpriv + foo(n - 1) + share2 + share3;
    }
    // Wait for the task so the updates to the shared variables are
    // complete before they are read below.
#pragma omp taskwait
    return share1 + share2 + share3;
  }
}
/* Entry point: run the recursive task example and print its result. */
int main() {
  const int n = 10;
  printf("foo(%d) = %d\n", n, foo(n));
  return 0;
}
|
jacobi_omp.c | /*
* Copyright (c) 2008, BSC (Barcelon Supercomputing Center)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the <organization> nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY BSC ''AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <copyright holder> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <math.h>
#include <time.h>
#define NB 256
#define B 32
#define FALSE (0)
#define TRUE (1)
typedef double fp_type;
typedef fp_type *vin;
typedef fp_type *vout;
typedef fp_type *bin;
typedef fp_type *binout;
fp_type *A[NB][NB];
fp_type *A_new[NB][NB];
fp_type *tmp[NB][NB];
void alloc_and_genmat()
{
int init_val, i, j, ii, jj;
fp_type *p, *p_new;
init_val = 1325;
for (ii = 0; ii < NB; ii++)
{
for (jj = 0; jj < NB; jj++)
{
A[ii][jj] = (fp_type *)malloc(B * B * sizeof(fp_type));
A_new[ii][jj] = (fp_type *)malloc(B * B * sizeof(fp_type));
tmp[ii][jj] = (fp_type *)malloc(B * B * sizeof(fp_type));
if (A[ii][jj] == NULL || A_new[ii][jj] == NULL || tmp[ii][jj] == NULL)
{
printf("Out of memory\n");
exit(1);
}
p = A[ii][jj];
p_new = A_new[ii][jj];
for (i = 0; i < B; i++)
{
for (j = 0; j < B; j++)
{
init_val = (3125 * init_val) % 65536;
(*p) = (fp_type)((init_val - 32768.0) / 16384.0);
(*p_new) = (*p);
p++;
p_new++;
}
}
}
}
}
/* Wall-clock time in microseconds since the Unix epoch. */
long usecs(void)
{
    struct timeval now;
    gettimeofday(&now, NULL);
    return now.tv_sec * 1000000 + now.tv_usec;
}
/* Zero out all B entries of halo vector v.
 * (Removed the unused locals j and k from the original.) */
void clear(vout v)
{
    for (int i = 0; i < B; i++)
        v[i] = (fp_type)0.0;
}
/* Copy the last (bottom) row of B x B block A into vector v;
 * v is indexed by column. */
void getlastrow(bin A, vout v)
{
    int c;
    for (c = 0; c < B; c++)
        v[c] = A[(B - 1) * B + c];
}
/* Copy the last (rightmost) column of B x B block A into vector v;
 * v is indexed by row. */
void getlastcol(bin A, vout v)
{
    int r;
    for (r = 0; r < B; r++)
        v[r] = A[r * B + (B - 1)];
}
/* Copy the first (top) row of B x B block A into vector v;
 * v is indexed by column. */
void getfirstrow(bin A, vout v)
{
    int c;
    for (c = 0; c < B; c++)
        v[c] = A[c];
}
/* Copy the first (leftmost) column of B x B block A into vector v;
 * v is indexed by row. */
void getfirstcol(bin A, vout v)
{
    int r;
    for (r = 0; r < B; r++)
        v[r] = A[r * B];
}
/* One relaxation sweep over a single B x B block: reads A, writes A_new.
 * Halo vectors supply neighbor values along each edge: tophalo and
 * bottomhalo are rows (indexed by column j), lefthalo and righthalo are
 * columns (indexed by row i), matching the getters in this file. */
void jacobi(vin lefthalo, vin tophalo, vin righthalo, vin bottomhalo, bin A, binout A_new)
{
    int i, j;
    fp_type left, top, right, bottom;
    for (i = 0; i < B; i++)
    {
        for (j = 0; j < B; j++)
        {
            /* BUG FIX: the halo vectors were indexed with the wrong
             * coordinate (lefthalo[j], tophalo[i], bottomhalo[i]), which
             * pinned every boundary cell to a single halo entry.  Row
             * halos are indexed by j, column halos by i. */
            left = (j == 0 ? lefthalo[i] : A[i * B + j - 1]);
            top = (i == 0 ? tophalo[j] : A[(i - 1) * B + j]);
            right = (j == B - 1 ? righthalo[i] : A[i * B + j + 1]);
            bottom = (i == B - 1 ? bottomhalo[j] : A[(i + 1) * B + j]);
            /* Damped update mixing the cell with its four neighbors.
             * (Removed an unused local that shadowed the global tmp.) */
            A_new[i * B + j] = 0.2 * (A[i * B + j] + left + top + right + bottom);
        }
    }
}
/* Largest absolute elementwise difference between A_new and A over the
 * whole NB x NB grid of B x B blocks.  The outer loop is parallelized
 * with an OpenMP max reduction. */
double maxdelta()
{
    double dmax = -__DBL_MAX__;
    int bi, bj, r, c;
    #pragma omp parallel for schedule(static) reduction(max: dmax)
    for (bi = 0; bi < NB; bi++)
        for (bj = 0; bj < NB; bj++)
            for (r = 0; r < B; r++)
                for (c = 0; c < B; c++)
                {
                    double d = fabs(A_new[bi][bj][r * B + c] - A[bi][bj][r * B + c]);
                    if (d > dmax)
                        dmax = d;
                }
    return dmax;
}
/* Run `niters` Jacobi sweeps over the NB x NB grid of B x B blocks.
 * Each sweep gathers halos from neighboring blocks (zero at the domain
 * boundary), relaxes every block from A into A_new, reports the largest
 * change, and copies A_new back into A. */
void compute(int niters)
{
    int iters = 0;
    int ii, jj;
    fp_type lefthalo[B], tophalo[B], righthalo[B], bottomhalo[B];
    double delta;
    while (iters < niters)
    {
        ++iters;
#pragma omp parallel \
    private(ii, jj, lefthalo, tophalo, righthalo, bottomhalo) \
    shared(A, A_new)
        {
#pragma omp for schedule(static)
            for (ii = 0; ii < NB; ii++)
            {
                for (jj = 0; jj < NB; jj++)
                {
                    /* Gather the four halos; boundary blocks get zeros. */
                    if (ii > 0)
                        getlastrow(A[ii - 1][jj], tophalo);
                    else
                        clear(tophalo);
                    if (jj > 0)
                        getlastcol(A[ii][jj - 1], lefthalo);
                    else
                        clear(lefthalo);
                    if (ii < NB - 1)
                        getfirstrow(A[ii + 1][jj], bottomhalo);
                    else
                        clear(bottomhalo);
                    if (jj < NB - 1)
                        getfirstcol(A[ii][jj + 1], righthalo);
                    else
                        clear(righthalo);   /* BUG FIX: previously cleared
                                             * lefthalo here, leaving
                                             * righthalo stale on the last
                                             * block column. */
                    jacobi(lefthalo, tophalo, righthalo, bottomhalo, A[ii][jj], A_new[ii][jj]);
                } // jj
            } // ii
        } // end parallel
        delta = maxdelta();
        printf("iteration %d: delta = %e\n", iters, delta);
        // yes, this is an inefficient copy
        // however, the library version requires you to do a copy in this way
        // on all of the component parts to avoid segmentation fault
#pragma omp parallel for schedule(static) shared(A, A_new)
        for (int i = 0; i < NB; ++i)
        {
            for (int j = 0; j < NB; ++j)
            {
                for (int k = 0; k < B; ++k)
                    for (int l = 0; l < B; ++l)
                        A[i][j][k * B + l] = A_new[i][j][k * B + l];
            }
        }
    } // iter
}
/* Entry point: run `argv[1]` Jacobi sweeps (default 1) and report the
 * elapsed wall-clock time. */
int main(int argc, char *argv[])
{
    int iterations = (argc > 1) ? atoi(argv[1]) : 1;
    struct timespec t_begin, t_end;
    alloc_and_genmat();
    clock_gettime(CLOCK_MONOTONIC, &t_begin);
    compute(iterations);
    clock_gettime(CLOCK_MONOTONIC, &t_end);
    /* Seconds plus nanoseconds, folded into fractional seconds. */
    double elapsed = (t_end.tv_sec - t_begin.tv_sec) * 1e9;
    elapsed = (elapsed + (t_end.tv_nsec - t_begin.tv_nsec)) * 1e-9;
    printf("Running time = %g %s\n", elapsed, "s");
    return 0;
}
blockbanded_source.c | void X(destroy_block_2x2_triangular_banded)(X(block_2x2_triangular_banded) * A) {
    // Free the four triangular-banded sub-blocks, then the container.
    X(destroy_triangular_banded)(A->data[0][0]);
    X(destroy_triangular_banded)(A->data[0][1]);
    X(destroy_triangular_banded)(A->data[1][0]);
    X(destroy_triangular_banded)(A->data[1][1]);
    free(A);
}
// Release an FMM-accelerated eigendecomposition: the inner tb_eigen_FMM
// factorization plus the s/c/t arrays, then the struct itself.
void X(destroy_btb_eigen_FMM)(X(btb_eigen_FMM) * F) {
    X(destroy_tb_eigen_FMM)(F->F);
    free(F->s);
    free(F->c);
    free(F->t);
    free(F);
}
// Assemble a 2x2 block triangular banded matrix from four sub-blocks.
// All sub-blocks must share one dimension n; bandwidths are padded up to
// the widest of the four so the blocks can be traversed uniformly.
// The container takes ownership of (aliases) the supplied sub-blocks.
X(block_2x2_triangular_banded) * X(create_block_2x2_triangular_banded)(X(triangular_banded) * data[2][2]) {
    X(block_2x2_triangular_banded) * A = malloc(sizeof(X(block_2x2_triangular_banded)));
    int n = data[0][0]->n;
    if (data[0][1]->n != n || data[1][0]->n != n || data[1][1]->n != n)
        exit_failure("create_block_2x2_triangular_banded: block sizes are not all the same.");
    int b = MAX(MAX(data[0][0]->b, data[0][1]->b), MAX(data[1][0]->b, data[1][1]->b));
    for (int i = 0; i < 2; i++) {
        for (int j = 0; j < 2; j++) {
            if (data[i][j]->b != b)
                X(realloc_triangular_banded)(data[i][j], b);
            A->data[i][j] = data[i][j];
        }
    }
    A->n = n;
    A->b = b;
    return A;
}
// Flatten the 2x2 block structure into a single triangular banded matrix
// of dimension 2n and bandwidth 2b+1, using the interleaved index map of
// get_block_2x2_triangular_banded_index.
X(triangular_banded) * X(convert_block_2x2_triangular_banded_to_triangular_banded)(X(block_2x2_triangular_banded) * A) {
    int n = A->n, b = A->b;
    X(triangular_banded) * B = X(malloc_triangular_banded)(2*n, 2*b+1);
    for (int col = 0; col < 2*n; col++)
        for (int row = MAX(col-2*b-1, 0); row <= col; row++)
            X(set_triangular_banded_index)(B, X(get_block_2x2_triangular_banded_index)(A, row, col), row, col);
    return B;
}
// Entry (i, j) of the interleaved 2n x 2n matrix lives in sub-block
// (i mod 2, j mod 2) at position (i/2, j/2).
FLT X(get_block_2x2_triangular_banded_index)(const X(block_2x2_triangular_banded) * A, const int i, const int j) {
    return X(get_triangular_banded_index)(A->data[i%2][j%2], i/2, j/2);
}
// Mirror of the getter: store v at entry (i, j), routed to sub-block
// (i mod 2, j mod 2) at position (i/2, j/2).
void X(set_block_2x2_triangular_banded_index)(const X(block_2x2_triangular_banded) * A, const FLT v, const int i, const int j) {
    // BUG FIX: previously written as `return X(set_...)(...)` — returning
    // a void expression from a void function is a C constraint violation.
    X(set_triangular_banded_index)(A->data[i%2][j%2], v, i/2, j/2);
}
// Gather the (i, j) entries of all four sub-blocks into the 2x2 matrix v.
void X(block_get_block_2x2_triangular_banded_index)(const X(block_2x2_triangular_banded) * A, FLT v[2][2], const int i, const int j) {
    v[0][0] = X(get_triangular_banded_index)(A->data[0][0], i, j);
    v[0][1] = X(get_triangular_banded_index)(A->data[0][1], i, j);
    v[1][0] = X(get_triangular_banded_index)(A->data[1][0], i, j);
    v[1][1] = X(get_triangular_banded_index)(A->data[1][1], i, j);
}
// Scatter the 2x2 matrix v into the (i, j) entries of the four sub-blocks.
void X(block_set_block_2x2_triangular_banded_index)(const X(block_2x2_triangular_banded) * A, const FLT v[2][2], const int i, const int j) {
    X(set_triangular_banded_index)(A->data[0][0], v[0][0], i, j);
    X(set_triangular_banded_index)(A->data[0][1], v[0][1], i, j);
    X(set_triangular_banded_index)(A->data[1][0], v[1][0], i, j);
    X(set_triangular_banded_index)(A->data[1][1], v[1][1], i, j);
}
// B ← A⁻¹ for a 2x2 matrix via the adjugate formula.  No singularity
// check: a zero determinant yields infinities/NaNs, as before.
static inline void X(inverse_2x2)(const FLT A[2][2], FLT B[2][2]) {
    FLT det = A[0][0]*A[1][1] - A[0][1]*A[1][0];
    B[0][0] =  A[1][1]/det;
    B[0][1] = -A[0][1]/det;
    B[1][0] = -A[1][0]/det;
    B[1][1] =  A[0][0]/det;
}
// x ← A*x ('N') or x ← Aᵀ*x ('T') for the block-2x2 triangular banded A.
// x has length 2n; block-row i occupies x[2i], x[2i+1].  Any other TRANS
// leaves x untouched.
void X(btbmv)(char TRANS, X(block_2x2_triangular_banded) * A, FLT * x) {
    int n = A->n, bnd = A->b;
    FLT a[2][2], t[2];
    if (TRANS == 'N') {
        // Upper-triangular product: row i only reads x[k] for k >= i,
        // so a forward in-place sweep is safe.
        for (int i = 0; i < n; i++) {
            t[1] = t[0] = 0;
            for (int k = i; k < MIN(i+bnd+1, n); k++) {
                X(block_get_block_2x2_triangular_banded_index)(A, a, i, k);
                t[0] += a[0][0]*x[2*k] + a[0][1]*x[2*k+1];
                t[1] += a[1][0]*x[2*k] + a[1][1]*x[2*k+1];
            }
            x[2*i] = t[0];
            x[2*i+1] = t[1];
        }
    }
    else if (TRANS == 'T') {
        // Transposed product: sweep backward, fetching blocks by column
        // and applying each 2x2 block transposed.
        for (int i = n-1; i >= 0; i--) {
            t[1] = t[0] = 0;
            for (int k = MAX(i-bnd, 0); k <= i; k++) {
                X(block_get_block_2x2_triangular_banded_index)(A, a, k, i);
                t[0] += a[0][0]*x[2*k] + a[1][0]*x[2*k+1];
                t[1] += a[0][1]*x[2*k] + a[1][1]*x[2*k+1];
            }
            x[2*i] = t[0];
            x[2*i+1] = t[1];
        }
    }
}
// x ← A⁻¹*x ('N') or x ← A⁻ᵀ*x ('T'): triangular solve against the
// block-2x2 banded A.  Each 2x2 diagonal block is inverted directly.
void X(btbsv)(char TRANS, X(block_2x2_triangular_banded) * A, FLT * x) {
    int n = A->n, bnd = A->b;
    FLT a[2][2], b[2][2], t[2];
    if (TRANS == 'N') {
        // Back substitution: accumulate the off-diagonal contribution t
        // from already-solved entries, subtract, then apply the inverse
        // of the diagonal block.
        for (int i = n-1; i >= 0; i--) {
            t[1] = t[0] = 0;
            for (int k = i+1; k < MIN(i+bnd+1, n); k++) {
                X(block_get_block_2x2_triangular_banded_index)(A, a, i, k);
                t[0] += a[0][0]*x[2*k] + a[0][1]*x[2*k+1];
                t[1] += a[1][0]*x[2*k] + a[1][1]*x[2*k+1];
            }
            X(block_get_block_2x2_triangular_banded_index)(A, a, i, i);
            X(inverse_2x2)(a, b);
            t[0] = x[2*i]-t[0];
            t[1] = x[2*i+1]-t[1];
            x[2*i] = b[0][0]*t[0] + b[0][1]*t[1];
            x[2*i+1] = b[1][0]*t[0] + b[1][1]*t[1];
        }
    }
    else if (TRANS == 'T') {
        // Forward substitution with transposed blocks: off-diagonal
        // blocks are fetched by column and applied transposed, as is the
        // inverted diagonal block at the end.
        for (int i = 0; i < n; i++) {
            t[1] = t[0] = 0;
            for (int k = MAX(i-bnd, 0); k < i; k++) {
                X(block_get_block_2x2_triangular_banded_index)(A, a, k, i);
                t[0] += a[0][0]*x[2*k] + a[1][0]*x[2*k+1];
                t[1] += a[0][1]*x[2*k] + a[1][1]*x[2*k+1];
            }
            X(block_get_block_2x2_triangular_banded_index)(A, a, i, i);
            X(inverse_2x2)(a, b);
            t[0] = x[2*i]-t[0];
            t[1] = x[2*i+1]-t[1];
            x[2*i] = b[0][0]*t[0] + b[1][0]*t[1];
            x[2*i+1] = b[0][1]*t[0] + b[1][1]*t[1];
        }
    }
}
// AV = BVΛ, A and B are block upper-triangular and banded and Λ is real.
// Solve a*x^2 + b*x + c = 0 for two real roots x[0], x[1].  The branch on
// the signs of a and b picks, for each root, whichever of the classical
// and the "2c / (-b ∓ d)" form avoids subtracting like-signed quantities
// (i.e. avoids catastrophic cancellation).  Aborts via exit_failure on a
// negative discriminant or degenerate (a == 0) quadratic.
static inline void X(real_quadratic_formula)(const FLT a, const FLT b, const FLT c, FLT x[2]) {
    FLT d = b*b-4*a*c;
    if (d < 0)
        exit_failure("real_quadratic_formula: discriminant is negative.");
    d = Y(sqrt)(d);
    if (a > 0) {
        if (b > 0) {
            // b, d >= 0 so b+d is computed without cancellation.
            x[0] = -(b+d)/(2*a);
            x[1] = -2*c/(b+d);
        }
        else {
            // -b, d >= 0 so d-b is safe.
            x[0] = 2*c/(d-b);
            x[1] = (d-b)/(2*a);
        }
    }
    else if (a < 0) {
        // Mirror of the a > 0 case with the root order swapped —
        // presumably to keep x[0] <= x[1]; confirm against callers.
        if (b > 0) {
            x[0] = -2*c/(b+d);
            x[1] = -(b+d)/(2*a);
        }
        else {
            x[0] = (d-b)/(2*a);
            x[1] = 2*c/(d-b);
        }
    }
    else
        exit_failure("real_quadratic_formula: quadratic is a degenerate linear.");
}
// Eigenvalues of the 2x2 pencil (A, B): roots of det(A - λB) = 0, which
// expands to det(B)·λ² + (A01·B10 + A10·B01 - A00·B11 - A11·B00)·λ + det(A).
static inline void X(generalized_eigenvalues_2x2)(const FLT A[2][2], const FLT B[2][2], FLT lambda[2]) {
    FLT qa = B[0][0]*B[1][1]-B[0][1]*B[1][0];
    FLT qb = A[0][1]*B[1][0]+A[1][0]*B[0][1]-A[0][0]*B[1][1]-A[1][1]*B[0][0];
    FLT qc = A[0][0]*A[1][1]-A[0][1]*A[1][0];
    X(real_quadratic_formula)(qa, qb, qc, lambda);
}
// Generalized eigenvalues of the block-triangular pencil (A, B): the
// spectrum is the union of the 2x2 diagonal-block eigenvalues, written
// two per block column into lambda[2j], lambda[2j+1].
void X(block_2x2_triangular_banded_eigenvalues)(X(block_2x2_triangular_banded) * A, X(block_2x2_triangular_banded) * B, FLT * lambda) {
    FLT Ajj[2][2], Bjj[2][2];
    for (int j = 0; j < A->n; j++) {
        X(block_get_block_2x2_triangular_banded_index)(A, Ajj, j, j);
        X(block_get_block_2x2_triangular_banded_index)(B, Bjj, j, j);
        X(generalized_eigenvalues_2x2)(Ajj, Bjj, lambda+2*j);
    }
}
// C ← A - λ·B, elementwise on 2x2 matrices.
static inline void X(scaled_diff_2x2)(const FLT A[2][2], const FLT lambda, const FLT B[2][2], FLT C[2][2]) {
    for (int i = 0; i < 2; i++)
        for (int j = 0; j < 2; j++)
            C[i][j] = A[i][j] - lambda*B[i][j];
}
// Assumes eigenvectors are initialized by V[i,2j] = V[i,2j+1] = 0 for i > 2j+1 and V[2j,2j] ≠ 0, V[2j,2j+1] ≠ 0.
// Computes generalized eigenvectors of the pencil (A, B) by back
// substitution, two columns at a time (one per eigenvalue of the jth 2x2
// diagonal block).  V is 2n x 2n stored column-major, indexed as
// V[row + col*2n].
void X(block_2x2_triangular_banded_eigenvectors)(X(block_2x2_triangular_banded) * A, X(block_2x2_triangular_banded) * B, FLT * V) {
    int n = A->n, bnd = MAX(A->b, B->b);
    FLT t[2], a[2][2], b[2][2], c[2][2], d[2][2], lam[2];
    for (int j = 0; j < n; j++) {
        X(block_get_block_2x2_triangular_banded_index)(A, a, j, j);
        X(block_get_block_2x2_triangular_banded_index)(B, b, j, j);
        X(generalized_eigenvalues_2x2)(a, b, lam);
        // Fix the second component of the pivot block-row from the
        // caller-supplied first component and the eigenvalue equation.
        V[2*j+1+2*j*2*n] = (b[1][0]*lam[0]-a[1][0])*V[2*j+2*j*2*n]/(a[1][1]-b[1][1]*lam[0]);
        V[2*j+1+(2*j+1)*2*n] = (b[1][0]*lam[1]-a[1][0])*V[2*j+(2*j+1)*2*n]/(a[1][1]-b[1][1]*lam[1]);
        // Back-substitute upward through the banded rows; l selects which
        // of the two eigenvalue columns is being filled.
        for (int i = j-1; i >= 0; i--) {
            for (int l = 0; l <= 1; l++) {
                t[1] = t[0] = 0;
                // Accumulate (A - λB) applied to the already-known tail.
                for (int k = i+1; k < MIN(i+bnd+1, n); k++) {
                    X(block_get_block_2x2_triangular_banded_index)(A, a, i, k);
                    X(block_get_block_2x2_triangular_banded_index)(B, b, i, k);
                    X(scaled_diff_2x2)(a, lam[l], b, c);
                    t[0] += c[0][0]*V[2*k+(2*j+l)*2*n] + c[0][1]*V[2*k+1+(2*j+l)*2*n];
                    t[1] += c[1][0]*V[2*k+(2*j+l)*2*n] + c[1][1]*V[2*k+1+(2*j+l)*2*n];
                }
                // Solve the 2x2 diagonal system (A - λB)_{ii} v = -t.
                X(block_get_block_2x2_triangular_banded_index)(A, a, i, i);
                X(block_get_block_2x2_triangular_banded_index)(B, b, i, i);
                X(scaled_diff_2x2)(a, lam[l], b, c);
                X(inverse_2x2)(c, d);
                V[2*i+(2*j+l)*2*n] = -(d[0][0]*t[0] + d[0][1]*t[1]);
                V[2*i+1+(2*j+l)*2*n] = -(d[1][0]*t[0] + d[1][1]*t[1]);
            }
        }
    }
}
// Givens rotation G = [c s; -s c] with Gᵀ·[x; y] = [r; 0], r = hypot(x, y).
// A tiny (or non-finite) r falls back to the identity rotation, guarding
// the 0/0 division; the negated-comparison form preserves that behavior.
static inline void X(compute_givens)(const FLT x, const FLT y, FLT * c, FLT * s, FLT * r) {
    *r = Y(hypot)(x, y);
    if (!(*r <= Y(floatmin)()/Y(eps)())) {
        *c = x / *r;
        *s = y / *r;
    }
    else {
        *c = 1;
        *s = 0;
    }
}
// Apply the Givens rotation G = [c s; -s c] (or Gᵀ when TRANS == 'T') to
// the 2x2 matrix A, from the left (SIDE == 'L': A ← G·A) or the right
// (SIDE == 'R': A ← A·Gᵀ-form used by the original).  Unknown TRANS or
// SIDE flags leave A untouched, as before.
static inline void X(apply_givens)(char TRANS, char SIDE, const FLT c, const FLT s, FLT A[2][2]) {
    if (TRANS != 'N' && TRANS != 'T')
        return;
    // Transposing G is the same as negating s.
    const FLT se = (TRANS == 'T') ? -s : s;
    FLT R[2][2];
    if (SIDE == 'L') {
        R[0][0] = c*A[0][0] + se*A[1][0];
        R[0][1] = c*A[0][1] + se*A[1][1];
        R[1][0] = c*A[1][0] - se*A[0][0];
        R[1][1] = c*A[1][1] - se*A[0][1];
    }
    else if (SIDE == 'R') {
        R[0][0] = c*A[0][0] - se*A[0][1];
        R[0][1] = c*A[0][1] + se*A[0][0];
        R[1][0] = c*A[1][0] - se*A[1][1];
        R[1][1] = c*A[1][1] + se*A[1][0];
    }
    else
        return;
    A[0][0] = R[0][0];
    A[0][1] = R[0][1];
    A[1][0] = R[1][0];
    A[1][1] = R[1][1];
}
// Eigen-decomposition of the 2x2-block triangular banded pencil (A, B).
// D is 2n initial conditions.
// On entry: D[2j] = V[2j, 2j], D[2j+1] = V[2j, 2j+1].
// On exit: D[2j] = TV[2j, 2j], D[2j+1] = TV[2j+1, 2j+1].
// A and B are modified in place by the Givens sweeps below.
// NOTE(review): the malloc results (s, c, BF, BF->t) are not checked — assumes
// allocation cannot fail here; TODO confirm this matches the library's policy.
X(btb_eigen_FMM) * X(btb_eig_FMM)(X(block_2x2_triangular_banded) * A, X(block_2x2_triangular_banded) * B, FLT * D) {
    int n = A->n, bnd = MAX(A->b, B->b);
    FLT * s = malloc(n*sizeof(FLT));       // per-block Givens sines
    FLT * c = malloc(n*sizeof(FLT));       // per-block Givens cosines
    FLT a[2][2], b[2][2], lambda[2], ts, tc, r, t1, t2;
    // Stage 1: triangularize (2x2 block-triangular) eigenvectors via Givens rotations.
    for (int j = 0; j < n; j++) {
        X(block_get_block_2x2_triangular_banded_index)(A, a, j, j);
        X(block_get_block_2x2_triangular_banded_index)(B, b, j, j);
        X(generalized_eigenvalues_2x2)(a, b, lambda);
        // Second row of each 2x2 eigenvector, derived from (a - lambda b) v = 0.
        t1 = (b[1][0]*lambda[0]-a[1][0])*D[2*j]/(a[1][1]-b[1][1]*lambda[0]);
        t2 = (b[1][0]*lambda[1]-a[1][0])*D[2*j+1]/(a[1][1]-b[1][1]*lambda[1]);
        X(compute_givens)(D[2*j], t1, c+j, s+j, &r);
        D[2*j] = r;
        D[2*j+1] = c[j]*t2-s[j]*D[2*j+1];
        // Apply the rotation from the right to all blocks in band column j.
        for (int k = MAX(j-bnd, 0); k <= j; k++) {
            X(block_get_block_2x2_triangular_banded_index)(A, a, k, j);
            X(apply_givens)('T', 'R', c[j], s[j], a);
            X(block_set_block_2x2_triangular_banded_index)(A, a, k, j);
            X(block_get_block_2x2_triangular_banded_index)(B, b, k, j);
            X(apply_givens)('T', 'R', c[j], s[j], b);
            X(block_set_block_2x2_triangular_banded_index)(B, b, k, j);
        }
    }
    // Stage 2: triangularize (2x2 block-triangular banded) pencil via Givens rotations.
    for (int i = 0; i < n; i++) {
        X(block_get_block_2x2_triangular_banded_index)(B, b, i, i);
        // Rotation that zeros the (1,0) entry of B's diagonal block.
        X(compute_givens)(b[0][0], b[1][0], &tc, &ts, &r);
        for (int k = i; k < MIN(i+bnd+1, n); k++) {
            X(block_get_block_2x2_triangular_banded_index)(A, a, i, k);
            X(apply_givens)('N', 'L', tc, ts, a);
            X(block_set_block_2x2_triangular_banded_index)(A, a, i, k);
            X(block_get_block_2x2_triangular_banded_index)(B, b, i, k);
            X(apply_givens)('N', 'L', tc, ts, b);
            X(block_set_block_2x2_triangular_banded_index)(B, b, i, k);
        }
    }
    // Stage 3: convert (2x2 block-triangular banded) pencil to triangular banded pencil.
    X(triangular_banded) * TA = X(convert_block_2x2_triangular_banded_to_triangular_banded)(A);
    X(triangular_banded) * TB = X(convert_block_2x2_triangular_banded_to_triangular_banded)(B);
    // Stage 4: call X(tb_eig_FMM)(TA, TB, TD)
    X(tb_eigen_FMM) * F = X(tb_eig_FMM)(TA, TB, D);
    X(destroy_triangular_banded)(TA);
    X(destroy_triangular_banded)(TB);
    X(btb_eigen_FMM) * BF = malloc(sizeof(X(btb_eigen_FMM)));
    BF->F = F;
    BF->s = s;                              // ownership of s and c transfers to BF
    BF->c = c;
    BF->t = calloc(2*n*FT_GET_MAX_THREADS(), sizeof(FLT));  // per-thread scratch
    BF->n = n;
    return BF;
}
// x ← A*x (TRANS == 'N') or x ← Aᵀ*x (TRANS == 'T').  A is upper
// block-triangular with 2x2 blocks, stored column-major with leading
// dimension LDA; x holds n blocks of 2 entries.
void X(btrmv)(char TRANS, int n, FLT * A, int LDA, FLT * x) {
    if (TRANS == 'N') {
        for (int j = 0; j < n; j++) {
            const FLT u0 = x[2*j], u1 = x[2*j+1];
            // Accumulate the strictly-upper blocks of block column j.
            for (int i = 0; i < j; i++) {
                x[2*i]   += A[2*i+2*j*LDA]*u0   + A[2*i+(2*j+1)*LDA]*u1;
                x[2*i+1] += A[2*i+1+2*j*LDA]*u0 + A[2*i+1+(2*j+1)*LDA]*u1;
            }
            // Diagonal 2x2 block times x_j.
            x[2*j]   = A[2*j+2*j*LDA]*u0   + A[2*j+(2*j+1)*LDA]*u1;
            x[2*j+1] = A[2*j+1+2*j*LDA]*u0 + A[2*j+1+(2*j+1)*LDA]*u1;
        }
    }
    else if (TRANS == 'T') {
        for (int i = n-1; i >= 0; i--) {
            const FLT u0 = x[2*i], u1 = x[2*i+1];
            // Transposed diagonal block (note swapped off-diagonal indices).
            x[2*i]   = A[2*i+2*i*LDA]*u0     + A[2*i+1+2*i*LDA]*u1;
            x[2*i+1] = A[2*i+(2*i+1)*LDA]*u0 + A[2*i+1+(2*i+1)*LDA]*u1;
            // Transposed strictly-upper blocks of block column i.
            for (int j = i-1; j >= 0; j--) {
                x[2*i]   += A[2*j+2*i*LDA]*x[2*j]     + A[2*j+1+2*i*LDA]*x[2*j+1];
                x[2*i+1] += A[2*j+(2*i+1)*LDA]*x[2*j] + A[2*j+1+(2*i+1)*LDA]*x[2*j+1];
            }
        }
    }
}
// x ← A⁻¹*x (TRANS == 'N') or x ← A⁻ᵀ*x (TRANS == 'T') by block back/forward
// substitution; A is upper block-triangular with 2x2 blocks (column-major, LDA).
void X(btrsv)(char TRANS, int n, FLT * A, int LDA, FLT * x) {
    FLT c[2][2], d[2][2], t[2];
    if (TRANS == 'N') {
        // Back substitution: solve the last block row first.
        for (int j = n-1; j >= 0; j--) {
            // c = diagonal 2x2 block A(j,j).
            c[0][0] = A[2*j+2*j*LDA];
            c[0][1] = A[2*j+(2*j+1)*LDA];
            c[1][0] = A[2*j+1+2*j*LDA];
            c[1][1] = A[2*j+1+(2*j+1)*LDA];
            X(inverse_2x2)(c, d);
            t[0] = x[2*j];
            t[1] = x[2*j+1];
            x[2*j] = d[0][0]*t[0] + d[0][1]*t[1];
            x[2*j+1] = d[1][0]*t[0] + d[1][1]*t[1];
            // Eliminate the solved x_j from the block rows above.
            for (int i = 0; i < j; i++) {
                x[2*i] -= A[2*i+2*j*LDA]*x[2*j] + A[2*i+(2*j+1)*LDA]*x[2*j+1];
                x[2*i+1] -= A[2*i+1+2*j*LDA]*x[2*j] + A[2*i+1+(2*j+1)*LDA]*x[2*j+1];
            }
        }
    }
    else if (TRANS == 'T') {
        // Forward substitution with Aᵀ: solve the first block row first.
        for (int i = 0; i < n; i++) {
            for (int j = 0; j < i; j++) {
                x[2*i] -= A[2*j+2*i*LDA]*x[2*j] + A[2*j+1+2*i*LDA]*x[2*j+1];
                x[2*i+1] -= A[2*j+(2*i+1)*LDA]*x[2*j] + A[2*j+1+(2*i+1)*LDA]*x[2*j+1];
            }
            // c = A(i,i)ᵀ: the off-diagonal loads are deliberately swapped
            // relative to the 'N' branch.
            c[0][0] = A[2*i+2*i*LDA];
            c[0][1] = A[2*i+1+2*i*LDA];
            c[1][0] = A[2*i+(2*i+1)*LDA];
            c[1][1] = A[2*i+1+(2*i+1)*LDA];
            X(inverse_2x2)(c, d);
            t[0] = x[2*i];
            t[1] = x[2*i+1];
            x[2*i] = d[0][0]*t[0] + d[0][1]*t[1];
            x[2*i+1] = d[1][0]*t[0] + d[1][1]*t[1];
        }
    }
}
// B ← A*B or Aᵀ*B: apply X(btrmv) independently to each of the N columns of B.
void X(btrmm)(char TRANS, int n, FLT * A, int LDA, FLT * B, int LDB, int N) {
    #pragma omp parallel for
    for (int col = 0; col < N; col++) {
        X(btrmv)(TRANS, n, A, LDA, B + col*LDB);
    }
}
// B ← A⁻¹*B or A⁻ᵀ*B: apply X(btrsv) independently to each of the N columns of B.
void X(btrsm)(char TRANS, int n, FLT * A, int LDA, FLT * B, int LDB, int N) {
    #pragma omp parallel for
    for (int col = 0; col < N; col++) {
        X(btrsv)(TRANS, n, A, LDA, B + col*LDB);
    }
}
// x ← A*x (TRANS == 'N') or Aᵀ*x (TRANS == 'T'), where A factors as the
// per-block Givens rotations (F->c, F->s) composed with the triangular
// FMM part F->F; the transpose applies the factors in reverse order.
void X(bbfmv)(char TRANS, X(btb_eigen_FMM) * F, FLT * x) {
    int n = F->n;
    FLT * s = F->s, * c = F->c;
    if (TRANS == 'N') {
        // Triangular part first, then the rotations.
        X(bfmv)(TRANS, F->F, x);
        for (int i = 0; i < n; i++) {
            FLT y0 = c[i]*x[2*i]-s[i]*x[2*i+1];
            FLT y1 = c[i]*x[2*i+1]+s[i]*x[2*i];
            x[2*i] = y0;
            x[2*i+1] = y1;
        }
    }
    else if (TRANS == 'T') {
        // Transposed rotations first, then the transposed triangular part.
        for (int i = 0; i < n; i++) {
            FLT y0 = c[i]*x[2*i]+s[i]*x[2*i+1];
            FLT y1 = c[i]*x[2*i+1]-s[i]*x[2*i];
            x[2*i] = y0;
            x[2*i+1] = y1;
        }
        X(bfmv)(TRANS, F->F, x);
    }
}
// x ← A⁻¹*x (TRANS == 'N') or A⁻ᵀ*x (TRANS == 'T'): inverse of X(bbfmv),
// undoing the Givens rotations and the triangular FMM part in opposite order.
void X(bbfsv)(char TRANS, X(btb_eigen_FMM) * F, FLT * x) {
    int n = F->n;
    FLT * s = F->s, * c = F->c;
    if (TRANS == 'N') {
        // Invert the rotations first (the inverse of [c s; -s c] is its transpose).
        for (int i = 0; i < n; i++) {
            FLT y0 = c[i]*x[2*i]+s[i]*x[2*i+1];
            FLT y1 = c[i]*x[2*i+1]-s[i]*x[2*i];
            x[2*i] = y0;
            x[2*i+1] = y1;
        }
        X(bfsv)(TRANS, F->F, x);
    }
    else if (TRANS == 'T') {
        // Invert the transposed triangular part first, then the rotations.
        X(bfsv)(TRANS, F->F, x);
        for (int i = 0; i < n; i++) {
            FLT y0 = c[i]*x[2*i]-s[i]*x[2*i+1];
            FLT y1 = c[i]*x[2*i+1]+s[i]*x[2*i];
            x[2*i] = y0;
            x[2*i+1] = y1;
        }
    }
}
// B ← A*B or Aᵀ*B: apply X(bbfmv) independently to each of the N columns of B.
void X(bbfmm)(char TRANS, X(btb_eigen_FMM) * F, FLT * B, int LDB, int N) {
    #pragma omp parallel for
    for (int col = 0; col < N; col++) {
        X(bbfmv)(TRANS, F, B + col*LDB);
    }
}
// B ← A⁻¹*B or A⁻ᵀ*B: apply X(bbfsv) independently to each of the N columns of B.
void X(bbfsm)(char TRANS, X(btb_eigen_FMM) * F, FLT * B, int LDB, int N) {
    #pragma omp parallel for
    for (int col = 0; col < N; col++) {
        X(bbfsv)(TRANS, F, B + col*LDB);
    }
}
// x ← A*x or Aᵀ*x restricted to one component of the interleaved 2-vector
// storage: DBLOCK ('1' or '2') selects the slot of the scratch vector that x
// is scattered into, RBLOCK selects the slot gathered back into x.
void X(bbbfmv)(char TRANS, char DBLOCK, char RBLOCK, X(btb_eigen_FMM) * F, FLT * x) {
    int n = F->n;
    FLT * t = F->t+2*n*FT_GET_THREAD_NUM();     // per-thread scratch of length 2n
    if (DBLOCK == '1' || DBLOCK == '2') {
        const int off = (DBLOCK == '2');
        for (int i = 0; i < n; i++) {
            t[2*i+off] = x[i];
            t[2*i+(1-off)] = 0;
        }
    }
    X(bbfmv)(TRANS, F, t);
    if (RBLOCK == '1' || RBLOCK == '2') {
        const int roff = (RBLOCK == '2');
        for (int i = 0; i < n; i++)
            x[i] = t[2*i+roff];
    }
}
// x ← A⁻¹*x or A⁻ᵀ*x restricted to one component of the interleaved storage:
// RBLOCK selects the slot x is scattered into before the solve, DBLOCK the
// slot gathered back afterwards (the mirror of X(bbbfmv)).
void X(bbbfsv)(char TRANS, char DBLOCK, char RBLOCK, X(btb_eigen_FMM) * F, FLT * x) {
    int n = F->n;
    FLT * t = F->t+2*n*FT_GET_THREAD_NUM();     // per-thread scratch of length 2n
    if (RBLOCK == '1' || RBLOCK == '2') {
        const int off = (RBLOCK == '2');
        for (int i = 0; i < n; i++) {
            t[2*i+off] = x[i];
            t[2*i+(1-off)] = 0;
        }
    }
    X(bbfsv)(TRANS, F, t);
    if (DBLOCK == '1' || DBLOCK == '2') {
        const int doff = (DBLOCK == '2');
        for (int i = 0; i < n; i++)
            x[i] = t[2*i+doff];
    }
}
// Column-wise driver for X(bbbfmv) over the N columns of B.
void X(bbbfmm)(char TRANS, char DBLOCK, char RBLOCK, X(btb_eigen_FMM) * F, FLT * B, int LDB, int N) {
    #pragma omp parallel for
    for (int col = 0; col < N; col++) {
        X(bbbfmv)(TRANS, DBLOCK, RBLOCK, F, B + col*LDB);
    }
}
// Column-wise driver for X(bbbfsv) over the N columns of B.
void X(bbbfsm)(char TRANS, char DBLOCK, char RBLOCK, X(btb_eigen_FMM) * F, FLT * B, int LDB, int N) {
    #pragma omp parallel for
    for (int col = 0; col < N; col++) {
        X(bbbfsv)(TRANS, DBLOCK, RBLOCK, F, B + col*LDB);
    }
}
|
doitgen.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "doitgen.h"
/* Array initialization. */
/* Array initialization: A[i][j][k] = (i*j + k)/np, C4[i][j] = i*j/np.
   The annotate attributes carry TAFFO/precision-tuning value ranges and
   must not be altered. */
static
void init_array(int nr, int nq, int np,
		DATA_TYPE POLYBENCH_3D(A,NR,NQ,NP,nr,nq,np),
		DATA_TYPE POLYBENCH_2D(C4,NP,NP,np,np))
{
  int i __attribute__((annotate("scalar(range(0, " PB_XSTR(NR) ") final)")));
  int j __attribute__((annotate("scalar(range(0, " PB_XSTR(NQ) ") final)")));
  int k __attribute__((annotate("scalar(range(0, " PB_XSTR(NP) ") final)")));
  for (i = 0; i < nr; i++)
    for (j = 0; j < nq; j++)
      for (k = 0; k < np; k++)
	A[i][j][k] = ((DATA_TYPE) i*j + k) / np;
  for (i = 0; i < np; i++)
    for (j = 0; j < np; j++)
      C4[i][j] = ((DATA_TYPE) i*j) / np;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output.
   Note: the newline is emitted on i % 20 == 0, i.e. once per inner element
   for those i — this matches the PolyBench reference output format. */
static
void print_array(int nr, int nq, int np,
		 DATA_TYPE POLYBENCH_3D(A,NR,NQ,NP,nr,nq,np))
{
  int i, j, k;
  for (i = 0; i < nr; i++)
    for (j = 0; j < nq; j++)
      for (k = 0; k < np; k++) {
	fprintf (stderr, DATA_PRINTF_MODIFIER, A[i][j][k]);
	if (i % 20 == 0) fprintf (stderr, "\n");
      }
  fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* doitgen kernel: for each (r, q), sum[r][q][.] = A[r][q][.] * C4 (a
   vector-matrix product over the last axis), then copy sum back into A.
   The outer r loop is parallelized; q, p, s are privatized per thread. */
static
void kernel_doitgen(int nr, int nq, int np,
		    DATA_TYPE POLYBENCH_3D(A,NR,NQ,NP,nr,nq,np),
		    DATA_TYPE POLYBENCH_2D(C4,NP,NP,np,np),
		    DATA_TYPE POLYBENCH_3D(sum,NR,NQ,NP,nr,nq,np))
{
  int r, q, p, s;
#pragma scop
  #pragma omp parallel
  {
    #pragma omp for private (q, p, s)
    for (r = 0; r < _PB_NR; r++)
      for (q = 0; q < _PB_NQ; q++)
	{
	  for (p = 0; p < _PB_NP; p++)
	    {
	      sum[r][q][p] = 0;
	      for (s = 0; s < _PB_NP; s++)
		sum[r][q][p] = sum[r][q][p] + A[r][q][s] * C4[s][p];
	    }
	  /* Write-back bound fixed from _PB_NR to _PB_NP: p indexes the last
	     (NP-sized) axis of A and sum, so the old bound overran both
	     arrays whenever NR > NP (same fix as PolyBench 4.x). */
	  for (p = 0; p < _PB_NP; p++)
	    A[r][q][p] = sum[r][q][p];
	}
  }
#pragma endscop
}
/* Driver: allocate, initialize, time the kernel, and print live-out data.
   The annotate attributes carry TAFFO/precision-tuning metadata and must
   not be altered. */
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int nr = NR;
  int nq = NQ;
  int np = NP;
  /* Variable declaration/allocation. */
  POLYBENCH_3D_ARRAY_DECL(A,DATA_TYPE __attribute__((annotate("target('A') scalar(range(0, 1000000) final)"))),NR,NQ,NP,nr,nq,np);
  POLYBENCH_3D_ARRAY_DECL(sum,DATA_TYPE __attribute__((annotate("target('sum') scalar(range(0, 1000000) final)"))),NR,NQ,NP,nr,nq,np);
  POLYBENCH_2D_ARRAY_DECL(C4,DATA_TYPE __attribute__((annotate("target('C4') scalar()"))),NP,NP,np,np);
  /* Initialize array(s). */
  init_array (nr, nq, np,
	      POLYBENCH_ARRAY(A),
	      POLYBENCH_ARRAY(C4));
  /* Start timer. */
  polybench_start_instruments;
  /* Run kernel. */
  kernel_doitgen (nr, nq, np,
		  POLYBENCH_ARRAY(A),
		  POLYBENCH_ARRAY(C4),
		  POLYBENCH_ARRAY(sum));
  /* Stop and print timer. */
  polybench_stop_instruments;
  polybench_print_instruments;
  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(nr, nq, np, POLYBENCH_ARRAY(A)));
  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(sum);
  POLYBENCH_FREE_ARRAY(C4);
  return 0;
}
</full_update>
|
private-modificado.c | #include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
// Sums a[0..n-1], each thread accumulating into its own private copy of suma.
int main()
{
  int i, n = 7;
  int a[n], suma = 0;
  for (i=0; i<n; i++)
    a[i] = i;
#pragma omp parallel private(suma)
  {
    // The private(suma) copy starts uninitialized inside the region; reading
    // it before writing is undefined behavior, so it must be reset here
    // (the original had this line commented out).
    suma = 0;
#pragma omp for
    for (i=0; i<n; i++)
    {
      suma = suma + a[i];
      printf("thread %d suma a[%d] / ", omp_get_thread_num(), i);
    }
    printf("\n* thread %d suma= %d\n", omp_get_thread_num(), suma);
  }
  return 0;
}
|
rose_outer_only.c | /* Only the outmost loop can be parallelized
*/
#include "omp.h"
// ROSE autoPar test fixture: only the outer i loop is parallelized; the inner
// j loop carries a true dependence (b[i][j] reads b[i][j-1]).
// NOTE(review): at j == 0 this reads b[i][-1], which is out of bounds, and b
// is never initialized — presumably acceptable for this compiler test case,
// which only checks the emitted pragmas; confirm before reusing elsewhere.
void foo()
{
  int n = 100;
  int m = 100;
  double b[n][m];
  int i;
  int j;
  #pragma omp parallel for private (i,j) firstprivate (n,m)
  for (i = 0; i <= n - 1; i += 1) {
    for (j = 0; j <= m - 1; j += 1) {
      // Row-wise recurrence: each element depends on its left neighbor.
      b[i][j] = b[i][j - 1];
    }
  }
}
/*
Unparallelizable loop at line:9 due to the following dependencies:
1*1 TRUE_DEP DATA_DEP; commonlevel = 1 CarryLevel = 0 Is precise SgPntrArrRefExp:(b[i])[j]@10:14->SgPntrArrRefExp:((b[i])[j - 1])@10:19 == -1;||::
*/
|
DRACC_OMP_022_MxV_Missing_Data_yes.c | /*
Matrix Vector multiplication with Matrix missing on Accelerator. Using the target enter data construct.
*/
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#define C 5000
int *a;
int *b;
int *c;
// Initialize the C x C matrix b with ones, vector a with ones, and c with zeros.
int init(){
    for(int i=0; i<C; i++){
        a[i] = 1;
        c[i] = 0;
        for(int j=0; j<C; j++)
            b[j+i*C] = 1;
    }
    return 0;
}
// c ← b * a on the device.  Note: b is mapped with map(alloc:...), which
// allocates device storage WITHOUT copying the host data — the kernel
// therefore reads uninitialized b.  This is the injected "missing data"
// defect this DRACC benchmark exists to expose; do not "fix" the map clause.
int Mult(){
#pragma omp target map(to:a[0:C]) map(tofrom:c[0:C]) map(alloc:b[0:C*C]) device(0)
    {
#pragma omp teams distribute parallel for
    for(int i=0; i<C; i++){
        for(int j=0; j<C; j++){
            c[i]+=b[j+i*C]*a[j];
        }
    }
    }
return 0;
}
// Report whether any entry of c differs from the expected value C
// (i.e. whether the missing-data defect was observable).
int check(){
    bool mismatch = false;
    for(int i=0; i<C; i++)
        if(c[i] != C)
            mismatch = true;
    printf("Memory Access Issue visible: %s\n", mismatch ? "true" : "false");
    return 0;
}
// Allocate the global arrays, run the benchmark, verify, and free.
// Returns 0 on success, 1 if any allocation failed.
int main(){
    a = malloc(C*sizeof(int));
    b = malloc(C*C*sizeof(int));
    c = malloc(C*sizeof(int));
    if (a == NULL || b == NULL || c == NULL) {
        // Bail out instead of dereferencing NULL inside init().
        free(a);
        free(b);
        free(c);
        return 1;
    }
    init();
    Mult();
    check();
    free(a);
    free(b);
    free(c);
    return 0;
} |
test_taskwait.c | //===-- test_taskwait.cc - Test the "taskwait" construct ----------*- C -*-===//
//
// Part of the LOMP project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
//
// This file has been modified from the file
// openmp/runtime/test/tasking/omp_taskwait.c
// of the LLVM project (https://github.com/llvm/llvm-project)
// under the Apache License v2.0 with LLVM Exceptions.
//
//===----------------------------------------------------------------------===//
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include "omp.h"
#include "tests.h"
// Verifies the taskwait construct: after #pragma omp taskwait, every task
// generated so far must have completed (result1 counts violations), and tasks
// generated afterwards must still run to completion by the end of the parallel
// region (result2 counts violations).  Returns 1 on success, 0 on failure.
int test_omp_taskwait(void) {
  int result1 = 0; /* Stores number of not finished tasks after the taskwait */
  int result2 = 0; /* Stores number of wrong array elements at the end */
  int array[NUM_TASKS];
  int i;
  /* fill array */
  for (i = 0; i < NUM_TASKS; i++)
    array[i] = 0;
#pragma omp parallel
  {
#pragma omp single
    {
      for (i = 0; i < NUM_TASKS; i++) {
        /* First we have to store the value of the loop index in a new variable
         * which will be private for each task because otherwise it will be overwritten
         * if the execution of the task takes longer than the time which is needed to
         * enter the next step of the loop!
         */
        int myi;
        myi = i;
#pragma omp task
        {
          printf("Task %i sleeping in thread %d\n", myi, omp_get_thread_num());
          sleep(SLEEPTIME);
          array[myi] = 1;
        } /* end of omp task */
      } /* end of for */
      printf("At taskwait construct\n");
#pragma omp taskwait
      /* check if all tasks were finished */
      for (i = 0; i < NUM_TASKS; i++)
        if (array[i] != 1)
          result1++;
      /* generate some more tasks which now shall overwrite
       * the values in the tids array */
      for (i = 0; i < NUM_TASKS; i++) {
        int myi;
        myi = i;
#pragma omp task
        {
          printf("Update task %d\n", myi);
          array[myi] = 2;
        } /* end of omp task */
      } /* end of for */
    } /* end of single */
  } /*end of parallel */
  /* final check, if all array elements contain the right values: */
  for (i = 0; i < NUM_TASKS; i++) {
    if (array[i] != 2)
      result2++;
  }
  return ((result1 == 0) && (result2 == 0));
}
int main(void) {
  // EXIT_SUCCESS iff the taskwait test passes.
  return test_omp_taskwait() ? EXIT_SUCCESS : EXIT_FAILURE;
}
|
GB_unop__bnot_uint32_uint32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__bnot_uint32_uint32)
// op(A') function: GB (_unop_tran__bnot_uint32_uint32)
// C type: uint32_t
// A type: uint32_t
// cast: uint32_t cij = aij
// unaryop: cij = ~(aij)
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = ~(x) ;
// casting
#define GB_CAST(z, aij) \
uint32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint32_t z = aij ; \
Cx [pC] = ~(z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BNOT || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = ~(Ax [p]) for every entry of A (entrywise, so Cx and Ax may
// alias).  If A is bitmap, Ab marks which positions hold entries.
// NOTE(review): this file is auto-generated from Generator/*; keep any
// changes in sync with the generator.
GrB_Info GB (_unop_apply__bnot_uint32_uint32)
(
    uint32_t *Cx,               // Cx and Ax may be aliased
    const uint32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,                // number of entries (or bitmap length)
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // A is sparse, hypersparse, or full: every position holds an entry.
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (uint32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint32_t aij = Ax [p] ;
            uint32_t z = aij ;
            Cx [p] = ~(z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint32_t aij = Ax [p] ;
            uint32_t z = aij ;
            Cx [p] = ~(z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the unary operator.
// The transpose kernel is textually included from GB_unop_transpose.c and
// uses the GB_CAST_OP macro defined earlier in this (auto-generated) file.
GrB_Info GB (_unop_tran__bnot_uint32_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
ordered_doacross_codegen.c | // RUN: %clang_cc1 -verify -fopenmp -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -triple x86_64-unknown-unknown -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -triple x86_64-unknown-unknown -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp-simd -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -triple x86_64-unknown-unknown -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -triple x86_64-unknown-unknown -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
// CHECK: [[KMP_DIM:%.+]] = type { i64, i64, i64 }
extern int n;
int a[10], b[10], c[10], d[10];
void foo();
// CHECK-LABEL: @main()
int main() {
  int i;
  // Doacross loop: each iteration posts its own index and waits on the
  // iteration two behind it.  All "// CHECK" lines below are FileCheck
  // directives and must stay byte-identical.
  // CHECK: [[DIMS:%.+]] = alloca [1 x [[KMP_DIM]]],
  // CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num([[IDENT:%.+]])
  // CHECK: icmp
  // CHECK-NEXT: br i1 %
  // CHECK: [[CAST:%.+]] = bitcast [1 x [[KMP_DIM]]]* [[DIMS]] to i8*
  // CHECK: call void @llvm.memset.p0i8.i64(i8* align 8 [[CAST]], i8 0, i64 24, i1 false)
  // CHECK: [[DIM:%.+]] = getelementptr inbounds [1 x [[KMP_DIM]]], [1 x [[KMP_DIM]]]* [[DIMS]], i64 0, i64 0
  // CHECK: getelementptr inbounds [[KMP_DIM]], [[KMP_DIM]]* [[DIM]], i32 0, i32 1
  // CHECK: store i64 %{{.+}}, i64* %
  // CHECK: getelementptr inbounds [[KMP_DIM]], [[KMP_DIM]]* [[DIM]], i32 0, i32 2
  // CHECK: store i64 1, i64* %
  // CHECK: [[DIM:%.+]] = getelementptr inbounds [1 x [[KMP_DIM]]], [1 x [[KMP_DIM]]]* [[DIMS]], i64 0, i64 0
  // CHECK: [[CAST:%.+]] = bitcast [[KMP_DIM]]* [[DIM]] to i8*
  // CHECK: call void @__kmpc_doacross_init([[IDENT]], i32 [[GTID]], i32 1, i8* [[CAST]])
  // CHECK: call void @__kmpc_for_static_init_4(
#pragma omp for ordered(1)
  for (i = 0; i < n; ++i) {
    a[i] = b[i] + 1;
    foo();
    // CHECK: call void [[FOO:.+]](
    // CHECK: load i32, i32* [[I:%.+]],
    // CHECK-NEXT: sub nsw i32 %{{.+}}, 0
    // CHECK-NEXT: sdiv i32 %{{.+}}, 1
    // CHECK-NEXT: sext i32 %{{.+}} to i64
    // CHECK-NEXT: [[TMP:%.+]] = getelementptr inbounds [1 x i64], [1 x i64]* [[CNT:%.+]], i64 0, i64 0
    // CHECK-NEXT: store i64 %{{.+}}, i64* [[TMP]],
    // CHECK-NEXT: [[TMP:%.+]] = getelementptr inbounds [1 x i64], [1 x i64]* [[CNT]], i64 0, i64 0
    // CHECK-NEXT: call void @__kmpc_doacross_post([[IDENT]], i32 [[GTID]], i64* [[TMP]])
#pragma omp ordered depend(source)
    c[i] = c[i] + 1;
    foo();
    // CHECK: call void [[FOO]]
    // CHECK: load i32, i32* [[I]],
    // CHECK-NEXT: sub nsw i32 %{{.+}}, 2
    // CHECK-NEXT: sub nsw i32 %{{.+}}, 0
    // CHECK-NEXT: sdiv i32 %{{.+}}, 1
    // CHECK-NEXT: sext i32 %{{.+}} to i64
    // CHECK-NEXT: [[TMP:%.+]] = getelementptr inbounds [1 x i64], [1 x i64]* [[CNT:%.+]], i64 0, i64 0
    // CHECK-NEXT: store i64 %{{.+}}, i64* [[TMP]],
    // CHECK-NEXT: [[TMP:%.+]] = getelementptr inbounds [1 x i64], [1 x i64]* [[CNT]], i64 0, i64 0
    // CHECK-NEXT: call void @__kmpc_doacross_wait([[IDENT]], i32 [[GTID]], i64* [[TMP]])
#pragma omp ordered depend(sink : i - 2)
    d[i] = a[i - 2];
  }
  // CHECK: call void @__kmpc_for_static_fini(
  // CHECK: call void @__kmpc_doacross_fini([[IDENT]], i32 [[GTID]])
  // CHECK: ret i32 0
  return 0;
}
#endif // HEADER
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.