source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
GB_binop__land_uint8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__land_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__land_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__land_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__land_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__land_uint8)
// A*D function (colscale): GB (_AxD__land_uint8)
// D*A function (rowscale): GB (_DxB__land_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__land_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__land_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__land_uint8)
// C=scalar+B GB (_bind1st__land_uint8)
// C=scalar+B' GB (_bind1st_tran__land_uint8)
// C=A+scalar GB (_bind2nd__land_uint8)
// C=A'+scalar GB (_bind2nd_tran__land_uint8)
// C type: uint8_t
// A type: uint8_t
// A pattern? 0
// B type: uint8_t
// B pattern? 0
// BinaryOp: cij = ((aij != 0) && (bij != 0))
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
// (no trailing backslash after the 0: a continuation there would splice the
// next comment line into the macro body)
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) && (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LAND || GxB_NO_UINT8 || GxB_NO_LAND_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// This variant is disabled for LAND: the dense-accum ewise3 kernel is only
// generated for the operators listed below.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; the LAND operator is applied
// entrywise.  The loop body comes from the included template, driven by the
// GB_BINOP / GB_GETA / GB_GETB macros defined above.
void GB (_Cdense_ewise3_noaccum__land_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C with the LAND
// operator.  Returns GrB_NO_VALUE when this operator/type pairing is
// disabled at compile time (see GB_DISABLE above).
GrB_Info GB (_Cdense_accumB__land_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C with the LAND
// operator.  Returns GrB_NO_VALUE when this operator/type pairing is
// disabled at compile time.
// (Fixed: the original returned GrB_SUCCESS inside the inner block AND again
// after it; the second return was unreachable.  A single exit point now
// matches the sibling GB (_Cdense_accumB__land_uint8) above.)
GrB_Info GB (_Cdense_accumb__land_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, applying the
// LAND operator.  Work is partitioned over A via A_ek_slicing.
GrB_Info GB (_AxD__land_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, applying the
// LAND operator.
GrB_Info GB (_DxB__land_uint8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B, using the LAND operator.  The
// result pattern is the union of the patterns of A and B.  When
// is_eWiseUnion is true, alpha/beta scalars substitute for entries present
// in only one of A or B.
GrB_Info GB (_AaddB__land_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint8_t alpha_scalar ;
uint8_t beta_scalar ;
// the eWiseUnion scalars are only read when is_eWiseUnion is true
if (is_eWiseUnion)
{
alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is
// sparse or hypersparse, using the LAND operator.  The result pattern is
// the intersection of the patterns of A and B.
GrB_Info GB (_AemultB_08__land_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full, using the LAND operator.  LAND is commutative
// (GB_BINOP_FLIP is 0 above), so the flipxy case needs no special handling
// here.
GrB_Info GB (_AemultB_02__land_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hyper and both A
// and B are bitmap/full, using the LAND operator.  Work is partitioned
// over the mask M via M_ek_slicing.
GrB_Info GB (_AemultB_04__land_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap, using the
// LAND operator.
GrB_Info GB (_AemultB_bitmap__land_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the LAND operator with the scalar x bound as the
// first argument.  Bb is the bitmap of B (NULL if B is full); entries with
// Bb[p]==0 are skipped.  Cx and Bx may be aliased.
GrB_Info GB (_bind1st__land_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint8_t bij = GBX (Bx, p, false) ;
// cij = (x && bij), as 0 or 1
Cx [p] = ((x != 0) && (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the LAND operator with the scalar y bound as the
// second argument.  Ab is the bitmap of A (NULL if A is full); entries with
// Ab[p]==0 are skipped.  Cx and Ax may be aliased.
GrB_Info GB (_bind2nd__land_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = GBX (Ax, p, false) ;
// cij = (aij && y), as 0 or 1
Cx [p] = ((aij != 0) && (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) && (aij != 0)) ; \
}
// C = op (x, A'): transpose A and apply the LAND operator with the scalar x
// bound as the first argument, via the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind1st_tran__land_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the rest of this file
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) && (y != 0)) ; \
}
// C = op (A', y): transpose A and apply the LAND operator with the scalar y
// bound as the second argument, via the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind2nd_tran__land_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
core_dlaset.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zlaset.c, normal z -> d, Fri Sep 28 17:38:22 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"
// for memset function
#include <string.h>
/***************************************************************************//**
*
* @ingroup core_laset
*
* Sets the elements of the matrix A on the diagonal
* to beta and on the off-diagonals to alpha
*
*******************************************************************************
*
* @param[in] uplo
* Specifies which elements of the matrix are to be set
* - PlasmaUpper: Upper part of A is set;
* - PlasmaLower: Lower part of A is set;
* - PlasmaUpperLower: ALL elements of A are set.
*
* @param[in] m
* The number of rows of the matrix A. m >= 0.
*
* @param[in] n
* The number of columns of the matrix A. n >= 0.
*
* @param[in] alpha
* The constant to which the off-diagonal elements are to be set.
*
* @param[in] beta
* The constant to which the diagonal elements are to be set.
*
* @param[in,out] A
* On entry, the m-by-n tile A.
* On exit, A has been set accordingly.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,m).
*
******************************************************************************/
__attribute__((weak))
void plasma_core_dlaset(plasma_enum_t uplo, int m, int n,
                        double alpha, double beta,
                        double *A, int lda)
{
    // A tile that is (a) entirely zero-filled, (b) general (all elements
    // set), and (c) stored contiguously (m == lda) can be cleared with one
    // memset over the whole m*n block.
    int contiguous_zero_fill = (alpha == 0.0) && (beta == 0.0) &&
                               (uplo == PlasmaGeneral) && (m == lda);
    if (!contiguous_zero_fill) {
        // General case: LAPACK dlaset writes alpha off the diagonal and
        // beta on the diagonal, honoring the uplo selection.
        LAPACKE_dlaset_work(LAPACK_COL_MAJOR, lapack_const(uplo),
                            m, n, alpha, beta, A, lda);
        return;
    }
    memset((void*)A, 0, (size_t)m*n*sizeof(double));
}
/******************************************************************************/
// OpenMP task wrapper around plasma_core_dlaset: sets the m-by-n region of
// the mb-by-nb tile A starting at row i and column j.
void plasma_core_omp_dlaset(plasma_enum_t uplo,
int mb, int nb,
int i, int j,
int m, int n,
double alpha, double beta,
double *A)
{
// The task declares an output dependence on the whole mb*nb tile, even
// though only the m-by-n region at offset (i, j) is actually written.
#pragma omp task depend(out:A[0:mb*nb])
plasma_core_dlaset(uplo, m, n,
alpha, beta,
A+i+j*mb, mb);
}
|
convolutiondepthwise_3x3_pack8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Depthwise 3x3 convolution, stride 1, on 8-channel-packed (pack8) float
// data using AVX FMA intrinsics.  One group (= one channel pack) is
// processed per loop iteration, parallelized with OpenMP.
// NOTE(review): assumes the caller has already padded bottom_blob so each
// output row of width outw can read outw+2 input packs -- the `r += 2 * 8`
// at the end of each row skips those 2 extra packs; confirm at call site.
static void convdw3x3s1_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
// per-group bias broadcast over the 8 lanes (zero when no bias given)
__m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + g * 8) : _mm256_set1_ps(0.f);
const float* k0 = kernel.row(g);
float* outptr0 = out.row(0);
const Mat img0 = bottom_blob.channel(g);
// three consecutive input rows feed one output row
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
// the nine 3x3 kernel taps, one 8-float pack each
__m256 _k00 = _mm256_loadu_ps(k0);
__m256 _k01 = _mm256_loadu_ps(k0 + 8);
__m256 _k02 = _mm256_loadu_ps(k0 + 16);
__m256 _k10 = _mm256_loadu_ps(k0 + 24);
__m256 _k11 = _mm256_loadu_ps(k0 + 32);
__m256 _k12 = _mm256_loadu_ps(k0 + 40);
__m256 _k20 = _mm256_loadu_ps(k0 + 48);
__m256 _k21 = _mm256_loadu_ps(k0 + 56);
__m256 _k22 = _mm256_loadu_ps(k0 + 64);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
// main loop: 8 output packs per iteration; neighboring outputs reuse
// the previously loaded input packs (stride 1 overlap)
for (; j + 7 < outw; j += 8)
{
__m256 _sum0 = _bias0;
__m256 _r00 = _mm256_loadu_ps(r0);
__m256 _r01 = _mm256_loadu_ps(r0 + 8);
__m256 _r02 = _mm256_loadu_ps(r0 + 16);
__m256 _r10 = _mm256_loadu_ps(r1);
__m256 _r11 = _mm256_loadu_ps(r1 + 8);
__m256 _r12 = _mm256_loadu_ps(r1 + 16);
__m256 _r20 = _mm256_loadu_ps(r2);
__m256 _r21 = _mm256_loadu_ps(r2 + 8);
__m256 _r22 = _mm256_loadu_ps(r2 + 16);
_sum0 = _mm256_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm256_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm256_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm256_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm256_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm256_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm256_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm256_fmadd_ps(_k22, _r22, _sum0);
__m256 _sum1 = _bias0;
__m256 _r03 = _mm256_loadu_ps(r0 + 24);
__m256 _r13 = _mm256_loadu_ps(r1 + 24);
__m256 _r23 = _mm256_loadu_ps(r2 + 24);
_mm256_storeu_ps(outptr0, _sum0);
_sum1 = _mm256_fmadd_ps(_k00, _r01, _sum1);
_sum1 = _mm256_fmadd_ps(_k01, _r02, _sum1);
_sum1 = _mm256_fmadd_ps(_k02, _r03, _sum1);
_sum1 = _mm256_fmadd_ps(_k10, _r11, _sum1);
_sum1 = _mm256_fmadd_ps(_k11, _r12, _sum1);
_sum1 = _mm256_fmadd_ps(_k12, _r13, _sum1);
_sum1 = _mm256_fmadd_ps(_k20, _r21, _sum1);
_sum1 = _mm256_fmadd_ps(_k21, _r22, _sum1);
_sum1 = _mm256_fmadd_ps(_k22, _r23, _sum1);
__m256 _sum2 = _bias0;
__m256 _r04 = _mm256_loadu_ps(r0 + 32);
__m256 _r14 = _mm256_loadu_ps(r1 + 32);
__m256 _r24 = _mm256_loadu_ps(r2 + 32);
_mm256_storeu_ps(outptr0 + 8, _sum1);
_sum2 = _mm256_fmadd_ps(_k00, _r02, _sum2);
_sum2 = _mm256_fmadd_ps(_k01, _r03, _sum2);
_sum2 = _mm256_fmadd_ps(_k02, _r04, _sum2);
_sum2 = _mm256_fmadd_ps(_k10, _r12, _sum2);
_sum2 = _mm256_fmadd_ps(_k11, _r13, _sum2);
_sum2 = _mm256_fmadd_ps(_k12, _r14, _sum2);
_sum2 = _mm256_fmadd_ps(_k20, _r22, _sum2);
_sum2 = _mm256_fmadd_ps(_k21, _r23, _sum2);
_sum2 = _mm256_fmadd_ps(_k22, _r24, _sum2);
__m256 _sum3 = _bias0;
__m256 _r05 = _mm256_loadu_ps(r0 + 40);
__m256 _r15 = _mm256_loadu_ps(r1 + 40);
__m256 _r25 = _mm256_loadu_ps(r2 + 40);
_mm256_storeu_ps(outptr0 + 16, _sum2);
_sum3 = _mm256_fmadd_ps(_k00, _r03, _sum3);
_sum3 = _mm256_fmadd_ps(_k01, _r04, _sum3);
_sum3 = _mm256_fmadd_ps(_k02, _r05, _sum3);
_sum3 = _mm256_fmadd_ps(_k10, _r13, _sum3);
_sum3 = _mm256_fmadd_ps(_k11, _r14, _sum3);
_sum3 = _mm256_fmadd_ps(_k12, _r15, _sum3);
_sum3 = _mm256_fmadd_ps(_k20, _r23, _sum3);
_sum3 = _mm256_fmadd_ps(_k21, _r24, _sum3);
_sum3 = _mm256_fmadd_ps(_k22, _r25, _sum3);
__m256 _sum4 = _bias0;
__m256 _r06 = _mm256_loadu_ps(r0 + 48);
__m256 _r16 = _mm256_loadu_ps(r1 + 48);
__m256 _r26 = _mm256_loadu_ps(r2 + 48);
_mm256_storeu_ps(outptr0 + 24, _sum3);
_sum4 = _mm256_fmadd_ps(_k00, _r04, _sum4);
_sum4 = _mm256_fmadd_ps(_k01, _r05, _sum4);
_sum4 = _mm256_fmadd_ps(_k02, _r06, _sum4);
_sum4 = _mm256_fmadd_ps(_k10, _r14, _sum4);
_sum4 = _mm256_fmadd_ps(_k11, _r15, _sum4);
_sum4 = _mm256_fmadd_ps(_k12, _r16, _sum4);
_sum4 = _mm256_fmadd_ps(_k20, _r24, _sum4);
_sum4 = _mm256_fmadd_ps(_k21, _r25, _sum4);
_sum4 = _mm256_fmadd_ps(_k22, _r26, _sum4);
__m256 _sum5 = _bias0;
__m256 _r07 = _mm256_loadu_ps(r0 + 56);
__m256 _r17 = _mm256_loadu_ps(r1 + 56);
__m256 _r27 = _mm256_loadu_ps(r2 + 56);
_mm256_storeu_ps(outptr0 + 32, _sum4);
_sum5 = _mm256_fmadd_ps(_k00, _r05, _sum5);
_sum5 = _mm256_fmadd_ps(_k01, _r06, _sum5);
_sum5 = _mm256_fmadd_ps(_k02, _r07, _sum5);
_sum5 = _mm256_fmadd_ps(_k10, _r15, _sum5);
_sum5 = _mm256_fmadd_ps(_k11, _r16, _sum5);
_sum5 = _mm256_fmadd_ps(_k12, _r17, _sum5);
_sum5 = _mm256_fmadd_ps(_k20, _r25, _sum5);
_sum5 = _mm256_fmadd_ps(_k21, _r26, _sum5);
_sum5 = _mm256_fmadd_ps(_k22, _r27, _sum5);
__m256 _sum6 = _bias0;
__m256 _r08 = _mm256_loadu_ps(r0 + 64);
__m256 _r18 = _mm256_loadu_ps(r1 + 64);
__m256 _r28 = _mm256_loadu_ps(r2 + 64);
_mm256_storeu_ps(outptr0 + 40, _sum5);
_sum6 = _mm256_fmadd_ps(_k00, _r06, _sum6);
_sum6 = _mm256_fmadd_ps(_k01, _r07, _sum6);
_sum6 = _mm256_fmadd_ps(_k02, _r08, _sum6);
_sum6 = _mm256_fmadd_ps(_k10, _r16, _sum6);
_sum6 = _mm256_fmadd_ps(_k11, _r17, _sum6);
_sum6 = _mm256_fmadd_ps(_k12, _r18, _sum6);
_sum6 = _mm256_fmadd_ps(_k20, _r26, _sum6);
_sum6 = _mm256_fmadd_ps(_k21, _r27, _sum6);
_sum6 = _mm256_fmadd_ps(_k22, _r28, _sum6);
__m256 _sum7 = _bias0;
__m256 _r09 = _mm256_loadu_ps(r0 + 72);
__m256 _r19 = _mm256_loadu_ps(r1 + 72);
__m256 _r29 = _mm256_loadu_ps(r2 + 72);
_mm256_storeu_ps(outptr0 + 48, _sum6);
_sum7 = _mm256_fmadd_ps(_k00, _r07, _sum7);
_sum7 = _mm256_fmadd_ps(_k01, _r08, _sum7);
_sum7 = _mm256_fmadd_ps(_k02, _r09, _sum7);
_sum7 = _mm256_fmadd_ps(_k10, _r17, _sum7);
_sum7 = _mm256_fmadd_ps(_k11, _r18, _sum7);
_sum7 = _mm256_fmadd_ps(_k12, _r19, _sum7);
_sum7 = _mm256_fmadd_ps(_k20, _r27, _sum7);
_sum7 = _mm256_fmadd_ps(_k21, _r28, _sum7);
_sum7 = _mm256_fmadd_ps(_k22, _r29, _sum7);
_mm256_storeu_ps(outptr0 + 56, _sum7);
r0 += 64;
r1 += 64;
r2 += 64;
outptr0 += 64;
}
// 4 output packs per iteration
for (; j + 3 < outw; j += 4)
{
__m256 _sum0 = _bias0;
__m256 _r00 = _mm256_loadu_ps(r0);
__m256 _r01 = _mm256_loadu_ps(r0 + 8);
__m256 _r02 = _mm256_loadu_ps(r0 + 16);
__m256 _r10 = _mm256_loadu_ps(r1);
__m256 _r11 = _mm256_loadu_ps(r1 + 8);
__m256 _r12 = _mm256_loadu_ps(r1 + 16);
__m256 _r20 = _mm256_loadu_ps(r2);
__m256 _r21 = _mm256_loadu_ps(r2 + 8);
__m256 _r22 = _mm256_loadu_ps(r2 + 16);
_sum0 = _mm256_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm256_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm256_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm256_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm256_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm256_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm256_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm256_fmadd_ps(_k22, _r22, _sum0);
__m256 _sum1 = _bias0;
__m256 _r03 = _mm256_loadu_ps(r0 + 24);
__m256 _r13 = _mm256_loadu_ps(r1 + 24);
__m256 _r23 = _mm256_loadu_ps(r2 + 24);
_mm256_storeu_ps(outptr0, _sum0);
_sum1 = _mm256_fmadd_ps(_k00, _r01, _sum1);
_sum1 = _mm256_fmadd_ps(_k01, _r02, _sum1);
_sum1 = _mm256_fmadd_ps(_k02, _r03, _sum1);
_sum1 = _mm256_fmadd_ps(_k10, _r11, _sum1);
_sum1 = _mm256_fmadd_ps(_k11, _r12, _sum1);
_sum1 = _mm256_fmadd_ps(_k12, _r13, _sum1);
_sum1 = _mm256_fmadd_ps(_k20, _r21, _sum1);
_sum1 = _mm256_fmadd_ps(_k21, _r22, _sum1);
_sum1 = _mm256_fmadd_ps(_k22, _r23, _sum1);
__m256 _sum2 = _bias0;
__m256 _r04 = _mm256_loadu_ps(r0 + 32);
__m256 _r14 = _mm256_loadu_ps(r1 + 32);
__m256 _r24 = _mm256_loadu_ps(r2 + 32);
_mm256_storeu_ps(outptr0 + 8, _sum1);
_sum2 = _mm256_fmadd_ps(_k00, _r02, _sum2);
_sum2 = _mm256_fmadd_ps(_k01, _r03, _sum2);
_sum2 = _mm256_fmadd_ps(_k02, _r04, _sum2);
_sum2 = _mm256_fmadd_ps(_k10, _r12, _sum2);
_sum2 = _mm256_fmadd_ps(_k11, _r13, _sum2);
_sum2 = _mm256_fmadd_ps(_k12, _r14, _sum2);
_sum2 = _mm256_fmadd_ps(_k20, _r22, _sum2);
_sum2 = _mm256_fmadd_ps(_k21, _r23, _sum2);
_sum2 = _mm256_fmadd_ps(_k22, _r24, _sum2);
__m256 _sum3 = _bias0;
__m256 _r05 = _mm256_loadu_ps(r0 + 40);
__m256 _r15 = _mm256_loadu_ps(r1 + 40);
__m256 _r25 = _mm256_loadu_ps(r2 + 40);
_mm256_storeu_ps(outptr0 + 16, _sum2);
_sum3 = _mm256_fmadd_ps(_k00, _r03, _sum3);
_sum3 = _mm256_fmadd_ps(_k01, _r04, _sum3);
_sum3 = _mm256_fmadd_ps(_k02, _r05, _sum3);
_sum3 = _mm256_fmadd_ps(_k10, _r13, _sum3);
_sum3 = _mm256_fmadd_ps(_k11, _r14, _sum3);
_sum3 = _mm256_fmadd_ps(_k12, _r15, _sum3);
_sum3 = _mm256_fmadd_ps(_k20, _r23, _sum3);
_sum3 = _mm256_fmadd_ps(_k21, _r24, _sum3);
_sum3 = _mm256_fmadd_ps(_k22, _r25, _sum3);
_mm256_storeu_ps(outptr0 + 24, _sum3);
r0 += 32;
r1 += 32;
r2 += 32;
outptr0 += 32;
}
// 2 output packs per iteration
for (; j + 1 < outw; j += 2)
{
__m256 _sum0 = _bias0;
__m256 _r00 = _mm256_loadu_ps(r0);
__m256 _r01 = _mm256_loadu_ps(r0 + 8);
__m256 _r02 = _mm256_loadu_ps(r0 + 16);
__m256 _r10 = _mm256_loadu_ps(r1);
__m256 _r11 = _mm256_loadu_ps(r1 + 8);
__m256 _r12 = _mm256_loadu_ps(r1 + 16);
__m256 _r20 = _mm256_loadu_ps(r2);
__m256 _r21 = _mm256_loadu_ps(r2 + 8);
__m256 _r22 = _mm256_loadu_ps(r2 + 16);
_sum0 = _mm256_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm256_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm256_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm256_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm256_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm256_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm256_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm256_fmadd_ps(_k22, _r22, _sum0);
__m256 _sum1 = _bias0;
__m256 _r03 = _mm256_loadu_ps(r0 + 24);
__m256 _r13 = _mm256_loadu_ps(r1 + 24);
__m256 _r23 = _mm256_loadu_ps(r2 + 24);
_mm256_storeu_ps(outptr0, _sum0);
_sum1 = _mm256_fmadd_ps(_k00, _r01, _sum1);
_sum1 = _mm256_fmadd_ps(_k01, _r02, _sum1);
_sum1 = _mm256_fmadd_ps(_k02, _r03, _sum1);
_sum1 = _mm256_fmadd_ps(_k10, _r11, _sum1);
_sum1 = _mm256_fmadd_ps(_k11, _r12, _sum1);
_sum1 = _mm256_fmadd_ps(_k12, _r13, _sum1);
_sum1 = _mm256_fmadd_ps(_k20, _r21, _sum1);
_sum1 = _mm256_fmadd_ps(_k21, _r22, _sum1);
_sum1 = _mm256_fmadd_ps(_k22, _r23, _sum1);
_mm256_storeu_ps(outptr0 + 8, _sum1);
r0 += 16;
r1 += 16;
r2 += 16;
outptr0 += 16;
}
// tail: one output pack at a time
for (; j < outw; j++)
{
__m256 _sum0 = _bias0;
__m256 _r00 = _mm256_loadu_ps(r0);
__m256 _r01 = _mm256_loadu_ps(r0 + 8);
__m256 _r02 = _mm256_loadu_ps(r0 + 16);
__m256 _r10 = _mm256_loadu_ps(r1);
__m256 _r11 = _mm256_loadu_ps(r1 + 8);
__m256 _r12 = _mm256_loadu_ps(r1 + 16);
__m256 _r20 = _mm256_loadu_ps(r2);
__m256 _r21 = _mm256_loadu_ps(r2 + 8);
__m256 _r22 = _mm256_loadu_ps(r2 + 16);
_sum0 = _mm256_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm256_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm256_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm256_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm256_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm256_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm256_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm256_fmadd_ps(_k22, _r22, _sum0);
_mm256_storeu_ps(outptr0, _sum0);
r0 += 8;
r1 += 8;
r2 += 8;
outptr0 += 8;
}
// skip the 2 extra packs at the end of each padded input row
r0 += 2 * 8;
r1 += 2 * 8;
r2 += 2 * 8;
}
}
}
// Depthwise 3x3 convolution, stride 2, on 8-channel-packed (pack8) float
// data using AVX FMA intrinsics.  One group (= one channel pack) is
// processed per loop iteration, parallelized with OpenMP.
static void convdw3x3s2_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
// after consuming 2*outw packs of a row, jump the remainder of that row
// plus one full row (stride 2 skips every other input row)
const int tailstep = (w - 2 * outw + w) * 8;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
// per-group bias broadcast over the 8 lanes (zero when no bias given)
__m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + g * 8) : _mm256_set1_ps(0.f);
const float* k0 = kernel.row(g);
float* outptr0 = out.row(0);
const Mat img0 = bottom_blob.channel(g);
// three consecutive input rows feed one output row
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
// the nine 3x3 kernel taps, one 8-float pack each
__m256 _k00 = _mm256_loadu_ps(k0);
__m256 _k01 = _mm256_loadu_ps(k0 + 8);
__m256 _k02 = _mm256_loadu_ps(k0 + 16);
__m256 _k10 = _mm256_loadu_ps(k0 + 24);
__m256 _k11 = _mm256_loadu_ps(k0 + 32);
__m256 _k12 = _mm256_loadu_ps(k0 + 40);
__m256 _k20 = _mm256_loadu_ps(k0 + 48);
__m256 _k21 = _mm256_loadu_ps(k0 + 56);
__m256 _k22 = _mm256_loadu_ps(k0 + 64);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
// main loop: 4 output packs per iteration; consecutive outputs are 2
// input packs apart (stride 2), sharing one boundary pack
for (; j + 3 < outw; j += 4)
{
__m256 _sum0 = _bias0;
__m256 _r00 = _mm256_loadu_ps(r0);
__m256 _r01 = _mm256_loadu_ps(r0 + 8);
__m256 _r02 = _mm256_loadu_ps(r0 + 16);
__m256 _r10 = _mm256_loadu_ps(r1);
__m256 _r11 = _mm256_loadu_ps(r1 + 8);
__m256 _r12 = _mm256_loadu_ps(r1 + 16);
__m256 _r20 = _mm256_loadu_ps(r2);
__m256 _r21 = _mm256_loadu_ps(r2 + 8);
__m256 _r22 = _mm256_loadu_ps(r2 + 16);
_sum0 = _mm256_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm256_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm256_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm256_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm256_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm256_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm256_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm256_fmadd_ps(_k22, _r22, _sum0);
__m256 _sum1 = _bias0;
__m256 _r03 = _mm256_loadu_ps(r0 + 24);
__m256 _r13 = _mm256_loadu_ps(r1 + 24);
__m256 _r23 = _mm256_loadu_ps(r2 + 24);
__m256 _r04 = _mm256_loadu_ps(r0 + 32);
__m256 _r14 = _mm256_loadu_ps(r1 + 32);
__m256 _r24 = _mm256_loadu_ps(r2 + 32);
_mm256_storeu_ps(outptr0, _sum0);
_sum1 = _mm256_fmadd_ps(_k00, _r02, _sum1);
_sum1 = _mm256_fmadd_ps(_k01, _r03, _sum1);
_sum1 = _mm256_fmadd_ps(_k02, _r04, _sum1);
_sum1 = _mm256_fmadd_ps(_k10, _r12, _sum1);
_sum1 = _mm256_fmadd_ps(_k11, _r13, _sum1);
_sum1 = _mm256_fmadd_ps(_k12, _r14, _sum1);
_sum1 = _mm256_fmadd_ps(_k20, _r22, _sum1);
_sum1 = _mm256_fmadd_ps(_k21, _r23, _sum1);
_sum1 = _mm256_fmadd_ps(_k22, _r24, _sum1);
__m256 _sum2 = _bias0;
__m256 _r05 = _mm256_loadu_ps(r0 + 40);
__m256 _r15 = _mm256_loadu_ps(r1 + 40);
__m256 _r25 = _mm256_loadu_ps(r2 + 40);
__m256 _r06 = _mm256_loadu_ps(r0 + 48);
__m256 _r16 = _mm256_loadu_ps(r1 + 48);
__m256 _r26 = _mm256_loadu_ps(r2 + 48);
_mm256_storeu_ps(outptr0 + 8, _sum1);
_sum2 = _mm256_fmadd_ps(_k00, _r04, _sum2);
_sum2 = _mm256_fmadd_ps(_k01, _r05, _sum2);
_sum2 = _mm256_fmadd_ps(_k02, _r06, _sum2);
_sum2 = _mm256_fmadd_ps(_k10, _r14, _sum2);
_sum2 = _mm256_fmadd_ps(_k11, _r15, _sum2);
_sum2 = _mm256_fmadd_ps(_k12, _r16, _sum2);
_sum2 = _mm256_fmadd_ps(_k20, _r24, _sum2);
_sum2 = _mm256_fmadd_ps(_k21, _r25, _sum2);
_sum2 = _mm256_fmadd_ps(_k22, _r26, _sum2);
__m256 _sum3 = _bias0;
__m256 _r07 = _mm256_loadu_ps(r0 + 56);
__m256 _r17 = _mm256_loadu_ps(r1 + 56);
__m256 _r27 = _mm256_loadu_ps(r2 + 56);
__m256 _r08 = _mm256_loadu_ps(r0 + 64);
__m256 _r18 = _mm256_loadu_ps(r1 + 64);
__m256 _r28 = _mm256_loadu_ps(r2 + 64);
_mm256_storeu_ps(outptr0 + 16, _sum2);
_sum3 = _mm256_fmadd_ps(_k00, _r06, _sum3);
_sum3 = _mm256_fmadd_ps(_k01, _r07, _sum3);
_sum3 = _mm256_fmadd_ps(_k02, _r08, _sum3);
_sum3 = _mm256_fmadd_ps(_k10, _r16, _sum3);
_sum3 = _mm256_fmadd_ps(_k11, _r17, _sum3);
_sum3 = _mm256_fmadd_ps(_k12, _r18, _sum3);
_sum3 = _mm256_fmadd_ps(_k20, _r26, _sum3);
_sum3 = _mm256_fmadd_ps(_k21, _r27, _sum3);
_sum3 = _mm256_fmadd_ps(_k22, _r28, _sum3);
_mm256_storeu_ps(outptr0 + 24, _sum3);
r0 += 2 * 32;
r1 += 2 * 32;
r2 += 2 * 32;
outptr0 += 32;
}
// 2 output packs per iteration
for (; j + 1 < outw; j += 2)
{
__m256 _sum0 = _bias0;
__m256 _r00 = _mm256_loadu_ps(r0);
__m256 _r01 = _mm256_loadu_ps(r0 + 8);
__m256 _r02 = _mm256_loadu_ps(r0 + 16);
__m256 _r10 = _mm256_loadu_ps(r1);
__m256 _r11 = _mm256_loadu_ps(r1 + 8);
__m256 _r12 = _mm256_loadu_ps(r1 + 16);
__m256 _r20 = _mm256_loadu_ps(r2);
__m256 _r21 = _mm256_loadu_ps(r2 + 8);
__m256 _r22 = _mm256_loadu_ps(r2 + 16);
_sum0 = _mm256_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm256_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm256_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm256_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm256_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm256_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm256_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm256_fmadd_ps(_k22, _r22, _sum0);
__m256 _sum1 = _bias0;
__m256 _r03 = _mm256_loadu_ps(r0 + 24);
__m256 _r13 = _mm256_loadu_ps(r1 + 24);
__m256 _r23 = _mm256_loadu_ps(r2 + 24);
__m256 _r04 = _mm256_loadu_ps(r0 + 32);
__m256 _r14 = _mm256_loadu_ps(r1 + 32);
__m256 _r24 = _mm256_loadu_ps(r2 + 32);
_mm256_storeu_ps(outptr0, _sum0);
_sum1 = _mm256_fmadd_ps(_k00, _r02, _sum1);
_sum1 = _mm256_fmadd_ps(_k01, _r03, _sum1);
_sum1 = _mm256_fmadd_ps(_k02, _r04, _sum1);
_sum1 = _mm256_fmadd_ps(_k10, _r12, _sum1);
_sum1 = _mm256_fmadd_ps(_k11, _r13, _sum1);
_sum1 = _mm256_fmadd_ps(_k12, _r14, _sum1);
_sum1 = _mm256_fmadd_ps(_k20, _r22, _sum1);
_sum1 = _mm256_fmadd_ps(_k21, _r23, _sum1);
_sum1 = _mm256_fmadd_ps(_k22, _r24, _sum1);
_mm256_storeu_ps(outptr0 + 8, _sum1);
r0 += 2 * 16;
r1 += 2 * 16;
r2 += 2 * 16;
outptr0 += 16;
}
// tail: one output pack at a time
for (; j < outw; j++)
{
__m256 _sum0 = _bias0;
__m256 _r00 = _mm256_loadu_ps(r0);
__m256 _r01 = _mm256_loadu_ps(r0 + 8);
__m256 _r02 = _mm256_loadu_ps(r0 + 16);
__m256 _r10 = _mm256_loadu_ps(r1);
__m256 _r11 = _mm256_loadu_ps(r1 + 8);
__m256 _r12 = _mm256_loadu_ps(r1 + 16);
__m256 _r20 = _mm256_loadu_ps(r2);
__m256 _r21 = _mm256_loadu_ps(r2 + 8);
__m256 _r22 = _mm256_loadu_ps(r2 + 16);
_sum0 = _mm256_fmadd_ps(_k00, _r00, _sum0);
_sum0 = _mm256_fmadd_ps(_k01, _r01, _sum0);
_sum0 = _mm256_fmadd_ps(_k02, _r02, _sum0);
_sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0);
_sum0 = _mm256_fmadd_ps(_k11, _r11, _sum0);
_sum0 = _mm256_fmadd_ps(_k12, _r12, _sum0);
_sum0 = _mm256_fmadd_ps(_k20, _r20, _sum0);
_sum0 = _mm256_fmadd_ps(_k21, _r21, _sum0);
_sum0 = _mm256_fmadd_ps(_k22, _r22, _sum0);
_mm256_storeu_ps(outptr0, _sum0);
r0 += 2 * 8;
r1 += 2 * 8;
r2 += 2 * 8;
outptr0 += 8;
}
// advance to the next pair of input rows
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
|
Process.h | #ifndef PROCESS_H_
#define PROCESS_H_
/* =========================================================================
Copyright (c) 2008-2015, Institute for Microelectronics, TU Wien.
-----------------
ViennaTS - The Vienna Topography Simulator
-----------------
Contact: viennats@iue.tuwien.ac.at
License: MIT (X11), see file LICENSE in the base directory
============================================================================= */
#include "Time.h"
#include "calc.h"
#include <vector>
#include <fstream>
#include <string>
#include <list>
#include <algorithm>
#include <iostream>
#define BOOST_NO_HASH
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/connected_components.hpp>
#include "message.h"
#include "Partition/PartitionNeighborLinksArrays.h"
#include "Partition/PartitionUpDownLinkTree.h"
#include "Partition/PartitionFullGrid.h"
#include "./LSlib/vector.hpp"
#include "boundaries.h"
///Process related objects and methods.
namespace proc {
///Adds (for num_layers > 0) copies of the top-most level set to the stack,
///or removes (for num_layers < 0) |num_layers| level sets from the top.
///LS must be non-empty; when removing, at least two level sets must remain
///before each removal (asserted).
template <class LevelSetType> void AddLayer(std::list<LevelSetType>& LS, int num_layers) {
    // duplicate the top-most level set num_layers times
    for (int i=0;i<num_layers;++i) {
        LS.push_back(LS.back());
    }
    // negative num_layers: drop |num_layers| level sets from the top.
    // BUGFIX: the previous code called LS.erase((LS.end()--)--); post-decrement
    // on the temporary iterators returns the *original* value, so this passed
    // the past-the-end iterator to erase() (undefined behavior). pop_back()
    // removes the intended last element.
    for (int i=0;i>num_layers;--i) {
        assert(LS.size()>=2);
        LS.pop_back();
    }
}
///Determines, for every active point of the top-most level set, the index of
///the material layer it belongs to (0 = top-most layer). LS is the stack of
///level sets ordered bottom-to-top; the result is written to PointMaterials,
///indexed by the active point id of LS.back().
template <class LevelSetsType> void DetermineTopMostLayer(
const LevelSetsType& LS,
std::vector<unsigned int>& PointMaterials) {
//this function determines the materials of the top-most level set
typedef typename LevelSetsType::value_type LevelSetType;
PointMaterials.clear();
PointMaterials.resize(LS.back().num_active_pts());
//segmentation splits the index space into chunks for parallel traversal
typename LevelSetType::points_type segmentation=LS.back().get_new_segmentation();
#pragma omp for schedule(static, 1) // parallelization - Iterations divided into chunks of size 1. Each chunk is assigned to a thread. NOTE(review): "omp for" is orphaned here; it only parallelizes when invoked inside an enclosing "omp parallel" region -- confirm against callers
for (int p=0;p<= static_cast<int>(segmentation.size());++p) {
//chunk p spans the half-open index range [begin_v, end_v)
typename LevelSetType::point_type begin_v=(p==0)?LS.back().grid().min_point_index():segmentation[p-1];
typename LevelSetType::point_type end_v=(p!=static_cast<int>(segmentation.size()))?segmentation[p]:LS.back().grid().increment_indices(LS.back().grid().max_point_index());
//one sequential run-iterator per lower level set, positioned at the chunk start
std::vector< typename LevelSetType::const_iterator_runs> ITs;
for (typename LevelSetsType::const_iterator it=LS.begin();&(*it)!=&(LS.back());++it) ITs.push_back(typename LevelSetType::const_iterator_runs(*it,begin_v));
for (typename LevelSetType::const_iterator_runs it(LS.back(),begin_v );it.start_indices()<end_v;it.next()) {
if (!it.is_active()) continue;
const typename LevelSetType::value_type d=it.value2();
//search downwards through the lower level sets for the first one whose
//value exceeds d; z ends as the layer index the point belongs to
int z=LS.size()-1;
for (;z>0;z--) {
ITs[z-1].go_to_indices_sequential(it.start_indices());
if (d<ITs[z-1].value()) break;
}
PointMaterials[it.active_pt_id2()]=LS.size()-1-z;
}
}
}
namespace {
    ///Two runs count as connected when their level set signs agree.
    template <class I1, class I2>
    bool connected(const I1& a, const I2& b) {
        return a.sign() == b.sign();
    }
}
///Determines for every active grid point of level set l whether it is
///connected (through same-sign runs) to the source region, i.e. the region
///touching the open boundary (first run if is_open_boundary_negative, last
///run otherwise). One flag per active point is appended to Connectivities.
///Returns (number of provisional components, number of components after
///merging equivalences via Boost connected_components).
template <class LStype> std::pair<unsigned int, unsigned int> CalculateConnectivities(
const LStype& l,
std::vector<bool>& Connectivities,
bool is_open_boundary_negative) {
const int D=LStype::dimensions;
//undirected graph whose vertices are provisional components; edges record
//equivalences discovered between touching same-sign runs
boost::adjacency_list <boost::setS, boost::vecS, boost::undirectedS> Graph;
unsigned int num_components=0;
//unsigned int total_number_of_runs=0;
//allocate memory for component list
// std::vector<int> comp_lst[l.number_of_segments()][D+1];
// std::vector<int> *comp_lst = new std::vector<int> [l.number_of_segments()][D+1];
//comp_lst[segment][level+1][run] holds the provisional component id of each
//run, -1 meaning not yet assigned
std::vector<int>** comp_lst = new std::vector<int>* [l.number_of_segments()];
for (unsigned int i=0;i<l.number_of_segments();++i) {
comp_lst[i] = new std::vector<int>[D+1];
}
for (unsigned int sub=0;sub<l.number_of_segments();++sub) {
for (int i = -1;i<D;++i) {
comp_lst[sub][i+1].resize(l.number_of_runs(i,sub),-1);
//total_number_of_runs+=l.number_of_runs(i,sub);
}
}
bool is_first_run=true;
int node_of_first_run=0;
int node_of_last_run=0;
//cycle through all runs: assign provisional component ids, adding a graph
//edge whenever two already-labeled, connected runs carry different ids
for (typename LStype::template const_iterator_neighbor_filtered<typename LStype::filter_all,1> it(l);!it.is_finished();it.next()) {
int & tc = comp_lst[it.center().get_segment_num()][it.center().get_level()][it.center().run_type_position()];
if (tc==-1) {
//try to inherit a component id from an already-labeled connected neighbor
for (int k=0;k<2*D;++k) {
const int & tn= comp_lst[it.neighbor(k).get_segment_num()][it.neighbor(k).get_level()][it.neighbor(k).run_type_position()];
if (tn!=-1) {
if (connected(it.center(),it.neighbor(k))) {
tc=tn;
break;
}
}
}
}
if (tc==-1) {
//no labeled connected neighbor: open a new component
tc=num_components;
boost::add_vertex(Graph);
++num_components;
}
//propagate the id to connected neighbors, recording equivalences as edges
for (int k=0;k<2*D;++k) {
int & tn= comp_lst[it.neighbor(k).get_segment_num()][it.neighbor(k).get_level()][it.neighbor(k).run_type_position()];
if (connected(it.center(),it.neighbor(k))) {
if (tn!=-1) {
if (tc!=tn) boost::add_edge(tc,tn,Graph);
} else {
tn=tc;
}
}
}
if (is_first_run) {
is_first_run=false;
node_of_first_run=tc;
}
node_of_last_run=tc;
}
assert(boost::num_vertices(Graph)==num_components);
//merge equivalent provisional components into final component numbers
std::vector<int> component(boost::num_vertices(Graph));
unsigned int num_components_after = connected_components(Graph, &component[0]);
//determine component number of source region
int source_node=(is_open_boundary_negative)?component[node_of_first_run]:component[node_of_last_run];
Connectivities.clear();
//second pass over active points only: a positive point is connected iff its
//own run's component is the source; a negative point iff any neighbor's is
for (typename LStype::template const_iterator_neighbor_filtered<typename LStype::filter_active,1> it(l);!it.is_finished();it.next()) {
if (it.center().sign()==lvlset::POS_SIGN) {
assert(it.center().get_level()==0);
assert(it.center().get_segment_num()<l.number_of_segments());
Connectivities.push_back(component[comp_lst[it.center().get_segment_num()][0][it.center().run_type_position()]]==source_node); //TODO
} else {
int k;
for (k=0;k<2*D;++k) {
if (component[comp_lst[it.neighbor(k).get_segment_num()][it.neighbor(k).get_level()][it.neighbor(k).run_type_position()]]==source_node) break;
}
Connectivities.push_back(k!=2*D);
}
}
for(unsigned int i=0;i<l.number_of_segments();++i)
{
delete[] comp_lst[i];
}
delete[] comp_lst;
return std::make_pair(num_components, num_components_after);
}
///Determines for every active grid point of level set l whether it is
///visible from the open boundary along open_boundary_direction, sweeping
///from the open side and keeping a running minimum of level set values per
///column; a point is visible iff its value undercuts that minimum.
///Results are written to Visibilities, indexed by active point id.
template <class LStype> void CalculateVisibilities(
const LStype& l,
std::vector<bool>& Visibilities,
int open_boundary_direction,
bool is_open_boundary_negative) {
const int D=LStype::dimensions;
const typename LStype::value_type max=std::numeric_limits<typename LStype::value_type>::max();
Visibilities.resize(l.num_active_pts());
//last seen indices in the dimensions above the open boundary direction;
//a change there means a new slice has started
std::vector<typename LStype::index_type> old_indices(D-1-open_boundary_direction, std::numeric_limits<typename LStype::index_type>::max());
//size of one cross-section below the open boundary direction
unsigned int size=1;
for (int i=0;i<open_boundary_direction;++i) {
assert(!l.grid().is_pos_boundary_infinite(i));
assert(!l.grid().is_neg_boundary_infinite(i));
size*=(l.grid().max_point_index(i)-l.grid().min_point_index(i)+1);
}
//running minimum of values encountered so far, per cross-section position
std::vector<typename LStype::value_type> min_values(size, max);
typename LStype::size_type id=0;
//iterate forwards for a negative open boundary, backwards otherwise
typename LStype::const_iterator_runs it(l,!is_open_boundary_negative);
while (!it.is_finished()) {
//reset the running minima whenever a new slice begins
for (int i=0;i<D-1-open_boundary_direction;++i) {
bool b=false;
if (old_indices[i]!=it.start_indices(i+open_boundary_direction+1)) {
old_indices[i]=it.start_indices(i+open_boundary_direction+1);
b=true;
}
if (b) min_values.assign(size,max);
}
//linearize the run's start/end indices into cross-section positions
unsigned int pos_begin=0;
unsigned int pos_end=0;
for (int i=open_boundary_direction-1;i>=0;--i) {
pos_begin*=(l.grid().max_point_index(i)-l.grid().min_point_index(i)+1);
pos_end*=(l.grid().max_point_index(i)-l.grid().min_point_index(i)+1);
pos_begin+=(it.start_indices(i)-l.grid().min_point_index(i));
pos_end+=(it.end_indices(i)-l.grid().min_point_index(i));
}
if (it.is_active()) {
//visible iff strictly below every value seen earlier in this column;
//id counts traversal order, so it is mirrored for the backward sweep
Visibilities[is_open_boundary_negative?id:(l.num_active_pts()-1-id)]=(it.value()<min_values.at(pos_begin));
++id;
}
for (unsigned int i=pos_begin; i<=pos_end;++i) min_values.at(i)=std::min(min_values.at(i), it.value());
if (is_open_boundary_negative) {
it.next();
} else {
it.previous();
}
}
assert(id==l.num_active_pts());
}
namespace {
///Functor returning the scalar surface velocity of an active grid point,
///delegating to the model's CalculateVelocity with the per-point data rows.
template <class ModelType, int Dimensions> class VelocityClass {
    const ModelType& Model;                   // process model supplying CalculateVelocity
    const double* NormalVector;               // packed normals, Dimensions entries per point
    const double* Coverages;                  // packed coverages, CoverageStorageSize per point
    const double* Rates;                      // packed rates, RatesStorageSize per point
    const std::vector<bool>& Connectivities;  // per-point connectivity flags (may be unused)
    const std::vector<bool>& Visibilities;    // per-point visibility flags (may be unused)
public:
    VelocityClass( const ModelType& m,
                   const double * n,
                   const double * c,
                   const double * r,
                   const std::vector<bool>& co,
                   const std::vector<bool>& vi
                 ) : Model(m), NormalVector(n), Coverages(c), Rates(r), Connectivities(co), Visibilities(vi) {}

    ///Velocity of active point active_pt for material matnum.
    double operator()(unsigned int active_pt,int matnum) const {
        // flags default to true when the model does not request them
        const bool is_connected = Model.CalculateConnectivities ? Connectivities[active_pt] : true;
        const bool is_visible   = Model.CalculateVisibilities   ? Visibilities[active_pt]   : true;
        double velocity;
        Model.CalculateVelocity(
            velocity,
            calc::Make3DVector<Dimensions>(NormalVector + active_pt * Dimensions),
            Coverages + active_pt * Model.CoverageStorageSize,
            Rates + active_pt * Model.RatesStorageSize,
            matnum,
            is_connected,
            is_visible
        );
        return velocity;
    }
};
///Adapter exposing scalar and vector surface velocities of active grid
///points for the level set time integration.
template <class ModelType, int Dimensions> class VelocityClass2 {
    const ModelType& Model;                   // process model supplying the velocity functions
    const double* NormalVector;               // packed normals, Dimensions entries per point
    const double* Coverages;                  // packed coverages, CoverageStorageSize per point
    const double* Rates;                      // packed rates, RatesStorageSize per point
    const std::vector<bool>& Connectivities;  // per-point connectivity flags (may be unused)
    const std::vector<bool>& Visibilities;    // per-point visibility flags (may be unused)
public:
    VelocityClass2( const ModelType& m,
                    const double * n,
                    const double * c,
                    const double * r,
                    const std::vector<bool>& co,
                    const std::vector<bool>& vi
                  ) : Model(m), NormalVector(n), Coverages(c), Rates(r), Connectivities(co), Visibilities(vi) {}

    ///Writes the scalar velocity of active point active_pt for material matnum into v.
    void scalar_velocity(double & v, unsigned int active_pt,int matnum) const {
        const bool is_connected = Model.CalculateConnectivities ? Connectivities[active_pt] : true;
        const bool is_visible   = Model.CalculateVisibilities   ? Visibilities[active_pt]   : true;
        Model.CalculateVelocity(
            v,
            calc::Make3DVector<Dimensions>(NormalVector + active_pt * Dimensions),
            Coverages + active_pt * Model.CoverageStorageSize,
            Rates + active_pt * Model.RatesStorageSize,
            matnum,
            is_connected,
            is_visible);
    }

    ///Writes the vector velocity of active point active_pt into v. The
    ///location parameter is accepted for interface compatibility but is not
    ///forwarded to the model.
    void vector_velocity(double* v, unsigned int active_pt, double location, int matnum) const {
        const bool is_connected = Model.CalculateConnectivities ? Connectivities[active_pt] : true;
        const bool is_visible   = Model.CalculateVisibilities   ? Visibilities[active_pt]   : true;
        Model.CalculateVectorVelocity(
            v,
            calc::Make3DVector<Dimensions>(NormalVector + active_pt * Dimensions),
            Coverages + active_pt * Model.CoverageStorageSize,
            Rates + active_pt * Model.RatesStorageSize,
            matnum,
            is_connected,
            is_visible);
    }
};
///Adapter exposing per-active-point simulation data (velocity, coverages,
///rates, material) as numbered output "series" for the surface file writers.
///Series layout: [0]=velocity, [1..CoverageStorageSize]=coverages,
///[..+RatesStorageSize]=rates, last=material.
template <class ModelType, int Dimensions>
class DataAccessClass {
const ModelType& Model;
const double* Coverages;
const double* Rates;
const double* NormalVector;
const std::vector<unsigned int>& Materials;
const std::vector<bool>& Connectivities;
const std::vector<bool>& Visibilities;
bool OutputVelocities;
bool OutputCoverages;
bool OutputRates;
bool OutputMaterials;
public:
DataAccessClass( const ModelType& m,
const double * c,
const double * r,
const double * n,
const std::vector<unsigned int>& ma,
const std::vector<bool>& co,
const std::vector<bool>& vi,
bool out_v=false,
bool out_c=false,
bool out_r=false,
bool out_m=false
) : Model(m), Coverages(c), Rates(r), NormalVector(n), Materials(ma), Connectivities(co), Visibilities(vi), OutputVelocities(out_v), OutputCoverages(out_c), OutputRates(out_r), OutputMaterials(out_m) {}
///Total series count: velocity + coverages + rates + material.
int number_of_series() const {
return (1+ModelType::CoverageStorageSize+ModelType::RatesStorageSize+1);
}
///Formats the value of the given series for one active point as text.
///Series 0 recomputes the velocity from the model on the fly.
template <class PT_ID_TYPE>
std::string get_series_data(PT_ID_TYPE active_pt_id, int series) const {
std::ostringstream out;
if (series==0) {
double v=0.;
unsigned int mat=0;
bool connected=true;
bool visible=true;
//empty optional arrays fall back to material 0 / connected / visible
if (Materials.size()>0) mat= Materials[active_pt_id];
if (Connectivities.size()>0) connected=Connectivities[active_pt_id];
if (Visibilities.size()>0) visible=Visibilities[active_pt_id];
Model.CalculateVelocity(
v,
calc::Make3DVector<Dimensions>(NormalVector+active_pt_id*Dimensions),
Coverages+active_pt_id*Model.CoverageStorageSize,
Rates+active_pt_id*Model.RatesStorageSize,
mat,
connected,
visible
);
out << static_cast<float>(v);
} else if (series<=ModelType::CoverageStorageSize) {
out << static_cast<float>(Coverages[active_pt_id*ModelType::CoverageStorageSize+series-1]);
} else if (series<=ModelType::CoverageStorageSize+ModelType::RatesStorageSize) {
out << static_cast<float>(Rates[active_pt_id*ModelType::RatesStorageSize+series-ModelType::CoverageStorageSize-1]);
} else {
unsigned int mat=0;
if (Materials.size()>0) mat= Materials[active_pt_id];
out << mat;
}
return out.str();
}
///Human-readable name of the series ("Velocities", "CoverageN", "RateN", "Material").
std::string get_series_label(int series) const {
if (series==0) {
return std::string("Velocities");
} else if (series<=ModelType::CoverageStorageSize) {
std::ostringstream out;
out << "Coverage" << series-1;
return out.str();
} else if (series<=ModelType::CoverageStorageSize+ModelType::RatesStorageSize) {
std::ostringstream out;
out << "Rate" << series-ModelType::CoverageStorageSize-1;
return out.str();
} else {
return std::string("Material");
}
}
///Data type of the series as expected by the writers: all numeric series
///are "float", the material series is "int".
std::string get_series_type(int series) const {
if (series<=ModelType::CoverageStorageSize+ModelType::RatesStorageSize) {
return std::string("float");
} else {
return std::string("int");
}
}
///Whether the given series was requested for output.
bool get_series_output(int series) const {
if (series==0) {
return OutputVelocities;
} else if (series<=ModelType::CoverageStorageSize) {
return OutputCoverages;
} else if (series<=ModelType::CoverageStorageSize+ModelType::RatesStorageSize) {
return OutputRates;
} else {
return OutputMaterials;
}
}
};
}
///Planarization process: appends a plane level set at the model's coordinate
///and clips every existing level set against it. The plane is kept as the
///new top level set when Model.fill_up() is set, otherwise discarded.
template <class LevelSetsType, class ParameterType, class ProcessParameterType , class OutputInfoType> void ExecuteProcess(
    LevelSetsType& LevelSets,
    const model::Planarization& Model,
    const ParameterType& Parameter,
    const ProcessParameterType& ProcessParameter,
    OutputInfoType & output_info
) {
    typedef typename LevelSetsType::value_type LevelSetType;
    // append the planarization plane as the top-most level set
    LevelSets.push_back(LevelSetType(LevelSets.back().grid(), Model.get_coordinate()/Parameter.grid_delta, Parameter.open_boundary, Parameter.open_boundary_negative));
    typename LevelSetsType::iterator ls_it = LevelSets.begin();
    while (&(*ls_it) != &(LevelSets.back())) {
        ls_it->max(LevelSets.back()); //adjust all level set functions below the plane
        ls_it->prune();               //remove grid points which do not have at least one opposite signed neighbor
        ls_it->segment();
        ++ls_it;
    }
    // drop the plane again unless the region below it should be filled up
    if (!Model.fill_up()) LevelSets.pop_back();
    //TODO output and time
}
///Mask process: reads a mask from a .lvst level set file, a VTK surface, or
///a volume geometry file, converts it to a level set, wraps the existing
///level sets around it and pushes it as the lowest (first) level set.
template <class LevelSetsType, class ParameterType, class ProcessParameterType, class OutputInfoType> void ExecuteProcess(
LevelSetsType& LevelSets,
const model::Mask& Model,
const ParameterType& Parameter,
const ProcessParameterType& ProcessParameter,
OutputInfoType & output_info
) {
typedef typename LevelSetsType::value_type LevelSetType;
const int D=LevelSetType::dimensions;
geometry::geometry<D> mask_geometry;
geometry::surface<D> mask_surface;
LevelSetType mask_ls(LevelSets.back().grid());
//a .lvst file is imported directly; anything else goes through a surface
if(Model.file_name().find(".lvst") != std::string::npos){
mask_ls.import_levelset(Model.file_name());
} else {
if (Model.surface()) {
mask_surface.ReadVTK(Model.file_name(), Parameter.input_scale, Parameter.input_transformation,
Parameter.input_transformation_signs, Parameter.change_input_parity, Parameter.input_shift);
} else {
mask_geometry.Read(Model.file_name(),Parameter.input_scale,Parameter.input_transformation, Parameter.input_transformation_signs,
Parameter.change_input_parity, Parameter.material_mapping, Parameter.input_shift, Parameter.ignore_materials);
}
// mask_geometry.Read(Model.file_name(), Parameter.input_scale, Parameter.input_transformation, Parameter.input_transformation_signs, Parameter.change_input_parity, Parameter.material_mapping, Parameter.input_shift, Parameter.ignore_materials);
typedef std::list<geometry::surface<D> > SurfacesType;
SurfacesType Surfaces;
if (Model.surface()) {
Surfaces.push_back(mask_surface);
} else {
//mark geometry faces that lie on periodic/reflective/extended boundaries
//(and optionally the domain bottom) for removal during surface extraction
std::bitset<2*D> remove_flags;
for (int i=0;i<D;++i) {
if (Parameter.boundary_conditions[i].min==bnc::PERIODIC_BOUNDARY ||
Parameter.boundary_conditions[i].min==bnc::REFLECTIVE_BOUNDARY ||
Parameter.boundary_conditions[i].min==bnc::EXTENDED_BOUNDARY) {
remove_flags.set(i);
} else if (i==Parameter.open_boundary && !Parameter.open_boundary_negative && Model.remove_bottom()) {
remove_flags.set(i);
}
if (Parameter.boundary_conditions[i].min==bnc::PERIODIC_BOUNDARY ||
Parameter.boundary_conditions[i].min==bnc::REFLECTIVE_BOUNDARY ||
Parameter.boundary_conditions[i].min==bnc::EXTENDED_BOUNDARY) {
remove_flags.set(i+D);
} else if (i==Parameter.open_boundary && Parameter.open_boundary_negative && Model.remove_bottom()) {
remove_flags.set(i+D);
}
}
msg::print_start("Extract surface and interfaces...");
geometry::TransformGeometryToSurfaces(mask_geometry, Surfaces, remove_flags, Parameter.grid_delta*Parameter.snap_to_boundary_eps, Parameter.report_import_errors);
msg::print_done();
}
msg::print_start("Distance transformation...");
//LevelSetType mask_ls(LevelSets.back().grid());
init(mask_ls,Surfaces.back(),Parameter.report_import_errors);
msg::print_done();
}
// only put mask, where no other LS was before
if(!Model.ignore_other_materials()){
mask_ls.invert();
for(auto LS=LevelSets.begin(); LS != LevelSets.end(); ++LS){
mask_ls.min(*LS);
}
mask_ls.invert();
}
// wrap all higher levelsets around mask before pushing it to the front
for(auto LS=LevelSets.begin(); LS != LevelSets.end(); ++LS){
LS->min(mask_ls);
}
// now put the mask as the lowest levelset
LevelSets.push_front(mask_ls);
//TODO output and time
}
///Boolean operation process: combines a level set read from a file (VTK
///surface or volume geometry) or an internal level set selected by
///Model.levelset() with the existing level sets. Model.level()>0 applies a
///min (union) to the top |level| level sets; Model.level()<0 applies a max
///(intersection/removal) to the top |level| level sets. Optionally writes
///one set of output files afterwards.
template <class LevelSetsType, class ParameterType, class ProcessParameterType, class OutputInfoType> void ExecuteProcess(
    LevelSetsType& LevelSets,
    const model::BooleanOps& Model,
    const ParameterType& Parameter,
    const ProcessParameterType& ProcessParameter,
    OutputInfoType & output_info
) {
    if (Model.level()==0) return;
    typedef typename LevelSetsType::value_type LevelSetType;
    const int D=LevelSetType::dimensions;
    LevelSetType* boolop_ls;
    // BUGFIX: this level set must live at function scope -- boolop_ls may
    // point to it and is dereferenced until the end of the function.
    // Previously it was declared inside the if-branch below, leaving
    // boolop_ls dangling after the branch (use after scope exit, UB).
    LevelSetType file_ls(LevelSets.back().grid());
    if(!Model.file_name().empty()){
        geometry::geometry<D> boolop_geometry;
        geometry::surface<D> boolop_surface;// = new geometry::surface<D>;
        if (Model.surface()) {
            boolop_surface.ReadVTK(Model.file_name(), Parameter.input_scale, Parameter.input_transformation,
                                   Parameter.input_transformation_signs, Parameter.change_input_parity, Parameter.input_shift);
        } else {
            boolop_geometry.Read(Model.file_name(),Parameter.input_scale,Parameter.input_transformation, Parameter.input_transformation_signs,
                                 Parameter.change_input_parity, Parameter.material_mapping, Parameter.input_shift, Parameter.ignore_materials);
        }
        typedef std::list<geometry::surface<D> > SurfacesType;
        SurfacesType Surfaces;
        if (Model.surface()) {
            Surfaces.push_back(boolop_surface);
        } else {
            // mark geometry faces on periodic/reflective/extended boundaries
            // (and optionally the domain bottom) for removal during extraction
            std::bitset<2*D> remove_flags;
            for (int i=0;i<D;++i) {
                if (Parameter.boundary_conditions[i].min==bnc::PERIODIC_BOUNDARY ||
                        Parameter.boundary_conditions[i].min==bnc::REFLECTIVE_BOUNDARY ||
                        Parameter.boundary_conditions[i].min==bnc::EXTENDED_BOUNDARY) {
                    remove_flags.set(i);
                } else if (i==Parameter.open_boundary && !Parameter.open_boundary_negative && Model.remove_bottom()) {
                    remove_flags.set(i);
                }
                if (Parameter.boundary_conditions[i].min==bnc::PERIODIC_BOUNDARY ||
                        Parameter.boundary_conditions[i].min==bnc::REFLECTIVE_BOUNDARY ||
                        Parameter.boundary_conditions[i].min==bnc::EXTENDED_BOUNDARY) {
                    remove_flags.set(i+D);
                } else if (i==Parameter.open_boundary && Parameter.open_boundary_negative && Model.remove_bottom()) {
                    remove_flags.set(i+D);
                }
            }
            //std::cout << "transform to surface\n";
            geometry::TransformGeometryToSurfaces(boolop_geometry, Surfaces, remove_flags, Parameter.grid_delta*Parameter.snap_to_boundary_eps, Parameter.report_import_errors);
        }
        init(file_ls,Surfaces.back(),Parameter.report_import_errors);
        boolop_ls = &file_ls;
    }
    else if(Model.levelset()>0){ //If internal levelset should be used
        typename LevelSetsType::iterator it = LevelSets.begin();
        for(int i=0; i<Model.levelset(); ++i) ++it;
        boolop_ls = &(*it);
    } else{
        return;
    }
    if (Model.level()>0) {
        if (Model.invert()) boolop_ls->invert();
        // advance to the first of the top Model.level() level sets
        int j=0;
        typename LevelSetsType::iterator ls_it = LevelSets.begin();
        for (;j<static_cast<int>(LevelSets.size())-Model.level();++j) {
            ++ls_it;
        }
        while (ls_it!=LevelSets.end()) {
            ls_it->min(*boolop_ls);
            ls_it->prune();
            ls_it->segment();
            ++ls_it;
        }
        if (Model.invert() && Model.levelset()>=0) boolop_ls->invert(); //Invert again so that the original levelset is not changed
    } else { //Model.level()<0
        if (Model.invert()) boolop_ls->invert();
        // advance to the first of the top |Model.level()| level sets,
        // keeping ls_it_old one position behind for optional surface wrapping
        int j=0;
        typename LevelSetsType::iterator ls_it_old = LevelSets.begin();
        typename LevelSetsType::iterator ls_it = LevelSets.begin();
        for (;j<static_cast<int>(LevelSets.size())+Model.level();++j) {
            ls_it_old=ls_it;
            ++ls_it;
        }
        if(!Model.wrap_surface()) j=0;
        while (ls_it!=LevelSets.end()) {
            ls_it->max(*boolop_ls);
            if (j>0) ls_it->min(*ls_it_old);
            ls_it->prune();
            ls_it->segment();
            ++ls_it;
        }
        if (Model.invert() && Model.levelset()>=0) boolop_ls->invert(); //Invert again so that the original levelset is not changed
    }
    //Write one output if there is any output time or there is final output
    // (De Morgan of the previous double-negated condition)
    if (ProcessParameter.output_times.empty() && !ProcessParameter.final_output) return;
    {
        std::ostringstream oss;
        oss << "Writing output " << output_info.output_counter;
        //oss << " (time = " << RelativeTime << ")...";
        msg::print_start(oss.str());
    }
    typename LevelSetsType::iterator it=LevelSets.begin();
    for (unsigned int i=0;i<LevelSets.size();i++) {
        it->prune();
        if (Parameter.print_dx) {
            std::ostringstream oss;
            oss << Parameter.output_path<< output_info.file_name <<"_" << i << "_" << output_info.output_counter << ".dx";
#ifdef VERBOSE
            msg::print_message("print dx");
#endif
            write_explicit_surface_opendx(*it,oss.str());
        }
        if (Parameter.print_vtk) {
            std::ostringstream oss;
            oss << Parameter.output_path<< output_info.file_name <<"_" << i << "_" << output_info.output_counter << ".vtk";
#ifdef VERBOSE
            msg::print_message("print vtk");
#endif
            write_explicit_surface_vtk(*it,oss.str());
        }
        if (Parameter.print_lvst) {
            std::ostringstream oss;
            oss << Parameter.output_path<< output_info.file_name <<"_" << i << "_" << output_info.output_counter << ".lvst";
#ifdef VERBOSE
            msg::print_message("print lvst");
#endif
            it->export_levelset(oss.str(), Parameter.bits_per_distance);
        }
        it++;
    }
    output_info.output_counter++;
    msg::print_done();
}
//Topography simulation - execute a topography changing process according to required model and parameters
template <class LevelSetsType, class ModelType, class ParameterType, class ProcessParameterType, class OutputInfoType> void ExecuteProcess(
LevelSetsType& LevelSets,
const ModelType& Model,
const ParameterType& Parameter,
const ProcessParameterType& ProcessParameter,
OutputInfoType & output_info,
std::vector<double>& Coverages//,
// std::vector<double> Rates//,
// int step_cycle
) {
const int D=LevelSetsType::value_type::dimensions;
const std::vector<double> & OutputTimes=ProcessParameter.output_times; //vector of times when output will be recorded
std::vector<double>::const_iterator OutputTimesIter = OutputTimes.begin();
//std::lower_bound(OutputTimes.begin(), OutputTimes.end(), AbsoluteTime);
//----------------------------------------------------------------------------------------------------------------------------------------
// while (LevelSets.size()>1) {
// LevelSets.pop_back();
// }
// typedef typename LevelSetsType::value_type LevelSetType;
// LevelSets.push_front(LevelSetType(LevelSets.back().grid(), 0, Parameter.open_boundary, !Parameter.open_boundary_negative));
//----------------------------------------------------------------------------------------------------------------------------------------
int init_cycles=ProcessParameter.StartIterationCycles; //number of initial iteration cycles
int rec_cycles=ProcessParameter.IterationCycles; //number of subsequent iteration cycles
geom::cells<ParameterType::Dimension> Cells;
// std::vector<double> Coverages(std::max(LevelSets.back().num_active_pts()* Model.CoverageStorageSize,1u),0.);
std::vector<double> Rates(1,0);
std::vector<double> NormalVectors;
std::vector<double> DistancesToReceiver;
std::vector<unsigned int> PointMaterials;
std::vector<bool> Connectivities;
std::vector<bool> Visibilities;
//time statistics
const std::string TimeStatFileName=Parameter.output_path+"StatisticsTimes.cvs";
std::ofstream f;
//unsigned int LineNumber;
if (Parameter.print_statistics) {
if(!std::ifstream(TimeStatFileName.c_str())) {
#ifdef VERBOSE
msg::print_message("Print Header in StatisticsTimes.cvs");
#endif
f.open(TimeStatFileName.c_str());
f << "Time for expansion" <<";";
f << "Time for normal vector calc." <<";";
f << "Determining materials" <<";";
f << "Determining connectivities" <<";";
f << "Reduced graph num vertices" <<";";
f << "num componenets" <<";";
f << "Time for smoothing" <<";";
f << "Determining visibilities" <<";";
f << "Setup active cells" <<";";
f << "Setup partition" <<";";
f << "Rate calculation" <<";";
f << "Memory Ray Tracing Data Structure"<<";";
f << "Level set time integration" <<";";
f << "Output" <<";";
f << "Time for Output" <<";";
f << "Total time step excl. Output" <<";";
f << "Total time step incl. Output" <<";"; //TODO
f << "Chosen time step" <<";"; //TODO
f << "Time" <<";"; //TODO
f << "Left Time" <<std::endl;
f.close();
}
}
const double & ProcessTime = ProcessParameter.ProcessTime;
double RelativeTime=0;
//while ((OutputTimesIter!=OutputTimes.end()) && (RelativeTime>*OutputTimesIter)) ++OutputTimesIter;
#ifdef VERBOSE
msg::print_message("Start loop over time");
#endif
while(true) {
// std::vector<double>& Coverages_temp = Coverages;
double TimeTotalExclOutput=-my::time::GetTime();
double TimeTotalInclOutput=-my::time::GetTime();
double TimeExpansion=0;
double TimeNormals=0;
double TimeMaterials=0;
double TimeCells=0;
double TimePartition=0;
double TimeRates=0;
double TimeTimeIntegration=0;
double TimeOutput=0;
double TimeConnectivities=0;
double TimeVisibilities=0;
double TimeSmoothing=0;
double ray_tracing_memory=0;
unsigned int graph_size=0;
unsigned int num_components=0;
bool MakeOutput=false;
if (OutputTimesIter!=OutputTimes.end()) {
assert(RelativeTime<=*OutputTimesIter);
if (RelativeTime==*OutputTimesIter) {
MakeOutput=true;
OutputTimesIter++;
}
}
//if ((RelativeTime==EndTime) && (ProcessParameter.final_output)) MakeOutput=true;
//if ((RelativeTime==StartTime) && (ProcessParameter.initial_output)) MakeOutput=true;
if (!MakeOutput) if (RelativeTime==ProcessTime) break;
//###########################
// smooth surface level set
//###########################
if (ProcessParameter.smoothing_material_level>0) {
#ifdef VERBOSE
msg::print_message("smoothing");
#endif
TimeSmoothing-=my::time::GetTime();
double time_step;
int dummy;
int counter=0;
do {
time_step=lvlset::time_integrate(
LevelSets,
dummy,
lvlset::SMOOTHING_SCHEME(ProcessParameter.smoothing_material_level, ProcessParameter.smoothing_max_curvature, ProcessParameter.smoothing_min_curvature),
Parameter.cfl_condition,
std::numeric_limits<double>::max(),
Coverages,
Model.CoverageStorageSize);
counter++;
} while (time_step!=std::numeric_limits<double>::max() && counter < ProcessParameter.smoothing_max_iterations);
if (time_step!=std::numeric_limits<double>::max()) {
msg::print_message("maximum number of iterations reached during smoothing operation");
}
TimeSmoothing+=my::time::GetTime();
}
/*
//Output statistics for level sets
if (Parameter.print_statistics) {
TimeTotalExclOutput+=my::time::GetTime();
int i=0;
for (typename LevelSetsType::iterator it=LevelSets.begin();it!=LevelSets.end();++it) {
std::ostringstream tmp;
tmp << Parameter.output_path << "StatisticsLevelSet" << i << ".cvs";
lvlset::misc::PrintStatistics(*it, tmp.str());
i++;
}
TimeTotalExclOutput-=my::time::GetTime();
}
*/
if (Model.ReemissionIsMaterialDependent) {
#ifdef VERBOSE
msg::print_message("determine top most layer");
#endif
TimeMaterials-=my::time::GetTime();
DetermineTopMostLayer(LevelSets, PointMaterials);
TimeMaterials+=my::time::GetTime();
}
if (Model.CalculateConnectivities) {
#ifdef VERBOSE
msg::print_message("calculate connectivities");
#endif
TimeConnectivities-=my::time::GetTime();
std::pair<unsigned int, unsigned int> x=CalculateConnectivities(LevelSets.back(), Connectivities, Parameter.open_boundary_negative);
graph_size=x.first;
num_components=x.second;
TimeConnectivities+=my::time::GetTime();
}
if (Model.CalculateVisibilities) {
#ifdef VERBOSE
msg::print_message("calculate visibilities");
#endif
TimeVisibilities-=my::time::GetTime();
CalculateVisibilities(LevelSets.back(), Visibilities, Parameter.open_boundary, Parameter.open_boundary_negative);
TimeVisibilities+=my::time::GetTime();
}
if ((Model.CalculateNormalVectors) || (Model.NumberOfParticleTypes>0)){
#ifdef VERBOSE
msg::print_message("expansion");
#endif
TimeExpansion-=my::time::GetTime();
LevelSets.back().expand(3);
TimeExpansion+=my::time::GetTime();
#ifdef VERBOSE
msg::print_message("normal vector calculation");
#endif
TimeNormals-=my::time::GetTime();
calc::CalculateNormalVectors(LevelSets.back(), NormalVectors, DistancesToReceiver, Parameter.open_boundary, Parameter.open_boundary_negative, Parameter.receptor_radius, lvlset::vec<double,D>(Parameter.default_disc_orientation));
TimeNormals+=my::time::GetTime();
}
double MaxStep=0;
if (Model.NumberOfParticleTypes>0) {
#ifdef VERBOSE
msg::print_message("start monte carlo");
#endif
std::vector<lvlset::vec<int,ParameterType::Dimension > > CellCoordinates;
TimeExpansion-=my::time::GetTime();
LevelSets.back().add_voxel_corners();
TimeExpansion+=my::time::GetTime();
TimeCells-=my::time::GetTime();
calc::SetupCells(LevelSets.back(),Cells, CellCoordinates, NormalVectors, DistancesToReceiver, Parameter.receptor_radius);
TimeCells+=my::time::GetTime();
typedef typename calc::PartitionTraits<ParameterType> tmp_type;
#ifdef COMPILE_PARTITION_NEIGHBOR_LINKS_ARRAYS
if (ProcessParameter.partition_data_structure==partition::NEIGHBOR_LINKS_ARRAYS) {
partition::NeighborLinksArrays<tmp_type> Partition;
TimePartition-=my::time::GetTime();
Partition.Setup(0, Cells.size(), CellCoordinates, LevelSets.back().grid().boundary_conditions(),ProcessParameter.partition_splitting_strategy,ProcessParameter.partition_surface_area_heuristic_lambda);
TimePartition+=my::time::GetTime();
ray_tracing_memory=Partition.get_memory();
if (Parameter.print_statistics) {
TimeTotalExclOutput+=my::time::GetTime();
Partition.PrintStatistics(Parameter.output_path+"StatisiticsPartition.cvs");
TimeTotalExclOutput-=my::time::GetTime();
}
TimeRates-=my::time::GetTime();
do {
calc::CalculateRates(Model,Parameter,Partition,LevelSets.back(),NormalVectors,DistancesToReceiver,Coverages,Rates,PointMaterials,Cells,RelativeTime);
//std::cout << "RelativeTime = " << RelativeTime << "\n";
calc::UpdateCoverages(Rates, Coverages, Model, MaxStep);//, RelativeTime);
// //std::cout << "MaxStep = " << MaxStep << "\n";
init_cycles--;
} while (init_cycles>=0);
init_cycles=rec_cycles;
TimeRates+=my::time::GetTime();
}
#endif
#ifdef COMPILE_PARTITION_FULL_GRID
if (ProcessParameter.partition_data_structure==partition::FULL_GRID) {
partition::FullGrid<tmp_type> Partition;
TimePartition-=my::time::GetTime();
Partition.Setup(0, Cells.size(), CellCoordinates, LevelSets.back().grid().boundary_conditions(),ProcessParameter.partition_splitting_strategy,ProcessParameter.partition_surface_area_heuristic_lambda);
TimePartition+=my::time::GetTime();
ray_tracing_memory=Partition.get_memory();
if (Parameter.print_statistics) {
TimeTotalExclOutput+=my::time::GetTime();
Partition.PrintStatistics(Parameter.output_path+"StatisiticsPartition.cvs");
TimeTotalExclOutput-=my::time::GetTime();
}
TimeRates-=my::time::GetTime();
do {
calc::CalculateRates(Model,Parameter,Partition,LevelSets.back(),NormalVectors,DistancesToReceiver,Coverages,Rates,PointMaterials,Cells,RelativeTime);
calc::UpdateCoverages(Rates, Coverages, Model, MaxStep);//, RelativeTime);
init_cycles--;
} while (init_cycles>=0);
init_cycles=rec_cycles;
TimeRates+=my::time::GetTime();
}
#endif
#ifdef COMPILE_UP_DOWN_LINKED_TREE
if (ProcessParameter.partition_data_structure==partition::UP_DOWN_LINKED_TREE) {
partition::UpDownLinkTree<tmp_type> Partition;
TimePartition-=my::time::GetTime();
Partition.Setup(0, Cells.size(), CellCoordinates, LevelSets.back().grid().boundary_conditions(),ProcessParameter.partition_splitting_strategy,ProcessParameter.partition_surface_area_heuristic_lambda);
TimePartition+=my::time::GetTime();
ray_tracing_memory=Partition.get_memory();
if (Parameter.print_statistics) {
TimeTotalExclOutput+=my::time::GetTime();
Partition.PrintStatistics(Parameter.output_path+"StatisiticsPartition.cvs");
TimeTotalExclOutput-=my::time::GetTime();
}
TimeRates-=my::time::GetTime();
do {
calc::CalculateRates(Model,Parameter,Partition,LevelSets.back(),NormalVectors,DistancesToReceiver,Coverages,Rates,PointMaterials,Cells,RelativeTime);
calc::UpdateCoverages(Rates, Coverages, Model, MaxStep);//, RelativeTime);
init_cycles--;
} while (init_cycles>=0);
init_cycles=rec_cycles;
TimeRates+=my::time::GetTime();
}
#endif
}
//#######################################
// output
//#######################################
TimeTotalExclOutput+=my::time::GetTime();
TimeOutput-=my::time::GetTime();
if (MakeOutput) {
#ifdef VERBOSE
msg::print_message("make output");
#endif
DataAccessClass<ModelType, ParameterType::Dimension> Data( Model,
&Coverages[0],
&Rates[0],
&NormalVectors[0],
PointMaterials,
Connectivities,
Visibilities,
ProcessParameter.print_velocities || Parameter.print_velocities,
ProcessParameter.print_coverages || Parameter.print_coverages,
ProcessParameter.print_rates || Parameter.print_rates,
ProcessParameter.print_materials || Parameter.print_materials
);
{
std::ostringstream oss;
oss << "Writing output " << output_info.output_counter;
oss << " (time = " << RelativeTime << ")...";
msg::print_start(oss.str());
}
typename LevelSetsType::iterator it=LevelSets.begin();
for (unsigned int i=0;i<LevelSets.size();i++) {
it->prune();
if (Parameter.print_dx) {
std::ostringstream oss;
oss << Parameter.output_path<< output_info.file_name <<"_" << i << "_" << output_info.output_counter << ".dx";
#ifdef VERBOSE
msg::print_message("print dx");
#endif
if (i!=LevelSets.size()-1) {
write_explicit_surface_opendx(*it,oss.str());
} else {
write_explicit_surface_opendx(*it,oss.str(), Data);
}
}
if (Parameter.print_vtk) {
std::ostringstream oss;
oss << Parameter.output_path<< output_info.file_name <<"_" << i << "_" << output_info.output_counter << ".vtk";
#ifdef VERBOSE
msg::print_message("print vtk");
#endif
if (i!=LevelSets.size()-1) {
write_explicit_surface_vtk(*it,oss.str());
} else {
write_explicit_surface_vtk(*it,oss.str(), Data);
}
}
if (Parameter.print_lvst) {
std::ostringstream oss;
oss << Parameter.output_path<< output_info.file_name <<"_" << i << "_" << output_info.output_counter << ".lvst";
#ifdef VERBOSE
msg::print_message("print lvst");
#endif
it->export_levelset(oss.str(), Parameter.bits_per_distance);
}
it++;
}
output_info.output_counter++;
msg::print_done();
}
TimeOutput+=my::time::GetTime();
TimeTotalExclOutput-=my::time::GetTime();
// //std::cout << "Relative Time: " << RelativeTime << "\n";
bool is_finished=(RelativeTime==ProcessTime);
//#######################################
// time integration
//#######################################
#ifdef VERBOSE
msg::print_message("time integration");
#endif
double time_step=0;
if (!is_finished) {
//determine next time stop
double NextTimeStop=std::min(ProcessTime, std::min(RelativeTime+ProcessParameter.MaxTimeStep,RelativeTime+MaxStep));
if (OutputTimesIter!=OutputTimes.end()) NextTimeStop=std::min(NextTimeStop, *OutputTimesIter);
double MaxTimeStep=NextTimeStop-RelativeTime;
// //std::cout << "MaxTimeStep = " << MaxTimeStep << "\n";
if (ProcessParameter.FiniteDifferenceScheme==ENGQUIST_OSHER_1ST_ORDER) {
VelocityClass2<ModelType, ParameterType::Dimension> Velocities(Model, &NormalVectors[0], &Coverages[0], &Rates[0], Connectivities, Visibilities);
LevelSetsType& LevelSets_temp=LevelSets;
TimeExpansion-=my::time::GetTime();
LevelSets_temp.back().expand(3);
TimeExpansion+=my::time::GetTime();
TimeTimeIntegration-=my::time::GetTime();
time_step=lvlset::time_integrate(
LevelSets_temp,
Velocities,
lvlset::ENGQUIST_OSHER_SV_1ST_ORDER,
Parameter.cfl_condition,
MaxTimeStep,
Coverages,
Model.CoverageStorageSize);
// if (time_step == MaxTimeStep) {
// LevelSets.back().expand(3);
// LevelSets=LevelSets_temp;
// } else {
// continue;
// }
TimeTimeIntegration+=my::time::GetTime();
} else if (ProcessParameter.FiniteDifferenceScheme==ENGQUIST_OSHER_2ND_ORDER) {
VelocityClass2<ModelType, ParameterType::Dimension> Velocities(Model, &NormalVectors[0], &Coverages[0], &Rates[0], Connectivities, Visibilities);
TimeExpansion-=my::time::GetTime();
LevelSets.back().expand(5);
TimeExpansion+=my::time::GetTime();
TimeTimeIntegration-=my::time::GetTime();
time_step=lvlset::time_integrate(
LevelSets,
Velocities,
lvlset::ENGQUIST_OSHER_SV_2ND_ORDER,
Parameter.cfl_condition,
MaxTimeStep,
Coverages,
Model.CoverageStorageSize);
TimeTimeIntegration+=my::time::GetTime();
} else if (ProcessParameter.FiniteDifferenceScheme==LAX_FRIEDRICHS_1ST_ORDER) { //TODO
VelocityClass<ModelType, ParameterType::Dimension> Velocities(Model, &NormalVectors[0], &Coverages[0], &Rates[0], Connectivities, Visibilities);
TimeExpansion-=my::time::GetTime();
LevelSets.back().expand(3);
TimeExpansion+=my::time::GetTime();
TimeTimeIntegration-=my::time::GetTime();
time_step=lvlset::time_integrate(
LevelSets,
Velocities,
lvlset::LAX_FRIEDRICHS_SCALAR_1ST_ORDER(ProcessParameter.LaxFriedrichsDissipationCoefficient),
Parameter.cfl_condition,
MaxTimeStep,
Coverages,
Model.CoverageStorageSize);
TimeTimeIntegration+=my::time::GetTime();
} else assert(0);
if (time_step>=MaxTimeStep) {
assert(time_step==MaxTimeStep);
time_step=MaxTimeStep;
RelativeTime=NextTimeStop;
} else {
RelativeTime+=time_step;
}
}
TimeTotalExclOutput+=my::time::GetTime();
TimeTotalInclOutput+=my::time::GetTime();
//#######################################
// print statistics
//#######################################
if (Parameter.print_statistics) {
#ifdef VERBOSE
msg::print_message("print statistics");
#endif
f.open(TimeStatFileName.c_str(),std::ios_base::app);
f<<TimeExpansion <<";";
f<<TimeNormals <<";";
f<<TimeMaterials <<";";
f<<TimeConnectivities <<";";
f<<graph_size <<";";
f<<num_components <<";";
f<<TimeSmoothing <<";";
f<<TimeVisibilities <<";";
f<<TimeCells <<";";
f<<TimePartition <<";";
f<<TimeRates <<";";
f<<ray_tracing_memory <<";";
f<<TimeTimeIntegration <<";";
f<<MakeOutput <<";";
f<<TimeOutput <<";";
f<<TimeTotalExclOutput <<";";
f<<TimeTotalInclOutput <<";";
f<<time_step <<";";
f<<RelativeTime <<";";
f<<(ProcessTime-RelativeTime) << std::endl;
f.close();
}
if (is_finished) break;
}
}
///Includes loop over full process time to run the simulation.
template <class LevelSetsType, class ModelType, class ParameterType, class ProcessParameterType, class OutputInfoType> void ExecuteProcess(
LevelSetsType& LevelSets,
const ModelType& Model,
const ParameterType& Parameter,
const ProcessParameterType& ProcessParameter,
OutputInfoType & output_info
) {
const int D=LevelSetsType::value_type::dimensions;
const std::vector<double> & OutputTimes=ProcessParameter.output_times; //vector of times when output will be recorded
const std::vector<double> & OutputVolume=ProcessParameter.output_volume; //vector of times for volume output
std::vector<double>::const_iterator OutputTimesIter = OutputTimes.begin();
std::vector<double>::const_iterator OutputVolumeIter = OutputVolume.begin();
//std::lower_bound(OutputTimes.begin(), OutputTimes.end(), AbsoluteTime);
//----------------------------------------------------------------------------------------------------------------------------------------
// while (LevelSets.size()>1) {
// LevelSets.pop_back();
// }
// typedef typename LevelSetsType::value_type LevelSetType;
// LevelSets.push_front(LevelSetType(LevelSets.back().grid(), 0, Parameter.open_boundary, !Parameter.open_boundary_negative));
//----------------------------------------------------------------------------------------------------------------------------------------
int init_cycles=ProcessParameter.StartIterationCycles; //number of initial iteration cycles
int rec_cycles=ProcessParameter.IterationCycles; //number of subsequent iteration cycles
geom::cells<ParameterType::Dimension> Cells;
std::vector<double> Coverages(std::max(LevelSets.back().num_active_pts()* Model.CoverageStorageSize,1u),0.);
std::vector<double> Rates(1,0);
std::vector<double> NormalVectors;
std::vector<double> DistancesToReceiver;
std::vector<unsigned int> PointMaterials;
std::vector<bool> Connectivities;
std::vector<bool> Visibilities;
//time statistics
const std::string TimeStatFileName=Parameter.output_path + "StatisticsTimes.cvs";
std::ofstream f;
//unsigned int LineNumber;
if (Parameter.print_statistics) {
if(!std::ifstream(TimeStatFileName.c_str())) {
#ifdef VERBOSE
msg::print_message("Print Header in StatisticsTimes.cvs");
#endif
f.open(TimeStatFileName.c_str());
f << "Time for expansion" <<";";
f << "Time for normal vector calc." <<";";
f << "Determining materials" <<";";
f << "Determining connectivities" <<";";
f << "Reduced graph num vertices" <<";";
f << "num componenets" <<";";
f << "Time for smoothing" <<";";
f << "Determining visibilities" <<";";
f << "Setup active cells" <<";";
f << "Setup partition" <<";";
f << "Rate calculation" <<";";
f << "Memory Ray Tracing Data Structure"<<";";
f << "Level set time integration" <<";";
f << "Output" <<";";
f << "Time for Output" <<";";
f << "Total time step excl. Output" <<";";
f << "Total time step incl. Output" <<";"; //TODO
f << "Chosen time step" <<";"; //TODO
f << "Time" <<";"; //TODO
f << "Left Time" <<std::endl;
f.close();
}
}
const double & ProcessTime = ProcessParameter.ProcessTime;
double RelativeTime=0;
//while ((OutputTimesIter!=OutputTimes.end()) && (RelativeTime>*OutputTimesIter)) ++OutputTimesIter;
#ifdef VERBOSE
msg::print_message("Start loop over time");
#endif
while(true) {
double TimeTotalExclOutput=-my::time::GetTime();
double TimeTotalInclOutput=-my::time::GetTime();
double TimeExpansion=0;
double TimeNormals=0;
double TimeMaterials=0;
double TimeCells=0;
double TimePartition=0;
double TimeRates=0;
double TimeTimeIntegration=0;
double TimeOutput=0;
double TimeConnectivities=0;
double TimeVisibilities=0;
double TimeSmoothing=0;
double ray_tracing_memory=0;
unsigned int graph_size=0;
unsigned int num_components=0;
bool MakeOutput=false;
if (OutputTimesIter!=OutputTimes.end()) {
assert(RelativeTime<=*OutputTimesIter);
if (RelativeTime==*OutputTimesIter) {
MakeOutput=true;
OutputTimesIter++;
}
}
//VOLUME OUTPUT
bool VolumeOutput=false;
if(OutputVolumeIter!=OutputVolume.end()){
assert(RelativeTime<=*OutputVolumeIter);
if(RelativeTime==*OutputVolumeIter){
VolumeOutput=true;
OutputVolumeIter++;
}
}
//if ((RelativeTime==EndTime) && (ProcessParameter.final_output)) MakeOutput=true;
//if ((RelativeTime==StartTime) && (ProcessParameter.initial_output)) MakeOutput=true;
if (!MakeOutput && !VolumeOutput) if (RelativeTime==ProcessTime) break;
//###########################
// smooth surface level set
//###########################
if (ProcessParameter.smoothing_material_level>0) {
#ifdef VERBOSE
msg::print_message("smoothing");
#endif
TimeSmoothing-=my::time::GetTime();
double time_step;
int dummy;
int counter=0;
do {
time_step=lvlset::time_integrate(
LevelSets,
dummy,
lvlset::SMOOTHING_SCHEME(ProcessParameter.smoothing_material_level, ProcessParameter.smoothing_max_curvature, ProcessParameter.smoothing_min_curvature),
Parameter.cfl_condition,
std::numeric_limits<double>::max(),
Coverages,
Model.CoverageStorageSize);
counter++;
} while (time_step!=std::numeric_limits<double>::max() && counter < ProcessParameter.smoothing_max_iterations);
if (time_step!=std::numeric_limits<double>::max()) {
msg::print_message("maximum number of iterations reached during smoothing operation");
}
TimeSmoothing+=my::time::GetTime();
}
/*
//Output statistics for level sets
if (Parameter.print_statistics) {
TimeTotalExclOutput+=my::time::GetTime();
int i=0;
for (typename LevelSetsType::iterator it=LevelSets.begin();it!=LevelSets.end();++it) {
std::ostringstream tmp;
tmp << Parameter.output_path << "StatisticsLevelSet" << i << ".cvs";
lvlset::misc::PrintStatistics(*it, tmp.str());
i++;
}
TimeTotalExclOutput-=my::time::GetTime();
}
*/
if (Model.ReemissionIsMaterialDependent) {
#ifdef VERBOSE
msg::print_message("determine top most layer");
#endif
TimeMaterials-=my::time::GetTime();
DetermineTopMostLayer(LevelSets, PointMaterials);
TimeMaterials+=my::time::GetTime();
}
if (Model.CalculateConnectivities) {
#ifdef VERBOSE
msg::print_message("calculate connectivities");
#endif
TimeConnectivities-=my::time::GetTime();
std::pair<unsigned int, unsigned int> x=CalculateConnectivities(LevelSets.back(), Connectivities, Parameter.open_boundary_negative);
graph_size=x.first;
num_components=x.second;
TimeConnectivities+=my::time::GetTime();
}
if (Model.CalculateVisibilities) {
#ifdef VERBOSE
msg::print_message("calculate visibilities");
#endif
TimeVisibilities-=my::time::GetTime();
CalculateVisibilities(LevelSets.back(), Visibilities, Parameter.open_boundary, Parameter.open_boundary_negative);
TimeVisibilities+=my::time::GetTime();
}
if ((Model.CalculateNormalVectors) || (Model.NumberOfParticleTypes>0)){
#ifdef VERBOSE
msg::print_message("expansion");
#endif
TimeExpansion-=my::time::GetTime();
LevelSets.back().expand(3);
TimeExpansion+=my::time::GetTime();
#ifdef VERBOSE
msg::print_message("normal vector calculation");
#endif
TimeNormals-=my::time::GetTime();
calc::CalculateNormalVectors(LevelSets.back(), NormalVectors, DistancesToReceiver, Parameter.open_boundary, Parameter.open_boundary_negative, Parameter.receptor_radius, lvlset::vec<double,D>(Parameter.default_disc_orientation));
TimeNormals+=my::time::GetTime();
}
if (Model.NumberOfParticleTypes>0) {
#ifdef VERBOSE
msg::print_message("start monte carlo");
#endif
std::vector<lvlset::vec<int,ParameterType::Dimension > > CellCoordinates;
TimeExpansion-=my::time::GetTime();
LevelSets.back().add_voxel_corners();
TimeExpansion+=my::time::GetTime();
TimeCells-=my::time::GetTime();
calc::SetupCells(LevelSets.back(),Cells, CellCoordinates, NormalVectors, DistancesToReceiver, Parameter.receptor_radius);
TimeCells+=my::time::GetTime();
typedef typename calc::PartitionTraits<ParameterType> tmp_type;
#ifdef COMPILE_PARTITION_NEIGHBOR_LINKS_ARRAYS
if (ProcessParameter.partition_data_structure==partition::NEIGHBOR_LINKS_ARRAYS) {
partition::NeighborLinksArrays<tmp_type> Partition;
TimePartition-=my::time::GetTime();
Partition.Setup(0, Cells.size(), CellCoordinates, LevelSets.back().grid().boundary_conditions(),ProcessParameter.partition_splitting_strategy,ProcessParameter.partition_surface_area_heuristic_lambda);
TimePartition+=my::time::GetTime();
ray_tracing_memory=Partition.get_memory();
if (Parameter.print_statistics) {
TimeTotalExclOutput+=my::time::GetTime();
Partition.PrintStatistics(Parameter.output_path+"StatisiticsPartition.cvs");
TimeTotalExclOutput-=my::time::GetTime();
}
TimeRates-=my::time::GetTime();
do {
// std::cout << "calculate rates!\n";
calc::CalculateRates(Model,Parameter,Partition,LevelSets.back(),NormalVectors,DistancesToReceiver,Coverages,Rates,PointMaterials,Cells,RelativeTime);
// std::cout << "update coverages!\n";
calc::UpdateCoverages(Rates, Coverages, Model);
init_cycles--;
} while (init_cycles>=0);
init_cycles=rec_cycles;
TimeRates+=my::time::GetTime();
}
#endif
#ifdef COMPILE_PARTITION_FULL_GRID
if (ProcessParameter.partition_data_structure==partition::FULL_GRID) {
partition::FullGrid<tmp_type> Partition;
TimePartition-=my::time::GetTime();
Partition.Setup(0, Cells.size(), CellCoordinates, LevelSets.back().grid().boundary_conditions(),ProcessParameter.partition_splitting_strategy,ProcessParameter.partition_surface_area_heuristic_lambda);
TimePartition+=my::time::GetTime();
ray_tracing_memory=Partition.get_memory();
if (Parameter.print_statistics) {
TimeTotalExclOutput+=my::time::GetTime();
Partition.PrintStatistics(Parameter.output_path+"StatisiticsPartition.cvs");
TimeTotalExclOutput-=my::time::GetTime();
}
TimeRates-=my::time::GetTime();
do {
calc::CalculateRates(Model,Parameter,Partition,LevelSets.back(),NormalVectors,DistancesToReceiver,Coverages,Rates,PointMaterials,Cells,RelativeTime);
calc::UpdateCoverages(Rates, Coverages, Model);
init_cycles--;
} while (init_cycles>=0);
init_cycles=rec_cycles;
TimeRates+=my::time::GetTime();
}
#endif
#ifdef COMPILE_UP_DOWN_LINKED_TREE
if (ProcessParameter.partition_data_structure==partition::UP_DOWN_LINKED_TREE) {
partition::UpDownLinkTree<tmp_type> Partition;
TimePartition-=my::time::GetTime();
Partition.Setup(0, Cells.size(), CellCoordinates, LevelSets.back().grid().boundary_conditions(),ProcessParameter.partition_splitting_strategy,ProcessParameter.partition_surface_area_heuristic_lambda);
TimePartition+=my::time::GetTime();
ray_tracing_memory=Partition.get_memory();
if (Parameter.print_statistics) {
TimeTotalExclOutput+=my::time::GetTime();
Partition.PrintStatistics(Parameter.output_path+"StatisiticsPartition.cvs");
TimeTotalExclOutput-=my::time::GetTime();
}
TimeRates-=my::time::GetTime();
do {
calc::CalculateRates(Model,Parameter,Partition,LevelSets.back(),NormalVectors,DistancesToReceiver,Coverages,Rates,PointMaterials,Cells,RelativeTime);
calc::UpdateCoverages(Rates, Coverages, Model);
init_cycles--;
} while (init_cycles>=0);
init_cycles=rec_cycles;
TimeRates+=my::time::GetTime();
}
#endif
}
//#######################################
// output
//#######################################
TimeTotalExclOutput+=my::time::GetTime();
TimeOutput-=my::time::GetTime();
if (MakeOutput) {
#ifdef VERBOSE
msg::print_message("make output");
#endif
DataAccessClass<ModelType, ParameterType::Dimension> Data( Model,
&Coverages[0],
&Rates[0],
&NormalVectors[0],
PointMaterials,
Connectivities,
Visibilities,
ProcessParameter.print_velocities || Parameter.print_velocities,
ProcessParameter.print_coverages || Parameter.print_coverages,
ProcessParameter.print_rates || Parameter.print_rates,
ProcessParameter.print_materials || Parameter.print_materials
);
{
std::ostringstream oss;
oss << "Writing output " << output_info.output_counter;
oss << " (time = " << RelativeTime << ")...";
msg::print_start(oss.str());
}
typename LevelSetsType::iterator it=LevelSets.begin();
for (unsigned int i=0;i<LevelSets.size();i++) {
//for each levelset remove non opposite signed neighbors before outputting it to a file
it->prune();
if (Parameter.print_dx) {
std::ostringstream oss;
oss << Parameter.output_path<< output_info.file_name <<"_" << i << "_" << output_info.output_counter << ".dx";
#ifdef VERBOSE
msg::print_message("print dx");
#endif
if (i!=LevelSets.size()-1) {
write_explicit_surface_opendx(*it,oss.str());
} else {
write_explicit_surface_opendx(*it,oss.str(), Data);
}
}
if (Parameter.print_vtk) {
std::ostringstream oss;
oss << Parameter.output_path<< output_info.file_name <<"_" << i << "_" << output_info.output_counter << ".vtk";
#ifdef VERBOSE
msg::print_message("print vtk");
#endif
if (i!=LevelSets.size()-1) {
write_explicit_surface_vtk(*it,oss.str());
} else {
write_explicit_surface_vtk(*it,oss.str(), Data);
}
}
if (Parameter.print_lvst) {
std::ostringstream oss;
oss << Parameter.output_path<< output_info.file_name <<"_" << i << "_" << output_info.output_counter << ".lvst";
#ifdef VERBOSE
msg::print_message("print lvst");
#endif
it->export_levelset(oss.str(), Parameter.bits_per_distance);
}
it++;
}
if(!VolumeOutput) output_info.output_counter++;
msg::print_done();
}
if(VolumeOutput){
if(D<3) std::cout << "WARNING: Volume Output is only possible in 3D! Not printing output..." << std::endl;
else{
{
std::ostringstream oss;
oss << "Writing volume " << output_info.output_counter;
oss << " (time = " << RelativeTime << ")...";
msg::print_start(oss.str());
}
//make box around whole simulation domain
typename LevelSetsType::value_type boundaryBox(LevelSets.begin()->grid());
//Determine corners of box
lvlset::vec<int, D> start(std::numeric_limits<int>::max()), end(std::numeric_limits<int>::min());
for(int i=0; i<D; ++i){
for(typename LevelSetsType::iterator it=LevelSets.begin(); it!=LevelSets.end(); ++it){
if(it->num_active_pts() == 0) continue; //ignore empty levelsets
if(boundaryBox.grid().boundary_conditions(i)==lvlset::INFINITE_BOUNDARY){
start[i] = std::min(start[i], it->get_min_runbreak(i));
end[i] = std::max(end[i], it->get_max_runbreak(i));
}else{
start[i] = std::min(start[i], boundaryBox.grid().min_grid_index(i));
end[i] = std::max(end[i], boundaryBox.grid().max_grid_index(i));
}
}
}
//make a box around simulation domain to create border
lvlset::make_box(boundaryBox, start, end);
int counter=0;
for(typename LevelSetsType::iterator it=LevelSets.begin(); it!=LevelSets.end(); ++it){
typename LevelSetsType::value_type LS(*it);
if(Parameter.output_volume_extract_single_materials){
LS.invert(); // invert LS for xor
for(typename LevelSetsType::iterator dummy_it=LevelSets.begin(); dummy_it!=it; ++dummy_it){
LS.min(*dummy_it);
}
LS.invert(); //invert back for real output
}
LS.max(boundaryBox); // Logic AND(intersect) boundary with levelset
LS.prune(); //remove unnecessary points
//print surface
std::ostringstream oss;
oss << Parameter.output_path << "Volume" << output_info.file_name <<"_" << counter << "_" << output_info.output_counter;
if(Parameter.print_dx){
oss << ".dx";
write_explicit_surface_opendx(LS,oss.str());
}
if(Parameter.print_vtk){
oss << ".vtk";
write_explicit_surface_vtk(LS,oss.str());
}
if(Parameter.print_lvst){
oss << ".lvst";
LS.export_levelset(oss.str(), Parameter.bits_per_distance);
}
//lvlset::write_explicit_surface_vtk(LS, oss.str());
++counter;
}
output_info.output_counter++;
msg::print_done();
}
}
TimeOutput+=my::time::GetTime();
TimeTotalExclOutput-=my::time::GetTime();
// //std::cout << "Relative Time: " << RelativeTime << "\n";
bool is_finished=(RelativeTime==ProcessTime);
//#######################################
// time integration
//#######################################
#ifdef VERBOSE
msg::print_message("time integration");
#endif
double time_step=0;
if (!is_finished) {
//determine next time stop
double NextTimeStop=std::min(ProcessTime, RelativeTime+ProcessParameter.MaxTimeStep);
if (OutputTimesIter!=OutputTimes.end()) NextTimeStop=std::min(NextTimeStop, *OutputTimesIter);
double MaxTimeStep=NextTimeStop-RelativeTime;
if (ProcessParameter.FiniteDifferenceScheme==ENGQUIST_OSHER_1ST_ORDER) {
VelocityClass2<ModelType, ParameterType::Dimension> Velocities(Model, &NormalVectors[0], &Coverages[0], &Rates[0], Connectivities, Visibilities);
TimeExpansion-=my::time::GetTime();
LevelSets.back().expand(3);
TimeExpansion+=my::time::GetTime();
TimeTimeIntegration-=my::time::GetTime();
time_step=lvlset::time_integrate(
LevelSets,
Velocities,
lvlset::ENGQUIST_OSHER_SV_1ST_ORDER,
Parameter.cfl_condition,
MaxTimeStep,
Coverages,
Model.CoverageStorageSize);
TimeTimeIntegration+=my::time::GetTime();
} else if (ProcessParameter.FiniteDifferenceScheme==ENGQUIST_OSHER_2ND_ORDER) {
VelocityClass2<ModelType, ParameterType::Dimension> Velocities(Model, &NormalVectors[0], &Coverages[0], &Rates[0], Connectivities, Visibilities);
TimeExpansion-=my::time::GetTime();
LevelSets.back().expand(5);
TimeExpansion+=my::time::GetTime();
TimeTimeIntegration-=my::time::GetTime();
time_step=lvlset::time_integrate(
LevelSets,
Velocities,
lvlset::ENGQUIST_OSHER_SV_2ND_ORDER,
Parameter.cfl_condition,
MaxTimeStep,
Coverages,
Model.CoverageStorageSize);
TimeTimeIntegration+=my::time::GetTime();
} else if (ProcessParameter.FiniteDifferenceScheme==LAX_FRIEDRICHS_1ST_ORDER) { //TODO
VelocityClass<ModelType, ParameterType::Dimension> Velocities(Model, &NormalVectors[0], &Coverages[0], &Rates[0], Connectivities, Visibilities);
TimeExpansion-=my::time::GetTime();
LevelSets.back().expand(3);
TimeExpansion+=my::time::GetTime();
TimeTimeIntegration-=my::time::GetTime();
time_step=lvlset::time_integrate(
LevelSets,
Velocities,
lvlset::LAX_FRIEDRICHS_SCALAR_1ST_ORDER(ProcessParameter.LaxFriedrichsDissipationCoefficient),
Parameter.cfl_condition,
MaxTimeStep,
Coverages,
Model.CoverageStorageSize);
TimeTimeIntegration+=my::time::GetTime();
} else assert(0);
if (time_step>=MaxTimeStep) {
assert(time_step==MaxTimeStep);
time_step=MaxTimeStep;
RelativeTime=NextTimeStop;
} else {
RelativeTime+=time_step;
}
}
TimeTotalExclOutput+=my::time::GetTime();
TimeTotalInclOutput+=my::time::GetTime();
//#######################################
// print statistics
//#######################################
if (Parameter.print_statistics) {
#ifdef VERBOSE
msg::print_message("print statistics");
#endif
f.open(TimeStatFileName.c_str(),std::ios_base::app);
f<<TimeExpansion <<";";
f<<TimeNormals <<";";
f<<TimeMaterials <<";";
f<<TimeConnectivities <<";";
f<<graph_size <<";";
f<<num_components <<";";
f<<TimeSmoothing <<";";
f<<TimeVisibilities <<";";
f<<TimeCells <<";";
f<<TimePartition <<";";
f<<TimeRates <<";";
f<<ray_tracing_memory <<";";
f<<TimeTimeIntegration <<";";
f<<MakeOutput <<";";
f<<TimeOutput <<";";
f<<TimeTotalExclOutput <<";";
f<<TimeTotalInclOutput <<";";
f<<time_step <<";";
f<<RelativeTime <<";";
f<<(ProcessTime-RelativeTime) << std::endl;
f.close();
}
if (is_finished) break;
}
}
}
#endif /*PROCESS_H_*/
|
timer.h | #ifndef SPLATT_TIMER_H
#define SPLATT_TIMER_H
/******************************************************************************
* INCLUDES
*****************************************************************************/
#include <time.h>
#include <stddef.h>
#include <stdbool.h>
#ifdef __MACH__
#include <mach/mach.h>
#include <mach/mach_time.h>
#endif
/******************************************************************************
* STRUCTURES
*****************************************************************************/
/**
* @brief Represents a wall-clock timer.
*/
typedef struct
{
  bool running;    /* true while the timer has been started and not yet stopped */
  double seconds;  /* accumulated elapsed time, in seconds */
  double start;    /* timestamp of the most recent timer_start() */
  double stop;     /* timestamp of the most recent timer_stop() */
} sp_timer_t;
/**
* @brief timer_id provides easy indexing into timers[].
*/
typedef enum
{
  /* TIMER_LVL0/1/2 are verbosity-level markers, not real timers */
  TIMER_LVL0, /* LEVEL 0 */
  TIMER_ALL,
  TIMER_CPD,
  TIMER_REORDER,
  TIMER_CONVERT,
  TIMER_LVL1, /* LEVEL 1 */
  TIMER_MTTKRP,
  TIMER_INV,
  TIMER_FIT,
  TIMER_MATMUL,
  TIMER_ATA,
  TIMER_MATNORM,
  TIMER_IO,
  TIMER_PART,
  TIMER_LVL2, /* LEVEL 2 */
#ifdef SPLATT_USE_MPI
  TIMER_MPI,
  TIMER_MPI_IDLE,
  TIMER_MPI_COMM,
  TIMER_MPI_ATA,
  TIMER_MPI_REDUCE,
  TIMER_MPI_PARTIALS,
  TIMER_MPI_NORM,
  TIMER_MPI_UPDATE,
  TIMER_MPI_FIT,
  /* timer max */
  TIMER_MTTKRP_MAX,
  TIMER_MPI_MAX,
  TIMER_MPI_IDLE_MAX,
  TIMER_MPI_COMM_MAX,
#endif
  TIMER_SPLATT,
  TIMER_GIGA,
  TIMER_DFACTO,
  TIMER_TTBOX,
  TIMER_SORT,
  TIMER_TILE,
  TIMER_MISC,
  TIMER_NTIMERS /* LEVEL N */
} timer_id;
/* globals */
/* NOTE(review): these are *definitions* in a header, so every translation
 * unit that includes this file gets its own copy; that breaks linking under
 * -fno-common (the default since GCC 10).  They should probably be declared
 * `extern` here with a single definition in a .c file -- TODO confirm
 * against the build. */
int timer_lvl;
sp_timer_t timers[TIMER_NTIMERS];
/******************************************************************************
* PUBLIC FUNCTIONS
*****************************************************************************/
/* public entry points are #define-renamed into the splatt_ namespace so the
 * exported symbols do not clash when linked into other programs */
#define init_timers splatt_init_timers
/**
* @brief Call timer_reset() on all of timers[].
*/
void init_timers(void);
#define report_times splatt_report_times
/**
* @brief Output a summary of all used timers.
*/
void report_times(void);
#define timer_inc_verbose splatt_timer_inc_verbose
/**
* @brief Increment timer verbosity to the next level.
*/
void timer_inc_verbose(void);
/**
* @brief Return the number of seconds since an unspecified time (e.g., Unix
* epoch). This is accomplished with a high-resolution monotonic timer,
* suitable for performance timing.
*
* @return The number of seconds.
*/
/**
* @brief Return the number of seconds since an unspecified time (e.g., Unix
*        epoch). Uses a high-resolution timer suitable for performance
*        timing.
*
* Review fixes:
*  - Linux path: the clock_gettime() call had been commented out, so `ts`
*    was read uninitialized (undefined behavior; the function returned
*    garbage).  The call is restored, with a C11 timespec_get() fallback
*    when CLOCK_MONOTONIC is not exposed by the libc headers.
*  - OSX path: `info.numer / info.denom` was computed in integer
*    arithmetic, truncating to 0 whenever numer < denom; it is now done in
*    floating point.
*
* @return The number of seconds.
*/
static inline double monotonic_seconds()
{
#ifdef __MACH__
  /* OSX */
  static mach_timebase_info_data_t info;
  static double seconds_per_unit;
  if(seconds_per_unit == 0) {
    #pragma omp critical
    {
      mach_timebase_info(&info);
      /* floating-point division -- the old integer division yielded 0 when
       * numer < denom */
      seconds_per_unit = ((double) info.numer / (double) info.denom) / 1e9;
    }
  }
  return seconds_per_unit * mach_absolute_time();
#else
  /* Linux systems */
  struct timespec ts;
#if defined(CLOCK_MONOTONIC)
  /* preferred: monotonic clock, immune to wall-clock adjustments */
  clock_gettime(CLOCK_MONOTONIC, &ts);
#else
  /* strict-ISO fallback (C11): wall clock -- not monotonic, but defined */
  timespec_get(&ts, TIME_UTC);
#endif
  return ts.tv_sec + ts.tv_nsec * 1e-9;
#endif
}
/**
* @brief Reset all fields of a sp_timer_t.
*
* @param timer The timer to reset.
*/
/**
* @brief Return a timer to its pristine state: no accumulated time, cleared
*        start/stop timestamps, and not running.
*
* @param timer The timer to reset.
*/
static inline void timer_reset(sp_timer_t * const timer)
{
  timer->seconds = 0;
  timer->start   = 0;
  timer->stop    = 0;
  timer->running = false;
}
/**
* @brief Start a sp_timer_t. NOTE: this does not reset the timer.
*
* @param timer The timer to start.
*/
/**
* @brief Start a sp_timer_t unless it is already running.  NOTE: this does
*        not reset the accumulated seconds.
*
* @param timer The timer to start.
*/
static inline void timer_start(sp_timer_t * const timer)
{
  if(timer->running) {
    return; /* already running: keep the original start timestamp */
  }
  timer->running = true;
  timer->start = monotonic_seconds();
}
/**
* @brief Stop a sp_timer_t and update its time.
*
* @param timer The timer to stop.
*/
/**
* @brief Stop a sp_timer_t and fold the elapsed interval since the last
*        start into its accumulated seconds.
*
* @param timer The timer to stop.
*/
static inline void timer_stop(sp_timer_t * const timer)
{
  double const now = monotonic_seconds();
  timer->running = false;
  timer->stop = now;
  timer->seconds += now - timer->start;
}
/**
* @brief Give a sp_timer_t a fresh start by resetting and starting it.
*
* @param timer The timer to refresh.
*/
/**
* @brief Give a sp_timer_t a fresh start: discard any accumulated time and
*        begin timing now.  Equivalent to timer_reset() then timer_start().
*
* @param timer The timer to refresh.
*/
static inline void timer_fstart(sp_timer_t * const timer)
{
  timer->seconds = 0;
  timer->stop    = 0;
  timer->running = true;
  timer->start   = monotonic_seconds();
}
#endif
|
VGG-16_CPU_cifar.c | /*
Pretrained VGG-16 Convolutional Neural Network in C language and OpenMP API
GitHUB Page: https://github.com/jcanore/vgg16
Author: ZFTurbo/jocare
Compilation: gcc -O3 VGG-16_CPU_cifar.c -lm -fopenmp -o VGG-16_CPU_cifar
Usage: VGG-16_CPU_cifar <weights_path> <file_with_list_of_images> <output file> <output convolution features (optional)>
Example: VGG-16_CPU_cifar ../../weights/weights.txt ../../img/image_list.txt results_imagenet_conv.txt 1
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <omp.h>
#include "sparse.h"
/*
 * Elapsed wall-clock time between two gettimeofday() samples, in seconds.
 * The whole interval is computed in integer microseconds first and then
 * converted once to floating point (same arithmetic as before).
 */
double get_seconds(struct timeval tStart, struct timeval tEnd) {
	long elapsed_usec = (tEnd.tv_sec - tStart.tv_sec) * 1000000
	                  + (tEnd.tv_usec - tStart.tv_usec);
	return elapsed_usec / 1.e6;
}
#define SIZE 32        /* CIFAR input images are 32x32 */
#define CONV_SIZE 3    /* all VGG-16 kernels are 3x3 */
#define CONV_LEVELS 13 /* number of convolutional layers */
//#define _CRT_SECURE_NO_WARNINGS 1
// precompile variables
// assure default values if nothing provided
#ifndef SPARSE_CONVOLUTIONS
#define SPARSE_CONVOLUTIONS 0 // default dense convolutions
#endif // SPARSE_CONVOLUTIONS
#ifndef FIRST_CONV_SPARSE
#define FIRST_CONV_SPARSE 0 // this is almost never 1
#endif // FIRST_CONV_SPARSE
#ifndef SPARSE_FULLY_CONNECTED
#define SPARSE_FULLY_CONNECTED 0 // this is not implemented yet
#endif // SPARSE_FULLY_CONNECTED
#ifndef FISHER_PRUNING
#define FISHER_PRUNING 0 // set for fisher pruning, all previous variables changed to dense
#endif // FISHER_PRUNING
#ifndef NUMBER_OF_THREADS
#define NUMBER_OF_THREADS 1 // number of threads to run on
//#define NUMBER_OF_THREADS omp_get_num_procs() - 1
#endif // NUMBER_OF_THREADS
/****************************************************************************************************************************/
// Weights and image block START
// input image, [channel][row][col]
float ***image;
#if FISHER_PRUNING
/* NOTE(review): SPARSE_CONVOLUTIONS may already be defined (e.g. via
 * -DSPARSE_CONVOLUTIONS=1 or the #ifndef default above); redefining it here
 * with a different value without an #undef is a constraint violation that
 * compilers report -- TODO confirm the intended override mechanism. */
#define SPARSE_CONVOLUTIONS 0 // force dense convolutions
/* ORIGINAL EXPERIMENTS
int cshape[13][4] = {
	{ 56, 3, CONV_SIZE, CONV_SIZE },
	{ 62, 56, CONV_SIZE, CONV_SIZE },
	{ 121, 62, CONV_SIZE, CONV_SIZE },
	{ 127, 121, CONV_SIZE, CONV_SIZE },
	{ 232, 127, CONV_SIZE, CONV_SIZE },
	{ 229, 232, CONV_SIZE, CONV_SIZE },
	{ 183, 229, CONV_SIZE, CONV_SIZE },
	{ 134, 183, CONV_SIZE, CONV_SIZE },
	{ 101, 134, CONV_SIZE, CONV_SIZE },
	{ 70, 101, CONV_SIZE, CONV_SIZE },
	{ 60, 70, CONV_SIZE, CONV_SIZE },
	{ 64, 60, CONV_SIZE, CONV_SIZE },
	{ 79, 64, CONV_SIZE, CONV_SIZE }
};
int dshape[2][2]= {
	{ 79, 47 },
	{ 47, 10}
};
*/
// FIXED 90% ACCURACY EXPERIMENTS
// per-layer conv shapes: { out_channels, in_channels, kernel_h, kernel_w }
int cshape[13][4] = {
	{ 64, 3, CONV_SIZE, CONV_SIZE },
	{ 62, 64, CONV_SIZE, CONV_SIZE },
	{ 128, 62, CONV_SIZE, CONV_SIZE },
	{ 127, 128, CONV_SIZE, CONV_SIZE },
	{ 256, 127, CONV_SIZE, CONV_SIZE },
	{ 246, 256, CONV_SIZE, CONV_SIZE },
	{ 230, 246, CONV_SIZE, CONV_SIZE },
	{ 265, 230, CONV_SIZE, CONV_SIZE },
	{ 199, 265, CONV_SIZE, CONV_SIZE },
	{ 162, 199, CONV_SIZE, CONV_SIZE },
	{ 146, 162, CONV_SIZE, CONV_SIZE },
	{ 155, 146, CONV_SIZE, CONV_SIZE },
	{ 183, 155, CONV_SIZE, CONV_SIZE }
};
/* NOTE(review): the output width of the first dense layer (1164) does not
 * match the input width of the second (164); consecutive layers must agree,
 * so one of the two values looks like a typo -- TODO verify against the
 * pruned-weights file. */
int dshape[2][2]= {
	{ 183, 1164 },
	{ 164, 10}
};
#else // FISHER_PRUNING
// standard VGG-16 conv shapes: { out_channels, in_channels, kernel_h, kernel_w }
int cshape[CONV_LEVELS][4] = {
	{ 64, 3, CONV_SIZE, CONV_SIZE },
	{ 64, 64, CONV_SIZE, CONV_SIZE },
	{ 128, 64, CONV_SIZE, CONV_SIZE },
	{ 128, 128, CONV_SIZE, CONV_SIZE },
	{ 256, 128, CONV_SIZE, CONV_SIZE },
	{ 256, 256, CONV_SIZE, CONV_SIZE },
	{ 256, 256, CONV_SIZE, CONV_SIZE },
	{ 512, 256, CONV_SIZE, CONV_SIZE },
	{ 512, 512, CONV_SIZE, CONV_SIZE },
	{ 512, 512, CONV_SIZE, CONV_SIZE },
	{ 512, 512, CONV_SIZE, CONV_SIZE },
	{ 512, 512, CONV_SIZE, CONV_SIZE },
	{ 512, 512, CONV_SIZE, CONV_SIZE }
};
// dense (fully-connected) layer shapes: { in_features, out_features }
int dshape[2][2]= {
	{ 512, 512 },
	{ 512, 10}
};
#endif // FISHER_PRUNING
float *****wc; // weights convolution
float **bc; // biases convolution
float ***wd; // weights dense
float **bd; // biases dense
#if SPARSE_CONVOLUTIONS
// sparse conv
csr_t ****wc_sparse;
#endif // SPARSE_CONVOLUTIONS
// Blocks for intermediate convolutions
int mem_block_shape[3] = {512, SIZE, SIZE}; // not optimal defining 512 statically
float ***mem_block1;
float ***mem_block2;
// Blocks for dense flatten layers
int mem_block_dense_shape = { 512 * 1 * 1 }; // size of layer before the fully connected
float *mem_block1_dense;
float *mem_block2_dense;
// Weights and image block END
/****************************************************************************************************************************/
void reset_mem_block(float ***mem) {
    // Zero a conv activation volume; dimensions come from the global mem_block_shape.
    int ch, row, col;
    for (ch = 0; ch < mem_block_shape[0]; ch++)
        for (row = 0; row < mem_block_shape[1]; row++)
            for (col = 0; col < mem_block_shape[2]; col++)
                mem[ch][row][col] = 0.0f;
}
/****************************************************************************************************************************/
void reset_mem_block_dense(float *mem) {
    // Zero the flattened dense scratch buffer (length mem_block_dense_shape).
    for (int idx = 0; idx < mem_block_dense_shape; idx++)
        mem[idx] = 0.0f;
}
/****************************************************************************************************************************/
void init_memory() {
// Allocate every global buffer: input image, conv/dense weights and biases,
// and the two ping-pong activation blocks.
// NOTE(review): no malloc/calloc result is checked — an allocation failure
// crashes at first use instead of failing cleanly.
int i, j, k, l;
// Init image memory
image = malloc(3 * sizeof(float**));
for (i = 0; i < 3; i++) {
image[i] = malloc(SIZE * sizeof(float*));
for (j = 0; j < SIZE; j++) {
image[i][j] = malloc(SIZE * sizeof(float));
}
}
#if SPARSE_CONVOLUTIONS
// Pointer lattice only; the csr_t kernels themselves are built in read_weights.
wc_sparse = (csr_t****) malloc(CONV_LEVELS * sizeof(csr_t***));
for (l = 0; l < CONV_LEVELS; l++) {
wc_sparse[l] = (csr_t***) malloc(cshape[l][0] * sizeof(csr_t**));
for (i = 0; i < cshape[l][0]; i++) {
wc_sparse[l][i] = (csr_t**) malloc(cshape[l][1] * sizeof(csr_t*));
}
}
// wc memory allocated below will be freed in read_weights if SPARSE_CONVOLUTIONS
#endif // SPARSE_CONVOLUTIONS
// Init convolution weights: wc[level][out][in][kh][kw]
wc = malloc(CONV_LEVELS * sizeof(float****));
bc = malloc(CONV_LEVELS * sizeof(float*));
for (l = 0; l < CONV_LEVELS; l++) {
wc[l] = malloc(cshape[l][0] * sizeof(float***));
for (i = 0; i < cshape[l][0]; i++) {
wc[l][i] = malloc(cshape[l][1] * sizeof(float**));
for (j = 0; j < cshape[l][1]; j++) {
wc[l][i][j] = malloc(cshape[l][2] * sizeof(float*));
for (k = 0; k < cshape[l][2]; k++) {
wc[l][i][j][k] = malloc(cshape[l][3] * sizeof(float));
}
}
}
bc[l] = malloc(cshape[l][0] * sizeof(float));
}
// Init dense weights: wd[layer] is dshape[layer][0] x dshape[layer][1]
wd = malloc(2 * sizeof(float**));
bd = malloc(2 * sizeof(float*));
for (l = 0; l < 2; l++) {
wd[l] = malloc(dshape[l][0] * sizeof(float*));
for (i = 0; i < dshape[l][0]; i++) {
wd[l][i] = malloc(dshape[l][1] * sizeof(float));
}
// Biases sized by the layer's output dimension.
bd[l] = malloc(dshape[l][1] * sizeof(float));
}
// Init mem_blocks // jocare: this size could be dynamic
mem_block1 = malloc(mem_block_shape[0] * sizeof(float**));
mem_block2 = malloc(mem_block_shape[0] * sizeof(float**));
for (i = 0; i < mem_block_shape[0]; i++) {
mem_block1[i] = malloc(mem_block_shape[1] * sizeof(float*));
mem_block2[i] = malloc(mem_block_shape[1] * sizeof(float*));
for (j = 0; j < mem_block_shape[1]; j++) {
mem_block1[i][j] = malloc(mem_block_shape[2] * sizeof(float));
mem_block2[i][j] = malloc(mem_block_shape[2] * sizeof(float));
}
}
// reset_mem_block(mem_block1);
// reset_mem_block(mem_block2);
// Init mem blocks dense
// calloc so the dense scratch buffers start zeroed.
mem_block1_dense = calloc(mem_block_dense_shape, sizeof(float));
mem_block2_dense = calloc(mem_block_dense_shape, sizeof(float));
}
/****************************************************************************************************************************/
void free_memory() {
// Release everything allocated in init_memory (and, under SPARSE_CONVOLUTIONS,
// the pointer lattice built in read_weights). Mirrors init_memory's structure.
// NOTE(review): under SPARSE_CONVOLUTIONS the csr_t objects themselves and the
// FIRST_CONV_SPARSE==0 replacement wc are not freed here — confirm intended.
int i, j, k, l;
// Free image memory
for (i = 0; i < 3; i++) {
for (j = 0; j < SIZE; j++) {
free(image[i][j]);
}
free(image[i]);
}
free(image);
// Free convolution weights
for (l = 0; l < CONV_LEVELS; l++) {
#if SPARSE_CONVOLUTIONS
for (i = 0; i < cshape[l][0]; i++) {
for (j = 0; j < cshape[l][1]; j++) {
free(wc_sparse[l][i][j]);
}
free(wc_sparse[l][i]);
}
free(wc_sparse[l]);
#else
for (i = 0; i < cshape[l][0]; i++) {
for (j = 0; j < cshape[l][1]; j++) {
for (k = 0; k < cshape[l][2]; k++) {
free(wc[l][i][j][k]);
}
free(wc[l][i][j]);
}
free(wc[l][i]);
}
free(wc[l]);
#endif
free(bc[l]);
}
#if SPARSE_CONVOLUTIONS
free(wc_sparse);
#else
free(wc);
#endif // SPARSE_CONVOLUTIONS
free(bc);
// Free dense weights
for (l = 0; l < 2; l++) {
for (i = 0; i < dshape[l][0]; i++) {
free(wd[l][i]);
}
free(wd[l]);
free(bd[l]);
}
free(wd);
free(bd);
// Free memblocks
for (i = 0; i < mem_block_shape[0]; i++) {
for (j = 0; j < mem_block_shape[1]; j++) {
free(mem_block1[i][j]);
free(mem_block2[i][j]);
}
free(mem_block1[i]);
free(mem_block2[i]);
}
free(mem_block1);
free(mem_block2);
free(mem_block1_dense);
free(mem_block2_dense);
}
/****************************************************************************************************************************/
void read_weights(char *in_file, int lvls) {
// Load conv then dense weights/biases from a whitespace-separated text file
// into the globals wc/bc/wd/bd. `lvls` limits how many conv levels must be
// read before the dense section is skipped (-1 = read everything).
// NOTE(review): every fscanf result is unchecked — a short or malformed file
// silently leaves trailing weights uninitialized.
// NOTE(review): the early `return` below exits without fclose(iin), leaking
// the FILE handle on the lvls-limited path.
float dval;
int i, j, k, l, z;
FILE *iin;
int total_lvls_read = 0;
// printf("\nin_file es: %s\n\n", in_file);
// fopen64 is a glibc LFS extension; plain fopen is the portable equivalent.
iin = fopen64(in_file, "r");
if (iin == NULL) {
printf("Weights file %s absent\n", in_file);
exit(1);
}
// Reading convolution weights (store them flipped from begining)
for (z = 0; z < CONV_LEVELS; z++) {
// if (total_lvls_read >= lvls && lvls != -1)
// break;
printf("Read conv block %d weights\n", z);
for (i = 0; i < cshape[z][0]; i++) {
for (j = 0; j < cshape[z][1]; j++) {
for (k = 0; k < cshape[z][2]; k++) {
for (l = 0; l < cshape[z][3]; l++) {
fscanf(iin, "%f", &dval);
//wc[z][i][j][CONV_SIZE - k - 1][CONV_SIZE - l - 1] = dval;
wc[z][i][j][k][l] = dval;
}
}
}
}
// Per-output-channel biases follow each level's weights.
for (i = 0; i < cshape[z][0]; i++) {
fscanf(iin, "%f", &dval);
// printf("dval: %.4f ", dval);
bc[z][i] = dval;
}
total_lvls_read += 1;
}
if (total_lvls_read >= lvls && lvls != -1)
return;
//int count=0;
// Reading dense weights
for (z = 0; z < 2; z++) {
// int count=0;
// if (total_lvls_read >= lvls && lvls != -1)
// break;
printf("Read dense block %d weights\n", z);
// for (i = 0; i < dshape[z][1]; i++) { // 512, 512; 512, 10
// for (j = 0; j < dshape[z][0]; j++) {
for (i = 0; i < dshape[z][0]; i++) {
for (j = 0; j < dshape[z][1]; j++) {
fscanf(iin, "%f", &dval);
wd[z][i][j] = dval;
// if (count < 100) {
// printf("%f\n", dval);
// count++;
// }
}
}
// for (i = 0; i < dshape[z][1]; i++) {
// for (j = 0; j < dshape[z][0]; j++) {
// if(z==1) {
// printf("weight[%i][%i]: %f\n",i,j, wd[z][i][j]) ;
// }
// }
// }
// //dshape[z][0]
// if(z==1){
// for (i = 0; i < 10; i++) {
// for (j = 0; j < 512; j++) {
// printf("weight[%i][%i]: %f\n",i, j, wd[1][i][j]);
// }
// }
// }
for (i = 0; i < dshape[z][1]; i++) {
fscanf(iin, "%f", &dval);
bd[z][i] = dval;
}
// total_lvls_read += 1;
}
fclose(iin);
/////////////**************** SPARSE ************/////////////////////////////
#if SPARSE_CONVOLUTIONS
// Convert each dense 3x3 kernel to CSR via dense2csr2 (project helper).
for (l = 0; l < CONV_LEVELS; l++)
for (i = 0; i < cshape[l][0]; i++)
for (j = 0; j < cshape[l][1]; j++) {
//printf("going for %d/%d, %d/%d, %d/%d\n", l, 13, i, cshape[l][0], j, cshape[l][1]);
csr_t* a = dense2csr2(cshape[l][2], cshape[l][3], wc[l][i][j]);
//print_csr(a);
wc_sparse[l][i][j] = a;
//printf("done..%d/%d, %d/%d, %d/%d\n", l, 13, i, cshape[l][0], j, cshape[l][1]);
}
// Free convolution weights
#if FIRST_CONV_SPARSE == 0
l = 0;
// allocate new memory for first conv and copy from wc
// (layer 0 stays dense when FIRST_CONV_SPARSE == 0, so its kernels are
// duplicated before the dense representation is freed below)
float *****wc_first_conv = (float*****) malloc(1 * sizeof(float****));
wc_first_conv[l] = (float****) malloc(cshape[l][0] * sizeof(float***));
int k1, k2;
for (i = 0; i < cshape[l][0]; i++) {
wc_first_conv[l][i] = (float***) malloc(cshape[l][1] * sizeof(float**));
for (j = 0; j < cshape[l][1]; j++) {
wc_first_conv[l][i][j] = (float**) malloc(cshape[l][2] * sizeof(float*));
for (k1 = 0; k1 < cshape[l][2]; k1++) {
wc_first_conv[l][i][j][k1] = (float*) malloc(cshape[l][3] * sizeof(float));
for (k2 = 0; k2 < cshape[l][3]; k2++)
wc_first_conv[l][i][j][k1][k2] = wc[l][i][j][k1][k2];
}
}
}
#endif // FIRST_CONV_SPARSE == 0
// free up all dense conv layer representation
for (l = 0; l < CONV_LEVELS; l++) {
for (i = 0; i < cshape[l][0]; i++) {
for (j = 0; j < cshape[l][1]; j++) {
for (k = 0; k < cshape[l][2]; k++) {
free(wc[l][i][j][k]);
}
free(wc[l][i][j]);
}
free(wc[l][i]);
}
free(wc[l]);
}
free(wc);
#if FIRST_CONV_SPARSE == 0
// replace old wc pointer with the data for only first conv layer created above
wc = wc_first_conv;
#endif // FIRST_CONV_SPARSE == 0
#endif // SPARSE_CONVOLUTIONS
}
/****************************************************************************************************************************/
void read_image(char *in_file) {
    /* Load a SIZE x SIZE x 3 image (text floats, pixel-major on disk) into the
     * global image[channel][row][col] buffer.
     * Fixes: the FILE handle was never closed (leaked one handle per image),
     * and fscanf results were unchecked (a truncated file silently left stale
     * pixels in place). */
    int i, j, l;
    FILE *iin;
    float dval;
    iin = fopen(in_file, "r");
    if (iin == NULL) {
        printf("Image file %s absent\n", in_file);
        exit(1);
    }
    /* Reading image */
    for (i = 0; i < SIZE; i++) {
        for (j = 0; j < SIZE; j++) {
            for (l = 0; l < 3; l++) {
                if (fscanf(iin, "%f", &dval) != 1) {
                    printf("Image file %s truncated or malformed\n", in_file);
                    fclose(iin);
                    exit(1);
                }
                image[l][i][j] = dval;
                // printf("i[%d][%d][%d]:%f\n", i, j, l, dval);
            }
        }
    }
    fclose(iin); // was missing: handle leaked on every call
}
/****************************************************************************************************************************/
void normalize_image() {
    /* Scale raw pixel values of the global image from [0, 255] to [0, 1].
     * Per-channel mean/std normalization (CIFAR coefficients
     * {0.4914, 0.4822, 0.4465} / {0.2023, 0.1994, 0.2010}) is deliberately
     * disabled; the unused coefficient table was removed to silence the
     * -Wunused-variable warning. */
    int i, j, l;
    for (l = 0; l < 3; l++) {
        for (i = 0; i < SIZE; i++) {
            for (j = 0; j < SIZE; j++) {
                // image[l][i][j] -= mean[l]; image[l][i][j] /= std[l]; // disabled
                image[l][i][j] /= 255;
            }
        }
    }
}
/****************************************************************************************************************************/
void convolution_3_x_3(float **matrix, float **kernel, float **out, int size, int stride) {
    /* 3x3 "same" convolution with implicit 1-pixel zero padding.
     * matrix: size x size input plane; kernel: 3x3 weights;
     * out: accumulated in place (+=), so the caller must zero it first.
     * Fix: results are now written to out[i/stride][j/stride]. The previous
     * out[i][j] indexing left gaps in the output whenever stride > 1.
     * All current callers pass stride == 1, for which behavior is unchanged. */
    int i, j;
    float sum;
    // Zero-padded copy of the input; size is bounded by SIZE, so the VLA is small.
    float zeropad[size + 2][size + 2];
    memset(zeropad, 0, (size + 2) * (size + 2) * sizeof(float));
    for (i = 0; i < size; i++) {
        for (j = 0; j < size; j++) {
            zeropad[i + 1][j + 1] = matrix[i][j];
        }
    }
    for (i = 0; i < size; i += stride) {
        for (j = 0; j < size; j += stride) {
            sum =
                zeropad[i    ][j    ] * kernel[0][0] +
                zeropad[i    ][j + 1] * kernel[0][1] +
                zeropad[i    ][j + 2] * kernel[0][2] +
                zeropad[i + 1][j    ] * kernel[1][0] +
                zeropad[i + 1][j + 1] * kernel[1][1] +
                zeropad[i + 1][j + 2] * kernel[1][2] +
                zeropad[i + 2][j    ] * kernel[2][0] +
                zeropad[i + 2][j + 1] * kernel[2][1] +
                zeropad[i + 2][j + 2] * kernel[2][2];
            out[i / stride][j / stride] += sum;
        }
    }
}
/****************************************************************************************************************************/
void convolution_3_x_3_sparse(float **matrix, csr_t* kernel, float **out, int size) {
    // "Same" convolution with a CSR-encoded kernel and implicit 1-pixel zero
    // padding; results are accumulated (+=) into out, so the caller zeroes it.
    float padded[size + 2][size + 2];
    memset(padded, 0, (size + 2) * (size + 2) * sizeof(float));
    for (int r = 0; r < size; r++)
        for (int c = 0; c < size; c++)
            padded[r + 1][c + 1] = matrix[r][c];
    for (int r = 0; r < size; r++) {
        for (int c = 0; c < size; c++) {
            float acc = 0;
            // Walk every stored (nonzero) kernel entry row by row.
            for (int kr = 0; kr < kernel->nrows; kr++) {
                for (int idx = kernel->rowptr[kr]; idx < kernel->rowptr[kr + 1]; idx++) {
                    acc += kernel->values[idx] * padded[r + kr][c + kernel->colind[idx]];
                }
            }
            out[r][c] += acc;
        }
    }
}
/****************************************************************************************************************************/
void add_bias_and_relu(float **out, float bs, int size) {
    // Add the channel bias to every element of a size x size plane,
    // then apply ReLU (clamp negatives to zero) in place.
    for (int r = 0; r < size; r++) {
        for (int c = 0; c < size; c++) {
            float v = out[r][c] + bs;
            out[r][c] = (v < 0) ? 0.0f : v;
        }
    }
}
/****************************************************************************************************************************/
void add_bias_and_relu_flatten(float *out, float *bs, int size, int relu) {
    // Element-wise bias add on a flat vector; when relu == 1 negatives are
    // clamped to zero afterwards.
    for (int idx = 0; idx < size; idx++) {
        out[idx] += bs[idx];
        if (relu == 1 && out[idx] < 0)
            out[idx] = 0.0f;
    }
}
/****************************************************************************************************************************/
float max_of_4(float a, float b, float c, float d) {
    // Return the largest of the four arguments.
    float best = a;
    if (b > best) best = b;
    if (c > best) best = c;
    if (d > best) best = d;
    return best;
}
/****************************************************************************************************************************/
void maxpooling(float **out, int size) {
    // In-place 2x2 max pooling with stride 2: the pooled (size/2 x size/2)
    // result is written into the top-left corner of out.
    for (int r = 0; r < size; r += 2) {
        for (int c = 0; c < size; c += 2) {
            out[r / 2][c / 2] =
                max_of_4(out[r][c], out[r + 1][c], out[r][c + 1], out[r + 1][c + 1]);
        }
    }
}
/****************************************************************************************************************************/
void flatten(float ***in, float *out, int sh0, int sh1, int sh2) {
    // Copy a sh0 x sh1 x sh2 volume into a flat buffer in C (row-major) order.
    int pos = 0;
    for (int a = 0; a < sh0; a++)
        for (int b = 0; b < sh1; b++)
            for (int c = 0; c < sh2; c++)
                out[pos++] = in[a][b][c];
}
/****************************************************************************************************************************/
void dense(float *in, float **weights, float *out, int sh_in, int sh_out) {
    // Fully connected layer: out[i] = dot(weights[i], in).
    // Output rows are distributed across OpenMP threads.
    int i, j;
    #pragma omp parallel for private(j) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS)
    for (i = 0; i < sh_out; i++) {
        float acc = 0.0f;
        for (j = 0; j < sh_in; j++)
            acc += in[j] * weights[i][j];
        out[i] = acc;
    }
}
/****************************************************************************************************************************/
void softmax(float *out, int sh_out) {
int i;
float max_val, sum;
max_val = out[0];
for (i = 1; i < sh_out; i++) {
if (out[i] > max_val)
max_val = out[i];
}
sum = 0.0;
for (i = 0; i < sh_out; i++) {
out[i] = exp(out[i] - max_val);
sum += out[i];
}
for (i = 0; i < sh_out; i++) {
out[i] /= sum;
}
}
/****************************************************************************************************************************/
void dump_memory_structure_conv(float ***mem, int sh0, int sh1, int sh2) {
    // Print every element of a conv activation volume to stdout,
    // one value per line, in row-major order.
    for (int a = 0; a < sh0; a++)
        for (int b = 0; b < sh1; b++)
            for (int c = 0; c < sh2; c++)
                printf("%.12lf\n", mem[a][b][c]);
}
/****************************************************************************************************************************/
void dump_memory_structure_conv_to_file(float ***mem, int sh0, int sh1, int sh2) {
    /* Dump a conv activation volume to debug_c.txt, one value per line.
     * Fix: fopen was unchecked — fprintf on a NULL stream would crash if the
     * file could not be created. */
    FILE *out;
    int i, j, k;
    out = fopen("debug_c.txt", "w");
    if (out == NULL) {
        printf("Cannot open debug_c.txt for writing\n");
        return;
    }
    for (i = 0; i < sh0; i++) {
        for (j = 0; j < sh1; j++) {
            for (k = 0; k < sh2; k++) {
                fprintf(out, "%.12lf\n", mem[i][j][k]);
            }
        }
    }
    fclose(out);
}
/****************************************************************************************************************************/
void dump_memory_structure_dense(float *mem, int sh0) {
    // Print a flat buffer to stdout, one value per line.
    for (int idx = 0; idx < sh0; idx++)
        printf("%.12lf\n", mem[idx]);
}
/****************************************************************************************************************************/
void dump_memory_structure_dense_to_file(float *mem, int sh0) {
    /* Dump a flat buffer to debug_c.txt, one value per line.
     * Fix: fopen was unchecked — fprintf on a NULL stream would crash if the
     * file could not be created. */
    FILE *out;
    int i;
    out = fopen("debug_c.txt", "w");
    if (out == NULL) {
        printf("Cannot open debug_c.txt for writing\n");
        return;
    }
    for (i = 0; i < sh0; i++) {
        fprintf(out, "%.12lf\n", mem[i]);
    }
    fclose(out);
}
/****************************************************************************************************************************/
void dump_image() {
    // Print the entire global image buffer (channel-major) to stdout,
    // one value per line.
    for (int ch = 0; ch < 3; ch++)
        for (int r = 0; r < SIZE; r++)
            for (int c = 0; c < SIZE; c++)
                printf("%.12lf\n", image[ch][r][c]);
}
/****************************************************************************************************************************/
void output_predictions(FILE *out, int only_convolution, int size, int cur_size) {
// Write the contents of the global mem_block1_dense to `out`.
// only_convolution == 1: dump the raw flattened feature map
// (size * cur_size * cur_size values). Otherwise: print the final
// dshape[1][1] class scores, track the argmax, and report it on stdout.
int i;
int c=0;
if (only_convolution == 1) {
for (i = 0; i < size * cur_size * cur_size; i++) {
fprintf(out, "%g ", mem_block1_dense[i]);
}
}
else {
// NOTE(review): maximum starts at -1, which assumes scores exceed -1
// (true for softmax outputs); c ends up as the 1-based winning index
// and stays 0 if no score beats -1 — confirm callers expect that.
double maximum=-1;
for (i = 0; i < dshape[1][1]; i++) {
fprintf(out, "%g ", mem_block1_dense[i]);
if(mem_block1_dense[i]>maximum){
maximum=mem_block1_dense[i];
c=i+1;
}
}
fprintf(out, "\n");
printf("-------------------------\n");
printf("This image depicts class: %d\n",c);
}
}
/****************************************************************************************************************************/
//void get_VGG16_predict(FILE *out, int only_convolution) {
void get_VGG16_predict(int only_convolution) {
int i, j;
int level, cur_size, cur_stride;
// Init intermediate memory
reset_mem_block(mem_block1);
reset_mem_block(mem_block2);
reset_mem_block_dense(mem_block1_dense);
reset_mem_block_dense(mem_block2_dense);
//-------------------------------------------------------------------------------------------------------------------------------
// Layer 1 (Convolution 3 -> 64)
level = 0;
cur_size = SIZE;
cur_stride = 1;
// printf("(cur_size): %d\n", cur_size);
#pragma omp parallel for private(j) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS)
for (i = 0; i < cshape[level][0]; i++) {
for (j = 0; j < cshape[level][1]; j++) {
#if FIRST_CONV_SPARSE
convolution_3_x_3_sparse(image[j], wc_sparse[level][i][j], mem_block1[i], cur_size);
#else
convolution_3_x_3(image[j], wc[level][i][j], mem_block1[i], cur_size, cur_stride);
#endif // FIRST_CONV_SPARSE
}
add_bias_and_relu(mem_block1[i], bc[level][i], cur_size);
}
// printf("level, cshape[level][0], cur_size, lines: %d, %d, %d, %d\n", level, cshape[level][0], cur_size, cshape[level][0]*cur_size*cur_size);
// flatten(mem_block1, mem_block1_dense, cshape[level][0], cur_size, cur_size);
// output_predictions(out, only_convolution, cshape[level][0], cur_size);
//-------------------------------------------------------------------------------------------------------------------------------
// Layer 2 (Convolution 64 -> 64)
level = 1;
#pragma omp parallel for private(j) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS)
for (i = 0; i < cshape[level][0]; i++) {
for (j = 0; j < cshape[level][1]; j++) {
#if SPARSE_CONVOLUTIONS
convolution_3_x_3_sparse(mem_block1[j], wc_sparse[level][i][j], mem_block2[i], cur_size);
#else
convolution_3_x_3(mem_block1[j], wc[level][i][j], mem_block2[i], cur_size, cur_stride);
#endif // SPARSE_CONVOLUTIONS
}
add_bias_and_relu(mem_block2[i], bc[level][i], cur_size);
}
reset_mem_block(mem_block1);
// printf("level, cshape[level][0], cur_size, lines: %d, %d, %d, %d\n", level, cshape[level][0], cur_size, cshape[level][0]*cur_size*cur_size);
// flatten(mem_block2, mem_block1_dense, cshape[level][0], cur_size, cur_size);
// output_predictions(out, only_convolution, cshape[level][0], cur_size);
//-------------------------------------------------------------------------------------------------------------------------------
// Layer 3 (MaxPooling)
#pragma omp parallel for schedule(dynamic,1) num_threads(NUMBER_OF_THREADS)
for (i = 0; i < cshape[level][0]; i++) {
maxpooling(mem_block2[i], cur_size);
}
cur_size /= 2;
// printf("(cur_size): %d\n", cur_size);
//-------------------------------------------------------------------------------------------------------------------------------
// Layer 4 (Convolution 64 -> 128)
level = 2;
#pragma omp parallel for private(j) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS)
for (i = 0; i < cshape[level][0]; i++) {
for (j = 0; j < cshape[level][1]; j++) {
#if SPARSE_CONVOLUTIONS
convolution_3_x_3_sparse(mem_block2[j], wc_sparse[level][i][j], mem_block1[i], cur_size);
#else
convolution_3_x_3(mem_block2[j], wc[level][i][j], mem_block1[i], cur_size, cur_stride);
#endif // SPARSE_CONVOLUTIONS
}
add_bias_and_relu(mem_block1[i], bc[level][i], cur_size);
}
reset_mem_block(mem_block2);
// printf("level, cshape[level][0], cur_size, lines: %d, %d, %d, %d\n", level, cshape[level][0], cur_size, cshape[level][0]*cur_size*cur_size);
// flatten(mem_block1, mem_block1_dense, cshape[level][0], cur_size, cur_size);
// output_predictions(out, only_convolution, cshape[level][0], cur_size);
//-------------------------------------------------------------------------------------------------------------------------------
// Layer 5 (Convolution 128 -> 128)
level = 3;
#pragma omp parallel for private(j) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS)
for (i = 0; i < cshape[level][0]; i++) {
for (j = 0; j < cshape[level][1]; j++) {
#if SPARSE_CONVOLUTIONS
convolution_3_x_3_sparse(mem_block1[j], wc_sparse[level][i][j], mem_block2[i], cur_size);
#else
convolution_3_x_3(mem_block1[j], wc[level][i][j], mem_block2[i], cur_size, cur_stride);
#endif // SPARSE_CONVOLUTIONS
}
add_bias_and_relu(mem_block2[i], bc[level][i], cur_size);
}
reset_mem_block(mem_block1);
// printf("level, cshape[level][0], cur_size, lines: %d, %d, %d, %d\n", level, cshape[level][0], cur_size, cshape[level][0]*cur_size*cur_size);
// flatten(mem_block2, mem_block1_dense, cshape[level][0], cur_size, cur_size);
// output_predictions(out, only_convolution, cshape[level][0], cur_size);
//-------------------------------------------------------------------------------------------------------------------------------
// Layer 6 (MaxPooling)
#pragma omp parallel for schedule(dynamic,1) num_threads(NUMBER_OF_THREADS)
for (i = 0; i < cshape[level][0]; i++) {
maxpooling(mem_block2[i], cur_size);
}
cur_size /= 2;
// printf("(cur_size): %d\n", cur_size);
//-------------------------------------------------------------------------------------------------------------------------------
// Layer 7 (Convolution 128 -> 256)
level = 4;
#pragma omp parallel for private(j) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS)
for (i = 0; i < cshape[level][0]; i++) {
for (j = 0; j < cshape[level][1]; j++) {
#if SPARSE_CONVOLUTIONS
convolution_3_x_3_sparse(mem_block2[j], wc_sparse[level][i][j], mem_block1[i], cur_size);
#else
convolution_3_x_3(mem_block2[j], wc[level][i][j], mem_block1[i], cur_size, cur_stride);
#endif // SPARSE_CONVOLUTIONS
}
add_bias_and_relu(mem_block1[i], bc[level][i], cur_size);
}
reset_mem_block(mem_block2);
// printf("level, cshape[level][0], cur_size, lines: %d, %d, %d, %d\n", level, cshape[level][0], cur_size, cshape[level][0]*cur_size*cur_size);
// flatten(mem_block1, mem_block1_dense, cshape[level][0], cur_size, cur_size);
// output_predictions(out, only_convolution, cshape[level][0], cur_size);
//-------------------------------------------------------------------------------------------------------------------------------
// Layer 8 (Convolution 256 -> 256)
level = 5;
#pragma omp parallel for private(j) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS)
for (i = 0; i < cshape[level][0]; i++) {
for (j = 0; j < cshape[level][1]; j++) {
#if SPARSE_CONVOLUTIONS
convolution_3_x_3_sparse(mem_block1[j], wc_sparse[level][i][j], mem_block2[i], cur_size);
#else
convolution_3_x_3(mem_block1[j], wc[level][i][j], mem_block2[i], cur_size, cur_stride);
#endif // SPARSE_CONVOLUTIONS
}
add_bias_and_relu(mem_block2[i], bc[level][i], cur_size);
}
reset_mem_block(mem_block1);
// printf("level, cshape[level][0], cur_size, lines: %d, %d, %d, %d\n", level, cshape[level][0], cur_size, cshape[level][0]*cur_size*cur_size);
// flatten(mem_block2, mem_block1_dense, cshape[level][0], cur_size, cur_size);
// output_predictions(out, only_convolution, cshape[level][0], cur_size);
//-------------------------------------------------------------------------------------------------------------------------------
// Layer 9 (Convolution 256 -> 256)
level = 6;
#pragma omp parallel for private(j) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS)
for (i = 0; i < cshape[level][0]; i++) {
for (j = 0; j < cshape[level][1]; j++) {
#if SPARSE_CONVOLUTIONS
convolution_3_x_3_sparse(mem_block2[j], wc_sparse[level][i][j], mem_block1[i], cur_size);
#else
convolution_3_x_3(mem_block2[j], wc[level][i][j], mem_block1[i], cur_size, cur_stride);
#endif // SPARSE_CONVOLUTIONS
}
add_bias_and_relu(mem_block1[i], bc[level][i], cur_size);
}
reset_mem_block(mem_block2);
// printf("level, cshape[level][0], cur_size, lines: %d, %d, %d, %d\n", level, cshape[level][0], cur_size, cshape[level][0]*cur_size*cur_size);
// flatten(mem_block1, mem_block1_dense, cshape[level][0], cur_size, cur_size);
// output_predictions(out, only_convolution, cshape[level][0], cur_size);
//-------------------------------------------------------------------------------------------------------------------------------
// Layer 10 (MaxPooling)
#pragma omp parallel for schedule(dynamic,1) num_threads(NUMBER_OF_THREADS)
for (i = 0; i < cshape[level][0]; i++) {
maxpooling(mem_block1[i], cur_size);
}
cur_size /= 2;
// printf("(cur_size): %d\n", cur_size);
//-------------------------------------------------------------------------------------------------------------------------------
// Layer 11 (Convolution 256 -> 512)
level = 7;
#pragma omp parallel for private(j) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS)
for (i = 0; i < cshape[level][0]; i++) {
for (j = 0; j < cshape[level][1]; j++) {
#if SPARSE_CONVOLUTIONS
convolution_3_x_3_sparse(mem_block1[j], wc_sparse[level][i][j], mem_block2[i], cur_size);
#else
convolution_3_x_3(mem_block1[j], wc[level][i][j], mem_block2[i], cur_size, cur_stride);
#endif // SPARSE_CONVOLUTIONS
}
add_bias_and_relu(mem_block2[i], bc[level][i], cur_size);
}
reset_mem_block(mem_block1);
// printf("level, cshape[level][0], cur_size, lines: %d, %d, %d, %d\n", level, cshape[level][0], cur_size, cshape[level][0]*cur_size*cur_size);
// flatten(mem_block2, mem_block1_dense, cshape[level][0], cur_size, cur_size);
// output_predictions(out, only_convolution, cshape[level][0], cur_size);
//-------------------------------------------------------------------------------------------------------------------------------
// Layer 12 (Convolution 512 -> 512)
level = 8;
#pragma omp parallel for private(j) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS)
for (i = 0; i < cshape[level][0]; i++) {
for (j = 0; j < cshape[level][1]; j++) {
#if SPARSE_CONVOLUTIONS
convolution_3_x_3_sparse(mem_block2[j], wc_sparse[level][i][j], mem_block1[i], cur_size);
#else
convolution_3_x_3(mem_block2[j], wc[level][i][j], mem_block1[i], cur_size, cur_stride);
#endif // SPARSE_CONVOLUTIONS
}
add_bias_and_relu(mem_block1[i], bc[level][i], cur_size);
}
reset_mem_block(mem_block2);
// printf("level, cshape[level][0], cur_size, lines: %d, %d, %d, %d\n", level, cshape[level][0], cur_size, cshape[level][0]*cur_size*cur_size);
// flatten(mem_block1, mem_block1_dense, cshape[level][0], cur_size, cur_size);
// output_predictions(out, only_convolution, cshape[level][0], cur_size);
//-------------------------------------------------------------------------------------------------------------------------------
// Layer 13 (Convolution 512 -> 512)
level = 9;
#pragma omp parallel for private(j) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS)
for (i = 0; i < cshape[level][0]; i++) {
for (j = 0; j < cshape[level][1]; j++) {
#if SPARSE_CONVOLUTIONS
convolution_3_x_3_sparse(mem_block1[j], wc_sparse[level][i][j], mem_block2[i], cur_size);
#else
convolution_3_x_3(mem_block1[j], wc[level][i][j], mem_block2[i], cur_size, cur_stride);
#endif // SPARSE_CONVOLUTIONS
}
add_bias_and_relu(mem_block2[i], bc[level][i], cur_size);
}
reset_mem_block(mem_block1);
// printf("level, cshape[level][0], cur_size, lines: %d, %d, %d, %d\n", level, cshape[level][0], cur_size, cshape[level][0]*cur_size*cur_size);
// flatten(mem_block2, mem_block1_dense, cshape[level][0], cur_size, cur_size);
// output_predictions(out, only_convolution, cshape[level][0], cur_size);
//-------------------------------------------------------------------------------------------------------------------------------
// Layer 14 (MaxPooling)
#pragma omp parallel for schedule(dynamic,1) num_threads(NUMBER_OF_THREADS)
for (i = 0; i < cshape[level][0]; i++) {
maxpooling(mem_block2[i], cur_size);
}
cur_size /= 2;
// printf("(cur_size): %d\n", cur_size);
//-------------------------------------------------------------------------------------------------------------------------------
// Layer 15 (Convolution 512 -> 512)
level = 10;
#pragma omp parallel for private(j) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS)
for (i = 0; i < cshape[level][0]; i++) {
for (j = 0; j < cshape[level][1]; j++) {
#if SPARSE_CONVOLUTIONS
convolution_3_x_3_sparse(mem_block2[j], wc_sparse[level][i][j], mem_block1[i], cur_size);
#else
convolution_3_x_3(mem_block2[j], wc[level][i][j], mem_block1[i], cur_size, cur_stride);
#endif // SPARSE_CONVOLUTIONS
}
add_bias_and_relu(mem_block1[i], bc[level][i], cur_size);
}
reset_mem_block(mem_block2);
// printf("level, cshape[level][0], cur_size, lines: %d, %d, %d, %d\n", level, cshape[level][0], cur_size, cshape[level][0]*cur_size*cur_size);
// flatten(mem_block1, mem_block1_dense, cshape[level][0], cur_size, cur_size);
// output_predictions(out, only_convolution, cshape[level][0], cur_size);
//-------------------------------------------------------------------------------------------------------------------------------
// Layer 16 (Convolution 512 -> 512)
level = 11;
#pragma omp parallel for private(j) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS)
for (i = 0; i < cshape[level][0]; i++) {
for (j = 0; j < cshape[level][1]; j++) {
#if SPARSE_CONVOLUTIONS
convolution_3_x_3_sparse(mem_block1[j], wc_sparse[level][i][j], mem_block2[i], cur_size);
#else
convolution_3_x_3(mem_block1[j], wc[level][i][j], mem_block2[i], cur_size, cur_stride);
#endif // SPARSE_CONVOLUTIONS
}
add_bias_and_relu(mem_block2[i], bc[level][i], cur_size);
}
reset_mem_block(mem_block1);
// printf("level, cshape[level][0], cur_size, lines: %d, %d, %d, %d\n", level, cshape[level][0], cur_size, cshape[level][0]*cur_size*cur_size);
// flatten(mem_block2, mem_block1_dense, cshape[level][0], cur_size, cur_size);
// output_predictions(out, only_convolution, cshape[level][0], cur_size);
//-------------------------------------------------------------------------------------------------------------------------------
// Layer 17 (Convolution 512 -> 512)
level = 12;
#pragma omp parallel for private(j) schedule(dynamic,1) num_threads(NUMBER_OF_THREADS)
for (i = 0; i < cshape[level][0]; i++) {
for (j = 0; j < cshape[level][1]; j++) {
#if SPARSE_CONVOLUTIONS
convolution_3_x_3_sparse(mem_block2[j], wc_sparse[level][i][j], mem_block1[i], cur_size);
#else
convolution_3_x_3(mem_block2[j], wc[level][i][j], mem_block1[i], cur_size, cur_stride);
#endif // SPARSE_CONVOLUTIONS
}
add_bias_and_relu(mem_block1[i], bc[level][i], cur_size);
}
reset_mem_block(mem_block2);
// printf("level, cshape[level][0], cur_size, lines: %d, %d, %d, %d\n", level, cshape[level][0], cur_size, cshape[level][0]*cur_size*cur_size);
// flatten(mem_block1, mem_block1_dense, cshape[level][0], cur_size, cur_size);
// output_predictions(out, only_convolution, cshape[level][0], cur_size);
//-------------------------------------------------------------------------------------------------------------------------------
// Layer 18 (MaxPooling)
#pragma omp parallel for schedule(dynamic,1) num_threads(NUMBER_OF_THREADS)
for (i = 0; i < cshape[level][0]; i++) {
maxpooling(mem_block1[i], cur_size);
}
cur_size /= 2;
// printf("(cur_size): %d\n", cur_size);
//-------------------------------------------------------------------------------------------------------------------------------
// Layer 19 (Flatten)
// printf("level, cshape[level][0], cur_size, lines: %d, %d, %d, %d\n", level, cshape[level][0], cur_size, cshape[level][0]*cur_size*cur_size);
flatten(mem_block1, mem_block1_dense, cshape[level][0], cur_size, cur_size);
// output_predictions(out, only_convolution, cshape[level][0], cur_size);
if (only_convolution == 1) {
return;
}
//-------------------------------------------------------------------------------------------------------------------------------
// Layer 20 (Dense)
level = 0;
dense(mem_block1_dense, wd[level], mem_block2_dense, dshape[level][0], dshape[level][1]);
add_bias_and_relu_flatten(mem_block2_dense, bd[level], dshape[level][1], 1);
// printf("level, dshape[level][0], cur_size, lines: %d, %d, %d, %d\n", level, dshape[level][0], cur_size, 1);
// output_predictions(out, only_convolution, dshape[level][0], cur_size);
reset_mem_block_dense(mem_block1_dense);
// Layer 21 (Dense)
level = 1;
dense(mem_block2_dense, wd[level], mem_block1_dense, dshape[level][0], dshape[level][1]);
add_bias_and_relu_flatten(mem_block1_dense, bd[level], dshape[level][1], 1);
softmax(mem_block1_dense, dshape[level][1]);
// dump_memory_structure_dense_to_file(mem_block2_dense, dshape[level][1]);
return;
}
/****************************************************************************************************************************/
/*
 * Strip leading and trailing whitespace from a string, in place.
 * Returns a pointer into the original buffer (past any leading blanks);
 * a trailing '\0' is written after the last non-space character.
 * An all-whitespace input yields a pointer to an empty string.
 */
char *trimwhitespace(char *str){
	// Skip over any leading whitespace.
	while (isspace((unsigned char)*str))
		str++;

	if (*str == 0)
		return str; // the whole string was whitespace

	// Find the last non-space character and terminate right after it.
	char *last = str + strlen(str) - 1;
	while (last > str && isspace((unsigned char)*last))
		last--;
	last[1] = 0;

	return str;
}
/****************************************************************************************************************************/
/*
 * Entry point: loads the VGG16 weights, then runs inference on every image
 * listed (one path per line) in the image list file, appending predictions
 * to the output file.
 *
 * argv[1] = weights file, argv[2] = image list file, argv[3] = output file,
 * argv[4] (optional, any value) = run the convolutional layers only.
 *
 * Returns 0 on success (or usage error), 1 if a file cannot be opened.
 */
int main(int argc, char *argv[]) {
	FILE *file_list, *results;
	char buf[1024];
	struct timeval tStart, tEnd;
	double deltaTime;
	char *weights_file;
	char *image_list_file;
	char *output_file;
	int lvls = -1;
	int only_convolution = 0;
	//-----------------------------------------------------------------------
	printf("Using %d threads\n", NUMBER_OF_THREADS);
	if (argc != 4 && argc != 5) {
		printf("Usage: <program.exe> <weights file> <images list file> <output file> <only_convolution [optional]>\n");
		return 0;
	}
	weights_file = argv[1];
	image_list_file = argv[2];
	output_file = argv[3];
	if (argc == 5) {
		// Optional 5th argument: only read/run the 13 convolutional layers.
		lvls = 13;
		only_convolution = 1;
	}
	//-----------------------------------------------------------------------
	init_memory();
	file_list = fopen(image_list_file, "r");
	if (file_list == NULL) {
		printf("Check file list location: %s\n", image_list_file);
		return 1;
	}
	results = fopen(output_file, "w");
	if (results == NULL) {
		printf("Couldn't open file for writing: %s\n", output_file);
		fclose(file_list); // FIX: do not leak the already-opened list file
		return 1;
	}
	gettimeofday(&tStart, NULL);
	read_weights(weights_file, lvls);
	gettimeofday(&tEnd, NULL);
	deltaTime = get_seconds(tStart, tEnd);
	printf("Reading weights: %.3lf sec\n", deltaTime);
	// FIX: loop on fgets() instead of !feof(). With the old pattern a failed
	// read at end-of-file left the previous line in buf, so the last image
	// could be processed twice.
	while (fgets(buf, sizeof buf, file_list) != NULL) {
		if (strlen(buf) == 0) {
			break;
		}
		read_image(trimwhitespace(buf));
		gettimeofday(&tStart, NULL);
		get_VGG16_predict(only_convolution);
		gettimeofday(&tEnd, NULL);
		deltaTime = get_seconds(tStart, tEnd);
		printf("Infer image %s: %.3lf sec\n", buf, deltaTime);
		output_predictions(results, only_convolution, 512, 1);
	}
	free_memory();
	fclose(file_list);
	fclose(results); // FIX: results stream was never closed/flushed
	return 0;
}
|
flip_op.h | /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <algorithm>
#include <bitset>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
constexpr size_t dim_bitset_size = 64;
// Primary template: per-device flip kernel. Only the declaration lives here;
// the CPU specialization is defined below (the CUDA one lives elsewhere).
template <typename DeviceContext, typename T>
class FlipKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override;
};
// CPU implementation of the flip operator: out = reverse(x) along the axes
// given by the "axis" attribute (negative axes count from the back).
template <typename T>
class FlipKernel<platform::CPUDeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const Tensor* x = ctx.Input<Tensor>("X");
Tensor* out = ctx.Output<Tensor>("Out");
auto flip_dims = ctx.template Attr<std::vector<int>>("axis");
auto x_dims = x->dims();
const int total_dims = x_dims.size();
// Record which dimensions should be flipped; negative axis values are
// normalized into [0, total_dims).
std::bitset<dim_bitset_size> dim_bitset;
for (size_t i = 0; i < flip_dims.size(); ++i) {
int dim = flip_dims[i];
if (flip_dims[i] < 0) {
dim += total_dims;
}
dim_bitset[dim] = true;
}
auto x_strides = pten::stride(x_dims);
auto numel = x->numel();
const T* x_data = x->data<T>();
T* out_data = out->mutable_data<T>(ctx.GetPlace());
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
// For each output element, decompose the linear index into per-dimension
// coordinates via the strides; mirror the coordinate on flipped dims to
// compute the source offset.
for (int64_t i = 0; i < numel; ++i) {
int64_t cur_indices = i;
int64_t rem = 0;
int64_t dst_offset = 0;
for (int d = 0; d < total_dims; ++d) {
int64_t temp = cur_indices;
cur_indices = cur_indices / x_strides[d];
rem = temp - cur_indices * x_strides[d];
dst_offset += dim_bitset[d]
? (x_dims[d] - 1 - cur_indices) * x_strides[d]
: cur_indices * x_strides[d];
cur_indices = rem;
}
out_data[i] = x_data[dst_offset];
}
}
};
} // namespace operators
} // namespace paddle
|
multind.c | /* Copyright 2013-2015 The Regents of the University of California.
* Copyright 2016-2017. Martin Uecker.
* Copyright 2017. Intel Corporation.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2012-2017 Martin Uecker <martin.uecker@med.uni-goettingen.de>
* 2013 Frank Ong <frankong@berkeley.edu>
* 2017 Michael J. Anderson <michael.j.anderson@intel.com>
*
* Generic operations on multi-dimensional arrays. Most functions
* come in two flavours:
*
* 1. A basic version which takes the number of dimensions, an array
* of long integers specifying the size of each dimension, the pointers
* to the data, and the size of each element and other required parameters.
* The data is assumed to be stored in column-major format.
*
* 2. An extended version which takes an array of long integers which
* specifies the strides for each argument.
*
* All functions should work on CPU and GPU and md_copy can be used
* to copy between CPU and GPU.
*
*/
#define _GNU_SOURCE
#include <string.h>
#include <assert.h>
#include <stdbool.h>
#include <alloca.h>
#include <strings.h>
#include "misc/misc.h"
#include "misc/types.h"
#include "misc/debug.h"
#include "num/optimize.h"
#ifdef USE_CUDA
#include "num/gpuops.h"
#endif
#include "multind.h"
/**
 * Generic function which loops over all dimensions of a set of
 * multi-dimensional arrays and calls a given function for each position.
 *
 * @param C number of arrays
 * @param D number of dimensions
 * @param dim size of each dimension
 * @param str per-array strides (in bytes, since ptr is void*)
 * @param ptr per-array base pointers
 * @param data opaque argument passed through to fun
 * @param fun callback invoked once per position with the C shifted pointers
 */
void md_nary(unsigned int C, unsigned int D, const long dim[D], const long* str[C], void* ptr[C], void* data, md_nary_fun_t fun)
{
if (0 == D) {
fun(data, ptr);
return;
}
// Iterate over the outermost dimension and recurse on the remaining ones.
for (long i = 0; i < dim[D - 1]; i++) {
void* moving_ptr[C];
for (unsigned int j = 0; j < C; j++)
moving_ptr[j] = ptr[j] + i * str[j][D - 1];
md_nary(C, D - 1, dim, str, moving_ptr, data, fun);
}
}
/**
 * Generic function which loops over all dimensions of a set of
 * multi-dimensional arrays and calls a given function for each position.
 * This function tries to parallelize (OpenMP) over the dimensions
 * indicated by set bits in flags.
 */
void md_parallel_nary(unsigned int C, unsigned int D, const long dim[D], unsigned long flags, const long* str[C], void* ptr[C], void* data, md_nary_fun_t fun)
{
if (0 == flags) {
md_nary(C, D, dim, str, ptr, data, fun);
return;
}
// dimc: the non-parallel dimensions (parallel ones collapsed to 1).
long dimc[D];
md_select_dims(D, ~flags, dimc, dim);
// Collect all parallel dimensions
int nparallel = 0;
int parallel_b[D];
long parallel_dim[D];
long total_iterations = 1L;
while (0 != flags) {
// Extract the lowest set bit (ffsl returns a 1-based bit index).
int b = ffsl(flags & -flags) - 1;
assert(MD_IS_SET(flags, b));
flags = MD_CLEAR(flags, b);
debug_printf(DP_DEBUG4, "Parallelize: %d\n", dim[b]);
parallel_b[nparallel] = b;
parallel_dim[nparallel] = dim[b];
total_iterations *= parallel_dim[nparallel];
nparallel++;
}
// One flat parallel loop over the product of all parallel dimensions.
#pragma omp parallel for
for (long i = 0; i < total_iterations; i++) {
// Recover place in parallel iteration space
long iter_i[D];
long ii = i;
for (int p = nparallel - 1; p >= 0; p--) {
iter_i[p] = ii % parallel_dim[p];
ii /= parallel_dim[p];
}
// Shift each base pointer to this iteration's slice, then run the
// sequential loop over the remaining (non-parallel) dimensions.
void* moving_ptr[C];
for (unsigned int j = 0; j < C; j++) {
moving_ptr[j] = ptr[j];
for(int p = 0; p < nparallel; p++)
moving_ptr[j] += iter_i[p] * str[j][parallel_b[p]];
}
md_nary(C, D, dimc, str, moving_ptr, data, fun);
}
}
// Recursive worker for md_parallel_loop: iterates dimension D-1 and recurses.
// A dimension is run as an OpenMP parallel loop only if its bit is set in
// flags and its extent is > 1.
static void md_parallel_loop_r(unsigned int D, unsigned int N, const long dim[static N], unsigned int flags, const long pos[static N], void* data, md_loop_fun_t fun)
{
if (0 == D) {
fun(data, pos);
return;
}
D--;
// we need to make a copy because firstprivate needs to see
// an array instead of a pointer
long pos_copy[N];
for (unsigned int i = 0; i < N; i++)
pos_copy[i] = pos[i];
#pragma omp parallel for firstprivate(pos_copy) if ((1 < dim[D]) && (flags & (1 << D)))
for (int i = 0; i < dim[D]; i++) {
pos_copy[D] = i;
md_parallel_loop_r(D, N, dim, flags, pos_copy, data, fun);
}
}
/**
 * Generic function which loops over all dimensions and calls a given
 * function passing the current indices as argument.
 *
 * Runs fun(data, position) for all position in dim; dimensions whose
 * bit is set in flags may be executed in parallel.
 */
void md_parallel_loop(unsigned int D, const long dim[static D], unsigned long flags, void* data, md_loop_fun_t fun)
{
long pos[D];
md_parallel_loop_r(D, D, dim, flags, pos, data, fun);
}
// Recursive worker for md_loop: iterate dimension D-1, recursing on the rest;
// pos carries the current index vector and is fully set when D reaches 0.
static void md_loop_r(unsigned int D, const long dim[D], long pos[D], void* data, md_loop_fun_t fun)
{
if (0 == D) {
fun(data, pos);
return;
}
D--;
for (pos[D] = 0; pos[D] < dim[D]; pos[D]++)
md_loop_r(D, dim, pos, data, fun);
}
/**
 * Generic function which loops over all dimensions and calls a given
 * function passing the current indices as argument.
 *
 * Runs fun(data, position) for all position in dim (sequentially).
 */
void md_loop(unsigned int D, const long dim[D], void* data, md_loop_fun_t fun)
{
long pos[D];
md_loop_r(D, dim, pos, data, fun);
}
/**
 * Computes the next position (odometer-style increment over the dimensions
 * selected by flags, lowest dimension fastest). Returns true until the last
 * index wraps back to all zeros.
 */
bool md_next(unsigned int D, const long dims[D], unsigned long flags, long pos[D])
{
if (0 == D--)
return false;
// First try to advance the lower dimensions.
if (md_next(D, dims, flags, pos))
return true;
if (MD_IS_SET(flags, D)) {
assert((0 <= pos[D]) && (pos[D] < dims[D]));
if (++pos[D] < dims[D])
return true;
// This dimension overflowed: reset and carry into the next one.
pos[D] = 0;
}
return false;
}
/**
 * Returns the (byte) offset for a position in a multi-dimensional array:
 *
 * strides[0] * position[0] + ... + strides[D-1] * position[D-1]
 *
 * @param D number of dimensions
 * @param strides stride of each dimension
 * @param position index vector
 */
long md_calc_offset(unsigned int D, const long strides[D], const long position[D])
{
	long off = 0;
	unsigned int d = D;

	while (d-- > 0)
		off += strides[d] * position[d];

	return off;
}
// Helper: multiply `size` by the product of the first D dimension extents
// (accumulated in size_t, as in the original recursion).
static long md_calc_size_r(unsigned int D, const long dim[D], size_t size)
{
	for (unsigned int i = D; i > 0; i--)
		size *= dim[i - 1];

	return size;
}
/**
* Returns the number of elements
*
* return dim[0]*dim[1]*...*dim[D-1]
*
* @param D number of dimensions
* @param dim dimensions array
*/
long md_calc_size(unsigned int D, const long dim[D])
{
return md_calc_size_r(D, dim, 1);
}
/**
 * Computes the number of smallest dimensions which are stored
 * contiguously, i.e. can be accessed as one block of memory.
 *
 * Starting from the innermost dimension, a dimension counts as contiguous
 * if its stride equals the accumulated block size (or its extent is 1).
 */
unsigned int md_calc_blockdim(unsigned int D, const long dim[D], const long str[D], size_t size)
{
	long expected = size;
	unsigned int nd = 0;

	while (nd < D) {

		if ((dim[nd] != 1) && (str[nd] != expected))
			break;

		expected *= dim[nd];
		nd++;
	}

	return nd;
}
/**
 * Copy dimensions specified by flags and set remaining dimensions to 1
 *
 * odims = [ 1 idims[1] idims[2] 1 1 idims[5] ]
 *
 * @param D number of dimensions
 * @param flags bitmask specifying which dimensions to copy
 * @param odims output dimensions
 * @param idims input dimensions
 */
void md_select_dims(unsigned int D, unsigned long flags, long odims[D], const long idims[D])
{
md_copy_dims(D, odims, idims);
// Collapse every unselected dimension to a singleton.
for (unsigned int i = 0; i < D; i++)
if (!MD_IS_SET(flags, i))
odims[i] = 1;
}
/**
 * Copy dimensions:
 *
 * odims[i] = idims[i]
 */
void md_copy_dims(unsigned int D, long odims[D], const long idims[D])
{
	for (unsigned int i = 0; i < D; i++)
		odims[i] = idims[i];
}
/**
 * Copy strides:
 *
 * ostrs[i] = istrs[i]
 */
void md_copy_strides(unsigned int D, long ostrs[D], const long istrs[D])
{
	for (unsigned int i = 0; i < D; i++)
		ostrs[i] = istrs[i];
}
/**
 * Set all dimensions to the given value:
 *
 * dims[i] = val
 */
void md_set_dims(unsigned int D, long dims[D], long val)
{
	unsigned int i = D;

	while (i-- > 0)
		dims[i] = val;
}
/**
 * Returns whether @param pos is a valid index into an array of
 * dimensions @param dims (i.e. 0 <= pos[i] < dims[i] for all i).
 */
bool md_is_index(unsigned int D, const long pos[D], const long dims[D])
{
	for (unsigned int i = 0; i < D; i++)
		if ((pos[i] < 0) || (pos[i] >= dims[i]))
			return false;

	return true;
}
/**
 * Return whether any dimension NOT selected by flags has extent > 1
 * (i.e. the product of the unselected extents differs from 1).
 */
bool md_check_dimensions(unsigned int N, const long dims[N], unsigned int flags)
{
long d[N];
// Keep only the unselected dimensions, then check their total size.
md_select_dims(N, ~flags, d, dims);
return (1 != md_calc_size(N, d));
}
/*
 * Compute a bitmask of the non-trivial (extent > 1) dimensions.
 */
unsigned long md_nontriv_dims(unsigned int D, const long dims[D])
{
unsigned long flags = 0;
for (unsigned int i = 0; i < D; i++)
if (dims[i] > 1)
flags = MD_SET(flags, i);
return flags;
}
/**
 * Set all dimensions to one:
 *
 * dims[i] = 1
 */
void md_singleton_dims(unsigned int D, long dims[D])
{
	long* end = dims + D;

	for (long* p = dims; p < end; p++)
		*p = 1;
}
/**
 * Set all strides to zero (the stride of a singleton dimension):
 *
 * strs[i] = 0
 */
void md_singleton_strides(unsigned int D, long strs[D])
{
for (unsigned int i = 0; i < D; i++)
strs[i] = 0;
}
/**
 * Check dimensions for compatibility. Dimensions must be equal or,
 * where indicated by a set bit in flags, one must be equal to one
 * in at least one of the arguments.
 */
bool md_check_compat(unsigned int D, unsigned long flags, const long dim1[D], const long dim2[D])
{
if (0 == D)
return true;
D--;
// Dimension D is compatible if equal, or (when flagged) either is 1.
if ((dim1[D] == dim2[D]) || (MD_IS_SET(flags, D) && ((1 == dim1[D]) || (1 == dim2[D]))))
return md_check_compat(D, flags, dim1, dim2);
return false;
}
// Merge two compatible dimension vectors: for each dimension take the
// non-singleton extent (dims1 wins when both are non-singleton and equal).
void md_merge_dims(unsigned int N, long out_dims[N], const long dims1[N], const long dims2[N])
{
assert(md_check_compat(N, ~0, dims1, dims2));
for (unsigned int i = 0; i < N; i++)
out_dims[i] = (1 == dims1[i]) ? dims2[i] : dims1[i];
}
/**
 * dim1 must be bounded by dim2 (dim1[i] <= dim2[i]) wherever a bit is set.
 */
bool md_check_bounds(unsigned int D, unsigned long flags, const long dim1[D], const long dim2[D])
{
if (0 == D--)
return true;
// Unflagged dimensions are not checked.
if (!MD_IS_SET(flags, D) || (dim1[D] <= dim2[D]))
return md_check_bounds(D, flags, dim1, dim2);
return false;
}
/**
 * Set the output's flagged dimensions to the minimum of the two input
 * dimensions. NOTE: unflagged entries of odims are left untouched.
 *
 * odims = [ MIN(idims1[0],idims2[0] ... MIN(idims1[D-1],idims2[D-1]) ]
 *
 * @param D number of dimensions
 * @param flags bitmask specifying which dimensions to minimize
 * @param odims output dimensions
 * @param idims1 input 1 dimensions
 * @param idims2 input 2 dimensions
 */
void md_min_dims(unsigned int D, unsigned long flags, long odims[D], const long idims1[D], const long idims2[D])
{
for (unsigned int i = 0; i < D; i++)
if (MD_IS_SET(flags, i))
odims[i] = MIN(idims1[i], idims2[i]);
}
/**
 * Set the output's flagged dimensions to the maximum of the two input
 * dimensions. NOTE: unflagged entries of odims are left untouched.
 *
 * odims = [ MAX(idims1[0],idims2[0] ... MAX(idims1[D-1],idims2[D-1]) ]
 *
 * @param D number of dimensions
 * @param flags bitmask specifying which dimensions to maximize
 * @param odims output dimensions
 * @param idims1 input 1 dimensions
 * @param idims2 input 2 dimensions
 */
void md_max_dims(unsigned int D, unsigned long flags, long odims[D], const long idims1[D], const long idims2[D])
{
for (unsigned int i = 0; i < D; i++)
if (MD_IS_SET(flags, i))
odims[i] = MAX(idims1[i], idims2[i]);
}
// Payload passed to the nary_clear/nary_copy callbacks: element size and
// (with CUDA) whether the operation runs on device memory.
struct data_s {
size_t size;
#ifdef USE_CUDA
bool use_gpu;
#endif
};
// Callback for md_clear2: zero one contiguous chunk (on GPU via cuda_clear
// when the data lives on the device, otherwise with memset).
static void nary_clear(struct nary_opt_data_s* opt_data, void* ptr[])
{
struct data_s* data = opt_data->data_ptr;
size_t size = data->size * opt_data->size;
#ifdef USE_CUDA
if (data->use_gpu) {
cuda_clear(size, ptr[0]);
return;
}
#endif
memset(ptr[0], 0, size);
}
/**
 * Zero out array (with strides)
 *
 * ptr[i] = 0
 */
void md_clear2(unsigned int D, const long dim[D], const long str[D], void* ptr, size_t size)
{
const long (*nstr[1])[D] = { (const long (*)[D])str };
#ifdef USE_CUDA
struct data_s data = { size, cuda_ondevice(ptr) };
#else
struct data_s data = { size };
#endif
// Dimensions with stride 0 alias the same memory; collapsing them to 1
// avoids clearing the same bytes repeatedly.
unsigned long flags = 0;
for (unsigned int i = 0; i < D; i++)
if (0 == str[i])
flags |= MD_BIT(i);
long dim2[D];
md_select_dims(D, ~flags, dim2, dim);
optimized_nop(1, MD_BIT(0), D, dim2, nstr, (void*[1]){ ptr }, (size_t[1]){ size }, nary_clear, &data);
}
/**
 * Calculate strides in column-major format
 * (smallest index is sequential); singleton dimensions get stride 0.
 *
 * @param D number of dimensions
 * @param str computed strides (output, also returned)
 * @param dim array of dimensions
 * @param size size of a single element
 */
long* md_calc_strides(unsigned int D, long str[D], const long dim[D], size_t size)
{
	long dist = size;

	for (unsigned int d = 0; d < D; d++) {

		str[d] = (dim[d] == 1) ? 0 : dist;
		dist *= dim[d];
	}

	return str;
}
/**
 * Zero out array (without strides)
 *
 * ptr[i] = 0
 *
 * @param D number of dimensions
 * @param dim dimensions array
 * @param ptr pointer to data to clear
 * @param size sizeof() one element
 */
void md_clear(unsigned int D, const long dim[D], void* ptr, size_t size)
{
md_clear2(D, dim, MD_STRIDES(D, dim, size), ptr, size);
}
// Arguments for the CUDA 2D strided-copy callback below.
struct strided_copy_s {
long sizes[2];
long ostr;
long istr;
};
#ifdef USE_CUDA
// Callback for md_copy2's GPU fast path: one 2D strided device copy.
static void nary_strided_copy(void* _data, void* ptr[])
{
struct strided_copy_s* data = _data;
cuda_memcpy_strided(data->sizes, data->ostr, ptr[0], data->istr, ptr[1]);
}
#endif
// Callback for md_copy2: copy one contiguous chunk (device copy when either
// side is GPU memory, memcpy otherwise).
static void nary_copy(struct nary_opt_data_s* opt_data, void* ptr[])
{
struct data_s* data = opt_data->data_ptr;
size_t size = data->size * opt_data->size;
#ifdef USE_CUDA
if (data->use_gpu) {
cuda_memcpy(size, ptr[0], ptr[1]);
return;
}
#endif
memcpy(ptr[0], ptr[1], size);
}
/**
 * Copy array (with strides)
 *
 * optr[i] = iptr[i]
 */
void md_copy2(unsigned int D, const long dim[D], const long ostr[D], void* optr, const long istr[D], const void* iptr, size_t size)
{
#if 0
// this is for a fun comparison between our copy engine and FFTW
extern void fft2(unsigned int D, const long dim[D], unsigned int flags,
const long ostr[D], void* optr, const long istr[D], const void* iptr);
if (sizeof(complex float) == size)
fft2(D, dim, 0, ostr, optr, istr, iptr);
#endif
#ifndef USE_CUDA
struct data_s data = { size };
#else
struct data_s data = { size, cuda_ondevice(optr) || cuda_ondevice(iptr) };
#if 1
// GPU fast path: try to collapse the copy into 2D strided device copies.
long tostr[D];
long tistr[D];
long tdims[D];
md_copy_strides(D, tostr, ostr);
md_copy_strides(D, tistr, istr);
md_copy_dims(D, tdims, dim);
long (*nstr2[2])[D] = { &tostr, &tistr };
int ND = optimize_dims(2, D, tdims, nstr2);
size_t sizes[2] = { size, size };
int skip = min_blockdim(2, ND, tdims, nstr2, sizes);
if (data.use_gpu && (ND - skip == 1)) {
// FIXME: the test was > 0 which would optimize transpose
// but failes in the second cuda_memcpy_strided call
// probably because of alignment restrictions
const long* nstr[2] = { *nstr2[0] + skip, *nstr2[1] + skip };
void* nptr[2] = { optr, (void*)iptr };
long sizes[2] = { md_calc_size(skip, tdims) * size, tdims[skip] };
struct strided_copy_s data = { { sizes[0], sizes[1] } , (*nstr2[0])[skip], (*nstr2[1])[skip] };
skip++;
md_nary(2, ND - skip, tdims + skip , nstr, nptr, &data, &nary_strided_copy);
return;
}
#endif
#endif
// Generic path: let the optimizer merge dimensions and dispatch chunks.
const long (*nstr[2])[D] = { (const long (*)[D])ostr, (const long (*)[D])istr };
optimized_nop(2, MD_BIT(0), D, dim, nstr, (void*[2]){ optr, (void*)iptr }, (size_t[2]){ size, size }, nary_copy, &data);
}
/**
 * Copy array (without strides)
 *
 * optr[i] = iptr[i]
 */
void md_copy(unsigned int D, const long dim[D], void* optr, const void* iptr, size_t size)
{
long str[D];
md_calc_strides(D, str, dim, size);
md_copy2(D, dim, str, optr, str, iptr, size);
}
#ifdef USE_CUDA
// copied from flpmath.c
// Move a single scalar to GPU memory (caller owns and must md_free it).
static void* gpu_constant(const void* vp, size_t size)
{
return md_gpu_move(1, (long[1]){ 1 }, vp, size);
}
#endif
/**
 * Fill array with the value pointed to by iptr (with strides)
 *
 * ptr[i] = iptr[0]
 */
void md_fill2(unsigned int D, const long dim[D], const long str[D], void* ptr, const void* iptr, size_t size)
{
#ifdef USE_CUDA
// If the destination is on the GPU but the constant is not, stage the
// constant on the device first and recurse.
if (cuda_ondevice(ptr) && (!cuda_ondevice(iptr))) {
void* giptr = gpu_constant(iptr, size);
md_fill2(D, dim, str, ptr, giptr, size);
md_free(giptr);
return;
}
#endif
// Broadcast: copy with all-zero input strides so iptr[0] is read everywhere.
long istr[D];
md_singleton_strides(D, istr);
md_copy2(D, dim, str, ptr, istr, iptr, size);
}
/**
 * Fill array with the value pointed to by iptr (without strides)
 *
 * ptr[i] = iptr[0]
 */
void md_fill(unsigned int D, const long dim[D], void* ptr, const void* iptr, size_t size)
{
md_fill2(D, dim, MD_STRIDES(D, dim, size), ptr, iptr, size);
}
// Arguments for nary_swap: number of arrays M and element size.
struct swap_s {
unsigned int M;
size_t size;
};
// Callback for md_circular_swap2: cyclically rotate one chunk between the
// M arrays (ptr[0] <- ptr[1] <- ... <- ptr[M-1] <- old ptr[0]).
static void nary_swap(struct nary_opt_data_s* opt_data, void* ptr[])
{
const struct swap_s* data = opt_data->data_ptr;
size_t size = data->size * opt_data->size;
unsigned int M = data->M;
// Small chunks go on the stack, larger ones on the heap.
char* tmp = (size < 32) ? alloca(size) : xmalloc(size);
#ifdef USE_CUDA
assert(!cuda_ondevice(ptr[0]));
assert(!cuda_ondevice(ptr[1]));
#endif
memcpy(tmp, ptr[0], size);
for (unsigned int i = 0; i < M - 1; i++)
memcpy(ptr[i], ptr[i + 1], size);
memcpy(ptr[M - 1], tmp, size);
if (size >= 32)
xfree(tmp);
}
/**
 * Cyclically swap values between a number of arrays (with strides).
 */
void md_circular_swap2(unsigned int M, unsigned int D, const long dims[D], const long* strs[M], void* ptr[M], size_t size)
{
size_t sizes[M];
for (unsigned int i = 0; i < M; i++)
sizes[i] = size;
struct swap_s data = { M, size };
const long (*nstrs[M])[D];
for (unsigned int i = 0; i < M; i++)
nstrs[i] = (const long (*)[D])strs[i];
// All M arrays are written, so every array is marked as an output.
optimized_nop(M, (1 << M) - 1, D, dims, nstrs, ptr, sizes, nary_swap, &data);
}
/**
 * Cyclically swap values between a number of arrays (without strides).
 */
void md_circular_swap(unsigned M, unsigned int D, const long dims[D], void* ptr[M], size_t size)
{
long strs[M][D];
// All arrays share the same (contiguous) strides.
md_calc_strides(D, strs[0], dims, size);
const long* strp[M];
strp[0] = strs[0];
for (unsigned int i = 1; i < M; i++) {
md_copy_strides(D, strs[i], strs[0]);
strp[i] = strs[i];
}
md_circular_swap2(M, D, dims, strp, ptr, size);
}
/**
 * Swap values between two arrays (with strides)
 *
 * iptr[i] = optr[i] and optr[i] = iptr[i]
 */
void md_swap2(unsigned int D, const long dim[D], const long ostr[D], void* optr, const long istr[D], void* iptr, size_t size)
{
md_circular_swap2(2, D, dim, (const long*[2]){ ostr, istr }, (void*[2]){ optr, iptr }, size);
}
/**
 * Swap values between two arrays (without strides)
 *
 * iptr[i] = optr[i] and optr[i] = iptr[i]
 */
void md_swap(unsigned int D, const long dim[D], void* optr, void* iptr, size_t size)
{
long str[D];
md_calc_strides(D, str, dim, size);
md_swap2(D, dim, str, optr, str, iptr, size);
}
/**
 * Move a block of size dim from position ipos in the input array
 * (dimensions idim) to position opos in the output array (dimensions
 * odim), with explicit strides.
 */
void md_move_block2(unsigned int D, const long dim[D], const long opos[D], const long odim[D], const long ostr[D], void* optr, const long ipos[D], const long idim[D], const long istr[D], const void* iptr, size_t size)
{
// The block must fit inside both arrays at the requested positions.
for (unsigned int i = 0; i < D; i++) {
assert(dim[i] <= odim[i]);
assert(dim[i] <= idim[i]);
assert((0 <= opos[i]) && (opos[i] <= odim[i] - dim[i]));
assert((0 <= ipos[i]) && (ipos[i] <= idim[i] - dim[i]));
}
long ioff = md_calc_offset(D, istr, ipos);
long ooff = md_calc_offset(D, ostr, opos);
md_copy2(D, dim, ostr, optr + ooff, istr, iptr + ioff, size);
}
/**
 * Move a block from an array to another array (without strides).
 */
void md_move_block(unsigned int D, const long dim[D], const long opos[D], const long odim[D], void* optr, const long ipos[D], const long idim[D], const void* iptr, size_t size)
{
md_move_block2(D, dim,
opos, odim, MD_STRIDES(D, odim, size), optr,
ipos, idim, MD_STRIDES(D, idim, size), iptr, size);
}
/**
 * Copy a block from an array to another array (with strides)
 *
 * Block dimensions are min(idim , odim)
 *
 * if idim[d] > odim[d], then optr[i] = iptr[pos + i] for 0 <= i < odim[d]
 *
 * if idim[d] < odim[d], then optr[pos + i] = iptr[i] for 0 <= i < idim[d]
 *
 */
void md_copy_block2(unsigned int D, const long pos[D], const long odim[D], const long ostr[D], void* optr, const long idim[D], const long istr[D], const void* iptr, size_t size)
{
long dim[D];
long ipos[D];
long opos[D];
for (unsigned int i = 0; i < D; i++) {
// pos is only meaningful where the two extents differ.
assert((idim[i] != odim[i]) || (0 == pos[i]));
dim[i] = MIN(odim[i], idim[i]);
ipos[i] = 0;
opos[i] = 0;
// The offset applies to whichever side is larger than the block.
if (idim[i] != dim[i])
ipos[i] = pos[i];
if (odim[i] != dim[i])
opos[i] = pos[i];
}
md_move_block2(D, dim, opos, odim, ostr, optr, ipos, idim, istr, iptr, size);
}
/**
 * Copy a block from an array to another array (without strides)
 *
 * Block dimensions are min(idim , odim)
 *
 * if idim[d] > odim[d], then optr[i] = iptr[pos + i] for 0 <= i < odim[d]
 *
 * if idim[d] < odim[d], then optr[pos + i] = iptr[i] for 0 <= i < idim[d]
 *
 */
void md_copy_block(unsigned int D, const long pos[D], const long odim[D], void* optr, const long idim[D], const void* iptr, size_t size)
{
md_copy_block2(D, pos,
odim, MD_STRIDES(D, odim, size), optr,
idim, MD_STRIDES(D, idim, size), iptr, size);
}
/**
 * Resize an array by zero-padding or by truncation at the end.
 *
 * optr = [iptr 0 0 0 0]
 *
 */
void md_resize(unsigned int D, const long odim[D], void* optr, const long idim[D], const void* iptr, size_t size)
{
long pos[D];
memset(pos, 0, D * sizeof(long));
// Clear first so the padded region is zero, then copy the overlap.
md_clear(D, odim, optr, size);
md_copy_block(D, pos, odim, optr, idim, iptr, size);
}
/**
 * Resize an array by zero-padding or by truncation at both ends symmetrically.
 *
 * optr = [0 0 iptr 0 0]
 *
 */
void md_resize_center(unsigned int D, const long odim[D], void* optr, const long idim[D], const void* iptr, size_t size)
{
// the definition of the center position corresponds
// to the one used in the FFT.
long pos[D];
for (unsigned int i = 0; i < D; i++)
pos[i] = labs((odim[i] / 2) - (idim[i] / 2));
md_clear(D, odim, optr, size);
md_copy_block(D, pos, odim, optr, idim, iptr, size);
}
/**
 * Extract a slice from an array at the positions given for the dimensions
 * selected by flags (with strides)
 *
 * optr = iptr(pos[0], :, pos[2], :, :)
 *
 */
void md_slice2(unsigned int D, unsigned long flags, const long pos[D], const long dim[D], const long ostr[D], void* optr, const long istr[D], const void* iptr, size_t size)
{
// Sliced dimensions collapse to 1 in the output.
long odim[D];
md_select_dims(D, ~flags, odim, dim);
md_copy_block2(D, pos, odim, ostr, optr, dim, istr, iptr, size);
}
/**
 * Extract a slice from an array at the positions given for the dimensions
 * selected by flags (without strides)
 *
 * optr = iptr(pos[0], :, pos[2], :, :)
 *
 */
void md_slice(unsigned int D, unsigned long flags, const long pos[D], const long dim[D], void* optr, const void* iptr, size_t size)
{
long odim[D];
md_select_dims(D, ~flags, odim, dim);
md_slice2(D, flags, pos, dim,
MD_STRIDES(D, odim, size), optr,
MD_STRIDES(D, dim, size), iptr, size);
}
/**
 * Permute array (with strides)
 *
 * optr[order[i]] = iptr[i]
 *
 */
void md_permute2(unsigned int D, const unsigned int order[D], const long odims[D], const long ostr[D], void* optr, const long idims[D], const long istr[D], const void* iptr, size_t size)
{
unsigned int flags = 0;
long ostr2[D];
// Express the permutation as reordered output strides, then do a copy
// over the input dimensions.
for (unsigned int i = 0; i < D; i++) {
assert(order[i] < D);
assert(odims[i] == idims[order[i]]);
flags = MD_SET(flags, order[i]);
ostr2[order[i]] = ostr[i];
}
// order must be a bijection: every dimension appears exactly once.
assert(MD_BIT(D) == flags + 1);
md_copy2(D, idims, ostr2, optr, istr, iptr, size);
}
/**
 * Permute array (without strides)
 *
 * optr[order[i]] = iptr[i]
 *
 */
void md_permute(unsigned int D, const unsigned int order[D], const long odims[D], void* optr, const long idims[D], const void* iptr, size_t size)
{
md_permute2(D, order,
odims, MD_STRIDES(D, odims, size), optr,
idims, MD_STRIDES(D, idims, size), iptr, size);
}
/**
 * Permute dimensions:
 *
 * odims[i] = idims[order[i]]
 */
void md_permute_dims(unsigned int D, const unsigned int order[D], long odims[D], const long idims[D])
{
	unsigned int d = D;

	while (d-- > 0)
		odims[d] = idims[order[d]];
}
// Build the identity permutation with dim1 and dim2 exchanged.
static void md_transpose_order(unsigned int D, unsigned int order[D], unsigned int dim1, unsigned int dim2)
{
	assert(dim1 < D);
	assert(dim2 < D);

	for (unsigned int i = 0; i < D; i++)
		order[i] = (i == dim1) ? dim2 : ((i == dim2) ? dim1 : i);
}
/**
 * Transpose dimensions: odims = idims with dim1 and dim2 exchanged.
 */
void md_transpose_dims(unsigned int D, unsigned int dim1, unsigned int dim2, long odims[D], const long idims[D])
{
unsigned int order[D];
md_transpose_order(D, order, dim1, dim2);
md_permute_dims(D, order, odims, idims);
}
/**
 * Transpose array (with strides)
 *
 * optr[dim2] = iptr[dim1]
 *
 * optr[dim1] = iptr[dim2]
 *
 */
void md_transpose2(unsigned int D, unsigned int dim1, unsigned int dim2, const long odims[D], const long ostr[D], void* optr, const long idims[D], const long istr[D], const void* iptr, size_t size)
{
// All dimensions other than dim1/dim2 must agree; dim1/dim2 must swap.
for (unsigned int i = 0; i < D; i++)
if ((i != dim1) && (i != dim2))
assert(odims[i] == idims[i]);
assert(odims[dim1] == idims[dim2]);
assert(odims[dim2] == idims[dim1]);
unsigned int order[D];
md_transpose_order(D, order, dim1, dim2);
md_permute2(D, order, odims, ostr, optr, idims, istr, iptr, size);
}
/**
 * Transpose array (without strides)
 *
 * optr[dim2] = iptr[dim1]
 *
 * optr[dim1] = iptr[dim2]
 *
 */
void md_transpose(unsigned int D, unsigned int dim1, unsigned int dim2, const long odims[D], void* optr, const long idims[D], const void* iptr, size_t size)
{
md_transpose2(D, dim1, dim2,
odims, MD_STRIDES(D, odims, size), optr,
idims, MD_STRIDES(D, idims, size), iptr, size);
}
static void md_flip_inpl2(unsigned int D, const long dims[D], unsigned long flags, const long str[D], void* ptr, size_t size);
/**
 * Swap input and output while flipping selected dimensions
 * at the same time (divide-and-conquer on the last flipped dimension).
 */
void md_swap_flip2(unsigned int D, const long dims[D], unsigned long flags, const long ostr[D], void* optr, const long istr[D], void* iptr, size_t size)
{
#if 1
// Find the highest flipped dimension with extent > 1.
int i;
for (i = D - 1; i >= 0; i--)
if ((1 != dims[i]) && MD_IS_SET(flags, i))
break;
// Nothing to flip: a plain swap suffices.
if (-1 == i) {
md_swap2(D, dims, ostr, optr, istr, iptr, size);
return;
}
assert(1 < dims[i]);
assert(ostr[i] != 0);
assert(istr[i] != 0);
// Split dimension i into a lower and an upper half and recurse,
// exchanging the halves to realize the flip.
long dims2[D];
md_copy_dims(D, dims2, dims);
dims2[i] = dims[i] / 2;
long off = (dims[i] + 1) / 2;
assert(dims2[i] + off == dims[i]);
md_swap_flip2(D, dims2, flags, ostr, optr, istr, iptr + off * istr[i], size);
md_swap_flip2(D, dims2, flags, ostr, optr + off * ostr[i], istr, iptr, size);
// odd, swap center plane
// (we should split in three similar sized chunks instead)
dims2[i] = 1;
if (1 == dims[i] % 2)
md_swap_flip2(D, dims2, flags, ostr, optr + (off - 1) * ostr[i], istr, iptr + (off - 1) * istr[i], size);
#else
// simpler, but more swaps
md_swap2(D, dims, ostr, optr, istr, iptr, size);
md_flip_inpl2(D, dims, flags, ostr, optr, size);
md_flip_inpl2(D, dims, flags, istr, iptr, size);
#endif
}
/**
 * Swap input and output while flipping selected dimensions
 * at the same time (without strides).
 */
void md_swap_flip(unsigned int D, const long dims[D], unsigned long flags, void* optr, void* iptr, size_t size)
{
	long strides[D];

	md_calc_strides(D, strides, dims, size);

	md_swap_flip2(D, dims, flags, strides, optr, strides, iptr, size);
}
// In-place flip along all flagged dimensions: swap the two halves of one
// flagged dimension crosswise, letting md_swap_flip2 recursively handle
// the remaining flagged dimensions.
static void md_flip_inpl2(unsigned int D, const long dims[D], unsigned long flags, const long str[D], void* ptr, size_t size)
{
	// highest flagged non-singleton dimension
	int i;
	for (i = D - 1; i >= 0; i--)
		if ((1 != dims[i]) && MD_IS_SET(flags, i))
			break;

	// nothing to flip
	if (-1 == i)
		return;

	assert(1 < dims[i]);
	assert(str[i] != 0);

	long dims2[D];
	md_copy_dims(D, dims2, dims);
	dims2[i] = dims[i] / 2;	// lower half; for odd sizes the center plane stays put

	long off = str[i] * (0 + (dims[i] + 1) / 2);
	md_swap_flip2(D, dims2, flags, str, ptr, str, ptr + off, size);
}
/**
 * Flip array (with strides)
 *
 * optr[dims[D] - 1 - i] = iptr[i]
 *
 */
void md_flip2(unsigned int D, const long dims[D], unsigned long flags, const long ostr[D], void* optr, const long istr[D], const void* iptr, size_t size)
{
	// in-place flipping needs the dedicated routine
	if (optr == iptr) {

		assert(ostr == istr);

		md_flip_inpl2(D, dims, flags, ostr, optr, size);
		return;
	}

	// negate output strides of flipped dimensions and start the copy at
	// the last element of each such dimension
	long nstr[D];
	long offset = 0;

	for (unsigned int d = 0; d < D; d++) {

		if (!MD_IS_SET(flags, d)) {

			nstr[d] = ostr[d];
			continue;
		}

		nstr[d] = -ostr[d];
		offset += (dims[d] - 1) * ostr[d];
	}

	md_copy2(D, dims, nstr, optr + offset, istr, iptr, size);
}
/**
 * Flip array (without strides)
 *
 * optr[dims[D] - 1 - i] = iptr[i]
 *
 */
void md_flip(unsigned int D, const long dims[D], unsigned long flags, void* optr, const void* iptr, size_t size)
{
	long strs[D];

	md_calc_strides(D, strs, dims, size);

	md_flip2(D, dims, flags, strs, optr, strs, iptr, size);
}
// Shared state for md_compare2 and its nary_cmp callback.
struct compare_s {

	bool eq;	// running result: stays true while all chunks compare equal
	size_t size;	// element size in bytes
};
// Callback for optimized_nop: memcmp one contiguous chunk of both arrays
// and fold the outcome into the shared accumulator.
static void nary_cmp(struct nary_opt_data_s* opt_data, void* ptrs[])
{
	struct compare_s* data = opt_data->data_ptr;
	size_t size = data->size * opt_data->size;	// bytes in this chunk

	bool eq = (0 == memcmp(ptrs[0], ptrs[1], size));

	// callbacks may run concurrently; serialize the read-modify-write
	#pragma omp critical
	data->eq &= eq;
}
// Compare two strided arrays element-wise for exact (bitwise) equality.
// Returns true iff all elements match.
bool md_compare2(unsigned int D, const long dims[D], const long str1[D], const void* src1,
		const long str2[D], const void* src2, size_t size)
{
	struct compare_s data = { true, size };

	// pack both stride vectors for the generic optimized n-ary loop
	const long (*nstr[2])[D] = { (const long (*)[D])str1, (const long (*)[D])str2 };

	optimized_nop(2, 0u, D, dims, nstr, (void*[2]){ (void*)src1, (void*)src2 }, (size_t[2]){ size, size }, nary_cmp, &data);

	return data.eq;
}
/**
 * Compare two arrays element-wise for exact equality (without strides).
 */
bool md_compare(unsigned int D, const long dims[D], const void* src1, const void* src2, size_t size)
{
	long strs[D];

	md_calc_strides(D, strs, dims, size);

	return md_compare2(D, dims, strs, src1, strs, src2, size);
}
// Per-call state for md_septrafo2: the 1D transform applied along one
// selected dimension.
struct septrafo_s {

	long N;			// length of the transformed dimension
	long str;		// stride (in bytes) along that dimension
	void* data;		// user data forwarded to the callback
	md_trafo_fun_t fun;	// the 1D transform callback
};
// Adapter: invoke the stored 1D transform on one position handed
// over by md_nary.
static void nary_septrafo(void* _data, void* ptr[])
{
	struct septrafo_s* state = _data;

	state->fun(state->data, state->N, state->str, ptr[0]);
}
// Recursive worker for md_septrafo2: processes dimensions R-1 down to 0
// and, for each flagged one, applies the 1D transform along it at every
// position of the remaining dimensions.
static void md_septrafo_r(unsigned int D, unsigned int R, long dimensions[D], unsigned long flags, const long strides[D], void* ptr, md_trafo_fun_t fun, void* _data)
{
	if (0 == R--)
		return;

	// handle the lower dimensions first
	md_septrafo_r(D, R, dimensions, flags, strides, ptr, fun, _data);

	if (MD_IS_SET(flags, R)) {

		struct septrafo_s data = { dimensions[R], strides[R], _data, fun };
		void* nptr[1] = { ptr };
		const long* nstrides[1] = { strides };

		// collapse dimension R so md_nary only iterates over the others
		dimensions[R] = 1; // we made a copy in md_septrafo2
		//md_nary_parallel(1, D, dimensions, nstrides, nptr, &data, nary_septrafo);
		md_nary(1, D, dimensions, nstrides, nptr, &data, nary_septrafo);
		dimensions[R] = data.N;	// restore for the caller
	}
}
/**
 * Apply a separable transformation along selected dimensions (with strides).
 */
void md_septrafo2(unsigned int D, const long dimensions[D], unsigned long flags, const long strides[D], void* ptr, md_trafo_fun_t fun, void* _data)
{
	// work on a copy: the recursion temporarily collapses dimensions
	long tdims[D];

	md_copy_dims(D, tdims, dimensions);

	md_septrafo_r(D, D, tdims, flags, strides, ptr, fun, _data);
}
/**
 * Apply a separable transformation along selected dimensions (without strides).
 */
void md_septrafo(unsigned int D, const long dims[D], unsigned long flags, void* ptr, size_t size, md_trafo_fun_t fun, void* _data)
{
	long strs[D];

	md_calc_strides(D, strs, dims, size);

	md_septrafo2(D, dims, flags, strs, ptr, fun, _data);
}
/**
 * Copy diagonals from array specified by flags (with strides)
 *
 * dst(i, i, :, i, :) = src(i, i, :, i, :)
 *
 */
void md_copy_diag2(unsigned int D, const long dims[D], unsigned long flags, const long str1[D], void* dst, const long str2[D], const void* src, size_t size)
{
	long count = -1;	// common length of all flagged dimensions
	long dstep = 0;		// combined per-diagonal-step stride in dst
	long sstep = 0;		// combined per-diagonal-step stride in src

	for (unsigned int d = 0; d < D; d++) {

		if (!MD_IS_SET(flags, d))
			continue;

		if (count < 0)
			count = dims[d];

		// all flagged dimensions must have the same length
		assert(dims[d] == count);

		dstep += str1[d];
		sstep += str2[d];
	}

	// remaining (non-flagged) dimensions are copied as full blocks
	long rdims[D];
	md_select_dims(D, ~flags, rdims, dims);

	for (long i = 0; i < count; i++)
		md_copy2(D, rdims, str1, dst + i * dstep, str2, src + i * sstep, size);
}
/**
 * Copy diagonals from array specified by flags (without strides)
 *
 * dst(i, i, :, i, :) = src(i, i, :, i, :)
 *
 */
void md_copy_diag(unsigned int D, const long dims[D], unsigned long flags, void* dst, const void* src, size_t size)
{
	long strs[D];

	md_calc_strides(D, strs, dims, size);

	md_copy_diag2(D, dims, flags, strs, dst, strs, src, size);
}
/**
 * Fill diagonals specified by flags with value (without strides)
 *
 * dst(i, i, :, i, :) = src[0]
 *
 */
void md_fill_diag(unsigned int D, const long dims[D], unsigned long flags, void* dst, const void* src, size_t size)
{
	long dstrs[D];
	long sstrs[D];

	md_calc_strides(D, dstrs, dims, size);

	// all-zero strides on the source: every element reads src[0]
	md_singleton_strides(D, sstrs);

	md_copy_diag2(D, dims, flags, dstrs, dst, sstrs, src, size);
}
// In-place circular shift: currently implemented by shifting through a
// temporary copy of the whole array (see #else branch).
static void md_circ_shift_inpl2(unsigned int D, const long dims[D], const long center[D], const long strs[D], void* dst, size_t size)
{
#if 0
	// disabled: flip-based in-place algorithm (kept for reference)
	long dims1[D];
	long dims2[D];
	md_copy_dims(D, dims1, dims);
	md_copy_dims(D, dims2, dims);

	unsigned int i;
	for (i = 0; i < D; i++) {

		if (0 != center[i]) {

			dims1[i] = center[i];
			dims2[i] = dims[i] - center[i];
			break;
		}
	}

	if (i == D)
		return;

	long off = strs[i] * center[i];

	// cool but slow, instead we want to have a chain of swaps
	md_flip2(D, dims, MD_BIT(i), strs, dst, strs, dst, size);
	md_flip2(D, dims1, MD_BIT(i), strs, dst, strs, dst, size);
	md_flip2(D, dims2, MD_BIT(i), strs, dst + off, strs, dst + off, size);

	// also not efficient, we want to merge the chain of swaps
	long center2[D];
	md_copy_dims(D, center2, center);
	center2[i] = 0;

	md_circ_shift_inpl2(D, dims, center2, strs, dst, size);
#else
	// use tmp for now

	// find the first dimension with a nonzero shift
	unsigned int i;
	for (i = 0; i < D; i++)
		if (0 != center[i])
			break;

	// no shift requested: nothing to do
	if (i == D)
		return;

	// copy to a contiguous temporary, then shift back out-of-place
	long tmp_strs[D];
	md_calc_strides(D, tmp_strs, dims, size);

	void* tmp = md_alloc_sameplace(D, dims, size, dst);

	md_copy2(D, dims, tmp_strs, tmp, strs, dst, size);
	md_circ_shift2(D, dims, center, strs, dst, tmp_strs, tmp, size);

	md_free(tmp);
#endif
}
/**
 * Circularly shift array (with strides)
 *
 * dst[mod(i + center)] = src[i]
 *
 */
void md_circ_shift2(unsigned int D, const long dimensions[D], const long center[D], const long str1[D], void* dst, const long str2[D], const void* src, size_t size)
{
	// normalize shifts to 0 <= pos[i] < dimensions[i]
	long pos[D];

	for (unsigned int i = 0; i < D; i++) { // FIXME: it would be better to calc modulo

		pos[i] = center[i];

		while (pos[i] < 0)
			pos[i] += dimensions[i];
	}

	// first dimension with a nonzero shift
	unsigned int i = 0; // FIXME :maybe we shoud search the other way?

	while ((i < D) && (0 == pos[i]))
		i++;

	// no shift at all: plain copy
	if (D == i) {

		md_copy2(D, dimensions, str1, dst, str2, src, size);
		return;
	}

	// in-place shift goes through a temporary
	if (dst == src) {

		assert(str1 == str2);
		md_circ_shift_inpl2(D, dimensions, pos, str1, dst, size);
		return;
	}

	long shift = pos[i];
	assert(shift != 0);

	// split dimension i: the tail of src wraps around to the head of dst
	long dim1[D];
	long dim2[D];

	md_copy_dims(D, dim1, dimensions);
	md_copy_dims(D, dim2, dimensions);

	dim1[i] = shift;
	dim2[i] = dimensions[i] - shift;

	assert((dim1[i] >= 0) && (dim2[i] >= 0));

	// recurse with this dimension's shift consumed
	pos[i] = 0;

	//printf("%d: %ld %ld %d\n", i, dim1[i], dim2[i], sizeof(dimensions));
	md_circ_shift2(D, dim1, pos, str1, dst, str2, src + dim2[i] * str2[i], size);
	md_circ_shift2(D, dim2, pos, str1, dst + dim1[i] * str1[i], str2, src, size);
}
/**
 * Circularly shift array (without strides)
 *
 * dst[mod(i + center)] = src[i]
 *
 */
void md_circ_shift(unsigned int D, const long dimensions[D], const long center[D], void* dst, const void* src, size_t size)
{
	long strs[D];

	md_calc_strides(D, strs, dimensions, size);

	md_circ_shift2(D, dimensions, center, strs, dst, strs, src, size);
}
/**
 * Circularly extend array (with strides)
 *
 */
void md_circ_ext2(unsigned int D, const long dims1[D], const long strs1[D], void* dst, const long dims2[D], const long strs2[D], const void* src, size_t size)
{
	// per-dimension amount of extension (at most one extra period)
	long ext[D];

	for (unsigned int i = 0; i < D; i++) {

		ext[i] = dims1[i] - dims2[i];

		assert(ext[i] >= 0);
		assert(ext[i] <= dims2[i]);
	}

	// first dimension that needs extending
	unsigned int i = 0; // FIXME :maybe we shoud search the other way?

	while ((i < D) && (0 == ext[i]))
		i++;

	// nothing to extend: plain copy
	if (D == i) {

		md_copy2(D, dims1, strs1, dst, strs2, src, size);
		return;
	}

	long dims1_crop[D];
	long dims2_crop[D];
	long ext_dims[D];

	md_copy_dims(D, dims1_crop, dims1);
	md_copy_dims(D, dims2_crop, dims2);
	md_copy_dims(D, ext_dims, dims1);

	dims1_crop[i] = dims2[i];	// destination without the extension
	dims2_crop[i] = ext[i];		// leading part of src re-copied into the extension
	ext_dims[i] = ext[i];		// the extension region itself

	ext[i] = 0;

	//printf("%d: %ld %ld %d\n", i, dim1[i], dim2[i], sizeof(dimensions));
	// copy the original block, then fill the extension from the start of src
	md_circ_ext2(D, dims1_crop, strs1, dst, dims2, strs2, src, size);
	md_circ_ext2(D, ext_dims, strs1, dst + dims2[i] * strs1[i], dims2_crop, strs2, src, size);
}
/**
 * Circularly extend array (without strides)
 *
 */
void md_circ_ext(unsigned int D, const long dims1[D], void* dst, const long dims2[D], const void* src, size_t size)
{
	long strs1[D];
	long strs2[D];

	md_calc_strides(D, strs1, dims1, size);
	md_calc_strides(D, strs2, dims2, size);

	md_circ_ext2(D, dims1, strs1, dst, dims2, strs2, src, size);
}
/**
 * Periodically extend array (with strides)
 *
 * Each dimension i of dims1 must be an integer multiple of dims2[i];
 * the dst array is filled with periodic copies of src.
 */
void md_periodic2(unsigned int D, const long dims1[D], const long strs1[D], void* dst, const long dims2[D], const long strs2[D], const void* src, size_t size)
{
	// each input dimension is split into a pair: (block, repetitions)
	long dims1B[2 * D];
	long strs1B[2 * D];
	long strs2B[2 * D];

	for (unsigned int i = 0; i < D; i++) {

		assert(0 == dims1[i] % dims2[i]);

		// blocks
		dims1B[2 * i + 0] = dims2[i];
		strs1B[2 * i + 0] = strs1[i];
		strs2B[2 * i + 0] = strs2[i];

		// periodic copies
		// BUG FIX: these previously overwrote index [2 * i + 0],
		// leaving the odd-indexed entries uninitialized
		dims1B[2 * i + 1] = dims1[i] / dims2[i];
		strs1B[2 * i + 1] = strs1[i] * dims2[i];
		strs2B[2 * i + 1] = 0;	// stride 0: re-read the same src block
	}

	// BUG FIX: copy over all 2 * D doubled dimensions, not just D
	md_copy2(2 * D, dims1B, strs1B, dst, strs2B, src, size);
}
/**
 * Periodically extend array (without strides)
 *
 */
void md_periodic(unsigned int D, const long dims1[D], void* dst, const long dims2[D], const void* src, size_t size)
{
	long strs1[D];
	long strs2[D];

	md_calc_strides(D, strs1, dims1, size);
	md_calc_strides(D, strs2, dims2, size);

	md_periodic2(D, dims1, strs1, dst, dims2, strs2, src, size);
}
/**
 * Allocate CPU memory
 *
 * return pointer to CPU memory
 */
void* md_alloc(unsigned int D, const long dimensions[D], size_t size)
{
	// total bytes = number of elements times element size
	return xmalloc(md_calc_size(D, dimensions) * size);
}
/**
 * Allocate CPU memory and clear
 *
 * return pointer to CPU memory
 */
void* md_calloc(unsigned int D, const long dimensions[D], size_t size)
{
	void* mem = md_alloc(D, dimensions, size);

	md_clear(D, dimensions, mem, size);

	return mem;
}
#ifdef USE_CUDA
/**
 * Allocate GPU memory
 *
 * return pointer to GPU memory
 */
void* md_alloc_gpu(unsigned int D, const long dimensions[D], size_t size)
{
	// total bytes = number of elements times element size
	return cuda_malloc(md_calc_size(D, dimensions) * size);
}
/**
 * Allocate GPU memory and copy from CPU pointer
 *
 * return pointer to GPU memory (or NULL if ptr is NULL)
 */
void* md_gpu_move(unsigned int D, const long dims[D], const void* ptr, size_t size)
{
	if (NULL == ptr)
		return NULL;

	void* gpu = md_alloc_gpu(D, dims, size);

	md_copy(D, dims, gpu, ptr, size);

	return gpu;
}
#endif
/**
 * Allocate memory on the same device (CPU/GPU) place as ptr
 *
 * return pointer to CPU memory if ptr is in CPU or to GPU memory if ptr is in GPU
 */
void* md_alloc_sameplace(unsigned int D, const long dimensions[D], size_t size, const void* ptr)
{
#ifdef USE_CUDA
	// dispatch on where the reference pointer lives
	return (cuda_ondevice(ptr) ? md_alloc_gpu : md_alloc)(D, dimensions, size);
#else
	// without CUDA everything is CPU memory; still reject NULL references
	assert(0 != ptr);
	return md_alloc(D, dimensions, size);
#endif
}
/**
 * Free CPU/GPU memory
 *
 */
void md_free(const void* ptr)
{
#ifdef USE_CUDA
	// GPU allocations must be released through the CUDA allocator
	if (cuda_ondevice(ptr))
		cuda_free((void*)ptr);
	else
#endif
	xfree(ptr);
}
|
GB_positional_op_ijp.c | //------------------------------------------------------------------------------
// GB_positional_op_ijp: C = positional_op (A), depending j
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// TODO: rename, and use #ifdef instead of offset = 0 or 1.
// TODO: use this kernel for GrB_extractTuples, to create J array.
// A can be jumbled. If A is jumbled, so is C.
// if A and C are bitmap, not all of Cx need to be written to, but it's faster
// just to write to all of it. C->b is copied from A->b in the caller.
// NOTE: this is a code-template fragment (#include'd into generated
// kernels); Cx_int, A, Ah, Ap, avlen, nthreads, ntasks and GB_POSITION
// are defined by the including translation unit.
{

    //--------------------------------------------------------------------------
    // slice the entries for each task
    //--------------------------------------------------------------------------

    int64_t *pstart_slice = NULL, *kfirst_slice = NULL, *klast_slice = NULL ;
    if (!GB_ek_slice (&pstart_slice, &kfirst_slice, &klast_slice, A, &ntasks))
    {
        // out of memory
        return (GrB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // Cx = positional_op (A)
    //--------------------------------------------------------------------------

    int tid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (tid = 0 ; tid < ntasks ; tid++)
    {
        // if kfirst > klast then task tid does no work at all
        int64_t kfirst = kfirst_slice [tid] ;
        int64_t klast = klast_slice [tid] ;

        //----------------------------------------------------------------------
        // C(:,kfirst:klast) = op (A(:,kfirst:klast))
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // find the part of A(:,k) and Cx to be operated on by this task
            //------------------------------------------------------------------

            // j is the column index of vector k (identity if A has no hyperlist)
            int64_t j = GBH (Ah, k) ;
            int64_t pA_start, pA_end ;
            GB_get_pA (&pA_start, &pA_end, tid, k,
                kfirst, klast, pstart_slice, Ap, avlen) ;

            //------------------------------------------------------------------
            // C(:,j) = op (A(:,j))
            //------------------------------------------------------------------

            GB_PRAGMA_SIMD
            for (int64_t p = pA_start ; p < pA_end ; p++)
            {
                // GB_POSITION is j or j+1
                Cx_int [p] = GB_POSITION ;
            }
        }
    }

    //--------------------------------------------------------------------------
    // free workspace
    //--------------------------------------------------------------------------

    GB_ek_slice_free (&pstart_slice, &kfirst_slice, &klast_slice) ;
}
#undef GB_POSITION
|
config_number_of_user_threads.c | /*******************************************************************************
* Copyright 2011-2016 Intel Corporation All Rights Reserved.
*
* The source code, information and material ("Material") contained herein is
* owned by Intel Corporation or its suppliers or licensors, and title to such
* Material remains with Intel Corporation or its suppliers or licensors. The
* Material contains proprietary information of Intel or its suppliers and
* licensors. The Material is protected by worldwide copyright laws and treaty
* provisions. No part of the Material may be used, copied, reproduced,
* modified, published, uploaded, posted, transmitted, distributed or disclosed
* in any way without Intel's prior express written permission. No license under
* any patent, copyright or other intellectual property rights in the Material
* is granted to or conferred upon you, either expressly, by implication,
* inducement, estoppel or otherwise. Any license under such intellectual
* property rights must be express and approved by Intel in writing.
*
* Unless otherwise agreed by Intel in writing, you may not remove or alter this
* notice or any other notice embedded in Materials by Intel or Intel's
* suppliers or licensors in any way.
*******************************************************************************/
/*
! Content:
! An example of using DFTI_NUMBER_OF_USER_THREADS configuration parameter.
! The parameter specifies how many user threads (OS threads or OpenMP threads)
! share the descriptor for computation of FFT.
!
! Values:
! Any positive integer (default 1)
!
!****************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <float.h>
#include "mkl_service.h"
#include "mkl_dfti.h"
static void init(MKL_Complex16 *x, int M, int N1, int N2, int N3,
int H1, int H2, int H3);
static int verify(MKL_Complex16 *x, int M, int N1, int N2, int N3,
int H1, int H2, int H3);
/* Define the format to printf MKL_LONG values */
#if !defined(MKL_ILP64)
#define LI "%li"
#else
#define LI "%lli"
#endif
/* Demonstrates DFTI_NUMBER_OF_USER_THREADS: one committed descriptor is
 * shared by several user threads, each computing one of M transforms. */
int main(void)
{
    /* Sizes of 3D transform */
    int N1 = 5, N2 = 5, N3 = 5;
    /* Number of transforms to compute */
    int M = 100;
    /* Number of user threads sharing the descriptor */
    int NUT = 4;
    /* Arbitrary harmonic used to verify FFT */
    int H1 = -2, H2 = -3, H3 = -4;
    /* Execution status */
    MKL_LONG status = 0;
    /* Pointer to input/output data */
    MKL_Complex16 *x = 0;
    DFTI_DESCRIPTOR_HANDLE hand = 0;
    char version[DFTI_VERSION_LENGTH];
    /* Local variables */
    int m;
    MKL_LONG thr_status;
    DftiGetValue(0, DFTI_VERSION, version);
    printf("%s\n", version);
    printf("Example config_number_of_user_threads\n");
    printf("Multiple 3D in-place FFT using shared descriptor\n");
    printf("Configuration parameters:\n");
    printf(" DFTI_PRECISION = DFTI_DOUBLE\n");
    printf(" DFTI_FORWARD_DOMAIN = DFTI_COMPLEX\n");
    printf(" DFTI_DIMENSION = 3\n");
    printf(" DFTI_LENGTHS = {%d, %d, %d}\n", N1, N2, N3);
    printf(" Number of transforms M = %d\n", M);
    printf(" DFTI_NUMBER_OF_USER_THREADS = %d\n", NUT);
    printf("Create DFTI descriptor\n");
    {
        MKL_LONG N[3]; N[0] = N1; N[1] = N2; N[2] = N3;
        status = DftiCreateDescriptor(&hand, DFTI_DOUBLE, DFTI_COMPLEX, 3, N);
        if (0 != status) goto failed;
    }
    printf("Set configuration: number of user threads\n");
    /* must be set before commit so MKL reserves per-thread state */
    status = DftiSetValue(hand, DFTI_NUMBER_OF_USER_THREADS, NUT);
    if (0 != status) goto failed;
    printf("Commit descriptor\n");
    status = DftiCommitDescriptor(hand);
    if (0 != status) goto failed;
    printf("Allocate input/output array\n");
    x = (MKL_Complex16*)mkl_malloc(M * N1*N2*N3 * sizeof(MKL_Complex16), 64);
    if (0 == x) goto failed;
    printf("Initialize input\n");
    init(x, M, N1, N2, N3, H1, H2, H3);
    printf("Compute forward transform by parallel user threads\n");
#if defined(_OPENMP)
#pragma omp parallel for shared(hand, x) private(m, thr_status)
#endif
    for (m = 0; m < M; ++m)
    {
        /*
         * If the actual size of parallel team of threads sharing 'hand' is
         * greater than 'NUT', the number of user threads set in the
         * descriptor, then the performance may be negatively affected.
         */
        thr_status = DftiComputeForward(hand, x + m * N1*N2*N3);
        /* Update global status only if this thread fails */
        /* NOTE(review): concurrent writes to shared 'status' are not
         * synchronized — benign for this pass/fail example, but technically
         * a data race; consider an OpenMP reduction. */
        if (0 != thr_status) status = thr_status;
    }
    if (0 != status) goto failed;
    printf("Verify the result\n");
    status = verify(x, M, N1, N2, N3, H1, H2, H3);
    if (0 != status) goto failed;
 cleanup:
    /* common exit path: release the descriptor and data on success or failure */
    printf("Free DFTI descriptor\n");
    DftiFreeDescriptor(&hand);
    printf("Free data array\n");
    mkl_free(x);
    printf("TEST %s\n",0==status ? "PASSED" : "FAILED");
    return status;
 failed:
    printf(" ERROR, status = "LI"\n", status);
    status = 1;
    goto cleanup;
}
/* Compute (K*L)%M accurately: widen the product to 64 bits before
 * taking the remainder, then convert the exact result to double. */
static double moda(int K, int L, int M)
{
    long long prod = (long long)K * L;
    return (double)(prod % M);
}
/* Initialize array with harmonic {H1, H2, H3}: each of the M transforms
 * is filled with a single complex exponential scaled so that its forward
 * FFT yields a unit peak at (H1,H2,H3). */
static void init(MKL_Complex16 *x, int M, int N1, int N2, int N3,
                 int H1, int H2, int H3)
{
    double TWOPI = 6.2831853071795864769, phase;
    int m, n1, n2, n3, index;
    /* Generalized strides for row-major addressing of x */
    int SM = N1*N2*N3, S1 = N2*N3, S2 = N3, S3 = 1;
    for (m = 0; m < M; m++)
    {
        for (n1 = 0; n1 < N1; n1++)
        {
            for (n2 = 0; n2 < N2; n2++)
            {
                for (n3 = 0; n3 < N3; n3++)
                {
                    /* phase = (n1*H1/N1 + n2*H2/N2 + n3*H3/N3), computed
                     * via exact 64-bit modular products (moda) */
                    phase = moda(n1,H1,N1) / N1;
                    phase += moda(n2,H2,N2) / N2;
                    phase += moda(n3,H3,N3) / N3;
                    index = m*SM + n1*S1 + n2*S2 + n3*S3;
                    x[index].real = cos( TWOPI * phase ) / (N1*N2*N3);
                    x[index].imag = sin( TWOPI * phase ) / (N1*N2*N3);
                }
            }
        }
    }
}
/* Verify that x(m, n1,n2,n3) are unit peaks at H1,H2,H3.
 * Returns 0 on success, 1 on the first element exceeding the error bound. */
static int verify(MKL_Complex16 *x, int M, int N1, int N2, int N3,
                  int H1, int H2, int H3)
{
    double err, errthr, maxerr;
    int m, n1, n2, n3, index;
    /* Generalized strides for row-major addressing of x */
    int SM = N1*N2*N3, S1 = N2*N3, S2 = N3, S3 = 1;
    /*
     * Note, this simple error bound doesn't take into account error of
     * input data
     */
    errthr = 5.0 * log( (double)N1*N2*N3 ) / log(2.0) * DBL_EPSILON;
    printf(" Verify the result, errthr = %.3lg\n", errthr);
    maxerr = 0;
    for (m = 0; m < M; m++)
    {
        for (n1 = 0; n1 < N1; n1++)
        {
            for (n2 = 0; n2 < N2; n2++)
            {
                for (n3 = 0; n3 < N3; n3++)
                {
                    double re_exp = 0.0, im_exp = 0.0, re_got, im_got;
                    /* expected spectrum: 1 at the harmonic (mod N), 0 elsewhere */
                    if ((n1-H1)%N1==0 && (n2-H2)%N2==0 && (n3-H3)%N3==0)
                    {
                        re_exp = 1;
                    }
                    index = m*SM + n1*S1 + n2*S2 + n3*S3;
                    re_got = x[index].real;
                    im_got = x[index].imag;
                    /* L1 error of the complex value */
                    err = fabs(re_got - re_exp) + fabs(im_got - im_exp);
                    if (err > maxerr) maxerr = err;
                    /* negated comparison also catches NaN */
                    if (!(err < errthr))
                    {
                        printf(" x[%i][%i][%i][%i]: ",m,n1,n2,n3);
                        printf(" expected (%.17lg,%.17lg), ",re_exp,im_exp);
                        printf(" got (%.17lg,%.17lg), ",re_got,im_got);
                        printf(" err %.3lg\n", err);
                        printf(" Verification FAILED\n");
                        return 1;
                    }
                }
            }
        }
    }
    printf(" Verified, maximum error was %.3lg\n", maxerr);
    return 0;
}
|
OpenMPClause.h | //===- OpenMPClause.h - Classes for OpenMP clauses --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// \brief This file defines OpenMP AST classes for clauses.
/// There are clauses for executable directives, clauses for declarative
/// directives and clauses which can be used in both kinds of directives.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_OPENMPCLAUSE_H
#define LLVM_CLANG_AST_OPENMPCLAUSE_H
#include "clang/AST/Expr.h"
#include "clang/AST/Stmt.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/MapVector.h"
namespace clang {
//===----------------------------------------------------------------------===//
// AST classes for clauses.
//===----------------------------------------------------------------------===//
/// \brief This is a basic class for representing single OpenMP clause.
///
class OMPClause {
  /// \brief Starting location of the clause (the clause keyword).
  SourceLocation StartLoc;
  /// \brief Ending location of the clause.
  SourceLocation EndLoc;
  /// \brief Kind of the clause.
  OpenMPClauseKind Kind;

protected:
  OMPClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation EndLoc)
      : StartLoc(StartLoc), EndLoc(EndLoc), Kind(K) {}

public:
  /// \brief Returns the starting location of the clause.
  SourceLocation getLocStart() const { return StartLoc; }
  /// \brief Returns the ending location of the clause.
  SourceLocation getLocEnd() const { return EndLoc; }
  /// \brief Sets the starting location of the clause.
  void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
  /// \brief Sets the ending location of the clause.
  void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }
  /// \brief Returns kind of OpenMP clause (private, shared, reduction, etc.).
  OpenMPClauseKind getClauseKind() const { return Kind; }
  /// \brief Returns true if the clause was generated implicitly (it has no
  /// valid source location recorded).
  bool isImplicit() const { return StartLoc.isInvalid(); }

  /// Iterator types over the clause's child statements/expressions.
  typedef StmtIterator child_iterator;
  typedef ConstStmtIterator const_child_iterator;
  typedef llvm::iterator_range<child_iterator> child_range;
  typedef llvm::iterator_range<const_child_iterator> const_child_range;

  child_range children();
  /// \brief Const overload implemented in terms of the non-const children().
  const_child_range children() const {
    auto Children = const_cast<OMPClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }
  /// \brief Every OMPClause subclass is an OMPClause.
  static bool classof(const OMPClause *) { return true; }
};
/// Class that handles pre-initialization statement for some clauses, like
/// 'schedule', 'firstprivate' etc. Mixed into clause classes that need to
/// emit setup code before the associated statement.
class OMPClauseWithPreInit {
  friend class OMPClauseReader;
  /// Pre-initialization statement for the clause.
  Stmt *PreInit;
  /// Region that captures the associated stmt.
  OpenMPDirectiveKind CaptureRegion;

protected:
  /// Set pre-initialization statement for the clause.
  void setPreInitStmt(Stmt *S, OpenMPDirectiveKind ThisRegion = OMPD_unknown) {
    PreInit = S;
    CaptureRegion = ThisRegion;
  }
  OMPClauseWithPreInit(const OMPClause *This)
      : PreInit(nullptr), CaptureRegion(OMPD_unknown) {
    // get() must recognize this clause kind as carrying a pre-init stmt.
    assert(get(This) && "get is not tuned for pre-init.");
  }

public:
  /// Get pre-initialization statement for the clause.
  const Stmt *getPreInitStmt() const { return PreInit; }
  /// Get pre-initialization statement for the clause.
  Stmt *getPreInitStmt() { return PreInit; }
  /// Get capture region for the stmt in the clause.
  OpenMPDirectiveKind getCaptureRegion() { return CaptureRegion; }
  static OMPClauseWithPreInit *get(OMPClause *C);
  static const OMPClauseWithPreInit *get(const OMPClause *C);
};
/// Class that handles post-update expression for some clauses, like
/// 'lastprivate', 'reduction' etc.
class OMPClauseWithPostUpdate : public OMPClauseWithPreInit {
  friend class OMPClauseReader;
  /// Post-update expression for the clause.
  Expr *PostUpdate;

protected:
  /// Set post-update expression for the clause.
  void setPostUpdateExpr(Expr *S) { PostUpdate = S; }
  OMPClauseWithPostUpdate(const OMPClause *This)
      : OMPClauseWithPreInit(This), PostUpdate(nullptr) {
    // get() must recognize this clause kind as carrying a post-update expr.
    assert(get(This) && "get is not tuned for post-update.");
  }

public:
  /// Get post-update expression for the clause.
  const Expr *getPostUpdateExpr() const { return PostUpdate; }
  /// Get post-update expression for the clause.
  Expr *getPostUpdateExpr() { return PostUpdate; }
  static OMPClauseWithPostUpdate *get(OMPClause *C);
  static const OMPClauseWithPostUpdate *get(const OMPClause *C);
};
/// \brief This represents clauses with the list of variables like 'private',
/// 'firstprivate', 'copyin', 'shared', or 'reduction' clauses in the
/// '#pragma omp ...' directives.
///
/// CRTP base: \a T is the derived clause, which stores the variable
/// expressions as trailing objects.
template <class T> class OMPVarListClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief Number of variables in the list.
  unsigned NumVars;

protected:
  /// \brief Fetches list of variables associated with this clause.
  MutableArrayRef<Expr *> getVarRefs() {
    return MutableArrayRef<Expr *>(
        static_cast<T *>(this)->template getTrailingObjects<Expr *>(), NumVars);
  }

  /// \brief Sets the list of variables for this clause.
  void setVarRefs(ArrayRef<Expr *> VL) {
    assert(VL.size() == NumVars &&
           "Number of variables is not the same as the preallocated buffer");
    std::copy(VL.begin(), VL.end(),
              static_cast<T *>(this)->template getTrailingObjects<Expr *>());
  }

  /// \brief Build a clause with \a N variables
  ///
  /// \param K Kind of the clause.
  /// \param StartLoc Starting location of the clause (the clause keyword).
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  ///
  OMPVarListClause(OpenMPClauseKind K, SourceLocation StartLoc,
                   SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N)
      : OMPClause(K, StartLoc, EndLoc), LParenLoc(LParenLoc), NumVars(N) {}

public:
  typedef MutableArrayRef<Expr *>::iterator varlist_iterator;
  typedef ArrayRef<const Expr *>::iterator varlist_const_iterator;
  typedef llvm::iterator_range<varlist_iterator> varlist_range;
  typedef llvm::iterator_range<varlist_const_iterator> varlist_const_range;

  unsigned varlist_size() const { return NumVars; }
  bool varlist_empty() const { return NumVars == 0; }

  varlist_range varlists() {
    return varlist_range(varlist_begin(), varlist_end());
  }
  varlist_const_range varlists() const {
    return varlist_const_range(varlist_begin(), varlist_end());
  }

  varlist_iterator varlist_begin() { return getVarRefs().begin(); }
  varlist_iterator varlist_end() { return getVarRefs().end(); }
  varlist_const_iterator varlist_begin() const { return getVarRefs().begin(); }
  varlist_const_iterator varlist_end() const { return getVarRefs().end(); }

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// \brief Fetches list of all variables in the clause.
  ArrayRef<const Expr *> getVarRefs() const {
    return llvm::makeArrayRef(
        static_cast<const T *>(this)->template getTrailingObjects<Expr *>(),
        NumVars);
  }
};
/// \brief This represents 'if' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp parallel if(parallel:a > 5)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'if' clause with
/// condition 'a > 5' and directive name modifier 'parallel'.
///
class OMPIfClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief Condition of the 'if' clause.
  Stmt *Condition;
  /// \brief Location of ':' (if any).
  SourceLocation ColonLoc;
  /// \brief Directive name modifier for the clause.
  OpenMPDirectiveKind NameModifier;
  /// \brief Name modifier location.
  SourceLocation NameModifierLoc;

  /// \brief Set condition.
  ///
  void setCondition(Expr *Cond) { Condition = Cond; }
  /// \brief Set directive name modifier for the clause.
  ///
  void setNameModifier(OpenMPDirectiveKind NM) { NameModifier = NM; }
  /// \brief Set location of directive name modifier for the clause.
  ///
  void setNameModifierLoc(SourceLocation Loc) { NameModifierLoc = Loc; }
  /// \brief Set location of ':'.
  ///
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// \brief Build 'if' clause with condition \a Cond.
  ///
  /// \param NameModifier [OpenMP 4.1] Directive name modifier of clause.
  /// \param Cond Condition of the clause.
  /// \param HelperCond Helper condition for the clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param NameModifierLoc Location of directive name modifier.
  /// \param ColonLoc [OpenMP 4.1] Location of ':'.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Cond, Stmt *HelperCond,
              OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
              SourceLocation LParenLoc, SourceLocation NameModifierLoc,
              SourceLocation ColonLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_if, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), Condition(Cond), ColonLoc(ColonLoc),
        NameModifier(NameModifier), NameModifierLoc(NameModifierLoc) {
    setPreInitStmt(HelperCond, CaptureRegion);
  }

  /// \brief Build an empty clause.
  ///
  OMPIfClause()
      : OMPClause(OMPC_if, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this), LParenLoc(), Condition(nullptr), ColonLoc(),
        NameModifier(OMPD_unknown), NameModifierLoc() {}

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// \brief Return the location of ':'.
  SourceLocation getColonLoc() const { return ColonLoc; }
  /// \brief Returns condition.
  Expr *getCondition() const { return cast_or_null<Expr>(Condition); }
  /// \brief Return directive name modifier associated with the clause.
  OpenMPDirectiveKind getNameModifier() const { return NameModifier; }
  /// \brief Return the location of directive name modifier.
  SourceLocation getNameModifierLoc() const { return NameModifierLoc; }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_if;
  }

  /// The only child is the condition expression; the pre-init statement is
  /// exposed separately via OMPClauseWithPreInit.
  child_range children() { return child_range(&Condition, &Condition + 1); }
};
/// \brief This represents 'final' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp task final(a > 5)
/// \endcode
/// In this example directive '#pragma omp task' has simple 'final'
/// clause with condition 'a > 5'.
///
class OMPFinalClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief Condition of the 'final' clause.
  Stmt *Condition;
  /// \brief Set condition.
  ///
  void setCondition(Expr *Cond) { Condition = Cond; }
public:
  /// \brief Build 'final' clause with condition \a Cond.
  ///
  /// \param Cond Condition of the clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPFinalClause(Expr *Cond, SourceLocation StartLoc, SourceLocation LParenLoc,
                 SourceLocation EndLoc)
      : OMPClause(OMPC_final, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Condition(Cond) {}
  /// \brief Build an empty clause.
  ///
  OMPFinalClause()
      : OMPClause(OMPC_final, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), Condition(nullptr) {}
  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// \brief Returns the condition, or null for an empty clause.
  Expr *getCondition() const { return cast_or_null<Expr>(Condition); }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_final;
  }
  child_range children() { return child_range(&Condition, &Condition + 1); }
};
/// \brief This represents 'num_threads' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp parallel num_threads(6)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'num_threads'
/// clause with number of threads '6'.
///
class OMPNumThreadsClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief Number-of-threads expression of the 'num_threads' clause.
  Stmt *NumThreads;
  /// \brief Set the number-of-threads expression.
  ///
  void setNumThreads(Expr *NThreads) { NumThreads = NThreads; }
public:
  /// \brief Build 'num_threads' clause with expression \a NumThreads.
  ///
  /// \param NumThreads Number of threads for the construct.
  /// \param HelperNumThreads Helper expression for the number of threads.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPNumThreadsClause(Expr *NumThreads, Stmt *HelperNumThreads,
                      OpenMPDirectiveKind CaptureRegion,
                      SourceLocation StartLoc, SourceLocation LParenLoc,
                      SourceLocation EndLoc)
      : OMPClause(OMPC_num_threads, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc),
        NumThreads(NumThreads) {
    setPreInitStmt(HelperNumThreads, CaptureRegion);
  }
  /// \brief Build an empty clause.
  ///
  OMPNumThreadsClause()
      : OMPClause(OMPC_num_threads, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this), LParenLoc(SourceLocation()),
        NumThreads(nullptr) {}
  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// \brief Returns number of threads, or null for an empty clause.
  Expr *getNumThreads() const { return cast_or_null<Expr>(NumThreads); }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_num_threads;
  }
  child_range children() { return child_range(&NumThreads, &NumThreads + 1); }
};
/// \brief This represents 'safelen' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd safelen(4)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'safelen'
/// with single expression '4'.
/// If the safelen clause is used then no two iterations executed
/// concurrently with SIMD instructions can have a greater distance
/// in the logical iteration space than its value. The parameter of
/// the safelen clause must be a constant positive integer expression.
///
class OMPSafelenClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief Safe iteration space distance.
  Stmt *Safelen;
  /// \brief Set safelen.
  void setSafelen(Expr *Len) { Safelen = Len; }
public:
  /// \brief Build 'safelen' clause.
  ///
  /// \param Len Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPSafelenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
      : OMPClause(OMPC_safelen, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Safelen(Len) {}
  /// \brief Build an empty clause.
  ///
  explicit OMPSafelenClause()
      : OMPClause(OMPC_safelen, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), Safelen(nullptr) {}
  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// \brief Return safe iteration space distance, or null for an empty clause.
  Expr *getSafelen() const { return cast_or_null<Expr>(Safelen); }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_safelen;
  }
  child_range children() { return child_range(&Safelen, &Safelen + 1); }
};
/// \brief This represents 'simdlen' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd simdlen(4)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'simdlen'
/// with single expression '4'.
/// If the 'simdlen' clause is used then it specifies the preferred number of
/// iterations to be executed concurrently. The parameter of the 'simdlen'
/// clause must be a constant positive integer expression.
///
class OMPSimdlenClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief Preferred number of iterations to be executed concurrently.
  Stmt *Simdlen;
  /// \brief Set simdlen.
  void setSimdlen(Expr *Len) { Simdlen = Len; }
public:
  /// \brief Build 'simdlen' clause.
  ///
  /// \param Len Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPSimdlenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
      : OMPClause(OMPC_simdlen, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Simdlen(Len) {}
  /// \brief Build an empty clause.
  ///
  explicit OMPSimdlenClause()
      : OMPClause(OMPC_simdlen, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), Simdlen(nullptr) {}
  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// \brief Return the preferred number of concurrent iterations, or null for
  /// an empty clause.
  Expr *getSimdlen() const { return cast_or_null<Expr>(Simdlen); }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_simdlen;
  }
  child_range children() { return child_range(&Simdlen, &Simdlen + 1); }
};
/// \brief This represents 'collapse' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd collapse(3)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'collapse'
/// with single expression '3'.
/// The parameter must be a constant positive integer expression, it specifies
/// the number of nested loops that should be collapsed into a single iteration
/// space.
///
class OMPCollapseClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief Number of for-loops.
  Stmt *NumForLoops;
  /// \brief Set the number of associated for-loops.
  void setNumForLoops(Expr *Num) { NumForLoops = Num; }
public:
  /// \brief Build 'collapse' clause.
  ///
  /// \param Num Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPCollapseClause(Expr *Num, SourceLocation StartLoc,
                    SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_collapse, StartLoc, EndLoc), LParenLoc(LParenLoc),
        NumForLoops(Num) {}
  /// \brief Build an empty clause.
  ///
  explicit OMPCollapseClause()
      : OMPClause(OMPC_collapse, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), NumForLoops(nullptr) {}
  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// \brief Return the number of associated for-loops, or null for an empty
  /// clause.
  Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_collapse;
  }
  child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); }
};
/// \brief This represents 'default' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp parallel default(shared)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'default'
/// clause with kind 'shared'.
///
class OMPDefaultClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief A kind of the 'default' clause.
  OpenMPDefaultClauseKind Kind;
  /// \brief Start location of the kind in source code.
  SourceLocation KindKwLoc;
  /// \brief Set kind of the clauses.
  ///
  /// \param K Argument of clause.
  ///
  void setDefaultKind(OpenMPDefaultClauseKind K) { Kind = K; }
  /// \brief Set argument location.
  ///
  /// \param KLoc Argument location.
  ///
  void setDefaultKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }
public:
  /// \brief Build 'default' clause with argument \a A ('none' or 'shared').
  ///
  /// \param A Argument of the clause ('none' or 'shared').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPDefaultClause(OpenMPDefaultClauseKind A, SourceLocation ALoc,
                   SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
      : OMPClause(OMPC_default, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Kind(A), KindKwLoc(ALoc) {}
  /// \brief Build an empty clause.
  ///
  OMPDefaultClause()
      : OMPClause(OMPC_default, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), Kind(OMPC_DEFAULT_unknown),
        KindKwLoc(SourceLocation()) {}
  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// \brief Returns kind of the clause.
  OpenMPDefaultClauseKind getDefaultKind() const { return Kind; }
  /// \brief Returns location of clause kind.
  SourceLocation getDefaultKindKwLoc() const { return KindKwLoc; }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_default;
  }
  // The clause holds no expressions, so it has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};
/// \brief This represents 'proc_bind' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp parallel proc_bind(master)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'proc_bind'
/// clause with kind 'master'.
///
class OMPProcBindClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief A kind of the 'proc_bind' clause.
  OpenMPProcBindClauseKind Kind;
  /// \brief Start location of the kind in source code.
  SourceLocation KindKwLoc;
  /// \brief Set kind of the clause.
  ///
  /// \param K Kind of clause.
  ///
  void setProcBindKind(OpenMPProcBindClauseKind K) { Kind = K; }
  /// \brief Set clause kind location.
  ///
  /// \param KLoc Kind location.
  ///
  void setProcBindKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }
public:
  /// \brief Build 'proc_bind' clause with argument \a A ('master', 'close' or
  /// 'spread').
  ///
  /// \param A Argument of the clause ('master', 'close' or 'spread').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPProcBindClause(OpenMPProcBindClauseKind A, SourceLocation ALoc,
                    SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(OMPC_proc_bind, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Kind(A), KindKwLoc(ALoc) {}
  /// \brief Build an empty clause.
  ///
  OMPProcBindClause()
      : OMPClause(OMPC_proc_bind, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), Kind(OMPC_PROC_BIND_unknown),
        KindKwLoc(SourceLocation()) {}
  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// \brief Returns kind of the clause.
  OpenMPProcBindClauseKind getProcBindKind() const { return Kind; }
  /// \brief Returns location of clause kind.
  SourceLocation getProcBindKindKwLoc() const { return KindKwLoc; }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_proc_bind;
  }
  // The clause holds no expressions, so it has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};
/// \brief This represents 'schedule' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp for' has 'schedule' clause with
/// arguments 'static' and '3'.
///
class OMPScheduleClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief A kind of the 'schedule' clause.
  OpenMPScheduleClauseKind Kind;
  /// \brief Modifiers for 'schedule' clause.
  enum {FIRST, SECOND, NUM_MODIFIERS};
  OpenMPScheduleClauseModifier Modifiers[NUM_MODIFIERS];
  /// \brief Locations of modifiers.
  SourceLocation ModifiersLoc[NUM_MODIFIERS];
  /// \brief Start location of the schedule kind in source code.
  SourceLocation KindLoc;
  /// \brief Location of ',' (if any).
  SourceLocation CommaLoc;
  /// \brief Chunk size.
  Expr *ChunkSize;
  /// \brief Set schedule kind.
  ///
  /// \param K Schedule kind.
  ///
  void setScheduleKind(OpenMPScheduleClauseKind K) { Kind = K; }
  /// \brief Set the first schedule modifier.
  ///
  /// \param M Schedule modifier.
  ///
  void setFirstScheduleModifier(OpenMPScheduleClauseModifier M) {
    Modifiers[FIRST] = M;
  }
  /// \brief Set the second schedule modifier.
  ///
  /// \param M Schedule modifier.
  ///
  void setSecondScheduleModifier(OpenMPScheduleClauseModifier M) {
    Modifiers[SECOND] = M;
  }
  /// \brief Set location of the first schedule modifier.
  ///
  void setFirstScheduleModifierLoc(SourceLocation Loc) {
    ModifiersLoc[FIRST] = Loc;
  }
  /// \brief Set location of the second schedule modifier.
  ///
  void setSecondScheduleModifierLoc(SourceLocation Loc) {
    ModifiersLoc[SECOND] = Loc;
  }
  /// \brief Set the next unset schedule modifier: fills the first slot if it
  /// is still unknown, otherwise the second (which must still be unknown).
  ///
  /// \param M Schedule modifier.
  ///
  /// NOTE(review): the name is missing an 'i' ("Modifer"); it is kept as-is
  /// because callers (e.g. the OMPClauseReader friend) may use this spelling.
  void setScheduleModifer(OpenMPScheduleClauseModifier M) {
    if (Modifiers[FIRST] == OMPC_SCHEDULE_MODIFIER_unknown)
      Modifiers[FIRST] = M;
    else {
      assert(Modifiers[SECOND] == OMPC_SCHEDULE_MODIFIER_unknown);
      Modifiers[SECOND] = M;
    }
  }
  /// \brief Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  ///
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Set schedule kind start location.
  ///
  /// \param KLoc Schedule kind location.
  ///
  void setScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }
  /// \brief Set location of ','.
  ///
  /// \param Loc Location of ','.
  ///
  void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }
  /// \brief Set chunk size.
  ///
  /// \param E Chunk size.
  ///
  void setChunkSize(Expr *E) { ChunkSize = E; }
public:
  /// \brief Build 'schedule' clause with schedule kind \a Kind and chunk size
  /// expression \a ChunkSize.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param CommaLoc Location of ','.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind Schedule kind.
  /// \param ChunkSize Chunk size.
  /// \param HelperChunkSize Helper chunk size for combined directives.
  /// \param M1 The first modifier applied to 'schedule' clause.
  /// \param M1Loc Location of the first modifier
  /// \param M2 The second modifier applied to 'schedule' clause.
  /// \param M2Loc Location of the second modifier
  ///
  OMPScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation KLoc, SourceLocation CommaLoc,
                    SourceLocation EndLoc, OpenMPScheduleClauseKind Kind,
                    Expr *ChunkSize, Stmt *HelperChunkSize,
                    OpenMPScheduleClauseModifier M1, SourceLocation M1Loc,
                    OpenMPScheduleClauseModifier M2, SourceLocation M2Loc)
      : OMPClause(OMPC_schedule, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), Kind(Kind), KindLoc(KLoc), CommaLoc(CommaLoc),
        ChunkSize(ChunkSize) {
    setPreInitStmt(HelperChunkSize);
    Modifiers[FIRST] = M1;
    Modifiers[SECOND] = M2;
    ModifiersLoc[FIRST] = M1Loc;
    ModifiersLoc[SECOND] = M2Loc;
  }
  /// \brief Build an empty clause.
  ///
  explicit OMPScheduleClause()
      : OMPClause(OMPC_schedule, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this), Kind(OMPC_SCHEDULE_unknown),
        ChunkSize(nullptr) {
    Modifiers[FIRST] = OMPC_SCHEDULE_MODIFIER_unknown;
    Modifiers[SECOND] = OMPC_SCHEDULE_MODIFIER_unknown;
  }
  /// \brief Get kind of the clause.
  ///
  OpenMPScheduleClauseKind getScheduleKind() const { return Kind; }
  /// \brief Get the first modifier of the clause.
  ///
  OpenMPScheduleClauseModifier getFirstScheduleModifier() const {
    return Modifiers[FIRST];
  }
  /// \brief Get the second modifier of the clause.
  ///
  OpenMPScheduleClauseModifier getSecondScheduleModifier() const {
    return Modifiers[SECOND];
  }
  /// \brief Get location of '('.
  ///
  SourceLocation getLParenLoc() { return LParenLoc; }
  /// \brief Get kind location.
  ///
  SourceLocation getScheduleKindLoc() { return KindLoc; }
  /// \brief Get the first modifier location.
  ///
  SourceLocation getFirstScheduleModifierLoc() const {
    return ModifiersLoc[FIRST];
  }
  /// \brief Get the second modifier location.
  ///
  SourceLocation getSecondScheduleModifierLoc() const {
    return ModifiersLoc[SECOND];
  }
  /// \brief Get location of ','.
  ///
  SourceLocation getCommaLoc() { return CommaLoc; }
  /// \brief Get chunk size.
  ///
  Expr *getChunkSize() { return ChunkSize; }
  /// \brief Get chunk size.
  ///
  const Expr *getChunkSize() const { return ChunkSize; }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_schedule;
  }
  // ChunkSize is stored as Expr *, but children() must expose a Stmt ** range;
  // the cast reinterprets the single pointer slot as such a range.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(&ChunkSize),
                       reinterpret_cast<Stmt **>(&ChunkSize) + 1);
  }
};
/// \brief This represents 'ordered' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for ordered (2)
/// \endcode
/// In this example directive '#pragma omp for' has 'ordered' clause with
/// parameter 2.
///
class OMPOrderedClause : public OMPClause {
  friend class OMPClauseReader;
  /// \brief Location of '('.
  SourceLocation LParenLoc;
  /// \brief Number of for-loops.
  Stmt *NumForLoops;
  /// \brief Set the number of associated for-loops.
  void setNumForLoops(Expr *Num) { NumForLoops = Num; }
public:
  /// \brief Build 'ordered' clause.
  ///
  /// \param Num Expression, possibly associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPOrderedClause(Expr *Num, SourceLocation StartLoc,
                   SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_ordered, StartLoc, EndLoc), LParenLoc(LParenLoc),
        NumForLoops(Num) {}
  /// \brief Build an empty clause.
  ///
  explicit OMPOrderedClause()
      : OMPClause(OMPC_ordered, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), NumForLoops(nullptr) {}
  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// \brief Return the number of associated for-loops; null if the clause has
  /// no parameter.
  Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_ordered;
  }
  child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); }
};
/// \brief This represents 'nowait' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for nowait
/// \endcode
/// In this example directive '#pragma omp for' has 'nowait' clause.
///
class OMPNowaitClause : public OMPClause {
public:
  /// \brief Build 'nowait' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_nowait, StartLoc, EndLoc) {}
  /// \brief Build an empty clause.
  ///
  OMPNowaitClause()
      : OMPClause(OMPC_nowait, SourceLocation(), SourceLocation()) {}
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_nowait;
  }
  // The clause carries no expressions, so it has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};
/// \brief This represents 'untied' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp task untied
/// \endcode
/// In this example directive '#pragma omp task' has 'untied' clause.
///
class OMPUntiedClause : public OMPClause {
public:
  /// \brief Build 'untied' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_untied, StartLoc, EndLoc) {}
  /// \brief Build an empty clause.
  ///
  OMPUntiedClause()
      : OMPClause(OMPC_untied, SourceLocation(), SourceLocation()) {}
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_untied;
  }
  // The clause carries no expressions, so it has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};
/// \brief This represents 'mergeable' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp task mergeable
/// \endcode
/// In this example directive '#pragma omp task' has 'mergeable' clause.
///
class OMPMergeableClause : public OMPClause {
public:
  /// \brief Build 'mergeable' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_mergeable, StartLoc, EndLoc) {}
  /// \brief Build an empty clause.
  ///
  OMPMergeableClause()
      : OMPClause(OMPC_mergeable, SourceLocation(), SourceLocation()) {}
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_mergeable;
  }
  // The clause carries no expressions, so it has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};
/// \brief This represents 'read' clause in the '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic read
/// \endcode
/// In this example directive '#pragma omp atomic' has 'read' clause.
///
class OMPReadClause : public OMPClause {
public:
  /// \brief Build 'read' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_read, StartLoc, EndLoc) {}
  /// \brief Build an empty clause.
  ///
  OMPReadClause() : OMPClause(OMPC_read, SourceLocation(), SourceLocation()) {}
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_read;
  }
  // The clause carries no expressions, so it has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};
/// \brief This represents 'write' clause in the '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic write
/// \endcode
/// In this example directive '#pragma omp atomic' has 'write' clause.
///
class OMPWriteClause : public OMPClause {
public:
  /// \brief Build 'write' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_write, StartLoc, EndLoc) {}
  /// \brief Build an empty clause.
  ///
  OMPWriteClause()
      : OMPClause(OMPC_write, SourceLocation(), SourceLocation()) {}
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_write;
  }
  // The clause carries no expressions, so it has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};
/// \brief This represents 'update' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic update
/// \endcode
/// In this example directive '#pragma omp atomic' has 'update' clause.
///
class OMPUpdateClause : public OMPClause {
public:
  /// \brief Build 'update' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_update, StartLoc, EndLoc) {}
  /// \brief Build an empty clause.
  ///
  OMPUpdateClause()
      : OMPClause(OMPC_update, SourceLocation(), SourceLocation()) {}
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_update;
  }
  // The clause carries no expressions, so it has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};
/// \brief This represents 'capture' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has 'capture' clause.
///
class OMPCaptureClause : public OMPClause {
public:
  /// \brief Build 'capture' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_capture, StartLoc, EndLoc) {}
  /// \brief Build an empty clause.
  ///
  OMPCaptureClause()
      : OMPClause(OMPC_capture, SourceLocation(), SourceLocation()) {}
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_capture;
  }
  // The clause carries no expressions, so it has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};
/// \brief This represents 'seq_cst' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic seq_cst
/// \endcode
/// In this example directive '#pragma omp atomic' has 'seq_cst' clause.
///
class OMPSeqCstClause : public OMPClause {
public:
  /// \brief Build 'seq_cst' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_seq_cst, StartLoc, EndLoc) {}
  /// \brief Build an empty clause.
  ///
  OMPSeqCstClause()
      : OMPClause(OMPC_seq_cst, SourceLocation(), SourceLocation()) {}
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_seq_cst;
  }
  // The clause carries no expressions, so it has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};
/// \brief This represents clause 'private' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel private(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'private'
/// with the variables 'a' and 'b'.
///
/// Storage layout: the trailing objects hold 2*N expressions — the first N
/// are references to the original variables (the var list), the next N are
/// references to the generated private copies.
class OMPPrivateClause final
    : public OMPVarListClause<OMPPrivateClause>,
      private llvm::TrailingObjects<OMPPrivateClause, Expr *> {
  friend TrailingObjects;
  friend OMPVarListClause;
  friend class OMPClauseReader;
  /// \brief Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  ///
  OMPPrivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPPrivateClause>(OMPC_private, StartLoc, LParenLoc,
                                           EndLoc, N) {}
  /// \brief Build an empty clause.
  ///
  /// \param N Number of variables.
  ///
  explicit OMPPrivateClause(unsigned N)
      : OMPVarListClause<OMPPrivateClause>(OMPC_private, SourceLocation(),
                                           SourceLocation(), SourceLocation(),
                                           N) {}
  /// \brief Sets the list of references to private copies with initializers for
  /// new private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);
  /// \brief Gets the list of references to private copies with initializers for
  /// new private variables. (Stored immediately after the var list.)
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }
public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param PrivateVL List of references to private copies with initializers.
  ///
  static OMPPrivateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc, ArrayRef<Expr *> VL,
                                  ArrayRef<Expr *> PrivateVL);
  /// \brief Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  ///
  static OMPPrivateClause *CreateEmpty(const ASTContext &C, unsigned N);
  typedef MutableArrayRef<Expr *>::iterator private_copies_iterator;
  typedef ArrayRef<const Expr *>::iterator private_copies_const_iterator;
  typedef llvm::iterator_range<private_copies_iterator> private_copies_range;
  typedef llvm::iterator_range<private_copies_const_iterator>
      private_copies_const_range;
  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }
  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }
  // Only the original variable references are exposed as children; the
  // private copies are not part of the child range.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_private;
  }
};
/// \brief This represents clause 'firstprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel firstprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'firstprivate'
/// with the variables 'a' and 'b'.
///
class OMPFirstprivateClause final
    : public OMPVarListClause<OMPFirstprivateClause>,
      public OMPClauseWithPreInit,
      private llvm::TrailingObjects<OMPFirstprivateClause, Expr *> {
  // Trailing storage layout (3 * N expressions, N == varlist_size()):
  //   { Vars[N] /* in OMPVarListClause */; PrivateCopies[N]; Inits[N]; }
  // The getters below compute each sub-array's start from the end of the
  // previous one, so their relative order must not change.
  friend TrailingObjects;
  friend OMPVarListClause;
  friend class OMPClauseReader;

  /// \brief Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  ///
  OMPFirstprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                        SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPFirstprivateClause>(OMPC_firstprivate, StartLoc,
                                                LParenLoc, EndLoc, N),
        OMPClauseWithPreInit(this) {}

  /// \brief Build an empty clause.
  ///
  /// \param N Number of variables.
  ///
  explicit OMPFirstprivateClause(unsigned N)
      : OMPVarListClause<OMPFirstprivateClause>(
            OMPC_firstprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPreInit(this) {}

  /// \brief Sets the list of references to private copies with initializers for
  /// new private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// \brief Gets the list of references to private copies with initializers for
  /// new private variables.
  MutableArrayRef<Expr *> getPrivateCopies() {
    // Private copies are stored immediately after the variable list.
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// \brief Sets the list of references to initializer variables for new
  /// private variables.
  /// \param VL List of references.
  void setInits(ArrayRef<Expr *> VL);

  /// \brief Gets the list of references to initializer variables for new
  /// private variables.
  MutableArrayRef<Expr *> getInits() {
    // Inits follow the private copies in the trailing storage.
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the original variables.
  /// \param PrivateVL List of references to private copies with initializers.
  /// \param InitVL List of references to auto generated variables used for
  /// initialization of a single array element. Used if firstprivate variable is
  /// of array type.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  ///
  static OMPFirstprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL,
         ArrayRef<Expr *> InitVL, Stmt *PreInit);

  /// \brief Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  ///
  static OMPFirstprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  typedef MutableArrayRef<Expr *>::iterator private_copies_iterator;
  typedef ArrayRef<const Expr *>::iterator private_copies_const_iterator;
  typedef llvm::iterator_range<private_copies_iterator> private_copies_range;
  typedef llvm::iterator_range<private_copies_const_iterator>
      private_copies_const_range;

  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }
  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  typedef MutableArrayRef<Expr *>::iterator inits_iterator;
  typedef ArrayRef<const Expr *>::iterator inits_const_iterator;
  typedef llvm::iterator_range<inits_iterator> inits_range;
  typedef llvm::iterator_range<inits_const_iterator> inits_const_range;

  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }
  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  child_range children() {
    // Only the original variable references are traversed as children.
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_firstprivate;
  }
};
/// \brief This represents clause 'lastprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd lastprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'lastprivate'
/// with the variables 'a' and 'b'.
class OMPLastprivateClause final
    : public OMPVarListClause<OMPLastprivateClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPLastprivateClause, Expr *> {
  // There are 4 additional tail-allocated arrays at the end of the class:
  // 1. Contains list of pseudo variables with the default initialization for
  // each non-firstprivate variables. Used in codegen for initialization of
  // lastprivate copies.
  // 2. List of helper expressions for proper generation of assignment operation
  // required for lastprivate clause. This list represents private variables
  // (for arrays, single array element).
  // 3. List of helper expressions for proper generation of assignment operation
  // required for lastprivate clause. This list represents original variables
  // (for arrays, single array element).
  // 4. List of helper expressions that represents assignment operation:
  // \code
  // DstExprs = SrcExprs;
  // \endcode
  // Required for proper codegen of final assignment performed by the
  // lastprivate clause.
  //
  // Each getter below computes its sub-array's start from the end of the
  // previous one, so the relative order of the getters must not change.
  friend TrailingObjects;
  friend OMPVarListClause;
  friend class OMPClauseReader;

  /// \brief Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  ///
  OMPLastprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPLastprivateClause>(OMPC_lastprivate, StartLoc,
                                               LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this) {}

  /// \brief Build an empty clause.
  ///
  /// \param N Number of variables.
  ///
  explicit OMPLastprivateClause(unsigned N)
      : OMPVarListClause<OMPLastprivateClause>(
            OMPC_lastprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// \brief Get the list of helper expressions for initialization of private
  /// copies for lastprivate variables.
  MutableArrayRef<Expr *> getPrivateCopies() {
    // Array 1: stored immediately after the variable list.
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private variables (for arrays, single
  /// array element) in the final assignment statement performed by the
  /// lastprivate clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// \brief Get the list of helper source expressions.
  MutableArrayRef<Expr *> getSourceExprs() {
    // Array 2: follows the private copies.
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent original variables (for arrays, single
  /// array element) in the final assignment statement performed by the
  /// lastprivate clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// \brief Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getDestinationExprs() {
    // Array 3: follows the source expressions.
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// \brief Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign private copy of the variable to original variable.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// \brief Get the list of helper assignment expressions.
  MutableArrayRef<Expr *> getAssignmentOps() {
    // Array 4: follows the destination expressions.
    return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for lastprivate clause. This list represents
  /// private variables (for arrays, single array element).
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for lastprivate clause. This list represents
  /// original variables (for arrays, single array element).
  /// \param AssignmentOps List of helper expressions that represents assignment
  /// operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of final assignment performed by the
  /// lastprivate clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  ///
  static OMPLastprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps,
         Stmt *PreInit, Expr *PostUpdate);

  /// \brief Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  ///
  static OMPLastprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator;
  typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator;
  typedef llvm::iterator_range<helper_expr_iterator> helper_expr_range;
  typedef llvm::iterator_range<helper_expr_const_iterator>
      helper_expr_const_range;

  /// \brief Set list of helper expressions, required for generation of private
  /// copies of original lastprivate variables.
  void setPrivateCopies(ArrayRef<Expr *> PrivateCopies);

  helper_expr_const_range private_copies() const {
    return helper_expr_const_range(getPrivateCopies().begin(),
                                   getPrivateCopies().end());
  }
  helper_expr_range private_copies() {
    return helper_expr_range(getPrivateCopies().begin(),
                             getPrivateCopies().end());
  }
  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }
  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }
  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }
  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }
  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }
  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  child_range children() {
    // Only the original variable references are traversed as children.
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_lastprivate;
  }
};
/// \brief This represents clause 'shared' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel shared(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'shared'
/// with the variables 'a' and 'b'.
///
class OMPSharedClause final
    : public OMPVarListClause<OMPSharedClause>,
      private llvm::TrailingObjects<OMPSharedClause, Expr *> {
  // 'shared' needs no helper expressions: the trailing storage holds only the
  // variable list managed by OMPVarListClause.
  friend TrailingObjects;
  friend OMPVarListClause;

  /// \brief Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  ///
  OMPSharedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPSharedClause>(OMPC_shared, StartLoc, LParenLoc,
                                          EndLoc, N) {}

  /// \brief Build an empty clause.
  ///
  /// \param N Number of variables.
  ///
  explicit OMPSharedClause(unsigned N)
      : OMPVarListClause<OMPSharedClause>(OMPC_shared, SourceLocation(),
                                          SourceLocation(), SourceLocation(),
                                          N) {}

public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  ///
  static OMPSharedClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// \brief Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  ///
  static OMPSharedClause *CreateEmpty(const ASTContext &C, unsigned N);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_shared;
  }
};
/// \brief This represents clause 'reduction' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'reduction'
/// with operator '+' and the variables 'a' and 'b'.
///
class OMPReductionClause final
    : public OMPVarListClause<OMPReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPReductionClause, Expr *> {
  // Trailing storage layout (5 * N expressions, N == varlist_size()):
  //   { Vars[N] /* in OMPVarListClause */; Privates[N]; LHSExprs[N];
  //     RHSExprs[N]; ReductionOps[N]; }
  // Each getter below computes its sub-array's start from the end of the
  // previous one, so their relative order must not change.
  friend TrailingObjects;
  friend OMPVarListClause;
  friend class OMPClauseReader;

  /// \brief Location of ':'.
  SourceLocation ColonLoc;
  /// \brief Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;
  /// \brief Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// \brief Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param ColonLoc Location of ':'.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  ///
  OMPReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N,
                     NestedNameSpecifierLoc QualifierLoc,
                     const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPReductionClause>(OMPC_reduction, StartLoc,
                                             LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// \brief Build an empty clause.
  ///
  /// \param N Number of variables.
  ///
  explicit OMPReductionClause(unsigned N)
      : OMPVarListClause<OMPReductionClause>(OMPC_reduction, SourceLocation(),
                                             SourceLocation(), SourceLocation(),
                                             N),
        OMPClauseWithPostUpdate(this), ColonLoc(), QualifierLoc(), NameInfo() {}

  /// \brief Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }
  /// \brief Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }
  /// \brief Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private copy of the reduction
  /// variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// \brief Get the list of helper privates.
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent LHS expression in the final
  /// reduction expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// \brief Get the list of helper LHS expressions.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent RHS expression in the final
  /// reduction expression performed by the reduction clause.
  /// Also, variables in these expressions are used for proper initialization of
  /// reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// \brief Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// \brief Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// \brief Get the list of helper reduction expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization of
  /// reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by the
  /// reduction clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  ///
  static OMPReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate);

  /// \brief Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  ///
  static OMPReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// \brief Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }
  /// \brief Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }
  /// \brief Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator;
  typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator;
  typedef llvm::iterator_range<helper_expr_iterator> helper_expr_range;
  typedef llvm::iterator_range<helper_expr_const_iterator>
      helper_expr_const_range;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }
  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }

  child_range children() {
    // Only the original variable references are traversed as children.
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_reduction;
  }
};
/// This represents clause 'task_reduction' in the '#pragma omp taskgroup'
/// directives.
///
/// \code
/// #pragma omp taskgroup task_reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp taskgroup' has clause
/// 'task_reduction' with operator '+' and the variables 'a' and 'b'.
///
class OMPTaskReductionClause final
    : public OMPVarListClause<OMPTaskReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPTaskReductionClause, Expr *> {
  // Trailing storage layout mirrors OMPReductionClause (5 * N expressions):
  //   { Vars[N] /* in OMPVarListClause */; Privates[N]; LHSExprs[N];
  //     RHSExprs[N]; ReductionOps[N]; }
  // Each getter below computes its sub-array's start from the end of the
  // previous one, so their relative order must not change.
  friend TrailingObjects;
  friend OMPVarListClause;
  friend class OMPClauseReader;

  /// Location of ':'.
  SourceLocation ColonLoc;
  /// Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;
  /// Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param ColonLoc Location of ':'.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  ///
  OMPTaskReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                         SourceLocation ColonLoc, SourceLocation EndLoc,
                         unsigned N, NestedNameSpecifierLoc QualifierLoc,
                         const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPTaskReductionClause>(OMPC_task_reduction, StartLoc,
                                                 LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  ///
  explicit OMPTaskReductionClause(unsigned N)
      : OMPVarListClause<OMPTaskReductionClause>(
            OMPC_task_reduction, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this), ColonLoc(), QualifierLoc(), NameInfo() {}

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }
  /// Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }
  /// Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent private copy of the reduction variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// Get the list of helper privates.
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent LHS expression in the final reduction
  /// expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// Get the list of helper LHS expressions.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent RHS expression in the final reduction
  /// expression performed by the reduction clause. Also, variables in these
  /// expressions are used for proper initialization of reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization of
  /// reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by the
  /// reduction clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  ///
  static OMPTaskReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  ///
  static OMPTaskReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }
  /// Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }
  /// Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator;
  typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator;
  typedef llvm::iterator_range<helper_expr_iterator> helper_expr_range;
  typedef llvm::iterator_range<helper_expr_const_iterator>
      helper_expr_const_range;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }
  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }

  child_range children() {
    // Only the original variable references are traversed as children.
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_task_reduction;
  }
};
/// \brief This represents clause 'linear' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd linear(a,b : 2)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'linear'
/// with variables 'a', 'b' and linear step '2'.
///
class OMPLinearClause final
: public OMPVarListClause<OMPLinearClause>,
public OMPClauseWithPostUpdate,
private llvm::TrailingObjects<OMPLinearClause, Expr *> {
friend TrailingObjects;
friend OMPVarListClause;
friend class OMPClauseReader;
/// \brief Modifier of 'linear' clause.
OpenMPLinearClauseKind Modifier;
/// \brief Location of linear modifier if any.
SourceLocation ModifierLoc;
/// \brief Location of ':'.
SourceLocation ColonLoc;
/// \brief Sets the linear step for clause.
void setStep(Expr *Step) { *(getFinals().end()) = Step; }
/// \brief Sets the expression to calculate linear step for clause.
void setCalcStep(Expr *CalcStep) { *(getFinals().end() + 1) = CalcStep; }
/// \brief Build 'linear' clause with given number of variables \a NumVars.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param NumVars Number of variables.
///
OMPLinearClause(SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
unsigned NumVars)
: OMPVarListClause<OMPLinearClause>(OMPC_linear, StartLoc, LParenLoc,
EndLoc, NumVars),
OMPClauseWithPostUpdate(this), Modifier(Modifier),
ModifierLoc(ModifierLoc), ColonLoc(ColonLoc) {}
/// \brief Build an empty clause.
///
/// \param NumVars Number of variables.
///
explicit OMPLinearClause(unsigned NumVars)
: OMPVarListClause<OMPLinearClause>(OMPC_linear, SourceLocation(),
SourceLocation(), SourceLocation(),
NumVars),
OMPClauseWithPostUpdate(this), Modifier(OMPC_LINEAR_val), ModifierLoc(),
ColonLoc() {}
/// \brief Gets the list of private copies of the linear variables.
///
/// There are NumVars expressions with initial values allocated after the
/// varlist, they are followed by NumVars update expressions (used to update
/// the linear variable's value on current iteration) and they are followed by
/// NumVars final expressions (used to calculate the linear variable's
/// value after the loop body). After these lists, there are 2 helper
/// expressions - linear step and a helper to calculate it before the
/// loop body (used when the linear step is not constant):
///
/// { Vars[] /* in OMPVarListClause */; Privates[]; Inits[]; Updates[];
/// Finals[]; Step; CalcStep; }
///
MutableArrayRef<Expr *> getPrivates() {
  return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivates() const {
  return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// \brief Gets the list of initial values for the linear variables
/// (second trailing list, right after Privates[]).
MutableArrayRef<Expr *> getInits() {
  return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
}
ArrayRef<const Expr *> getInits() const {
  return llvm::makeArrayRef(getPrivates().end(), varlist_size());
}
/// \brief Gets the list of update expressions for the linear variables.
MutableArrayRef<Expr *> getUpdates() {
  return MutableArrayRef<Expr *>(getInits().end(), varlist_size());
}
ArrayRef<const Expr *> getUpdates() const {
  return llvm::makeArrayRef(getInits().end(), varlist_size());
}
/// \brief Gets the list of final update expressions for the linear variables.
MutableArrayRef<Expr *> getFinals() {
  return MutableArrayRef<Expr *>(getUpdates().end(), varlist_size());
}
ArrayRef<const Expr *> getFinals() const {
  return llvm::makeArrayRef(getUpdates().end(), varlist_size());
}
/// \brief Sets the list of the private copies of the original linear
/// variables.
/// \param PL List of expressions; must have varlist_size() elements.
void setPrivates(ArrayRef<Expr *> PL);
/// \brief Sets the list of the initial values for linear variables.
/// \param IL List of expressions; must have varlist_size() elements.
void setInits(ArrayRef<Expr *> IL);
public:
/// \brief Creates clause with a list of variables \a VL and a linear step
/// \a Step.
///
/// \param C AST Context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param Modifier Modifier of 'linear' clause.
/// \param ModifierLoc Modifier location.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param PL List of private copies of original variables.
/// \param IL List of initial values for the variables.
/// \param Step Linear step.
/// \param CalcStep Calculation of the linear step.
/// \param PreInit Statement that must be executed before entering the OpenMP
/// region with this clause.
/// \param PostUpdate Expression that must be executed after exit from the
/// OpenMP region with this clause.
static OMPLinearClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
ArrayRef<Expr *> PL, ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep,
Stmt *PreInit, Expr *PostUpdate);
/// \brief Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param NumVars Number of variables.
///
static OMPLinearClause *CreateEmpty(const ASTContext &C, unsigned NumVars);
/// \brief Set modifier.
void setModifier(OpenMPLinearClauseKind Kind) { Modifier = Kind; }
/// \brief Return modifier.
OpenMPLinearClauseKind getModifier() const { return Modifier; }
/// \brief Set modifier location.
void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }
/// \brief Return modifier location.
SourceLocation getModifierLoc() const { return ModifierLoc; }
/// \brief Sets the location of ':'.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
/// \brief Returns the location of ':'.
SourceLocation getColonLoc() const { return ColonLoc; }
/// \brief Returns linear step (first helper slot after the Finals[] list).
Expr *getStep() { return *(getFinals().end()); }
/// \brief Returns linear step.
const Expr *getStep() const { return *(getFinals().end()); }
/// \brief Returns expression to calculate linear step (second helper slot).
Expr *getCalcStep() { return *(getFinals().end() + 1); }
/// \brief Returns expression to calculate linear step.
const Expr *getCalcStep() const { return *(getFinals().end() + 1); }
/// \brief Sets the list of update expressions for linear variables.
/// \param UL List of expressions; must have varlist_size() elements.
void setUpdates(ArrayRef<Expr *> UL);
/// \brief Sets the list of final update expressions for linear variables.
/// \param FL List of expressions; must have varlist_size() elements.
void setFinals(ArrayRef<Expr *> FL);
// Iterator ranges over the trailing expression lists; see the storage layout
// comment on getPrivates().
typedef MutableArrayRef<Expr *>::iterator privates_iterator;
typedef ArrayRef<const Expr *>::iterator privates_const_iterator;
typedef llvm::iterator_range<privates_iterator> privates_range;
typedef llvm::iterator_range<privates_const_iterator> privates_const_range;
/// \brief Range over the private copies of the linear variables.
privates_range privates() {
  return privates_range(getPrivates().begin(), getPrivates().end());
}
privates_const_range privates() const {
  return privates_const_range(getPrivates().begin(), getPrivates().end());
}
typedef MutableArrayRef<Expr *>::iterator inits_iterator;
typedef ArrayRef<const Expr *>::iterator inits_const_iterator;
typedef llvm::iterator_range<inits_iterator> inits_range;
typedef llvm::iterator_range<inits_const_iterator> inits_const_range;
/// \brief Range over the initial values of the linear variables.
inits_range inits() {
  return inits_range(getInits().begin(), getInits().end());
}
inits_const_range inits() const {
  return inits_const_range(getInits().begin(), getInits().end());
}
typedef MutableArrayRef<Expr *>::iterator updates_iterator;
typedef ArrayRef<const Expr *>::iterator updates_const_iterator;
typedef llvm::iterator_range<updates_iterator> updates_range;
typedef llvm::iterator_range<updates_const_iterator> updates_const_range;
/// \brief Range over the per-iteration update expressions.
updates_range updates() {
  return updates_range(getUpdates().begin(), getUpdates().end());
}
updates_const_range updates() const {
  return updates_const_range(getUpdates().begin(), getUpdates().end());
}
typedef MutableArrayRef<Expr *>::iterator finals_iterator;
typedef ArrayRef<const Expr *>::iterator finals_const_iterator;
typedef llvm::iterator_range<finals_iterator> finals_range;
typedef llvm::iterator_range<finals_const_iterator> finals_const_range;
/// \brief Range over the final-value expressions.
finals_range finals() {
  return finals_range(getFinals().begin(), getFinals().end());
}
finals_const_range finals() const {
  return finals_const_range(getFinals().begin(), getFinals().end());
}
/// \brief Child range covers the variable list only; the trailing helper
/// expression lists are not exposed as children here.
child_range children() {
  return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                     reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
  return T->getClauseKind() == OMPC_linear;
}
};
/// \brief This represents clause 'aligned' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd aligned(a,b : 8)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'aligned'
/// with variables 'a', 'b' and alignment '8'.
///
class OMPAlignedClause final
    : public OMPVarListClause<OMPAlignedClause>,
      private llvm::TrailingObjects<OMPAlignedClause, Expr *> {
  friend TrailingObjects;
  friend OMPVarListClause;
  friend class OMPClauseReader;

  /// \brief Location of ':'.
  SourceLocation ColonLoc;

  /// \brief Sets the alignment for clause.
  /// The single alignment expression is stored in the trailing slot
  /// immediately after the variable list.
  void setAlignment(Expr *A) { *varlist_end() = A; }

  /// \brief Build 'aligned' clause with given number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of variables.
  ///
  OMPAlignedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation ColonLoc, SourceLocation EndLoc,
                   unsigned NumVars)
      : OMPVarListClause<OMPAlignedClause>(OMPC_aligned, StartLoc, LParenLoc,
                                           EndLoc, NumVars),
        ColonLoc(ColonLoc) {}

  /// \brief Build an empty clause with all locations invalid.
  ///
  /// \param NumVars Number of variables.
  ///
  explicit OMPAlignedClause(unsigned NumVars)
      : OMPVarListClause<OMPAlignedClause>(OMPC_aligned, SourceLocation(),
                                           SourceLocation(), SourceLocation(),
                                           NumVars),
        ColonLoc(SourceLocation()) {}

public:
  /// \brief Creates clause with a list of variables \a VL and alignment \a A.
  ///
  /// \param C AST Context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param A Alignment.
  static OMPAlignedClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation ColonLoc,
                                  SourceLocation EndLoc, ArrayRef<Expr *> VL,
                                  Expr *A);

  /// \brief Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param NumVars Number of variables.
  ///
  static OMPAlignedClause *CreateEmpty(const ASTContext &C, unsigned NumVars);

  /// \brief Sets the location of ':'.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

  /// \brief Returns the location of ':'.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// \brief Returns alignment (the trailing expression after the varlist).
  Expr *getAlignment() { return *varlist_end(); }

  /// \brief Returns alignment.
  const Expr *getAlignment() const { return *varlist_end(); }

  /// \brief Child range covers the variable list only.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_aligned;
  }
};
/// \brief This represents clause 'copyin' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel copyin(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'copyin'
/// with the variables 'a' and 'b'.
///
class OMPCopyinClause final
    : public OMPVarListClause<OMPCopyinClause>,
      private llvm::TrailingObjects<OMPCopyinClause, Expr *> {
  // Class has 3 additional tail allocated arrays (each of varlist_size()
  // elements, laid out in this order after the variable list):
  // 1. List of helper expressions for proper generation of assignment operation
  // required for copyin clause. This list represents sources.
  // 2. List of helper expressions for proper generation of assignment operation
  // required for copyin clause. This list represents destinations.
  // 3. List of helper expressions that represents assignment operation:
  // \code
  // DstExprs = SrcExprs;
  // \endcode
  // Required for proper codegen of propagation of master's thread values of
  // threadprivate variables to local instances of that variables in other
  // implicit threads.
  friend TrailingObjects;
  friend OMPVarListClause;
  friend class OMPClauseReader;

  /// \brief Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  ///
  OMPCopyinClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPCopyinClause>(OMPC_copyin, StartLoc, LParenLoc,
                                          EndLoc, N) {}

  /// \brief Build an empty clause with all locations invalid.
  ///
  /// \param N Number of variables.
  ///
  explicit OMPCopyinClause(unsigned N)
      : OMPVarListClause<OMPCopyinClause>(OMPC_copyin, SourceLocation(),
                                          SourceLocation(), SourceLocation(),
                                          N) {}

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent source expression in the final
  /// assignment statement performed by the copyin clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// \brief Get the list of helper source expressions (first trailing list,
  /// immediately after the variable list).
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent destination expression in the final
  /// assignment statement performed by the copyin clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// \brief Get the list of helper destination expressions (second trailing
  /// list, right after the source expressions).
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// \brief Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign source helper expressions to destination helper expressions
  /// correspondingly.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// \brief Get the list of helper assignment expressions (third trailing
  /// list, right after the destination expressions).
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for copyin clause. This list represents
  /// sources.
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for copyin clause. This list represents
  /// destinations.
  /// \param AssignmentOps List of helper expressions that represents assignment
  /// operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of propagation of master's thread values of
  /// threadprivate variables to local instances of that variables in other
  /// implicit threads.
  ///
  static OMPCopyinClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);

  /// \brief Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  ///
  static OMPCopyinClause *CreateEmpty(const ASTContext &C, unsigned N);

  typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator;
  typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator;
  typedef llvm::iterator_range<helper_expr_iterator> helper_expr_range;
  typedef llvm::iterator_range<helper_expr_const_iterator>
      helper_expr_const_range;

  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }
  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }
  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }
  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }
  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }
  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  /// \brief Child range covers the variable list only; the helper expression
  /// lists are not exposed as children here.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_copyin;
  }
};
/// \brief This represents clause 'copyprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp single copyprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp single' has clause 'copyprivate'
/// with the variables 'a' and 'b'.
///
class OMPCopyprivateClause final
    : public OMPVarListClause<OMPCopyprivateClause>,
      private llvm::TrailingObjects<OMPCopyprivateClause, Expr *> {
  // Like OMPCopyinClause, this class has three tail-allocated expression
  // lists (sources, destinations, assignment ops), each of varlist_size()
  // elements, laid out in that order after the variable list.
  friend TrailingObjects;
  friend OMPVarListClause;
  friend class OMPClauseReader;

  /// \brief Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  ///
  OMPCopyprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPCopyprivateClause>(OMPC_copyprivate, StartLoc,
                                               LParenLoc, EndLoc, N) {}

  /// \brief Build an empty clause with all locations invalid.
  ///
  /// \param N Number of variables.
  ///
  explicit OMPCopyprivateClause(unsigned N)
      : OMPVarListClause<OMPCopyprivateClause>(
            OMPC_copyprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N) {}

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent source expression in the final
  /// assignment statement performed by the copyprivate clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// \brief Get the list of helper source expressions (first trailing list,
  /// immediately after the variable list).
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// \brief Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent destination expression in the final
  /// assignment statement performed by the copyprivate clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// \brief Get the list of helper destination expressions (second trailing
  /// list, right after the source expressions).
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// \brief Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign source helper expressions to destination helper expressions
  /// correspondingly.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// \brief Get the list of helper assignment expressions (third trailing
  /// list, right after the destination expressions).
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// sources.
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// destinations.
  /// \param AssignmentOps List of helper expressions that represents assignment
  /// operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of final assignment performed by the
  /// copyprivate clause.
  ///
  static OMPCopyprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);

  /// \brief Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  ///
  static OMPCopyprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  typedef MutableArrayRef<Expr *>::iterator helper_expr_iterator;
  typedef ArrayRef<const Expr *>::iterator helper_expr_const_iterator;
  typedef llvm::iterator_range<helper_expr_iterator> helper_expr_range;
  typedef llvm::iterator_range<helper_expr_const_iterator>
      helper_expr_const_range;

  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }
  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }
  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }
  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }
  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }
  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  /// \brief Child range covers the variable list only; the helper expression
  /// lists are not exposed as children here.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_copyprivate;
  }
};
/// \brief This represents implicit clause 'flush' for the '#pragma omp flush'
/// directive.
/// This clause does not exist by itself, it can be only as a part of 'omp
/// flush' directive. This clause is introduced to keep the original structure
/// of \a OMPExecutableDirective class and its derivatives and to use the
/// existing infrastructure of clauses with the list of variables.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has implicit clause 'flush'
/// with the variables 'a' and 'b'.
///
class OMPFlushClause final
    : public OMPVarListClause<OMPFlushClause>,
      private llvm::TrailingObjects<OMPFlushClause, Expr *> {
  friend TrailingObjects;
  friend OMPVarListClause;

  /// \brief Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  ///
  OMPFlushClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                 SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPFlushClause>(OMPC_flush, StartLoc, LParenLoc,
                                         EndLoc, N) {}

  /// \brief Build an empty clause with all locations invalid.
  ///
  /// \param N Number of variables.
  ///
  explicit OMPFlushClause(unsigned N)
      : OMPVarListClause<OMPFlushClause>(OMPC_flush, SourceLocation(),
                                         SourceLocation(), SourceLocation(),
                                         N) {}

public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  ///
  static OMPFlushClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                SourceLocation LParenLoc, SourceLocation EndLoc,
                                ArrayRef<Expr *> VL);

  /// \brief Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  ///
  static OMPFlushClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// \brief Child range covers the flushed variable list.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_flush;
  }
};
/// \brief This represents implicit clause 'depend' for the '#pragma omp task'
/// directive.
///
/// \code
/// #pragma omp task depend(in:a,b)
/// \endcode
/// In this example directive '#pragma omp task' has clause 'depend' with the
/// variables 'a' and 'b' with dependency 'in'.
///
class OMPDependClause final
    : public OMPVarListClause<OMPDependClause>,
      private llvm::TrailingObjects<OMPDependClause, Expr *> {
  friend TrailingObjects;
  friend OMPVarListClause;
  friend class OMPClauseReader;

  /// \brief Dependency type (one of in, out, inout).
  OpenMPDependClauseKind DepKind;

  /// \brief Dependency type location.
  SourceLocation DepLoc;

  /// \brief Colon location.
  SourceLocation ColonLoc;

  /// \brief Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  ///
  OMPDependClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPDependClause>(OMPC_depend, StartLoc, LParenLoc,
                                          EndLoc, N),
        DepKind(OMPC_DEPEND_unknown) {}

  /// \brief Build an empty clause; the dependency kind starts out as
  /// OMPC_DEPEND_unknown and all locations are invalid.
  ///
  /// \param N Number of variables.
  ///
  explicit OMPDependClause(unsigned N)
      : OMPVarListClause<OMPDependClause>(OMPC_depend, SourceLocation(),
                                          SourceLocation(), SourceLocation(),
                                          N),
        DepKind(OMPC_DEPEND_unknown) {}

  /// \brief Set dependency kind.
  void setDependencyKind(OpenMPDependClauseKind K) { DepKind = K; }

  /// \brief Set dependency kind location.
  void setDependencyLoc(SourceLocation Loc) { DepLoc = Loc; }

  /// \brief Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// \brief Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param DepKind Dependency type.
  /// \param DepLoc Location of the dependency type.
  /// \param ColonLoc Colon location.
  /// \param VL List of references to the variables.
  static OMPDependClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, OpenMPDependClauseKind DepKind,
         SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VL);

  /// \brief Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  ///
  static OMPDependClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// \brief Get dependency type.
  OpenMPDependClauseKind getDependencyKind() const { return DepKind; }

  /// \brief Get dependency type location.
  SourceLocation getDependencyLoc() const { return DepLoc; }

  /// \brief Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Set the loop counter value for the depend clauses with 'sink|source' kind
  /// of dependency. Required for codegen.
  void setCounterValue(Expr *V);

  /// Get the loop counter value.
  Expr *getCounterValue();

  /// Get the loop counter value.
  const Expr *getCounterValue() const;

  /// \brief Child range covers the variable list.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_depend;
  }
};
/// \brief This represents 'device' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp target device(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'device'
/// with single expression 'a'.
///
class OMPDeviceClause : public OMPClause {
  friend class OMPClauseReader;

  /// \brief Location of '('.
  SourceLocation LParenLoc;

  /// \brief Device number.
  Stmt *Device;

  /// \brief Set the device number.
  ///
  /// \param E Device number.
  ///
  void setDevice(Expr *E) { Device = E; }

public:
  /// \brief Build 'device' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  ///
  OMPDeviceClause(Expr *E, SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc)
      : OMPClause(OMPC_device, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Device(E) {}

  /// \brief Build an empty clause with a null device expression.
  ///
  OMPDeviceClause()
      : OMPClause(OMPC_device, SourceLocation(), SourceLocation()),
        LParenLoc(SourceLocation()), Device(nullptr) {}

  /// \brief Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// \brief Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// \brief Return device number.
  Expr *getDevice() { return cast<Expr>(Device); }

  /// \brief Return device number.
  /// NOTE(review): returns a non-const Expr* from a const member function;
  /// not const-correct, though it matches the accessor style of neighboring
  /// clause classes — confirm before changing.
  Expr *getDevice() const { return cast<Expr>(Device); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_device;
  }

  /// \brief The single child is the device-number expression.
  child_range children() { return child_range(&Device, &Device + 1); }
};
/// \brief Represents the simple (argument-less) 'threads' clause in an
/// '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered threads
/// \endcode
/// Here the '#pragma omp ordered' directive carries the 'threads' clause.
///
class OMPThreadsClause : public OMPClause {
public:
  /// \brief Build a 'threads' clause spanning [\a StartLoc, \a EndLoc].
  OMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_threads, StartLoc, EndLoc) {}

  /// \brief Build an empty clause (both locations invalid).
  OMPThreadsClause() : OMPThreadsClause(SourceLocation(), SourceLocation()) {}

  /// \brief No associated expressions, hence an empty child range.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_threads;
  }
};
/// \brief Represents the simple (argument-less) 'simd' clause in an
/// '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered simd
/// \endcode
/// Here the '#pragma omp ordered' directive carries the 'simd' clause.
///
class OMPSIMDClause : public OMPClause {
public:
  /// \brief Build a 'simd' clause spanning [\a StartLoc, \a EndLoc].
  OMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_simd, StartLoc, EndLoc) {}

  /// \brief Build an empty clause (both locations invalid).
  OMPSIMDClause() : OMPSIMDClause(SourceLocation(), SourceLocation()) {}

  /// \brief No associated expressions, hence an empty child range.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_simd;
  }
};
/// \brief Struct that defines common infrastructure to handle mappable
/// expressions used in OpenMP clauses.
class OMPClauseMappableExprCommon {
public:
  // \brief Class that represents a component of a mappable expression. E.g.
  // for an expression S.a, the first component is a declaration reference
  // expression associated with 'S' and the second is a member expression
  // associated with the field declaration 'a'. If the expression is an array
  // subscript it may not have any associated declaration. In that case the
  // associated declaration is set to nullptr.
  class MappableComponent {
    // \brief Expression associated with the component.
    Expr *AssociatedExpression = nullptr;

    // \brief Declaration associated with the component. If the component does
    // not have a declaration (e.g. array subscripts or section), this is set to
    // nullptr.
    ValueDecl *AssociatedDeclaration = nullptr;

  public:
    explicit MappableComponent() {}
    // Canonicalizes the declaration so that equal declarations compare equal.
    explicit MappableComponent(Expr *AssociatedExpression,
                               ValueDecl *AssociatedDeclaration)
        : AssociatedExpression(AssociatedExpression),
          AssociatedDeclaration(
              AssociatedDeclaration
                  ? cast<ValueDecl>(AssociatedDeclaration->getCanonicalDecl())
                  : nullptr) {}

    Expr *getAssociatedExpression() const { return AssociatedExpression; }

    ValueDecl *getAssociatedDeclaration() const {
      return AssociatedDeclaration;
    }
  };

  // \brief List of components of an expression. This first one is the whole
  // expression and the last one is the base expression.
  typedef SmallVector<MappableComponent, 8> MappableExprComponentList;
  typedef ArrayRef<MappableComponent> MappableExprComponentListRef;

  // \brief List of all component lists associated to the same base declaration.
  // E.g. if both 'S.a' and 'S.b' are a mappable expressions, each will have
  // their component list but the same base declaration 'S'.
  typedef SmallVector<MappableExprComponentList, 8> MappableExprComponentLists;
  typedef ArrayRef<MappableExprComponentList> MappableExprComponentListsRef;

protected:
  // \brief Return the total number of elements in a list of component lists.
  static unsigned
  getComponentsTotalNumber(MappableExprComponentListsRef ComponentLists);

  // \brief Return the total number of elements in a list of declarations. All
  // declarations are expected to be canonical.
  static unsigned
  getUniqueDeclarationsTotalNumber(ArrayRef<ValueDecl *> Declarations);
};
/// \brief This represents clauses with a list of expressions that are mappable.
/// Examples of these clauses are 'map' in
/// '#pragma omp target [enter|exit] [data]...' directives, and 'to' and 'from
/// in '#pragma omp target update...' directives.
template <class T>
class OMPMappableExprListClause : public OMPVarListClause<T>,
public OMPClauseMappableExprCommon {
friend class OMPClauseReader;
/// \brief Number of unique declarations in this clause.
unsigned NumUniqueDeclarations;
/// \brief Number of component lists in this clause.
unsigned NumComponentLists;
/// \brief Total number of components in this clause.
unsigned NumComponents;
protected:
/// \brief Get the unique declarations that are in the trailing objects of the
/// class.
MutableArrayRef<ValueDecl *> getUniqueDeclsRef() {
return MutableArrayRef<ValueDecl *>(
static_cast<T *>(this)->template getTrailingObjects<ValueDecl *>(),
NumUniqueDeclarations);
}
/// \brief Get the unique declarations that are in the trailing objects of the
/// class.
ArrayRef<ValueDecl *> getUniqueDeclsRef() const {
return ArrayRef<ValueDecl *>(
static_cast<const T *>(this)
->template getTrailingObjects<ValueDecl *>(),
NumUniqueDeclarations);
}
/// \brief Set the unique declarations that are in the trailing objects of the
/// class.
void setUniqueDecls(ArrayRef<ValueDecl *> UDs) {
assert(UDs.size() == NumUniqueDeclarations &&
"Unexpected amount of unique declarations.");
std::copy(UDs.begin(), UDs.end(), getUniqueDeclsRef().begin());
}
/// \brief Get the number of lists per declaration that are in the trailing
/// objects of the class.
MutableArrayRef<unsigned> getDeclNumListsRef() {
return MutableArrayRef<unsigned>(
static_cast<T *>(this)->template getTrailingObjects<unsigned>(),
NumUniqueDeclarations);
}
/// \brief Get the number of lists per declaration that are in the trailing
/// objects of the class.
ArrayRef<unsigned> getDeclNumListsRef() const {
return ArrayRef<unsigned>(
static_cast<const T *>(this)->template getTrailingObjects<unsigned>(),
NumUniqueDeclarations);
}
/// \brief Set the number of lists per declaration that are in the trailing
/// objects of the class.
void setDeclNumLists(ArrayRef<unsigned> DNLs) {
assert(DNLs.size() == NumUniqueDeclarations &&
"Unexpected amount of list numbers.");
std::copy(DNLs.begin(), DNLs.end(), getDeclNumListsRef().begin());
}
/// \brief Get the cumulative component lists sizes that are in the trailing
/// objects of the class. They are appended after the number of lists.
MutableArrayRef<unsigned> getComponentListSizesRef() {
return MutableArrayRef<unsigned>(
static_cast<T *>(this)->template getTrailingObjects<unsigned>() +
NumUniqueDeclarations,
NumComponentLists);
}
/// \brief Get the cumulative component lists sizes that are in the trailing
/// objects of the class. They are appended after the number of lists.
ArrayRef<unsigned> getComponentListSizesRef() const {
return ArrayRef<unsigned>(
static_cast<const T *>(this)->template getTrailingObjects<unsigned>() +
NumUniqueDeclarations,
NumComponentLists);
}
/// \brief Set the cumulative component lists sizes that are in the trailing
/// objects of the class.
void setComponentListSizes(ArrayRef<unsigned> CLSs) {
assert(CLSs.size() == NumComponentLists &&
"Unexpected amount of component lists.");
std::copy(CLSs.begin(), CLSs.end(), getComponentListSizesRef().begin());
}
/// \brief Get the components that are in the trailing objects of the class.
MutableArrayRef<MappableComponent> getComponentsRef() {
return MutableArrayRef<MappableComponent>(
static_cast<T *>(this)
->template getTrailingObjects<MappableComponent>(),
NumComponents);
}
/// \brief Get the components that are in the trailing objects of the class.
ArrayRef<MappableComponent> getComponentsRef() const {
return ArrayRef<MappableComponent>(
static_cast<const T *>(this)
->template getTrailingObjects<MappableComponent>(),
NumComponents);
}
/// \brief Set the components that are in the trailing objects of the class.
/// This requires the list sizes so that it can also fill the original
/// expressions, which are the first component of each list.
void setComponents(ArrayRef<MappableComponent> Components,
ArrayRef<unsigned> CLSs) {
assert(Components.size() == NumComponents &&
"Unexpected amount of component lists.");
assert(CLSs.size() == NumComponentLists &&
"Unexpected amount of list sizes.");
std::copy(Components.begin(), Components.end(), getComponentsRef().begin());
}
/// \brief Fill the clause information from the list of declarations and
/// associated component lists.
void setClauseInfo(ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists) {
// Perform some checks to make sure the data sizes are consistent with the
// information available when the clause was created.
assert(getUniqueDeclarationsTotalNumber(Declarations) ==
NumUniqueDeclarations &&
"Unexpected number of mappable expression info entries!");
assert(getComponentsTotalNumber(ComponentLists) == NumComponents &&
"Unexpected total number of components!");
assert(Declarations.size() == ComponentLists.size() &&
"Declaration and component lists size is not consistent!");
assert(Declarations.size() == NumComponentLists &&
"Unexpected declaration and component lists size!");
// Organize the components by declaration and retrieve the original
// expression. Original expressions are always the first component of the
// mappable component list.
llvm::MapVector<ValueDecl *, SmallVector<MappableExprComponentListRef, 8>>
ComponentListMap;
{
auto CI = ComponentLists.begin();
for (auto DI = Declarations.begin(), DE = Declarations.end(); DI != DE;
++DI, ++CI) {
assert(!CI->empty() && "Invalid component list!");
ComponentListMap[*DI].push_back(*CI);
}
}
// Iterators of the target storage.
auto UniqueDeclarations = getUniqueDeclsRef();
auto UDI = UniqueDeclarations.begin();
auto DeclNumLists = getDeclNumListsRef();
auto DNLI = DeclNumLists.begin();
auto ComponentListSizes = getComponentListSizesRef();
auto CLSI = ComponentListSizes.begin();
auto Components = getComponentsRef();
auto CI = Components.begin();
// Variable to compute the accumulation of the number of components.
unsigned PrevSize = 0u;
// Scan all the declarations and associated component lists.
for (auto &M : ComponentListMap) {
// The declaration.
auto *D = M.first;
// The component lists.
auto CL = M.second;
// Initialize the entry.
*UDI = D;
++UDI;
*DNLI = CL.size();
++DNLI;
// Obtain the cumulative sizes and concatenate all the components in the
// reserved storage.
for (auto C : CL) {
// Accumulate with the previous size.
PrevSize += C.size();
// Save the size.
*CLSI = PrevSize;
++CLSI;
// Append components after the current components iterator.
CI = std::copy(C.begin(), C.end(), CI);
}
}
}
/// \brief Build a clause for \a NumUniqueDeclarations declarations, \a
/// NumComponentLists total component lists, and \a NumComponents total
/// components.
///
/// \param K Kind of the clause.
/// \param StartLoc Starting location of the clause (the clause keyword).
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param NumVars Number of expressions listed in the clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause - one
/// list for each expression in the clause.
/// \param NumComponents Total number of expression components in the clause.
///
OMPMappableExprListClause(OpenMPClauseKind K, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
unsigned NumVars, unsigned NumUniqueDeclarations,
unsigned NumComponentLists, unsigned NumComponents)
: OMPVarListClause<T>(K, StartLoc, LParenLoc, EndLoc, NumVars),
NumUniqueDeclarations(NumUniqueDeclarations),
NumComponentLists(NumComponentLists), NumComponents(NumComponents) {}
public:
/// \brief Return the number of unique base declarations in this clause.
unsigned getUniqueDeclarationsNum() const { return NumUniqueDeclarations; }
/// \brief Return the number of lists derived from the clause expressions.
unsigned getTotalComponentListNum() const { return NumComponentLists; }
/// \brief Return the total number of components in all lists derived from the
/// clause.
unsigned getTotalComponentsNum() const { return NumComponents; }
/// \brief Iterator that browse the components by lists. It also allows
/// browsing components of a single declaration.
class const_component_lists_iterator
: public llvm::iterator_adaptor_base<
const_component_lists_iterator,
MappableExprComponentListRef::const_iterator,
std::forward_iterator_tag, MappableComponent, ptrdiff_t,
MappableComponent, MappableComponent> {
// The declaration the iterator currently refers to.
ArrayRef<ValueDecl *>::iterator DeclCur;
// The list number associated with the current declaration.
ArrayRef<unsigned>::iterator NumListsCur;
// Remaining lists for the current declaration.
unsigned RemainingLists;
// The cumulative size of the previous list, or zero if there is no previous
// list.
unsigned PrevListSize;
// The cumulative sizes of the current list - it will delimit the remaining
// range of interest.
ArrayRef<unsigned>::const_iterator ListSizeCur;
ArrayRef<unsigned>::const_iterator ListSizeEnd;
// Iterator to the end of the components storage.
MappableExprComponentListRef::const_iterator End;
public:
/// \brief Construct an iterator that scans all lists.
explicit const_component_lists_iterator(
ArrayRef<ValueDecl *> UniqueDecls, ArrayRef<unsigned> DeclsListNum,
ArrayRef<unsigned> CumulativeListSizes,
MappableExprComponentListRef Components)
: const_component_lists_iterator::iterator_adaptor_base(
Components.begin()),
DeclCur(UniqueDecls.begin()), NumListsCur(DeclsListNum.begin()),
RemainingLists(0u), PrevListSize(0u),
ListSizeCur(CumulativeListSizes.begin()),
ListSizeEnd(CumulativeListSizes.end()), End(Components.end()) {
assert(UniqueDecls.size() == DeclsListNum.size() &&
"Inconsistent number of declarations and list sizes!");
if (!DeclsListNum.empty())
RemainingLists = *NumListsCur;
}
/// \brief Construct an iterator that scan lists for a given declaration \a
/// Declaration.
explicit const_component_lists_iterator(
const ValueDecl *Declaration, ArrayRef<ValueDecl *> UniqueDecls,
ArrayRef<unsigned> DeclsListNum, ArrayRef<unsigned> CumulativeListSizes,
MappableExprComponentListRef Components)
: const_component_lists_iterator(UniqueDecls, DeclsListNum,
CumulativeListSizes, Components) {
// Look for the desired declaration. While we are looking for it, we
// update the state so that we know the component where a given list
// starts.
for (; DeclCur != UniqueDecls.end(); ++DeclCur, ++NumListsCur) {
if (*DeclCur == Declaration)
break;
assert(*NumListsCur > 0 && "No lists associated with declaration??");
// Skip the lists associated with the current declaration, but save the
// last list size that was skipped.
std::advance(ListSizeCur, *NumListsCur - 1);
PrevListSize = *ListSizeCur;
++ListSizeCur;
}
// If we didn't find any declaration, advance the iterator to after the
// last component and set remaining lists to zero.
if (ListSizeCur == CumulativeListSizes.end()) {
this->I = End;
RemainingLists = 0u;
return;
}
// Set the remaining lists with the total number of lists of the current
// declaration.
RemainingLists = *NumListsCur;
// Adjust the list size end iterator to the end of the relevant range.
ListSizeEnd = ListSizeCur;
std::advance(ListSizeEnd, RemainingLists);
// Given that the list sizes are cumulative, the index of the component
// that start the list is the size of the previous list.
std::advance(this->I, PrevListSize);
}
// Return the array with the current list. The sizes are cumulative, so the
// array size is the difference between the current size and previous one.
std::pair<const ValueDecl *, MappableExprComponentListRef>
operator*() const {
assert(ListSizeCur != ListSizeEnd && "Invalid iterator!");
return std::make_pair(
*DeclCur,
MappableExprComponentListRef(&*this->I, *ListSizeCur - PrevListSize));
}
std::pair<const ValueDecl *, MappableExprComponentListRef>
operator->() const {
return **this;
}
// Skip the components of the current list.
const_component_lists_iterator &operator++() {
assert(ListSizeCur != ListSizeEnd && RemainingLists &&
"Invalid iterator!");
// If we don't have more lists just skip all the components. Otherwise,
// advance the iterator by the number of components in the current list.
if (std::next(ListSizeCur) == ListSizeEnd) {
this->I = End;
RemainingLists = 0;
} else {
std::advance(this->I, *ListSizeCur - PrevListSize);
PrevListSize = *ListSizeCur;
// We are done with a declaration, move to the next one.
if (!(--RemainingLists)) {
++DeclCur;
++NumListsCur;
RemainingLists = *NumListsCur;
assert(RemainingLists && "No lists in the following declaration??");
}
}
++ListSizeCur;
return *this;
}
};
typedef llvm::iterator_range<const_component_lists_iterator>
const_component_lists_range;
/// \brief Iterators for all component lists.
const_component_lists_iterator component_lists_begin() const {
return const_component_lists_iterator(
getUniqueDeclsRef(), getDeclNumListsRef(), getComponentListSizesRef(),
getComponentsRef());
}
const_component_lists_iterator component_lists_end() const {
return const_component_lists_iterator(
ArrayRef<ValueDecl *>(), ArrayRef<unsigned>(), ArrayRef<unsigned>(),
MappableExprComponentListRef(getComponentsRef().end(),
getComponentsRef().end()));
}
const_component_lists_range component_lists() const {
return {component_lists_begin(), component_lists_end()};
}
/// \brief Iterators for component lists associated with the provided
/// declaration.
const_component_lists_iterator
decl_component_lists_begin(const ValueDecl *VD) const {
return const_component_lists_iterator(
VD, getUniqueDeclsRef(), getDeclNumListsRef(),
getComponentListSizesRef(), getComponentsRef());
}
const_component_lists_iterator decl_component_lists_end() const {
return component_lists_end();
}
const_component_lists_range decl_component_lists(const ValueDecl *VD) const {
return {decl_component_lists_begin(VD), decl_component_lists_end()};
}
/// Iterators to access all the declarations, number of lists, list sizes, and
/// components.
typedef ArrayRef<ValueDecl *>::iterator const_all_decls_iterator;
typedef llvm::iterator_range<const_all_decls_iterator> const_all_decls_range;
const_all_decls_range all_decls() const {
auto A = getUniqueDeclsRef();
return const_all_decls_range(A.begin(), A.end());
}
typedef ArrayRef<unsigned>::iterator const_all_num_lists_iterator;
typedef llvm::iterator_range<const_all_num_lists_iterator>
const_all_num_lists_range;
const_all_num_lists_range all_num_lists() const {
auto A = getDeclNumListsRef();
return const_all_num_lists_range(A.begin(), A.end());
}
typedef ArrayRef<unsigned>::iterator const_all_lists_sizes_iterator;
typedef llvm::iterator_range<const_all_lists_sizes_iterator>
const_all_lists_sizes_range;
const_all_lists_sizes_range all_lists_sizes() const {
auto A = getComponentListSizesRef();
return const_all_lists_sizes_range(A.begin(), A.end());
}
typedef ArrayRef<MappableComponent>::iterator const_all_components_iterator;
typedef llvm::iterator_range<const_all_components_iterator>
const_all_components_range;
const_all_components_range all_components() const {
auto A = getComponentsRef();
return const_all_components_range(A.begin(), A.end());
}
};
/// \brief This represents clause 'map' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target map(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'map'
/// with the variables 'a' and 'b'.
///
class OMPMapClause final : public OMPMappableExprListClause<OMPMapClause>,
private llvm::TrailingObjects<
OMPMapClause, Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent> {
friend TrailingObjects;
friend OMPVarListClause;
friend OMPMappableExprListClause;
friend class OMPClauseReader;
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
size_t numTrailingObjects(OverloadToken<Expr *>) const {
return varlist_size();
}
size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
return getUniqueDeclarationsNum();
}
size_t numTrailingObjects(OverloadToken<unsigned>) const {
return getUniqueDeclarationsNum() + getTotalComponentListNum();
}
/// \brief Map type modifier for the 'map' clause.
OpenMPMapClauseKind MapTypeModifier;
/// \brief Map type for the 'map' clause.
OpenMPMapClauseKind MapType;
/// \brief Is this an implicit map type or not.
bool MapTypeIsImplicit;
/// \brief Location of the map type.
SourceLocation MapLoc;
/// \brief Colon location.
SourceLocation ColonLoc;
/// \brief Set type modifier for the clause.
///
/// \param T Type Modifier for the clause.
///
void setMapTypeModifier(OpenMPMapClauseKind T) { MapTypeModifier = T; }
/// \brief Set type for the clause.
///
/// \param T Type for the clause.
///
void setMapType(OpenMPMapClauseKind T) { MapType = T; }
/// \brief Set type location.
///
/// \param TLoc Type location.
///
void setMapLoc(SourceLocation TLoc) { MapLoc = TLoc; }
/// \brief Set colon location.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
/// \brief Build a clause for \a NumVars listed expressions, \a
/// NumUniqueDeclarations declarations, \a NumComponentLists total component
/// lists, and \a NumComponents total expression components.
///
/// \param MapTypeModifier Map type modifier.
/// \param MapType Map type.
/// \param MapTypeIsImplicit Map type is inferred implicitly.
/// \param MapLoc Location of the map type.
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
/// \param NumVars Number of expressions listed in this clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
///
explicit OMPMapClause(OpenMPMapClauseKind MapTypeModifier,
OpenMPMapClauseKind MapType, bool MapTypeIsImplicit,
SourceLocation MapLoc, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
unsigned NumVars, unsigned NumUniqueDeclarations,
unsigned NumComponentLists, unsigned NumComponents)
: OMPMappableExprListClause(OMPC_map, StartLoc, LParenLoc, EndLoc,
NumVars, NumUniqueDeclarations,
NumComponentLists, NumComponents),
MapTypeModifier(MapTypeModifier), MapType(MapType),
MapTypeIsImplicit(MapTypeIsImplicit), MapLoc(MapLoc) {}
/// \brief Build an empty clause.
///
/// \param NumVars Number of expressions listed in this clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
///
explicit OMPMapClause(unsigned NumVars, unsigned NumUniqueDeclarations,
unsigned NumComponentLists, unsigned NumComponents)
: OMPMappableExprListClause(
OMPC_map, SourceLocation(), SourceLocation(), SourceLocation(),
NumVars, NumUniqueDeclarations, NumComponentLists, NumComponents),
MapTypeModifier(OMPC_MAP_unknown), MapType(OMPC_MAP_unknown),
MapTypeIsImplicit(false), MapLoc() {}
public:
/// \brief Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
/// \param Vars The original expression used in the clause.
/// \param Declarations Declarations used in the clause.
/// \param ComponentLists Component lists used in the clause.
/// \param TypeModifier Map type modifier.
/// \param Type Map type.
/// \param TypeIsImplicit Map type is inferred implicitly.
/// \param TypeLoc Location of the map type.
///
static OMPMapClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
ArrayRef<Expr *> Vars,
ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists,
OpenMPMapClauseKind TypeModifier,
OpenMPMapClauseKind Type, bool TypeIsImplicit,
SourceLocation TypeLoc);
/// \brief Creates an empty clause with the place for for \a NumVars original
/// expressions, \a NumUniqueDeclarations declarations, \NumComponentLists
/// lists, and \a NumComponents expression components.
///
/// \param C AST context.
/// \param NumVars Number of expressions listed in the clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of unique base declarations in this
/// clause.
/// \param NumComponents Total number of expression components in the clause.
///
static OMPMapClause *CreateEmpty(const ASTContext &C, unsigned NumVars,
unsigned NumUniqueDeclarations,
unsigned NumComponentLists,
unsigned NumComponents);
/// \brief Fetches mapping kind for the clause.
OpenMPMapClauseKind getMapType() const LLVM_READONLY { return MapType; }
/// \brief Is this an implicit map type?
/// We have to capture 'IsMapTypeImplicit' from the parser for more
/// informative error messages. It helps distinguish map(r) from
/// map(tofrom: r), which is important to print more helpful error
/// messages for some target directives.
bool isImplicitMapType() const LLVM_READONLY { return MapTypeIsImplicit; }
/// \brief Fetches the map type modifier for the clause.
OpenMPMapClauseKind getMapTypeModifier() const LLVM_READONLY {
return MapTypeModifier;
}
/// \brief Fetches location of clause mapping kind.
SourceLocation getMapLoc() const LLVM_READONLY { return MapLoc; }
/// \brief Get colon location.
SourceLocation getColonLoc() const { return ColonLoc; }
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_map;
}
child_range children() {
return child_range(
reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
};
/// \brief This represents 'num_teams' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams num_teams(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'num_teams'
/// with single expression 'n'.
///
class OMPNumTeamsClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
/// \brief NumTeams number.
Stmt *NumTeams;
/// \brief Set the NumTeams number.
///
/// \param E NumTeams number.
///
void setNumTeams(Expr *E) { NumTeams = E; }
public:
/// \brief Build 'num_teams' clause.
///
/// \param E Expression associated with this clause.
/// \param HelperE Helper Expression associated with this clause.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
///
OMPNumTeamsClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(OMPC_num_teams, StartLoc, EndLoc), OMPClauseWithPreInit(this),
LParenLoc(LParenLoc), NumTeams(E) {
setPreInitStmt(HelperE, CaptureRegion);
}
/// \brief Build an empty clause.
///
OMPNumTeamsClause()
: OMPClause(OMPC_num_teams, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this), LParenLoc(SourceLocation()),
NumTeams(nullptr) {}
/// \brief Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// \brief Return NumTeams number.
Expr *getNumTeams() { return cast<Expr>(NumTeams); }
/// \brief Return NumTeams number.
Expr *getNumTeams() const { return cast<Expr>(NumTeams); }
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_num_teams;
}
child_range children() { return child_range(&NumTeams, &NumTeams + 1); }
};
/// \brief This represents 'thread_limit' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams thread_limit(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'thread_limit'
/// with single expression 'n'.
///
class OMPThreadLimitClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
/// \brief ThreadLimit number.
Stmt *ThreadLimit;
/// \brief Set the ThreadLimit number.
///
/// \param E ThreadLimit number.
///
void setThreadLimit(Expr *E) { ThreadLimit = E; }
public:
/// \brief Build 'thread_limit' clause.
///
/// \param E Expression associated with this clause.
/// \param HelperE Helper Expression associated with this clause.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
///
OMPThreadLimitClause(Expr *E, Stmt *HelperE,
OpenMPDirectiveKind CaptureRegion,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(OMPC_thread_limit, StartLoc, EndLoc),
OMPClauseWithPreInit(this), LParenLoc(LParenLoc), ThreadLimit(E) {
setPreInitStmt(HelperE, CaptureRegion);
}
/// \brief Build an empty clause.
///
OMPThreadLimitClause()
: OMPClause(OMPC_thread_limit, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this), LParenLoc(SourceLocation()),
ThreadLimit(nullptr) {}
/// \brief Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// \brief Return ThreadLimit number.
Expr *getThreadLimit() { return cast<Expr>(ThreadLimit); }
/// \brief Return ThreadLimit number.
Expr *getThreadLimit() const { return cast<Expr>(ThreadLimit); }
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_thread_limit;
}
child_range children() { return child_range(&ThreadLimit, &ThreadLimit + 1); }
};
/// \brief This represents 'priority' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp task priority(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'priority' with
/// single expression 'n'.
///
class OMPPriorityClause : public OMPClause {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
/// \brief Priority number.
Stmt *Priority;
/// \brief Set the Priority number.
///
/// \param E Priority number.
///
void setPriority(Expr *E) { Priority = E; }
public:
/// \brief Build 'priority' clause.
///
/// \param E Expression associated with this clause.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
///
OMPPriorityClause(Expr *E, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(OMPC_priority, StartLoc, EndLoc), LParenLoc(LParenLoc),
Priority(E) {}
/// \brief Build an empty clause.
///
OMPPriorityClause()
: OMPClause(OMPC_priority, SourceLocation(), SourceLocation()),
LParenLoc(SourceLocation()), Priority(nullptr) {}
/// \brief Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// \brief Return Priority number.
Expr *getPriority() { return cast<Expr>(Priority); }
/// \brief Return Priority number.
Expr *getPriority() const { return cast<Expr>(Priority); }
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_priority;
}
child_range children() { return child_range(&Priority, &Priority + 1); }
};
/// \brief This represents 'grainsize' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop grainsize(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'grainsize'
/// with single expression '4'.
///
class OMPGrainsizeClause : public OMPClause {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
/// \brief Safe iteration space distance.
Stmt *Grainsize;
/// \brief Set safelen.
void setGrainsize(Expr *Size) { Grainsize = Size; }
public:
/// \brief Build 'grainsize' clause.
///
/// \param Size Expression associated with this clause.
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
///
OMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
: OMPClause(OMPC_grainsize, StartLoc, EndLoc), LParenLoc(LParenLoc),
Grainsize(Size) {}
/// \brief Build an empty clause.
///
explicit OMPGrainsizeClause()
: OMPClause(OMPC_grainsize, SourceLocation(), SourceLocation()),
LParenLoc(SourceLocation()), Grainsize(nullptr) {}
/// \brief Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// \brief Return safe iteration space distance.
Expr *getGrainsize() const { return cast_or_null<Expr>(Grainsize); }
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_grainsize;
}
child_range children() { return child_range(&Grainsize, &Grainsize + 1); }
};
/// \brief This represents 'nogroup' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp taskloop nogroup
/// \endcode
/// In this example directive '#pragma omp taskloop' has 'nogroup' clause.
///
class OMPNogroupClause : public OMPClause {
public:
/// \brief Build 'nogroup' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
///
OMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_nogroup, StartLoc, EndLoc) {}
/// \brief Build an empty clause.
///
OMPNogroupClause()
: OMPClause(OMPC_nogroup, SourceLocation(), SourceLocation()) {}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_nogroup;
}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
};
/// \brief This represents 'num_tasks' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop num_tasks(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'num_tasks'
/// with single expression '4'.
///
class OMPNumTasksClause : public OMPClause {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
/// \brief Safe iteration space distance.
Stmt *NumTasks;
/// \brief Set safelen.
void setNumTasks(Expr *Size) { NumTasks = Size; }
public:
/// \brief Build 'num_tasks' clause.
///
/// \param Size Expression associated with this clause.
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
///
OMPNumTasksClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
: OMPClause(OMPC_num_tasks, StartLoc, EndLoc), LParenLoc(LParenLoc),
NumTasks(Size) {}
/// \brief Build an empty clause.
///
explicit OMPNumTasksClause()
: OMPClause(OMPC_num_tasks, SourceLocation(), SourceLocation()),
LParenLoc(SourceLocation()), NumTasks(nullptr) {}
/// \brief Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// \brief Return safe iteration space distance.
Expr *getNumTasks() const { return cast_or_null<Expr>(NumTasks); }
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_num_tasks;
}
child_range children() { return child_range(&NumTasks, &NumTasks + 1); }
};
/// \brief This represents 'hint' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp critical (name) hint(6)
/// \endcode
/// In this example directive '#pragma omp critical' has name 'name' and clause
/// 'hint' with argument '6'.
///
class OMPHintClause : public OMPClause {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
/// \brief Hint expression of the 'hint' clause.
Stmt *Hint;
/// \brief Set hint expression.
///
void setHint(Expr *H) { Hint = H; }
public:
/// \brief Build 'hint' clause with expression \a Hint.
///
/// \param Hint Hint expression.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
///
OMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(OMPC_hint, StartLoc, EndLoc), LParenLoc(LParenLoc),
Hint(Hint) {}
/// \brief Build an empty clause.
///
OMPHintClause()
: OMPClause(OMPC_hint, SourceLocation(), SourceLocation()),
LParenLoc(SourceLocation()), Hint(nullptr) {}
/// \brief Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// \brief Returns the hint expression of the clause.
Expr *getHint() const { return cast_or_null<Expr>(Hint); }
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_hint;
}
child_range children() { return child_range(&Hint, &Hint + 1); }
};
/// \brief This represents 'dist_schedule' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp distribute dist_schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp distribute' has 'dist_schedule'
/// clause with arguments 'static' and '3'.
///
class OMPDistScheduleClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
/// \brief A kind of the 'dist_schedule' clause.
OpenMPDistScheduleClauseKind Kind;
/// \brief Start location of the schedule kind in source code.
SourceLocation KindLoc;
/// \brief Location of ',' (if any).
SourceLocation CommaLoc;
/// \brief Chunk size.
Expr *ChunkSize;
/// \brief Set schedule kind.
///
/// \param K Schedule kind.
///
void setDistScheduleKind(OpenMPDistScheduleClauseKind K) { Kind = K; }
/// \brief Sets the location of '('.
///
/// \param Loc Location of '('.
///
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Set schedule kind start location.
///
/// \param KLoc Schedule kind location.
///
void setDistScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }
/// \brief Set location of ','.
///
/// \param Loc Location of ','.
///
void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }
/// \brief Set chunk size.
///
/// \param E Chunk size.
///
void setChunkSize(Expr *E) { ChunkSize = E; }
public:
/// \brief Build 'dist_schedule' clause with schedule kind \a Kind and chunk
/// size expression \a ChunkSize.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param KLoc Starting location of the argument.
/// \param CommaLoc Location of ','.
/// \param EndLoc Ending location of the clause.
/// \param Kind DistSchedule kind.
/// \param ChunkSize Chunk size.
/// \param HelperChunkSize Helper chunk size for combined directives
/// (stored as the pre-init statement, not as a regular child).
///
OMPDistScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation KLoc, SourceLocation CommaLoc,
SourceLocation EndLoc,
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
Stmt *HelperChunkSize)
: OMPClause(OMPC_dist_schedule, StartLoc, EndLoc),
OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind),
KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) {
setPreInitStmt(HelperChunkSize);
}
/// \brief Build an empty clause.
///
explicit OMPDistScheduleClause()
: OMPClause(OMPC_dist_schedule, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this), Kind(OMPC_DIST_SCHEDULE_unknown),
ChunkSize(nullptr) {}
/// \brief Get kind of the clause.
///
OpenMPDistScheduleClauseKind getDistScheduleKind() const { return Kind; }
/// \brief Get location of '('.
///
// NOTE(review): these location/chunk accessors are non-const, unlike the
// const accessors on sibling clauses — confirm whether const overloads
// are wanted upstream.
SourceLocation getLParenLoc() { return LParenLoc; }
/// \brief Get kind location.
///
SourceLocation getDistScheduleKindLoc() { return KindLoc; }
/// \brief Get location of ','.
///
SourceLocation getCommaLoc() { return CommaLoc; }
/// \brief Get chunk size.
///
Expr *getChunkSize() { return ChunkSize; }
/// \brief Get chunk size.
///
const Expr *getChunkSize() const { return ChunkSize; }
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_dist_schedule;
}
// Only the chunk-size expression is a regular child; the helper chunk
// size is reachable via OMPClauseWithPreInit.
child_range children() {
return child_range(reinterpret_cast<Stmt **>(&ChunkSize),
reinterpret_cast<Stmt **>(&ChunkSize) + 1);
}
};
/// \brief This represents 'defaultmap' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp target defaultmap(tofrom: scalar)
/// \endcode
/// In this example directive '#pragma omp target' has 'defaultmap' clause of kind
/// 'scalar' with modifier 'tofrom'.
///
class OMPDefaultmapClause : public OMPClause {
friend class OMPClauseReader;
/// \brief Location of '('.
SourceLocation LParenLoc;
/// \brief Modifier of the 'defaultmap' clause.
OpenMPDefaultmapClauseModifier Modifier;
/// \brief Location of the modifier.
SourceLocation ModifierLoc;
/// \brief A kind of the 'defaultmap' clause.
OpenMPDefaultmapClauseKind Kind;
/// \brief Start location of the defaultmap kind in source code.
SourceLocation KindLoc;
/// \brief Set defaultmap kind.
///
/// \param K Defaultmap kind.
///
void setDefaultmapKind(OpenMPDefaultmapClauseKind K) { Kind = K; }
/// \brief Set the defaultmap modifier.
///
/// \param M Defaultmap modifier.
///
void setDefaultmapModifier(OpenMPDefaultmapClauseModifier M) {
Modifier = M;
}
/// \brief Set location of the defaultmap modifier.
///
void setDefaultmapModifierLoc(SourceLocation Loc) {
ModifierLoc = Loc;
}
/// \brief Sets the location of '('.
///
/// \param Loc Location of '('.
///
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// \brief Set defaultmap kind start location.
///
/// \param KLoc Defaultmap kind location.
///
void setDefaultmapKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }
public:
/// \brief Build 'defaultmap' clause with defaultmap kind \a Kind
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param MLoc Location of the modifier.
/// \param KLoc Starting location of the argument.
/// \param EndLoc Ending location of the clause.
/// \param Kind Defaultmap kind.
/// \param M The modifier applied to 'defaultmap' clause.
///
OMPDefaultmapClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation MLoc, SourceLocation KLoc,
SourceLocation EndLoc, OpenMPDefaultmapClauseKind Kind,
OpenMPDefaultmapClauseModifier M)
: OMPClause(OMPC_defaultmap, StartLoc, EndLoc), LParenLoc(LParenLoc),
Modifier(M), ModifierLoc(MLoc), Kind(Kind), KindLoc(KLoc) {}
/// \brief Build an empty clause.
///
explicit OMPDefaultmapClause()
: OMPClause(OMPC_defaultmap, SourceLocation(), SourceLocation()),
Modifier(OMPC_DEFAULTMAP_MODIFIER_unknown),
Kind(OMPC_DEFAULTMAP_unknown) {}
/// \brief Get kind of the clause.
///
OpenMPDefaultmapClauseKind getDefaultmapKind() const { return Kind; }
/// \brief Get the modifier of the clause.
///
OpenMPDefaultmapClauseModifier getDefaultmapModifier() const {
return Modifier;
}
/// \brief Get location of '('.
///
SourceLocation getLParenLoc() { return LParenLoc; }
/// \brief Get kind location.
///
SourceLocation getDefaultmapKindLoc() { return KindLoc; }
/// \brief Get the modifier location.
///
SourceLocation getDefaultmapModifierLoc() const {
return ModifierLoc;
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_defaultmap;
}
// The clause stores no expressions, so it has no children.
child_range children() {
return child_range(child_iterator(), child_iterator());
}
};
/// \brief This represents clause 'to' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update to(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'to'
/// with the variables 'a' and 'b'.
///
class OMPToClause final : public OMPMappableExprListClause<OMPToClause>,
private llvm::TrailingObjects<
OMPToClause, Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent> {
friend TrailingObjects;
friend OMPVarListClause;
friend OMPMappableExprListClause;
friend class OMPClauseReader;
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
size_t numTrailingObjects(OverloadToken<Expr *>) const {
return varlist_size();
}
size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
return getUniqueDeclarationsNum();
}
// The 'unsigned' array holds, per unique declaration, the number of its
// component lists, followed by the size of each component list.
size_t numTrailingObjects(OverloadToken<unsigned>) const {
return getUniqueDeclarationsNum() + getTotalComponentListNum();
}
/// \brief Build clause with number of variables \a NumVars.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param NumVars Number of expressions listed in this clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
///
explicit OMPToClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned NumVars,
unsigned NumUniqueDeclarations,
unsigned NumComponentLists, unsigned NumComponents)
: OMPMappableExprListClause(OMPC_to, StartLoc, LParenLoc, EndLoc, NumVars,
NumUniqueDeclarations, NumComponentLists,
NumComponents) {}
/// \brief Build an empty clause.
///
/// \param NumVars Number of expressions listed in this clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
///
explicit OMPToClause(unsigned NumVars, unsigned NumUniqueDeclarations,
unsigned NumComponentLists, unsigned NumComponents)
: OMPMappableExprListClause(
OMPC_to, SourceLocation(), SourceLocation(), SourceLocation(),
NumVars, NumUniqueDeclarations, NumComponentLists, NumComponents) {}
public:
/// \brief Creates clause with a list of variables \a Vars.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param Vars The original expression used in the clause.
/// \param Declarations Declarations used in the clause.
/// \param ComponentLists Component lists used in the clause.
///
static OMPToClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
ArrayRef<Expr *> Vars,
ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists);
/// \brief Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param NumVars Number of expressions listed in the clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this
/// clause.
/// \param NumComponents Total number of expression components in the clause.
///
static OMPToClause *CreateEmpty(const ASTContext &C, unsigned NumVars,
unsigned NumUniqueDeclarations,
unsigned NumComponentLists,
unsigned NumComponents);
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_to;
}
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
};
/// \brief This represents clause 'from' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update from(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'from'
/// with the variables 'a' and 'b'.
///
class OMPFromClause final
: public OMPMappableExprListClause<OMPFromClause>,
private llvm::TrailingObjects<
OMPFromClause, Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent> {
friend TrailingObjects;
friend OMPVarListClause;
friend OMPMappableExprListClause;
friend class OMPClauseReader;
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
size_t numTrailingObjects(OverloadToken<Expr *>) const {
return varlist_size();
}
size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
return getUniqueDeclarationsNum();
}
// The 'unsigned' array holds, per unique declaration, the number of its
// component lists, followed by the size of each component list.
size_t numTrailingObjects(OverloadToken<unsigned>) const {
return getUniqueDeclarationsNum() + getTotalComponentListNum();
}
/// \brief Build clause with number of variables \a NumVars.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param NumVars Number of expressions listed in this clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
///
explicit OMPFromClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned NumVars,
unsigned NumUniqueDeclarations,
unsigned NumComponentLists, unsigned NumComponents)
: OMPMappableExprListClause(OMPC_from, StartLoc, LParenLoc, EndLoc,
NumVars, NumUniqueDeclarations,
NumComponentLists, NumComponents) {}
/// \brief Build an empty clause.
///
/// \param NumVars Number of expressions listed in this clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
///
explicit OMPFromClause(unsigned NumVars, unsigned NumUniqueDeclarations,
unsigned NumComponentLists, unsigned NumComponents)
: OMPMappableExprListClause(
OMPC_from, SourceLocation(), SourceLocation(), SourceLocation(),
NumVars, NumUniqueDeclarations, NumComponentLists, NumComponents) {}
public:
/// \brief Creates clause with a list of variables \a Vars.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param Vars The original expression used in the clause.
/// \param Declarations Declarations used in the clause.
/// \param ComponentLists Component lists used in the clause.
///
static OMPFromClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
ArrayRef<Expr *> Vars,
ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists);
/// \brief Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param NumVars Number of expressions listed in the clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this
/// clause.
/// \param NumComponents Total number of expression components in the clause.
///
static OMPFromClause *CreateEmpty(const ASTContext &C, unsigned NumVars,
unsigned NumUniqueDeclarations,
unsigned NumComponentLists,
unsigned NumComponents);
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_from;
}
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
};
/// This represents clause 'use_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target data use_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target data' has clause
/// 'use_device_ptr' with the variables 'a' and 'b'.
///
class OMPUseDevicePtrClause final
: public OMPMappableExprListClause<OMPUseDevicePtrClause>,
private llvm::TrailingObjects<
OMPUseDevicePtrClause, Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent> {
friend TrailingObjects;
friend OMPVarListClause;
friend OMPMappableExprListClause;
friend class OMPClauseReader;
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
// 3 * varlist_size(): the Expr* trailing array stores the variable list,
// then the private copies, then their initializers (see getPrivateCopies
// and getInits below).
size_t numTrailingObjects(OverloadToken<Expr *>) const {
return 3 * varlist_size();
}
size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
return getUniqueDeclarationsNum();
}
size_t numTrailingObjects(OverloadToken<unsigned>) const {
return getUniqueDeclarationsNum() + getTotalComponentListNum();
}
/// Build clause with number of variables \a NumVars.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param NumVars Number of expressions listed in this clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
///
explicit OMPUseDevicePtrClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned NumVars,
unsigned NumUniqueDeclarations,
unsigned NumComponentLists,
unsigned NumComponents)
: OMPMappableExprListClause(OMPC_use_device_ptr, StartLoc, LParenLoc,
EndLoc, NumVars, NumUniqueDeclarations,
NumComponentLists, NumComponents) {}
/// Build an empty clause.
///
/// \param NumVars Number of expressions listed in this clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
///
explicit OMPUseDevicePtrClause(unsigned NumVars,
unsigned NumUniqueDeclarations,
unsigned NumComponentLists,
unsigned NumComponents)
: OMPMappableExprListClause(OMPC_use_device_ptr, SourceLocation(),
SourceLocation(), SourceLocation(), NumVars,
NumUniqueDeclarations, NumComponentLists,
NumComponents) {}
/// Sets the list of references to private copies with initializers for new
/// private variables.
/// \param VL List of references.
void setPrivateCopies(ArrayRef<Expr *> VL);
/// Gets the list of references to private copies with initializers for new
/// private variables.
// Private copies are stored immediately after the variable list.
MutableArrayRef<Expr *> getPrivateCopies() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivateCopies() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// Sets the list of references to initializer variables for new private
/// variables.
/// \param VL List of references.
void setInits(ArrayRef<Expr *> VL);
/// Gets the list of references to initializer variables for new private
/// variables.
// Initializers are stored immediately after the private copies.
MutableArrayRef<Expr *> getInits() {
return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
}
ArrayRef<const Expr *> getInits() const {
return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
}
public:
/// Creates clause with a list of variables \a Vars.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param Vars The original expression used in the clause.
/// \param PrivateVars Expressions referring to private copies.
/// \param Inits Expressions referring to private copy initializers.
/// \param Declarations Declarations used in the clause.
/// \param ComponentLists Component lists used in the clause.
///
static OMPUseDevicePtrClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> Vars,
ArrayRef<Expr *> PrivateVars, ArrayRef<Expr *> Inits,
ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists);
/// Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param NumVars Number of expressions listed in the clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this
/// clause.
/// \param NumComponents Total number of expression components in the clause.
///
static OMPUseDevicePtrClause *CreateEmpty(const ASTContext &C,
unsigned NumVars,
unsigned NumUniqueDeclarations,
unsigned NumComponentLists,
unsigned NumComponents);
typedef MutableArrayRef<Expr *>::iterator private_copies_iterator;
typedef ArrayRef<const Expr *>::iterator private_copies_const_iterator;
typedef llvm::iterator_range<private_copies_iterator> private_copies_range;
typedef llvm::iterator_range<private_copies_const_iterator>
private_copies_const_range;
private_copies_range private_copies() {
return private_copies_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
private_copies_const_range private_copies() const {
return private_copies_const_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
typedef MutableArrayRef<Expr *>::iterator inits_iterator;
typedef ArrayRef<const Expr *>::iterator inits_const_iterator;
typedef llvm::iterator_range<inits_iterator> inits_range;
typedef llvm::iterator_range<inits_const_iterator> inits_const_range;
inits_range inits() {
return inits_range(getInits().begin(), getInits().end());
}
inits_const_range inits() const {
return inits_const_range(getInits().begin(), getInits().end());
}
// Only the original variable list is exposed as children; private copies
// and initializers are reachable through the accessors above.
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_use_device_ptr;
}
};
/// This represents clause 'is_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target is_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause
/// 'is_device_ptr' with the variables 'a' and 'b'.
///
class OMPIsDevicePtrClause final
: public OMPMappableExprListClause<OMPIsDevicePtrClause>,
private llvm::TrailingObjects<
OMPIsDevicePtrClause, Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent> {
friend TrailingObjects;
friend OMPVarListClause;
friend OMPMappableExprListClause;
friend class OMPClauseReader;
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
size_t numTrailingObjects(OverloadToken<Expr *>) const {
return varlist_size();
}
size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
return getUniqueDeclarationsNum();
}
size_t numTrailingObjects(OverloadToken<unsigned>) const {
return getUniqueDeclarationsNum() + getTotalComponentListNum();
}
/// Build clause with number of variables \a NumVars.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param NumVars Number of expressions listed in this clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
///
explicit OMPIsDevicePtrClause(SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
unsigned NumVars,
unsigned NumUniqueDeclarations,
unsigned NumComponentLists,
unsigned NumComponents)
: OMPMappableExprListClause(OMPC_is_device_ptr, StartLoc, LParenLoc,
EndLoc, NumVars, NumUniqueDeclarations,
NumComponentLists, NumComponents) {}
/// Build an empty clause.
///
/// \param NumVars Number of expressions listed in this clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this clause.
/// \param NumComponents Total number of expression components in the clause.
///
explicit OMPIsDevicePtrClause(unsigned NumVars,
unsigned NumUniqueDeclarations,
unsigned NumComponentLists,
unsigned NumComponents)
: OMPMappableExprListClause(OMPC_is_device_ptr, SourceLocation(),
SourceLocation(), SourceLocation(), NumVars,
NumUniqueDeclarations, NumComponentLists,
NumComponents) {}
public:
/// Creates clause with a list of variables \a Vars.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param Vars The original expression used in the clause.
/// \param Declarations Declarations used in the clause.
/// \param ComponentLists Component lists used in the clause.
///
static OMPIsDevicePtrClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> Vars,
ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists);
/// Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param NumVars Number of expressions listed in the clause.
/// \param NumUniqueDeclarations Number of unique base declarations in this
/// clause.
/// \param NumComponentLists Number of component lists in this
/// clause.
/// \param NumComponents Total number of expression components in the clause.
///
static OMPIsDevicePtrClause *CreateEmpty(const ASTContext &C,
unsigned NumVars,
unsigned NumUniqueDeclarations,
unsigned NumComponentLists,
unsigned NumComponents);
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_is_device_ptr;
}
};
} // end namespace clang
#endif // LLVM_CLANG_AST_OPENMPCLAUSE_H
|
GB_binop__le_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__le_int16)
// A.*B function (eWiseMult): GB (_AemultB_01__le_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__le_int16)
// A.*B function (eWiseMult): GB (_AemultB_03__le_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__le_int16)
// A*D function (colscale): GB (_AxD__le_int16)
// D*A function (rowscale): GB (_DxB__le_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__le_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__le_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_int16)
// C=scalar+B GB (_bind1st__le_int16)
// C=scalar+B' GB (_bind1st_tran__le_int16)
// C=A+scalar GB (_bind2nd__le_int16)
// C=A'+scalar GB (_bind2nd_tran__le_int16)
// C type: bool
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LE || GxB_NO_INT16 || GxB_NO_LE_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Compiled out for LE: the comparison operator is not one of the supported
// accumulators, so no C += A+B dense kernel is generated (function name is
// "(none)" in the generator output).
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense, with no accumulator:
// cij = (aij <= bij).  Returns GrB_NO_VALUE if this operator/type pair is
// disabled at compile time (GB_DISABLE), so the caller can fall back to the
// generic method.
GrB_Info GB (_Cdense_ewise3_noaccum__le_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.  For LE the
// accumulation template is compiled out (#if 0 below, since LE is not a
// valid accumulator), so when enabled this function is a no-op that
// reports success.
GrB_Info GB (_Cdense_accumB__le_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.  As with the matrix
// variant above, the template is compiled out (#if 0) for LE, so when
// enabled this function is a no-op that reports success.
GrB_Info GB (_Cdense_accumb__le_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale, where D is diagonal; each entry of column j of A
// is combined with D(j,j) via cij = (aij <= djj).
GrB_Info GB (_AxD__le_int16)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C->x holds values of the operator's output type (bool for LE)
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale, where D is diagonal; each entry of row i of B is
// combined with D(i,i) via cij = (dii <= bij).
GrB_Info GB (_DxB__le_int16)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C->x holds values of the operator's output type (bool for LE)
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, applying cij = (aij <= bij) on the set
// union of the patterns of A and B, optionally under mask M.
GrB_Info GB (_AaddB__le_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces for slicing M, A, and B; freed by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B with the LE operator;
// the pattern of C is the intersection of A and B.
GrB_Info GB (_AemultB_01__le_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  flipxy selects fmult(y,x) instead of fmult(x,y) when the
// operator has no flipped variant (compile-time GB_BINOP_FLIP).
GrB_Info GB (_AemultB_02__le_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B when M is sparse/hyper and both A
// and B are bitmap/full.
GrB_Info GB (_AemultB_03__le_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is held in bitmap
// form.
GrB_Info GB (_AemultB_bitmap__le_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the LE operator with the scalar bound to its
// first argument: Cx [p] = (x <= Bx [p]) for every entry present in B.
GrB_Info GB (_bind1st__le_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Bx = (int16_t *) Bx_input ;
    bool *Cx = (bool *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // operate only on entries present in the bitmap of B
        if (GBB (Bb, p))
        {
            int16_t bij = GBX (Bx, p, false) ;
            Cx [p] = (x <= bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the LE operator with the scalar bound to its
// second argument: Cx [p] = (Ax [p] <= y) for every entry present in A.
GrB_Info GB (_bind2nd__le_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // operate only on entries present in the bitmap of A
        if (GBB (Ab, p))
        {
            int16_t aij = GBX (Ax, p, false) ;
            Cx [p] = (aij <= y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x <= aij) ; \
}

// C = op (x, A'): transpose A and apply the LE operator with the scalar
// bound to the first argument, via GB_unop_transpose.c and GB_CAST_OP.
GrB_Info GB (_bind1st_tran__le_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // re-establish GB_ATYPE (the same int16_t here) for any code that
    // follows this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij <= y) ; \
}

// C = op (A', y): transpose A and apply the LE operator with the scalar
// bound to the second argument, via GB_unop_transpose.c and GB_CAST_OP.
GrB_Info GB (_bind2nd_tran__le_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unaryop__lnot_uint32_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint32_fp32
// op(A') function: GB_tran__lnot_uint32_fp32
// C type: uint32_t
// A type: float
// cast: uint32_t cij ; GB_CAST_UNSIGNED(cij,aij,32)
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
float
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
uint32_t z ; GB_CAST_UNSIGNED(z,x,32) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT32 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the LNOT unary operator to every entry of a
// float array, casting to uint32_t first; the work is split across
// nthreads OpenMP threads.
GrB_Info GB_unop__lnot_uint32_fp32
(
    uint32_t *restrict Cx,
    const float *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < anz ; k++)
    {
        // Cx [k] = lnot (cast (Ax [k]))
        GB_CAST_OP (k, k) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast float to uint32_t, and apply
// the LNOT operator, via GB_unaryop_transpose.c (phase 2).
GrB_Info GB_tran__lnot_uint32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
DRB004-antidep2-var-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Two nested loops with loop-carried anti-dependence on the outer level.
This is a variable-length array version in C99.
Data race pair: a[i][j]@70:7 vs. a[i+1][j]@70:18
*/
#include "omprace.h"
#include <omp.h>
#include <stdlib.h>
int main(int argc,char *argv[])
{
  omprace_init();                  /* start the data-race detection harness */
  int i, j;
  int len = 20;                    /* default matrix dimension */
  if (argc>1)
    len = atoi(argv[1]);           /* NOTE(review): atoi has no error checking */
  double a[len][len];              /* C99 variable-length array */
  /* initialize every element */
  for (i=0; i< len; i++)
    for (j=0; j<len; j++)
      a[i][j] = 0.5;
  /* INTENTIONAL data race (this is a DataRaceBench "yes" case, see the
     file header): the outer loop is parallelized, but iteration i reads
     a[i+1][j] while iteration i+1 writes a[i+1][j] -- a loop-carried
     anti-dependence across threads.  Do not "fix" this; it is the defect
     under test. */
  #pragma omp parallel for private(j)
  for (i = 0; i < len - 1; i += 1) {
    for (j = 0; j < len ; j += 1) {
      a[i][j] += a[i + 1][j];
    }
  }
  omprace_fini();                  /* stop the harness and report */
  return 0;
}
|
draw.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD RRRR AAA W W %
% D D R R A A W W %
% D D RRRR AAAAA W W W %
% D D R R A A WW WW %
% DDDD R R A A W W %
% %
% %
% MagickCore Image Drawing Methods %
% %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
% rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
% Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent
% (www.appligent.com) contributed the dash pattern, linecap stroking
% algorithm, and minor rendering improvements.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
Define declarations.
*/
#define BezierQuantum 200
#define MaxBezierCoordinates 2097152
#define ThrowPointExpectedException(token,exception) \
{ \
(void) ThrowMagickException(exception,GetMagickModule(),DrawError, \
"NonconformingDrawingPrimitiveDefinition","`%s'",token); \
status=MagickFalse; \
break; \
}
/*
Typedef declarations.
*/
/* One monotone chain of points from a polygon path, in the sorted
   rendering form built by ConvertPathToPolygon(). */
typedef struct _EdgeInfo
{
  SegmentInfo
    bounds;            /* bounding box of this edge's points */

  double
    scanline;          /* rendering state; set to -1.0 when an edge is built */

  PointInfo
    *points;           /* vertex array owned by this edge */

  size_t
    number_points;     /* number of entries in points[] */

  ssize_t
    direction;         /* 1 if the edge runs toward increasing y ("down"),
                          otherwise 0 (see LogPolygonInfo) */

  MagickBooleanType
    ghostline;         /* transparent segment closing an open subpath */

  size_t
    highwater;         /* rendering state; initialized to 0 */
} EdgeInfo;

/* An ellipse-like element: center, axes, rotation.
   NOTE(review): semantics inferred from field names -- confirm at usage. */
typedef struct _ElementInfo
{
  double
    cx,
    cy,
    major,
    minor,
    angle;
} ElementInfo;

/* Parse state for tracing MVG primitives; presumably wraps a growable
   primitive array (address + allocated extent + next offset). */
typedef struct _MVGInfo
{
  PrimitiveInfo
    **primitive_info;

  size_t
    *extent;

  ssize_t
    offset;

  ExceptionInfo
    *exception;
} MVGInfo;

/* A polygon in sorted rendering form: a set of monotone edges. */
typedef struct _PolygonInfo
{
  EdgeInfo
    *edges;

  size_t
    number_edges;
} PolygonInfo;

/* Path opcodes emitted by ConvertPrimitiveToPath(). */
typedef enum
{
  MoveToCode,       /* start of a subpath */
  OpenCode,         /* start of a subpath later found to be unclosed */
  GhostlineCode,    /* start of a transparent closing segment */
  LineToCode,
  EndCode           /* terminator */
} PathInfoCode;

/* One vector-path element: a point plus its opcode. */
typedef struct _PathInfo
{
  PointInfo
    point;

  PathInfoCode
    code;
} PathInfo;
/*
Forward declarations.
*/
static Image
*DrawClippingMask(Image *,const DrawInfo *,const char *,const char *,
ExceptionInfo *);
static MagickBooleanType
DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *,
ExceptionInfo *);
static PrimitiveInfo
*TraceStrokePolygon(const Image *,const DrawInfo *,const PrimitiveInfo *);
static size_t
TracePath(MVGInfo *,const char *,ExceptionInfo *);
static void
TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo,
const double,const MagickBooleanType,const MagickBooleanType),
TraceBezier(MVGInfo *,const size_t),
TraceCircle(MVGInfo *,const PointInfo,const PointInfo),
TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo),
TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
% The format of the AcquireDrawInfo method is:
%
% DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
  /*
    Allocate a DrawInfo structure and initialize it to default values.
  */
  DrawInfo
    *info;

  info=(DrawInfo *) AcquireCriticalMemory(sizeof(*info));
  GetDrawInfo((ImageInfo *) NULL,info);
  return(info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneDrawInfo() makes a copy of the given draw_info structure. If NULL
% is specified, a new DrawInfo structure is created initialized to default
% values.
%
% The format of the CloneDrawInfo method is:
%
% DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
  const DrawInfo *draw_info)
{
  DrawInfo
    *clone_info;

  ExceptionInfo
    *exception;

  /*
    Start from a default-initialized DrawInfo, then deep-copy every field
    of draw_info: scalar fields by assignment, strings via CloneString(),
    patterns/masks via CloneImage(), and the dash pattern and gradient
    stops via fresh allocations.  Returns the clone; the caller owns it.
  */
  clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetDrawInfo(image_info,clone_info);
  if (draw_info == (DrawInfo *) NULL)
    return(clone_info);
  exception=AcquireExceptionInfo();
  /*
    FIX: test the source field (draw_info->primitive), matching every other
    conditional clone below.  The original tested clone_info->primitive,
    which GetDrawInfo() just reset to its default, so the primitive string
    was never copied.
  */
  if (draw_info->primitive != (char *) NULL)
    (void) CloneString(&clone_info->primitive,draw_info->primitive);
  if (draw_info->geometry != (char *) NULL)
    (void) CloneString(&clone_info->geometry,draw_info->geometry);
  clone_info->compliance=draw_info->compliance;
  clone_info->viewbox=draw_info->viewbox;
  clone_info->affine=draw_info->affine;
  clone_info->gravity=draw_info->gravity;
  clone_info->fill=draw_info->fill;
  clone_info->stroke=draw_info->stroke;
  clone_info->stroke_width=draw_info->stroke_width;
  if (draw_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
      exception);
  if (draw_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke_antialias=draw_info->stroke_antialias;
  clone_info->text_antialias=draw_info->text_antialias;
  clone_info->fill_rule=draw_info->fill_rule;
  clone_info->linecap=draw_info->linecap;
  clone_info->linejoin=draw_info->linejoin;
  clone_info->miterlimit=draw_info->miterlimit;
  clone_info->dash_offset=draw_info->dash_offset;
  clone_info->decorate=draw_info->decorate;
  clone_info->compose=draw_info->compose;
  if (draw_info->text != (char *) NULL)
    (void) CloneString(&clone_info->text,draw_info->text);
  if (draw_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    (void) CloneString(&clone_info->metrics,draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    (void) CloneString(&clone_info->family,draw_info->family);
  clone_info->style=draw_info->style;
  clone_info->stretch=draw_info->stretch;
  clone_info->weight=draw_info->weight;
  if (draw_info->encoding != (char *) NULL)
    (void) CloneString(&clone_info->encoding,draw_info->encoding);
  clone_info->pointsize=draw_info->pointsize;
  clone_info->kerning=draw_info->kerning;
  clone_info->interline_spacing=draw_info->interline_spacing;
  clone_info->interword_spacing=draw_info->interword_spacing;
  clone_info->direction=draw_info->direction;
  if (draw_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,draw_info->density);
  clone_info->align=draw_info->align;
  clone_info->undercolor=draw_info->undercolor;
  clone_info->border_color=draw_info->border_color;
  if (draw_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    {
      register ssize_t
        x;

      /* the dash pattern is terminated by a (near-)zero entry: count the
         entries, then copy them including the terminator */
      for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
      clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) x+1UL,
        sizeof(*clone_info->dash_pattern));
      if (clone_info->dash_pattern == (double *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
        (x+1)*sizeof(*clone_info->dash_pattern));
    }
  clone_info->gradient=draw_info->gradient;
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    {
      size_t
        number_stops;

      number_stops=clone_info->gradient.number_stops;
      clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
        number_stops,sizeof(*clone_info->gradient.stops));
      if (clone_info->gradient.stops == (StopInfo *) NULL)
        /* NOTE(review): exception tag copy-pasted from the dash-pattern
           branch; a gradient-specific tag would be clearer, but the tag
           must exist in the message catalog, so it is left unchanged */
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
        (size_t) number_stops*sizeof(*clone_info->gradient.stops));
    }
  clone_info->bounds=draw_info->bounds;
  clone_info->fill_alpha=draw_info->fill_alpha;
  clone_info->stroke_alpha=draw_info->stroke_alpha;
  clone_info->element_reference=draw_info->element_reference;
  clone_info->clip_path=draw_info->clip_path;
  clone_info->clip_units=draw_info->clip_units;
  if (draw_info->clip_mask != (char *) NULL)
    (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
      MagickTrue,exception);
  if (draw_info->composite_mask != (Image *) NULL)
    clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
      MagickTrue,exception);
  clone_info->render=draw_info->render;
  clone_info->debug=IsEventLogging();
  exception=DestroyExceptionInfo(exception);
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P a t h T o P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPathToPolygon() converts a path to the more efficient sorted
% rendering form.
%
% The format of the ConvertPathToPolygon method is:
%
% PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
%
% A description of each parameter follows:
%
% o Method ConvertPathToPolygon returns the path in a more efficient sorted
% rendering form of type PolygonInfo.
%
% o draw_info: Specifies a pointer to an DrawInfo structure.
%
% o path_info: Specifies a pointer to an PathInfo structure.
%
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/*
  qsort() comparator for EdgeInfo: orders edges for a right-handed
  coordinate system.  Keys, in order: first point's y, then x; then the
  cross product of the two edges' initial segments; then the second
  point's y and x.  The helper macro returns directly from the function
  on the first unequal key.
*/
static int DrawCompareEdges(const void *p_edge,const void *q_edge)
{
#define DrawCompareEdge(p,q) \
{ \
  if (((p)-(q)) < 0.0) \
    return(-1); \
  if (((p)-(q)) > 0.0) \
    return(1); \
}

  register const PointInfo
    *p,
    *q;

  /*
    Edge sorting for right-handed coordinate system.
  */
  p=((const EdgeInfo *) p_edge)->points;
  q=((const EdgeInfo *) q_edge)->points;
  DrawCompareEdge(p[0].y,q[0].y);
  DrawCompareEdge(p[0].x,q[0].x);
  /* the sign of this cross product distinguishes the initial directions */
  DrawCompareEdge((p[1].x-p[0].x)*(q[1].y-q[0].y),(p[1].y-p[0].y)*
    (q[1].x-q[0].x));
  DrawCompareEdge(p[1].y,q[1].y);
  DrawCompareEdge(p[1].x,q[1].x);
  return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
  Dump every edge of the polygon (direction, ghostline flag, bounds, and
  points) to the debug log on the DrawEvent channel.
*/
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
  register EdgeInfo
    *p;

  register ssize_t
    i,
    j;

  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
  p=polygon_info->edges;
  for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
  {
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
      (double) i);
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
      p->direction != MagickFalse ? "down" : "up");
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
      p->ghostline != MagickFalse ? "transparent" : "opaque");
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
      p->bounds.x2,p->bounds.y2);
    for (j=0; j < (ssize_t) p->number_points; j++)
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g",
        p->points[j].x,p->points[j].y);
    p++;
  }
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}
/*
  Reverse the point array in place by swapping symmetric pairs from both
  ends toward the middle.
*/
static void ReversePoints(PointInfo *points,const size_t number_points)
{
  ssize_t
    head,
    tail;

  head=0;
  tail=(ssize_t) number_points-1;
  while (head < tail)
  {
    PointInfo
      swap;

    swap=points[head];
    points[head]=points[tail];
    points[tail]=swap;
    head++;
    tail--;
  }
}
static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
{
  long
    direction,          /* -1, 0, or +1: current monotone y-direction */
    next_direction;

  PointInfo
    point,              /* last point appended to the current edge */
    *points;            /* points of the edge under construction */

  PolygonInfo
    *polygon_info;

  SegmentInfo
    bounds;             /* x-bounds of the edge under construction */

  register ssize_t
    i,
    n;                  /* number of points in the current edge */

  MagickBooleanType
    ghostline;

  size_t
    edge,               /* index of the next edge slot to fill */
    number_edges,       /* allocated capacity of the edge array */
    number_points;      /* allocated capacity of points[] */

  /*
    Convert a path to the more efficient sorted rendering form: split the
    path into monotone edges (a new edge whenever the y-direction
    reverses or a new subpath starts), then sort the edges for scanline
    rendering.  NOTE(review): every error return below leaks the
    partially built polygon_info/edges/points -- confirm callers cannot
    recover them.
  */
  polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo *) NULL)
    return((PolygonInfo *) NULL);
  number_edges=16;
  polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
    sizeof(*polygon_info->edges));
  if (polygon_info->edges == (EdgeInfo *) NULL)
    return((PolygonInfo *) NULL);
  (void) memset(polygon_info->edges,0,number_edges*
    sizeof(*polygon_info->edges));
  direction=0;
  edge=0;
  ghostline=MagickFalse;
  n=0;
  number_points=0;
  points=(PointInfo *) NULL;
  (void) memset(&point,0,sizeof(point));
  (void) memset(&bounds,0,sizeof(bounds));
  /* initialize edge slot 0 to a safe empty state */
  polygon_info->edges[edge].number_points=(size_t) n;
  polygon_info->edges[edge].scanline=0.0;
  polygon_info->edges[edge].highwater=0;
  polygon_info->edges[edge].ghostline=ghostline;
  polygon_info->edges[edge].direction=(ssize_t) direction;
  polygon_info->edges[edge].points=points;
  polygon_info->edges[edge].bounds=bounds;
  polygon_info->number_edges=0;
  for (i=0; path_info[i].code != EndCode; i++)
  {
    if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
        (path_info[i].code == GhostlineCode))
      {
        /*
          Move to.
        */
        if ((points != (PointInfo *) NULL) && (n >= 2))
          {
            /* flush the edge in progress before starting a new subpath */
            if (edge == number_edges)
              {
                /* grow the edge array geometrically */
                number_edges<<=1;
                polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                  polygon_info->edges,(size_t) number_edges,
                  sizeof(*polygon_info->edges));
                if (polygon_info->edges == (EdgeInfo *) NULL)
                  return((PolygonInfo *) NULL);
              }
            polygon_info->edges[edge].number_points=(size_t) n;
            polygon_info->edges[edge].scanline=(-1.0);
            polygon_info->edges[edge].highwater=0;
            polygon_info->edges[edge].ghostline=ghostline;
            polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
            if (direction < 0)
              ReversePoints(points,(size_t) n);  /* store points top-down */
            polygon_info->edges[edge].points=points;
            polygon_info->edges[edge].bounds=bounds;
            polygon_info->edges[edge].bounds.y1=points[0].y;
            polygon_info->edges[edge].bounds.y2=points[n-1].y;
            points=(PointInfo *) NULL;
            ghostline=MagickFalse;
            edge++;
          }
        if (points == (PointInfo *) NULL)
          {
            number_points=16;
            points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
              sizeof(*points));
            if (points == (PointInfo *) NULL)
              return((PolygonInfo *) NULL);
          }
        ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
        point=path_info[i].point;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        direction=0;
        n=1;
        continue;
      }
    /*
      Line to.
    */
    next_direction=((path_info[i].point.y > point.y) ||
      ((fabs(path_info[i].point.y-point.y) < MagickEpsilon) &&
       (path_info[i].point.x > point.x))) ? 1 : -1;
    if ((points != (PointInfo *) NULL) && (direction != 0) &&
        (direction != next_direction))
      {
        /*
          New edge: the y-direction reversed, so close the current edge
          and begin another starting at the shared point.
        */
        point=points[n-1];
        if (edge == number_edges)
          {
            number_edges<<=1;
            polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
              polygon_info->edges,(size_t) number_edges,
              sizeof(*polygon_info->edges));
            if (polygon_info->edges == (EdgeInfo *) NULL)
              return((PolygonInfo *) NULL);
          }
        polygon_info->edges[edge].number_points=(size_t) n;
        polygon_info->edges[edge].scanline=(-1.0);
        polygon_info->edges[edge].highwater=0;
        polygon_info->edges[edge].ghostline=ghostline;
        polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
        if (direction < 0)
          ReversePoints(points,(size_t) n);
        polygon_info->edges[edge].points=points;
        polygon_info->edges[edge].bounds=bounds;
        polygon_info->edges[edge].bounds.y1=points[0].y;
        polygon_info->edges[edge].bounds.y2=points[n-1].y;
        number_points=16;
        points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          return((PolygonInfo *) NULL);
        n=1;
        ghostline=MagickFalse;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        edge++;
      }
    direction=next_direction;
    if (points == (PointInfo *) NULL)
      continue;
    if (n == (ssize_t) number_points)
      {
        /* grow the point array geometrically */
        number_points<<=1;
        points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          return((PolygonInfo *) NULL);
      }
    point=path_info[i].point;
    points[n]=point;
    if (point.x < bounds.x1)
      bounds.x1=point.x;
    if (point.x > bounds.x2)
      bounds.x2=point.x;
    n++;
  }
  if (points != (PointInfo *) NULL)
    {
      /* flush the final edge; a single point is degenerate and dropped */
      if (n < 2)
        points=(PointInfo *) RelinquishMagickMemory(points);
      else
        {
          if (edge == number_edges)
            {
              number_edges<<=1;
              polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                polygon_info->edges,(size_t) number_edges,
                sizeof(*polygon_info->edges));
              if (polygon_info->edges == (EdgeInfo *) NULL)
                return((PolygonInfo *) NULL);
            }
          polygon_info->edges[edge].number_points=(size_t) n;
          polygon_info->edges[edge].scanline=(-1.0);
          polygon_info->edges[edge].highwater=0;
          polygon_info->edges[edge].ghostline=ghostline;
          polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
          if (direction < 0)
            ReversePoints(points,(size_t) n);
          polygon_info->edges[edge].points=points;
          polygon_info->edges[edge].bounds=bounds;
          polygon_info->edges[edge].bounds.y1=points[0].y;
          polygon_info->edges[edge].bounds.y2=points[n-1].y;
          ghostline=MagickFalse;
          edge++;
        }
    }
  polygon_info->number_edges=edge;
  /* sort the edges for the scanline renderer */
  qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
    sizeof(*polygon_info->edges),DrawCompareEdges);
  if (IsEventLogging() != MagickFalse)
    LogPolygonInfo(polygon_info);
  return(polygon_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P r i m i t i v e T o P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
% path structure.
%
% The format of the ConvertPrimitiveToPath method is:
%
% PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o Method ConvertPrimitiveToPath returns a vector path structure of type
% PathInfo.
%
% o draw_info: a structure of type DrawInfo.
%
% o primitive_info: Specifies a pointer to an PrimitiveInfo structure.
%
%
*/
/*
  Dump the vector path (point coordinates and opcode names) to the debug
  log on the DrawEvent channel.
*/
static void LogPathInfo(const PathInfo *path_info)
{
  register const PathInfo
    *p;

  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
  for (p=path_info; p->code != EndCode; p++)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ?
      "moveto ghostline" : p->code == OpenCode ? "moveto open" :
      p->code == MoveToCode ? "moveto" : p->code == LineToCode ? "lineto" :
      "?");
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}
/*
  ConvertPrimitiveToPath() flattens a PrimitiveInfo vertex list into a linear
  PathInfo array, tagging subpath starts (MoveToCode), interior vertices
  (LineToCode), open subpath starts (OpenCode), and the ghost segments that
  artificially close open subpaths (GhostlineCode).  Returns NULL for
  primitive types that carry no path geometry, or on allocation failure.
  The caller owns the returned array.
*/
static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info)
{
  MagickBooleanType
    closed_subpath;

  PathInfo
    *path_info;

  PathInfoCode
    code;

  PointInfo
    p,  /* first point of the current subpath */
    q;  /* most recently emitted point (for duplicate elimination) */

  register ssize_t
    i,
    n;  /* number of vertices emitted so far */

  ssize_t
    coordinates,  /* vertices remaining in the current subpath */
    start;        /* index of the current subpath's first emitted vertex */

  /*
    Converts a PrimitiveInfo structure into a vector path structure.
  */
  switch (primitive_info->primitive)
  {
    /* these primitive types have no vector-path representation */
    case AlphaPrimitive:
    case ColorPrimitive:
    case ImagePrimitive:
    case PointPrimitive:
    case TextPrimitive:
      return((PathInfo *) NULL);
    default:
      break;
  }
  /* count vertices; the list is terminated by UndefinedPrimitive */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  /* worst case adds two ghostline vertices per vertex, plus the EndCode */
  path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
    sizeof(*path_info));
  if (path_info == (PathInfo *) NULL)
    return((PathInfo *) NULL);
  coordinates=0;
  closed_subpath=MagickFalse;
  n=0;
  p.x=(-1.0);
  p.y=(-1.0);
  q.x=(-1.0);
  q.y=(-1.0);
  start=0;
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    code=LineToCode;
    if (coordinates <= 0)
      {
        /*
          New subpath.
        */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        p=primitive_info[i].point;
        start=n;
        code=MoveToCode;
        closed_subpath=primitive_info[i].closed_subpath;
      }
    coordinates--;
    /* always emit subpath starts and last points; otherwise skip vertices
       within MagickEpsilon of the previously emitted point */
    if ((code == MoveToCode) || (coordinates <= 0) ||
        (fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
        (fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
      {
        /*
          Eliminate duplicate points.
        */
        path_info[n].code=code;
        path_info[n].point=primitive_info[i].point;
        q=primitive_info[i].point;
        n++;
      }
    if (coordinates > 0)
      continue;  /* next point in current subpath */
    if (closed_subpath != MagickFalse)
      {
        closed_subpath=MagickFalse;
        continue;
      }
    /*
      Mark the p point as open if the subpath is not closed.
    */
    path_info[start].code=OpenCode;
    /* append a ghost segment back to the subpath's first point */
    path_info[n].code=GhostlineCode;
    path_info[n].point=primitive_info[i].point;
    n++;
    path_info[n].code=LineToCode;
    path_info[n].point=p;
    n++;
  }
  /* terminate the path */
  path_info[n].code=EndCode;
  path_info[n].point.x=0.0;
  path_info[n].point.y=0.0;
  if (IsEventLogging() != MagickFalse)
    LogPathInfo(path_info);
  return(path_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyDrawInfo() deallocates memory associated with an DrawInfo structure.
%
% The format of the DestroyDrawInfo method is:
%
% DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
  /*
    Release every resource owned by the DrawInfo, then the structure itself;
    always returns NULL so callers can clear their pointer in one statement.
    The asserts must run before any other member access: the original code
    read draw_info->debug before asserting draw_info was non-NULL.
  */
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  if (draw_info->primitive != (char *) NULL)
    draw_info->primitive=DestroyString(draw_info->primitive);
  if (draw_info->text != (char *) NULL)
    draw_info->text=DestroyString(draw_info->text);
  if (draw_info->geometry != (char *) NULL)
    draw_info->geometry=DestroyString(draw_info->geometry);
  if (draw_info->fill_pattern != (Image *) NULL)
    draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
  if (draw_info->stroke_pattern != (Image *) NULL)
    draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
  if (draw_info->font != (char *) NULL)
    draw_info->font=DestroyString(draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    draw_info->metrics=DestroyString(draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    draw_info->family=DestroyString(draw_info->family);
  if (draw_info->encoding != (char *) NULL)
    draw_info->encoding=DestroyString(draw_info->encoding);
  if (draw_info->density != (char *) NULL)
    draw_info->density=DestroyString(draw_info->density);
  if (draw_info->server_name != (char *) NULL)
    draw_info->server_name=(char *)
      RelinquishMagickMemory(draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    draw_info->dash_pattern=(double *) RelinquishMagickMemory(
      draw_info->dash_pattern);
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
      draw_info->gradient.stops);
  if (draw_info->clip_mask != (char *) NULL)
    draw_info->clip_mask=DestroyString(draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
  if (draw_info->composite_mask != (Image *) NULL)
    draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
  /* invalidate the signature so a later use-after-free trips the assert */
  draw_info->signature=(~MagickCoreSignature);
  draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
  return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y E d g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyEdge() destroys the specified polygon edge.
%
% The format of the DestroyEdge method is:
%
% size_t DestroyEdge(PolygonInfo *polygon_info,const size_t edge)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
% o edge: the polygon edge number to destroy.
%
*/
/*
  Release the point list of edge `edge', compact the edge array over the
  freed slot, and return the new edge count.
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,
  const size_t edge)
{
  size_t
    remaining;

  assert(edge < polygon_info->number_edges);
  polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory(
    polygon_info->edges[edge].points);
  polygon_info->number_edges--;
  remaining=polygon_info->number_edges;
  if (edge < remaining)
    (void) memmove(polygon_info->edges+edge,polygon_info->edges+edge+1,
      (remaining-edge)*sizeof(*polygon_info->edges));
  return(remaining);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P o l y g o n I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPolygonInfo() destroys the PolygonInfo data structure.
%
% The format of the DestroyPolygonInfo method is:
%
% PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
*/
/*
  Free every edge's point list, then the edge array, then the PolygonInfo
  itself; always returns NULL.
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
  register ssize_t
    edge;

  for (edge=0; edge < (ssize_t) polygon_info->number_edges; edge++)
  {
    polygon_info->edges[edge].points=(PointInfo *)
      RelinquishMagickMemory(polygon_info->edges[edge].points);
  }
  polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(polygon_info->edges);
  return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w A f f i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawAffineImage() composites the source over the destination image as
% dictated by the affine transform.
%
% The format of the DrawAffineImage method is:
%
% MagickBooleanType DrawAffineImage(Image *image,const Image *source,
% const AffineMatrix *affine,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o source: the source image.
%
% o affine: the affine transform.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  AffineEdge() clips the horizontal span `edge' (the scanline at destination
  row y) against the inverse-affine image of the source's column and row
  ranges, returning the clipped span.  A result with x2 < x1 signals that the
  scanline misses the source entirely.
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
  const double y,const SegmentInfo *edge)
{
  double
    intercept,
    z;

  register double
    x;

  SegmentInfo
    inverse_edge;

  /*
    Determine left and right edges.
  */
  inverse_edge.x1=edge->x1;
  inverse_edge.y1=edge->y1;
  inverse_edge.x2=edge->x2;
  inverse_edge.y2=edge->y2;
  z=affine->ry*y+affine->tx;
  if (affine->sx >= MagickEpsilon)
    {
      /* source column grows with x: clip at columns 0 and image->columns */
      intercept=(-z/affine->sx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->columns)/affine->sx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->sx < -MagickEpsilon)
      {
        /* mirrored mapping: the two intercepts swap roles */
        intercept=(-z+(double) image->columns)/affine->sx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->sx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      /* sx ~ 0: every x on this scanline maps to source column z */
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
        {
          /* NOTE(review): this branch sets x2=edge->x1 while the analogous
             row branch below sets x2=edge->x2; both presumably intend an
             empty (x2 < x1) span — confirm against upstream. */
          inverse_edge.x2=edge->x1;
          return(inverse_edge);
        }
  /*
    Determine top and bottom edges.
  */
  z=affine->sy*y+affine->ty;
  if (affine->rx >= MagickEpsilon)
    {
      /* source row grows with x: clip at rows 0 and image->rows */
      intercept=(-z/affine->rx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->rows)/affine->rx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->rx < -MagickEpsilon)
      {
        /* mirrored mapping: the two intercepts swap roles */
        intercept=(-z+(double) image->rows)/affine->rx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->rx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      /* rx ~ 0: every x on this scanline maps to source row z */
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
        {
          inverse_edge.x2=edge->x2;
          return(inverse_edge);
        }
  return(inverse_edge);
}
/*
  Compute the inverse of a 2x3 affine transform.  PerceptibleReciprocal()
  keeps a (near-)singular determinant from dividing by zero.
*/
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
  AffineMatrix
    inverse_affine;

  double
    reciprocal_determinant;

  reciprocal_determinant=PerceptibleReciprocal(affine->sx*affine->sy-
    affine->rx*affine->ry);
  /* invert the 2x2 linear part ... */
  inverse_affine.sx=reciprocal_determinant*affine->sy;
  inverse_affine.rx=reciprocal_determinant*(-affine->rx);
  inverse_affine.ry=reciprocal_determinant*(-affine->ry);
  inverse_affine.sy=reciprocal_determinant*affine->sx;
  /* ... then carry the translation through the inverted linear part */
  inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
    inverse_affine.ry;
  inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
    inverse_affine.sy;
  return(inverse_affine);
}
/*
  DrawAffineImage() composites `source' over `image' as dictated by `affine',
  by forward-mapping the source corners to bound the affected region, then
  inverse-mapping each destination pixel back into source space and
  interpolating.
*/
MagickExport MagickBooleanType DrawAffineImage(Image *image,
  const Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
  AffineMatrix
    inverse_affine;

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    extent[4],  /* the four source corners, forward-transformed */
    min,
    max;

  register ssize_t
    i;

  SegmentInfo
    edge;

  ssize_t
    start,
    stop,
    y;

  /*
    Determine bounding box.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(source != (const Image *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(affine != (AffineMatrix *) NULL);
  extent[0].x=0.0;
  extent[0].y=0.0;
  extent[1].x=(double) source->columns-1.0;
  extent[1].y=0.0;
  extent[2].x=(double) source->columns-1.0;
  extent[2].y=(double) source->rows-1.0;
  extent[3].x=0.0;
  extent[3].y=(double) source->rows-1.0;
  for (i=0; i < 4; i++)
  {
    PointInfo
      point;

    /* forward-map each source corner into destination space */
    point=extent[i];
    extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
    extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
  }
  /* axis-aligned bounding box of the transformed corners */
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  /*
    Affine transform image.
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  /* clip the bounding box to the destination image extent */
  edge.x1=MagickMax(min.x,0.0);
  edge.y1=MagickMax(min.y,0.0);
  edge.x2=MagickMin(max.x,(double) image->columns-1.0);
  edge.y2=MagickMin(max.y,(double) image->rows-1.0);
  /* destination pixels are rendered by inverse-mapping into source space */
  inverse_affine=InverseAffineMatrix(affine);
  GetPixelInfo(image,&zero);
  start=(ssize_t) ceil(edge.y1-0.5);
  stop=(ssize_t) floor(edge.y2+0.5);
  source_view=AcquireVirtualCacheView(source,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,image,stop-start,1)
#endif
  for (y=start; y <= stop; y++)
  {
    PixelInfo
      composite,
      pixel;

    PointInfo
      point;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    SegmentInfo
      inverse_edge;

    ssize_t
      x_offset;

    /* clip this scanline against the source's projected bounds */
    inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
    if (inverse_edge.x2 < inverse_edge.x1)
      continue;  /* scanline does not intersect the source */
    q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
      0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),
      1,exception);
    if (q == (Quantum *) NULL)
      continue;
    pixel=zero;
    composite=zero;
    x_offset=0;
    for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t) floor(inverse_edge.x2+0.5); x++)
    {
      /* inverse-map the destination pixel back into source coordinates */
      point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
        inverse_affine.tx;
      point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
        inverse_affine.ty;
      status=InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel,
        point.x,point.y,&pixel,exception);
      if (status == MagickFalse)
        break;
      /* composite interpolated source pixel over the destination pixel */
      GetPixelInfoPixel(image,q,&composite);
      CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
        &composite);
      SetPixelViaPixelInfo(image,&composite,q);
      x_offset++;
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w B o u n d i n g R e c t a n g l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawBoundingRectangles() draws the bounding rectangles on the image. This
% is only useful for developers debugging the rendering algorithm.
%
% The format of the DrawBoundingRectangles method is:
%
% void DrawBoundingRectangles(Image *image,const DrawInfo *draw_info,
% PolygonInfo *polygon_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Clamp the requested stroke width to a sane maximum proportional to the
  larger image dimension (factor 2*sqrt(2), padded by MagickEpsilon).
*/
static inline double SaneStrokeWidth(const Image *image,
  const DrawInfo *draw_info)
{
  double
    maximum_width;

  maximum_width=(2.0*sqrt(2.0)+MagickEpsilon)*MagickMax(image->columns,
    image->rows);
  return(MagickMin((double) draw_info->stroke_width,maximum_width));
}
/*
  DrawBoundingRectangles() is a rendering-debug aid: it strokes each polygon
  edge's bounding box (red or green depending on edge direction) and the
  overall bounds (blue) onto the image.
*/
static void DrawBoundingRectangles(Image *image,const DrawInfo *draw_info,
  const PolygonInfo *polygon_info,ExceptionInfo *exception)
{
  double
    mid;  /* half the scaled stroke width; pads every bounding box */

  DrawInfo
    *clone_info;

  PointInfo
    end,
    resolution,
    start;

  PrimitiveInfo
    primitive_info[6];

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    coordinates;

  (void) memset(primitive_info,0,sizeof(primitive_info));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) QueryColorCompliance("#000F",AllCompliance,&clone_info->fill,
    exception);
  /* default to 96 DPI unless the draw info carries a density string */
  resolution.x=96.0;
  resolution.y=96.0;
  if (clone_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      MagickStatusType
        flags;

      flags=ParseGeometry(clone_info->density,&geometry_info);
      resolution.x=geometry_info.rho;
      resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == MagickFalse)
        resolution.y=resolution.x;  /* single density value covers both axes */
    }
  mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
    SaneStrokeWidth(image,clone_info)/2.0;
  bounds.x1=0.0;
  bounds.y1=0.0;
  bounds.x2=0.0;
  bounds.y2=0.0;
  if (polygon_info != (PolygonInfo *) NULL)
    {
      /* union of all edge bounding boxes */
      bounds=polygon_info->edges[0].bounds;
      for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
          bounds.x1=polygon_info->edges[i].bounds.x1;
        if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
          bounds.y1=polygon_info->edges[i].bounds.y1;
        if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
          bounds.x2=polygon_info->edges[i].bounds.x2;
        if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
          bounds.y2=polygon_info->edges[i].bounds.y2;
      }
      /* pad by mid, then clamp each coordinate into the image extent */
      bounds.x1-=mid;
      bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x1;
      bounds.y1-=mid;
      bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y1;
      bounds.x2+=mid;
      bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x2;
      bounds.y2+=mid;
      bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y2;
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        /* red for one edge direction, green for the other */
        if (polygon_info->edges[i].direction != 0)
          (void) QueryColorCompliance("#f00",AllCompliance,&clone_info->stroke,
            exception);
        else
          (void) QueryColorCompliance("#0f0",AllCompliance,&clone_info->stroke,
            exception);
        start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
        start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
        end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
        end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
        primitive_info[0].primitive=RectanglePrimitive;
        TraceRectangle(primitive_info,start,end);
        primitive_info[0].method=ReplaceMethod;
        coordinates=(ssize_t) primitive_info[0].coordinates;
        primitive_info[coordinates].primitive=UndefinedPrimitive;
        (void) DrawPrimitive(image,clone_info,primitive_info,exception);
      }
    }
  /* the overall bounds are stroked in blue */
  (void) QueryColorCompliance("#00f",AllCompliance,&clone_info->stroke,
    exception);
  start.x=(double) (bounds.x1-mid);
  start.y=(double) (bounds.y1-mid);
  end.x=(double) (bounds.x2+mid);
  end.y=(double) (bounds.y2+mid);
  primitive_info[0].primitive=RectanglePrimitive;
  TraceRectangle(primitive_info,start,end);
  primitive_info[0].method=ReplaceMethod;
  coordinates=(ssize_t) primitive_info[0].coordinates;
  primitive_info[coordinates].primitive=UndefinedPrimitive;
  (void) DrawPrimitive(image,clone_info,primitive_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClipPath() draws the clip path on the image mask.
%
% The format of the DrawClipPath method is:
%
% MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
% const char *id,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Render the clip path stored as image artifact `id' and install the result
  as the image's write mask.  Returns MagickFalse when the artifact is
  missing or the mask cannot be rendered.
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
  const DrawInfo *draw_info,const char *id,ExceptionInfo *exception)
{
  const char
    *clip_path;

  Image
    *mask_image;

  MagickBooleanType
    status;

  clip_path=GetImageArtifact(image,id);
  if (clip_path == (const char *) NULL)
    return(MagickFalse);
  mask_image=DrawClippingMask(image,draw_info,draw_info->clip_mask,clip_path,
    exception);
  if (mask_image == (Image *) NULL)
    return(MagickFalse);
  status=SetImageMask(image,WritePixelMask,mask_image,exception);
  mask_image=DestroyImage(mask_image);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p p i n g M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClippingMask() draws the clip path and returns it as an image clipping
% mask.
%
% The format of the DrawClippingMask method is:
%
% Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *clip_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o clip_path: the clip path.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  DrawClippingMask() renders `clip_path' onto a transparent canvas the size
  of `image', separates the alpha channel into a grayscale mask, negates it,
  and returns the result.  Returns NULL on failure.  Caller owns the
  returned image.
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *clip_path,ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  Image
    *clip_mask,
    *separate_mask;

  MagickStatusType
    status;

  /*
    Draw a clip path.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  /* build a fully transparent canvas matching the target image's extent */
  clip_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(clip_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(clip_mask));  /* DestroyImage() returns NULL */
  (void) SetImageMask(clip_mask,WritePixelMask,(Image *) NULL,exception);
  (void) QueryColorCompliance("#0000",AllCompliance,
    &clip_mask->background_color,exception);
  clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  clip_mask->background_color.alpha_trait=BlendPixelTrait;
  (void) SetImageBackgroundColor(clip_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
      id);
  /* render the path with opaque white fill and no stroke */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,clip_path);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  if (clone_info->clip_mask != (char *) NULL)
    clone_info->clip_mask=DestroyString(clone_info->clip_mask);
  (void) QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  clone_info->clip_path=MagickTrue;
  status=DrawImage(clip_mask,clone_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
  /* turn path coverage (alpha) into a grayscale mask, then invert it */
  separate_mask=SeparateImage(clip_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      clip_mask=DestroyImage(clip_mask);
      clip_mask=separate_mask;
      status=NegateImage(clip_mask,MagickFalse,exception);
      if (status == MagickFalse)
        clip_mask=DestroyImage(clip_mask);  /* yields NULL on failure */
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
  return(clip_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C o m p o s i t e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawCompositeMask() draws the mask path and returns it as an image mask.
%
% The format of the DrawCompositeMask method is:
%
% Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *mask_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the mask path id.
%
% o mask_path: the mask path.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  DrawCompositeMask() renders `mask_path' onto a transparent canvas the size
  of `image', separates the alpha channel into a grayscale mask, negates it,
  and returns the result.  Returns NULL on failure.  Caller owns the
  returned image.  Mirrors DrawClippingMask() but targets the composite
  pixel mask.
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *mask_path,ExceptionInfo *exception)
{
  Image
    *composite_mask,
    *separate_mask;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  /*
    Draw a mask path.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  /* build a fully transparent canvas matching the target image's extent */
  composite_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(composite_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(composite_mask));  /* DestroyImage() returns NULL */
  (void) SetImageMask(composite_mask,CompositePixelMask,(Image *) NULL,
    exception);
  (void) QueryColorCompliance("#0000",AllCompliance,
    &composite_mask->background_color,exception);
  composite_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  composite_mask->background_color.alpha_trait=BlendPixelTrait;
  (void) SetImageBackgroundColor(composite_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
      id);
  /* render the path with opaque white fill and no stroke */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,mask_path);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  (void) QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  status=DrawImage(composite_mask,clone_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
  /* turn path coverage (alpha) into a grayscale mask, then invert it */
  separate_mask=SeparateImage(composite_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      composite_mask=DestroyImage(composite_mask);
      composite_mask=separate_mask;
      status=NegateImage(composite_mask,MagickFalse,exception);
      if (status == MagickFalse)
        composite_mask=DestroyImage(composite_mask);  /* yields NULL */
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
  return(composite_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w D a s h P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
% image while respecting the dash offset and dash pattern attributes.
%
% The format of the DrawDashPolygon method is:
%
% MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  DrawDashPolygon() strokes a polygon as a sequence of dashes, honoring the
  draw info's dash pattern and dash offset.  Even-indexed pattern entries are
  pen-down (dash) lengths and odd-indexed entries pen-up (gap) lengths; the
  pattern array is terminated by a zero entry and wraps around.
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
  double
    length,          /* remaining length of the current pattern entry */
    maximum_length,  /* length of the current polygon segment */
    offset,          /* scaled dash offset still to consume */
    scale,
    total_length;    /* distance consumed along the current segment */

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PrimitiveInfo
    *dash_polygon;  /* scratch polygon holding the dash being built */

  register double
    dx,
    dy;

  register ssize_t
    i;

  size_t
    number_vertices;

  ssize_t
    j,  /* next free slot in dash_polygon */
    n;  /* index into the dash pattern */

  assert(draw_info != (const DrawInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
  /* count vertices; the list is terminated by UndefinedPrimitive */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  number_vertices=(size_t) i;
  dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (2UL*number_vertices+32UL),sizeof(*dash_polygon));
  if (dash_polygon == (PrimitiveInfo *) NULL)
    return(MagickFalse);
  (void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
    sizeof(*dash_polygon));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->miterlimit=0;
  dash_polygon[0]=primitive_info[0];
  scale=ExpandAffine(&draw_info->affine);
  length=scale*draw_info->dash_pattern[0];
  offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
    scale*draw_info->dash_offset : 0.0;
  j=1;
  /*
    Consume the dash offset by walking pattern entries until it is used up.
  */
  for (n=0; offset > 0.0; j=0)
  {
    if (draw_info->dash_pattern[n] <= 0.0)
      break;  /* pattern terminator reached */
    length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    if (offset > length)
      {
        offset-=length;
        n++;
        length=scale*draw_info->dash_pattern[n];
        continue;
      }
    if (offset < length)
      {
        length-=offset;
        offset=0.0;
        break;
      }
    offset=0.0;
    n++;
  }
  status=MagickTrue;
  maximum_length=0.0;
  total_length=0.0;
  /*
    Walk each polygon segment, alternating between pen-down (even n) and
    pen-up (odd n) pattern entries.
  */
  for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
  {
    dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
    dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
    maximum_length=hypot(dx,dy);
    if (maximum_length > MaxBezierCoordinates)
      break;  /* sanity limit on segment length */
    if (fabs(length) < MagickEpsilon)
      {
        /* entry exhausted: advance, wrapping at the zero terminator */
        n++;
        if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
          n=0;
        length=scale*draw_info->dash_pattern[n];
      }
    for (total_length=0.0; (length >= 0.0) && (maximum_length >= (total_length+length)); )
    {
      total_length+=length;
      if ((n & 0x01) != 0)
        {
          /* a gap just ended: restart the dash polygon at this point */
          dash_polygon[0]=primitive_info[0];
          dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          j=1;
        }
      else
        {
          /* a dash just ended: close it and stroke the accumulated points */
          if ((j+1) > (ssize_t) number_vertices)
            break;
          dash_polygon[j]=primitive_info[i-1];
          dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].coordinates=1;
          j++;
          dash_polygon[0].coordinates=(size_t) j;
          dash_polygon[j].primitive=UndefinedPrimitive;
          status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
        }
      n++;
      if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
        n=0;
      length=scale*draw_info->dash_pattern[n];
    }
    /* carry the unused remainder of the entry into the next segment */
    length-=(maximum_length-total_length);
    if ((n & 0x01) != 0)
      continue;  /* mid-gap: nothing to record at this vertex */
    dash_polygon[j]=primitive_info[i];
    dash_polygon[j].coordinates=1;
    j++;
  }
  if ((total_length < maximum_length) && ((n & 0x01) == 0) && (j > 1))
    {
      /*
        Flush the final partial dash.
      */
      dash_polygon[j]=primitive_info[i-1];
      dash_polygon[j].point.x+=MagickEpsilon;
      dash_polygon[j].point.y+=MagickEpsilon;
      dash_polygon[j].coordinates=1;
      j++;
      dash_polygon[0].coordinates=(size_t) j;
      dash_polygon[j].primitive=UndefinedPrimitive;
      status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
    }
  dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGradientImage() draws a linear gradient on the image.
%
% The format of the DrawGradientImage method is:
%
% MagickBooleanType DrawGradientImage(Image *image,
% const DrawInfo *draw_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetStopColorOffset() returns the gradient-stop offset for pixel (x,y): the
  projection onto the gradient vector for linear gradients, or the distance
  from the gradient center (scaled by the radii unless spread is Repeat) for
  radial gradients.
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
  const ssize_t x,const ssize_t y)
{
  switch (gradient->type)
  {
    case UndefinedGradient:
    case LinearGradient:
    {
      double
        gamma,
        length,
        offset,
        scale;

      PointInfo
        p,  /* gradient vector */
        q;  /* vector from the gradient origin to (x,y) */

      const SegmentInfo
        *gradient_vector;

      gradient_vector=(&gradient->gradient_vector);
      p.x=gradient_vector->x2-gradient_vector->x1;
      p.y=gradient_vector->y2-gradient_vector->y1;
      q.x=(double) x-gradient_vector->x1;
      q.y=(double) y-gradient_vector->y1;
      length=sqrt(q.x*q.x+q.y*q.y);
      /* gamma = 1/(|p|*|q|); PerceptibleReciprocal avoids divide-by-zero */
      gamma=sqrt(p.x*p.x+p.y*p.y)*length;
      gamma=PerceptibleReciprocal(gamma);
      scale=p.x*q.x+p.y*q.y;  /* dot(p,q) */
      /* offset = dot(p,q)/|p| : signed length of q projected onto p */
      offset=gamma*scale*length;
      return(offset);
    }
    case RadialGradient:
    {
      PointInfo
        v;

      if (gradient->spread == RepeatSpread)
        {
          /* repeat spread uses the raw (unscaled) distance from center */
          v.x=(double) x-gradient->center.x;
          v.y=(double) y-gradient->center.y;
          return(sqrt(v.x*v.x+v.y*v.y));
        }
      /* rotate (x,y) into the ellipse frame, then scale by the radii */
      v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
        gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
      v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
        gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
      return(sqrt(v.x*v.x+v.y*v.y));
    }
  }
  return(0.0);
}
/*
  qsort() comparator: order gradient stops by ascending offset; offsets
  within MagickEpsilon of each other compare equal.
*/
static int StopInfoCompare(const void *x,const void *y)
{
  const StopInfo
    *left,
    *right;

  left=(StopInfo *) x;
  right=(StopInfo *) y;
  if (left->offset > right->offset)
    return(1);
  if (fabs(left->offset-right->offset) <= MagickEpsilon)
    return(0);
  return(-1);
}
MagickExport MagickBooleanType DrawGradientImage(Image *image,
  const DrawInfo *draw_info,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const GradientInfo
    *gradient;

  const SegmentInfo
    *gradient_vector;

  double
    length;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    point;

  RectangleInfo
    bounding_box;

  ssize_t
    y;

  /*
    Draw linear or radial gradient on image.

    Each pixel inside gradient->bounding_box is assigned a normalized offset
    along the gradient, the two surrounding color stops are blended, and the
    result is composited over the existing pixel.  Note the loop bounds below
    treat bounding_box.height/width as end coordinates (exclusive), not as
    extents — the scan runs from bounding_box.y/x up to height/width.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  gradient=(&draw_info->gradient);
  /*
    Sort the stops by ascending offset so the linear stop search below can
    stop at the first offset greater than the pixel's.
  */
  qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo),
    StopInfoCompare);
  gradient_vector=(&gradient->gradient_vector);
  point.x=gradient_vector->x2-gradient_vector->x1;
  point.y=gradient_vector->y2-gradient_vector->y1;
  length=sqrt(point.x*point.x+point.y*point.y);  /* gradient vector length */
  bounding_box=gradient->bounding_box;
  status=MagickTrue;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
  for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
  {
    PixelInfo
      composite,
      pixel;

    double
      alpha,
      offset;

    register Quantum
      *magick_restrict q;

    register ssize_t
      i,
      x;

    ssize_t
      j;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    composite=zero;
    /*
      Seed the offset at column 0 of this row; non-radial offsets are
      normalized by the gradient vector length.
    */
    offset=GetStopColorOffset(gradient,0,y);
    if (gradient->type != RadialGradient)
      offset*=PerceptibleReciprocal(length);
    for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      switch (gradient->spread)
      {
        case UndefinedSpread:
        case PadSpread:
        {
          /*
            Pad spread: offsets outside [0,1] clamp to the first/last stop.
            The offset is recomputed except at the gradient origin pixel,
            where the row-seeded value is reused.
          */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if ((offset < 0.0) || (i == 0))
            composite=gradient->stops[0].color;
          else
            if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                /* Interpolate between the two stops bracketing the offset. */
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case ReflectSpread:
        {
          /*
            Reflect spread: the gradient ping-pongs — even periods run
            forward, odd periods run mirrored.
          */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          if (offset < 0.0)
            offset=(-offset);
          if ((ssize_t) fmod(offset,2.0) == 0)
            offset=fmod(offset,1.0);
          else
            offset=1.0-fmod(offset,1.0);
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case RepeatSpread:
        {
          MagickBooleanType
            antialias;

          double
            repeat;

          /*
            Repeat spread: the gradient tiles with period `length` (linear)
            or `radius` (radial); `antialias` marks pixels within one unit of
            a period seam so the seam can be smoothed below.
          */
          antialias=MagickFalse;
          repeat=0.0;
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type == LinearGradient)
                {
                  repeat=fmod(offset,length);
                  if (repeat < 0.0)
                    repeat=length-fmod(-repeat,length);
                  else
                    repeat=fmod(offset,length);
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=PerceptibleReciprocal(length)*repeat;
                }
              else
                {
                  repeat=fmod(offset,gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,gradient->radius);
                  else
                    repeat=fmod(offset,gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ? MagickTrue :
                    MagickFalse;
                  offset=repeat/gradient->radius;
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    /*
                      At a period seam, blend last stop into first stop
                      instead of the bracketing pair.
                    */
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      /* Composite the gradient color over the existing pixel. */
      CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
        &pixel);
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawImage() draws a graphic primitive on your image. The primitive
% may be represented as a string or filename. Precede the filename with an
% "at" sign (@) and the contents of the file are drawn on the image. You
% can affect how text is drawn by setting one or more members of the draw
% info structure.
%
% The format of the DrawImage method is:
%
% MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
  const size_t pad)
{
  size_t
    extent;

  /*
    Check if there is enough storage for drawing primitives: grow the
    primitive-info buffer to at least offset+pad+4096 entries.  Returns
    MagickTrue when the buffer is (now) large enough.  On failure it throws
    a ResourceLimitError, installs a minimal one-entry buffer so the caller
    can unwind safely, and returns MagickFalse.
  */
  extent=(size_t) mvg_info->offset+pad+4096;
  if ((extent >= pad) && (extent > (size_t) mvg_info->offset))
    {
      /*
        The sum did not wrap around size_t; an unchecked wraparound would
        make `extent` small, satisfy the early-return below without growing
        the buffer, and lead to out-of-bounds primitive writes later.
      */
      if (extent <= *mvg_info->extent)
        return(MagickTrue);
      *mvg_info->primitive_info=ResizeQuantumMemory(*mvg_info->primitive_info,
        extent,sizeof(**mvg_info->primitive_info));
      if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
        {
          *mvg_info->extent=extent;
          return(MagickTrue);
        }
    }
  /*
    Reallocation failed (or the requested extent overflowed); allocate a
    single primitive to facilitate unwinding.  On the overflow path the old
    buffer is still live, so release it before replacing the pointer (it is
    NULL after a failed resize, making the release a no-op then).
  */
  if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
    *mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(
      *mvg_info->primitive_info);
  (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
    ResourceLimitError,"MemoryAllocationFailed","`%s'","");
  *mvg_info->primitive_info=AcquireCriticalMemory(
    sizeof(**mvg_info->primitive_info));
  (void) memset(*mvg_info->primitive_info,0,sizeof(**mvg_info->primitive_info));
  *mvg_info->extent=1;
  return(MagickFalse);
}
static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
  char
    *token;

  const char
    *q;

  size_t
    extent;

  SplayTreeInfo
    *macros;

  /*
    Scan graphic primitives for definitions and classes.

    Pre-scans the MVG stream for `push ... "name"` blocks and records each
    named block's body in a splay tree keyed by name, so later `class` /
    `clip-path` / `mask` references can inject the macro text.  Returns NULL
    when no primitive string is supplied; otherwise the caller owns the
    returned tree.
  */
  if (primitive == (const char *) NULL)
    return((SplayTreeInfo *) NULL);
  macros=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  token=AcquireString(primitive);
  extent=strlen(token)+MagickPathExtent;
  for (q=primitive; *q != '\0'; )
  {
    GetNextToken(q,&q,extent,token);
    if (*token == '\0')
      break;
    if (*token == '#')
      {
        /*
          Skip comment.
        */
        while ((*q != '\n') && (*q != '\0'))
          q++;
        continue;
      }
    if (LocaleCompare("push",token) == 0)
      {
        register const char
          *end,
          *start;

        GetNextToken(q,&q,extent,token);
        if (*q == '"')
          {
            char
              name[MagickPathExtent];

            const char
              *p;

            ssize_t
              n;

            /*
              Named macro (e.g. push graphic-context "wheel").  A second
              cursor `p` walks forward from `start` counting nested
              push/pop pairs until the matching pop is found; `n` is the
              nesting depth relative to this block.
            */
            GetNextToken(q,&q,extent,token);
            start=q;
            (void) CopyMagickString(name,token,MagickPathExtent);
            n=0;
            /*
              NOTE(review): the condition tests *q, which does not change
              inside this loop (only p advances) — the loop exits via the
              break statements below.
            */
            for (p=q; *q != '\0'; )
            {
              GetNextToken(p,&p,extent,token);
              if (*token == '\0')
                break;
              if (*token == '#')
                {
                  /*
                    Skip comment.
                  */
                  while ((*p != '\n') && (*p != '\0'))
                    p++;
                  continue;
                }
              if (LocaleCompare(token,"pop") == 0)
                {
                  /* Remember where the body text ends (before "pop"). */
                  end=p-strlen(token)-1;
                  n--;
                }
              if (LocaleCompare(token,"push") == 0)
                n++;
              if (n < 0)
                {
                  char
                    *macro;

                  /*
                    Extract macro: copy [start,end) and register it under
                    `name`; the splay tree owns the ConstantString copies.
                  */
                  GetNextToken(p,&p,extent,token);
                  macro=AcquireString(start);
                  macro[end-start]='\0';
                  (void) AddValueToSplayTree(macros,ConstantString(name),
                    ConstantString(macro));
                  macro=DestroyString(macro);
                  break;
                }
            }
          }
      }
  }
  token=DestroyString(token);
  return(macros);
}
static inline MagickBooleanType IsPoint(const char *point)
{
  char
    *end;

  double
    value;

  /*
    Return MagickTrue when the string begins with a parsable numeric value.
    A failed parse leaves `end` at the start of the string and yields 0.0,
    which is the only combination reported as MagickFalse.
  */
  value=StringToDouble(point,&end);
  if ((end == point) && (fabs(value) < MagickEpsilon))
    return(MagickFalse);
  return(MagickTrue);
}
static inline void TracePoint(PrimitiveInfo *primitive_info,
  const PointInfo point)
{
  /*
    Record a single point primitive: exactly one coordinate and not part of
    a closed subpath.
  */
  primitive_info->point=point;
  primitive_info->closed_subpath=MagickFalse;
  primitive_info->coordinates=1;
}
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
ExceptionInfo *exception)
{
#define RenderImageTag "Render/Image"
AffineMatrix
affine,
current;
char
keyword[MagickPathExtent],
geometry[MagickPathExtent],
*next_token,
pattern[MagickPathExtent],
*primitive,
*token;
const char
*q;
double
angle,
coordinates,
cursor,
factor,
primitive_extent;
DrawInfo
**graphic_context;
MagickBooleanType
proceed;
MagickStatusType
status;
MVGInfo
mvg_info;
PointInfo
point;
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register const char
*p;
register ssize_t
i,
x;
SegmentInfo
bounds;
size_t
extent,
number_points,
number_stops;
SplayTreeInfo
*macros;
ssize_t
defsDepth,
j,
k,
n,
symbolDepth;
StopInfo
*stops;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if ((draw_info->primitive == (char *) NULL) ||
(*draw_info->primitive == '\0'))
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image");
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (image->alpha_trait == UndefinedPixelTrait)
{
status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
if (status == MagickFalse)
return(status);
}
primitive=(char *) NULL;
if (*draw_info->primitive != '@')
primitive=AcquireString(draw_info->primitive);
else
if ((strlen(draw_info->primitive) > 1) &&
(*(draw_info->primitive+1) != '-'))
primitive=FileToString(draw_info->primitive+1,~0UL,exception);
if (primitive == (char *) NULL)
return(MagickFalse);
primitive_extent=(double) strlen(primitive);
(void) SetImageArtifact(image,"MVG",primitive);
n=0;
number_stops=0;
stops=(StopInfo *) NULL;
/*
Allocate primitive info memory.
*/
graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
primitive=DestroyString(primitive);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
number_points=4096;
primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(primitive_info,0,(size_t) number_points*
sizeof(*primitive_info));
mvg_info.primitive_info=(&primitive_info);
mvg_info.extent=(&number_points);
mvg_info.offset=0;
mvg_info.exception=exception;
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info);
graphic_context[n]->viewbox=image->page;
if ((image->page.width == 0) || (image->page.height == 0))
{
graphic_context[n]->viewbox.width=image->columns;
graphic_context[n]->viewbox.height=image->rows;
}
token=AcquireString(primitive);
extent=strlen(token)+MagickPathExtent;
cursor=0.0;
defsDepth=0;
symbolDepth=0;
macros=GetMVGMacros(primitive);
status=MagickTrue;
for (q=primitive; *q != '\0'; )
{
/*
Interpret graphic primitive.
*/
GetNextToken(q,&q,MagickPathExtent,keyword);
if (*keyword == '\0')
break;
if (*keyword == '#')
{
/*
Comment.
*/
while ((*q != '\n') && (*q != '\0'))
q++;
continue;
}
p=q-strlen(keyword)-1;
primitive_type=UndefinedPrimitive;
current=graphic_context[n]->affine;
GetAffineMatrix(&affine);
switch (*keyword)
{
case ';':
break;
case 'a':
case 'A':
{
if (LocaleCompare("affine",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.rx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.ry=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
cursor=0.0;
break;
}
if (LocaleCompare("alpha",keyword) == 0)
{
primitive_type=AlphaPrimitive;
break;
}
if (LocaleCompare("arc",keyword) == 0)
{
primitive_type=ArcPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'b':
case 'B':
{
if (LocaleCompare("bezier",keyword) == 0)
{
primitive_type=BezierPrimitive;
break;
}
if (LocaleCompare("border-color",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->border_color,exception);
break;
}
status=MagickFalse;
break;
}
case 'c':
case 'C':
{
if (LocaleCompare("class",keyword) == 0)
{
const char
*mvg_class;
GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
mvg_class=(const char *) GetValueFromSplayTree(macros,token);
if (mvg_class != (const char *) NULL)
{
char
*elements;
ssize_t
offset;
/*
Inject class elements in stream.
*/
offset=(ssize_t) (p-primitive);
elements=AcquireString(primitive);
elements[offset]='\0';
(void) ConcatenateString(&elements,mvg_class);
(void) ConcatenateString(&elements,"\n");
(void) ConcatenateString(&elements,q);
primitive=DestroyString(primitive);
primitive=elements;
q=primitive+offset;
}
break;
}
if (LocaleCompare("clip-path",keyword) == 0)
{
const char
*clip_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
(void) CloneString(&graphic_context[n]->clip_mask,token);
clip_path=(const char *) GetValueFromSplayTree(macros,token);
if (clip_path != (const char *) NULL)
{
if (graphic_context[n]->clipping_mask != (Image *) NULL)
graphic_context[n]->clipping_mask=
DestroyImage(graphic_context[n]->clipping_mask);
graphic_context[n]->clipping_mask=DrawClippingMask(image,
graphic_context[n],token,clip_path,exception);
if (draw_info->compliance != SVGCompliance)
(void) DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
}
break;
}
if (LocaleCompare("clip-rule",keyword) == 0)
{
ssize_t
fill_rule;
GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("clip-units",keyword) == 0)
{
ssize_t
clip_units;
GetNextToken(q,&q,extent,token);
clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse,
token);
if (clip_units == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->clip_units=(ClipPathUnits) clip_units;
if (clip_units == ObjectBoundingBox)
{
GetAffineMatrix(¤t);
affine.sx=draw_info->bounds.x2;
affine.sy=draw_info->bounds.y2;
affine.tx=draw_info->bounds.x1;
affine.ty=draw_info->bounds.y1;
break;
}
break;
}
if (LocaleCompare("circle",keyword) == 0)
{
primitive_type=CirclePrimitive;
break;
}
if (LocaleCompare("color",keyword) == 0)
{
primitive_type=ColorPrimitive;
break;
}
if (LocaleCompare("compliance",keyword) == 0)
{
/*
MVG compliance associates a clipping mask with an image; SVG
compliance associates a clipping mask with a graphics context.
*/
GetNextToken(q,&q,extent,token);
graphic_context[n]->compliance=(ComplianceType) ParseCommandOption(
MagickComplianceOptions,MagickFalse,token);
break;
}
status=MagickFalse;
break;
}
case 'd':
case 'D':
{
if (LocaleCompare("decorate",keyword) == 0)
{
ssize_t
decorate;
GetNextToken(q,&q,extent,token);
decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse,
token);
if (decorate == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->decorate=(DecorationType) decorate;
break;
}
if (LocaleCompare("density",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->density,token);
break;
}
if (LocaleCompare("direction",keyword) == 0)
{
ssize_t
direction;
GetNextToken(q,&q,extent,token);
direction=ParseCommandOption(MagickDirectionOptions,MagickFalse,
token);
if (direction == -1)
status=MagickFalse;
else
graphic_context[n]->direction=(DirectionType) direction;
break;
}
status=MagickFalse;
break;
}
case 'e':
case 'E':
{
if (LocaleCompare("ellipse",keyword) == 0)
{
primitive_type=EllipsePrimitive;
break;
}
if (LocaleCompare("encoding",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->encoding,token);
break;
}
status=MagickFalse;
break;
}
case 'f':
case 'F':
{
if (LocaleCompare("fill",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->fill_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->fill,exception);
if (graphic_context[n]->fill_alpha != OpaqueAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
}
break;
}
if (LocaleCompare("fill-opacity",keyword) == 0)
{
double
opacity;
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
graphic_context[n]->fill_alpha*=opacity;
if (graphic_context[n]->fill_alpha != OpaqueAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
break;
}
if (LocaleCompare("fill-rule",keyword) == 0)
{
ssize_t
fill_rule;
GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("font",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->font,token);
if (LocaleCompare("none",token) == 0)
graphic_context[n]->font=(char *) RelinquishMagickMemory(
graphic_context[n]->font);
break;
}
if (LocaleCompare("font-family",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->family,token);
break;
}
if (LocaleCompare("font-size",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->pointsize=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("font-stretch",keyword) == 0)
{
ssize_t
stretch;
GetNextToken(q,&q,extent,token);
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token);
if (stretch == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->stretch=(StretchType) stretch;
break;
}
if (LocaleCompare("font-style",keyword) == 0)
{
ssize_t
style;
GetNextToken(q,&q,extent,token);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,token);
if (style == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->style=(StyleType) style;
break;
}
if (LocaleCompare("font-weight",keyword) == 0)
{
ssize_t
weight;
GetNextToken(q,&q,extent,token);
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(token);
graphic_context[n]->weight=(size_t) weight;
break;
}
status=MagickFalse;
break;
}
case 'g':
case 'G':
{
if (LocaleCompare("gradient-units",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("gravity",keyword) == 0)
{
ssize_t
gravity;
GetNextToken(q,&q,extent,token);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token);
if (gravity == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->gravity=(GravityType) gravity;
break;
}
status=MagickFalse;
break;
}
case 'i':
case 'I':
{
if (LocaleCompare("image",keyword) == 0)
{
ssize_t
compose;
primitive_type=ImagePrimitive;
GetNextToken(q,&q,extent,token);
compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token);
if (compose == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->compose=(CompositeOperator) compose;
break;
}
if (LocaleCompare("interline-spacing",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->interline_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("interword-spacing",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'k':
case 'K':
{
if (LocaleCompare("kerning",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->kerning=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 'l':
case 'L':
{
if (LocaleCompare("line",keyword) == 0)
{
primitive_type=LinePrimitive;
break;
}
status=MagickFalse;
break;
}
case 'm':
case 'M':
{
if (LocaleCompare("mask",keyword) == 0)
{
const char
*mask_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
GetNextToken(q,&q,extent,token);
mask_path=(const char *) GetValueFromSplayTree(macros,token);
if (mask_path != (const char *) NULL)
{
if (graphic_context[n]->composite_mask != (Image *) NULL)
graphic_context[n]->composite_mask=
DestroyImage(graphic_context[n]->composite_mask);
graphic_context[n]->composite_mask=DrawCompositeMask(image,
graphic_context[n],token,mask_path,exception);
if (draw_info->compliance != SVGCompliance)
status=SetImageMask(image,CompositePixelMask,
graphic_context[n]->composite_mask,exception);
}
break;
}
break;
}
case 'o':
case 'O':
{
if (LocaleCompare("offset",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("opacity",keyword) == 0)
{
double
opacity;
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
graphic_context[n]->fill_alpha*=opacity;
if (graphic_context[n]->fill_alpha != OpaqueAlpha)
graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha;
graphic_context[n]->stroke_alpha*=opacity;
if (graphic_context[n]->stroke_alpha != OpaqueAlpha)
graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha;
break;
}
status=MagickFalse;
break;
}
case 'p':
case 'P':
{
if (LocaleCompare("path",keyword) == 0)
{
primitive_type=PathPrimitive;
break;
}
if (LocaleCompare("point",keyword) == 0)
{
primitive_type=PointPrimitive;
break;
}
if (LocaleCompare("polyline",keyword) == 0)
{
primitive_type=PolylinePrimitive;
break;
}
if (LocaleCompare("polygon",keyword) == 0)
{
primitive_type=PolygonPrimitive;
break;
}
if (LocaleCompare("pop",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare("class",token) == 0)
break;
if (LocaleCompare("clip-path",token) == 0)
break;
if (LocaleCompare("defs",token) == 0)
{
defsDepth--;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
break;
if (LocaleCompare("graphic-context",token) == 0)
{
if (n <= 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
DrawError,"UnbalancedGraphicContextPushPop","`%s'",token);
status=MagickFalse;
n=0;
break;
}
if ((graphic_context[n]->clip_mask != (char *) NULL) &&
(draw_info->compliance != SVGCompliance))
if (LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0)
(void) SetImageMask(image,WritePixelMask,(Image *) NULL,
exception);
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
n--;
break;
}
if (LocaleCompare("mask",token) == 0)
break;
if (LocaleCompare("pattern",token) == 0)
break;
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth--;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
if (LocaleCompare("push",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare("class",token) == 0)
{
/*
Class context.
*/
for (p=q; *q != '\0'; )
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"class") != 0)
continue;
break;
}
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("clip-path",token) == 0)
{
char
name[MaxTextExtent];
const char
*clip_path;
GetNextToken(q,&q,extent,token);
(void) FormatLocaleString(name,MaxTextExtent,"%s",token);
clip_path=(const char *) GetValueFromSplayTree(macros,name);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,name,clip_path);
break;
}
if (LocaleCompare("defs",token) == 0)
{
defsDepth++;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent],
type[MagickPathExtent];
SegmentInfo
segment;
GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
GetNextToken(q,&q,extent,token);
(void) CopyMagickString(type,token,MagickPathExtent);
GetNextToken(q,&q,extent,token);
segment.x1=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
segment.y1=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
segment.x2=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
segment.y2=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (LocaleCompare(type,"radial") == 0)
{
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
}
for (p=q; *q != '\0'; )
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"gradient") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
bounds.x1=graphic_context[n]->affine.sx*segment.x1+
graphic_context[n]->affine.ry*segment.y1+
graphic_context[n]->affine.tx;
bounds.y1=graphic_context[n]->affine.rx*segment.x1+
graphic_context[n]->affine.sy*segment.y1+
graphic_context[n]->affine.ty;
bounds.x2=graphic_context[n]->affine.sx*segment.x2+
graphic_context[n]->affine.ry*segment.y2+
graphic_context[n]->affine.tx;
bounds.y2=graphic_context[n]->affine.rx*segment.x2+
graphic_context[n]->affine.sy*segment.y2+
graphic_context[n]->affine.ty;
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-type",name);
(void) SetImageArtifact(image,key,type);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%gx%g%+.15g%+.15g",
MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0),
MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0),
bounds.x1,bounds.y1);
(void) SetImageArtifact(image,key,geometry);
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("graphic-context",token) == 0)
{
n++;
graphic_context=(DrawInfo **) ResizeQuantumMemory(
graphic_context,(size_t) (n+1),sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,
graphic_context[n-1]);
if (*q == '"')
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("mask",token) == 0)
{
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("pattern",token) == 0)
{
char
key[2*MagickPathExtent],
name[MagickPathExtent];
RectangleInfo
bounds;
GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MagickPathExtent);
GetNextToken(q,&q,extent,token);
bounds.x=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
bounds.y=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
bounds.width=(size_t) floor(StringToDouble(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
bounds.height=(size_t) floor(StringToDouble(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
for (p=q; *q != '\0'; )
{
GetNextToken(q,&q,extent,token);
if (LocaleCompare(token,"pop") != 0)
continue;
GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"pattern") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) FormatLocaleString(key,MagickPathExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",
name);
(void) FormatLocaleString(geometry,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double)
bounds.height,(double) bounds.x,(double) bounds.y);
(void) SetImageArtifact(image,key,geometry);
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth++;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 'r':
case 'R':
{
if (LocaleCompare("rectangle",keyword) == 0)
{
primitive_type=RectanglePrimitive;
break;
}
if (LocaleCompare("rotate",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0)));
affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0)));
affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0))));
affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0)));
break;
}
if (LocaleCompare("roundRectangle",keyword) == 0)
{
primitive_type=RoundRectanglePrimitive;
break;
}
status=MagickFalse;
break;
}
case 's':
case 'S':
{
if (LocaleCompare("scale",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("skewX",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.ry=sin(DegreesToRadians(angle));
break;
}
if (LocaleCompare("skewY",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
affine.rx=(-tan(DegreesToRadians(angle)/2.0));
break;
}
if (LocaleCompare("stop-color",keyword) == 0)
{
PixelInfo
stop_color;
number_stops++;
if (number_stops == 1)
stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops));
else
if (number_stops > 2)
stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops,
sizeof(*stops));
if (stops == (StopInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,&stop_color,
exception);
stops[number_stops-1].color=stop_color;
GetNextToken(q,&q,extent,token);
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
stops[number_stops-1].offset=factor*StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MagickPathExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->stroke_pattern,exception);
else
{
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->stroke,exception);
if (graphic_context[n]->stroke_alpha != OpaqueAlpha)
graphic_context[n]->stroke.alpha=
graphic_context[n]->stroke_alpha;
}
break;
}
if (LocaleCompare("stroke-antialias",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("stroke-dasharray",keyword) == 0)
{
if (graphic_context[n]->dash_pattern != (double *) NULL)
graphic_context[n]->dash_pattern=(double *)
RelinquishMagickMemory(graphic_context[n]->dash_pattern);
if (IsPoint(q) != MagickFalse)
{
const char
*r;
r=q;
GetNextToken(r,&r,extent,token);
if (*token == ',')
GetNextToken(r,&r,extent,token);
for (x=0; IsPoint(token) != MagickFalse; x++)
{
GetNextToken(r,&r,extent,token);
if (*token == ',')
GetNextToken(r,&r,extent,token);
}
graphic_context[n]->dash_pattern=(double *)
AcquireQuantumMemory((size_t) (2UL*x+2UL),
sizeof(*graphic_context[n]->dash_pattern));
if (graphic_context[n]->dash_pattern == (double *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
status=MagickFalse;
break;
}
(void) memset(graphic_context[n]->dash_pattern,0,(size_t)
(2UL*x+2UL)*sizeof(*graphic_context[n]->dash_pattern));
for (j=0; j < x; j++)
{
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_pattern[j]=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
if (graphic_context[n]->dash_pattern[j] < 0.0)
status=MagickFalse;
}
if ((x & 0x01) != 0)
for ( ; j < (2*x); j++)
graphic_context[n]->dash_pattern[j]=
graphic_context[n]->dash_pattern[j-x];
graphic_context[n]->dash_pattern[j]=0.0;
break;
}
GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("stroke-dashoffset",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_offset=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
if (LocaleCompare("stroke-linecap",keyword) == 0)
{
ssize_t
linecap;
GetNextToken(q,&q,extent,token);
linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token);
if (linecap == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linecap=(LineCap) linecap;
break;
}
if (LocaleCompare("stroke-linejoin",keyword) == 0)
{
ssize_t
linejoin;
GetNextToken(q,&q,extent,token);
linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse,
token);
if (linejoin == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linejoin=(LineJoin) linejoin;
break;
}
if (LocaleCompare("stroke-miterlimit",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->miterlimit=StringToUnsignedLong(token);
break;
}
if (LocaleCompare("stroke-opacity",keyword) == 0)
{
double
opacity;
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(token,exception);
graphic_context[n]->stroke_alpha*=opacity;
if (graphic_context[n]->stroke_alpha != OpaqueAlpha)
graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha;
break;
}
if (LocaleCompare("stroke-width",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
graphic_context[n]->stroke_width=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
case 't':
case 'T':
{
if (LocaleCompare("text",keyword) == 0)
{
primitive_type=TextPrimitive;
/* affine.tx+=cursor; */
break;
}
if (LocaleCompare("text-align",keyword) == 0)
{
ssize_t
align;
GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-anchor",keyword) == 0)
{
ssize_t
align;
GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-antialias",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->text_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("text-undercolor",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
status&=QueryColorCompliance(token,AllCompliance,
&graphic_context[n]->undercolor,exception);
break;
}
if (LocaleCompare("translate",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
cursor=0.0;
break;
}
status=MagickFalse;
break;
}
case 'u':
case 'U':
{
if (LocaleCompare("use",keyword) == 0)
{
const char
*use;
/*
Get a macro from the MVG document, and "use" it here.
*/
GetNextToken(q,&q,extent,token);
use=(const char *) GetValueFromSplayTree(macros,token);
if (use != (const char *) NULL)
{
DrawInfo
*clone_info;
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
(void) CloneString(&clone_info->primitive,use);
status=DrawImage(image,clone_info,exception);
clone_info=DestroyDrawInfo(clone_info);
}
break;
}
break;
}
case 'v':
case 'V':
{
if (LocaleCompare("viewbox",keyword) == 0)
{
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.x=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.y=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.width=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.height=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(token,exception);
break;
}
status=MagickFalse;
break;
}
default:
{
status=MagickFalse;
break;
}
}
if (status == MagickFalse)
break;
if ((fabs(affine.sx-1.0) >= MagickEpsilon) ||
(fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) ||
(fabs(affine.sy-1.0) >= MagickEpsilon) ||
(fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon))
{
graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx;
graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx;
graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy;
graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy;
graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+
current.tx;
graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+
current.ty;
}
if (primitive_type == UndefinedPrimitive)
{
if (*q == '\0')
{
if (number_stops > 1)
{
GradientType
type;
type=LinearGradient;
if (draw_info->gradient.type == RadialGradient)
type=RadialGradient;
(void) GradientImage(image,type,PadSpread,stops,number_stops,
exception);
}
if (number_stops > 0)
stops=(StopInfo *) RelinquishMagickMemory(stops);
}
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int)
(q-p-1),p);
continue;
}
/*
Parse the primitive attributes.
*/
i=0;
mvg_info.offset=i;
j=0;
primitive_info[0].point.x=0.0;
primitive_info[0].point.y=0.0;
primitive_info[0].coordinates=0;
primitive_info[0].method=FloodfillMethod;
primitive_info[0].closed_subpath=MagickFalse;
for (x=0; *q != '\0'; x++)
{
/*
Define points.
*/
if (IsPoint(q) == MagickFalse)
break;
GetNextToken(q,&q,extent,token);
point.x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,&q,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
point.y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(token,exception);
GetNextToken(q,(const char **) NULL,extent,token);
if (*token == ',')
GetNextToken(q,&q,extent,token);
primitive_info[i].primitive=primitive_type;
primitive_info[i].point=point;
primitive_info[i].coordinates=0;
primitive_info[i].method=FloodfillMethod;
primitive_info[i].closed_subpath=MagickFalse;
i++;
mvg_info.offset=i;
if (i < (ssize_t) number_points)
continue;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
if (status == MagickFalse)
break;
primitive_info[j].primitive=primitive_type;
primitive_info[j].coordinates=(size_t) x;
primitive_info[j].method=FloodfillMethod;
primitive_info[j].closed_subpath=MagickFalse;
primitive_info[j].text=(char *) NULL;
/*
Circumscribe primitive within a circle.
*/
bounds.x1=primitive_info[j].point.x;
bounds.y1=primitive_info[j].point.y;
bounds.x2=primitive_info[j].point.x;
bounds.y2=primitive_info[j].point.y;
for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++)
{
point=primitive_info[j+k].point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.y < bounds.y1)
bounds.y1=point.y;
if (point.x > bounds.x2)
bounds.x2=point.x;
if (point.y > bounds.y2)
bounds.y2=point.y;
}
/*
Speculate how many points our primitive might consume.
*/
coordinates=(double) primitive_info[j].coordinates;
switch (primitive_type)
{
case RectanglePrimitive:
{
coordinates*=5.0;
break;
}
case RoundRectanglePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot((double) alpha,(double) beta);
coordinates*=5.0;
coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0*
BezierQuantum+360.0;
break;
}
case BezierPrimitive:
{
coordinates=(double) (BezierQuantum*primitive_info[j].coordinates);
if (primitive_info[j].coordinates > (107*BezierQuantum))
{
(void) ThrowMagickException(exception,GetMagickModule(),DrawError,
"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
break;
}
break;
}
case PathPrimitive:
{
char
*s,
*t;
GetNextToken(q,&q,extent,token);
coordinates=1.0;
t=token;
for (s=token; *s != '\0'; s=t)
{
double
value;
value=StringToDouble(s,&t);
(void) value;
if (s == t)
{
t++;
continue;
}
coordinates++;
}
for (s=token; *s != '\0'; s++)
if (strspn(s,"AaCcQqSsTt") != 0)
coordinates+=(20.0*BezierQuantum)+360.0;
break;
}
case CirclePrimitive:
case ArcPrimitive:
case EllipsePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot(alpha,beta);
coordinates=2.0*(ceil(MagickPI*radius))+6.0*BezierQuantum+360.0;
if (coordinates > (107*BezierQuantum))
{
(void) ThrowMagickException(exception,GetMagickModule(),DrawError,
"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
}
break;
}
default:
break;
}
if (coordinates > MaxBezierCoordinates)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",token);
status=MagickFalse;
}
if (status == MagickFalse)
break;
if (((size_t) (i+coordinates)) >= number_points)
{
/*
Resize based on speculative points required by primitive.
*/
number_points+=coordinates+1;
if (number_points < (size_t) coordinates)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
mvg_info.offset=i;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
status&=CheckPrimitiveExtent(&mvg_info,4096);
if (status == MagickFalse)
break;
mvg_info.offset=j;
switch (primitive_type)
{
case PointPrimitive:
default:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
TracePoint(primitive_info+j,primitive_info[j].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case LinePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
TraceLine(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RectanglePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
TraceRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RoundRectanglePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+2].point.x < 0.0) ||
(primitive_info[j+2].point.y < 0.0))
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0)
{
status=MagickFalse;
break;
}
TraceRoundRectangle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case ArcPrimitive:
{
if (primitive_info[j].coordinates != 3)
{
primitive_type=UndefinedPrimitive;
break;
}
TraceArc(&mvg_info,primitive_info[j].point,primitive_info[j+1].point,
primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case EllipsePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x < 0.0) ||
(primitive_info[j+1].point.y < 0.0))
{
status=MagickFalse;
break;
}
TraceEllipse(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case CirclePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
TraceCircle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PolylinePrimitive:
{
if (primitive_info[j].coordinates < 1)
{
status=MagickFalse;
break;
}
break;
}
case PolygonPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
primitive_info[i]=primitive_info[j];
primitive_info[i].coordinates=0;
primitive_info[j].coordinates++;
primitive_info[j].closed_subpath=MagickTrue;
i++;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
TraceBezier(&mvg_info,primitive_info[j].coordinates);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
coordinates=(double) TracePath(&mvg_info,token,exception);
if (coordinates == 0.0)
{
status=MagickFalse;
break;
}
i=(ssize_t) (j+coordinates);
break;
}
case AlphaPrimitive:
case ColorPrimitive:
{
ssize_t
method;
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
GetNextToken(q,&q,extent,token);
method=ParseCommandOption(MagickMethodOptions,MagickFalse,token);
if (method == -1)
{
status=MagickFalse;
break;
}
primitive_info[j].method=(PaintMethod) method;
break;
}
case TextPrimitive:
{
DrawInfo
*clone_info;
TypeMetric
metrics;
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
if (*token != ',')
GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
/*
Compute text cursor offset.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
if (clone_info->density != (char *) NULL)
clone_info->density=DestroyString(clone_info->density);
clone_info->render=MagickFalse;
clone_info->text=AcquireString(token);
(void) ConcatenateString(&clone_info->text," ");
status&=GetTypeMetrics(image,clone_info,&metrics,exception);
clone_info=DestroyDrawInfo(clone_info);
cursor+=metrics.width;
break;
}
case ImagePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
break;
}
}
mvg_info.offset=i;
if (primitive_info == (PrimitiveInfo *) NULL)
break;
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),
p);
if (status == MagickFalse)
break;
primitive_info[i].primitive=UndefinedPrimitive;
if (i == 0)
continue;
/*
Transform points.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+
graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx;
primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+
graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty;
point=primitive_info[i].point;
if (point.x < graphic_context[n]->bounds.x1)
graphic_context[n]->bounds.x1=point.x;
if (point.y < graphic_context[n]->bounds.y1)
graphic_context[n]->bounds.y1=point.y;
if (point.x > graphic_context[n]->bounds.x2)
graphic_context[n]->bounds.x2=point.x;
if (point.y > graphic_context[n]->bounds.y2)
graphic_context[n]->bounds.y2=point.y;
if (primitive_info[i].primitive == ImagePrimitive)
break;
if (i >= (ssize_t) number_points)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
}
if (graphic_context[n]->render != MagickFalse)
{
if ((n != 0) && (draw_info->compliance != SVGCompliance) &&
(graphic_context[n]->clip_mask != (char *) NULL) &&
(LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0))
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask,exception);
status&=DrawPrimitive(image,graphic_context[n],primitive_info,
exception);
}
proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType)
primitive_extent);
if (proceed == MagickFalse)
break;
if (status == 0)
break;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image");
/*
Relinquish resources.
*/
macros=DestroySplayTree(macros);
token=DestroyString(token);
if (primitive_info != (PrimitiveInfo *) NULL)
{
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
}
primitive=DestroyString(primitive);
if (stops != (StopInfo *) NULL)
stops=(StopInfo *) RelinquishMagickMemory(stops);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
if (status == MagickFalse)
ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition",
keyword);
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P a t t e r n P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPatternPath() draws a pattern.
%
% The format of the DrawPatternPath method is:
%
% MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
% const char *name,Image **pattern,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the pattern name.
%
%    o pattern: the address of the pattern image; any previous pattern is
%      destroyed and replaced by the rendered pattern.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
  const DrawInfo *draw_info,const char *name,Image **pattern,
  ExceptionInfo *exception)
{
  char
    key[MagickPathExtent];

  const char
    *geometry,
    *path,
    *type;

  DrawInfo
    *pattern_info;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  /*
    Look up the pattern MVG and its geometry in the image artifacts; fail
    quietly when either is absent.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  assert(name != (const char *) NULL);
  (void) FormatLocaleString(key,MagickPathExtent,"%s",name);
  path=GetImageArtifact(image,key);
  if (path == (const char *) NULL)
    return(MagickFalse);
  (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry",name);
  geometry=GetImageArtifact(image,key);
  if (geometry == (const char *) NULL)
    return(MagickFalse);
  /*
    Replace any previous pattern image with a fresh canvas of the requested
    geometry, filled with a transparent-black background.
  */
  if ((*pattern) != (Image *) NULL)
    *pattern=DestroyImage(*pattern);
  image_info=AcquireImageInfo();
  image_info->size=AcquireString(geometry);
  *pattern=AcquireImage(image_info,exception);
  image_info=DestroyImageInfo(image_info);
  (void) QueryColorCompliance("#000000ff",AllCompliance,
    &(*pattern)->background_color,exception);
  (void) SetImageBackgroundColor(*pattern,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "begin pattern-path %s %s",name,geometry);
  /*
    Render the pattern path onto the canvas using a pattern-free clone of
    draw_info; an optional "<name>-type" artifact selects the gradient type.
  */
  pattern_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  pattern_info->fill_pattern=NewImageList();
  pattern_info->stroke_pattern=NewImageList();
  (void) FormatLocaleString(key,MagickPathExtent,"%s-type",name);
  type=GetImageArtifact(image,key);
  if (type != (const char *) NULL)
    pattern_info->gradient.type=(GradientType) ParseCommandOption(
      MagickGradientOptions,MagickFalse,type);
  (void) CloneString(&pattern_info->primitive,path);
  status=DrawImage(*pattern,pattern_info,exception);
  pattern_info=DestroyDrawInfo(pattern_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w P o l y g o n P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPolygonPrimitive() draws a polygon on the image.
%
% The format of the DrawPolygonPrimitive method is:
%
% MagickBooleanType DrawPolygonPrimitive(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
  register ssize_t
    n;

  /*
    Release each per-thread polygon structure, then the table itself; always
    returns NULL so callers can reassign in one statement.
  */
  assert(polygon_info != (PolygonInfo **) NULL);
  n=0;
  while (n < (ssize_t) GetMagickResourceLimit(ThreadResource))
  {
    if (polygon_info[n] != (PolygonInfo *) NULL)
      polygon_info[n]=DestroyPolygonInfo(polygon_info[n]);
    n++;
  }
  polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info);
  return(polygon_info);
}
static PolygonInfo **AcquirePolygonThreadSet(
const PrimitiveInfo *primitive_info)
{
PathInfo
*magick_restrict path_info;
PolygonInfo
**polygon_info;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
sizeof(*polygon_info));
if (polygon_info == (PolygonInfo **) NULL)
return((PolygonInfo **) NULL);
(void) memset(polygon_info,0,number_threads*sizeof(*polygon_info));
path_info=ConvertPrimitiveToPath(primitive_info);
if (path_info == (PathInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
for (i=0; i < (ssize_t) number_threads; i++)
{
polygon_info[i]=ConvertPathToPolygon(path_info);
if (polygon_info[i] == (PolygonInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
}
path_info=(PathInfo *) RelinquishMagickMemory(path_info);
return(polygon_info);
}
/*
  GetFillAlpha() returns the fill opacity (0.0 .. 1.0) of the pixel at (x,y)
  with respect to the polygon edges in polygon_info, and stores the stroke
  opacity in *stroke_alpha.  'mid' is half the (affine-expanded) stroke width;
  'fill' enables fill computation and 'fill_rule' selects even-odd vs.
  non-zero winding.  NOTE: this routine mutates polygon_info (edge culling via
  DestroyEdge, and the per-edge scanline/highwater caches), so each thread
  must own its own copy.
*/
static double GetFillAlpha(PolygonInfo *polygon_info,const double mid,
  const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
  const ssize_t y,double *stroke_alpha)
{
  double
    alpha,
    beta,
    distance,       /* squared distance from (x,y) to the nearest segment */
    subpath_alpha;
  PointInfo
    delta;
  register const PointInfo
    *q;
  register EdgeInfo
    *p;
  register ssize_t
    i;
  ssize_t
    j,
    winding_number;
  /*
    Compute fill & stroke opacity for this (x,y) point.
  */
  *stroke_alpha=0.0;
  subpath_alpha=0.0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    /* edges are y-sorted: once an edge starts below y (plus stroke pad), stop */
    if ((double) y <= (p->bounds.y1-mid-0.5))
      break;
    if ((double) y > (p->bounds.y2+mid+0.5))
      {
        /* edge entirely above the current scanline: retire it permanently */
        (void) DestroyEdge(polygon_info,(size_t) j);
        continue;
      }
    if (((double) x <= (p->bounds.x1-mid-0.5)) ||
        ((double) x > (p->bounds.x2+mid+0.5)))
      continue;
    /* resume from the highwater mark cached for this scanline */
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) p->number_points; i++)
    {
      if ((double) y <= (p->points[i-1].y-mid-0.5))
        break;
      if ((double) y > (p->points[i].y+mid+0.5))
        continue;
      if (p->scanline != (double) y)
        {
          p->scanline=(double) y;
          p->highwater=(size_t) i;
        }
      /*
        Compute distance between a point and an edge: project (x,y) onto the
        segment q..q+1 and take the squared distance to the nearest of the
        projection, or either endpoint when the projection falls outside.
      */
      q=p->points+i-1;
      delta.x=(q+1)->x-q->x;
      delta.y=(q+1)->y-q->y;
      beta=delta.x*(x-q->x)+delta.y*(y-q->y);
      if (beta <= 0.0)
        {
          delta.x=(double) x-q->x;
          delta.y=(double) y-q->y;
          distance=delta.x*delta.x+delta.y*delta.y;
        }
      else
        {
          alpha=delta.x*delta.x+delta.y*delta.y;
          if (beta >= alpha)
            {
              delta.x=(double) x-(q+1)->x;
              delta.y=(double) y-(q+1)->y;
              distance=delta.x*delta.x+delta.y*delta.y;
            }
          else
            {
              /* perpendicular distance via the 2-D cross product */
              alpha=PerceptibleReciprocal(alpha);
              beta=delta.x*(y-q->y)-delta.y*(x-q->x);
              distance=alpha*beta*beta;
            }
        }
      /*
        Compute stroke & subpath opacity.  Ghostlines (synthetic closing
        segments) contribute no stroke.
      */
      beta=0.0;
      if (p->ghostline == MagickFalse)
        {
          alpha=mid+0.5;
          if ((*stroke_alpha < 1.0) &&
              (distance <= ((alpha+0.25)*(alpha+0.25))))
            {
              alpha=mid-0.5;
              if (distance <= ((alpha+0.25)*(alpha+0.25)))
                *stroke_alpha=1.0;
              else
                {
                  /* partial coverage: fade by distance beyond the stroke edge */
                  beta=1.0;
                  if (fabs(distance-1.0) >= MagickEpsilon)
                    beta=sqrt((double) distance);
                  alpha=beta-mid-0.5;
                  if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25)))
                    *stroke_alpha=(alpha-0.25)*(alpha-0.25);
                }
            }
        }
      if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0))
        continue;
      if (distance <= 0.0)
        {
          subpath_alpha=1.0;
          continue;
        }
      /* NOTE(review): redundant — distance > 1.0 was already filtered above */
      if (distance > 1.0)
        continue;
      if (fabs(beta) < MagickEpsilon)
        {
          beta=1.0;
          if (fabs(distance-1.0) >= MagickEpsilon)
            beta=sqrt(distance);
        }
      alpha=beta-1.0;
      if (subpath_alpha < (alpha*alpha))
        subpath_alpha=alpha*alpha;
    }
  }
  /*
    Compute fill opacity.
  */
  if (fill == MagickFalse)
    return(0.0);
  if (subpath_alpha >= 1.0)
    return(1.0);
  /*
    Determine winding number: count signed edge crossings to the left of x on
    this scanline.
  */
  winding_number=0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    if ((double) y <= p->bounds.y1)
      break;
    if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
      continue;
    if ((double) x > p->bounds.x2)
      {
        /* edge lies wholly left of x: counts as one crossing */
        winding_number+=p->direction ? 1 : -1;
        continue;
      }
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) (p->number_points-1); i++)
      if ((double) y <= p->points[i].y)
        break;
    q=p->points+i-1;
    /* side-of-line test for the segment that straddles this scanline */
    if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
      winding_number+=p->direction ? 1 : -1;
  }
  if (fill_rule != NonZeroRule)
    {
      /* even-odd rule */
      if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
        return(1.0);
    }
  else
    if (MagickAbsoluteValue(winding_number) != 0)
      return(1.0);
  return(subpath_alpha);
}
/*
  DrawPolygonPrimitive() rasterizes primitive_info (a point list terminated by
  UndefinedPrimitive) onto image: it computes the stroke-padded bounding box,
  clamps it to the image, then composites fill and stroke coverage per pixel
  using per-thread PolygonInfo state.  Returns MagickTrue on success.
*/
static MagickBooleanType DrawPolygonPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;
  MagickBooleanType
    fill,
    status;
  double
    mid;     /* half the affine-expanded stroke width */
  PolygonInfo
    **magick_restrict polygon_info;
  register EdgeInfo
    *p;
  register ssize_t
    i;
  SegmentInfo
    bounds;
  ssize_t
    start_y,
    stop_y,
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  assert(primitive_info != (PrimitiveInfo *) NULL);
  /* nothing to rasterize for fewer than two coordinates */
  if (primitive_info->coordinates <= 1)
    return(MagickTrue);
  /*
    Compute bounding box.  One PolygonInfo per thread: GetFillAlpha() mutates
    its edge caches, so threads must not share.
  */
  polygon_info=AcquirePolygonThreadSet(primitive_info);
  if (polygon_info == (PolygonInfo **) NULL)
    return(MagickFalse);
  DisableMSCWarning(4127)
  if (0)
    /* debugging aid: render edge/bound rectangles instead of the polygon */
    DrawBoundingRectangles(image,draw_info,polygon_info[0],exception);
  RestoreMSCWarning
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon");
  fill=(primitive_info->method == FillToBorderMethod) ||
    (primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse;
  mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
  /* union of all edge bounding boxes, padded by the stroke half-width */
  bounds=polygon_info[0]->edges[0].bounds;
  for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
  {
    p=polygon_info[0]->edges+i;
    if (p->bounds.x1 < bounds.x1)
      bounds.x1=p->bounds.x1;
    if (p->bounds.y1 < bounds.y1)
      bounds.y1=p->bounds.y1;
    if (p->bounds.x2 > bounds.x2)
      bounds.x2=p->bounds.x2;
    if (p->bounds.y2 > bounds.y2)
      bounds.y2=p->bounds.y2;
  }
  bounds.x1-=(mid+1.0);
  bounds.y1-=(mid+1.0);
  bounds.x2+=(mid+1.0);
  bounds.y2+=(mid+1.0);
  if ((bounds.x1 >= (double) image->columns) ||
      (bounds.y1 >= (double) image->rows) ||
      (bounds.x2 <= 0.0) || (bounds.y2 <= 0.0))
    {
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      return(MagickTrue);  /* virtual polygon */
    }
  /* clamp bounds to [0, columns-1] x [0, rows-1] */
  bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x1;
  bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y1;
  bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ?
    (double) image->columns-1.0 : bounds.x2;
  bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ?
    (double) image->rows-1.0 : bounds.y2;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  /*
    NOTE(review): the coordinates == 1 test appears unreachable after the
    early return above (coordinates <= 1); the number_edges == 0 test is what
    actually selects this branch — confirm before removing.
  */
  if ((primitive_info->coordinates == 1) ||
      (polygon_info[0]->number_edges == 0))
    {
      /*
        Draw point: set only the single pixel matching the primitive's
        (rounded) coordinate to the fill color.
      */
      start_y=(ssize_t) ceil(bounds.y1-0.5);
      stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
      for (y=start_y; y <= stop_y; y++)
      {
        MagickBooleanType
          sync;
        PixelInfo
          pixel;
        register ssize_t
          x;
        register Quantum
          *magick_restrict q;
        ssize_t
          start_x,
          stop_x;
        if (status == MagickFalse)
          continue;
        start_x=(ssize_t) ceil(bounds.x1-0.5);
        stop_x=(ssize_t) floor(bounds.x2+0.5);
        x=start_x;
        q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        GetPixelInfo(image,&pixel);
        for ( ; x <= stop_x; x++)
        {
          if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) &&
              (y == (ssize_t) ceil(primitive_info->point.y-0.5)))
            {
              GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
            }
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " end draw-polygon");
      return(status);
    }
  /*
    Draw polygon or line: one scanline row per loop iteration, parallelized
    across rows; each thread uses its own polygon_info[id].
  */
  start_y=(ssize_t) ceil(bounds.y1-0.5);
  stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
  for (y=start_y; y <= stop_y; y++)
  {
    const int
      id = GetOpenMPThreadId();
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    ssize_t
      start_x,
      stop_x;
    if (status == MagickFalse)
      continue;
    start_x=(ssize_t) ceil(bounds.x1-0.5);
    stop_x=(ssize_t) floor(bounds.x2+0.5);
    q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+
      1),1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=start_x; x <= stop_x; x++)
    {
      double
        fill_alpha,
        stroke_alpha;
      PixelInfo
        fill_color,
        stroke_color;
      /*
        Fill and/or stroke: compute coverage, then composite fill first and
        stroke over it.
      */
      fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule,
        x,y,&stroke_alpha);
      if (draw_info->stroke_antialias == MagickFalse)
        {
          /* no antialiasing: snap partial coverage to all-or-nothing */
          fill_alpha=fill_alpha > 0.25 ? 1.0 : 0.0;
          stroke_alpha=stroke_alpha > 0.25 ? 1.0 : 0.0;
        }
      GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,fill_alpha*fill_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception);
      CompositePixelOver(image,&stroke_color,stroke_alpha*stroke_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  polygon_info=DestroyPolygonThreadSet(polygon_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon");
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
% The format of the DrawPrimitive method is:
%
% MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
% PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
  /*
    Emit DrawEvent log records describing a primitive.  Single-point
    primitives (alpha, color, image, point, text) are logged as one record;
    path-style primitives are logged vertex-by-vertex, flagging vertices
    that repeat the previous point and whether each subpath ends on its
    starting point.

    Fix: the vertex loop assigned point=primitive_info[i].point twice per
    iteration (once before and once after the subpath-begin check); the
    second, redundant assignment has been removed.
  */
  const char
    *methods[] =
    {
      "point",
      "replace",
      "floodfill",
      "filltoborder",
      "reset",
      "?"
    };

  PointInfo
    p,
    q,
    point;

  register ssize_t
    i,
    x;

  ssize_t
    coordinates,
    y;

  x=(ssize_t) ceil(primitive_info->point.x-0.5);
  y=(ssize_t) ceil(primitive_info->point.y-0.5);
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ColorPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ImagePrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ImagePrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    case PointPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case TextPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "TextPrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    default:
      break;
  }
  /*
    Path-style primitive: walk the vertex list, one subpath at a time.
  */
  coordinates=0;
  p=primitive_info[0].point;
  q.x=(-1.0);  /* sentinel: no previous vertex yet */
  q.y=(-1.0);
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    point=primitive_info[i].point;
    if (coordinates <= 0)
      {
        /*
          Start of a new subpath: record its vertex count and first point.
        */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " begin open (%.20g)",(double) coordinates);
        p=point;
      }
    if ((fabs(q.x-point.x) >= MagickEpsilon) ||
        (fabs(q.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
    q=point;
    coordinates--;
    if (coordinates > 0)
      continue;
    /*
      Subpath exhausted: report whether it ended back on its first point.
    */
    if ((fabs(p.x-point.x) >= MagickEpsilon) ||
        (fabs(p.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),"  end last (%.20g)",
        (double) coordinates);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),"  end open (%.20g)",
        (double) coordinates);
  }
}
MagickExport MagickBooleanType DrawPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  /*
    Render a single graphic primitive onto the image.  Point-wise
    primitives (alpha, color, point) are painted directly through a cache
    view; image primitives composite an embedded or referenced image; text
    primitives delegate to AnnotateImage(); everything else is treated as a
    traced path and rendered via the polygon/stroke machinery.  Returns
    MagickTrue only if every sub-operation succeeded.
  */
  CacheView
    *image_view;

  MagickStatusType
    status;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " begin draw-primitive");
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx,
        draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
        draw_info->affine.tx,draw_info->affine.ty);
    }
  /*
    A non-gray fill or stroke cannot be rendered into a grayscale image:
    promote to sRGB first.
  */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsPixelInfoGray(&draw_info->fill) == MagickFalse) ||
       (IsPixelInfoGray(&draw_info->stroke) == MagickFalse)))
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  status=MagickTrue;
  /*
    SVG rendering installs the clip path as a write mask and the composite
    mask for the duration of this primitive; both are removed on exit.
  */
  if (draw_info->compliance == SVGCompliance)
    {
      status=SetImageMask(image,WritePixelMask,draw_info->clipping_mask,
        exception);
      status&=SetImageMask(image,CompositePixelMask,draw_info->composite_mask,
        exception);
    }
  x=(ssize_t) ceil(primitive_info->point.x-0.5);
  y=(ssize_t) ceil(primitive_info->point.y-0.5);
  image_view=AcquireAuthenticCacheView(image,exception);
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      /*
        Modify only the alpha channel, by point, flood-fill, global reset,
        or replace-matching-color.
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          PixelInfo
            pixel;

          register Quantum
            *q;

          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          /*
            Set the alpha of every pixel fuzzily matching the color at
            (x,y).
          */
          MagickBooleanType
            sync;

          PixelInfo
            pixel,
            target;

          (void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          /*
            Flood-fill alpha from (x,y); FillToBorder fills up to the
            border color rather than matching the seed color.
          */
          ChannelType
            channel_mask;

          PixelInfo
            target;

          (void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          /* restrict the flood-fill to the alpha channel only */
          channel_mask=SetImageChannelMask(image,AlphaChannel);
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          (void) SetImageChannelMask(image,channel_mask);
          break;
        }
        case ResetMethod:
        {
          /*
            Set the alpha of every pixel in the image.
          */
          MagickBooleanType
            sync;

          PixelInfo
            pixel;

          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ColorPrimitive:
    {
      /*
        Same four methods as AlphaPrimitive, but writing the full fill
        color instead of just alpha.
      */
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          PixelInfo
            pixel;

          register Quantum
            *q;

          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetPixelInfo(image,&pixel);
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelViaPixelInfo(image,&pixel,q);
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          MagickBooleanType
            sync;

          PixelInfo
            pixel,
            target;

          (void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          PixelInfo
            target;

          (void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          break;
        }
        case ResetMethod:
        {
          MagickBooleanType
            sync;

          PixelInfo
            pixel;

          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ImagePrimitive:
    {
      /*
        Read an image (inline "data:" URI or by filename), optionally
        resize it to the primitive's second point, then composite it at
        (x,y) honoring gravity and the current affine translation.
      */
      AffineMatrix
        affine;

      char
        composite_geometry[MagickPathExtent];

      Image
        *composite_image;

      ImageInfo
        *clone_info;

      RectangleInfo
        geometry;

      ssize_t
        x1,
        y1;

      if (primitive_info->text == (char *) NULL)
        break;
      clone_info=AcquireImageInfo();
      if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
        composite_image=ReadInlineImage(clone_info,primitive_info->text,
          exception);
      else
        {
          (void) CopyMagickString(clone_info->filename,primitive_info->text,
            MagickPathExtent);
          composite_image=ReadImage(clone_info,exception);
        }
      clone_info=DestroyImageInfo(clone_info);
      if (composite_image == (Image *) NULL)
        {
          status=0;
          break;
        }
      (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
        NULL,(void *) NULL);
      x1=(ssize_t) ceil(primitive_info[1].point.x-0.5);
      y1=(ssize_t) ceil(primitive_info[1].point.y-0.5);
      if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
          ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
        {
          /*
            Resize image.
          */
          (void) FormatLocaleString(composite_geometry,MagickPathExtent,
            "%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y);
          composite_image->filter=image->filter;
          (void) TransformImage(&composite_image,(char *) NULL,
            composite_geometry,exception);
        }
      if (composite_image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(composite_image,OpaqueAlphaChannel,
          exception);
      if (draw_info->alpha != OpaqueAlpha)
        (void) SetImageAlpha(composite_image,draw_info->alpha,exception);
      SetGeometry(image,&geometry);
      image->gravity=draw_info->gravity;
      geometry.x=x;
      geometry.y=y;
      (void) FormatLocaleString(composite_geometry,MagickPathExtent,
        "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
        composite_image->rows,(double) geometry.x,(double) geometry.y);
      (void) ParseGravityGeometry(image,composite_geometry,&geometry,exception);
      affine=draw_info->affine;
      affine.tx=(double) geometry.x;
      affine.ty=(double) geometry.y;
      composite_image->interpolate=image->interpolate;
      status&=DrawAffineImage(image,composite_image,&affine,exception);
      composite_image=DestroyImage(composite_image);
      break;
    }
    case PointPrimitive:
    {
      /*
        Composite the fill color over a single pixel, clipped to bounds.
      */
      PixelInfo
        fill_color;

      register Quantum
        *q;

      if ((y < 0) || (y >= (ssize_t) image->rows))
        break;
      if ((x < 0) || (x >= (ssize_t) image->columns))
        break;
      q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
      if (q == (Quantum *) NULL)
        break;
      GetFillColor(draw_info,x,y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
      break;
    }
    case TextPrimitive:
    {
      /*
        Delegate text rendering to AnnotateImage() with a clone of the
        draw info positioned at the primitive's point.
      */
      char
        geometry[MagickPathExtent];

      DrawInfo
        *clone_info;

      if (primitive_info->text == (char *) NULL)
        break;
      clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
      (void) CloneString(&clone_info->text,primitive_info->text);
      (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
        primitive_info->point.x,primitive_info->point.y);
      (void) CloneString(&clone_info->geometry,geometry);
      status&=AnnotateImage(image,clone_info,exception);
      clone_info=DestroyDrawInfo(clone_info);
      break;
    }
    default:
    {
      /*
        Path-style primitive: fill first (with stroke suppressed), then
        render the stroke as either a dash polygon, a separately-traced
        stroke polygon, or as part of the polygon itself.
      */
      double
        mid,
        scale;

      DrawInfo
        *clone_info;

      if (IsEventLogging() != MagickFalse)
        LogPrimitiveInfo(primitive_info);
      scale=ExpandAffine(&draw_info->affine);
      if ((draw_info->dash_pattern != (double *) NULL) &&
          (fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) &&
          (fabs(scale*draw_info->stroke_width) >= MagickEpsilon) &&
          (draw_info->stroke.alpha != (Quantum) TransparentAlpha))
        {
          /*
            Draw dash polygon.
          */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          (void) DrawDashPolygon(draw_info,primitive_info,image,exception);
          break;
        }
      mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
      if ((mid > 1.0) &&
          ((draw_info->stroke.alpha != (Quantum) TransparentAlpha) ||
           (draw_info->stroke_pattern != (Image *) NULL)))
        {
          double
            x,
            y;

          MagickBooleanType
            closed_path;

          /*
            Draw strokes while respecting line cap/join attributes.
          */
          closed_path=primitive_info[0].closed_subpath;
          i=(ssize_t) primitive_info[0].coordinates;
          x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x);
          y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y);
          if ((x < MagickEpsilon) && (y < MagickEpsilon))
            closed_path=MagickTrue;
          if ((((draw_info->linecap == RoundCap) ||
                (closed_path != MagickFalse)) &&
               (draw_info->linejoin == RoundJoin)) ||
              (primitive_info[i].primitive != UndefinedPrimitive))
            {
              /* round caps/joins (or multiple subpaths): the polygon
                 renderer handles stroke and fill together */
              (void) DrawPolygonPrimitive(image,draw_info,primitive_info,
                exception);
              break;
            }
          /* fill with stroke suppressed, then stroke as its own polygon */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          status&=DrawStrokePolygon(image,draw_info,primitive_info,exception);
          break;
        }
      status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception);
      break;
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Remove the temporary SVG clip/composite masks installed above.
  */
  if (draw_info->compliance == SVGCompliance)
    {
      status&=SetImageMask(image,WritePixelMask,(Image *) NULL,exception);
      status&=SetImageMask(image,CompositePixelMask,(Image *) NULL,exception);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w S t r o k e P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
% the image while respecting the line cap and join attributes.
%
% The format of the DrawStrokePolygon method is:
%
%      MagickBooleanType DrawStrokePolygon(Image *image,
%        const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static void DrawRoundLinecap(Image *image,const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
{
  /*
    Render a round line cap at the given vertex: build a tiny 4-vertex
    polygon (an epsilon-sized quad offset around the point) and hand it
    to the polygon renderer.
  */
  PrimitiveInfo
    cap[5];

  register ssize_t
    j;

  j=0;
  while (j < 4)
  {
    cap[j]=(*primitive_info);
    j++;
  }
  cap[0].coordinates=4;
  cap[1].point.x+=2.0*MagickEpsilon;
  cap[2].point.x+=2.0*MagickEpsilon;
  cap[2].point.y+=2.0*MagickEpsilon;
  cap[3].point.y+=2.0*MagickEpsilon;
  cap[4].primitive=UndefinedPrimitive;  /* terminate the vertex list */
  (void) DrawPolygonPrimitive(image,draw_info,cap,exception);
}
static MagickBooleanType DrawStrokePolygon(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  /*
    Stroke each subpath of the primitive by tracing a filled outline
    polygon for it (so line cap/join attributes are honored), rendering
    that outline with the stroke color as fill, and adding round caps to
    open subpaths when requested.
  */
  DrawInfo
    *clone_info;

  MagickBooleanType
    closed_path;

  MagickStatusType
    status;

  PrimitiveInfo
    *stroke_polygon;

  register const PrimitiveInfo
    *p,
    *q;

  /*
    Draw stroked polygon.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " begin draw-stroke-polygon");
  /*
    The outline is filled with the stroke color/pattern, and its own
    stroke is disabled so only the fill of the outline is rendered.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill=draw_info->stroke;
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
  clone_info->stroke_width=0.0;
  clone_info->fill_rule=NonZeroRule;
  status=MagickTrue;
  /* one iteration per subpath: p->coordinates vertices each */
  for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates)
  {
    if (p->coordinates == 1)
      continue;  /* a single point has no stroke outline */
    stroke_polygon=TraceStrokePolygon(image,draw_info,p);
    if (stroke_polygon == (PrimitiveInfo *) NULL)
      {
        status=0;
        stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
        break;
      }
    status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception);
    stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
    if (status == 0)
      break;
    q=p+p->coordinates-1;  /* last vertex of this subpath */
    closed_path=p->closed_subpath;
    if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
      {
        /* open subpath with round caps: cap both endpoints */
        DrawRoundLinecap(image,draw_info,p,exception);
        DrawRoundLinecap(image,draw_info,q,exception);
      }
  }
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " end draw-stroke-polygon");
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A f f i n e M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAffineMatrix() returns an AffineMatrix initialized to the identity
% matrix.
%
% The format of the GetAffineMatrix method is:
%
% void GetAffineMatrix(AffineMatrix *affine_matrix)
%
% A description of each parameter follows:
%
% o affine_matrix: the affine matrix.
%
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
  /*
    Reset the affine matrix to the identity transform: clear every
    coefficient, then set the diagonal scale terms to one.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(affine_matrix != (AffineMatrix *) NULL);
  (void) memset(affine_matrix,0,sizeof(*affine_matrix));
  affine_matrix->sy=1.0;
  affine_matrix->sx=1.0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetDrawInfo() initializes draw_info to default values from image_info.
%
% The format of the GetDrawInfo method is:
%
% void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
% A description of each parameter follows:
%
%    o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
  /*
    Initialize draw_info to library defaults (black fill, transparent
    stroke, butt caps, miter joins, even-odd fill rule, 12pt text), then
    override from image_info fields and its "option" table (direction,
    encoding, family, fill, gravity, spacing, kerning, stroke, style,
    undercolor, weight, ...).
  */
  char
    *next_token;

  const char
    *option;

  ExceptionInfo
    *exception;

  ImageInfo
    *clone_info;

  /*
    Initialize draw attributes.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info != (DrawInfo *) NULL);
  (void) memset(draw_info,0,sizeof(*draw_info));
  clone_info=CloneImageInfo(image_info);
  GetAffineMatrix(&draw_info->affine);
  exception=AcquireExceptionInfo();
  /* defaults: opaque black fill, fully transparent white stroke */
  (void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#FFF0",AllCompliance,&draw_info->stroke,
    exception);
  draw_info->stroke_antialias=clone_info->antialias;
  draw_info->stroke_width=1.0;
  draw_info->fill_rule=EvenOddRule;
  draw_info->alpha=OpaqueAlpha;
  draw_info->fill_alpha=OpaqueAlpha;
  draw_info->stroke_alpha=OpaqueAlpha;
  draw_info->linecap=ButtCap;
  draw_info->linejoin=MiterJoin;
  draw_info->miterlimit=10;
  draw_info->decorate=NoDecoration;
  draw_info->pointsize=12.0;
  draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha;
  draw_info->compose=OverCompositeOp;
  draw_info->render=MagickTrue;
  draw_info->clip_path=MagickFalse;
  draw_info->debug=IsEventLogging();
  /*
    Carry over settings held directly on the (cloned) image info.
  */
  if (clone_info->font != (char *) NULL)
    draw_info->font=AcquireString(clone_info->font);
  if (clone_info->density != (char *) NULL)
    draw_info->density=AcquireString(clone_info->density);
  draw_info->text_antialias=clone_info->antialias;
  if (fabs(clone_info->pointsize) >= MagickEpsilon)
    draw_info->pointsize=clone_info->pointsize;
  draw_info->border_color=clone_info->border_color;
  if (clone_info->server_name != (char *) NULL)
    draw_info->server_name=AcquireString(clone_info->server_name);
  /*
    Apply per-image "define"/option overrides.
  */
  option=GetImageOption(clone_info,"direction");
  if (option != (const char *) NULL)
    draw_info->direction=(DirectionType) ParseCommandOption(
      MagickDirectionOptions,MagickFalse,option);
  else
    draw_info->direction=UndefinedDirection;
  option=GetImageOption(clone_info,"encoding");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->encoding,option);
  option=GetImageOption(clone_info,"family");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->family,option);
  option=GetImageOption(clone_info,"fill");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->fill,
      exception);
  option=GetImageOption(clone_info,"gravity");
  if (option != (const char *) NULL)
    draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"interline-spacing");
  if (option != (const char *) NULL)
    draw_info->interline_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"interword-spacing");
  if (option != (const char *) NULL)
    draw_info->interword_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"kerning");
  if (option != (const char *) NULL)
    draw_info->kerning=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"stroke");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke,
      exception);
  option=GetImageOption(clone_info,"strokewidth");
  if (option != (const char *) NULL)
    draw_info->stroke_width=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"style");
  if (option != (const char *) NULL)
    draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"undercolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
      exception);
  option=GetImageOption(clone_info,"weight");
  if (option != (const char *) NULL)
    {
      ssize_t
        weight;

      /* accept either a symbolic weight ("bold") or a numeric one */
      weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
      if (weight == -1)
        weight=(ssize_t) StringToUnsignedLong(option);
      draw_info->weight=(size_t) weight;
    }
  exception=DestroyExceptionInfo(exception);
  draw_info->signature=MagickCoreSignature;
  clone_info=DestroyImageInfo(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r m u t a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Permutate() returns the permutation of (n,k), i.e. the binomial
% coefficient n!/(k!*(n-k)!).
%
% The format of the Permutate method is:
%
% void Permutate(ssize_t n,ssize_t k)
%
% A description of each parameter follows:
%
% o n:
%
% o k:
%
%
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
  /*
    Return the binomial coefficient n!/(k!*(n-k)!): accumulate the
    factors (k+1)..n, then divide out 1..(n-k).
  */
  double
    result;

  register ssize_t
    j;

  result=1.0;
  for (j=n; j > k; j--)
    result*=(double) j;
  for (j=n-k; j >= 1; j--)
    result/=(double) j;
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a c e P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TracePrimitive is a collection of methods for generating graphic
% primitives such as arcs, ellipses, paths, etc.
%
*/
static void TraceArc(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo degrees)
{
  /*
    Trace an arc as an ellipse centered on the midpoint of the start/end
    segment, with radii spanning half that segment.
  */
  PointInfo
    center,
    radius;

  center.x=0.5*(start.x+end.x);
  center.y=0.5*(start.y+end.y);
  radius.x=fabs(center.x-start.x);
  radius.y=fabs(center.y-start.y);
  TraceEllipse(mvg_info,center,radius,degrees);
}
static void TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo arc,const double angle,
  const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
  /*
    Trace an SVG-style elliptical arc from start to end with the given
    radii (arc), x-axis rotation (angle), and large-arc/sweep flags.  The
    endpoint parameterization is converted to a center parameterization,
    the arc is split into <= 90-degree segments, and each segment is
    approximated by a cubic Bezier via TraceBezier().  Degenerate arcs
    collapse to a point or a straight line.
  */
  double
    alpha,
    beta,
    delta,
    factor,
    gamma,
    theta;

  PointInfo
    center,
    points[3],
    radii;

  register double
    cosine,
    sine;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  size_t
    arc_segments;

  ssize_t
    offset;

  offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  /* coincident endpoints: the arc degenerates to a single point */
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    {
      TracePoint(primitive_info,end);
      return;
    }
  radii.x=fabs(arc.x);
  radii.y=fabs(arc.y);
  /* zero radius: the arc degenerates to a straight line (per SVG spec) */
  if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
    {
      TraceLine(primitive_info,start,end);
      return;
    }
  /*
    Rotate into the ellipse's frame and compute the half-chord vector.
  */
  cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
  sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
  center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
  center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
  delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
    (radii.y*radii.y);
  if (delta < MagickEpsilon)
    {
      TraceLine(primitive_info,start,end);
      return;
    }
  /* radii too small to span the chord: scale them up uniformly */
  if (delta > 1.0)
    {
      radii.x*=sqrt((double) delta);
      radii.y*=sqrt((double) delta);
    }
  /*
    Endpoints on the unit circle (ellipse normalized out).
  */
  points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
  points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
  points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
  points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
  alpha=points[1].x-points[0].x;
  beta=points[1].y-points[0].y;
  factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
  if (factor <= 0.0)
    factor=0.0;
  else
    {
      factor=sqrt((double) factor);
      /* large_arc/sweep flags pick which of the two candidate centers */
      if (sweep == large_arc)
        factor=(-factor);
    }
  center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
  center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
  /*
    Start angle (alpha) and signed sweep extent (theta).
  */
  alpha=atan2(points[0].y-center.y,points[0].x-center.x);
  theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
  if ((theta < 0.0) && (sweep != MagickFalse))
    theta+=2.0*MagickPI;
  else
    if ((theta > 0.0) && (sweep == MagickFalse))
      theta-=2.0*MagickPI;
  /* one Bezier segment per <= 90 degrees of sweep */
  arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+MagickEpsilon))));
  p=primitive_info;
  for (i=0; i < (ssize_t) arc_segments; i++)
  {
    /*
      Control-point magnitude for the cubic Bezier approximating this
      segment, then the segment's start/control/end points on the unit
      circle.
    */
    beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
    gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
      sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
      sin(fmod((double) beta,DegreesToRadians(360.0)));
    points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    /*
      Map the four Bezier control points back through the ellipse's
      radii and rotation, then trace the segment.
    */
    p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
    p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
    (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
      points[0].y);
    (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
      points[0].y);
    (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
      points[1].y);
    (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
      points[1].y);
    (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
      points[2].y);
    (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
      points[2].y);
    if (i == (ssize_t) (arc_segments-1))
      (p+3)->point=end;  /* land exactly on the requested endpoint */
    TraceBezier(mvg_info,4);
    /* TraceBezier may reallocate the buffer: re-derive p from the offset */
    p=(*mvg_info->primitive_info)+mvg_info->offset;
    mvg_info->offset+=p->coordinates;
    p+=p->coordinates;
  }
  /*
    Rewind to the arc's first primitive and stamp the total vertex count,
    then propagate the primitive type back over every vertex.
  */
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
}
static void TraceBezier(MVGInfo *mvg_info,const size_t number_coordinates)
{
  /*
    Trace a Bezier curve of arbitrary order through the control points
    already stored at the current mvg_info offset.  The curve is sampled
    into a polyline: the sample count (control_points) scales with the
    curve's bounding extent, and each sample is evaluated with the
    Bernstein/binomial form using Permutate() coefficients.
  */
  double
    alpha,
    *coefficients,
    weight;

  PointInfo
    end,
    point,
    *points;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i,
    j;

  size_t
    control_points,
    quantum;

  /*
    Allocate coefficients.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=number_coordinates;
  /*
    Scale the sampling density by the largest coordinate spread between
    any pair of control points.
  */
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    for (j=i+1; j < (ssize_t) number_coordinates; j++)
    {
      alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
      alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
    }
  }
  quantum=(size_t) MagickMin((double) quantum/number_coordinates,
    (double) BezierQuantum);
  control_points=quantum*number_coordinates;
  /* ensure the primitive buffer can hold all samples plus the endpoint */
  if (CheckPrimitiveExtent(mvg_info,control_points+1) == MagickFalse)
    return;
  /* CheckPrimitiveExtent may reallocate: re-derive the base pointer */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  coefficients=(double *) AcquireQuantumMemory((size_t)
    number_coordinates,sizeof(*coefficients));
  points=(PointInfo *) AcquireQuantumMemory((size_t) control_points,
    sizeof(*points));
  if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    Compute bezier points.
  */
  end=primitive_info[number_coordinates-1].point;
  /* binomial coefficients of the Bernstein basis */
  for (i=0; i < (ssize_t) number_coordinates; i++)
    coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
  weight=0.0;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    p=primitive_info;
    point.x=0.0;
    point.y=0.0;
    /* evaluate sum of C(n-1,j)*(1-w)^(n-1-j)*w^j * control_point[j] */
    alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
    for (j=0; j < (ssize_t) number_coordinates; j++)
    {
      point.x+=alpha*coefficients[j]*p->point.x;
      point.y+=alpha*coefficients[j]*p->point.y;
      alpha*=weight/(1.0-weight);
      p++;
    }
    points[i]=point;
    weight+=1.0/control_points;
  }
  /*
    Bezier curves are just short segmented polys.
  */
  p=primitive_info;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    TracePoint(p,points[i]);
    p+=p->coordinates;
  }
  TracePoint(p,end);  /* finish exactly on the last control point */
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /* propagate the primitive type back over every generated vertex */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  points=(PointInfo *) RelinquishMagickMemory(points);
  coefficients=(double *) RelinquishMagickMemory(coefficients);
}
static void TraceCircle(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end)
{
  /*
    Trace a full circle centered on 'start' whose radius is the distance
    from 'start' to 'end', expressed as a 0..360 degree ellipse with
    equal radii.
  */
  double
    radius;

  PointInfo
    degrees,
    offset;

  radius=hypot(end.x-start.x,end.y-start.y);
  offset.x=radius;
  offset.y=radius;
  degrees.x=0.0;
  degrees.y=360.0;
  TraceEllipse(mvg_info,start,offset,degrees);
}
static void TraceEllipse(MVGInfo *mvg_info,const PointInfo center,
  const PointInfo radii,const PointInfo arc)
{
  /*
    Trace an ellipse (or elliptical arc from arc.x to arc.y degrees) as a
    polyline sampled at a fixed angular step, sized so larger ellipses get
    more segments.  Marks the result as a closed subpath when the first
    and last samples coincide.
  */
  double
    delta,
    step,
    x,
    y;

  PointInfo
    angle,
    point;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  size_t
    extent;

  /*
    Ellipses are just short segmented polys.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  /* a zero radius yields nothing to trace */
  if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
    return;
  /* angular step: at most pi/8, finer for large radii */
  delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y));
  step=MagickPI/8.0;
  if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
    step=MagickPI/(4.0*(MagickPI*PerceptibleReciprocal(delta)/2.0));
  angle.x=DegreesToRadians(arc.x);
  y=arc.y;
  /* normalize so the end angle is not before the start angle */
  while (y < arc.x)
    y+=360.0;
  angle.y=DegreesToRadians(y);
  extent=(size_t) ceil((angle.y-angle.x)/step)+1;
  if (CheckPrimitiveExtent(mvg_info,extent) == MagickFalse)
    return;
  /* CheckPrimitiveExtent may reallocate: re-derive the base pointer */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  for (p=primitive_info; angle.x < angle.y; angle.x+=step)
  {
    point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x;
    point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y;
    TracePoint(p,point);
    p+=p->coordinates;
  }
  /* always finish exactly on the end angle */
  point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x;
  point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y;
  TracePoint(p,point);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  x=fabs(primitive_info[0].point.x-
    primitive_info[primitive_info->coordinates-1].point.x);
  y=fabs(primitive_info[0].point.y-
    primitive_info[primitive_info->coordinates-1].point.y);
  if ((x < MagickEpsilon) && (y < MagickEpsilon))
    primitive_info->closed_subpath=MagickTrue;
  /* propagate the primitive type back over every generated vertex */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
}
/*
  TraceLine() records a line segment from 'start' to 'end' as two points in
  primitive_info.  Coincident endpoints degenerate to a single PointPrimitive.
*/
static void TraceLine(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end)
{
  MagickBooleanType
    coincident;

  TracePoint(primitive_info,start);
  coincident=((fabs(start.x-end.x) < MagickEpsilon) &&
    (fabs(start.y-end.y) < MagickEpsilon)) ? MagickTrue : MagickFalse;
  if (coincident != MagickFalse)
    {
      /* zero-length line: degrade to a single point primitive */
      primitive_info->primitive=PointPrimitive;
      primitive_info->coordinates=1;
      return;
    }
  TracePoint(primitive_info+1,end);
  primitive_info[1].primitive=primitive_info->primitive;
  primitive_info->coordinates=2;
  primitive_info->closed_subpath=MagickFalse;
}
/*
  TracePath() parses an SVG-style path string ('M', 'L', 'C', 'Q', 'A', ...)
  and appends the resulting point primitives to the MVG primitive list.
  Lowercase commands use coordinates relative to the current point; uppercase
  commands are absolute.  Returns the total number of coordinates emitted, or
  0 on failure (allocation failure or malformed input).
  NOTE(review): ThrowPointExpectedException appears to record the error and
  set 'status' rather than return immediately — confirm against its
  definition; parsing continues until the status check at the loop top.
*/
static size_t TracePath(MVGInfo *mvg_info,const char *path,
  ExceptionInfo *exception)
{
  char
    *next_token,
    token[MagickPathExtent];

  const char
    *p;

  double
    x,
    y;

  int
    attribute,
    last_attribute;

  MagickBooleanType
    status;

  PointInfo
    end = {0.0, 0.0},
    points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} },
    point = {0.0, 0.0},
    start = {0.0, 0.0};

  PrimitiveInfo
    *primitive_info;

  PrimitiveType
    primitive_type;

  register PrimitiveInfo
    *q;

  register ssize_t
    i;

  size_t
    number_coordinates,
    z_count;

  ssize_t
    subpath_offset;

  subpath_offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  status=MagickTrue;
  attribute=0;
  number_coordinates=0;
  z_count=0;
  primitive_type=primitive_info->primitive;
  q=primitive_info;
  for (p=path; *p != '\0'; )
  {
    if (status == MagickFalse)
      break;
    while (isspace((int) ((unsigned char) *p)) != 0)
      p++;
    if (*p == '\0')
      break;
    /* remember the previous command: 'S'/'T' reflect the prior control point
       only when it followed a compatible curve command */
    last_attribute=attribute;
    attribute=(int) (*p++);
    switch (attribute)
    {
      case 'a':
      case 'A':
      {
        double
          angle = 0.0;

        MagickBooleanType
          large_arc = MagickFalse,
          sweep = MagickFalse;

        PointInfo
          arc = {0.0, 0.0};

        /*
          Elliptical arc.
        */
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          arc.x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          arc.y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          angle=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          /* the sweep flag may arrive fused with a trailing comma token */
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
          end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
          TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'c':
      case 'C':
      {
        /*
          Cubic Bézier curve.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 4; i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          TraceBezier(mvg_info,4);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'H':
      case 'h':
      {
        /* horizontal line-to: only the x coordinate is supplied */
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
          if (CheckPrimitiveExtent(mvg_info,4096) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          TracePoint(q,point);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'l':
      case 'L':
      {
        /*
          Line to.
        */
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,4096) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          TracePoint(q,point);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'M':
      case 'm':
      {
        /*
          Move to.
        */
        /* a new subpath begins: flush the coordinate count of the previous
           subpath into its first primitive */
        if (mvg_info->offset != subpath_offset)
          {
            primitive_info=(*mvg_info->primitive_info)+subpath_offset;
            primitive_info->coordinates=(size_t) (q-primitive_info);
            number_coordinates+=primitive_info->coordinates;
            primitive_info=q;
            subpath_offset=mvg_info->offset;
          }
        i=0;
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
          /* only the first pair is the move-to; extra pairs are implicit
             line-to commands, and 'start' anchors a later 'z' close */
          if (i == 0)
            start=point;
          i++;
          if (CheckPrimitiveExtent(mvg_info,4096) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          TracePoint(q,point);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'q':
      case 'Q':
      {
        /*
          Quadratic Bézier curve.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 3; i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          TraceBezier(mvg_info,3);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 's':
      case 'S':
      {
        /*
          Cubic Bézier curve.
        */
        do
        {
          /* reflect the previous curve's second control point about the
             current point to get the first control point */
          points[0]=points[3];
          points[1].x=2.0*points[3].x-points[2].x;
          points[1].y=2.0*points[3].y-points[2].y;
          for (i=2; i < 4; i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
            points[i]=end;
          }
          /* per SVG, without a preceding C/c/S/s the control point
             coincides with the current point */
          if (strchr("CcSs",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          TraceBezier(mvg_info,4);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 't':
      case 'T':
      {
        /*
          Quadratic Bézier curve.
        */
        do
        {
          /* reflect the previous control point for a smooth quadratic */
          points[0]=points[2];
          points[1].x=2.0*points[2].x-points[1].x;
          points[1].y=2.0*points[2].y-points[1].y;
          for (i=2; i < 3; i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
            points[i]=end;
          }
          if (status == MagickFalse)
            break;
          if (strchr("QqTt",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          TraceBezier(mvg_info,3);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'v':
      case 'V':
      {
        /*
          Line to.
        */
        /* vertical line-to: only the y coordinate is supplied */
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,4096) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          TracePoint(q,point);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'z':
      case 'Z':
      {
        /*
          Close path.
        */
        /* emit a segment back to the subpath start and finalize it */
        point=start;
        if (CheckPrimitiveExtent(mvg_info,4096) == MagickFalse)
          return(0);
        q=(*mvg_info->primitive_info)+mvg_info->offset;
        TracePoint(q,point);
        mvg_info->offset+=q->coordinates;
        q+=q->coordinates;
        primitive_info=(*mvg_info->primitive_info)+subpath_offset;
        primitive_info->coordinates=(size_t) (q-primitive_info);
        primitive_info->closed_subpath=MagickTrue;
        number_coordinates+=primitive_info->coordinates;
        primitive_info=q;
        subpath_offset=mvg_info->offset;
        z_count++;
        break;
      }
      default:
      {
        /* unrecognized command letter */
        ThrowPointExpectedException(token,exception);
        break;
      }
    }
  }
  if (status == MagickFalse)
    return(0);
  /* finalize the last (possibly unclosed) subpath */
  primitive_info=(*mvg_info->primitive_info)+subpath_offset;
  primitive_info->coordinates=(size_t) (q-primitive_info);
  number_coordinates+=primitive_info->coordinates;
  /* back-fill the primitive type; multiple 'z' subpaths switch the fill
     method so holes render correctly */
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    q--;
    q->primitive=primitive_type;
    if (z_count > 1)
      q->method=FillToBorderMethod;
  }
  q=primitive_info;
  return(number_coordinates);
}
/*
  TraceRectangle() emits the five points of a closed axis-aligned rectangle
  (the start corner repeated to close the loop) into primitive_info.  A
  rectangle with near-zero width or height produces an empty primitive.
*/
static void TraceRectangle(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end)
{
  PointInfo
    corners[5];

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  if ((fabs(start.x-end.x) < MagickEpsilon) ||
      (fabs(start.y-end.y) < MagickEpsilon))
    {
      primitive_info->coordinates=0;
      return;
    }
  /* walk the corners counter to the start corner and back again */
  corners[0]=start;
  corners[1].x=start.x;
  corners[1].y=end.y;
  corners[2]=end;
  corners[3].x=end.x;
  corners[3].y=start.y;
  corners[4]=start;
  p=primitive_info;
  for (i=0; i < 5; i++)
  {
    TracePoint(p,corners[i]);
    p+=p->coordinates;
  }
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  /* propagate the primitive type backward over the emitted points */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
}
/*
  TraceRoundRectangle() traces a rectangle with rounded corners by stitching
  together four quarter-ellipse arcs (one per corner) followed by a closing
  point back to the first vertex.  'arc' gives the corner x/y radii, clamped
  to half the rectangle's extent.  The combined result is collapsed into a
  single closed subpath starting at 'offset'.
*/
static void TraceRoundRectangle(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,PointInfo arc)
{
  PointInfo
    degrees,
    point,
    segment;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  ssize_t
    offset;

  offset=mvg_info->offset;
  segment.x=fabs(end.x-start.x);
  segment.y=fabs(end.y-start.y);
  /* degenerate rectangle: emit an empty primitive */
  if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon))
    {
      (*mvg_info->primitive_info+mvg_info->offset)->coordinates=0;
      return;
    }
  /* clamp corner radii so opposite corners cannot overlap */
  if (arc.x > (0.5*segment.x))
    arc.x=0.5*segment.x;
  if (arc.y > (0.5*segment.y))
    arc.y=0.5*segment.y;
  /* top-right corner: 270..360 degrees */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+arc.y;
  degrees.x=270.0;
  degrees.y=360.0;
  TraceEllipse(mvg_info,point,arc,degrees);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* bottom-right corner: 0..90 degrees */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=0.0;
  degrees.y=90.0;
  TraceEllipse(mvg_info,point,arc,degrees);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* bottom-left corner: 90..180 degrees */
  point.x=start.x+arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=90.0;
  degrees.y=180.0;
  TraceEllipse(mvg_info,point,arc,degrees);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* top-left corner: 180..270 degrees */
  point.x=start.x+arc.x;
  point.y=start.y+arc.y;
  degrees.x=180.0;
  degrees.y=270.0;
  TraceEllipse(mvg_info,point,arc,degrees);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  if (CheckPrimitiveExtent(mvg_info,4096) == MagickFalse)
    return;
  /* close the outline by repeating the first generated point */
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  TracePoint(p,(*mvg_info->primitive_info+offset)->point);
  p+=p->coordinates;
  /* rewind: the four arcs plus closing point become one subpath */
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  /* propagate the primitive type backward over the emitted points */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
}
/*
  TraceSquareLinecap() extends both endpoints of an open stroke outward along
  the stroke direction by 'offset', so a square linecap covers the full line
  width.  The anchor for each extension is the nearest vertex that is not
  coincident with the endpoint.
*/
static void TraceSquareLinecap(PrimitiveInfo *primitive_info,
  const size_t number_vertices,const double offset)
{
  double
    delta_x,
    delta_y,
    distance;

  ssize_t
    head,
    tail;

  /* head: find the first vertex distinguishable from vertex 0 */
  delta_x=0.0;
  delta_y=0.0;
  for (head=1; head < (ssize_t) number_vertices; head++)
  {
    delta_x=primitive_info[0].point.x-primitive_info[head].point.x;
    delta_y=primitive_info[0].point.y-primitive_info[head].point.y;
    if ((fabs((double) delta_x) >= MagickEpsilon) ||
        (fabs((double) delta_y) >= MagickEpsilon))
      break;
  }
  if (head == (ssize_t) number_vertices)
    head=(ssize_t) number_vertices-1L;
  distance=hypot((double) delta_x,(double) delta_y);
  primitive_info[0].point.x=(double) (primitive_info[head].point.x+
    delta_x*(distance+offset)/distance);
  primitive_info[0].point.y=(double) (primitive_info[head].point.y+
    delta_y*(distance+offset)/distance);
  /* tail: scan backward for a vertex distinguishable from the last one */
  for (tail=(ssize_t) number_vertices-2; tail >= 0; tail--)
  {
    delta_x=primitive_info[number_vertices-1].point.x-
      primitive_info[tail].point.x;
    delta_y=primitive_info[number_vertices-1].point.y-
      primitive_info[tail].point.y;
    if ((fabs((double) delta_x) >= MagickEpsilon) ||
        (fabs((double) delta_y) >= MagickEpsilon))
      break;
  }
  distance=hypot((double) delta_x,(double) delta_y);
  primitive_info[number_vertices-1].point.x=(double)
    (primitive_info[tail].point.x+delta_x*(distance+offset)/distance);
  primitive_info[number_vertices-1].point.y=(double)
    (primitive_info[tail].point.y+delta_y*(distance+offset)/distance);
}
/*
  TraceStrokePolygon() converts a polyline/polygon primitive into the closed
  outline polygon of its stroke, honoring the draw_info stroke width,
  linejoin (bevel/miter/round), and linecap settings.  Two point lists are
  built in parallel — path_p and path_q, one per side of the stroke — and
  then concatenated (the q side reversed) into the returned polygon.
  Returns NULL on allocation failure; the caller owns and must free the
  returned PrimitiveInfo array.
*/
static PrimitiveInfo *TraceStrokePolygon(const Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
/*
  Grow both side buffers by 'pad' entries when the current count would
  overflow; on any failure every local allocation is released and the
  function returns NULL.
*/
#define CheckPathExtent(pad) \
  if ((q+(pad)) >= (ssize_t) max_strokes) \
    { \
      if (~max_strokes < (pad)) \
        { \
          path_p=(PointInfo *) RelinquishMagickMemory(path_p); \
          path_q=(PointInfo *) RelinquishMagickMemory(path_q); \
        } \
      else \
        { \
          max_strokes+=(pad); \
          path_p=(PointInfo *) ResizeQuantumMemory(path_p,max_strokes, \
            sizeof(*path_p)); \
          path_q=(PointInfo *) ResizeQuantumMemory(path_q,max_strokes, \
            sizeof(*path_q)); \
        } \
      if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL)) \
        { \
          if (path_p != (PointInfo *) NULL) \
            path_p=(PointInfo *) RelinquishMagickMemory(path_p); \
          if (path_q != (PointInfo *) NULL) \
            path_q=(PointInfo *) RelinquishMagickMemory(path_q); \
          polygon_primitive=(PrimitiveInfo *) \
            RelinquishMagickMemory(polygon_primitive); \
          return((PrimitiveInfo *) NULL); \
        } \
    }

  /* paired values for the previous (p) and current (q) line segment */
  typedef struct _LineSegment
  {
    double
      p,
      q;
  } LineSegment;

  double
    delta_theta,
    dot_product,
    mid,
    miterlimit;

  LineSegment
    dx = {0,0},
    dy = {0,0},
    inverse_slope = {0,0},
    slope = {0,0},
    theta = {0,0};

  MagickBooleanType
    closed_path;

  PointInfo
    box_p[5],
    box_q[5],
    center,
    offset,
    *path_p,
    *path_q;

  PrimitiveInfo
    *polygon_primitive,
    *stroke_polygon;

  register ssize_t
    i;

  size_t
    arc_segments,
    max_strokes,
    number_vertices;

  ssize_t
    j,
    n,
    p,
    q;

  /*
    Allocate paths.
  */
  number_vertices=primitive_info->coordinates;
  max_strokes=2*number_vertices+6*BezierQuantum+360;
  polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    number_vertices+2UL,sizeof(*polygon_primitive));
  if (polygon_primitive == (PrimitiveInfo *) NULL)
    return((PrimitiveInfo *) NULL);
  (void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices*
    sizeof(*polygon_primitive));
  closed_path=primitive_info[0].closed_subpath;
  /* for closed round/miter joins, wrap the second vertex so the closing
     corner is joined like the interior ones */
  if (((draw_info->linejoin == RoundJoin) ||
       (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse))
    {
      polygon_primitive[number_vertices]=primitive_info[1];
      number_vertices++;
    }
  polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
  /*
    Compute the slope for the first line segment, p.
  */
  dx.p=0.0;
  dy.p=0.0;
  for (n=1; n < (ssize_t) number_vertices; n++)
  {
    dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
    dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
    if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
      break;
  }
  if (n == (ssize_t) number_vertices)
    {
      if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse))
        {
          /*
            Zero length subpath.
          */
          stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory(
            sizeof(*stroke_polygon));
          stroke_polygon[0]=polygon_primitive[0];
          stroke_polygon[0].coordinates=0;
          polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
            polygon_primitive);
          return(stroke_polygon);
        }
      n=(ssize_t) number_vertices-1L;
    }
  path_p=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
    sizeof(*path_p));
  if (path_p == (PointInfo *) NULL)
    {
      polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
        polygon_primitive);
      return((PrimitiveInfo *) NULL);
    }
  path_q=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
    sizeof(*path_q));
  if (path_q == (PointInfo *) NULL)
    {
      path_p=(PointInfo *) RelinquishMagickMemory(path_p);
      polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
        polygon_primitive);
      return((PrimitiveInfo *) NULL);
    }
  /* near-vertical and near-horizontal segments get huge finite slopes
     instead of dividing by ~zero */
  slope.p=0.0;
  inverse_slope.p=0.0;
  if (fabs(dx.p) < MagickEpsilon)
    {
      if (dx.p >= 0.0)
        slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
      else
        slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
    }
  else
    if (fabs(dy.p) < MagickEpsilon)
      {
        if (dy.p >= 0.0)
          inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          inverse_slope.p=dx.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      {
        slope.p=dy.p/dx.p;
        inverse_slope.p=(-1.0/slope.p);
      }
  /* mid is the half stroke width in device space; miterlimit is compared
     against squared distances, hence the squaring here */
  mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
  miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid);
  if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
    TraceSquareLinecap(polygon_primitive,number_vertices,mid);
  offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
  offset.y=(double) (offset.x*inverse_slope.p);
  /* seed box_p/box_q with the two offset edges of the first segment */
  if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
    {
      box_p[0].x=polygon_primitive[0].point.x-offset.x;
      box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
      box_p[1].x=polygon_primitive[n].point.x-offset.x;
      box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
      box_q[0].x=polygon_primitive[0].point.x+offset.x;
      box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
      box_q[1].x=polygon_primitive[n].point.x+offset.x;
      box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
    }
  else
    {
      box_p[0].x=polygon_primitive[0].point.x+offset.x;
      box_p[0].y=polygon_primitive[0].point.y+offset.y;
      box_p[1].x=polygon_primitive[n].point.x+offset.x;
      box_p[1].y=polygon_primitive[n].point.y+offset.y;
      box_q[0].x=polygon_primitive[0].point.x-offset.x;
      box_q[0].y=polygon_primitive[0].point.y-offset.y;
      box_q[1].x=polygon_primitive[n].point.x-offset.x;
      box_q[1].y=polygon_primitive[n].point.y-offset.y;
    }
  /*
    Create strokes for the line join attribute: bevel, miter, round.
  */
  p=0;
  q=0;
  /* NOTE(review): the counters look swapped here, but both are zero at
     this point, so each array's slot 0 is filled and both counters end at
     1 — equivalent to the expected path_p[p++]/path_q[q++] */
  path_q[p++]=box_q[0];
  path_p[q++]=box_p[0];
  for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
  {
    /*
      Compute the slope for this line segment, q.
    */
    dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
    dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
    dot_product=dx.q*dx.q+dy.q*dy.q;
    /* skip vertices less than half a pixel apart */
    if (dot_product < 0.25)
      continue;
    slope.q=0.0;
    inverse_slope.q=0.0;
    if (fabs(dx.q) < MagickEpsilon)
      {
        if (dx.q >= 0.0)
          slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      if (fabs(dy.q) < MagickEpsilon)
        {
          if (dy.q >= 0.0)
            inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon :
              1.0/MagickEpsilon;
          else
            inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon :
              -1.0/MagickEpsilon;
        }
      else
        {
          slope.q=dy.q/dx.q;
          inverse_slope.q=(-1.0/slope.q);
        }
    offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
    offset.y=(double) (offset.x*inverse_slope.q);
    dot_product=dy.q*offset.x-dx.q*offset.y;
    /* box_p/box_q [2],[3]: offset edges of the current segment */
    if (dot_product > 0.0)
      {
        box_p[2].x=polygon_primitive[n].point.x-offset.x;
        box_p[2].y=polygon_primitive[n].point.y-offset.y;
        box_p[3].x=polygon_primitive[i].point.x-offset.x;
        box_p[3].y=polygon_primitive[i].point.y-offset.y;
        box_q[2].x=polygon_primitive[n].point.x+offset.x;
        box_q[2].y=polygon_primitive[n].point.y+offset.y;
        box_q[3].x=polygon_primitive[i].point.x+offset.x;
        box_q[3].y=polygon_primitive[i].point.y+offset.y;
      }
    else
      {
        box_p[2].x=polygon_primitive[n].point.x+offset.x;
        box_p[2].y=polygon_primitive[n].point.y+offset.y;
        box_p[3].x=polygon_primitive[i].point.x+offset.x;
        box_p[3].y=polygon_primitive[i].point.y+offset.y;
        box_q[2].x=polygon_primitive[n].point.x-offset.x;
        box_q[2].y=polygon_primitive[n].point.y-offset.y;
        box_q[3].x=polygon_primitive[i].point.x-offset.x;
        box_q[3].y=polygon_primitive[i].point.y-offset.y;
      }
    /* box_*[4]: intersection of the two offset edges (the miter point);
       parallel segments fall back to the shared endpoint */
    if (fabs((double) (slope.p-slope.q)) < MagickEpsilon)
      {
        box_p[4]=box_p[1];
        box_q[4]=box_q[1];
      }
    else
      {
        box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
          box_p[3].y)/(slope.p-slope.q));
        box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
        box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
          box_q[3].y)/(slope.p-slope.q));
        box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
      }
    CheckPathExtent(6*BezierQuantum+360);
    /* sign of the cross product tells which side is the inside of the turn */
    dot_product=dx.q*dy.p-dx.p*dy.q;
    if (dot_product <= 0.0)
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          path_q[q++]=box_q[1];
          path_q[q++]=box_q[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_p[p++]=box_p[4];
          else
            {
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              path_q[q++]=box_q[4];
              path_p[p++]=box_p[4];
            }
          else
            {
              /* miter exceeds the limit: fall back to a bevel */
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_p[p++]=box_p[4];
          else
            {
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          center=polygon_primitive[n].point;
          theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
          theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
          if (theta.q < theta.p)
            theta.q+=2.0*MagickPI;
          arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/
            (2.0*sqrt((double) (1.0/mid)))));
          CheckPathExtent(arc_segments+6*BezierQuantum+360);
          path_q[q].x=box_q[1].x;
          path_q[q].y=box_q[1].y;
          q++;
          /* approximate the round join with short chords */
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            path_q[q].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            path_q[q].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            q++;
          }
          path_q[q++]=box_q[2];
          break;
        }
        default:
          break;
      }
    else
      /* mirror image of the previous switch for turns in the other
         direction: the roles of path_p and path_q swap */
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          path_p[p++]=box_p[1];
          path_p[p++]=box_p[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_q[q++]=box_q[4];
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              path_q[q++]=box_q[4];
              path_p[p++]=box_p[4];
            }
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_q[q++]=box_q[4];
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
            }
          center=polygon_primitive[n].point;
          theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
          theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
          if (theta.p < theta.q)
            theta.p+=2.0*MagickPI;
          arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/
            (2.0*sqrt((double) (1.0/mid)))));
          CheckPathExtent(arc_segments+6*BezierQuantum+360);
          path_p[p++]=box_p[1];
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            path_p[p].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            path_p[p].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            p++;
          }
          path_p[p++]=box_p[2];
          break;
        }
        default:
          break;
      }
    /* current segment becomes the previous one for the next iteration */
    slope.p=slope.q;
    inverse_slope.p=inverse_slope.q;
    box_p[0]=box_p[2];
    box_p[1]=box_p[3];
    box_q[0]=box_q[2];
    box_q[1]=box_q[3];
    dx.p=dx.q;
    dy.p=dy.q;
    n=i;
  }
  path_p[p++]=box_p[1];
  path_q[q++]=box_q[1];
  /*
    Trace stroked polygon.
  */
  /* closed_path contributes 0 or 1 per use in the size arithmetic below */
  stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
  if (stroke_polygon != (PrimitiveInfo *) NULL)
    {
      /* p-side points forward, then q-side points in reverse, closing the
         outline back at the first point */
      for (i=0; i < (ssize_t) p; i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=path_p[i];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[0].point;
          i++;
        }
      for ( ; i < (ssize_t) (p+q+closed_path); i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=path_q[p+q+closed_path-(i+1)];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
          i++;
        }
      stroke_polygon[i]=polygon_primitive[0];
      stroke_polygon[i].point=stroke_polygon[0].point;
      i++;
      stroke_polygon[i].primitive=UndefinedPrimitive;
      stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
    }
  path_p=(PointInfo *) RelinquishMagickMemory(path_p);
  path_q=(PointInfo *) RelinquishMagickMemory(path_q);
  polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive);
  return(stroke_polygon);
}
|
serial_tree_learner.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
#define LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
#include <LightGBM/dataset.h>
#include <LightGBM/tree.h>
#include <LightGBM/tree_learner.h>
#include <LightGBM/utils/array_args.h>
#include <LightGBM/utils/random.h>
#include <string>
#include <cmath>
#include <cstdio>
#include <memory>
#include <random>
#include <vector>
#include "data_partition.hpp"
#include "feature_histogram.hpp"
#include "leaf_splits.hpp"
#include "split_info.hpp"
#ifdef USE_GPU
// Use 4KBytes aligned allocator for ordered gradients and ordered hessians when GPU is enabled.
// This is necessary to pin the two arrays in memory and make transferring faster.
#include <boost/align/aligned_allocator.hpp>
#endif
using namespace json11;
namespace LightGBM {
/*! \brief forward declaration */
class CostEfficientGradientBoosting;
/*!
* \brief Used for learning a tree by single machine
*/
class SerialTreeLearner: public TreeLearner {
public:
friend CostEfficientGradientBoosting;
explicit SerialTreeLearner(const Config* config);
~SerialTreeLearner();
void Init(const Dataset* train_data, bool is_constant_hessian) override;
void ResetTrainingData(const Dataset* train_data) override;
void ResetConfig(const Config* config) override;
Tree* Train(const score_t* gradients, const score_t *hessians, bool is_constant_hessian,
const Json& forced_split_json) override;
Tree* FitByExistingTree(const Tree* old_tree, const score_t* gradients, const score_t* hessians) const override;
Tree* FitByExistingTree(const Tree* old_tree, const std::vector<int>& leaf_pred,
const score_t* gradients, const score_t* hessians) override;
void SetBaggingData(const data_size_t* used_indices, data_size_t num_data) override {
data_partition_->SetUsedDataIndices(used_indices, num_data);
}
void AddPredictionToScore(const Tree* tree, double* out_score) const override {
if (tree->num_leaves() <= 1) { return; }
CHECK(tree->num_leaves() <= data_partition_->num_leaves());
#pragma omp parallel for schedule(static)
for (int i = 0; i < tree->num_leaves(); ++i) {
double output = static_cast<double>(tree->LeafOutput(i));
data_size_t cnt_leaf_data = 0;
auto tmp_idx = data_partition_->GetIndexOnLeaf(i, &cnt_leaf_data);
for (data_size_t j = 0; j < cnt_leaf_data; ++j) {
out_score[tmp_idx[j]] += output;
}
}
}
// Re-fits leaf outputs using the objective function (e.g. for metrics that
// need post-hoc leaf renewal); residual_getter maps (labels, index) to a residual.
void RenewTreeOutput(Tree* tree, const ObjectiveFunction* obj, std::function<double(const label_t*, int)> residual_getter,
data_size_t total_num_data, const data_size_t* bag_indices, data_size_t bag_cnt) const override;
bool IsHistColWise() const override { return is_hist_colwise_; }
protected:
// Prepares the multi-value bin representation of the dataset for histogram construction.
void GetMultiValBin(const Dataset* dataset, bool is_first_time);
// Samples which features may be used (feature sub-sampling), per tree or per node.
virtual std::vector<int8_t> GetUsedFeatures(bool is_tree_level);
/*!
* \brief Some initial works before training
*/
virtual void BeforeTrain();
/*!
* \brief Some initial works before FindBestSplit
*/
virtual bool BeforeFindBestSplit(const Tree* tree, int left_leaf, int right_leaf);
// Finds the best split for the current leaves (drives histogram construction + search).
virtual void FindBestSplits();
// Builds histograms for the selected features; use_subtract enables the
// parent-minus-sibling histogram trick.
virtual void ConstructHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);
// Scans the built histograms to find per-feature best thresholds.
virtual void FindBestSplitsFromHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);
/*!
* \brief Partition tree and data according best split.
* \param tree Current tree, will be splitted on this function.
* \param best_leaf The index of leaf that will be splitted.
* \param left_leaf The index of left leaf after splitted.
* \param right_leaf The index of right leaf after splitted.
*/
virtual void Split(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf);
/* Force splits with forced_split_json dict and then return num splits forced.*/
virtual int32_t ForceSplits(Tree* tree, const Json& forced_split_json, int* left_leaf,
int* right_leaf, int* cur_depth,
bool *aborted_last_force_split);
/*!
* \brief Get the number of data in a leaf
* \param leaf_idx The index of leaf
* \return The number of data in the leaf_idx leaf
*/
inline virtual data_size_t GetGlobalDataCountInLeaf(int leaf_idx) const;
/*! \brief number of data */
data_size_t num_data_;
/*! \brief number of features */
int num_features_;
/*! \brief training data */
const Dataset* train_data_;
/*! \brief gradients of current iteration */
const score_t* gradients_;
/*! \brief hessians of current iteration */
const score_t* hessians_;
/*! \brief training data partition on leaves */
std::unique_ptr<DataPartition> data_partition_;
/*! \brief used for generate used features */
Random random_;
/*! \brief used for sub feature training, is_feature_used_[i] = false means don't used feature i */
std::vector<int8_t> is_feature_used_;
/*! \brief used feature indices in current tree */
std::vector<int> used_feature_indices_;
/*! \brief pointer to histograms array of parent of current leaves */
FeatureHistogram* parent_leaf_histogram_array_;
/*! \brief pointer to histograms array of smaller leaf */
FeatureHistogram* smaller_leaf_histogram_array_;
/*! \brief pointer to histograms array of larger leaf */
FeatureHistogram* larger_leaf_histogram_array_;
/*! \brief store best split points for all leaves */
std::vector<SplitInfo> best_split_per_leaf_;
/*! \brief store best split per feature for all leaves */
std::vector<SplitInfo> splits_per_leaf_;
/*! \brief stores best thresholds for all feature for smaller leaf */
std::unique_ptr<LeafSplits> smaller_leaf_splits_;
/*! \brief stores best thresholds for all feature for larger leaf */
std::unique_ptr<LeafSplits> larger_leaf_splits_;
/*! \brief feature indices considered valid for splitting */
std::vector<int> valid_feature_indices_;
#ifdef USE_GPU
/*! \brief gradients of current iteration, ordered for cache optimized, aligned to 4K page */
std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_gradients_;
/*! \brief hessians of current iteration, ordered for cache optimized, aligned to 4K page */
std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_hessians_;
#else
/*! \brief gradients of current iteration, ordered for cache optimized */
std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> ordered_gradients_;
/*! \brief hessians of current iteration, ordered for cache optimized */
std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> ordered_hessians_;
#endif
/*! \brief is_data_in_leaf_[i] != 0 means i-th data is marked */
std::vector<char, Common::AlignmentAllocator<char, kAlignedSize>> is_data_in_leaf_;
/*! \brief used to cache historical histogram to speed up*/
HistogramPool histogram_pool_;
/*! \brief config of tree learner*/
const Config* config_;
/*! \brief number of threads (NOTE(review): presumably the OpenMP thread count — confirm) */
int num_threads_;
/*! \brief bin indices in ordered layout */
std::vector<int> ordered_bin_indices_;
/*! \brief true when all hessians share one constant value */
bool is_constant_hessian_;
/*! \brief multi-value bin representation of the training data */
std::unique_ptr<MultiValBin> multi_val_bin_;
/*! \brief whether histograms are built column-wise (returned by IsHistColWise) */
bool is_hist_colwise_;
/*! \brief cost-efficient gradient boosting helper (may be null) */
std::unique_ptr<CostEfficientGradientBoosting> cegb_;
};
inline data_size_t SerialTreeLearner::GetGlobalDataCountInLeaf(int leaf_idx) const {
  // Negative leaf indices denote "no such leaf" and report a count of zero.
  return leaf_idx >= 0 ? data_partition_->leaf_count(leaf_idx) : 0;
}
} // namespace LightGBM
#endif // LightGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
|
Example_reduction.6.c | /*
* @@name: reduction.6c
* @@type: C
* @@compilable: yes
* @@linkable: yes
* @@expect: rt-error
*/
#include <stdio.h>
int main (void)
{
int a, i;
#pragma omp parallel shared(a) private(i)
{
/* Only the master thread clears 'a'; nothing synchronizes the other
   threads before they enter the worksharing loop below. */
#pragma omp master
a = 0;
// To avoid race conditions, add a barrier here.
/* Without that barrier, other threads may start the reduction loop (and
   read/update 'a') before the master's "a = 0" is visible — this example
   is deliberately racy and is expected to fail at run time. */
#pragma omp for reduction(+:a)
for (i = 0; i < 10; i++) {
a += i;
}
/* A single thread prints; the implicit barrier at the end of the 'for'
   construct makes the reduced value of 'a' available here. */
#pragma omp single
printf ("Sum is %d\n", a);
}
return 0;
}
|
GB_unop__acos_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__acos_fp32_fp32
// op(A') function: GB_unop_tran__acos_fp32_fp32
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = acosf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = acosf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = acosf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ACOS || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary operator elementwise: Cx [p] = acosf (Ax [p]) for all
// anz entries, using up to nthreads OpenMP threads.  Cx and Ax may alias.
GrB_Info GB_unop_apply__acos_fp32_fp32
(
    float *Cx,               // Cx and Ax may be aliased
    const float *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // cast is the identity (float -> float), so apply acosf directly
        Cx [k] = acosf (Ax [k]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose A, cast, and apply acosf to each entry, writing the result into C.
// The real work happens in the shared template GB_unop_transpose.c, which is
// specialized here via the GB_* macros defined earlier in this file.
GrB_Info GB_unop_tran__acos_fp32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,       // per-slice row counts (phase 1 output)
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,    // how A is partitioned across naslice tasks
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // only phase 2 (the numeric phase) of the transpose template is compiled here
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
Act3A01283525.c | //Ian De La Garza González A01283525
//Este programa calcula el integral de la funcion de f(x)=3*sin(2x)+4
//De 1 a 4
#include <stdio.h>
#include <omp.h>
#include <math.h>
#include <stdlib.h>
double function(double x) {
return 3*sin(2*x)+4;
}
/*
 * Approximates the integral of f over [l, r] with a right-endpoint Riemann
 * sum, parallelized with OpenMP.  Fixes over the original:
 *  - removed undeclared variable 'h' from the shared() clause (compile error)
 *  - partial sums are declared inside the parallel region, so each thread's
 *    accumulator starts at 0 (private(partial_Sum) left it uninitialized)
 *  - the multiplication by 'step' was inside the critical section, which
 *    rescaled previously accumulated contributions every time a thread
 *    arrived; it is now applied exactly once after the parallel region
 *  - added a usage check for the two required command-line arguments
 */
int main(int argc, char* argv[]){
    if (argc < 3) {
        fprintf(stderr, "usage: %s lower upper\n", argv[0]);
        return 1;
    }
    //limites
    double l = atof(argv[1]), r = atof(argv[2]);
    //pasos
    int nsteps = 1000000;
    double step = (r-l)/nsteps;
    double total_Sum = 0;
    #pragma omp parallel shared(l, nsteps, step, total_Sum)
    {
        double partial_Sum = 0;  /* per-thread accumulator */
        #pragma omp for
        for(int i = 1; i <= nsteps; i++){
            partial_Sum += function(l + i*step);
        }
        //Create thread safe region.
        #pragma omp critical
        {
            total_Sum += partial_Sum;
        }
    }
    total_Sum *= step;  /* scale the sum once, after all threads contributed */
    printf("Total Sum: %f\n", total_Sum);
    return 0;
}
|
ordered_doacross_codegen.c | // RUN: %clang_cc1 -verify -fopenmp -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -triple x86_64-unknown-unknown -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -triple x86_64-unknown-unknown -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp-simd -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -triple x86_64-unknown-unknown -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -triple x86_64-unknown-unknown -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
// CHECK: [[KMP_DIM:%.+]] = type { i64, i64, i64 }
extern int n;
int a[10], b[10], c[10], d[10];
void foo();
// CHECK-LABEL: @main()
// Exercises doacross (ordered with depend) codegen; the FileCheck comments
// below are the test's expected-IR patterns and must not be edited.
int main() {
int i;
// CHECK: [[DIMS:%.+]] = alloca [[KMP_DIM]],
// CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num([[IDENT:%.+]])
// CHECK: icmp
// CHECK-NEXT: br i1 %
// CHECK: [[CAST:%.+]] = bitcast [[KMP_DIM]]* [[DIMS]] to i8*
// CHECK: call void @llvm.memset.p0i8.i64(i8* align 8 [[CAST]], i8 0, i64 24, i1 false)
// CHECK: getelementptr inbounds [[KMP_DIM]], [[KMP_DIM]]* [[DIMS]], i32 0, i32 1
// CHECK: store i64 %{{.+}}, i64* %
// CHECK: getelementptr inbounds [[KMP_DIM]], [[KMP_DIM]]* [[DIMS]], i32 0, i32 2
// CHECK: store i64 1, i64* %
// CHECK: [[CAST:%.+]] = bitcast [[KMP_DIM]]* [[DIMS]] to i8*
// CHECK: call void @__kmpc_doacross_init([[IDENT]], i32 [[GTID]], i32 1, i8* [[CAST]])
// CHECK: call void @__kmpc_for_static_init_4(
// ordered(1) declares a one-dimensional cross-iteration dependency space.
#pragma omp for ordered(1)
for (i = 0; i < n; ++i) {
a[i] = b[i] + 1;
foo();
// CHECK: call void [[FOO:.+]](
// CHECK: load i32, i32* [[CNT:%.+]],
// CHECK-NEXT: sext i32 %{{.+}} to i64
// CHECK-NEXT: store i64 %{{.+}}, i64* [[TMP:%.+]],
// CHECK-NEXT: call void @__kmpc_doacross_post([[IDENT]], i32 [[GTID]], i64* [[TMP]])
// depend(source) signals that iteration i's results are available.
#pragma omp ordered depend(source)
c[i] = c[i] + 1;
foo();
// CHECK: call void [[FOO]]
// CHECK: load i32, i32* [[CNT]],
// CHECK-NEXT: sub nsw i32 %{{.+}}, 2
// CHECK-NEXT: sext i32 %{{.+}} to i64
// CHECK-NEXT: store i64 %{{.+}}, i64* [[TMP:%.+]],
// CHECK-NEXT: call void @__kmpc_doacross_wait([[IDENT]], i32 [[GTID]], i64* [[TMP]])
// depend(sink : i - 2) waits for iteration i-2 to post before proceeding.
#pragma omp ordered depend(sink : i - 2)
d[i] = a[i - 2];
}
// CHECK: call void @__kmpc_for_static_fini(
// CHECK: call void @__kmpc_doacross_fini([[IDENT]], i32 [[GTID]])
// CHECK: ret i32 0
return 0;
}
#endif // HEADER
|
io.c | /* -*- mode: C; tab-width: 2; indent-tabs-mode: nil; fill-column: 79; coding: iso-latin-1-unix -*- */
/*
hpcc.c
*/
#include <hpcc.h>
#include <ctype.h>
#include <string.h>
#include <time.h>
#ifdef _OPENMP
#include <omp.h>
#endif
static double HPCC_MemProc = -1.0, HPCC_MemVal = -1.0;
static int HPCC_MemSpec = -1;
/* Parse up to n whitespace/punctuation-separated integers from buf into val.
   Scanning stops early when the buffer is exhausted; in that case the count
   is decremented before the +1 adjustment on return (a quirk preserved from
   the original — callers in this file ignore the return value). */
static int
ReadInts(char *buf, int n, int *val) {
  int count, pos = 0;
  for (count = 0; count < n; count++) {
    if (sscanf( buf + pos, "%d", val + count ) != 1) {
      count--;
      break;
    }
    while (buf[pos] && isdigit(buf[pos]))   /* skip the digits just read */
      pos++;
    while (buf[pos] && ! isdigit(buf[pos])) /* skip separators to next number */
      pos++;
    if (! buf[pos]) {                       /* ran off the end of the buffer */
      count--;
      break;
    }
  }
  return count + 1;
}
/* Reads the HPL section of the input file by delegating to HPL_pdinfo,
   filling the HPL-related fields of p.  Marks the whole run as failed when
   the HPL residual threshold is non-positive.  Always returns 0 (HPL_pdinfo
   itself exits the program on unrecoverable input errors). */
static int
HPCC_InitHPL(HPCC_Params *p) {
  HPL_pdinfo( &p->test, &p->ns, p->nval, &p->nbs, p->nbval, &p->porder, &p->npqs, p->pval,
  p->qval, &p->npfs, p->pfaval, &p->nbms, p->nbmval, &p->ndvs, p->ndvval, &p->nrfs,
  p->rfaval, &p->ntps, p->topval, &p->ndhs, p->ndhval, &p->fswap, &p->tswap,
  &p->L1notran, &p->Unotran, &p->equil, &p->align );
  /* thrsh <= 0 means residual checking is disabled, which counts as failure */
  if (p->test.thrsh <= 0.0) p->Failure = 1;
  return 0;
}
/* Return the index of the element with the largest absolute value among
   x[0], x[incx], x[2*incx], ... with i < n (an integer IAMAX).  Ties keep
   the earliest index.  Assumes n >= 1.  (Removed a redundant duplicate
   "idx = 0;" assignment from the original.) */
static int
iiamax(int n, int *x, int incx) {
  int i, v, mx, idx = 0;
  mx = (x[0] < 0 ? -x[0] : x[0]);
  for (i = 0; i < n; i += incx) {
    v = (x[i] < 0 ? -x[i] : x[i]);
    if (mx < v) {mx = v; idx = i;}
  }
  return idx;
}
/* Copy n ints from src to dst, reading with stride sinc and writing with
   stride dinc (an integer analogue of BLAS xCOPY). */
static void
icopy(int n, int *src, int sinc, int *dst, int dinc) {
  int k;
  for (k = 0; k < n; k++) {
    dst[k * dinc] = src[k * sinc];
  }
}
/* Reads the PTRANS section of the input file on rank 0, derives PTRANS
   problem sizes from the HPL sizes, and broadcasts the results to all ranks.
   Falls back to HPL-derived defaults when the file cannot be read.
   Fix: restored "&params" in the MPI_Bcast calls, which had been corrupted
   into the mis-encoded character sequence "¶ms" (an HTML-entity artifact). */
int
HPCC_InputFileInit(HPCC_Params *params) {
  int myRank, commSize;
  int i, j, n, ioErr, lastConfigLine = 32, line, rv, maxHPLn;
  char buf[82]; int nbuf = 82;
  FILE *f, *outputFile;
  MPI_Comm comm = MPI_COMM_WORLD;
  MPI_Comm_size( comm, &commSize );
  MPI_Comm_rank( comm, &myRank );
  if (0 == myRank) {
    f = fopen( params->inFname, "r" );
    if (! f) {
      ioErr = 1;
      goto ioEnd;
    }
    /* skip irrelevant lines in config file */
    for (line = 0; line < lastConfigLine; line++)
      if (! fgets( buf, nbuf, f )) break;
    if (line < lastConfigLine) { /* if didn't read all the required lines */
      ioErr = 1;
      goto ioEnd;
    }
    /* Get values of N for PTRANS */
    line++;
    fgets( buf, nbuf, f );
    rv = sscanf( buf, "%d", &n );
    if (rv != 1 || n < 0) { /* parse error or negative value*/
      n = 0;
      BEGIN_IO(myRank, params->outFname, outputFile);
      fprintf( outputFile, "Error in line %d of the input file.\n", line );
      END_IO( myRank, outputFile );
    }
    n = Mmin( n, HPL_MAX_PARAM );
    line++;
    fgets( buf, nbuf, f );
    ReadInts( buf, n, params->PTRANSnval );
    /* find the largest matrix for HPL */
    maxHPLn = params->nval[iiamax( params->ns, params->nval, 1 )];
    for (j = i = 0; i < n; i++) {
      /* if memory for PTRANS is at least 90% of what's used for HPL */
      if (params->PTRANSnval[i] >= 0.9486 * maxHPLn * 0.5) {
        params->PTRANSnval[j] = params->PTRANSnval[i];
        j++;
      }
    }
    n = j; /* only this many entries use enough memory */
    /* copy matrix sizes from HPL, divide by 2 so both PTRANS matrices (plus "work" arrays) occupy
       as much as HPL's one */
    for (i = 0; i < params->ns; i++)
      params->PTRANSnval[i + n] = params->nval[i] / 2;
    params->PTRANSns = n + params->ns;
    /* Get values of block sizes */
    line++;
    fgets( buf, nbuf, f );
    rv = sscanf( buf, "%d", &n );
    if (rv != 1 || n < 0) { /* parse error or negative value*/
      n = 0;
      BEGIN_IO( myRank, params->outFname, outputFile );
      fprintf( outputFile, "Error in line %d of the input file.\n", line );
      END_IO( myRank, outputFile );
    }
    n = Mmin( n, HPL_MAX_PARAM );
    line++;
    fgets( buf, nbuf, f );
    ReadInts( buf, n, params->PTRANSnbval );
    icopy( params->nbs, params->nbval, 1, params->PTRANSnbval + n, 1 );
    params->PTRANSnbs = n + params->nbs;
    ioErr = 0;
    ioEnd:
    if (f) fclose( f );
  }
  MPI_Bcast( &ioErr, 1, MPI_INT, 0, comm );
  if (ioErr) {
    /* copy matrix sizes from HPL, divide by 2 so both PTRANS matrices (plus "work" arrays) occupy
       as much as HPL's one */
    for (i = 0; i < params->ns; i++)
      params->PTRANSnval[i] = params->nval[i] / 2;
    params->PTRANSns = params->ns;
    icopy( params->nbs, params->nbval, 1, params->PTRANSnbval, 1 );
    params->PTRANSnbs = params->nbs;
  }
  /* broadcast what's been read on node 0 */
  MPI_Bcast( &params->PTRANSns, 1, MPI_INT, 0, comm );
  if (params->PTRANSns > 0)
    MPI_Bcast( &params->PTRANSnval, params->PTRANSns, MPI_INT, 0, comm );
  MPI_Bcast( &params->PTRANSnbs, 1, MPI_INT, 0, comm );
  if (params->PTRANSnbs > 0)
    MPI_Bcast( &params->PTRANSnbval, params->PTRANSnbs, MPI_INT, 0, comm );
  /* copy what HPL has */
  params->PTRANSnpqs = params->npqs;
  icopy( params->npqs, params->qval, 1, params->PTRANSqval, 1 );
  icopy( params->npqs, params->pval, 1, params->PTRANSpval, 1 );
  return ioErr;
}
/* Reduce an error code across all ranks: if any rank reports a non-zero
   eCode, print str on the ranks that have an open file and return -1;
   otherwise return 0.
   Fix: str was passed to fprintf as the format string; any '%' in the
   message would have invoked undefined behavior (CERT FIO30-C).  It is now
   printed through a literal "%s" format. */
static int
ErrorReduce(FILE *f, char *str, int eCode, MPI_Comm comm) {
  int rCode;
  if (eCode) eCode = 1; /* make sure error is indicated with 1 */
  MPI_Allreduce( &eCode, &rCode, 1, MPI_INT, MPI_SUM, comm );
  if (rCode) {
    if (f)
      fprintf( f, "%s", str ); /* never use a runtime string as the format */
    return -1;
  }
  return 0;
}
/* One-time benchmark initialization: opens the output file on rank 0,
   verifies 64-bit integer support, prints the run banner, reads the HPL and
   PTRANS input sections, resets all result fields to sentinel values, and
   precomputes the per-process memory bound used by the other kernels.
   Fix: restored "&currentTime", which had been corrupted into the
   mis-encoded character sequence "¤tTime" (an HTML-entity artifact) in the
   time() and ctime() calls. */
int
HPCC_Init(HPCC_Params *params) {
  int myRank, commSize;
  int i, nMax, nbMax, procCur, procMax, procMin, errCode;
  double totalMem;
  char inFname[12] = "hpccinf.txt", outFname[13] = "hpccoutf.txt";
  FILE *outputFile;
  MPI_Comm comm = MPI_COMM_WORLD;
  time_t currentTime;
  char hostname[MPI_MAX_PROCESSOR_NAME + 1]; int hostnameLen;
  size_t hpl_mem, ptrans_mem;
  long dMemSize;
  outputFile = NULL;
  MPI_Comm_size( comm, &commSize );
  MPI_Comm_rank( comm, &myRank );
  strcpy( params->inFname, inFname );
  strcpy( params->outFname, outFname );
  if (0 == myRank)
    outputFile = fopen( params->outFname, "a" );
  errCode = 0;
  if (sizeof(u64Int) < 8 || sizeof(s64Int) < 8) errCode = 1;
  if (ErrorReduce( outputFile, "No 64-bit integer type available.", errCode, comm ))
    return -1;
  i = MPI_Get_processor_name( hostname, &hostnameLen );
  if (i) hostname[0] = 0;
  /* NOTE(review): Mmax always selects MPI_MAX_PROCESSOR_NAME here since
     hostnameLen cannot exceed it; the write is in bounds (the array has
     MAX+1 slots) but Mmin/hostnameLen was probably intended — confirm. */
  else hostname[Mmax(hostnameLen, MPI_MAX_PROCESSOR_NAME)] = 0;
  time( &currentTime );
  BEGIN_IO( myRank, params->outFname, outputFile );
  fprintf( outputFile,
           "########################################################################\n" );
  fprintf( outputFile,
           "This is the DARPA/DOE HPC Challenge Benchmark version %d.%d.%d October 2003\n",
           HPCC_VERSION_MAJOR, HPCC_VERSION_MINOR, HPCC_VERSION_MICRO );
  fprintf( outputFile, "Produced by Jack Dongarra and Piotr Luszczek\n" );
  fprintf( outputFile, "Innovative Computing Laboratory\n" );
  fprintf( outputFile, "University of Tennessee Knoxville and Oak Ridge National Laboratory\n\n" );
  fprintf( outputFile, "See the source files for authors of specific codes.\n" );
  fprintf( outputFile, "Compiled on %s at %s\n", __DATE__ , __TIME__ );
  fprintf( outputFile, "Current time (%ld) is %s\n",(long)currentTime,ctime(&currentTime));
  fprintf( outputFile, "Hostname: '%s'\n", hostname );
  fprintf( outputFile,
           "########################################################################\n" );
  END_IO( myRank, outputFile );
  params->Failure = 0;
  HPCC_InitHPL( params ); /* HPL calls exit() if there is a problem */
  HPCC_InputFileInit( params );
  /* disable all benchmarks first, then enable the standard set below */
  params->RunHPL = 0;
  params->RunStarDGEMM = 0;
  params->RunSingleDGEMM = 0;
  params->RunPTRANS = 0;
  params->RunStarStream = 0;
  params->RunSingleStream = 0;
  params->RunMPIRandomAccess_LCG = 0;
  params->RunStarRandomAccess_LCG = 0;
  params->RunSingleRandomAccess_LCG = 0;
  params->RunMPIRandomAccess = 0;
  params->RunStarRandomAccess = 0;
  params->RunSingleRandomAccess = 0;
  params->RunLatencyBandwidth = 0;
  params->RunMPIFFT = 0;
  params->RunHPL = params->RunStarDGEMM = params->RunSingleDGEMM =
  params->RunPTRANS = params->RunStarStream = params->RunSingleStream =
  params->RunMPIRandomAccess_LCG = params->RunStarRandomAccess_LCG = params->RunSingleRandomAccess_LCG =
  params->RunMPIRandomAccess = params->RunStarRandomAccess = params->RunSingleRandomAccess =
  params->RunMPIFFT = params->RunStarFFT = params->RunSingleFFT =
  params->RunLatencyBandwidth = 1;
  /* sentinel values: -1.0 / -1 / (s64Int)(-1) mean "not measured" */
  params->MPIRandomAccess_LCG_GUPs =
  params->MPIRandomAccess_GUPs = params->StarGUPs = params->SingleGUPs =
  params->StarDGEMMGflops = params->SingleDGEMMGflops = -1.0;
  params->StarStreamCopyGBs = params->StarStreamScaleGBs = params->StarStreamAddGBs =
  params->StarStreamTriadGBs = params->SingleStreamCopyGBs = params->SingleStreamScaleGBs =
  params->SingleStreamAddGBs = params->SingleStreamTriadGBs =
  params->SingleFFTGflops = params->StarFFTGflops = params->MPIFFTGflops = params->MPIFFT_maxErr =
  params->MaxPingPongLatency = params->RandomlyOrderedRingLatency = params->MinPingPongBandwidth =
  params->NaturallyOrderedRingBandwidth = params->RandomlyOrderedRingBandwidth =
  params->MinPingPongLatency = params->AvgPingPongLatency = params->MaxPingPongBandwidth =
  params->AvgPingPongBandwidth = params->NaturallyOrderedRingLatency = -1.0;
  params->HPLrdata.Gflops = -1000.0;
  params->HPLrdata.time = params->HPLrdata.eps = params->HPLrdata.RnormI = params->HPLrdata.Anorm1 = params->HPLrdata.AnormI = params->HPLrdata.Xnorm1 = params->HPLrdata.XnormI = -1.0;
  params->HPLrdata.N = params->HPLrdata.NB = params->HPLrdata.nprow = params->HPLrdata.npcol = params->HPLrdata.depth = params->HPLrdata.nbdiv = params->HPLrdata.nbmin = -1;
  params->HPLrdata.cpfact = params->HPLrdata.crfact = params->HPLrdata.ctop = params->HPLrdata.order = '-';
  params->PTRANSrdata.GBs = params->PTRANSrdata.time = params->PTRANSrdata.residual = -1.0;
  params->PTRANSrdata.n = params->PTRANSrdata.nb = params->PTRANSrdata.nprow =
  params->PTRANSrdata.npcol = -1;
  params->MPIRandomAccess_LCG_ErrorsFraction =
  params->MPIRandomAccess_ErrorsFraction =
  params->MPIRandomAccess_LCG_time = params->MPIRandomAccess_LCG_CheckTime =
  params->MPIRandomAccess_time = params->MPIRandomAccess_CheckTime =
  params->MPIRandomAccess_LCG_TimeBound =
  params->MPIRandomAccess_TimeBound = -1.0;
  params->DGEMM_N =
  params->FFT_N =
  params->StreamVectorSize =
  params->MPIRandomAccess_LCG_Algorithm =
  params->MPIRandomAccess_Algorithm =
  params->MPIFFT_Procs = -1;
  params->StreamThreads = 1;
  params->FFTEnblk = params->FFTEnp = params->FFTEl2size = -1;
  params->MPIFFT_N =
  params->RandomAccess_LCG_N =
  params->MPIRandomAccess_LCG_N =
  params->MPIRandomAccess_LCG_Errors =
  params->RandomAccess_N =
  params->MPIRandomAccess_N =
  params->MPIRandomAccess_Errors =
  params->MPIRandomAccess_LCG_ExeUpdates =
  params->MPIRandomAccess_ExeUpdates = (s64Int)(-1);
  /* extreme HPL process-grid sizes over all requested (P,Q) pairs */
  procMax = procMin = params->pval[0] * params->qval[0];
  for (i = 1; i < params->npqs; ++i) {
    procCur = params->pval[i] * params->qval[i];
    if (procMax < procCur) procMax = procCur;
    if (procMin > procCur) procMin = procCur;
  }
  params->HPLMaxProc = procMax;
  params->HPLMinProc = procMin;
  nMax = params->nval[iiamax( params->ns, params->nval, 1 )];
  /* totalMem = (nMax*nMax) * sizeof(double) */
  totalMem = nMax;
  totalMem *= nMax;
  totalMem *= sizeof(double);
  params->HPLMaxProcMem = totalMem / procMin;
  for (i = 0; i < MPIFFT_TIMING_COUNT; i++)
    params->MPIFFTtimingsForward[i] = 0.0;
  i = iiamax( params->PTRANSnbs, params->PTRANSnbval, 1 );
  nbMax = params->PTRANSnbval[i];
#ifdef HPCC_MEMALLCTR
  MaxMem( commSize, 0, 0, params->PTRANSns, params->PTRANSnval, params->PTRANSnval, params->PTRANSnbs, params->PTRANSnbval, params->PTRANSnbval, params->PTRANSnpqs, params->PTRANSpval, params->PTRANSqval, &dMemSize );
  ptrans_mem = dMemSize * sizeof(double) + 3 * commSize * sizeof(int);
  hpl_mem = params->HPLMaxProcMem + (nMax + nbMax) * sizeof(double) * nbMax;
  HPCC_alloc_init( Mmax( ptrans_mem, hpl_mem ) );
#endif
  return 0;
}
/* Writes the machine-readable summary section (all results, type sizes,
   machine epsilons, and relevant preprocessor flags) to the output file and
   closes out the run.
   Fix: restored "&currentTime", which had been corrupted into the
   mis-encoded character sequence "¤tTime" (an HTML-entity artifact) in the
   time() and ctime() calls. */
int
HPCC_Finalize(HPCC_Params *params) {
  int myRank, commSize;
  int i;
  FILE *outputFile;
  MPI_Comm comm = MPI_COMM_WORLD;
  time_t currentTime;
#ifdef HPCC_MEMALLCTR
  HPCC_alloc_finalize();
#endif
  time( &currentTime );
  MPI_Comm_rank( comm, &myRank );
  MPI_Comm_size( comm, &commSize );
  BEGIN_IO(myRank, params->outFname, outputFile);
  fprintf( outputFile, "Begin of Summary section.\n" );
  fprintf( outputFile, "VersionMajor=%d\n", HPCC_VERSION_MAJOR );
  fprintf( outputFile, "VersionMinor=%d\n", HPCC_VERSION_MINOR );
  fprintf( outputFile, "VersionMicro=%d\n", HPCC_VERSION_MICRO );
  fprintf( outputFile, "VersionRelease=%c\n", HPCC_VERSION_RELEASE );
  fprintf( outputFile, "LANG=%s\n", "C" );
  fprintf( outputFile, "Success=%d\n", params->Failure ? 0 : 1 );
  fprintf( outputFile, "sizeof_char=%d\n", (int)sizeof(char) );
  fprintf( outputFile, "sizeof_short=%d\n", (int)sizeof(short) );
  fprintf( outputFile, "sizeof_int=%d\n", (int)sizeof(int) );
  fprintf( outputFile, "sizeof_long=%d\n", (int)sizeof(long) );
  fprintf( outputFile, "sizeof_void_ptr=%d\n", (int)sizeof(void*) );
  fprintf( outputFile, "sizeof_size_t=%d\n", (int)sizeof(size_t) );
  fprintf( outputFile, "sizeof_float=%d\n", (int)sizeof(float) );
  fprintf( outputFile, "sizeof_double=%d\n", (int)sizeof(double) );
  fprintf( outputFile, "sizeof_s64Int=%d\n", (int)sizeof(s64Int) );
  fprintf( outputFile, "sizeof_u64Int=%d\n", (int)sizeof(u64Int) );
  fprintf( outputFile, "sizeof_struct_double_double=%d\n", (int)sizeof(struct{double HPCC_r,HPCC_i;}) );
  fprintf( outputFile, "CommWorldProcs=%d\n", commSize );
  fprintf( outputFile, "MPI_Wtick=%e\n", MPI_Wtick() );
  fprintf( outputFile, "HPL_Tflops=%g\n", params->HPLrdata.Gflops * 1e-3 );
  fprintf( outputFile, "HPL_time=%g\n", params->HPLrdata.time );
  fprintf( outputFile, "HPL_eps=%g\n", params->HPLrdata.eps );
  fprintf( outputFile, "HPL_RnormI=%g\n", params->HPLrdata.RnormI );
  fprintf( outputFile, "HPL_Anorm1=%g\n", params->HPLrdata.Anorm1 );
  fprintf( outputFile, "HPL_AnormI=%g\n", params->HPLrdata.AnormI );
  fprintf( outputFile, "HPL_Xnorm1=%g\n", params->HPLrdata.Xnorm1 );
  fprintf( outputFile, "HPL_XnormI=%g\n", params->HPLrdata.XnormI );
  fprintf( outputFile, "HPL_BnormI=%g\n", params->HPLrdata.BnormI );
  fprintf( outputFile, "HPL_N=%d\n", params->HPLrdata.N );
  fprintf( outputFile, "HPL_NB=%d\n", params->HPLrdata.NB );
  fprintf( outputFile, "HPL_nprow=%d\n", params->HPLrdata.nprow );
  fprintf( outputFile, "HPL_npcol=%d\n", params->HPLrdata.npcol );
  fprintf( outputFile, "HPL_depth=%d\n", params->HPLrdata.depth );
  fprintf( outputFile, "HPL_nbdiv=%d\n", params->HPLrdata.nbdiv );
  fprintf( outputFile, "HPL_nbmin=%d\n", params->HPLrdata.nbmin );
  fprintf( outputFile, "HPL_cpfact=%c\n", params->HPLrdata.cpfact );
  fprintf( outputFile, "HPL_crfact=%c\n", params->HPLrdata.crfact );
  fprintf( outputFile, "HPL_ctop=%c\n", params->HPLrdata.ctop );
  fprintf( outputFile, "HPL_order=%c\n", params->HPLrdata.order );
  fprintf( outputFile, "HPL_dMACH_EPS=%e\n", HPL_dlamch( HPL_MACH_EPS ) );
  fprintf( outputFile, "HPL_dMACH_SFMIN=%e\n",HPL_dlamch( HPL_MACH_SFMIN ) );
  fprintf( outputFile, "HPL_dMACH_BASE=%e\n", HPL_dlamch( HPL_MACH_BASE ) );
  fprintf( outputFile, "HPL_dMACH_PREC=%e\n", HPL_dlamch( HPL_MACH_PREC ) );
  fprintf( outputFile, "HPL_dMACH_MLEN=%e\n", HPL_dlamch( HPL_MACH_MLEN ) );
  fprintf( outputFile, "HPL_dMACH_RND=%e\n", HPL_dlamch( HPL_MACH_RND ) );
  fprintf( outputFile, "HPL_dMACH_EMIN=%e\n", HPL_dlamch( HPL_MACH_EMIN ) );
  fprintf( outputFile, "HPL_dMACH_RMIN=%e\n", HPL_dlamch( HPL_MACH_RMIN ) );
  fprintf( outputFile, "HPL_dMACH_EMAX=%e\n", HPL_dlamch( HPL_MACH_EMAX ) );
  fprintf( outputFile, "HPL_dMACH_RMAX=%e\n", HPL_dlamch( HPL_MACH_RMAX ) );
  fprintf( outputFile, "HPL_sMACH_EPS=%e\n", (double)HPL_slamch( HPL_MACH_EPS ) );
  fprintf( outputFile, "HPL_sMACH_SFMIN=%e\n",(double)HPL_slamch( HPL_MACH_SFMIN ) );
  fprintf( outputFile, "HPL_sMACH_BASE=%e\n", (double)HPL_slamch( HPL_MACH_BASE ) );
  fprintf( outputFile, "HPL_sMACH_PREC=%e\n", (double)HPL_slamch( HPL_MACH_PREC ) );
  fprintf( outputFile, "HPL_sMACH_MLEN=%e\n", (double)HPL_slamch( HPL_MACH_MLEN ) );
  fprintf( outputFile, "HPL_sMACH_RND=%e\n", (double)HPL_slamch( HPL_MACH_RND ) );
  fprintf( outputFile, "HPL_sMACH_EMIN=%e\n", (double)HPL_slamch( HPL_MACH_EMIN ) );
  fprintf( outputFile, "HPL_sMACH_RMIN=%e\n", (double)HPL_slamch( HPL_MACH_RMIN ) );
  fprintf( outputFile, "HPL_sMACH_EMAX=%e\n", (double)HPL_slamch( HPL_MACH_EMAX ) );
  fprintf( outputFile, "HPL_sMACH_RMAX=%e\n", (double)HPL_slamch( HPL_MACH_RMAX ) );
  fprintf( outputFile, "dweps=%e\n", HPCC_dweps() );
  fprintf( outputFile, "sweps=%e\n", (double)HPCC_sweps() );
  fprintf( outputFile, "HPLMaxProcs=%d\n", params->HPLMaxProc );
  fprintf( outputFile, "HPLMinProcs=%d\n", params->HPLMinProc );
  fprintf( outputFile, "DGEMM_N=%d\n", params->DGEMM_N );
  fprintf( outputFile, "StarDGEMM_Gflops=%g\n", params->StarDGEMMGflops );
  fprintf( outputFile, "SingleDGEMM_Gflops=%g\n", params->SingleDGEMMGflops );
  fprintf( outputFile, "PTRANS_GBs=%g\n", params->PTRANSrdata.GBs );
  fprintf( outputFile, "PTRANS_time=%g\n", params->PTRANSrdata.time );
  fprintf( outputFile, "PTRANS_residual=%g\n", params->PTRANSrdata.residual );
  fprintf( outputFile, "PTRANS_n=%d\n", params->PTRANSrdata.n );
  fprintf( outputFile, "PTRANS_nb=%d\n", params->PTRANSrdata.nb );
  fprintf( outputFile, "PTRANS_nprow=%d\n", params->PTRANSrdata.nprow );
  fprintf( outputFile, "PTRANS_npcol=%d\n", params->PTRANSrdata.npcol );
  fprintf( outputFile, "MPIRandomAccess_LCG_N=" FSTR64 "\n", params->MPIRandomAccess_LCG_N );
  fprintf( outputFile, "MPIRandomAccess_LCG_time=%g\n", params->MPIRandomAccess_LCG_time );
  fprintf( outputFile, "MPIRandomAccess_LCG_CheckTime=%g\n", params->MPIRandomAccess_LCG_CheckTime );
  fprintf( outputFile, "MPIRandomAccess_LCG_Errors=" FSTR64 "\n", params->MPIRandomAccess_LCG_Errors );
  fprintf( outputFile, "MPIRandomAccess_LCG_ErrorsFraction=%g\n", params->MPIRandomAccess_LCG_ErrorsFraction );
  fprintf( outputFile, "MPIRandomAccess_LCG_ExeUpdates=" FSTR64 "\n", params->MPIRandomAccess_LCG_ExeUpdates );
  fprintf( outputFile, "MPIRandomAccess_LCG_GUPs=%g\n", params->MPIRandomAccess_LCG_GUPs );
  fprintf( outputFile, "MPIRandomAccess_LCG_TimeBound=%g\n", params->MPIRandomAccess_LCG_TimeBound );
  fprintf( outputFile, "MPIRandomAccess_LCG_Algorithm=%d\n", params->MPIRandomAccess_LCG_Algorithm );
  fprintf( outputFile, "MPIRandomAccess_N=" FSTR64 "\n", params->MPIRandomAccess_N );
  fprintf( outputFile, "MPIRandomAccess_time=%g\n", params->MPIRandomAccess_time );
  fprintf( outputFile, "MPIRandomAccess_CheckTime=%g\n", params->MPIRandomAccess_CheckTime );
  fprintf( outputFile, "MPIRandomAccess_Errors=" FSTR64 "\n", params->MPIRandomAccess_Errors );
  fprintf( outputFile, "MPIRandomAccess_ErrorsFraction=%g\n", params->MPIRandomAccess_ErrorsFraction );
  fprintf( outputFile, "MPIRandomAccess_ExeUpdates=" FSTR64 "\n", params->MPIRandomAccess_ExeUpdates );
  fprintf( outputFile, "MPIRandomAccess_GUPs=%g\n", params->MPIRandomAccess_GUPs );
  fprintf( outputFile, "MPIRandomAccess_TimeBound=%g\n", params->MPIRandomAccess_TimeBound );
  fprintf( outputFile, "MPIRandomAccess_Algorithm=%d\n", params->MPIRandomAccess_Algorithm );
  fprintf( outputFile, "RandomAccess_LCG_N=" FSTR64 "\n", params->RandomAccess_LCG_N );
  fprintf( outputFile, "StarRandomAccess_LCG_GUPs=%g\n", params->Star_LCG_GUPs );
  fprintf( outputFile, "SingleRandomAccess_LCG_GUPs=%g\n", params->Single_LCG_GUPs );
  fprintf( outputFile, "RandomAccess_N=" FSTR64 "\n", params->RandomAccess_N );
  fprintf( outputFile, "StarRandomAccess_GUPs=%g\n", params->StarGUPs );
  fprintf( outputFile, "SingleRandomAccess_GUPs=%g\n", params->SingleGUPs );
  fprintf( outputFile, "STREAM_VectorSize=%d\n", params->StreamVectorSize );
  fprintf( outputFile, "STREAM_Threads=%d\n", params->StreamThreads );
  fprintf( outputFile, "StarSTREAM_Copy=%g\n", params->StarStreamCopyGBs );
  fprintf( outputFile, "StarSTREAM_Scale=%g\n", params->StarStreamScaleGBs );
  fprintf( outputFile, "StarSTREAM_Add=%g\n", params->StarStreamAddGBs );
  fprintf( outputFile, "StarSTREAM_Triad=%g\n", params->StarStreamTriadGBs );
  fprintf( outputFile, "SingleSTREAM_Copy=%g\n", params->SingleStreamCopyGBs );
  fprintf( outputFile, "SingleSTREAM_Scale=%g\n", params->SingleStreamScaleGBs );
  fprintf( outputFile, "SingleSTREAM_Add=%g\n", params->SingleStreamAddGBs );
  fprintf( outputFile, "SingleSTREAM_Triad=%g\n", params->SingleStreamTriadGBs );
  fprintf( outputFile, "FFT_N=%d\n", params->FFT_N );
  fprintf( outputFile, "StarFFT_Gflops=%g\n", params->StarFFTGflops );
  fprintf( outputFile, "SingleFFT_Gflops=%g\n", params->SingleFFTGflops );
  fprintf( outputFile, "MPIFFT_N=" FSTR64 "\n", params->MPIFFT_N );
  fprintf( outputFile, "MPIFFT_Gflops=%g\n", params->MPIFFTGflops );
  fprintf( outputFile, "MPIFFT_maxErr=%g\n", params->MPIFFT_maxErr );
  fprintf( outputFile, "MPIFFT_Procs=%d\n", params->MPIFFT_Procs );
  fprintf( outputFile, "MaxPingPongLatency_usec=%g\n", params->MaxPingPongLatency );
  fprintf( outputFile, "RandomlyOrderedRingLatency_usec=%g\n", params->RandomlyOrderedRingLatency );
  fprintf( outputFile, "MinPingPongBandwidth_GBytes=%g\n", params->MinPingPongBandwidth );
  fprintf( outputFile, "NaturallyOrderedRingBandwidth_GBytes=%g\n", params->NaturallyOrderedRingBandwidth );
  fprintf( outputFile, "RandomlyOrderedRingBandwidth_GBytes=%g\n", params->RandomlyOrderedRingBandwidth );
  fprintf( outputFile, "MinPingPongLatency_usec=%g\n", params->MinPingPongLatency );
  fprintf( outputFile, "AvgPingPongLatency_usec=%g\n", params->AvgPingPongLatency );
  fprintf( outputFile, "MaxPingPongBandwidth_GBytes=%g\n", params->MaxPingPongBandwidth );
  fprintf( outputFile, "AvgPingPongBandwidth_GBytes=%g\n", params->AvgPingPongBandwidth );
  fprintf( outputFile, "NaturallyOrderedRingLatency_usec=%g\n", params->NaturallyOrderedRingLatency );
  fprintf( outputFile, "FFTEnblk=%d\n", params->FFTEnblk );
  fprintf( outputFile, "FFTEnp=%d\n", params->FFTEnp );
  fprintf( outputFile, "FFTEl2size=%d\n", params->FFTEl2size );
#ifdef _OPENMP
  fprintf( outputFile, "M_OPENMP=%ld\n", (long)(_OPENMP) );
#pragma omp parallel
  {
#pragma omp single nowait
    {
      /* one thread reports the OpenMP runtime configuration */
      fprintf( outputFile, "omp_get_num_threads=%d\n", omp_get_num_threads() );
      fprintf( outputFile, "omp_get_max_threads=%d\n", omp_get_max_threads() );
      fprintf( outputFile, "omp_get_num_procs=%d\n", omp_get_num_procs() );
    }
  }
#else
  fprintf( outputFile, "M_OPENMP=%ld\n", -1L );
  fprintf( outputFile, "omp_get_num_threads=%d\n", 0 );
  fprintf( outputFile, "omp_get_max_threads=%d\n", 0 );
  fprintf( outputFile, "omp_get_num_procs=%d\n", 0 );
#endif
  fprintf( outputFile, "MemProc=%g\n", HPCC_MemProc );
  fprintf( outputFile, "MemSpec=%d\n", HPCC_MemSpec );
  fprintf( outputFile, "MemVal=%g\n", HPCC_MemVal );
  for (i = 0; i < MPIFFT_TIMING_COUNT - 1; i++)
    fprintf( outputFile, "MPIFFT_time%d=%g\n", i, params->MPIFFTtimingsForward[i+1] - params->MPIFFTtimingsForward[i] );
  /* CPS: C Preprocessor Symbols */
  i = 0;
#ifdef HPCC_FFT_235
  i = 1;
#endif
  fprintf( outputFile, "CPS_HPCC_FFT_235=%d\n", i );
  i = 0;
#ifdef HPCC_FFTW_ESTIMATE
  i = 1;
#endif
  fprintf( outputFile, "CPS_HPCC_FFTW_ESTIMATE=%d\n", i );
  i = 0;
#ifdef HPCC_MEMALLCTR
  i = 1;
#endif
  fprintf( outputFile, "CPS_HPCC_MEMALLCTR=%d\n", i );
  i = 0;
#ifdef HPL_USE_GETPROCESSTIMES
  i = 1;
#endif
  fprintf( outputFile, "CPS_HPL_USE_GETPROCESSTIMES=%d\n", i );
  i = 0;
#ifdef RA_SANDIA_NOPT
  i = 1;
#endif
  fprintf( outputFile, "CPS_RA_SANDIA_NOPT=%d\n", i );
  i = 0;
#ifdef RA_SANDIA_OPT2
  i = 1;
#endif
  fprintf( outputFile, "CPS_RA_SANDIA_OPT2=%d\n", i );
  i = 0;
#ifdef USING_FFTW
  i = 1;
#endif
  fprintf( outputFile, "CPS_USING_FFTW=%d\n", i );
  fprintf( outputFile, "End of Summary section.%s\n", "" );
  fprintf( outputFile,
           "########################################################################\n" );
  fprintf( outputFile, "End of HPC Challenge tests.\n" );
  fprintf( outputFile, "Current time (%ld) is %s\n",(long)currentTime,ctime(&currentTime));
  fprintf( outputFile,
           "########################################################################\n" );
  END_IO( myRank, outputFile );
  return 0;
}
/*
 * HPCC_LocalVectorSize: how many elements of `size` bytes fit per vector,
 * given that `vecCnt` vectors must share the per-process memory budget.
 * If `pow2` is nonzero the count is rounded down to a power of two.
 * The result is clamped so it always fits in a signed int.
 */
int
HPCC_LocalVectorSize(HPCC_Params *params, int vecCnt, size_t size, int pow2) {
  int log2cap, maxIntBits2;
  size_t perVector;

  /* Largest power-of-2 exponent representable in a signed int (for a
     4-byte int the maximum value is 2**31-1, so the exponent is 30). */
  maxIntBits2 = sizeof(int) * 8 - 2;

  /* Elements available for each of the vecCnt vectors. */
  perVector = params->HPLMaxProcMem / size / vecCnt;

  /* log2cap = floor(log2(perVector)) */
  log2cap = 0;
  while (perVector >> (log2cap + 1))
    ++log2cap;

  if (log2cap > maxIntBits2)
    return 1 << maxIntBits2;

  return pow2 ? (1 << log2cap) : (int)perVector;
}
/*
 * HPCC_ProcessGrid: choose a P x Q process grid for the communicator.
 * Picks the factorization of the process count closest to a square;
 * small counts (or small primes) fall back to a 1 x n grid, and a large
 * prime count is reduced by one before factoring.  Always returns 0.
 */
int
HPCC_ProcessGrid(int *P, int *Q, MPI_Comm comm) {
  int commSize, myRank, n, d;

  MPI_Comm_size( comm, &commSize );
  MPI_Comm_rank( comm, &myRank );

  /* At most two passes: commSize itself, then commSize-1 when commSize
     is a prime of 20 or more. */
  for (n = commSize; ; --n) {
    /* Scan divisors downward from sqrt(n) for the squarest split. */
    for (d = (int)sqrt( n ); d > 1; --d) {
      int cols = n / d;
      int rows = n / cols;
      if (rows * cols == n) {
        *P = rows;
        *Q = cols;
        return 0;
      }
    }
    /* Small counts (or small primes) get a 1-D grid. */
    if (n < 20) {
      *P = 1;
      *Q = n;
      return 0;
    }
  }

  return 0;
}
/*
 * HPCC_Memory: determine the per-process memory budget, in bytes.
 *
 * Rank 0 reads the optional file "hpccmemf.txt"; its first line may be
 * "Total=<MiB>" (whole machine), "Thread=<MiB>" (per OpenMP thread) or
 * "Process=<MiB>" (per MPI process).  The value is scaled to a per-process
 * amount and broadcast to all ranks.  Without the file, or on a parse
 * failure, a default of 64 MiB per process is used.
 *
 * Side effects: sets HPCC_MemSpec and HPCC_MemVal on rank 0, and
 * HPCC_MemProc (per-process MiB) on every rank.
 */
size_t
HPCC_Memory(MPI_Comm comm) {
int myRank, commSize;
int num_threads;
char memFile[13] = "hpccmemf.txt";
char buf[HPL_LINE_MAX]; int nbuf = HPL_LINE_MAX;
char *sVal;
FILE *f;
double mult, mval, procMem;
size_t rv;
mult = 1.0;
num_threads = 1;
MPI_Comm_size( comm, &commSize );
MPI_Comm_rank( comm, &myRank );
#ifdef _OPENMP
/* Count OpenMP threads; used to scale a "Thread=" specification. */
#pragma omp parallel
{
#pragma omp single nowait
{
num_threads = omp_get_num_threads();
}
}
#endif
if (myRank == 0) {
procMem = 64; /* default: 64 MiB per process */
f = fopen( memFile, "r" );
if (f) {
if (fgets( buf, nbuf, f )) {
if (strncmp( "Total=", buf, 6 ) == 0) {
mult = 1.0 / commSize; /* split the machine total across processes */
sVal = buf + 6;
HPCC_MemSpec = 1;
} else if (strncmp( "Thread=", buf, 7 ) == 0) {
mult = num_threads; /* per-thread value times threads per process */
sVal = buf + 7;
HPCC_MemSpec = 2;
} else if (strncmp( "Process=", buf, 8 ) == 0) {
mult = 1.0; /* already per process */
sVal = buf + 8;
HPCC_MemSpec = 3;
} else
sVal = NULL;
if (sVal && 1 == sscanf( sVal, "%lf", &mval )) {
procMem = mval * mult;
HPCC_MemVal = mval;
}
}
fclose( f );
}
}
/* Every rank adopts rank 0's decision. */
MPI_Bcast( &procMem, 1, MPI_DOUBLE, 0, comm );
rv = procMem;
rv *= 1024; rv *= 1024; /* MiB -> bytes */
HPCC_MemProc = procMem;
return rv;
}
/*
 * HPCC_Defaults: fill in default HPL/PTRANS test parameters.
 *
 * The problem size N is derived from the per-process memory budget
 * (HPCC_Memory) with a 0.8 safety factor, rounded down to a multiple of
 * 2*nb so both HPL and PTRANS see a dimension divisible by the block
 * size.  The process grid P x Q comes from HPCC_ProcessGrid().
 * All list-length outputs (*NS, *NBS, ...) are set to 1.
 * Always returns 0.
 */
int
HPCC_Defaults(HPL_T_test *TEST, int *NS, int *N,
int *NBS, int *NB,
HPL_T_ORDER *PMAPPIN,
int *NPQS, int *P, int *Q,
int *NPFS, HPL_T_FACT *PF,
int *NBMS, int *NBM,
int *NDVS, int *NDV,
int *NRFS, HPL_T_FACT *RF,
int *NTPS, HPL_T_TOP *TP,
int *NDHS, int *DH,
HPL_T_SWAP *FSWAP, int *TSWAP, int *L1NOTRAN, int *UNOTRAN, int *EQUIL, int *ALIGN, MPI_Comm comm) {
int nb = 80;
double memFactor = 0.8; /* use at most 80% of the memory budget */
/* one entry in each parameter list */
*NS = *NBS = *NPQS = *NPFS = *NBMS = *NDVS = *NRFS = *NTPS = *NDHS = 1;
TEST->thrsh = 16.0;
*NB = nb;
*PMAPPIN = HPL_COLUMN_MAJOR;
HPCC_ProcessGrid( P, Q, comm );
/* N ~ sqrt(memFactor * doubles available across the whole P*Q grid) */
*N = (int)sqrt( memFactor * (double)(*P * *Q) * (double)(HPCC_Memory( comm ) / sizeof(double)) ) / (2 * nb);
*N *= 2*nb; /* make N multiple of 2*nb so both HPL and PTRANS see matrix
dimension divisible by nb */
*PF = HPL_RIGHT_LOOKING;
*NBM = 4;
*NDV = 2;
*RF = HPL_CROUT;
*TP = HPL_1RING_M;
*DH = 1;
*FSWAP = HPL_SW_MIX;
*TSWAP = 64;
*L1NOTRAN = 0;
*UNOTRAN = 0;
*EQUIL = 1;
*ALIGN = 8;
return 0;
}
#ifdef XERBLA_MISSING
/* Fallback XERBLA (BLAS/LAPACK error reporter) for libraries that do not
   provide one.  F77xerbla maps the C definition onto whatever symbol the
   Fortran compiler's name-mangling convention expects. */
#ifdef Add_
#define F77xerbla xerbla_
#endif
#ifdef Add__
#define F77xerbla xerbla__
#endif
#ifdef NoChange
#define F77xerbla xerbla
#endif
#ifdef UpCase
#define F77xerbla XERBLA
#endif
#ifdef f77IsF2C
#define F77xerbla xerbla_
#endif
/* Minimal handler: report the call and flush so the message survives a
   subsequent abort.  srname/srname_len are unused here; the commented-out
   code shows how the routine name could be extracted if ever needed. */
void
F77xerbla(char *srname, F77_INTEGER *info, long srname_len) {
/*
int i; char Cname[7];
for (i = 0; i < 6; i++) Cname[i] = srname[i];
Cname[6] = 0;
printf("xerbla(%d)\n", *info);
*/
printf("xerbla()\n");
fflush(stdout);
}
#endif
#ifdef HPCC_MEMALLCTR
/* Simple arena allocator used when HPCC manages its own memory: a single
   malloc'ed arena of Mem_t units carved into at most MEM_MAXCNT blocks. */
#define MEM_MAXCNT 7
typedef double Mem_t; /* allocation unit */
static Mem_t *Mem_base;  /* start of the arena; set by HPCC_alloc_init() */
static size_t Mem_dsize; /* arena size, in Mem_t units */
/*
Each entry can be in one of three states:
1. Full (holds a block of allocated memory) if:
ptr != NULL; size > 0; free == 0
2. Free (holds block of unallocated memory) if:
ptr != NULL; free = 1
3 Empty (doesn't hold a block of memory) if:
ptr == NULL; free = 1
*/
typedef struct {
Mem_t *Mem_ptr;  /* start of the block; NULL when the entry is empty */
size_t Mem_size; /* block size, in Mem_t units */
int Mem_free;    /* nonzero when the block is free or the entry empty */
} Mem_entry_t;
static Mem_entry_t Mem_blocks[MEM_MAXCNT];
/* Reset block-table entries to the Empty state.  Passing MEM_MAXCNT
   clears the whole table; any other index clears just that entry. */
static void
HPCC_alloc_set_empty(int idx) {
  int first, last, i;

  if (MEM_MAXCNT == idx) {
    first = 0;
    last = MEM_MAXCNT;
  } else {
    first = idx;
    last = idx + 1;
  }

  for (i = first; i < last; ++i) {
    Mem_blocks[i].Mem_ptr = (Mem_t *)(NULL);
    Mem_blocks[i].Mem_size = 0;
    Mem_blocks[i].Mem_free = 1;
  }
}
/* Mark table entry `idx` as a Free block of `size` units starting at `dptr`. */
static void
HPCC_alloc_set_free(int idx, Mem_t *dptr, size_t size) {
  Mem_entry_t *entry = &Mem_blocks[idx];

  entry->Mem_ptr = dptr;
  entry->Mem_size = size;
  entry->Mem_free = 1;
}
/* Create the arena: one allocation of `total_size` bytes rounded up to
   whole Mem_t units, registered as a single free block.
   Returns 0 on success, -1 when the allocation fails. */
int
HPCC_alloc_init(size_t total_size) {
  Mem_dsize = Mceil( total_size, sizeof(Mem_t) );
  Mem_base = (Mem_t *)malloc( Mem_dsize * sizeof(Mem_t) );

  HPCC_alloc_set_empty( MEM_MAXCNT );

  if (! Mem_base)
    return -1;

  /* The whole arena starts out as a single free block in slot 0. */
  HPCC_alloc_set_free( 0, Mem_base, Mem_dsize );
  return 0;
}
/* Tear down the arena created by HPCC_alloc_init() and empty the block
   table.  Always returns 0.
   Fixes: (1) declare the parameter list as (void) -- an empty () is an
   old-style unprototyped declaration in C; (2) null out Mem_base so a
   second finalize or a stray HPCC_malloc() cannot touch freed memory. */
int
HPCC_alloc_finalize(void) {
  free( Mem_base );
  Mem_base = NULL; /* guard against double-finalize / use-after-free */
  HPCC_alloc_set_empty( MEM_MAXCNT );
  return 0;
}
/*
 * HPCC_malloc: best-fit allocation from the arena.
 * Rounds the request up to whole Mem_t units, picks the free block with
 * the smallest leftover, splits off the remainder into an empty table
 * slot when one is available, and returns the block.  Returns NULL when
 * no free block is large enough.
 */
void *
HPCC_malloc(size_t size) {
size_t dsize, diff_size, cur_diff_size;
int i, cur_best, cur_free;
dsize = Mceil( size, sizeof(Mem_t) );
/* Sentinel larger than any possible leftover. */
cur_diff_size = Mem_dsize + 1;
cur_free = cur_best = MEM_MAXCNT;
for (i = 0; i < MEM_MAXCNT; ++i) {
/* skip full spots */
if (! Mem_blocks[i].Mem_free)
continue;
/* find empty spot */
if (! Mem_blocks[i].Mem_ptr) {
cur_free = i;
continue;
}
diff_size = Mem_blocks[i].Mem_size - dsize; /* unsigned; guarded by the >= test below */
if (Mem_blocks[i].Mem_size >= dsize && diff_size < cur_diff_size) {
/* a match that's the best (so far) was found */
cur_diff_size = diff_size;
cur_best = i;
}
}
/* found a match */
if (cur_best < MEM_MAXCNT) {
/* NOTE(review): when no empty table slot exists (cur_free == MEM_MAXCNT)
   the whole best-fit block is handed out unsplit, over-allocating by
   cur_diff_size units. */
if (cur_free < MEM_MAXCNT && cur_diff_size > 0) {
/* create a new free block */
HPCC_alloc_set_free( cur_free, Mem_blocks[cur_best].Mem_ptr + dsize,
cur_diff_size );
Mem_blocks[cur_best].Mem_size = dsize; /* shrink the best match */
}
Mem_blocks[cur_best].Mem_free = 0;
return (void *)(Mem_blocks[cur_best].Mem_ptr);
}
return NULL;
}
/*
 * HPCC_free: return a block obtained from HPCC_malloc() to the arena.
 * Aborts (HPL_pabort) on an unknown pointer -- including NULL -- and then
 * coalesces adjacent free blocks until no more merges are possible.
 */
void
HPCC_free(void *ptr) {
Mem_t *dptr = (Mem_t *)ptr;
int cur_blk = MEM_MAXCNT, made_changes, i, j;
/* look for the block being freed */
for (i = 0; i < MEM_MAXCNT; ++i) {
if (Mem_blocks[i].Mem_free)
continue;
if (Mem_blocks[i].Mem_ptr == dptr) {
cur_blk = i;
break;
}
}
/* not finding the pointer (including NULL) causes abort */
if (MEM_MAXCNT == cur_blk) {
HPL_pabort( __LINE__, "HPCC_free", "Unknown pointer in HPCC_free()." );
}
/* double-free causes abort */
/* NOTE(review): this branch is unreachable -- the search loop above skips
   free blocks, so a double free hits the "Unknown pointer" abort instead. */
if (1 == Mem_blocks[cur_blk].Mem_free) {
HPL_pabort( __LINE__, "HPCC_free", "Second call to HPCC_free() with the same pointer." );
}
Mem_blocks[cur_blk].Mem_free = 1;
/* merge as many blocks as possible */
/* Repeat full passes: absorbing j into i may enable further merges. */
for (made_changes = 1; made_changes;) {
made_changes = 0;
for (i = 0; i < MEM_MAXCNT; ++i) {
/* empty or full blocks can't be merged */
if (! Mem_blocks[i].Mem_free || ! Mem_blocks[i].Mem_ptr)
continue;
for (j = 0; j < MEM_MAXCNT; ++j) {
/* empty or occupied blocks can't be merged */
if (! Mem_blocks[j].Mem_free || ! Mem_blocks[j].Mem_ptr)
continue;
/* j starts exactly where i ends: absorb j into i, mark j empty */
if (Mem_blocks[i].Mem_ptr + Mem_blocks[i].Mem_size ==
Mem_blocks[j].Mem_ptr) {
Mem_blocks[i].Mem_size += Mem_blocks[j].Mem_size;
HPCC_alloc_set_empty( j );
made_changes = 1;
}
}
}
}
}
#endif
|
pi.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define NUM_THREADS 8
static long steps = 1000000000;
double step;
/*
 * Midpoint-rule integration of 4/(1+x^2) over [0,1] (= pi), with one
 * partial-sum slot per thread.
 *
 * Fixes over the original:
 *  - the per-thread stride uses omp_get_num_threads() instead of the
 *    requested NUM_THREADS: if the runtime grants fewer threads, the
 *    original left whole strides of terms uncomputed and read the
 *    corresponding sum[] slots uninitialized (undefined behavior);
 *  - sum[] is zeroed up front so the reduction below is always defined.
 * Note: adjacent sum[] slots share cache lines (false sharing), which
 * limits scaling; kept as-is to preserve the example's structure.
 */
int main(int argc, const char* argv[]) {
  double pi = 0.0;
  double start, delta, sum[NUM_THREADS];

  start = omp_get_wtime();
  step = 1.0 / (double)steps;

  /* Zero every slot so ids that never run contribute 0 to the reduction. */
  for (int t = 0; t < NUM_THREADS; t++)
    sum[t] = 0.0;

  omp_set_num_threads(NUM_THREADS);
#pragma omp parallel
  {
    int id = omp_get_thread_num();
    int nthr = omp_get_num_threads(); /* may be fewer than NUM_THREADS */

    /* Cyclic distribution of the quadrature terms over the threads. */
    for (int i = id; i < steps; i += nthr) {
      double x = (i + 0.5) * step;
      sum[id] += 4.0 / (1.0 + x * x);
    }
  }

  for (int i = 0; i < NUM_THREADS; i++) {
    pi += sum[i] * step;
  }

  delta = omp_get_wtime() - start;
  printf("PI = %.16g computed in %.4g seconds with %d threads.\n", pi, delta, NUM_THREADS);
  return EXIT_SUCCESS;
}
|
hello_world.c |
// OpenMP library header
#include <omp.h>
// Standard IO libraries
#include <stdio.h>
#include <stdlib.h>
/*
 * OpenMP hello-world: every thread announces itself, then the primary
 * summary line is printed after the join.
 *
 * Fix: the original wrote the shared variables total_threads and
 * thread_ID from every thread concurrently -- a data race (and the final
 * printf read whichever thread happened to write last).  Each thread now
 * uses private locals, and the summary values are published exactly once
 * from a single thread.
 */
int main(int argc, char* argv[])
{
    int total_threads = 0, last_reporter = 0;

    // Fork into threads
    #pragma omp parallel
    {
        // Thread-private copies: no shared writes, no race.
        int nthreads = omp_get_num_threads();
        int tid = omp_get_thread_num();
        printf("Hello world from %d of %d!\n", tid, nthreads);

        // Publish the summary values from exactly one thread.
        #pragma omp single
        {
            total_threads = nthreads;
            last_reporter = tid;
        }
    }
    // Join all threads
    printf("Finalizing with %d of %d\n", last_reporter, total_threads);
    return 0;
}
convolution_1x1_pack8to4_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// For a 1x1 stride-1 convolution the im2col transform is a pure reshape:
// view the w*h spatial plane as one row of w*h columns and hand it to the
// packed fp16 sgemm kernel.
static void conv1x1s1_sgemm_pack8to4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    const int spatial = bottom_blob.w * bottom_blob.h;

    Mat bottom_im2col = bottom_blob; // shallow copy; shares the underlying data
    bottom_im2col.w = spatial;
    bottom_im2col.h = 1;

    im2col_sgemm_pack8to4_fp16sa_neon(bottom_im2col, top_blob, kernel, _bias, opt);
}
/* Stride-2 1x1 convolution: first decimate the input spatially (keep every
   second pixel in x and y), then reuse the stride-1 sgemm path on the
   shrunken blob. */
static void conv1x1s2_sgemm_pack8to4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
// After consuming 2*outw input pixels of a row, skip the row remainder plus
// the entire next (odd) row; counted in __fp16 elements (8 per pixel).
const int tailstep = (w - 2 * outw + w) * 8;
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < channels; p++)
{
const __fp16* r0 = bottom_blob.channel(p);
__fp16* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
int j = 0;
// 4 output pixels per iteration; consecutive sources are 16 halves
// apart (stride 2 x elempack 8).
for (; j + 3 < outw; j += 4)
{
float16x8_t _v0 = vld1q_f16(r0);
float16x8_t _v1 = vld1q_f16(r0 + 16);
float16x8_t _v2 = vld1q_f16(r0 + 32);
float16x8_t _v3 = vld1q_f16(r0 + 48);
vst1q_f16(outptr, _v0);
vst1q_f16(outptr + 8, _v1);
vst1q_f16(outptr + 16, _v2);
vst1q_f16(outptr + 24, _v3);
r0 += 64;
outptr += 32;
}
// 2 output pixels per iteration.
for (; j + 1 < outw; j += 2)
{
float16x8_t _v0 = vld1q_f16(r0);
float16x8_t _v1 = vld1q_f16(r0 + 16);
vst1q_f16(outptr, _v0);
vst1q_f16(outptr + 8, _v1);
r0 += 32;
outptr += 16;
}
// Remaining single pixels.
for (; j < outw; j++)
{
float16x8_t _v = vld1q_f16(r0);
vst1q_f16(outptr, _v);
r0 += 16;
outptr += 8;
}
r0 += tailstep;
}
}
conv1x1s1_sgemm_pack8to4_fp16sa_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
pr32468-1.c | /* PR libgomp/32468 */
/* { dg-do compile } */
/* { dg-options "-O2 -fopenmp -fdump-tree-ompexp" } */
extern int printf (const char *, ...);
extern int omp_get_thread_num (void), omp_get_num_threads (void);
extern int bar (void);
extern int baz (const char *, ...);
/* Work (baz) precedes the sections construct, so the compiler cannot fuse
   the region into a combined GOMP_parallel_sections entry point -- this is
   what the scan-tree-dump directives at the end of the file verify. */
void
f1 (void)
{
#pragma omp parallel
{
baz ("%d/%d\n", omp_get_thread_num (), omp_get_num_threads ());
#pragma omp sections
{
#pragma omp section
printf ("section1 %d/%d\n", omp_get_thread_num (), omp_get_num_threads ());
#pragma omp section
printf ("section2 %d/%d\n", omp_get_thread_num (), omp_get_num_threads ());
}
}
}
/* Same as f1 but the extra work (baz) follows the sections construct;
   the combined GOMP_parallel_sections call must still not be emitted. */
void
f2 (void)
{
#pragma omp parallel
{
#pragma omp sections
{
#pragma omp section
printf ("section1 %d/%d\n", omp_get_thread_num (), omp_get_num_threads ());
#pragma omp section
printf ("section2 %d/%d\n", omp_get_thread_num (), omp_get_num_threads ());
}
baz ("%d/%d\n", omp_get_thread_num (), omp_get_num_threads ());
}
}
/* A local computed before the sections (bb = bar()) keeps the parallel
   region and the sections construct separate as well. */
void
f3 (void)
{
#pragma omp parallel
{
int bb = bar ();
#pragma omp sections
{
#pragma omp section
printf ("section1 %d/%d\n", omp_get_thread_num (), omp_get_num_threads ());
#pragma omp section
printf ("section2 %d/%d\n", omp_get_thread_num (), omp_get_num_threads ());
}
}
}
/* Work before a worksharing loop: must not become a combined
   GOMP_parallel_loop call (checked by the scan-tree-dump below). */
void
f4 (void)
{
int i;
#pragma omp parallel
{
baz ("%d/%d\n", omp_get_thread_num (), omp_get_num_threads ());
#pragma omp for schedule (dynamic, 15)
for (i = 0; i < 10000; i++)
printf ("section1 %d/%d\n", omp_get_thread_num (), omp_get_num_threads ());
}
}
/* Work after the worksharing loop -- same non-fusion requirement as f4. */
void
f5 (void)
{
int i;
#pragma omp parallel
{
#pragma omp for schedule (dynamic, 15)
for (i = 0; i < 10000; i++)
printf ("section1 %d/%d\n", omp_get_thread_num (), omp_get_num_threads ());
baz ("%d/%d\n", omp_get_thread_num (), omp_get_num_threads ());
}
}
/* A local computed before a schedule(runtime) loop; again no combined
   GOMP_parallel_loop call may be generated. */
void
f6 (void)
{
int i;
#pragma omp parallel
{
int bb = bar ();
#pragma omp for schedule (runtime)
for (i = 0; i < 10000; i++)
printf ("section1 %d/%d\n", omp_get_thread_num (), omp_get_num_threads ());
}
}
/* There should not be a GOMP_parallel_{loop,sections}* call. */
/* { dg-final { scan-tree-dump-times "GOMP_parallel_loop" 0 "ompexp"} } */
/* { dg-final { scan-tree-dump-times "GOMP_parallel_sections" 0 "ompexp"} } */
/* { dg-final { cleanup-tree-dump "ompexp" } } */
|
strategy.c | /*
* stategy.c
*
* Created on: Nov 19, 2017
* Author: Davide Malvezzi
*/
#include "strategy.h"
#include <omp.h>
// Build a random strategy: choose a change-point count, then place one
// point per equal-width bucket of the track, at a random offset inside the
// bucket (converted to a position index by dividing by spaceStep), with a
// random action.
void initStrategy(Strategy_ptr strategy, int spaceStep){
    strategy->size = randInt(MIN_CHANGE_POINT, MAX_CHANGE_POINT);

    const float bucketWidth = (float)TRACK_END_POINT / strategy->size;
    for(int i = 0; i < strategy->size; i++){
        const float lo = bucketWidth * i;
        const float hi = bucketWidth * (i + 1);
        strategy->points[i].positionIndex = randFloat(lo, hi) / spaceStep;
        strategy->points[i].action = randInt(ACTION_MINUS, ACTION_PLUS);
    }
}
// Dump the strategy (size and all change points) to the ncurses debug window.
// Fix: the trailing newline previously went to stdout via printf(), which is
// invisible (or corrupting) under ncurses -- it now goes to debugWindow like
// every other line of the dump.
void printStrategy(Strategy_ptr strategy){
    wprintw(debugWindow, "Size: %d\n", strategy->size);
    wprintw(debugWindow, "CP: ");
    for(int i = 0; i < strategy->size; i++){
        printChangePoint(strategy->points[i]);
        wprintw(debugWindow, " ");
    }
    wprintw(debugWindow, "\n");
    wrefresh(debugWindow);
}
// Write a strategy (and its simulation) into the directory `stratName`,
// creating the directory when needed.  Emits simulation.csv, strategy.csv,
// str.bin (raw struct dump) and strat.txt.
//
// Fixes over the original:
//  - str.bin is opened in binary mode ("wb"); text mode ("wt") corrupts
//    fwrite() output on platforms that translate line endings;
//  - paths are built with snprintf into a larger buffer instead of
//    unbounded strcpy/strcat into char[32];
//  - each fopen() is checked so a failure skips that file instead of
//    passing NULL to stdio.
void strategyToFile(Strategy_ptr strategy, const char* stratName){
    char fileName[64];
    FILE* file;
    Simulation simulation;

    // Re-run the simulation so the saved files reflect the stored strategy.
    simulateStrategy(strategy, &simulation,
        START_VELOCITY, END_VELOCITY, START_MAP, 1
    );

    // Create the output directory if it does not exist yet.
    struct stat st = {0};
    if (stat(stratName, &st) == -1) {
        mkdir(stratName, 0700);
    }

    //Save simulation
    snprintf(fileName, sizeof fileName, "%s/simulation.csv", stratName);
    file = fopen(fileName, "wt");
    if (file) {
        simulationToCsv(&simulation, file);
        fclose(file);
    }

    //Save strategy
    snprintf(fileName, sizeof fileName, "%s/strategy.csv", stratName);
    file = fopen(fileName, "wt");
    if (file) {
        fprintf(file, "%d\n", strategy->size);
        for(int i = 0; i < strategy->size; i++){
            fprintf(file, "%d,%d\n", strategy->points[i].positionIndex, strategy->points[i].action);
        }
        fclose(file);
    }

    //Bin file (binary mode: raw struct bytes)
    snprintf(fileName, sizeof fileName, "%s/str.bin", stratName);
    file = fopen(fileName, "wb");
    if (file) {
        fwrite(strategy, sizeof(Strategy), 1, file);
        fclose(file);
    }

    //Strat file
    snprintf(fileName, sizeof fileName, "%s/strat.txt", stratName);
    file = fopen(fileName, "wt");
    if (file) {
        simulationToStrategy(&simulation, file);
        fclose(file);
    }
}
// Load a strategy from the CSV layout written by strategyToFile(): first
// line is the point count, then one "positionIndex,action" pair per line.
// Fix: the original passed an unchecked fopen() result straight to fscanf(),
// crashing on a missing file; a failed open now yields an empty strategy.
void strategyFromCsv(Strategy_ptr strategy, const char* fileName){
    FILE* file = fopen(fileName, "rt");
    char newLine, comma;

    if (file == NULL) {
        strategy->size = 0;
        return;
    }

    fscanf(file, "%d%c", &strategy->size, &newLine);
    for(int i = 0; i < strategy->size; i++){
        fscanf(file,"%d%c%d%c", &strategy->points[i].positionIndex, &comma, &strategy->points[i].action, &newLine);
    }
    fclose(file);
}
int compareStrategyFitness(const void* elem1, const void* elem2) {
Strategy_ptr str1 = (Strategy_ptr)elem1;
Strategy_ptr str2 = (Strategy_ptr)elem2;
if (str1->fitness > str2->fitness) return 1;
if (str1->fitness < str2->fitness) return -1;
return 0;
}
/*
 * Binary-search the (sorted-by-positionIndex -- TODO confirm the caller
 * keeps points sorted) change-point array for `position`.
 * Returns -1 when the strategy is empty or the position precedes the first
 * change point; otherwise the index of the change point governing the
 * position, advanced past any duplicates sharing the same positionIndex.
 */
int getChangePointNearAt(Strategy_ptr strategy, int position){
int sx = 0;
int dx = strategy->size - 1;
int m = (sx + dx) / 2;
if(strategy->size <= 0 || strategy->points[0].positionIndex > position){
//The value is before the first change point
return -1;
}
/* Standard binary search; breaks out early on an exact hit. */
while (sx <= dx) {
if (strategy->points[m].positionIndex < position){
sx = m + 1;
}
else if (strategy->points[m].positionIndex > position){
dx = m - 1;
}
else{
break;
}
m = (sx + dx) / 2;
}
//Search for the last change point with position <= the searched position
/* Duplicates are possible: step to the last entry sharing this position. */
while(m < strategy->size - 1 && strategy->points[m].positionIndex == strategy->points[m + 1].positionIndex){
m++;
}
return m;
}
/* Insert an (uninitialized) change point at `index`, shifting later points
   right.  NOTE(review): assumes points[] has room for one more entry --
   confirm against MAX_CHANGE_POINT at the call sites. */
void addChangePoint(Strategy_ptr strategy, int index){
addElement(strategy->points, index, strategy->size, sizeof(ChangePoint));
strategy->size++;
}
/* Remove the change point at `index`, shifting later points left. */
void removeChangePoint(Strategy_ptr strategy, int index){
removeElement(strategy->points, index, strategy->size, sizeof(ChangePoint));
strategy->size--;
}
/*
 * Run a full track simulation driven by `strategy`:
 *   1. start -> first change point, with ACTION_NONE;
 *   2. each change point to the next, applying that point's action;
 *   3. last change point -> TRACK_END_POINT.
 * Each segment updates simulation->result; the run stops early as soon as a
 * segment returns something other than SIM_OK.  Under N_LAP and
 * PATTERN_LAPS, lap-level penalties and end-velocity / race-time validity
 * checks adjust simulation->time, ->energy and ->result afterwards.
 *
 * NOTE(review): an empty strategy (size == 0) would read points[0] and
 * points[size - 1] out of bounds -- callers appear to guarantee size >= 1;
 * confirm before relying on it.
 */
void simulateStrategy(Strategy_ptr strategy, Simulation_ptr simulation, float startVelocity, float endVelocity, int startMap, int keepTimeInvalid){
int i = 0;
//Init simulation
initSimulation(simulation, startVelocity, startMap);
//Simulate from start to first change point
simulation->result = simulate(simulation,
0,
strategy->points[0].positionIndex * SPACE_STEP,
ACTION_NONE,
keepTimeInvalid
);
//Simulate all change point
while(i < strategy->size - 1 && simulation->result == SIM_OK){
simulation->result = simulate(simulation,
strategy->points[i].positionIndex * SPACE_STEP,
strategy->points[i + 1].positionIndex * SPACE_STEP,
strategy->points[i].action,
keepTimeInvalid
);
i++;
}
if(simulation->result == SIM_OK){
//Simulate from last change point to track end
simulation->result = simulate(simulation,
strategy->points[strategy->size - 1].positionIndex * SPACE_STEP,
TRACK_END_POINT,
strategy->points[strategy->size - 1].action,
keepTimeInvalid
);
#ifdef N_LAP
//Get double energy penalty if started lap with gas on and ended lap with gas on
if (simulation->selectedMap != 0 && simulation->steps[0].map != 0) {
simulation->energy -= MotorStartPenalty;
}
//Check if last velocity is >= than the start velocity
//Just for general lap
/* A lap that ends slower than required is invalid: time/energy are set
   to INFINITY so it sorts behind every valid strategy. */
if(simulation->result == SIM_OK
&& simulation->velocity < endVelocity){
simulation->time = INFINITY;
simulation->energy = INFINITY;
simulation->result = SIM_END_VEL;
}
#endif
#ifdef PATTERN_LAPS
/* The pattern lap must start and end at (nearly) the same velocity so it
   can be repeated PATTERN_REPEAT times; otherwise it is invalid. */
if(simulation->result == SIM_OK &&
fabs(simulation->steps[TRACK_LENGTH * PATTERN_START_LAP].vi - simulation->steps[TRACK_LENGTH * PATTERN_END_LAP].vi) < 0.01){
float patternLapsEnergy = 0;
float patternLapsTime = 0;
/* Accumulate the per-step cost of one pattern lap. */
for(int i = TRACK_LENGTH * PATTERN_START_LAP; i < TRACK_LENGTH * PATTERN_END_LAP; i++){
patternLapsEnergy += simulation->steps[i].dE;
patternLapsTime += simulation->steps[i].dt;
}
simulation->time += patternLapsTime * PATTERN_REPEAT;
if(simulation->time <= RACE_TIME){
/* Motor kept running across the lap boundary: refund one start penalty. */
if(simulation->steps[TRACK_LENGTH * PATTERN_START_LAP - 1].map == 0 &&
simulation->steps[TRACK_LENGTH * PATTERN_START_LAP].map != 0 &&
simulation->steps[TRACK_LENGTH * PATTERN_END_LAP - 1].map != 0){
//simulation->energy -= MotorStartPenalty;
patternLapsEnergy -= MotorStartPenalty;
}
simulation->energy += patternLapsEnergy * PATTERN_REPEAT;
}
else{
simulation->time = INFINITY;
simulation->energy = INFINITY;
simulation->result = SIM_TIME_MAX;
}
}
else{
simulation->time = INFINITY;
simulation->energy = INFINITY;
simulation->result = SIM_END_VEL;
}
#endif
}
}
/*
 * Simulate `count` strategies in parallel on `threadCount` OpenMP threads.
 * Each thread reuses a single private Simulation as scratch space and
 * copies only the summary fields (energy/time/velocity/result) into
 * simOut[i], so the full step traces are discarded.
 */
void parallelSimulateStrategy(Strategy_ptr strategies, SimulationOutput_ptr simOut, int count, int threadCount, float startVelocity, float endVelocity, int startMap, int keepTimeInvalid){
#pragma omp parallel num_threads(threadCount)
{
Simulation sim; /* thread-private scratch simulation */
#pragma omp for
for(int i = 0; i < count; i++){
//Simulate the strategy
simulateStrategy(&strategies[i], &sim, startVelocity, endVelocity, startMap, keepTimeInvalid);
//Save the output
simOut[i].energy = sim.energy;
simOut[i].time = sim.time;
simOut[i].velocity = sim.velocity;
simOut[i].result = sim.result;
}
}
}
/*
 * Similarity of two strategies, measured as the angle (in degrees) between
 * their per-step "current map" traces: each strategy is expanded into a
 * SIM_STEP_COUNT-long vector of map values, and the cosine of the angle
 * between the two vectors is converted with acosf.  Smaller angles mean
 * more similar strategies; empty strategies return INFINITY.
 *
 * NOTE(review): if either trace is all zeros, l1 or l2 is 0 and the
 * division below produces a NaN -- confirm maps can never be all-zero here.
 */
float evalStrategySimilarity(Strategy_ptr str1, Strategy_ptr str2){
float factor;
float scalar = 0, l1 = 0, l2 = 0;
int map1 = START_MAP;
int map2 = START_MAP;
int index1 = 0, index2 = 0;
if(str1->size == 0 || str2->size == 0)return INFINITY;
for(int i = 0; i < SIM_STEP_COUNT; i++){
/* Apply every change point that fires at step i (duplicates included). */
while(index1 < str1->size && str1->points[index1].positionIndex == i){
map1 = getCurrentMap(str1->points[index1].action, map1);
index1++;
}
while(index2 < str2->size && str2->points[index2].positionIndex == i){
map2 = getCurrentMap(str2->points[index2].action, map2);
index2++;
}
/* Accumulate dot product and squared norms of the two map traces. */
scalar += map1 * map2;
l1 += map1 * map1;
l2 += map2 * map2;
}
//Cosine measure
factor = scalar / (sqrt(l1) * sqrt(l2));
factor = acosf(factor) * 180.0 / M_PI; /* radians -> degrees */
//Jacquard measure
//factor = scalar / (l1 + l2 - scalar);
return factor;
}
|
sin.c | #include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
static long N = 100000000;
#define NUM_THREADS 4
/*
 * Trig-chain throughput micro-benchmark: fills x with 1.0f and computes a
 * nested transcendental expression per element in a static OpenMP loop.
 *
 * Fix: the original used memset(x, 1, sizeof(float) * N), which sets every
 * BYTE to 0x01 -- each float becomes the bit pattern 0x01010101 (~2.4e-38),
 * not 1.0f.  The elements are now initialized explicitly.  The buffers are
 * also freed before exit.
 */
int main ()
{
  float *x = (float *) calloc(N, sizeof(float));
  float *y = (float *) calloc(N, sizeof(float));
  if (x == NULL || y == NULL) exit(1);

  /* Element-wise initialization; memset cannot write float values. */
  for (int i = 0; i < N; ++i)
    x[i] = 1.0f;

  omp_set_num_threads(NUM_THREADS);
#pragma omp parallel for schedule(static)
  for (int i = 0; i < N; ++i) {
    float foo = x[i];
    y[i] = sinf(cos(sin(cos(tan(log(sin(exp(sin(foo)))))))));
  }

  free(x);
  free(y);
  return 0;
}
// Compile: `gcc -fopenmp sin.c -lm`
|
elemwise_binary_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2016 by Contributors
* \file elemwise_binary_op.h
* \brief Function definition of elementwise binary operators
*/
#ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
#define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
#include <mxnet/operator_util.h>
#include <mxnet/op_attr_types.h>
#include <vector>
#include <string>
#include <utility>
#include <typeinfo>
#include <algorithm>
#include "../mxnet_op.h"
#include "../mshadow_op.h"
#include "../../engine/openmp.h"
#include "elemwise_unary_op.h"
#include "../../common/utils.h"
#include "./init_op.h"
namespace mxnet {
namespace op {
/*! Gather binary operator functions into ElemwiseBinaryOp class */
class ElemwiseBinaryOp : public OpBase {
public:
/*! \brief For sparse, assume missing rvalue is 0 */
template<typename OP, int Req>
struct MissingRValueOp {
typedef OP Operation;
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs) {
KERNEL_ASSIGN(out[i], Req, OP::Map(lhs[i], DType(0)));
}
};
/*! \brief For sparse, assume missing lvalue is 0 */
template<typename OP, int Req>
struct MissingLValueOp {
typedef OP Operation;
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out, const DType *rhs) {
KERNEL_ASSIGN(out[i], Req, OP::Map(DType(0), rhs[i]));
}
};
private:
/*!
* \brief CSR operation requires temp space
*/
enum ResourceRequestType {
kTempSpace
};
/*!
* \brief Fill contiguous dense output rows with value computed from 0 lhs and 0 rhs input
* CPU-Only version
*/
template<typename DType, typename OP, typename xpu>
static inline size_t FillDense(mshadow::Stream<xpu> *s,
const size_t idx_l,
const size_t idx_r,
const OpReqType req,
mshadow::Tensor<xpu, 2, DType> *out,
const size_t iter_out) {
// Rows [iter_out, min(idx_l, idx_r)) have neither an lhs nor an rhs entry,
// so every element of those rows takes the constant OP(0, 0).
const int index_out_min = static_cast<int>(std::min(idx_l, idx_r));
if (static_cast<size_t>(index_out_min) > iter_out) {
const DType zero_input_val = OP::Map(DType(0), DType(0));
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (int i = static_cast<int>(iter_out); i < index_out_min; ++i) {
Fill<false>(s, (*out)[i], req, zero_input_val);
}
}
return static_cast<size_t>(index_out_min);  // MSVC wants OMP loops to always use 'int'
}
/*! \brief True when both NDArrays refer to the same underlying engine
 *         variable (i.e. alias the same storage). */
static inline bool IsSameArray(const NDArray& a1, const NDArray& a2) {
return a1.var() == a2.var();
}
/*! \brief Minimum of three values. */
static MSHADOW_XINLINE size_t minthree(const size_t a, const size_t b, const size_t c) {
  // Fold pairwise: min(min(a, b), c).
  const size_t ab = a < b ? a : b;
  return ab < c ? ab : c;
}
/*
 * Backward pass that needs neither forward input: each input gradient is a
 * unary transform (LOP for lhs, ROP for rhs) of the output gradient alone.
 * When the transform is identity and the request is kWriteInplace, the copy
 * is skipped after checking that the buffers already alias.
 */
template<typename xpu, typename LOP, typename ROP, typename DType>
static void BackwardUseNone_(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
Stream<xpu> *s = ctx.get_stream<xpu>();
// Element count rounded up to whole SIMD lanes.
const int size = static_cast<int>((outputs[0].Size() + DataType<DType>::kLanes - 1)
/ DataType<DType>::kLanes);
const DType *ograd_dptr = inputs[0].dptr<DType>();
// lhs gradient: outputs[0] = LOP(ograd)
if (std::is_same<LOP, mshadow_op::identity>::value && req[0] == kWriteInplace) {
CHECK_EQ(ograd_dptr, outputs[0].dptr<DType>());
} else if (req[0] != kNullOp) {
DType *lgrad_dptr = outputs[0].dptr<DType>();
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
Kernel<mxnet_op::op_with_req<LOP, Req>, xpu>::Launch(s, size, lgrad_dptr, ograd_dptr);
});
}
// rhs gradient: outputs[1] = ROP(ograd)
if (std::is_same<ROP, mshadow_op::identity>::value && req[1] == kWriteInplace) {
CHECK_EQ(ograd_dptr, outputs[1].dptr<DType>());
} else if (req[1] != kNullOp) {
DType *rgrad_dptr = outputs[1].dptr<DType>();
MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
Kernel<mxnet_op::op_with_req<ROP, Req>, xpu>::Launch(s, size, rgrad_dptr, ograd_dptr);
});
}
}
/*
 * Backward pass that needs both forward inputs: each gradient kernel is
 * launched with (ograd, lhs, rhs), wrapped in backward_grad_tuned<LOP/ROP>
 * (presumably combining the partial derivative with ograd -- see mxnet_op
 * for the exact contract).  inputs = {ograd, lhs, rhs}; outputs =
 * {lhs_grad, rhs_grad}.
 */
template<typename xpu, typename LOP, typename ROP, typename DType>
static void BackwardUseIn_(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
DCHECK_EQ(outputs.size(), 2U);
DCHECK_EQ(inputs.size(), 3U);
mxnet_op::Stream<xpu> *s = ctx.get_stream<xpu>();
const DType *ograd_dptr = inputs[0].dptr<DType>();
const DType *lhs_dptr = inputs[1].dptr<DType>();
const DType *rhs_dptr = inputs[2].dptr<DType>();
// lhs gradient (size rounded up to whole SIMD lanes).
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
const int size = static_cast<int>(
(outputs[0].Size() + mxnet_op::DataType<DType>::kLanes - 1)
/ mxnet_op::DataType<DType>::kLanes);
DType * lgrad_dptr = outputs[0].dptr<DType>();
mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<LOP>, Req>, xpu>::Launch(
s, size, lgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);});
// rhs gradient.
MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
const int size = static_cast<int>(
(outputs[1].Size() + mxnet_op::DataType<DType>::kLanes - 1)
/ mxnet_op::DataType<DType>::kLanes);
DType * rgrad_dptr = outputs[1].dptr<DType>();
mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<ROP>, Req>, xpu>::Launch(
s, size, rgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);});
}
/*
 * Backward for a row-sparse/row-sparse binary op: for each requested side,
 * first compute the partial derivative LOP/ROP(lhs, rhs) into the output,
 * then multiply it in place by the incoming gradient (inputs[0]).
 * inputs = {ograd, lhs, rhs}; outputs = {lhs_grad, rhs_grad}.
 * NOTE(review): backup_compute (and the in*_ok_dense flags) are accepted
 * but never used in this body -- possibly kept for interface parity with a
 * fallback path; confirm before removing.
 */
template<
typename xpu,
typename LOP,
typename ROP,
bool in0_ok_dense = false,
bool in1_ok_dense = false,
bool in2_ok_dense = false,
typename BackupCompute>
static inline void RspRspOpBackward(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs,
BackupCompute backup_compute) {
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
// lhs grad
if (req[0] != kNullOp) {
// RspRspOp can handle dense outputs so long as OP(0, 0) == 0
RspRspOp<LOP>(
s, attrs, ctx, inputs[1], inputs[2], req[0], outputs[0],
false, false, false, false);
// lhs in-place
RspRspOp<op::mshadow_op::mul>(
s, attrs, ctx, outputs[0], inputs[0], req[0], outputs[0],
false, false, true, false);
}
// rhs grad
if (req[1] != kNullOp) {
RspRspOp<ROP>(
s, attrs, ctx, inputs[1], inputs[2], req[1], outputs[1],
false, false, false, false);
// rhs in-place
RspRspOp<op::mshadow_op::mul>(
s, attrs, ctx, inputs[0], outputs[1], req[1], outputs[1],
false, false, true, false);
}
}
/*
 * Backward for a dense-times-CSR multiply (the static_assert-style CHECK
 * restricts LOP/ROP to the right/left pair used by mul).  Each side's
 * gradient is ograd times the other forward input; the CSR-shaped gradient
 * goes through DnsCsrCsrOp, the dense one through plain Compute.  `reverse`
 * distinguishes which output is the CSR one.
 */
template<typename xpu, typename LOP, typename ROP>
static inline void DnsCsrCsrOpBackward(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
const bool supported_ops = std::is_same<mshadow_op::right, LOP>::value &&
std::is_same<mshadow_op::left, ROP>::value;
CHECK(supported_ops)
<< "Only backward for mul is supported (LOP should be right, ROP should be left)";
const NDArray& out_grad = inputs[0];
const NDArray& lhs_in = inputs[1];
const NDArray& rhs_in = inputs[2];
const NDArray& lhs_grad = outputs[0];
const NDArray& rhs_grad = outputs[1];
const bool reverse = (outputs[0].storage_type() == kCSRStorage);
if (reverse) {
// lhs is the sparse side: lhs_grad (CSR) = ograd * rhs, rhs_grad = ograd * lhs.
DnsCsrCsrOp<xpu, mshadow_op::mul>(attrs, ctx, out_grad, rhs_in, req[0], lhs_grad, false);
Compute<xpu, mshadow_op::mul>(attrs, ctx, {out_grad.data(), lhs_in.data()}, {req[1]},
{rhs_grad.data()});
} else {
// rhs is the sparse side: rhs_grad (CSR) = ograd * lhs, lhs_grad = ograd * rhs.
DnsCsrCsrOp<xpu, mshadow_op::mul>(attrs, ctx, out_grad, lhs_in, req[1], rhs_grad, false);
Compute<xpu, mshadow_op::mul>(attrs, ctx, {out_grad.data(), rhs_in.data()}, {req[0]},
{lhs_grad.data()});
}
}
public:
/*! \brief Binary op handling for lhr/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */
template<typename OP>
static void RspRspOp(mshadow::Stream<cpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
bool lhs_may_be_dense,
bool rhs_may_be_dense,
bool allow_inplace,
bool scatter);
// ---------------------------------------------------------------------------
// Forward declarations of the sparse kernel implementations used by the
// Compute*/Backward* dispatchers below.  CPU and GPU overloads are
// distinguished by the mshadow::Stream type parameter.
// ---------------------------------------------------------------------------
/*! \brief Binary op handling for lhs/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result (GPU) */
template<typename OP>
static void RspRspOp(mshadow::Stream<gpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
bool lhs_may_be_dense,
bool rhs_may_be_dense,
bool allow_inplace,
bool scatter);
/*! \brief CSR -op- CSR binary operator for non-canonical NDArray (CPU) */
template<typename OP>
static void CsrCsrOp(mshadow::Stream<cpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output);
/*! \brief CSR -op- CSR binary operator for non-canonical NDArray (GPU) */
template<typename OP>
static void CsrCsrOp(mshadow::Stream<gpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output);
/*! \brief DNS -op- CSR binary operator producing a dense result.
 *  \param reverse true when the CSR operand is the lhs of the op */
template<typename xpu, typename OP>
static void DnsCsrDnsOp(mshadow::Stream<xpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
const bool reverse);
/*! \brief DNS -op- CSR binary operator producing a CSR result.
 *  Takes no stream argument (presumably obtained from ctx -- see callers). */
template<typename xpu, typename OP>
static void DnsCsrCsrOp(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
const bool reverse);
/*! \brief DNS -op- RSP binary operator producing a dense result.
 *  \param reverse true when the RSP operand is the lhs of the op */
template<typename xpu, typename OP>
static void DnsRspDnsOp(mshadow::Stream<xpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
const bool reverse);
public:
/*!
 * \brief Storage-type inference for rsp-op-rsp operations which produce a
 *        dense result (e.g. ops that do not preserve sparsity).
 * \param attrs Attributes
 * \param dev_mask Device mask
 * \param dispatch_mode Dispatch Mode
 * \param in_attrs Input storage attributes
 * \param out_attrs Output storage attributes
 * \return true if handled
 */
static bool SparseSparseWithDenseResult(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs);
/*!
 * \brief Allow one of the binary inputs to be dense and still produce a sparse output.
 * Typically used for sparse * dense = sparse.
 * Note: for csr, it dispatches to fallback other than csr, csr -> csr
 * \param attrs Attributes
 * \param dev_mask Device mask
 * \param dispatch_mode Dispatch Mode
 * \param in_attrs Input storage attributes (exactly 2 inputs expected)
 * \param out_attrs Output storage attributes (exactly 1 output expected)
 * \return true if handled
 */
static bool PreferSparseStorageType(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
using namespace common;
CHECK_EQ(in_attrs->size(), 2U) << " in operator " << attrs.name;
CHECK_EQ(out_attrs->size(), 1U) << " in operator " << attrs.name;
const auto& lhs_stype = in_attrs->at(0);
const auto& rhs_stype = in_attrs->at(1);
auto& out_stype = out_attrs->at(0);
bool dispatched = false;
// The sparse FComputeEx kernels are CPU-only; on any other device the
// sparse branches fall back to dense computation.
const bool invalid_ctx = dev_mask != mshadow::cpu::kDevMask;
const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback :
DispatchMode::kFComputeEx;
// The checks below are ordered; the first matching storage combination wins.
if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
// dns, dns -> dns
dispatched = storage_type_assign(&out_stype, kDefaultStorage,
dispatch_mode, DispatchMode::kFCompute);
}
if (!dispatched && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) {
// rsp, rsp -> rsp
dispatched = storage_type_assign(&out_stype, kRowSparseStorage,
dispatch_mode, dispatch_ex);
}
if (!dispatched && ContainsOnlyStorage(*in_attrs, kCSRStorage)) {
// csr, csr -> csr
dispatched = storage_type_assign(&out_stype, kCSRStorage,
dispatch_mode, dispatch_ex);
}
if (!dispatched &&
((lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage))) {
// rsp, dns -> rsp
// dns, rsp -> rsp
dispatched = storage_type_assign(&out_stype, kRowSparseStorage,
dispatch_mode, dispatch_ex);
}
if (!dispatched &&
((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage))) {
// csr, dns -> csr
// dns, csr -> csr
// NOTE(review): uses kFComputeEx unconditionally, unlike the rsp/dns
// branch above which honors dispatch_ex on non-CPU devices -- confirm
// this asymmetry is intended.
dispatched = storage_type_assign(&out_stype, kCSRStorage,
dispatch_mode, DispatchMode::kFComputeEx);
}
if (!dispatched) {
dispatched = dispatch_fallback(out_attrs, dispatch_mode);
}
return dispatched;
}
/*!
 * \brief Allow one of the inputs to be dense and produce a dense output,
 *        for rsp inputs only support when both inputs are rsp type.
 * \tparam cpu_only when true, sparse csr kernels fall back on non-CPU devices
 * \tparam rsp enable the rsp,rsp -> rsp branch
 * \tparam csr enable the csr,csr -> csr branch
 * \param attrs Attributes
 * \param dev_mask Device mask
 * \param dispatch_mode Dispatch Mode
 * \param in_attrs Input storage attributes (exactly 2 inputs expected)
 * \param out_attrs Output storage attributes (exactly 1 output expected)
 * \return true if handled
 */
template<bool cpu_only, bool rsp, bool csr>
static bool PreferDenseStorageType(const nnvm::NodeAttrs& attrs,
                                   const int dev_mask,
                                   DispatchMode* dispatch_mode,
                                   std::vector<int> *in_attrs,
                                   std::vector<int> *out_attrs) {
  using namespace common;
  CHECK_EQ(in_attrs->size(), 2);
  CHECK_EQ(out_attrs->size(), 1);
  const auto lhs_stype = (*in_attrs)[0];
  const auto rhs_stype = (*in_attrs)[1];
  bool dispatched = false;
  const bool invalid_ctx = cpu_only && dev_mask != mshadow::cpu::kDevMask;
  const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback :
                                         DispatchMode::kFComputeEx;
  // Ordered checks: the first matching storage combination wins.
  if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
    // dns, dns ... -> dns
    dispatched = storage_type_assign(out_attrs, kDefaultStorage,
                                     dispatch_mode, DispatchMode::kFCompute);
  }
  if (!dispatched && rsp && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) {
    // rsp, rsp, ... -> rsp
    // NOTE(review): uses kFComputeEx unconditionally while the csr branch
    // honors dispatch_ex -- confirm this asymmetry is intended.
    dispatched = storage_type_assign(out_attrs, kRowSparseStorage,
                                     dispatch_mode, DispatchMode::kFComputeEx);
  }
  if (!dispatched && csr && ContainsOnlyStorage(*in_attrs, kCSRStorage)) {
    // csr, csr, ... -> csr
    dispatched = storage_type_assign(out_attrs, kCSRStorage,
                                     dispatch_mode, dispatch_ex);
  }
  if (!dispatched && ((lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage) ||
                      (lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage))) {
    // dense, csr -> dense / csr, dense -> dense
    dispatched = storage_type_assign(out_attrs, kDefaultStorage,
                                     dispatch_mode, DispatchMode::kFComputeEx);
  }
  if (!dispatched && ((lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage) ||
                      (lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage))) {
    // dense, rsp -> dense / rsp, dense -> dense
    dispatched = storage_type_assign(out_attrs, kDefaultStorage,
                                     dispatch_mode, DispatchMode::kFComputeEx);
  }
  if (!dispatched) {
    // Fix: propagate the fallback result instead of discarding it and
    // unconditionally returning true (matches PreferSparseStorageType).
    dispatched = dispatch_fallback(out_attrs, dispatch_mode);
  }
  return dispatched;
}
/*!
 * \brief Storage-type inference for the backward pass that computes input
 *        gradients from the forward inputs (declaration; defined elsewhere).
 * \param attrs Attributes
 * \param dev_mask Device mask
 * \param dispatch_mode Dispatch Mode
 * \param in_attrs Input storage attributes
 * \param out_attrs Output storage attributes
 * \return true if handled
 */
static bool BackwardUseInStorageType(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs);
/*! \brief Dense FCompute kernel: outputs[0][i] = OP(inputs[0][i], inputs[1][i]).
 *  Dispatches on the write request (req) and the output dtype; a kNullOp
 *  request makes the whole call a no-op. */
template<typename xpu, typename OP>
static void Compute(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
if (req[0] != kNullOp) {
Stream<xpu> *s = ctx.get_stream<xpu>();
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
// Launch over the smallest of the three sizes, rounded up to whole
// vector lanes for the chosen dtype.
const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
+ DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
});
});
}
}
/*! \brief Same as Compute, but the dtype switch additionally covers the
 *  packed half2 type (MSHADOW_TYPE_SWITCH_WITH_HALF2). */
template<typename xpu, typename OP>
static void ComputeWithHalf2(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
if (req[0] != kNullOp) {
Stream<xpu> *s = ctx.get_stream<xpu>();
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
+ DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
});
});
}
}
/*! \brief FComputeEx entry point: dispatch a sparse elementwise binary op on
 *  the actual storage types of the two inputs and the output.  Storage
 *  combinations with no matching branch are reported via LogUnimplementedOp. */
template<typename xpu, typename OP>
static void ComputeEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
using namespace common;
CHECK_EQ(inputs.size(), 2);
CHECK_EQ(outputs.size(), 1);
if (req[0] == kNullOp) return;
const auto lhs_stype = inputs[0].storage_type();
const auto rhs_stype = inputs[1].storage_type();
const auto out_stype = outputs[0].storage_type();
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
if ((ContainsOnlyStorage(inputs, kRowSparseStorage)) &&
(out_stype == kRowSparseStorage || out_stype == kDefaultStorage)) {
// rsp, rsp -> rsp
// rsp, rsp -> dns
// All four trailing flags off: no dense operands, no in-place, no scatter.
RspRspOp<OP>(
s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0], false, false, false, false);
} else if (ContainsOnlyStorage(inputs, kCSRStorage) && out_stype == kCSRStorage) {
// csr, csr -> csr
CsrCsrOp<OP>(s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0]);
} else if (((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage)) &&
out_stype == kDefaultStorage) {
// csr, dns -> dns / dns, csr -> dns; reverse marks the csr as the lhs
const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1];
const NDArray& csr = (lhs_stype == kCSRStorage)? inputs[0] : inputs[1];
const bool reverse = (lhs_stype == kCSRStorage);
DnsCsrDnsOp<xpu, OP>(s, attrs, ctx, dns, csr, req[0], outputs[0], reverse);
} else if (((lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) &&
out_stype == kDefaultStorage) {
// rsp, dns -> dns / dns, rsp -> dns; reverse marks the rsp as the lhs
const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1];
const bool reverse = (lhs_stype == kRowSparseStorage);
const NDArray& rsp = (reverse)? inputs[0] : inputs[1];
DnsRspDnsOp<xpu, OP>(s, attrs, ctx, dns, rsp, req[0], outputs[0], reverse);
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
/*! \brief ComputeEx allowing dense lvalue and/or rvalue.
 *  The template flags declare which side(s) may legally be dense; the
 *  runtime storage types select the concrete kernel. */
template<typename xpu, typename OP, bool lhs_may_be_dense, bool rhs_may_be_dense>
static void ComputeDnsLRValueEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(inputs.size(), 2);
CHECK_EQ(outputs.size(), 1);
if (req[0] == kNullOp) return;
const auto lhs_stype = inputs[0].storage_type();
const auto rhs_stype = inputs[1].storage_type();
const auto out_stype = outputs[0].storage_type();
if ((out_stype == kRowSparseStorage || out_stype == kDefaultStorage) &&
((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) ||
(lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) &&
lhs_may_be_dense && rhs_may_be_dense) {
// rsp, rsp -> rsp
// rsp, rsp -> dns
// rsp, dns -> rsp
// dns, rsp -> rsp
// More than once dense not allowed (this will be checked in RspRspOp):
// rsp, dns -> dns <-- NOT ALLOWED
// dns, rsp -> dns <-- NOT ALLOWED
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
RspRspOp<OP>(
s, attrs, ctx, inputs[0], inputs[1],
req[0], outputs[0], lhs_may_be_dense, rhs_may_be_dense, false, false);
} else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) {
// Pure csr/csr: delegate to the generic dispatcher.
ComputeEx<xpu, OP>(attrs, ctx, inputs, req, outputs);
} else if (((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage)) &&
out_stype == kCSRStorage) {
// csr, dns -> csr / dns, csr -> csr; reverse marks the csr as the lhs
const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1];
const NDArray& csr = (lhs_stype == kCSRStorage)? inputs[0] : inputs[1];
const bool reverse = (lhs_stype == kCSRStorage);
DnsCsrCsrOp<xpu, OP>(attrs, ctx, dns, csr, req[0], outputs[0], reverse);
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
/*! \brief Dense backward (gradients do not need the forward inputs):
 *  type-switch wrapper around BackwardUseNone_. */
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNone(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
/*! \brief Same as BackwardUseNone, but the dtype switch additionally covers
 *  the packed half2 type. */
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNoneWithHalf2(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
/*! \brief Sparse backward when gradients do not need the forward inputs:
 *  each input gradient is LOP/ROP applied to the single output gradient.
 *  Requires the op to map 0 -> 0 so sparsity is preserved. */
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNoneEx(const nnvm::NodeAttrs &attrs,
                                     const OpContext &ctx,
                                     const std::vector<NDArray> &inputs,
                                     const std::vector<OpReqType> &req,
                                     const std::vector<NDArray> &outputs) {
  CHECK_EQ(inputs.size(), 1U);   // output grad
  CHECK_EQ(outputs.size(), 2U);  // lhs input grad, rhs input grad
  const auto in_stype = inputs[0].storage_type();
  const auto lhs_stype = outputs[0].storage_type();
  const auto rhs_stype = outputs[1].storage_type();
  // lhs grad
  if (req[0] != kNullOp) {
    if (in_stype == lhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
      CHECK_EQ(outputs[0].storage_type(), in_stype);
      // rsp -> rsp, _. op requires 0-input returns 0-output
      DCHECK_LT(fabs(static_cast<float>(LOP::Map(0))), 1e-5f);
      UnaryOp::ComputeEx<xpu, LOP>(attrs, ctx, inputs, req, {outputs[0]});
    } else {
      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
    }
  }
  // rhs grad
  if (req[1] != kNullOp) {
    if (in_stype == rhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
      // Fix: verify the *rhs* gradient output here.  The previous check of
      // outputs[0] was a copy-paste slip from the lhs branch and could abort
      // on a valid lhs/rhs storage mismatch.
      CHECK_EQ(outputs[1].storage_type(), in_stype);
      // rsp -> _, rsp. op requires 0-input returns 0-output
      DCHECK_LT(fabs(static_cast<float>(ROP::Map(0))), 1e-5f);
      UnaryOp::ComputeEx<xpu, ROP>(attrs, ctx, inputs, req, {outputs[1]});
    } else {
      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
    }
  }
}
/*! \brief Dense backward using the forward inputs: type-switch wrapper
 *  around BackwardUseIn_. */
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseIn(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
/*! \brief Same as BackwardUseIn, but the dtype switch additionally covers
 *  the packed half2 type. */
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseInWithHalf2(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
/*! \brief Sparse backward using the forward inputs.
 *  inputs holds three arrays (inputs[0] is the output gradient, judging by
 *  out_grad_stype below); outputs holds the lhs and rhs input gradients.
 *  NOTE(review): the two branches are independent `if`s with no final else;
 *  storage combinations matching neither are silently ignored -- confirm
 *  callers guarantee at least one branch applies. */
template<
typename xpu, typename LOP, typename ROP,
bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false>
static inline void BackwardUseInEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
using namespace common;
CHECK_EQ(inputs.size(), 3U);
CHECK_EQ(outputs.size(), 2U);  // lhs input grad, rhs input grad
const auto out_grad_stype = inputs[0].storage_type();
const auto lhs_grad_stype = outputs[0].storage_type();
const auto rhs_grad_stype = outputs[1].storage_type();
if (ContainsOnlyStorage(inputs, kRowSparseStorage) &&
(lhs_grad_stype == kDefaultStorage || lhs_grad_stype == kRowSparseStorage) &&
(rhs_grad_stype == kDefaultStorage || rhs_grad_stype == kRowSparseStorage)) {
// rsp, rsp, rsp -> [dns, rsp], [dns, rsp]
RspRspOpBackward<xpu, LOP, ROP, in0_ok_dense, in1_ok_dense, in2_ok_dense>(
attrs, ctx, inputs, req, outputs, BackwardUseIn<xpu, LOP, ROP>);
}
if (((lhs_grad_stype == kDefaultStorage && rhs_grad_stype == kCSRStorage) ||
(lhs_grad_stype == kCSRStorage && rhs_grad_stype == kDefaultStorage)) &&
out_grad_stype == kDefaultStorage) {
// dns, csr, dns -> [csr, dns] / csr, dns, dns -> [dns, csr]
DnsCsrCsrOpBackward<xpu, LOP, ROP>(attrs, ctx, inputs, req, outputs);
}
}
}; // class ElemwiseBinaryOp
/*! \brief Binary launch: register an elementwise binary operator with two
 *  inputs ("lhs", "rhs") and one output; shape and type are inferred
 *  elementwise, and the output may be written in place over either input. */
#define MXNET_OPERATOR_REGISTER_BINARY(name) \
NNVM_REGISTER_OP(name) \
.set_num_inputs(2) \
.set_num_outputs(1) \
.set_attr<nnvm::FListInputNames>("FListInputNames", \
[](const NodeAttrs& attrs) { \
return std::vector<std::string>{"lhs", "rhs"}; \
}) \
.set_attr<nnvm::FInferShape>("FInferShape", ElemwiseShape<2, 1>) \
.set_attr<nnvm::FInferType>("FInferType", ElemwiseType<2, 1>) \
.set_attr<nnvm::FInplaceOption>("FInplaceOption", \
[](const NodeAttrs& attrs){ \
return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}}; \
}) \
.add_argument("lhs", "NDArray-or-Symbol", "first input") \
.add_argument("rhs", "NDArray-or-Symbol", "second input")
/*! \brief Binary launch, with FComputeEx for csr and rsp available.
 *  Sparse stays sparse only when both inputs share the same sparse storage
 *  (ElemwiseStorageType); temp space is requested for the CSR path. */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseStorageType<2, 1, true, true, true>) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
.set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \
[](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})
/*! \brief Binary launch, with FComputeEx for csr and rsp available.
 *  When inputs mix sparse and dense, sparse output is preferred
 *  (ElemwiseBinaryOp::PreferSparseStorageType). */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PS(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseBinaryOp::PreferSparseStorageType) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
.set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \
[](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})
/*! \brief Binary launch, dense result (sparse inputs always produce a dense
 *  output via SparseSparseWithDenseResult).
 * FInferStorageType attr is not set using this macro.
 * By default DefaultStorageType is used.
 */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseBinaryOp::SparseSparseWithDenseResult) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>)
/*! \brief Binary launch, with FComputeEx preferring a dense output when
 *  inputs mix sparse and dense (PreferDenseStorageType<true, true, true>:
 *  cpu-only sparse kernels, rsp and csr branches both enabled). */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PD(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseBinaryOp::PreferDenseStorageType<true, true, true>) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
.set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \
[](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
|
cancellation_for_sections.c | // RUN: %libomp-compile && env OMP_CANCELLATION=true %libomp-run
// XFAIL: gcc
// Clang had a bug until version 4.0.1 which resulted in a hang.
// UNSUPPORTED: clang-3, clang-4.0.0
// Regression test for a bug in cancellation to cover effect of `#pragma omp cancel`
// in a loop construct, on sections construct.
// Pass condition: Cancellation status from `for` does not persist
// to `sections`.
#include <stdio.h>
#include <omp.h>
// result[i] records how far part i of the test progressed:
// 1 = reached the cancel/cancellation point, 2 = executed past it.
int result[2] = {0, 0};
// Run by every thread of the parallel region in main().
// 1) The `for` construct cancels on its only iteration, so result[0] must
//    stay 1 (the statement after `cancel for` must not execute).
// 2) The following `sections` construct must NOT inherit the cancelled
//    state: its cancellation point is a no-op, so result[1] must reach 2.
void cq416850_for_sections() {
unsigned i;
// 1) loop
#pragma omp for
for (i = 0; i < 1; i++) {
result[0] = 1;
#pragma omp cancel for
result[0] = 2;
}
// printf("thread %d: result[0] = %d, result[1] = %d \n", omp_get_thread_num(), result[0], result[1]);
// 2) sections
#pragma omp sections
{
#pragma omp section
{
result[1] = 1;
#pragma omp cancellation point sections
result[1] = 2;
}
}
}
/* Driver: requires OMP_CANCELLATION=true (exit 2 otherwise), runs the
 * regression body on four threads, then checks the pass condition:
 * result[0] == 1 (for-loop was cancelled) and result[1] == 2 (sections
 * construct was not affected by that cancellation). */
int main(void) {
  if (!omp_get_cancellation()) {
    printf("Cancellation not enabled!\n");
    return 2;
  }

#pragma omp parallel num_threads(4)
  {
    cq416850_for_sections();
  }

  const int passed = (result[0] == 1) && (result[1] == 2);
  if (!passed) {
    printf("Incorrect values. "
           "result[0] = %d (expected 1), "
           "result[1] = %d (expected 2).\n",
           result[0], result[1]);
    printf("FAILED\n");
    return 1;
  }
  printf("PASSED\n");
  return 0;
}
|
IJMatrix_parcsr.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* IJMatrix_ParCSR interface
*
*****************************************************************************/
#include "_hypre_IJ_mv.h"
#include "_hypre_parcsr_mv.h"
#include "../HYPRE.h"
/******************************************************************************
*
* hypre_IJMatrixCreateParCSR
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixCreateParCSR(hypre_IJMatrix *matrix)
{
   /* Build the underlying ParCSR object from the IJ matrix metadata.
      The IJ partitionings are in global numbering; ParCSR wants them
      shifted so the global first row/column maps to 0.  Subtracting the
      global first index unconditionally covers both cases, since the
      subtraction is a no-op when that index is already 0. */
   MPI_Comm            comm             = hypre_IJMatrixComm(matrix);
   HYPRE_BigInt       *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt       *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   HYPRE_BigInt        first_row        = hypre_IJMatrixGlobalFirstRow(matrix);
   HYPRE_BigInt        first_col        = hypre_IJMatrixGlobalFirstCol(matrix);
   hypre_ParCSRMatrix *par_matrix;
   HYPRE_BigInt        row_starts[2];
   HYPRE_BigInt        col_starts[2];
   HYPRE_Int           i;

   for (i = 0; i < 2; i++)
   {
      row_starts[i] = row_partitioning[i] - first_row;
      col_starts[i] = col_partitioning[i] - first_col;
   }

   /* 0 off-diagonal columns / nonzeros: structure is filled in later. */
   par_matrix = hypre_ParCSRMatrixCreate(comm, hypre_IJMatrixGlobalNumRows(matrix),
                                         hypre_IJMatrixGlobalNumCols(matrix),
                                         row_starts, col_starts, 0, 0, 0);
   hypre_IJMatrixObject(matrix) = par_matrix;
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetRowSizesParCSR
*
*****************************************************************************/
/* Record the user-supplied (estimated) number of entries per local row in
 * the auxiliary matrix, creating the auxiliary matrix on first use.
 * sizes must hold one entry per local row. */
HYPRE_Int
hypre_IJMatrixSetRowSizesParCSR(hypre_IJMatrix *matrix,
const HYPRE_Int *sizes)
{
HYPRE_Int local_num_rows, local_num_cols, i, *row_space = NULL;
HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
/* local extents from the 2-entry partitionings */
local_num_rows = (HYPRE_Int)(row_partitioning[1]-row_partitioning[0]);
local_num_cols = (HYPRE_Int)(col_partitioning[1]-col_partitioning[0]);
hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
if (aux_matrix)
{
/* reuse an existing per-row size array if one was already allocated */
row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
}
if (!row_space)
{
row_space = hypre_CTAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
}
for (i = 0; i < local_num_rows; i++)
{
row_space[i] = sizes[i];
}
if (!aux_matrix)
{
/* first call without a translator: create it, handing over row_space */
hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, local_num_cols, row_space);
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
hypre_AuxParCSRMatrixRowSpace(aux_matrix) = row_space;
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
/* device builds additionally track the total user-declared on-proc count */
hypre_AuxParCSRMatrixUsrOnProcElmts(aux_matrix) = 0;
for (i = 0; i < local_num_rows; i++)
{
hypre_AuxParCSRMatrixUsrOnProcElmts(aux_matrix) += sizes[i];
}
#endif
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetDiagOffdSizesParCSR
* sets diag_i inside the diag part of the ParCSRMatrix
* and offd_i inside the offd part,
* requires exact row sizes for diag and offd
*
*****************************************************************************/
/* Store exact per-row entry counts for the diag and offd parts in the
 * auxiliary matrix (creating it if needed) and mark that the auxiliary
 * row-by-row assembly structures are no longer required (NeedAux = 0). */
HYPRE_Int
hypre_IJMatrixSetDiagOffdSizesParCSR(hypre_IJMatrix *matrix,
const HYPRE_Int *diag_sizes,
const HYPRE_Int *offd_sizes)
{
HYPRE_Int local_num_rows, local_num_cols;
HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
/* local extents from the 2-entry partitionings */
local_num_rows = (HYPRE_Int)(row_partitioning[1]-row_partitioning[0]);
local_num_cols = (HYPRE_Int)(col_partitioning[1]-col_partitioning[0]);
hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix *)hypre_IJMatrixTranslator(matrix);
if (!aux_matrix)
{
hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, local_num_cols, NULL);
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
/* lazily allocate the per-row size arrays, then copy the user data in */
if ( hypre_AuxParCSRMatrixDiagSizes(aux_matrix) == NULL)
{
hypre_AuxParCSRMatrixDiagSizes(aux_matrix) = hypre_TAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
}
if ( hypre_AuxParCSRMatrixOffdSizes(aux_matrix) == NULL)
{
hypre_AuxParCSRMatrixOffdSizes(aux_matrix) = hypre_TAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
}
hypre_TMemcpy(hypre_AuxParCSRMatrixDiagSizes(aux_matrix), diag_sizes, HYPRE_Int, local_num_rows,
HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
hypre_TMemcpy(hypre_AuxParCSRMatrixOffdSizes(aux_matrix), offd_sizes, HYPRE_Int, local_num_rows,
HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
/* exact sizes known: the aux row-assembly structures are not needed */
hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetMaxOnProcElmtsParCSR
*
*****************************************************************************/
/* Record a user-supplied bound on the number of on-processor elements.
 * Only meaningful for device (CUDA/HIP) builds; otherwise a no-op.
 * Cleanup: the original computed the MPI rank into `my_id` but never used
 * it -- the unused `comm`/`my_id` locals and the rank call are removed. */
HYPRE_Int
hypre_IJMatrixSetMaxOnProcElmtsParCSR(hypre_IJMatrix *matrix,
                                      HYPRE_Int max_on_proc_elmts)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_Int local_num_rows, local_num_cols;
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);

   aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
   if (!aux_matrix)
   {
      /* lazily create the auxiliary matrix so the setting has a home
         before Initialize/Assemble runs */
      local_num_rows = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
      local_num_cols = (HYPRE_Int)(col_partitioning[1] - col_partitioning[0]);
      hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, local_num_cols, NULL);
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }
   hypre_AuxParCSRMatrixUsrOnProcElmts(aux_matrix) = max_on_proc_elmts;
#endif
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetMaxOffProcElmtsParCSR
*
*****************************************************************************/
/* Record a user-supplied bound on the number of off-processor elements,
 * lazily creating the auxiliary matrix if necessary.
 * Cleanup: the original computed the MPI rank into `my_id` but never used
 * it -- the unused `comm`/`my_id` locals and the rank call are removed. */
HYPRE_Int
hypre_IJMatrixSetMaxOffProcElmtsParCSR(hypre_IJMatrix *matrix,
                                       HYPRE_Int max_off_proc_elmts)
{
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_Int local_num_rows, local_num_cols;
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);

   aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
   if (!aux_matrix)
   {
      local_num_rows = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
      local_num_cols = (HYPRE_Int)(col_partitioning[1] - col_partitioning[0]);
      hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows,
                                  local_num_cols, NULL);
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }
   hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   /* device builds track the user-declared off-proc count separately */
   hypre_AuxParCSRMatrixUsrOffProcElmts(aux_matrix) = max_off_proc_elmts;
#endif
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixInitializeParCSR
*
* initializes AuxParCSRMatrix and ParCSRMatrix as necessary
*
*****************************************************************************/
/* Convenience wrapper: initialize using the memory location configured in
 * the global hypre handle. */
HYPRE_Int
hypre_IJMatrixInitializeParCSR(hypre_IJMatrix *matrix)
{
return hypre_IJMatrixInitializeParCSR_v2(matrix, hypre_HandleMemoryLocation(hypre_handle()));
}
/* Initialize the ParCSR object and its auxiliary matrix for assembly,
 * creating either if missing.  `memory_location` selects where the ParCSR
 * data lives; the auxiliary data goes to host or device depending on the
 * execution policy derived from it. */
HYPRE_Int
hypre_IJMatrixInitializeParCSR_v2(hypre_IJMatrix *matrix, HYPRE_MemoryLocation memory_location)
{
hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
/* aux data follows the execution policy, not necessarily memory_location */
HYPRE_MemoryLocation memory_location_aux =
hypre_GetExecPolicy1(memory_location) == HYPRE_EXEC_HOST ? HYPRE_MEMORY_HOST : HYPRE_MEMORY_DEVICE;
if (hypre_IJMatrixAssembleFlag(matrix) == 0)
{
/* not yet assembled: make sure both objects exist and are initialized */
if (!par_matrix)
{
hypre_IJMatrixCreateParCSR(matrix);
par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
}
HYPRE_Int local_num_rows = hypre_ParCSRMatrixNumRows(par_matrix);
HYPRE_Int i;
hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
if (!aux_matrix)
{
hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, hypre_ParCSRMatrixNumCols(par_matrix), NULL);
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
hypre_ParCSRMatrixInitialize_v2(par_matrix, memory_location);
hypre_AuxParCSRMatrixInitialize_v2(aux_matrix, memory_location_aux);
if (memory_location_aux == HYPRE_MEMORY_HOST)
{
/* exact sizes known (SetDiagOffdSizes): build the CSR row pointers
   by prefix-summing the per-row counts, then size the data arrays */
if (hypre_AuxParCSRMatrixDiagSizes(aux_matrix))
{
for (i = 0; i < local_num_rows; i++)
{
hypre_CSRMatrixI(diag)[i+1] = hypre_CSRMatrixI(diag)[i] + hypre_AuxParCSRMatrixDiagSizes(aux_matrix)[i];
}
hypre_CSRMatrixNumNonzeros(diag) = hypre_CSRMatrixI(diag)[local_num_rows];
hypre_CSRMatrixInitialize(diag);
}
if (hypre_AuxParCSRMatrixOffdSizes(aux_matrix))
{
for (i = 0; i < local_num_rows; i++)
{
hypre_CSRMatrixI(offd)[i+1] = hypre_CSRMatrixI(offd)[i] + hypre_AuxParCSRMatrixOffdSizes(aux_matrix)[i];
}
hypre_CSRMatrixNumNonzeros(offd) = hypre_CSRMatrixI(offd)[local_num_rows];
hypre_CSRMatrixInitialize(offd);
}
}
if (!hypre_AuxParCSRMatrixNeedAux(aux_matrix))
{
/* direct-insertion mode: seed the per-row insertion cursors from the
   freshly built row pointers */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < local_num_rows; i++)
{
hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[i] = hypre_CSRMatrixI(diag)[i];
hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[i] = hypre_CSRMatrixI(offd)[i];
}
}
}
else if ( memory_location_aux == HYPRE_MEMORY_HOST )
{
/* AB 4/06 - the assemble routine destroys the aux matrix - so we need
to recreate if initialize is called again
*/
/* NOTE(review): the device-aux path intentionally(?) skips this
   re-creation -- confirm that is the desired behavior */
if (!aux_matrix)
{
hypre_AuxParCSRMatrixCreate(&aux_matrix, hypre_ParCSRMatrixNumRows(par_matrix),
hypre_ParCSRMatrixNumCols(par_matrix), NULL);
hypre_AuxParCSRMatrixMemoryLocation(aux_matrix) = HYPRE_MEMORY_HOST;
hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
}
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixGetRowCountsParCSR
*
* gets the number of columns for rows specified by the user
*
*****************************************************************************/
/* For each global row index in rows[0..nrows-1] owned by this process,
 * write the number of stored entries (diag + offd) into ncols[i]; rows not
 * owned locally yield 0 (with an optional warning). */
HYPRE_Int hypre_IJMatrixGetRowCountsParCSR( hypre_IJMatrix *matrix,
                                            HYPRE_Int nrows,
                                            HYPRE_BigInt *rows,
                                            HYPRE_Int *ncols)
{
   HYPRE_BigInt row_index;
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
   HYPRE_Int *diag_i = hypre_CSRMatrixI(diag);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
   HYPRE_Int *offd_i = hypre_CSRMatrixI(offd);
   HYPRE_Int i, my_id, index;
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
   hypre_MPI_Comm_rank(comm,&my_id);
#ifdef HYPRE_USING_OPENMP
   /* Fix: `index` is written inside the loop, so it must be thread-private.
      The original clause listed only (i, row_index), leaving `index` shared
      -- a data race that could produce wrong counts under OpenMP. */
#pragma omp parallel for private(i, row_index, index) HYPRE_SMP_SCHEDULE
#endif
   for (i=0; i < nrows; i++)
   {
      row_index = rows[i];
      if (row_index >= row_partitioning[0] &&
          row_index < row_partitioning[1])
      {
         /* compute local row number */
         index = (HYPRE_Int)(row_index - row_partitioning[0]);
         ncols[i] = diag_i[index+1]-diag_i[index]+offd_i[index+1]-offd_i[index];
      }
      else
      {
         ncols[i] = 0;
         if (print_level)
         {
            hypre_printf ("Warning! Row %b is not on Proc. %d!\n",
                          row_index, my_id);
         }
      }
   }
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixGetValuesParCSR
*
* gets values of an IJMatrix
*
*****************************************************************************/
/* Gets values of an assembled IJMatrix of ParCSR type.
 *
 * Two modes, selected by the sign of nrows:
 *   nrows >= 0 : for each (row, col) pair given in rows/cols, look up the
 *                stored value (0.0 if the entry is not stored).
 *   nrows <  0 : |nrows| rows are given; for each owned row, ALL stored
 *                entries (column indices and values) are copied out, and
 *                ncols[] is rewritten to the actual row lengths if they
 *                differ from what the caller supplied.
 *
 * Rows not owned by this process are skipped with an optional warning.
 */
HYPRE_Int
hypre_IJMatrixGetValuesParCSR( hypre_IJMatrix *matrix,
                               HYPRE_Int       nrows,
                               HYPRE_Int      *ncols,
                               HYPRE_BigInt   *rows,
                               HYPRE_BigInt   *cols,
                               HYPRE_Complex  *values)
{
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
   HYPRE_Int assemble_flag = hypre_IJMatrixAssembleFlag(matrix);
   hypre_CSRMatrix *diag;
   HYPRE_Int *diag_i;
   HYPRE_Int *diag_j;
   HYPRE_Complex *diag_data;
   hypre_CSRMatrix *offd;
   HYPRE_Int *offd_i;
   /* Fix: NULL-initialize — these are only set when num_procs > 1 and were
    * previously left indeterminate on a single process. */
   HYPRE_Int *offd_j = NULL;
   HYPRE_Complex *offd_data = NULL;
   HYPRE_BigInt *col_map_offd = NULL;
   HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(par_matrix);
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_Int i, j, n, ii, indx;
   HYPRE_Int num_procs, my_id;
   HYPRE_BigInt col_0, col_n, row, col_indx, first;
   HYPRE_Int row_local, row_size;
   HYPRE_Int warning = 0;
   HYPRE_Int *counter = NULL;
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   if (assemble_flag == 0)
   {
      hypre_error_in_arg(1);
      if (print_level)
      {
         hypre_printf("Error! Matrix not assembled yet! HYPRE_IJMatrixGetValues\n");
      }
      /* Fix: return instead of continuing to read the diag/offd parts of an
       * unassembled matrix. */
      return hypre_error_flag;
   }

   col_0 = col_starts[0];
   col_n = col_starts[1] - 1;
   first = hypre_IJMatrixGlobalFirstCol(matrix);

   diag = hypre_ParCSRMatrixDiag(par_matrix);
   diag_i = hypre_CSRMatrixI(diag);
   diag_j = hypre_CSRMatrixJ(diag);
   diag_data = hypre_CSRMatrixData(diag);

   offd = hypre_ParCSRMatrixOffd(par_matrix);
   offd_i = hypre_CSRMatrixI(offd);
   if (num_procs > 1)
   {
      offd_j = hypre_CSRMatrixJ(offd);
      offd_data = hypre_CSRMatrixData(offd);
      col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
   }

   if (nrows < 0)
   {
      /* whole-row extraction mode */
      nrows = -nrows;

      /* counter[i] = offset into cols/values where row i's data starts,
       * based on the caller-supplied ncols[]; counter[nrows] is the total
       * capacity of the output buffers. */
      counter = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
      counter[0] = 0;
      for (i=0; i < nrows; i++)
      {
         counter[i+1] = counter[i]+ncols[i];
      }

      indx = 0;
      for (i=0; i < nrows; i++)
      {
         row = rows[i];
         if (row >= row_partitioning[0] && row < row_partitioning[1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[0]);
            row_size = diag_i[row_local+1] - diag_i[row_local] +
                       offd_i[row_local+1] - offd_i[row_local];
            if (counter[i]+row_size > counter[nrows])
            {
               hypre_error_in_arg(1);
               if (print_level)
               {
                  hypre_printf ("Error! Not enough memory! HYPRE_IJMatrixGetValues\n");
               }
               /* Fix: stop here — continuing would write past the caller's
                * cols/values buffers. */
               hypre_TFree(counter, HYPRE_MEMORY_HOST);
               return hypre_error_flag;
            }
            if (ncols[i] < row_size)
            {
               warning = 1;
            }
            /* copy diag part (local column + col_0 gives the global index) */
            for (j = diag_i[row_local]; j < diag_i[row_local+1]; j++)
            {
               cols[indx] = (HYPRE_BigInt)diag_j[j] + col_0;
               values[indx++] = diag_data[j];
            }
            /* copy offd part (map local offd column to global index) */
            for (j = offd_i[row_local]; j < offd_i[row_local+1]; j++)
            {
               cols[indx] = col_map_offd[offd_j[j]];
               values[indx++] = offd_data[j];
            }
            counter[i+1] = indx;
         }
         else
         {
            if (print_level)
            {
               hypre_printf ("Warning! Row %b is not on Proc. %d!\n", row, my_id);
            }
         }
      }

      if (warning)
      {
         /* some rows were longer/shorter than the caller said: rewrite
          * ncols[] with the actual lengths */
         for (i=0; i < nrows; i++)
         {
            ncols[i] = counter[i+1] - counter[i];
         }
         if (print_level)
         {
            hypre_printf ("Warning! ncols has been changed!\n");
         }
      }
      hypre_TFree(counter, HYPRE_MEMORY_HOST);
   }
   else
   {
      /* per-entry lookup mode */
      indx = 0;
      for (ii=0; ii < nrows; ii++)
      {
         row = rows[ii];
         n = ncols[ii];
         if (n == 0) /* empty row */
         {
            continue;
         }
         if (row >= row_partitioning[0] && row < row_partitioning[1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[0]);
            /* compute local row number */
            for (i=0; i < n; i++)
            {
               col_indx = cols[indx] - first;
               values[indx] = 0.0;  /* default: entry not stored */
               if (col_indx < col_0 || col_indx > col_n)
                  /* search in offd */
               {
                  for (j=offd_i[row_local]; j < offd_i[row_local+1]; j++)
                  {
                     if (col_map_offd[offd_j[j]] == col_indx)
                     {
                        values[indx] = offd_data[j];
                        break;
                     }
                  }
               }
               else /* search in diag */
               {
                  col_indx = col_indx - col_0;
                  for (j=diag_i[row_local]; j < diag_i[row_local+1]; j++)
                  {
                     if (diag_j[j] == (HYPRE_Int)col_indx)
                     {
                        values[indx] = diag_data[j];
                        break;
                     }
                  }
               }
               indx++;
            }
         }
         else
         {
            if (print_level)
            {
               hypre_printf ("Warning! Row %b is not on Proc. %d!\n", row, my_id);
            }
         }
      }
   }

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetValuesParCSR
*
* sets values in an IJMatrix before assembly,
*
*****************************************************************************/
/* Sets (overwrites) entries in an IJMatrix of ParCSR type before assembly.
 *
 * Three regimes, depending on matrix state:
 *   1. Matrix already assembled: only existing entries may be overwritten
 *      in place; a column not present in the sparsity pattern is an error.
 *   2. Not assembled, aux matrix in "need_aux" mode: entries accumulate in
 *      per-row auxiliary arrays (aux_j/aux_data), grown on demand.
 *   3. Not assembled, direct mode: entries are written straight into the
 *      preallocated diag/offd CSR arrays, tracked by IndxDiag/IndxOffd.
 *
 * Rows not owned by this process are silently skipped in all regimes.
 * ncols == NULL means one value per row.  row_indexes[ii] gives the offset
 * of row ii's data in cols/values.
 */
HYPRE_Int
hypre_IJMatrixSetValuesParCSR( hypre_IJMatrix       *matrix,
                               HYPRE_Int             nrows,
                               HYPRE_Int            *ncols,
                               const HYPRE_BigInt   *rows,
                               const HYPRE_Int      *row_indexes,
                               const HYPRE_BigInt   *cols,
                               const HYPRE_Complex  *values )
{
   hypre_ParCSRMatrix *par_matrix;
   hypre_CSRMatrix *diag, *offd;
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_BigInt *row_partitioning;
   HYPRE_BigInt *col_partitioning;
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   HYPRE_Int num_procs, my_id;
   HYPRE_Int row_local;
   //HYPRE_Int row_len;
   HYPRE_BigInt col_0, col_n, row;
   HYPRE_Int i, ii, j, n, not_found;
   //HYPRE_Int col_indx, cnt1;
   HYPRE_BigInt **aux_j;
   HYPRE_BigInt *local_j;
   HYPRE_BigInt *tmp_j;
   HYPRE_Complex **aux_data;
   HYPRE_Complex *local_data;
   HYPRE_Complex *tmp_data;
   HYPRE_Int diag_space, offd_space;
   HYPRE_Int *row_length, *row_space;
   HYPRE_Int need_aux;
   HYPRE_Int tmp_indx, indx;
   HYPRE_Int space, size, old_size;
   HYPRE_Int cnt, cnt_diag, cnt_offd;
   HYPRE_Int pos_diag, pos_offd;
   HYPRE_Int len_diag, len_offd;
   HYPRE_Int offd_indx, diag_indx;
   HYPRE_Int *diag_i;
   HYPRE_Int *diag_j;
   HYPRE_Complex *diag_data;
   HYPRE_Int *offd_i;
   HYPRE_Int *offd_j;
   HYPRE_Complex *offd_data;
   HYPRE_BigInt first;
   /*HYPRE_Int current_num_elmts;*/
   /*HYPRE_Int max_off_proc_elmts;*/
   //HYPRE_Int off_proc_i_indx;
   //HYPRE_BigInt *off_proc_i;
   //HYPRE_BigInt *off_proc_j;
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
   /*HYPRE_Complex *off_proc_data;*/

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
   row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   col_partitioning = hypre_IJMatrixColPartitioning(matrix);

   /* [col_0, col_n] is this process's owned global column range; anything
    * outside belongs in the offd part */
   col_0 = col_partitioning[0];
   col_n = col_partitioning[1]-1;
   first = hypre_IJMatrixGlobalFirstCol(matrix);

   if (nrows < 0)
   {
      hypre_error_in_arg(2);
      if (print_level)
      {
         hypre_printf("Error! nrows negative! HYPRE_IJMatrixSetValues\n");
      }
      /* NOTE(review): execution continues after flagging; the loops below
       * do not run since nrows < 0 */
   }

   if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled*/
   {
      HYPRE_BigInt *col_map_offd;
      HYPRE_Int num_cols_offd;
      HYPRE_Int j_offd;

      for (ii=0; ii < nrows; ii++)
      {
         row = rows[ii];
         n = ncols ? ncols[ii] : 1;
         if (n == 0) /* empty row */
         {
            continue;
         }
         indx = row_indexes[ii];

         /* processor owns the row */
         if (row >= row_partitioning[0] && row < row_partitioning[1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[0]);

            /* compute local row number */
            diag = hypre_ParCSRMatrixDiag(par_matrix);
            diag_i = hypre_CSRMatrixI(diag);
            diag_j = hypre_CSRMatrixJ(diag);
            diag_data = hypre_CSRMatrixData(diag);
            offd = hypre_ParCSRMatrixOffd(par_matrix);
            offd_i = hypre_CSRMatrixI(offd);
            num_cols_offd = hypre_CSRMatrixNumCols(offd);
            if (num_cols_offd)
            {
               col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
               offd_j = hypre_CSRMatrixJ(offd);
               offd_data = hypre_CSRMatrixData(offd);
            }

            size = diag_i[row_local+1] - diag_i[row_local] +
                   offd_i[row_local+1] - offd_i[row_local];

            if (n > size) /* Should we change this and allow this?
                             This could be same column index, i.e. only last
                             value is set, previous ones overwritten. */
            {
               hypre_error(HYPRE_ERROR_GENERIC);
               if (print_level)
               {
                  hypre_printf (" row %b too long! \n", row);
               }
               return hypre_error_flag;
            }

            pos_diag = diag_i[row_local];
            pos_offd = offd_i[row_local];
            len_diag = diag_i[row_local+1];
            len_offd = offd_i[row_local+1];
            not_found = 1;

            for (i=0; i < n; i++)
            {
               if (cols[indx] < col_0 || cols[indx] > col_n)
                  /* insert into offd */
               {
                  /* col_map_offd is sorted, so the local offd column index
                   * can be found by binary search */
                  j_offd = hypre_BigBinarySearch(col_map_offd,cols[indx]-first,
                                                 num_cols_offd);
                  if (j_offd == -1)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
                  for (j=pos_offd; j < len_offd; j++)
                  {
                     if (offd_j[j] == j_offd)
                     {
                        offd_data[j] = values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
                  not_found = 1;
               }
               /* diagonal element */
               else if (cols[indx] == row)
               {
                  /* assumes the diagonal entry is stored first in the diag
                   * part of the row */
                  if (diag_j[pos_diag] != row_local)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     /* return -1;*/
                     return hypre_error_flag;
                  }
                  diag_data[pos_diag] = values[indx];
               }
               else /* insert into diag */
               {
                  for (j=pos_diag; j < len_diag; j++)
                  {
                     if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
                     {
                        diag_data[j] = values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     /* return -1; */
                     return hypre_error_flag;
                  }
               }
               indx++;
            }
         }
      }
   }
   else
   {
      aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
      row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
      row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
      need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);

      for (ii=0; ii < nrows; ii++)
      {
         row = rows[ii];
         n = ncols ? ncols[ii] : 1;
         if (n == 0) /* empty row */
         {
            continue;
         }
         indx = row_indexes[ii];

         /* processor owns the row */
         if (row >= row_partitioning[0] && row < row_partitioning[1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[0]);
            /* compute local row number */
            if (need_aux)
            {
               aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
               aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
               local_j = aux_j[row_local];
               local_data = aux_data[row_local];
               space = row_space[row_local];
               old_size = row_length[row_local];
               size = space - old_size;

               /* entries that do not fit in the remaining row space are
                * parked in tmp_j/tmp_data and merged in after a realloc */
               if (size < n)
               {
                  size = n - size;
                  tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
                  tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
               }
               else
               {
                  tmp_j = NULL;
               }
               tmp_indx = 0;
               not_found = 1;
               size = old_size;

               for (i=0; i < n; i++)
               {
                  /* overwrite an already-stored column if present */
                  for (j=0; j < old_size; j++)
                  {
                     if (local_j[j] == cols[indx])
                     {
                        local_data[j] = values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     if (size < space)
                     {
                        local_j[size] = cols[indx];
                        local_data[size++] = values[indx];
                     }
                     else
                     {
                        tmp_j[tmp_indx] = cols[indx];
                        tmp_data[tmp_indx++] = values[indx];
                     }
                  }
                  not_found = 1;
                  indx++;
               }

               row_length[row_local] = size+tmp_indx;

               if (tmp_indx)
               {
                  /* grow the row exactly to fit and append the parked
                   * overflow entries */
                  aux_j[row_local] = hypre_TReAlloc(aux_j[row_local], HYPRE_BigInt,
                                                    size+tmp_indx, HYPRE_MEMORY_HOST);
                  aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
                                                       HYPRE_Complex, size+tmp_indx, HYPRE_MEMORY_HOST);
                  row_space[row_local] = size+tmp_indx;
                  local_j = aux_j[row_local];
                  local_data = aux_data[row_local];
               }

               cnt = size;
               for (i=0; i < tmp_indx; i++)
               {
                  local_j[cnt] = tmp_j[i];
                  local_data[cnt++] = tmp_data[i];
               }

               if (tmp_j)
               {
                  hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
                  hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
               }
            }
            else /* insert immediately into data in ParCSRMatrix structure */
            {
               HYPRE_BigInt *big_offd_j;
               HYPRE_Int col_j;

               /* IndxDiag/IndxOffd track how far each row has been filled */
               offd_indx =hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
               diag_indx =hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
               diag = hypre_ParCSRMatrixDiag(par_matrix);
               diag_i = hypre_CSRMatrixI(diag);
               diag_j = hypre_CSRMatrixJ(diag);
               diag_data = hypre_CSRMatrixData(diag);
               offd = hypre_ParCSRMatrixOffd(par_matrix);
               offd_i = hypre_CSRMatrixI(offd);
               if (num_procs > 1)
               {
                  /* offd columns are stored as global indices (BigJ) until
                   * assembly; allocate the array lazily */
                  big_offd_j = hypre_CSRMatrixBigJ(offd);
                  offd_data = hypre_CSRMatrixData(offd);
                  if (!big_offd_j)
                  {
                     big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)],
                                                hypre_CSRMatrixMemoryLocation(offd));
                     hypre_CSRMatrixBigJ(offd) = big_offd_j;
                  }
               }
               cnt_diag = diag_indx;
               cnt_offd = offd_indx;
               diag_space = diag_i[row_local+1];
               offd_space = offd_i[row_local+1];
               not_found = 1;

               for (i=0; i < n; i++)
               {
                  if (cols[indx] < col_0 || cols[indx] > col_n)
                     /* insert into offd */
                  {
                     for (j=offd_i[row_local]; j < offd_indx; j++)
                     {
                        if (big_offd_j[j] == cols[indx])
                        {
                           offd_data[j] = values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (cnt_offd < offd_space)
                        {
                           big_offd_j[cnt_offd] = cols[indx];
                           offd_data[cnt_offd++] = values[indx];
                        }
                        else
                        {
                           hypre_error(HYPRE_ERROR_GENERIC);
                           if (print_level)
                           {
                              hypre_printf("Error in row %b ! Too many elements!\n",
                                           row);
                           }
                           /* return 1; */
                           return hypre_error_flag;
                        }
                     }
                     not_found = 1;
                  }
                  else /* insert into diag */
                  {
                     col_j = (HYPRE_Int)(cols[indx]-col_0);
                     for (j=diag_i[row_local]; j < diag_indx; j++)
                     {
                        if (diag_j[j] == col_j)
                        {
                           diag_data[j] = values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (cnt_diag < diag_space)
                        {
                           diag_j[cnt_diag] = col_j;
                           diag_data[cnt_diag++] = values[indx];
                        }
                        else
                        {
                           hypre_error(HYPRE_ERROR_GENERIC);
                           if (print_level)
                           {
                              hypre_printf("Error in row %b ! Too many elements !\n",
                                           row);
                           }
                           /* return 1; */
                           return hypre_error_flag;
                        }
                     }
                     not_found = 1;
                  }
                  indx++;
               }

               hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
               hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
            }
         }
      }
   }

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetConstantValuesParCSR
*
* sets all values in an already assembled IJMatrix to a constant value.
*
*****************************************************************************/
/* Overwrites every stored entry (diag and offd parts) of an assembled
 * IJMatrix with a single constant value.  Host (CPU) implementation. */
void
hypre_IJMatrixSetConstantValuesParCSRHost( hypre_IJMatrix *matrix,
                                           HYPRE_Complex   value )
{
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
   hypre_CSRMatrix    *diag       = hypre_ParCSRMatrixDiag(par_matrix);
   hypre_CSRMatrix    *offd       = hypre_ParCSRMatrixOffd(par_matrix);
   HYPRE_Complex      *diag_a     = hypre_CSRMatrixData(diag);
   HYPRE_Complex      *offd_a     = hypre_CSRMatrixData(offd);
   HYPRE_Int           diag_nnz   = hypre_CSRMatrixNumNonzeros(diag);
   HYPRE_Int           offd_nnz   = hypre_CSRMatrixNumNonzeros(offd);
   HYPRE_Int           k;

   /* fill the diagonal block's coefficient array */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE
#endif
   for (k = 0; k < diag_nnz; k++)
   {
      diag_a[k] = value;
   }

   /* fill the off-diagonal block's coefficient array */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE
#endif
   for (k = 0; k < offd_nnz; k++)
   {
      offd_a[k] = value;
   }
}
/* Sets all stored entries of an already assembled IJMatrix to a constant.
 * Dispatches to the device implementation when the matrix lives on the GPU,
 * otherwise to the host implementation.  Flags an error if the matrix has
 * not been assembled yet. */
HYPRE_Int
hypre_IJMatrixSetConstantValuesParCSR( hypre_IJMatrix *matrix,
                                       HYPRE_Complex   value )
{
   if (!hypre_IJMatrixAssembleFlag(matrix))
   {
      /* constant fill only makes sense once the sparsity pattern exists */
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,
                        "Matrix not assembled! Required to set constant values!");
      return hypre_error_flag;
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   if (hypre_GetExecPolicy1(hypre_IJMatrixMemoryLocation(matrix)) == HYPRE_EXEC_DEVICE)
   {
      hypre_IJMatrixSetConstantValuesParCSRDevice(matrix, value);
      return hypre_error_flag;
   }
#endif

   hypre_IJMatrixSetConstantValuesParCSRHost(matrix, value);
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixAddToValuesParCSR
*
* adds row values to an IJMatrix
*
*****************************************************************************/
/* Adds (accumulates) values into an IJMatrix of ParCSR type.
 *
 * Mirrors hypre_IJMatrixSetValuesParCSR but uses "+=" for existing entries,
 * and — unlike Set — supports rows owned by OTHER processes: such
 * contributions are queued in the aux matrix's off-proc buffers
 * (off_proc_i/j/data) and communicated at assembly time.
 *
 * Regimes:
 *   1. Matrix already assembled: existing entries are incremented in place;
 *      a column absent from the sparsity pattern is an error.  Off-proc rows
 *      are queued (an aux matrix is created on demand for the queue).
 *   2. Not assembled, need_aux: per-row auxiliary arrays are grown/updated.
 *   3. Not assembled, direct mode: diag/offd CSR arrays are filled in place,
 *      tracked by IndxDiag/IndxOffd.
 *
 * ncols == NULL means one value per row; row_indexes[ii] is the offset of
 * row ii's data in cols/values.
 */
HYPRE_Int
hypre_IJMatrixAddToValuesParCSR( hypre_IJMatrix       *matrix,
                                 HYPRE_Int             nrows,
                                 HYPRE_Int            *ncols,
                                 const HYPRE_BigInt   *rows,
                                 const HYPRE_Int      *row_indexes,
                                 const HYPRE_BigInt   *cols,
                                 const HYPRE_Complex  *values )
{
   hypre_ParCSRMatrix *par_matrix;
   hypre_CSRMatrix *diag, *offd;
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_BigInt *row_partitioning;
   HYPRE_BigInt *col_partitioning;
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   HYPRE_Int num_procs, my_id;
   HYPRE_Int row_local;
   HYPRE_BigInt row;
   HYPRE_BigInt col_0, col_n;
   HYPRE_Int i, ii, j, n, not_found;
   HYPRE_BigInt **aux_j;
   HYPRE_BigInt *local_j;
   HYPRE_BigInt *tmp_j;
   HYPRE_Complex **aux_data;
   HYPRE_Complex *local_data;
   HYPRE_Complex *tmp_data;
   HYPRE_Int diag_space, offd_space;
   HYPRE_Int *row_length, *row_space;
   HYPRE_Int need_aux;
   HYPRE_Int tmp_indx, indx;
   HYPRE_Int space, size, old_size;
   HYPRE_Int cnt, cnt_diag, cnt_offd;
   HYPRE_Int pos_diag, pos_offd;
   HYPRE_Int len_diag, len_offd;
   HYPRE_Int offd_indx, diag_indx;
   HYPRE_BigInt first;
   HYPRE_Int *diag_i;
   HYPRE_Int *diag_j;
   HYPRE_Complex *diag_data;
   HYPRE_Int *offd_i;
   HYPRE_Int *offd_j;
   HYPRE_Complex *offd_data;
   HYPRE_Int current_num_elmts;
   HYPRE_Int max_off_proc_elmts;
   HYPRE_Int off_proc_i_indx;
   HYPRE_BigInt *off_proc_i;
   HYPRE_BigInt *off_proc_j;
   HYPRE_Complex *off_proc_data;
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
   row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   col_partitioning = hypre_IJMatrixColPartitioning(matrix);

   /* [col_0, col_n] is this process's owned global column range; anything
    * outside belongs in the offd part */
   col_0 = col_partitioning[0];
   col_n = col_partitioning[1]-1;
   first = hypre_IJMatrixGlobalFirstCol(matrix);

   if (hypre_IJMatrixAssembleFlag(matrix))
   {
      HYPRE_Int num_cols_offd;
      HYPRE_BigInt *col_map_offd;
      HYPRE_Int j_offd;

      /* AB - 4/06 - need to get this object*/
      aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);

      for (ii=0; ii < nrows; ii++)
      {
         row = rows[ii];
         n = ncols ? ncols[ii] : 1;
         if (n == 0) /* empty row */
         {
            continue;
         }
         indx = row_indexes[ii];

         if (row >= row_partitioning[0] && row < row_partitioning[1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[0]);

            /* compute local row number */
            diag = hypre_ParCSRMatrixDiag(par_matrix);
            diag_i = hypre_CSRMatrixI(diag);
            diag_j = hypre_CSRMatrixJ(diag);
            diag_data = hypre_CSRMatrixData(diag);
            offd = hypre_ParCSRMatrixOffd(par_matrix);
            offd_i = hypre_CSRMatrixI(offd);
            num_cols_offd = hypre_CSRMatrixNumCols(offd);
            if (num_cols_offd)
            {
               col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
               offd_j = hypre_CSRMatrixJ(offd);
               offd_data = hypre_CSRMatrixData(offd);
            }

            size = diag_i[row_local+1] - diag_i[row_local] +
                   offd_i[row_local+1] - offd_i[row_local];

            if (n > size) /* Should we change this and allow this?
                             This could be same column index, i.e. only last
                             value is set, previous ones overwritten. */
            {
               hypre_error(HYPRE_ERROR_GENERIC);
               if (print_level)
               {
                  hypre_printf (" row %b too long! \n", row);
               }
               return hypre_error_flag;
            }

            pos_diag = diag_i[row_local];
            pos_offd = offd_i[row_local];
            len_diag = diag_i[row_local+1];
            len_offd = offd_i[row_local+1];
            not_found = 1;

            for (i=0; i < n; i++)
            {
               if (cols[indx] < col_0 || cols[indx] > col_n)
                  /* insert into offd */
               {
                  /* col_map_offd is sorted: binary search for the local
                   * offd column index */
                  j_offd = hypre_BigBinarySearch(col_map_offd,cols[indx]-first,
                                                 num_cols_offd);
                  if (j_offd == -1)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                     /* return -1; */
                  }
                  for (j=pos_offd; j < len_offd; j++)
                  {
                     if (offd_j[j] == j_offd)
                     {
                        offd_data[j] += values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
                  not_found = 1;
               }
               /* diagonal element */
               else if (cols[indx] == row)
               {
                  /* assumes the diagonal entry is stored first in the diag
                   * part of the row */
                  if (diag_j[pos_diag] != row_local)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
                  diag_data[pos_diag] += values[indx];
               }
               else /* insert into diag */
               {
                  for (j=pos_diag; j < len_diag; j++)
                  {
                     if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
                     {
                        diag_data[j] += values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n",
                                      row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
               }
               indx++;
            }
         }
         /* not my row */
         else
         {
            /* queue the contribution for exchange at assembly time;
             * create the aux matrix on demand to hold the queue */
            if (!aux_matrix)
            {
               size = (HYPRE_Int)(row_partitioning[1]-row_partitioning[0]);
               hypre_AuxParCSRMatrixCreate(&aux_matrix, size, size, NULL);
               hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
               hypre_IJMatrixTranslator(matrix) = aux_matrix;
            }
            current_num_elmts
               = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
            max_off_proc_elmts
               = hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
            off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
            off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
            off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
            off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);

            if (!max_off_proc_elmts)
            {
               /* first off-proc contribution: allocate the queue
                * (off_proc_i holds (row, count) pairs, hence 2x) */
               max_off_proc_elmts = hypre_max(n,1000);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix)
                  = hypre_CTAlloc(HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix)
                  = hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcData(aux_matrix)
                  = hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
               off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
               off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            }
            else if (current_num_elmts + n > max_off_proc_elmts)
            {
               /* grow the queue */
               max_off_proc_elmts += 3*n;
               off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_data = hypre_TReAlloc(off_proc_data,HYPRE_Complex,
                                              max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i;
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j;
               hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data;
            }

            /* AB - 4/6 - the row should be negative to indicate an add */
            /* UMY - 12/28/09 - now positive since we eliminated the feature of
               setting on other processors */
            /* off_proc_i[off_proc_i_indx++] = row; */
            off_proc_i[off_proc_i_indx++] = row;
            off_proc_i[off_proc_i_indx++] = n;
            for (i=0; i < n; i++)
            {
               off_proc_j[current_num_elmts] = cols[indx];
               off_proc_data[current_num_elmts++] = values[indx++];
            }
            hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
            hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix)
               = current_num_elmts;
         }
      }
   }
   /* not assembled */
   else
   {
      aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
      row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
      row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
      need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);

      for (ii=0; ii < nrows; ii++)
      {
         row = rows[ii];
         n = ncols ? ncols[ii] : 1;
         if (n == 0) /* empty row */
         {
            continue;
         }
         indx = row_indexes[ii];

         if (row >= row_partitioning[0] && row < row_partitioning[1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[0]);
            /* compute local row number */
            if (need_aux)
            {
               aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
               aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
               local_j = aux_j[row_local];
               local_data = aux_data[row_local];
               space = row_space[row_local];
               old_size = row_length[row_local];
               size = space - old_size;

               /* entries that do not fit in the remaining row space are
                * parked in tmp_j/tmp_data and merged in after a realloc */
               if (size < n)
               {
                  size = n - size;
                  tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
                  tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
               }
               else
               {
                  tmp_j = NULL;
               }
               tmp_indx = 0;
               not_found = 1;
               size = old_size;

               for (i=0; i < n; i++)
               {
                  /* accumulate into an already-stored column if present */
                  for (j=0; j < old_size; j++)
                  {
                     if (local_j[j] == cols[indx])
                     {
                        local_data[j] += values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     if (size < space)
                     {
                        local_j[size] = cols[indx];
                        local_data[size++] = values[indx];
                     }
                     else
                     {
                        tmp_j[tmp_indx] = cols[indx];
                        tmp_data[tmp_indx++] = values[indx];
                     }
                  }
                  not_found = 1;
                  indx++;
               }

               row_length[row_local] = size+tmp_indx;

               if (tmp_indx)
               {
                  /* grow the row exactly to fit and append the parked
                   * overflow entries */
                  aux_j[row_local] = hypre_TReAlloc(aux_j[row_local], HYPRE_BigInt,
                                                    size+tmp_indx, HYPRE_MEMORY_HOST);
                  aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
                                                       HYPRE_Complex, size+tmp_indx, HYPRE_MEMORY_HOST);
                  row_space[row_local] = size+tmp_indx;
                  local_j = aux_j[row_local];
                  local_data = aux_data[row_local];
               }

               cnt = size;
               for (i=0; i < tmp_indx; i++)
               {
                  local_j[cnt] = tmp_j[i];
                  local_data[cnt++] = tmp_data[i];
               }

               if (tmp_j)
               {
                  hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
                  hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
               }
            }
            else /* insert immediately into data in ParCSRMatrix structure */
            {
               HYPRE_BigInt *big_offd_j;

               /* IndxDiag/IndxOffd track how far each row has been filled */
               offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
               diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
               diag = hypre_ParCSRMatrixDiag(par_matrix);
               diag_i = hypre_CSRMatrixI(diag);
               diag_j = hypre_CSRMatrixJ(diag);
               diag_data = hypre_CSRMatrixData(diag);
               offd = hypre_ParCSRMatrixOffd(par_matrix);
               offd_i = hypre_CSRMatrixI(offd);
               if (num_procs > 1)
               {
                  /* offd columns are stored as global indices (BigJ) until
                   * assembly; allocate the array lazily */
                  big_offd_j = hypre_CSRMatrixBigJ(offd);
                  offd_data = hypre_CSRMatrixData(offd);
                  if (!big_offd_j)
                  {
                     big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)],
                                                hypre_CSRMatrixMemoryLocation(offd));
                     hypre_CSRMatrixBigJ(offd) = big_offd_j;
                  }
               }
               cnt_diag = diag_indx;
               cnt_offd = offd_indx;
               diag_space = diag_i[row_local+1];
               offd_space = offd_i[row_local+1];
               not_found = 1;

               for (i=0; i < n; i++)
               {
                  if (cols[indx] < col_0 || cols[indx] > col_n)
                     /* insert into offd */
                  {
                     for (j=offd_i[row_local]; j < offd_indx; j++)
                     {
                        if (big_offd_j[j] == cols[indx])
                        {
                           offd_data[j] += values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (cnt_offd < offd_space)
                        {
                           big_offd_j[cnt_offd] = cols[indx];
                           offd_data[cnt_offd++] = values[indx];
                        }
                        else
                        {
                           hypre_error(HYPRE_ERROR_GENERIC);
                           if (print_level)
                           {
                              hypre_printf("Error in row %b ! Too many elements!\n",
                                           row);
                           }
                           /* return 1;*/
                           return hypre_error_flag;
                        }
                     }
                     not_found = 1;
                  }
                  else /* insert into diag */
                  {
                     HYPRE_Int col_j = (HYPRE_Int)( cols[indx] - col_0);
                     for (j=diag_i[row_local]; j < diag_indx; j++)
                     {
                        if (diag_j[j] == col_j)
                        {
                           diag_data[j] += values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (cnt_diag < diag_space)
                        {
                           diag_j[cnt_diag] = col_j;
                           diag_data[cnt_diag++] = values[indx];
                        }
                        else
                        {
                           hypre_error(HYPRE_ERROR_GENERIC);
                           if (print_level)
                           {
                              hypre_printf("Error in row %b ! Too many elements !\n",
                                           row);
                           }
                           /* return 1; */
                           return hypre_error_flag;
                        }
                     }
                     not_found = 1;
                  }
                  indx++;
               }

               hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
               hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
            }
         }
         /* not my row */
         else
         {
            /* queue the contribution for exchange at assembly time
             * (aux_matrix already exists in the unassembled regime) */
            current_num_elmts
               = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
            max_off_proc_elmts
               = hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
            off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
            off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
            off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
            off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);

            if (!max_off_proc_elmts)
            {
               /* first off-proc contribution: allocate the queue
                * (off_proc_i holds (row, count) pairs, hence 2x) */
               max_off_proc_elmts = hypre_max(n,1000);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) =
                  max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix)
                  = hypre_CTAlloc(HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix)
                  = hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcData(aux_matrix)
                  = hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
               off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
               off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            }
            else if (current_num_elmts + n > max_off_proc_elmts)
            {
               /* grow the queue */
               max_off_proc_elmts += 3*n;
               off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_data = hypre_TReAlloc(off_proc_data,HYPRE_Complex,
                                              max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix)
                  = max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i;
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j;
               hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data;
            }

            off_proc_i[off_proc_i_indx++] = row;
            off_proc_i[off_proc_i_indx++] = n;
            for (i=0; i < n; i++)
            {
               off_proc_j[current_num_elmts] = cols[indx];
               off_proc_data[current_num_elmts++] = values[indx++];
            }
            hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
            hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix)
               = current_num_elmts;
         }
      }
   }

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixDestroyParCSR
*
* frees an IJMatrix
*
*****************************************************************************/
/* Frees an IJMatrix of ParCSR type: both the assembled ParCSR object and
 * the auxiliary translator matrix (either may be NULL). */
HYPRE_Int
hypre_IJMatrixDestroyParCSR(hypre_IJMatrix *matrix)
{
   hypre_ParCSRMatrix    *par_csr = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
   hypre_AuxParCSRMatrix *aux     = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);

   hypre_ParCSRMatrixDestroy(par_csr);
   hypre_AuxParCSRMatrixDestroy(aux);

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixTransposeParCSR
*
* Tranposes an IJMatrix of type ParCSRMatrix
*
*****************************************************************************/
/* Transposes an IJMatrix of type ParCSRMatrix: matrix_AT receives the
 * transpose of matrix_A (with data copied, nonzero counts set, and a
 * communication package built).  Any object previously held by matrix_AT
 * is destroyed first. */
HYPRE_Int
hypre_IJMatrixTransposeParCSR( hypre_IJMatrix  *matrix_A,
                               hypre_IJMatrix  *matrix_AT )
{
   hypre_ParCSRMatrix *A  = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix_A);
   hypre_ParCSRMatrix *AT;

   /* Discard any previously assembled object held by matrix_AT. */
   if (hypre_IJMatrixObject(matrix_AT))
   {
      hypre_ParCSRMatrixDestroy((hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix_AT));
      hypre_IJMatrixObject(matrix_AT) = NULL;
   }

   hypre_ParCSRMatrixTranspose(A, &AT, 1);  /* 1: copy data, not just pattern */
   hypre_ParCSRMatrixSetNumNonzeros(AT);
   hypre_ParCSRMatrixSetDNumNonzeros(AT);
   hypre_MatvecCommPkgCreate(AT);
   hypre_IJMatrixObject(matrix_AT) = (void *) AT;

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixNormParCSR
*
* Computes the Infinity norm of an IJMatrix of type ParCSRMatrix
*
* TODO: Add other norms
*
*****************************************************************************/
/* Computes the infinity norm of an IJMatrix of type ParCSRMatrix and
 * stores it in *norm.  TODO: add other norms. */
HYPRE_Int
hypre_IJMatrixNormParCSR( hypre_IJMatrix *matrix,
                          HYPRE_Real     *norm )
{
   hypre_ParCSRMatrixInfNorm((hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix), norm);

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixAddParCSR
*
* Performs C = alpha*A + beta*B, where A, B and C are IJMatrices of
* type ParCSRMatrix.
*
*****************************************************************************/
/* Computes C = alpha*A + beta*B for IJMatrices of type ParCSRMatrix,
 * installing the result as matrix_C's object.  Any object previously held
 * by matrix_C is destroyed first; a communication package is created for C
 * if the sum did not already produce one. */
HYPRE_Int
hypre_IJMatrixAddParCSR( HYPRE_Complex    alpha,
                         hypre_IJMatrix  *matrix_A,
                         HYPRE_Complex    beta,
                         hypre_IJMatrix  *matrix_B,
                         hypre_IJMatrix  *matrix_C )
{
   hypre_ParCSRMatrix *A = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix_A);
   hypre_ParCSRMatrix *B = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix_B);
   hypre_ParCSRMatrix *C;

   /* Discard any previously assembled object held by matrix_C. */
   if (hypre_IJMatrixObject(matrix_C))
   {
      hypre_ParCSRMatrixDestroy((hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix_C));
      hypre_IJMatrixObject(matrix_C) = NULL;
   }

   hypre_ParCSRMatrixAdd(alpha, A, beta, B, &C);
   hypre_ParCSRMatrixSetNumNonzeros(C);
   hypre_ParCSRMatrixSetDNumNonzeros(C);
   if (!hypre_ParCSRMatrixCommPkg(C))
   {
      hypre_MatvecCommPkgCreate(C);
   }
   hypre_IJMatrixObject(matrix_C) = (void *) C;

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixAssembleOffProcValsParCSR
*
* This is for handling set and get values calls to off-proc. entries -
* it is called from matrix assemble. There is an alternate version for
* when the assumed partition is being used.
*
*****************************************************************************/
/*
 * Ship locally-cached off-processor {row, col, value} entries to the
 * processors that actually own those rows, and add the received entries
 * into this processor's part of the matrix (called from matrix assemble).
 *
 * Input layout: off_proc_i holds (row number, #elements-in-that-row)
 * pairs, so off_proc_i_indx == 2 * (number of cached rows); off_proc_j
 * and off_proc_data hold the column indices and values packed row by row.
 * max_off_proc_elmts is accepted but not referenced in this routine.
 *
 * Two rounds of hypre_DataExchangeList are performed:
 *   1) contact the *assumed* owners of row ranges to learn the actual
 *      owner of each row;
 *   2) send each actual owner one packed buffer with its rows/cols/values.
 * Received buffers are unpacked in deterministic (rank-sorted) order and
 * applied with hypre_IJMatrixAddToValuesParCSR on the host, or batched
 * and applied with hypre_IJMatrixSetAddValuesParCSRDevice on the device.
 *
 * Returns hypre_error_flag.
 */
HYPRE_Int
hypre_IJMatrixAssembleOffProcValsParCSR( hypre_IJMatrix *matrix,
HYPRE_Int off_proc_i_indx,
HYPRE_Int max_off_proc_elmts,
HYPRE_Int current_num_elmts,
HYPRE_MemoryLocation memory_location,
HYPRE_BigInt *off_proc_i,
HYPRE_BigInt *off_proc_j,
HYPRE_Complex *off_proc_data )
{
MPI_Comm comm = hypre_IJMatrixComm(matrix);
HYPRE_Int i, j, k, in_i;
HYPRE_Int myid;
HYPRE_Int proc_id, last_proc, prev_id, tmp_id;
HYPRE_Int max_response_size;
HYPRE_BigInt global_num_cols;
HYPRE_BigInt global_first_col;
HYPRE_BigInt global_first_row;
HYPRE_Int ex_num_contacts = 0, num_rows = 0;
HYPRE_BigInt range_start, range_end;
HYPRE_Int num_elements;
HYPRE_Int storage;
HYPRE_Int indx;
HYPRE_BigInt row;
HYPRE_Int num_ranges, row_index = 0;
HYPRE_Int num_recvs;
HYPRE_BigInt upper_bound;
HYPRE_Int counter;
HYPRE_Int num_real_procs;
HYPRE_Int /*current_proc,*/ original_proc_indx;
HYPRE_BigInt *row_list=NULL;
HYPRE_Int *row_list_num_elements=NULL;
HYPRE_Int *a_proc_id=NULL, *orig_order=NULL;
HYPRE_Int *real_proc_id = NULL, *us_real_proc_id = NULL;
HYPRE_Int *ex_contact_procs = NULL, *ex_contact_vec_starts = NULL;
HYPRE_BigInt *ex_contact_buf = NULL;
HYPRE_Int *recv_starts=NULL;
HYPRE_BigInt *response_buf = NULL;
HYPRE_Int *response_buf_starts=NULL;
HYPRE_Int *num_rows_per_proc = NULL, *num_elements_total = NULL;
HYPRE_Int *argsort_contact_procs = NULL;
HYPRE_Int obj_size_bytes, complex_size;
HYPRE_BigInt big_int_size;
HYPRE_Int tmp_int;
HYPRE_BigInt tmp_big_int;
HYPRE_BigInt *col_ptr;
HYPRE_BigInt *big_int_data = NULL;
HYPRE_Int big_int_data_size = 0, complex_data_size = 0;
void *void_contact_buf = NULL;
void *index_ptr;
void *recv_data_ptr;
HYPRE_Complex tmp_complex;
HYPRE_Complex *col_data_ptr;
HYPRE_Complex *complex_data = NULL;
hypre_DataExchangeResponse response_obj1, response_obj2;
hypre_ProcListElements send_proc_obj;
hypre_IJAssumedPart *apart;
hypre_MPI_Comm_rank(comm, &myid);
global_num_cols = hypre_IJMatrixGlobalNumCols(matrix);
global_first_col = hypre_IJMatrixGlobalFirstCol(matrix);
global_first_row = hypre_IJMatrixGlobalFirstRow(matrix);
/* Device input: copy the triples to the host and expand off_proc_i into
   (row, count==1) pairs, since all the packing below runs on the host.
   The host copies replace the caller's pointers and are freed at the end. */
if (memory_location == HYPRE_MEMORY_DEVICE)
{
HYPRE_BigInt *tmp = hypre_TAlloc(HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST);
HYPRE_BigInt *off_proc_i_h = hypre_TAlloc(HYPRE_BigInt, 2*current_num_elmts, HYPRE_MEMORY_HOST);
HYPRE_BigInt *off_proc_j_h = hypre_TAlloc(HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST);
HYPRE_Complex *off_proc_data_h = hypre_TAlloc(HYPRE_Complex, current_num_elmts, HYPRE_MEMORY_HOST);
hypre_TMemcpy(tmp, off_proc_i, HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
hypre_TMemcpy(off_proc_j_h, off_proc_j, HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
hypre_TMemcpy(off_proc_data_h, off_proc_data, HYPRE_Complex, current_num_elmts, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
for (i = 0; i < current_num_elmts; i++)
{
#if defined(HYPRE_DEBUG)
/* each cached entry must be off-processor but within the global extents */
hypre_assert(tmp[i] < hypre_IJMatrixRowPartitioning(matrix)[0] || tmp[i] >= hypre_IJMatrixRowPartitioning(matrix)[1]);
hypre_assert(tmp[i] >= global_first_row && tmp[i] < global_first_row + hypre_IJMatrixGlobalNumRows(matrix));
hypre_assert(off_proc_j_h[i] >= global_first_col && off_proc_j_h[i] < global_first_col + global_num_cols);
#endif
off_proc_i_h[2*i] = tmp[i];
off_proc_i_h[2*i+1] = 1;
}
off_proc_i_indx = current_num_elmts * 2;
off_proc_i = off_proc_i_h;
off_proc_j = off_proc_j_h;
off_proc_data = off_proc_data_h;
hypre_TFree(tmp, HYPRE_MEMORY_HOST);
}
/* call hypre_IJMatrixAddToValuesParCSR directly inside this function
 * with one chunk of data */
/* accumulators for the device path: received triples are buffered here
   and applied in one batch after the unpack loop */
HYPRE_Int off_proc_nelm_recv_cur = 0;
HYPRE_Int off_proc_nelm_recv_max = 0;
HYPRE_BigInt *off_proc_i_recv = NULL;
HYPRE_BigInt *off_proc_j_recv = NULL;
HYPRE_Complex *off_proc_data_recv = NULL;
HYPRE_BigInt *off_proc_i_recv_d = NULL;
HYPRE_BigInt *off_proc_j_recv_d = NULL;
HYPRE_Complex *off_proc_data_recv_d = NULL;
num_rows = off_proc_i_indx/2;
/* verify that we have created the assumed partition */
if (hypre_IJMatrixAssumedPart(matrix) == NULL)
{
hypre_IJMatrixCreateAssumedPartition(matrix);
}
apart = (hypre_IJAssumedPart*) hypre_IJMatrixAssumedPart(matrix);
/*if (hypre_ParCSRMatrixAssumedPartition(par_matrix) == NULL)
{
hypre_ParCSRMatrixCreateAssumedPartition(par_matrix);
}
apart = hypre_ParCSRMatrixAssumedPartition(par_matrix);*/
row_list = hypre_CTAlloc(HYPRE_BigInt, num_rows, HYPRE_MEMORY_HOST);
row_list_num_elements = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
a_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
orig_order = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
real_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
/* get the assumed processor id for each row */
if (num_rows > 0 )
{
for (i=0; i < num_rows; i++)
{
row = off_proc_i[i*2];
//if (row < 0) row = -row - 1;
row_list[i] = row;
row_list_num_elements[i] = off_proc_i[i*2+1];
/* NOTE(review): the row lookup below is passed global_num_cols as the
   global size (and the later range lookup uses global_first_col);
   presumably this relies on the row/col global extents matching -
   TODO confirm */
hypre_GetAssumedPartitionProcFromRow(comm, row, global_first_row,
global_num_cols, &proc_id);
a_proc_id[i] = proc_id;
orig_order[i] = i;
}
/* now we need to find the actual order of each row - sort on row -
this will result in proc ids sorted also...*/
hypre_BigQsortb2i(row_list, a_proc_id, orig_order, 0, num_rows -1);
/* calculate the number of contacts */
ex_num_contacts = 1;
last_proc = a_proc_id[0];
for (i=1; i < num_rows; i++)
{
if (a_proc_id[i] > last_proc)
{
ex_num_contacts++;
last_proc = a_proc_id[i];
}
}
}
/* now we will go through a create a contact list - need to contact assumed
processors and find out who the actual row owner is - we will contact with
a range (2 numbers) */
ex_contact_procs = hypre_CTAlloc(HYPRE_Int, ex_num_contacts, HYPRE_MEMORY_HOST);
ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, ex_num_contacts+1, HYPRE_MEMORY_HOST);
ex_contact_buf = hypre_CTAlloc(HYPRE_BigInt, ex_num_contacts*2, HYPRE_MEMORY_HOST);
counter = 0;
range_end = -1;
for (i=0; i< num_rows; i++)
{
if (row_list[i] > range_end)
{
/* assumed proc */
proc_id = a_proc_id[i];
/* end of prev. range */
if (counter > 0)
{
ex_contact_buf[counter*2 - 1] = row_list[i-1];
}
/*start new range*/
ex_contact_procs[counter] = proc_id;
ex_contact_vec_starts[counter] = counter*2;
ex_contact_buf[counter*2] = row_list[i];
counter++;
hypre_GetAssumedPartitionRowRange(comm, proc_id, global_first_col, global_num_cols,
&range_start, &range_end);
}
}
/* finish the starts */
ex_contact_vec_starts[counter] = counter*2;
/* finish the last range */
if (counter > 0)
{
ex_contact_buf[counter*2 - 1] = row_list[num_rows - 1];
}
/* don't allocate space for responses */
/* create response object - can use same fill response as used in the commpkg
routine */
response_obj1.fill_response = hypre_RangeFillResponseIJDetermineRecvProcs;
response_obj1.data1 = apart; /* this is necessary so we can fill responses*/
response_obj1.data2 = NULL;
max_response_size = 6; /* 6 means we can fit 3 ranges*/
/* exchange 1: ask the assumed owners who really owns each row range */
hypre_DataExchangeList(ex_num_contacts, ex_contact_procs,
ex_contact_buf, ex_contact_vec_starts, sizeof(HYPRE_BigInt),
sizeof(HYPRE_BigInt), &response_obj1, max_response_size, 1,
comm, (void**) &response_buf, &response_buf_starts);
/* now response_buf contains a proc_id followed by a range upper bound */
hypre_TFree(ex_contact_procs, HYPRE_MEMORY_HOST);
hypre_TFree(ex_contact_buf, HYPRE_MEMORY_HOST);
hypre_TFree(ex_contact_vec_starts, HYPRE_MEMORY_HOST);
hypre_TFree(a_proc_id, HYPRE_MEMORY_HOST);
/*how many ranges were returned?*/
num_ranges = response_buf_starts[ex_num_contacts];
num_ranges = num_ranges/2;
prev_id = -1;
j = 0;
counter = 0;
num_real_procs = 0;
/* loop through ranges - create a list of actual processor ids*/
for (i=0; i<num_ranges; i++)
{
upper_bound = response_buf[i*2+1];
counter = 0;
tmp_id = response_buf[i*2];
/* loop through row_list entries - counting how many are in the range */
while (j < num_rows && row_list[j] <= upper_bound)
{
real_proc_id[j] = tmp_id;
j++;
counter++;
}
if (counter > 0 && tmp_id != prev_id)
{
num_real_procs++;
}
prev_id = tmp_id;
}
/* now we have the list of real processor ids (real_proc_id) - and the number
of distinct ones - so now we can set up data to be sent - we have
HYPRE_Int data and HYPRE_Complex data. that we will need to pack
together */
/* first find out how many rows and elements we need to send per proc - so we
can do storage */
ex_contact_procs = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST);
num_rows_per_proc = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST);
num_elements_total = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST);
counter = 0;
if (num_real_procs > 0 )
{
ex_contact_procs[0] = real_proc_id[0];
num_rows_per_proc[0] = 1;
num_elements_total[0] = row_list_num_elements[orig_order[0]];
/* loop through real procs - these are sorted (row_list is sorted also)*/
for (i=1; i < num_rows; i++)
{
if (real_proc_id[i] == ex_contact_procs[counter]) /* same processor */
{
num_rows_per_proc[counter] += 1; /*another row */
num_elements_total[counter] += row_list_num_elements[orig_order[i]];
}
else /* new processor */
{
counter++;
ex_contact_procs[counter] = real_proc_id[i];
num_rows_per_proc[counter] = 1;
num_elements_total[counter] = row_list_num_elements[orig_order[i]];
}
}
}
/* to pack together, we need to use the largest obj. size of
(HYPRE_Int) and (HYPRE_Complex) - if these are much different, then we are
wasting some storage, but I do not think that it will be a
large amount since this function should not be used on really
large amounts of data anyway*/
big_int_size = sizeof(HYPRE_BigInt);
complex_size = sizeof(HYPRE_Complex);
obj_size_bytes = hypre_max(big_int_size, complex_size);
/* set up data to be sent to send procs */
/* for each proc, ex_contact_buf contains #rows, row #,
no. elements, col indices, col data, row #, no. elements, col
indices, col data, etc. */
/* first calculate total storage and make vec_starts arrays */
storage = 0;
ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, num_real_procs + 1, HYPRE_MEMORY_HOST);
/* starts are stored negated (minus 1): the packing loop below uses a
   negative start to detect the first visit to a given destination proc */
ex_contact_vec_starts[0] = -1;
for (i=0; i < num_real_procs; i++)
{
storage += 1 + 2 * num_rows_per_proc[i] + 2* num_elements_total[i];
ex_contact_vec_starts[i+1] = -storage-1; /* need negative for next loop */
}
hypre_TFree(num_elements_total, HYPRE_MEMORY_HOST);
/*void_contact_buf = hypre_MAlloc(storage*obj_size_bytes);*/
void_contact_buf = hypre_CTAlloc(char, storage*obj_size_bytes, HYPRE_MEMORY_HOST);
index_ptr = void_contact_buf; /* step through with this index */
/* for each proc: #rows, row #, no. elements,
col indices, col data, row #, no. elements, col indices, col data, etc. */
/* un-sort real_proc_id - we want to access data arrays in order, so
cheaper to do this*/
us_real_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
for (i=0; i < num_rows; i++)
{
us_real_proc_id[orig_order[i]] = real_proc_id[i];
}
hypre_TFree(real_proc_id, HYPRE_MEMORY_HOST);
counter = 0; /* index into data arrays */
prev_id = -1;
for (i=0; i < num_rows; i++)
{
proc_id = us_real_proc_id[i];
/* can't use row list[i] - you lose the negative signs that differentiate
add/set values */
row = off_proc_i[i*2];
num_elements = row_list_num_elements[i];
/* find position of this processor */
indx = hypre_BinarySearch(ex_contact_procs, proc_id, num_real_procs);
in_i = ex_contact_vec_starts[indx];
index_ptr = (void *) ((char *) void_contact_buf + in_i*obj_size_bytes);
/* first time for this processor - add the number of rows to the buffer */
if (in_i < 0)
{
in_i = -in_i - 1;
/* re-calc. index_ptr since in_i was negative */
index_ptr = (void *) ((char *) void_contact_buf + in_i*obj_size_bytes);
tmp_int = num_rows_per_proc[indx];
hypre_TMemcpy( index_ptr, &tmp_int, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in_i++;
}
/* add row # */
hypre_TMemcpy( index_ptr, &row, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in_i++;
/* add number of elements */
hypre_TMemcpy( index_ptr, &num_elements, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in_i++;
/* now add col indices */
for (j=0; j< num_elements; j++)
{
tmp_big_int = off_proc_j[counter+j]; /* col number */
hypre_TMemcpy( index_ptr, &tmp_big_int, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in_i ++;
}
/* now add data */
for (j=0; j< num_elements; j++)
{
tmp_complex = off_proc_data[counter++]; /* value */
hypre_TMemcpy( index_ptr, &tmp_complex, HYPRE_Complex, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in_i++;
}
/* increment the indexes to keep track of where we are - we
* adjust below to be actual starts*/
ex_contact_vec_starts[indx] = in_i;
}
/* some clean up */
hypre_TFree(response_buf, HYPRE_MEMORY_HOST);
hypre_TFree(response_buf_starts, HYPRE_MEMORY_HOST);
hypre_TFree(us_real_proc_id, HYPRE_MEMORY_HOST);
hypre_TFree(orig_order, HYPRE_MEMORY_HOST);
hypre_TFree(row_list, HYPRE_MEMORY_HOST);
hypre_TFree(row_list_num_elements, HYPRE_MEMORY_HOST);
hypre_TFree(num_rows_per_proc, HYPRE_MEMORY_HOST);
/* shift right by one: entry i was left pointing at the end of proc i's
   data, so after the shift entry i is the actual start for proc i */
for (i=num_real_procs; i > 0; i--)
{
ex_contact_vec_starts[i] = ex_contact_vec_starts[i-1];
}
ex_contact_vec_starts[0] = 0;
/* now send the data */
/***********************************/
/* first get the integer info in send_proc_obj */
/* the response we expect is just a confirmation*/
response_buf = NULL;
response_buf_starts = NULL;
/*build the response object*/
/* use the send_proc_obj for the info kept from contacts */
/*estimate inital storage allocation */
send_proc_obj.length = 0;
send_proc_obj.storage_length = num_real_procs + 5;
send_proc_obj.id =
hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST);
send_proc_obj.vec_starts =
hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST);
send_proc_obj.vec_starts[0] = 0;
send_proc_obj.element_storage_length = storage + 20;
send_proc_obj.v_elements =
hypre_TAlloc(char, obj_size_bytes*send_proc_obj.element_storage_length, HYPRE_MEMORY_HOST);
response_obj2.fill_response = hypre_FillResponseIJOffProcVals;
response_obj2.data1 = NULL;
response_obj2.data2 = &send_proc_obj;
max_response_size = 0;
/* exchange 2: ship the packed buffers to the actual row owners;
   everything this proc receives accumulates into send_proc_obj */
hypre_DataExchangeList(num_real_procs, ex_contact_procs,
void_contact_buf, ex_contact_vec_starts, obj_size_bytes,
0, &response_obj2, max_response_size, 2,
comm, (void **) &response_buf, &response_buf_starts);
hypre_TFree(response_buf, HYPRE_MEMORY_HOST);
hypre_TFree(response_buf_starts, HYPRE_MEMORY_HOST);
hypre_TFree(ex_contact_procs, HYPRE_MEMORY_HOST);
hypre_TFree(void_contact_buf, HYPRE_MEMORY_HOST);
hypre_TFree(ex_contact_vec_starts, HYPRE_MEMORY_HOST);
/* Now we can unpack the send_proc_objects and call set
and add to values functions. We unpack messages in a
deterministic order, using processor rank */
num_recvs = send_proc_obj.length;
argsort_contact_procs = hypre_CTAlloc(HYPRE_Int, num_recvs, HYPRE_MEMORY_HOST);
for(i=0; i < num_recvs; i++)
{
argsort_contact_procs[i] = i;
}
/* This sorts the id array, but the original indices are stored in
* argsort_contact_procs */
hypre_qsort2i( send_proc_obj.id, argsort_contact_procs, 0, num_recvs-1 );
/* alias */
recv_data_ptr = send_proc_obj.v_elements;
recv_starts = send_proc_obj.vec_starts;
for (i=0; i < num_recvs; i++)
{
/* Find the current processor in order, and reset recv_data_ptr to that processor's message */
original_proc_indx = argsort_contact_procs[i];
/*current_proc = send_proc_obj.id[i];*/
indx = recv_starts[original_proc_indx];
recv_data_ptr = (void *) ((char *) send_proc_obj.v_elements + indx*obj_size_bytes);
/* get the number of rows for this recv */
hypre_TMemcpy( &num_rows, recv_data_ptr, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
indx++;
for (j=0; j < num_rows; j++) /* for each row: unpack info */
{
/* row # */
hypre_TMemcpy( &row, recv_data_ptr, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
indx++;
/* num elements for this row */
hypre_TMemcpy( &num_elements, recv_data_ptr, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
indx++;
/* col indices */ /* Need to check this again !!!! */
/* if a packed unit is exactly a big int, read the indices in place;
   otherwise copy them out one unit at a time */
if (big_int_size == obj_size_bytes)
{
col_ptr = (HYPRE_BigInt *) recv_data_ptr;
recv_data_ptr = (void *) ((char *)recv_data_ptr + num_elements*obj_size_bytes);
}
else /* copy data */
{
if (big_int_data_size < num_elements)
{
big_int_data = hypre_TReAlloc(big_int_data, HYPRE_BigInt, num_elements + 10, HYPRE_MEMORY_HOST);
}
for (k=0; k< num_elements; k++)
{
hypre_TMemcpy( &big_int_data[k], recv_data_ptr, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
}
col_ptr = big_int_data;
}
/* col data */
if (complex_size == obj_size_bytes)
{
col_data_ptr = (HYPRE_Complex *) recv_data_ptr;
recv_data_ptr = (void *) ((char *)recv_data_ptr + num_elements*obj_size_bytes);
}
else /* copy data */
{
if (complex_data_size < num_elements)
{
complex_data =
hypre_TReAlloc(complex_data, HYPRE_Complex, num_elements + 10, HYPRE_MEMORY_HOST);
}
for (k=0; k< num_elements; k++)
{
hypre_TMemcpy( &complex_data[k], recv_data_ptr, HYPRE_Complex, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
}
col_data_ptr = complex_data;
}
if (memory_location == HYPRE_MEMORY_HOST)
{
hypre_IJMatrixAddToValuesParCSR(matrix, 1, &num_elements, &row, &row_index, col_ptr, col_data_ptr);
}
else
{
/* device path: buffer the received triples (growing geometrically);
   they are applied in one batch after the unpack loop */
HYPRE_Int nelm_new = off_proc_nelm_recv_cur + num_elements;
if (nelm_new > off_proc_nelm_recv_max)
{
off_proc_nelm_recv_max = nelm_new * 2;
off_proc_i_recv = hypre_TReAlloc(off_proc_i_recv, HYPRE_BigInt, off_proc_nelm_recv_max, HYPRE_MEMORY_HOST);
off_proc_j_recv = hypre_TReAlloc(off_proc_j_recv, HYPRE_BigInt, off_proc_nelm_recv_max, HYPRE_MEMORY_HOST);
off_proc_data_recv = hypre_TReAlloc(off_proc_data_recv, HYPRE_Complex, off_proc_nelm_recv_max, HYPRE_MEMORY_HOST);
}
HYPRE_Int i;
for (i = 0; i < num_elements; i++)
{
off_proc_i_recv[off_proc_nelm_recv_cur + i] = row;
}
hypre_TMemcpy(off_proc_j_recv + off_proc_nelm_recv_cur, col_ptr, HYPRE_BigInt, num_elements,
HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
hypre_TMemcpy(off_proc_data_recv + off_proc_nelm_recv_cur, col_data_ptr, HYPRE_Complex, num_elements,
HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
off_proc_nelm_recv_cur = nelm_new;
}
indx += (num_elements*2);
}
}
/* device path: push the buffered triples to the device and apply them */
if (memory_location == HYPRE_MEMORY_DEVICE)
{
off_proc_i_recv_d = hypre_TAlloc(HYPRE_BigInt, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE);
off_proc_j_recv_d = hypre_TAlloc(HYPRE_BigInt, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE);
off_proc_data_recv_d = hypre_TAlloc(HYPRE_Complex, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE);
hypre_TMemcpy(off_proc_i_recv_d, off_proc_i_recv, HYPRE_BigInt, off_proc_nelm_recv_cur,
HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
hypre_TMemcpy(off_proc_j_recv_d, off_proc_j_recv, HYPRE_BigInt, off_proc_nelm_recv_cur,
HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
hypre_TMemcpy(off_proc_data_recv_d, off_proc_data_recv, HYPRE_Complex, off_proc_nelm_recv_cur,
HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
hypre_IJMatrixSetAddValuesParCSRDevice(matrix, off_proc_nelm_recv_cur, NULL, off_proc_i_recv_d, NULL, off_proc_j_recv_d,
off_proc_data_recv_d, "add");
#endif
}
hypre_TFree(send_proc_obj.v_elements, HYPRE_MEMORY_HOST);
hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST);
hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST);
hypre_TFree(argsort_contact_procs, HYPRE_MEMORY_HOST);
if (big_int_data)
{
hypre_TFree(big_int_data, HYPRE_MEMORY_HOST);
}
if (complex_data)
{
hypre_TFree(complex_data, HYPRE_MEMORY_HOST);
}
/* these were the host copies made for device input at the top */
if (memory_location == HYPRE_MEMORY_DEVICE)
{
hypre_TFree(off_proc_i, HYPRE_MEMORY_HOST);
hypre_TFree(off_proc_j, HYPRE_MEMORY_HOST);
hypre_TFree(off_proc_data, HYPRE_MEMORY_HOST);
}
hypre_TFree(off_proc_i_recv, HYPRE_MEMORY_HOST);
hypre_TFree(off_proc_j_recv, HYPRE_MEMORY_HOST);
hypre_TFree(off_proc_data_recv, HYPRE_MEMORY_HOST);
hypre_TFree(off_proc_i_recv_d, HYPRE_MEMORY_DEVICE);
hypre_TFree(off_proc_j_recv_d, HYPRE_MEMORY_DEVICE);
hypre_TFree(off_proc_data_recv_d, HYPRE_MEMORY_DEVICE);
return hypre_error_flag;
}
/*--------------------------------------------------------------------
* hypre_FillResponseIJOffProcVals
* Fill response function for the previous function (2nd data exchange)
*--------------------------------------------------------------------*/
HYPRE_Int
hypre_FillResponseIJOffProcVals(void *p_recv_contact_buf,
                                HYPRE_Int contact_size,
                                HYPRE_Int contact_proc,
                                void *ro,
                                MPI_Comm comm,
                                void **p_send_response_buf,
                                HYPRE_Int *response_message_size )
{
   /* Append one incoming contact message to the send_proc_obj accumulator:
      record the contacting proc's id, grow the bookkeeping arrays and the
      packed element buffer as needed, and copy the raw message bytes in.
      The response sent back is empty (confirmation only). */
   HYPRE_Int                   rank;
   HYPRE_Int                   pos, elmt_offset, new_elmt_cap;
   HYPRE_Int                   unit_size;
   void                       *dest;
   hypre_DataExchangeResponse *resp  = (hypre_DataExchangeResponse*) ro;
   hypre_ProcListElements     *accum = (hypre_ProcListElements*) resp->data2;

   /* messages are packed in units large enough for either a big int or a complex */
   unit_size = hypre_max(sizeof(HYPRE_BigInt), sizeof(HYPRE_Complex));

   hypre_MPI_Comm_rank(comm, &rank);

   /* make room for another contact in the id/vec_starts arrays if full */
   if (accum->length == accum->storage_length)
   {
      accum->storage_length += 20; /* space for 20 more contacts */
      accum->vec_starts = hypre_TReAlloc(accum->vec_starts, HYPRE_Int,
                                         accum->storage_length + 1, HYPRE_MEMORY_HOST);
      if (accum->id != NULL)
      {
         accum->id = hypre_TReAlloc(accum->id, HYPRE_Int,
                                    accum->storage_length + 1, HYPRE_MEMORY_HOST);
      }
   }

   pos = accum->length;                  /* slot for this contact */
   elmt_offset = accum->vec_starts[pos]; /* units already stored */
   if (accum->id != NULL)
   {
      accum->id[pos] = contact_proc;
   }

   /* grow the packed element buffer if this message does not fit */
   if (accum->element_storage_length < elmt_offset + contact_size)
   {
      new_elmt_cap = elmt_offset + hypre_max(contact_size, 100);
      accum->v_elements = hypre_TReAlloc((char*)accum->v_elements,
                                         char, new_elmt_cap*unit_size, HYPRE_MEMORY_HOST);
      accum->element_storage_length = new_elmt_cap;
   }

   /* copy the message bytes in at the current offset */
   dest = (void *) ((char *) accum->v_elements + elmt_offset*unit_size);
   hypre_TMemcpy(dest, p_recv_contact_buf, char, unit_size*contact_size,
                 HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
   accum->vec_starts[pos + 1] = elmt_offset + contact_size;
   accum->length++;

   /* no payload to return - confirmation only */
   *response_message_size = 0;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------*/
/*
 * Binary search over a partition-style array: returns the index p such
 * that list[p] <= value < list[p+1], or -1 when value lies outside
 * [list[0], list[list_length]).
 *
 * NOTE: list must have list_length + 1 entries - list[list_length] is
 * read as the exclusive upper bound of the whole range.
 */
HYPRE_Int hypre_FindProc(HYPRE_BigInt *list, HYPRE_BigInt value, HYPRE_Int list_length)
{
   HYPRE_Int low, high, m;

   low  = 0;
   high = list_length;
   if (value >= list[high] || value < list[low])
   {
      /* value is outside the covered range */
      return -1;
   }
   else
   {
      /* invariant: list[low] <= value < list[high] */
      while (low + 1 < high)
      {
         /* overflow-safe midpoint: (low + high)/2 could overflow
            HYPRE_Int for large indices */
         m = low + (high - low) / 2;
         if (value < list[m])
         {
            high = m;
         }
         else /* value >= list[m] */
         {
            low = m;
         }
      }
      return low;
   }
}
/******************************************************************************
*
* hypre_IJMatrixAssembleParCSR
*
* assembles IJMatrix from AuxParCSRMatrix auxiliary structure
*****************************************************************************/
/*
 * Assembles the IJMatrix: exchanges any cached off-processor values,
 * moves data from the auxiliary structure (if present) into the diag/offd
 * CSR parts of the underlying ParCSRMatrix (threaded with OpenMP), builds
 * col_map_offd for the off-diagonal columns, and finally destroys the
 * auxiliary matrix.  Returns hypre_error_flag.
 */
HYPRE_Int
hypre_IJMatrixAssembleParCSR(hypre_IJMatrix *matrix)
{
MPI_Comm comm = hypre_IJMatrixComm(matrix);
hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix);
hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
HYPRE_Int *diag_i = hypre_CSRMatrixI(diag);
HYPRE_Int *offd_i = hypre_CSRMatrixI(offd);
HYPRE_Int *diag_j;
HYPRE_Int *offd_j = NULL;
HYPRE_Complex *diag_data;
HYPRE_Complex *offd_data = NULL;
HYPRE_Int i, j, j0;
HYPRE_Int num_cols_offd;
HYPRE_Int *diag_pos;
HYPRE_BigInt *col_map_offd;
HYPRE_Int *rownnz;
HYPRE_Int *row_length;
HYPRE_BigInt **aux_j;
HYPRE_Complex **aux_data;
HYPRE_Int my_id, num_procs;
HYPRE_Int num_rows;
HYPRE_Int num_rownnz;
HYPRE_Int i_diag, i_offd;
HYPRE_BigInt col_0, col_n;
HYPRE_Int nnz_offd;
HYPRE_BigInt *big_offd_j;
HYPRE_BigInt *tmp_j;
HYPRE_Complex temp;
HYPRE_BigInt base = hypre_IJMatrixGlobalFirstCol(matrix);
HYPRE_Int off_proc_i_indx;
HYPRE_Int max_off_proc_elmts;
HYPRE_Int current_num_elmts;
HYPRE_BigInt *off_proc_i;
HYPRE_BigInt *off_proc_j;
HYPRE_Complex *off_proc_data;
HYPRE_Int offd_proc_elmts;
//HYPRE_Int new_off_proc_i_indx;
//HYPRE_Int cancel_indx;
//HYPRE_Int col_indx;
//HYPRE_Int current_indx;
//HYPRE_Int current_i;
//HYPRE_Int row_len;
HYPRE_Int max_num_threads;
HYPRE_Int aux_flag, aux_flag_global;
HYPRE_ANNOTATE_FUNC_BEGIN;
max_num_threads = hypre_NumThreads();
/* first find out if anyone has an aux_matrix, and create one if you don't
 * have one, but other procs do */
aux_flag = 0;
aux_flag_global = 0;
if (aux_matrix)
{
aux_flag = 1;
}
hypre_MPI_Allreduce(&aux_flag, &aux_flag_global, 1, HYPRE_MPI_INT, hypre_MPI_SUM, comm);
if (aux_flag_global && (!aux_flag))
{
hypre_MPI_Comm_rank(comm, &my_id);
num_rows = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
hypre_AuxParCSRMatrixCreate(&aux_matrix, num_rows, num_rows, NULL);
hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
if (aux_matrix)
{
/* first delete all cancelled elements */
/*cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix);
if (cancel_indx)
{
current_num_elmts=hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
off_proc_i=hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j=hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data=hypre_AuxParCSRMatrixOffProcData(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
col_indx = 0;
current_i = 0;
current_indx = 0;
new_off_proc_i_indx = off_proc_i_indx;
for (i=0; i < off_proc_i_indx; i= i+2)
{
row_len = off_proc_i[i+1];
for (j=0; j < off_proc_i[i+1]; j++)
{
if (off_proc_j[col_indx] == -1)
{
col_indx++;
row_len--;
current_num_elmts--;
}
else
{
off_proc_j[current_indx] = off_proc_j[col_indx];
off_proc_data[current_indx++] = off_proc_data[col_indx++];
}
}
if (row_len)
{
off_proc_i[current_i] = off_proc_i[i];
off_proc_i[current_i+1] = row_len;
current_i += 2;
}
else
{
new_off_proc_i_indx -= 2;
}
}
hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = new_off_proc_i_indx;
hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix) = current_num_elmts;
}*/
/* if any proc holds off-processor entries, everyone participates in
   shipping them to their owners (collective call) */
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
hypre_MPI_Allreduce(&off_proc_i_indx, &offd_proc_elmts, 1, HYPRE_MPI_INT,
hypre_MPI_SUM, comm);
if (offd_proc_elmts)
{
max_off_proc_elmts=hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
current_num_elmts=hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
off_proc_i=hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j=hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data=hypre_AuxParCSRMatrixOffProcData(aux_matrix);
hypre_IJMatrixAssembleOffProcValsParCSR(
matrix,off_proc_i_indx, max_off_proc_elmts, current_num_elmts,
HYPRE_MEMORY_HOST,
off_proc_i, off_proc_j, off_proc_data);
}
}
/* only the first assemble needs to move aux data into the CSR parts */
if (hypre_IJMatrixAssembleFlag(matrix) == 0)
{
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
num_rows = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
col_0 = col_partitioning[0];
col_n = col_partitioning[1]-1;
/* move data into ParCSRMatrix if not there already */
if (hypre_AuxParCSRMatrixNeedAux(aux_matrix))
{
HYPRE_Int *diag_array;
HYPRE_Int *offd_array;
/* Update nonzero rows of aux_matrix */
hypre_AuxParCSRMatrixSetRownnz(aux_matrix);
aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
num_rownnz = hypre_AuxParCSRMatrixLocalNumRownnz(aux_matrix);
rownnz = hypre_AuxParCSRMatrixRownnz(aux_matrix);
diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
diag_pos = hypre_TAlloc(HYPRE_Int, num_rownnz, HYPRE_MEMORY_HOST);
i_diag = i_offd = 0;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i, j, i_diag, i_offd)
#endif
{
HYPRE_BigInt *local_j;
HYPRE_Complex *local_data;
HYPRE_Int ii, rest, size, ns, ne;
HYPRE_Int num_threads, my_thread_num;
num_threads = hypre_NumActiveThreads();
my_thread_num = hypre_GetThreadNum();
/* static partition of the nonzero rows over threads: rows [ns, ne) */
size = num_rownnz/num_threads;
rest = num_rownnz - size*num_threads;
if (my_thread_num < rest)
{
ns = my_thread_num*(size + 1);
ne = (my_thread_num+1)*(size + 1);
}
else
{
ns = my_thread_num*size + rest;
ne = (my_thread_num+1)*size + rest;
}
/* pass 1: count this thread's diag/offd entries and locate the
   diagonal element of each row */
i_diag = i_offd = 0;
for (i = ns; i < ne; i++)
{
ii = rownnz ? rownnz[i] : i;
local_j = aux_j[ii];
local_data = aux_data[ii];
diag_pos[i] = -1;
for (j = 0; j < row_length[ii]; j++)
{
if (local_j[j] < col_0 || local_j[j] > col_n)
{
i_offd++;
}
else
{
i_diag++;
/* NOTE(review): this compares the local column against the rownnz
   index i, not the actual row ii - when rownnz != NULL these differ;
   looks like the diagonal test should use ii - TODO confirm */
if ((HYPRE_Int)(local_j[j] - col_0) == i)
{
diag_pos[i] = j;
}
}
}
}
diag_array[my_thread_num] = i_diag;
offd_array[my_thread_num] = i_offd;
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* thread 0: prefix-sum the per-thread counts and allocate CSR arrays */
if (my_thread_num == 0)
{
i_diag = 0;
i_offd = 0;
for (i = 0; i < num_threads; i++)
{
i_diag += diag_array[i];
i_offd += offd_array[i];
diag_array[i] = i_diag;
offd_array[i] = i_offd;
}
diag_i[num_rows] = i_diag;
offd_i[num_rows] = i_offd;
hypre_TFree(hypre_CSRMatrixJ(diag), hypre_CSRMatrixMemoryLocation(diag));
hypre_TFree(hypre_CSRMatrixData(diag), hypre_CSRMatrixMemoryLocation(diag));
hypre_TFree(hypre_CSRMatrixJ(offd), hypre_CSRMatrixMemoryLocation(offd));
hypre_TFree(hypre_CSRMatrixData(offd), hypre_CSRMatrixMemoryLocation(offd));
hypre_TFree(hypre_CSRMatrixBigJ(offd), hypre_CSRMatrixMemoryLocation(offd));
diag_j = hypre_CTAlloc(HYPRE_Int, i_diag, hypre_CSRMatrixMemoryLocation(diag));
diag_data = hypre_CTAlloc(HYPRE_Complex, i_diag, hypre_CSRMatrixMemoryLocation(diag));
offd_j = hypre_CTAlloc(HYPRE_Int, i_offd, hypre_CSRMatrixMemoryLocation(offd));
offd_data = hypre_CTAlloc(HYPRE_Complex, i_offd, hypre_CSRMatrixMemoryLocation(offd));
big_offd_j = hypre_CTAlloc(HYPRE_BigInt, i_offd, hypre_CSRMatrixMemoryLocation(offd));
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* each thread's write offset = end of previous thread's range */
if (my_thread_num)
{
i_diag = diag_array[my_thread_num-1];
i_offd = offd_array[my_thread_num-1];
}
else
{
i_diag = 0;
i_offd = 0;
}
/* pass 2: fill diag/offd, putting the diagonal entry first in its row */
for (i = ns; i < ne; i++)
{
ii = rownnz ? rownnz[i] : i;
diag_i[ii] = i_diag;
offd_i[ii] = i_offd;
local_j = aux_j[ii];
local_data = aux_data[ii];
if (diag_pos[i] > -1)
{
diag_j[i_diag] = (HYPRE_Int)(local_j[diag_pos[i]] - col_0);
diag_data[i_diag++] = local_data[diag_pos[i]];
}
for (j = 0; j < row_length[ii]; j++)
{
if (local_j[j] < col_0 || local_j[j] > col_n)
{
big_offd_j[i_offd] = local_j[j];
offd_data[i_offd++] = local_data[j];
}
else if (j != diag_pos[i])
{
diag_j[i_diag] = (HYPRE_Int)(local_j[j] - col_0);
diag_data[i_diag++] = local_data[j];
}
}
}
/* Correct diag_i and offd_i */
/* rows skipped by rownnz still need valid (empty-row) row pointers */
if (rownnz != NULL)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
for (i = ns; i < (ne-1); i++)
{
for (ii = rownnz[i] + 1; ii < rownnz[i+1]; ii++)
{
diag_i[ii] = diag_i[rownnz[i+1]];
offd_i[ii] = offd_i[rownnz[i+1]];
}
}
if (my_thread_num < (num_threads - 1))
{
for (ii = rownnz[ne-1] + 1; ii < rownnz[ne]; ii++)
{
diag_i[ii] = diag_i[rownnz[ne]];
offd_i[ii] = offd_i[rownnz[ne]];
}
}
else
{
for (ii = rownnz[ne-1] + 1; ii < num_rows; ii++)
{
diag_i[ii] = diag_i[num_rows];
offd_i[ii] = offd_i[num_rows];
}
}
}
} /* end parallel region */
hypre_TFree(diag_array, HYPRE_MEMORY_HOST);
hypre_TFree(offd_array, HYPRE_MEMORY_HOST);
hypre_CSRMatrixJ(diag) = diag_j;
hypre_CSRMatrixData(diag) = diag_data;
hypre_CSRMatrixNumNonzeros(diag) = diag_i[num_rows];
if (offd_i[num_rows] > 0)
{
hypre_CSRMatrixJ(offd) = offd_j;
hypre_CSRMatrixBigJ(offd) = big_offd_j;
hypre_CSRMatrixData(offd) = offd_data;
}
hypre_CSRMatrixNumNonzeros(offd) = offd_i[num_rows];
hypre_TFree(diag_pos, HYPRE_MEMORY_HOST);
}
else
{
/* move diagonal element into first space */
big_offd_j = hypre_CSRMatrixBigJ(offd);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private (i,j,j0,temp)
#endif
for (i = 0; i < num_rows; i++)
{
j0 = diag_i[i];
for (j = j0; j < diag_i[i+1]; j++)
{
if (diag_j[j] == i)
{
/* swap the diagonal entry into the row's first position */
temp = diag_data[j0];
diag_data[j0] = diag_data[j];
diag_data[j] = temp;
diag_j[j] = diag_j[j0];
diag_j[j0] = i;
break;
}
}
}
offd_j = hypre_CSRMatrixJ(offd);
if (!offd_j && offd_i[num_rows])
{
offd_j = hypre_CTAlloc(HYPRE_Int, offd_i[num_rows], hypre_CSRMatrixMemoryLocation(offd));
hypre_CSRMatrixJ(offd) = offd_j;
}
}
/* generate the nonzero rows inside offd and diag by calling */
hypre_CSRMatrixSetRownnz(diag);
hypre_CSRMatrixSetRownnz(offd);
/* generate col_map_offd */
/* sort + unique the global offd columns, then map big_offd_j down to
   local offd_j indices via binary search */
nnz_offd = offd_i[num_rows];
if (nnz_offd)
{
tmp_j = hypre_CTAlloc(HYPRE_BigInt, nnz_offd, HYPRE_MEMORY_HOST);
for (i = 0; i < nnz_offd; i++)
{
tmp_j[i] = big_offd_j[i];
}
hypre_BigQsort0(tmp_j,0,nnz_offd-1);
num_cols_offd = 1;
for (i = 0; i < nnz_offd-1; i++)
{
if (tmp_j[i+1] > tmp_j[i])
{
tmp_j[num_cols_offd++] = tmp_j[i+1];
}
}
col_map_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);
for (i = 0; i < num_cols_offd; i++)
{
col_map_offd[i] = tmp_j[i];
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i)
#endif
for (i = 0; i < nnz_offd; i++)
{
offd_j[i] = hypre_BigBinarySearch(col_map_offd,big_offd_j[i],num_cols_offd);
}
if (base)
{
for (i = 0; i < num_cols_offd; i++)
{
col_map_offd[i] -= base;
}
}
hypre_ParCSRMatrixColMapOffd(par_matrix) = col_map_offd;
hypre_CSRMatrixNumCols(offd) = num_cols_offd;
hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
hypre_TFree(big_offd_j, hypre_CSRMatrixMemoryLocation(offd));
hypre_CSRMatrixBigJ(offd) = NULL;
}
hypre_IJMatrixAssembleFlag(matrix) = 1;
}
/* the aux structure is no longer needed once assembled */
hypre_AuxParCSRMatrixDestroy(aux_matrix);
hypre_IJMatrixTranslator(matrix) = NULL;
HYPRE_ANNOTATE_FUNC_END;
return hypre_error_flag;
}
/******************************************************************************
*
* IJMatrix_ParCSR interface
*
*****************************************************************************/
#include "_hypre_IJ_mv.h"
#include "../HYPRE.h"
/******************************************************************************
*
* hypre_IJMatrixSetValuesOMPParCSR
*
* sets values in an IJMatrix before assembly,
* use of this routine requires that the values in rows are different from each
* other, i.e rows[i] != rows[j] for i != j
* to ensure accurate threading
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixSetValuesOMPParCSR( hypre_IJMatrix *matrix,
HYPRE_Int nrows,
HYPRE_Int *ncols,
const HYPRE_BigInt *rows,
const HYPRE_Int *row_indexes,
const HYPRE_BigInt *cols,
const HYPRE_Complex *values )
{
hypre_ParCSRMatrix *par_matrix;
hypre_CSRMatrix *diag, *offd;
hypre_AuxParCSRMatrix *aux_matrix;
HYPRE_BigInt *row_partitioning;
HYPRE_BigInt *col_partitioning;
MPI_Comm comm = hypre_IJMatrixComm(matrix);
HYPRE_Int num_procs, my_id;
HYPRE_BigInt col_0, col_n, first;
//HYPRE_Int cancel_indx;
HYPRE_BigInt **aux_j;
HYPRE_Complex **aux_data;
HYPRE_Int *row_length, *row_space;
HYPRE_Int need_aux;
HYPRE_Int *diag_i;
HYPRE_Int *diag_j;
HYPRE_Complex *diag_data;
HYPRE_Int *offd_i;
HYPRE_Int *offd_j;
HYPRE_BigInt *big_offd_j;
HYPRE_Complex *offd_data;
/*HYPRE_Int current_num_elmts;*/
/*HYPRE_Int max_off_proc_elmts;*/
//HYPRE_Int off_proc_i_indx;
//HYPRE_BigInt *off_proc_i;
//HYPRE_BigInt *off_proc_j;
//HYPRE_Int *offproc_cnt;
HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
//HYPRE_Int max_num_threads;
HYPRE_Int error_flag = 0;
/*HYPRE_Complex *off_proc_data;*/
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
//max_num_threads = hypre_NumThreads();
par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
col_partitioning = hypre_IJMatrixColPartitioning(matrix);
//offproc_cnt = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
col_0 = col_partitioning[0];
col_n = col_partitioning[1]-1;
first = hypre_IJMatrixGlobalFirstCol(matrix);
if (nrows < 0)
{
hypre_error_in_arg(2);
if (print_level)
{
hypre_printf("Error! nrows negative! HYPRE_IJMatrixSetValues\n");
}
return hypre_error_flag;
}
if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled*/
{
HYPRE_BigInt *col_map_offd;
HYPRE_Int num_cols_offd;
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
num_cols_offd = hypre_CSRMatrixNumCols(offd);
if (num_cols_offd)
{
col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
offd_j = hypre_CSRMatrixJ(offd);
offd_data = hypre_CSRMatrixData(offd);
}
aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
/*if (aux_matrix)
{
current_num_elmts
= hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix);
}*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int j_offd;
HYPRE_Int num_threads, my_thread_num;
HYPRE_Int len, rest, ns, ne;
HYPRE_Int pos_diag, pos_offd;
HYPRE_Int len_diag, len_offd;
//HYPRE_Int row_len;
HYPRE_Int row_local;
HYPRE_Int i, j, ii, n;
HYPRE_BigInt row;
HYPRE_Int not_found, size, indx;
num_threads = hypre_NumActiveThreads();
my_thread_num = hypre_GetThreadNum();
len = nrows/num_threads;
rest = nrows - len*num_threads;
if (my_thread_num < rest)
{
ns = my_thread_num*(len+1);
ne = (my_thread_num+1)*(len+1);
}
else
{
ns = my_thread_num*len+rest;
ne = (my_thread_num+1)*len+rest;
}
for (ii=ns; ii < ne; ii++)
{
row = rows[ii];
n = ncols ? ncols[ii] : 1;
if (n == 0) /* empty row */
{
continue;
}
indx = row_indexes[ii];
/* processor owns the row */
if (row >= row_partitioning[0] && row < row_partitioning[1])
{
row_local = (HYPRE_Int)(row - row_partitioning[0]);
/* compute local row number */
size = diag_i[row_local+1] - diag_i[row_local]
+ offd_i[row_local+1] - offd_i[row_local];
if (n > size)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" row %b too long! \n", row);
}
break;
/*return hypre_error_flag; */
}
pos_diag = diag_i[row_local];
pos_offd = offd_i[row_local];
len_diag = diag_i[row_local+1];
len_offd = offd_i[row_local+1];
not_found = 1;
for (i=0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
j_offd = hypre_BigBinarySearch(col_map_offd,cols[indx]-first,
num_cols_offd);
if (j_offd == -1)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag; */
}
for (j=pos_offd; j < len_offd; j++)
{
if (offd_j[j] == j_offd)
{
offd_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag;*/
}
not_found = 1;
}
/* diagonal element */
else if (cols[indx] == row)
{
if (diag_j[pos_diag] != row_local)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag; */
}
diag_data[pos_diag] = values[indx];
}
else /* insert into diag */
{
for (j=pos_diag; j < len_diag; j++)
{
if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
{
diag_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag;*/
}
}
indx++;
}
}
/* processor does not own the row */
//else /*search for previous occurrences and cancel them */
/*{
if (aux_matrix)
{
col_indx = 0;
for (i=0; i < off_proc_i_indx; i=i+2)
{
row_len = off_proc_i[i+1];
if (off_proc_i[i] == row)
{
for (j=0; j < n; j++)
{
cnt1 = col_indx;
for (k=0; k < row_len; k++)
{
if (off_proc_j[cnt1] == cols[j])
{
off_proc_j[cnt1++] = -1;
offproc_cnt[my_thread_num]++; */
/*cancel_indx++;*/
/* if no repetition allowed */
/* off_proc_j[col_indx] = -1;
col_indx -= k;
break; */
/*}
else
{
cnt1++;
}
}
}
col_indx += row_len;
}
else
{
col_indx += row_len;
}
}*/
/*hypre_AuxParCSRMatrixCancelIndx(aux_matrix) = cancel_indx;*/
//}
//}
}
} /*end parallel region */
}
else /* matrix not assembled */
{
aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
/*if (aux_matrix)
{
current_num_elmts
= hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix);
}*/
row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
if (need_aux)
{
aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
}
else
{
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
if (num_procs > 1)
{
offd_data = hypre_CSRMatrixData(offd);
big_offd_j = hypre_CSRMatrixBigJ(offd);
if (!big_offd_j)
{
big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)],
hypre_CSRMatrixMemoryLocation(offd));
hypre_CSRMatrixBigJ(offd) = big_offd_j;
}
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int num_threads, my_thread_num;
HYPRE_Int len, rest, ns, ne;
HYPRE_BigInt *tmp_j = NULL;
HYPRE_BigInt *local_j = NULL;
HYPRE_Complex *tmp_data = NULL;
HYPRE_Complex *local_data = NULL;
HYPRE_Int tmp_indx;
//HYPRE_Int row_len;
HYPRE_Int row_local;
HYPRE_Int i, j, ii, n;
HYPRE_BigInt row;
HYPRE_Int not_found, size, indx;
HYPRE_Int old_size, space, cnt;
num_threads = hypre_NumActiveThreads();
my_thread_num = hypre_GetThreadNum();
len = nrows/num_threads;
rest = nrows - len*num_threads;
if (my_thread_num < rest)
{
ns = my_thread_num*(len+1);
ne = (my_thread_num+1)*(len+1);
}
else
{
ns = my_thread_num*len+rest;
ne = (my_thread_num+1)*len+rest;
}
for (ii=ns; ii < ne; ii++)
{
row = rows[ii];
n = ncols ? ncols[ii] : 1;
if (n == 0) /* empty row */
{
continue;
}
indx = row_indexes[ii];
/* processor owns the row */
if (row >= row_partitioning[0] && row < row_partitioning[1])
{
row_local = (HYPRE_Int)(row - row_partitioning[0]);
/* compute local row number */
if (need_aux)
{
local_j = aux_j[row_local];
local_data = aux_data[row_local];
space = row_space[row_local];
old_size = row_length[row_local];
size = space - old_size;
if (size < n)
{
size = n - size;
tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
}
tmp_indx = 0;
not_found = 1;
size = old_size;
for (i=0; i < n; i++)
{
for (j=0; j < old_size; j++)
{
if (local_j[j] == cols[indx])
{
local_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (size < space)
{
local_j[size] = cols[indx];
local_data[size++] = values[indx];
}
else
{
tmp_j[tmp_indx] = cols[indx];
tmp_data[tmp_indx++] = values[indx];
}
}
not_found = 1;
indx++;
}
row_length[row_local] = size+tmp_indx;
if (tmp_indx)
{
aux_j[row_local] = hypre_TReAlloc(aux_j[row_local],HYPRE_BigInt,
size+tmp_indx, HYPRE_MEMORY_HOST);
aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
HYPRE_Complex, size+tmp_indx, HYPRE_MEMORY_HOST);
row_space[row_local] = size+tmp_indx;
local_j = aux_j[row_local];
local_data = aux_data[row_local];
}
cnt = size;
for (i=0; i < tmp_indx; i++)
{
local_j[cnt] = tmp_j[i];
local_data[cnt++] = tmp_data[i];
}
if (tmp_j)
{
hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
}
}
else /* insert immediately into data in ParCSRMatrix structure */
{
HYPRE_Int offd_indx, diag_indx;
HYPRE_Int offd_space, diag_space;
HYPRE_Int cnt_diag, cnt_offd;
offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
cnt_diag = diag_indx;
cnt_offd = offd_indx;
diag_space = diag_i[row_local+1];
offd_space = offd_i[row_local+1];
not_found = 1;
for (i=0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
for (j=offd_i[row_local]; j < offd_indx; j++)
{
if (big_offd_j[j] == cols[indx])
{
offd_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_offd < offd_space)
{
big_offd_j[cnt_offd] = cols[indx];
offd_data[cnt_offd++] = values[indx];
}
else
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf("Error in row %b ! Too many elements!\n",
row);
}
break;
/*return hypre_error_flag;*/
}
}
not_found = 1;
}
else /* insert into diag */
{
for (j=diag_i[row_local]; j < diag_indx; j++)
{
if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
{
diag_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_diag < diag_space)
{
diag_j[cnt_diag] = (HYPRE_Int)(cols[indx]-col_0);
diag_data[cnt_diag++] = values[indx];
}
else
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf("Error in row %b ! Too many elements !\n",
row);
}
break;
/*return hypre_error_flag;*/
}
}
not_found = 1;
}
indx++;
}
hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
}
}
/* processor does not own the row */
/*else
{
if (aux_matrix)
{
col_indx = 0;
for (i=0; i < off_proc_i_indx; i=i+2)
{
row_len = off_proc_i[i+1];
if (off_proc_i[i] == row)
{
for (j=0; j < n; j++)
{
cnt1 = col_indx;
for (k=0; k < row_len; k++)
{
if (off_proc_j[cnt1] == cols[j])
{
off_proc_j[cnt1++] = -1; */
/*cancel_indx++;*/
//offproc_cnt[my_thread_num]++;
/* if no repetition allowed */
/* off_proc_j[col_indx] = -1;
col_indx -= k;
break; */
/* }
else
{
cnt1++;
}
}
}
col_indx += row_len;
}
else
{
col_indx += row_len;
}
}*/
/*hypre_AuxParCSRMatrixCancelIndx(aux_matrix) = cancel_indx;*/
/*}
}*/
}
} /* end parallel region */
}
/*if (error_flag)
{
return hypre_error_flag;
}
if (aux_matrix)
{
for (i1=0; i1 < max_num_threads; i1++)
{
cancel_indx += offproc_cnt[i1];
}
hypre_AuxParCSRMatrixCancelIndx(aux_matrix) = cancel_indx;
}*/
//hypre_TFree(offproc_cnt, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixAddToValuesOMPParCSR
*
* adds row values to an IJMatrix
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixAddToValuesOMPParCSR( hypre_IJMatrix *matrix,
HYPRE_Int nrows,
HYPRE_Int *ncols,
const HYPRE_BigInt *rows,
const HYPRE_Int *row_indexes,
const HYPRE_BigInt *cols,
const HYPRE_Complex *values )
{
hypre_ParCSRMatrix *par_matrix;
hypre_CSRMatrix *diag, *offd;
hypre_AuxParCSRMatrix *aux_matrix;
HYPRE_BigInt *row_partitioning;
HYPRE_BigInt *col_partitioning;
MPI_Comm comm = hypre_IJMatrixComm(matrix);
HYPRE_Int num_procs, my_id;
HYPRE_BigInt col_0, col_n, first;
HYPRE_BigInt **aux_j;
HYPRE_Complex **aux_data;
HYPRE_Int *row_length, *row_space;
HYPRE_Int need_aux;
HYPRE_Int *diag_i;
HYPRE_Int *diag_j;
HYPRE_Complex *diag_data;
HYPRE_Int *offd_i;
HYPRE_Int *offd_j;
HYPRE_BigInt *big_offd_j;
HYPRE_Complex *offd_data;
HYPRE_Int current_num_elmts;
HYPRE_Int max_off_proc_elmts;
HYPRE_Int off_proc_i_indx;
HYPRE_BigInt *off_proc_i;
HYPRE_BigInt *off_proc_j;
HYPRE_Complex *off_proc_data;
HYPRE_Int **offproc_cnt;
HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
HYPRE_Int max_num_threads;
HYPRE_Int error_flag = 0;
HYPRE_Int i1;
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
max_num_threads = hypre_NumThreads();
par_matrix = (hypre_ParCSRMatrix*) hypre_IJMatrixObject( matrix );
row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
col_partitioning = hypre_IJMatrixColPartitioning(matrix);
offproc_cnt = hypre_CTAlloc(HYPRE_Int *, max_num_threads, HYPRE_MEMORY_HOST);
for (i1=0; i1 < max_num_threads; i1++)
offproc_cnt[i1] = NULL;
col_0 = col_partitioning[0];
col_n = col_partitioning[1]-1;
first = hypre_IJMatrixGlobalFirstCol(matrix);
if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled */
{
HYPRE_Int num_cols_offd;
HYPRE_BigInt *col_map_offd;
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
num_cols_offd = hypre_CSRMatrixNumCols(offd);
if (num_cols_offd)
{
col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
offd_j = hypre_CSRMatrixJ(offd);
offd_data = hypre_CSRMatrixData(offd);
}
aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
if (aux_matrix)
{
current_num_elmts
= hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int j_offd;
HYPRE_Int num_threads, my_thread_num;
HYPRE_Int len, rest, ns, ne;
HYPRE_Int pos_diag, pos_offd;
HYPRE_Int len_diag, len_offd;
HYPRE_Int row_local;
HYPRE_Int i, j, ii, n;
HYPRE_BigInt row;
HYPRE_Int not_found, size, indx;
HYPRE_Int *my_offproc_cnt = NULL;
num_threads = hypre_NumActiveThreads();
my_thread_num = hypre_GetThreadNum();
len = nrows/num_threads;
rest = nrows - len*num_threads;
if (my_thread_num < rest)
{
ns = my_thread_num*(len+1);
ne = (my_thread_num+1)*(len+1);
}
else
{
ns = my_thread_num*len+rest;
ne = (my_thread_num+1)*len+rest;
}
for (ii=ns; ii < ne; ii++)
{
row = rows[ii];
n = ncols ? ncols[ii] : 1;
if (n == 0) /* empty row */
{
continue;
}
indx = row_indexes[ii];
if (row >= row_partitioning[0] && row < row_partitioning[1])
{
row_local = (HYPRE_Int)(row - row_partitioning[0]);
/* compute local row number */
size = diag_i[row_local+1] - diag_i[row_local]
+ offd_i[row_local+1] - offd_i[row_local];
if (n > size)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" row %b too long! \n", row);
}
break;
/*return hypre_error_flag; */
}
pos_diag = diag_i[row_local];
pos_offd = offd_i[row_local];
len_diag = diag_i[row_local+1];
len_offd = offd_i[row_local+1];
not_found = 1;
for (i=0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
j_offd = hypre_BigBinarySearch(col_map_offd,cols[indx]-first,
num_cols_offd);
if (j_offd == -1)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag;*/
}
for (j=pos_offd; j < len_offd; j++)
{
if (offd_j[j] == j_offd)
{
offd_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag;*/
}
not_found = 1;
}
/* diagonal element */
else if (cols[indx] == row)
{
if (diag_j[pos_diag] != row_local)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag;*/
}
diag_data[pos_diag] += values[indx];
}
else /* insert into diag */
{
for (j=pos_diag; j < len_diag; j++)
{
if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
{
diag_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf (" Error, element %b %b does not exist\n",
row, cols[indx]);
}
break;
/*return hypre_error_flag;*/
}
}
indx++;
}
}
/* not my row */
/* need to find solution for threaded version!!!! */
/* could save row number and process later .... */
else
{
if (!my_offproc_cnt)
{
my_offproc_cnt = hypre_CTAlloc(HYPRE_Int, 200, HYPRE_MEMORY_HOST);
offproc_cnt[my_thread_num] = my_offproc_cnt;
my_offproc_cnt[0] = 200;
my_offproc_cnt[1] = 2;
}
i = my_offproc_cnt[1];
if (i+2 < my_offproc_cnt[0])
{
my_offproc_cnt[i] = ii;
my_offproc_cnt[i+1] = indx;
my_offproc_cnt[1] += 2;
}
else
{
size = my_offproc_cnt[0];
my_offproc_cnt = hypre_TReAlloc(my_offproc_cnt, HYPRE_Int, size+200, HYPRE_MEMORY_HOST);
my_offproc_cnt[0] += 200;
my_offproc_cnt[i] = ii;
my_offproc_cnt[i+1] = indx;
my_offproc_cnt[1] += 2;
}
}
}
} /* end parallel region */
}
/* not assembled */
else
{
aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
if (aux_matrix)
{
current_num_elmts
= hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
}
row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
if (need_aux)
{
aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
}
else
{
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
if (num_procs > 1)
{
big_offd_j = hypre_CSRMatrixBigJ(offd);
offd_data = hypre_CSRMatrixData(offd);
if (!big_offd_j)
{
big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)],
hypre_CSRMatrixMemoryLocation(offd));
hypre_CSRMatrixBigJ(offd) = big_offd_j;
}
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int num_threads, my_thread_num;
HYPRE_Int len, rest, ns, ne;
HYPRE_BigInt *tmp_j = NULL;
HYPRE_BigInt *local_j = NULL;
HYPRE_Complex *tmp_data = NULL;
HYPRE_Complex *local_data = NULL;
HYPRE_Int tmp_indx;
HYPRE_Int row_local;
HYPRE_BigInt row;
HYPRE_Int i, j, ii, n;
HYPRE_Int not_found, size, indx;
HYPRE_Int old_size, space, cnt;
HYPRE_Int *my_offproc_cnt = NULL;
num_threads = hypre_NumActiveThreads();
my_thread_num = hypre_GetThreadNum();
len = nrows/num_threads;
rest = nrows - len*num_threads;
if (my_thread_num < rest)
{
ns = my_thread_num*(len+1);
ne = (my_thread_num+1)*(len+1);
}
else
{
ns = my_thread_num*len+rest;
ne = (my_thread_num+1)*len+rest;
}
for (ii=ns; ii < ne; ii++)
{
row = rows[ii];
n = ncols ? ncols[ii] : 1;
if (n == 0) /* empty row */
{
continue;
}
indx = row_indexes[ii];
if (row >= row_partitioning[0] && row < row_partitioning[1])
{
row_local = (HYPRE_Int)(row - row_partitioning[0]);
/* compute local row number */
if (need_aux)
{
local_j = aux_j[row_local];
local_data = aux_data[row_local];
space = row_space[row_local];
old_size = row_length[row_local];
size = space - old_size;
if (size < n)
{
size = n - size;
tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
}
tmp_indx = 0;
not_found = 1;
size = old_size;
for (i=0; i < n; i++)
{
for (j=0; j < old_size; j++)
{
if (local_j[j] == cols[indx])
{
local_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (size < space)
{
local_j[size] = cols[indx];
local_data[size++] = values[indx];
}
else
{
tmp_j[tmp_indx] = cols[indx];
tmp_data[tmp_indx++] = values[indx];
}
}
not_found = 1;
indx++;
}
row_length[row_local] = size+tmp_indx;
if (tmp_indx)
{
aux_j[row_local] = hypre_TReAlloc(aux_j[row_local],HYPRE_BigInt,
size+tmp_indx, HYPRE_MEMORY_HOST);
aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
HYPRE_Complex, size+tmp_indx, HYPRE_MEMORY_HOST);
row_space[row_local] = size+tmp_indx;
local_j = aux_j[row_local];
local_data = aux_data[row_local];
}
cnt = size;
for (i=0; i < tmp_indx; i++)
{
local_j[cnt] = tmp_j[i];
local_data[cnt++] = tmp_data[i];
}
if (tmp_j)
{
hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
}
}
else /* insert immediately into data in ParCSRMatrix structure */
{
HYPRE_Int offd_indx, diag_indx;
HYPRE_Int offd_space, diag_space;
HYPRE_Int cnt_diag, cnt_offd;
offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
cnt_diag = diag_indx;
cnt_offd = offd_indx;
diag_space = diag_i[row_local+1];
offd_space = offd_i[row_local+1];
not_found = 1;
for (i=0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
for (j=offd_i[row_local]; j < offd_indx; j++)
{
if (big_offd_j[j] == cols[indx])
{
offd_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_offd < offd_space)
{
big_offd_j[cnt_offd] = cols[indx];
offd_data[cnt_offd++] = values[indx];
}
else
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf("Error in row %b ! Too many elements!\n",
row);
}
break;
/*return hypre_error_flag;*/
}
}
not_found = 1;
}
else /* insert into diag */
{
for (j=diag_i[row_local]; j < diag_indx; j++)
{
if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
{
diag_data[j] += values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_diag < diag_space)
{
diag_j[cnt_diag] = (HYPRE_Int)(cols[indx]-col_0);
diag_data[cnt_diag++] = values[indx];
}
else
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
{
hypre_printf("Error in row %b ! Too many elements !\n",
row);
}
break;
/*return hypre_error_flag;*/
}
}
not_found = 1;
}
indx++;
}
hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
}
}
/* not my row */
else
{
if (!my_offproc_cnt)
{
my_offproc_cnt = hypre_CTAlloc(HYPRE_Int, 200, HYPRE_MEMORY_HOST);
offproc_cnt[my_thread_num] = my_offproc_cnt;
my_offproc_cnt[0] = 200;
my_offproc_cnt[1] = 2;
}
i = my_offproc_cnt[1];
if (i+2 < my_offproc_cnt[0])
{
my_offproc_cnt[i] = ii;
my_offproc_cnt[i+1] = indx;
my_offproc_cnt[1] += 2;
}
else
{
size = my_offproc_cnt[0];
my_offproc_cnt = hypre_TReAlloc(my_offproc_cnt, HYPRE_Int, size+200, HYPRE_MEMORY_HOST);
my_offproc_cnt[0] += 200;
my_offproc_cnt[i] = ii;
my_offproc_cnt[i+1] = indx;
my_offproc_cnt[1] += 2;
}
}
}
} /*end parallel region */
}
if (error_flag)
{
return hypre_error_flag;
}
if (!aux_matrix)
{
HYPRE_Int size = (HYPRE_Int)(row_partitioning[1]-row_partitioning[0]);
hypre_AuxParCSRMatrixCreate(&aux_matrix, size, size, NULL);
hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
for (i1 = 0; i1 < max_num_threads; i1++)
{
if (offproc_cnt[i1])
{
HYPRE_Int *my_offproc_cnt = offproc_cnt[i1];
HYPRE_Int i, i2, ii, n, indx;
HYPRE_BigInt row;
for (i2 = 2; i2 < my_offproc_cnt[1]; i2+=2)
{
ii = my_offproc_cnt[i2];
row = rows[ii];
n = ncols ? ncols[ii] : 1;
if (n == 0) /* empty row */
{
continue;
}
indx = my_offproc_cnt[i2+1];
current_num_elmts
= hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
max_off_proc_elmts
= hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
if (!max_off_proc_elmts)
{
max_off_proc_elmts = hypre_max(n,1000);
hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) =
max_off_proc_elmts;
hypre_AuxParCSRMatrixOffProcI(aux_matrix)
= hypre_CTAlloc(HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixOffProcJ(aux_matrix)
= hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixOffProcData(aux_matrix)
= hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
}
else if (current_num_elmts + n > max_off_proc_elmts)
{
max_off_proc_elmts += 3*n;
off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
off_proc_data = hypre_TReAlloc(off_proc_data,HYPRE_Complex,
max_off_proc_elmts, HYPRE_MEMORY_HOST);
hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix)
= max_off_proc_elmts;
hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i;
hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j;
hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data;
}
off_proc_i[off_proc_i_indx++] = row;
off_proc_i[off_proc_i_indx++] = n;
for (i=0; i < n; i++)
{
off_proc_j[current_num_elmts] = cols[indx];
off_proc_data[current_num_elmts++] = values[indx++];
}
hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix) = current_num_elmts;
}
hypre_TFree(offproc_cnt[i1], HYPRE_MEMORY_HOST);
}
}
hypre_TFree(offproc_cnt, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
|
DRB022-reductionmissing-var-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A kernel for two level parallelizable loop with reduction:
if reduction(+:sum) is missing, there is race condition.
Data race pairs:
sum@72:7 vs. sum@72:7
sum@72:7 vs. sum@72:13
*/
#include "omprace.h"
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char* argv[])
{
omprace_init();
int i,j;
float temp, sum=0.0;
int len=100;
if (argc>1)
len = atoi(argv[1]);
float u[len][len];
for (i = 0; i < len; i++)
for (j = 0; j < len; j++)
u[i][j] = 0.5;
#pragma omp parallel for private (temp,i,j)
for (i = 0; i < len; i++)
for (j = 0; j < len; j++)
{
temp = u[i][j];
sum = sum + temp * temp;
}
printf ("sum = %f\n", sum);
omprace_fini();
return 0;
}
|
GB_unaryop__minv_fp64_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_fp64_uint32
// op(A') function: GB_tran__minv_fp64_uint32
// C type: double
// A type: uint32_t
// cast: double cij = (double) aij
// unaryop: cij = 1./aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = 1./x ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_FP64 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__minv_fp64_uint32
(
    double *restrict Cx,
    const uint32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // apply z = 1/((double) aij) to each of the anz entries in parallel
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < anz ; k++)
    {
        // Cx [k] = minv ((double) Ax [k])
        GB_CAST_OP (k, k) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the transpose work lives in the generic template
// GB_unaryop_transpose.c, specialized here through the GB_* macros
// defined at the top of this file (GB_GETA, GB_CASTING, GB_OP, ...).
GrB_Info GB_tran__minv_fp64_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
// operator excluded at compile time via GB_control.h; caller falls
// back to the generic implementation
return (GrB_NO_VALUE) ;
#else
// run phase 2 of 2 of the transpose template (fill in the values)
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
uts_shm.c | /*
* ---- The Unbalanced Tree Search (UTS) Benchmark ----
*
* Copyright (c) 2010 See AUTHORS file for copyright holders
*
* This file is part of the unbalanced tree search benchmark. This
* project is licensed under the MIT Open Source license. See the LICENSE
* file for copyright and licensing information.
*
* UTS is a collaborative project between researchers at the University of
* Maryland, the University of North Carolina at Chapel Hill, and the Ohio
* State University. See AUTHORS file for more information.
*
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include "uts.h"
/***********************************************************
* *
* Compiler Type (these flags are set by at compile time) *
* (default) ANSI C compiler - sequential execution *
* (_OPENMP) OpenMP enabled C compiler *
* (__UPC__) UPC compiler *
* (_SHMEM) Cray Shmem *
* (__PTHREADS__) Pthreads multithreaded execution *
* *
***********************************************************/
/**** OpenMP Definitions ****/
#ifdef _OPENMP
#include <omp.h>
#define PARALLEL 1
#define COMPILER_TYPE 1
#define SHARED
#define SHARED_INDEF
#define VOLATILE volatile
#define MAX_THREADS 32
#define LOCK_T omp_lock_t
#define GET_NUM_THREADS omp_get_num_threads()
#define GET_THREAD_NUM omp_get_thread_num()
#define SET_LOCK(zlk) omp_set_lock(zlk)
#define UNSET_LOCK(zlk) omp_unset_lock(zlk)
#define INIT_LOCK(zlk) zlk=omp_global_lock_alloc()
#define INIT_SINGLE_LOCK(zlk) zlk=omp_global_lock_alloc()
#define SMEMCPY memcpy
#define ALLOC malloc
#define BARRIER
// OpenMP helper function to match UPC lock allocation semantics
omp_lock_t * omp_global_lock_alloc() {
  /* +128 over-allocates; presumably padding to keep locks on separate
   * cache lines -- TODO confirm against the UPC variant's semantics */
  omp_lock_t *lock = (omp_lock_t *) malloc(sizeof(omp_lock_t) + 128);
  if (lock == NULL) {
    /* original code passed NULL straight to omp_init_lock on OOM */
    fprintf(stderr, "omp_global_lock_alloc: out of memory\n");
    exit(1);
  }
  omp_init_lock(lock);
  return lock;
}
/**** UPC Definitions ****/
#elif defined(__UPC__)
#include <upc.h>
#define PARALLEL 1
#define COMPILER_TYPE 2
#define SHARED shared
#define SHARED_INDEF shared [0]
#define VOLATILE strict
#define MAX_THREADS (THREADS)
#define LOCK_T upc_lock_t
#define GET_NUM_THREADS (THREADS)
#define GET_THREAD_NUM (MYTHREAD)
#define SET_LOCK(zlk) upc_lock(zlk)
#define UNSET_LOCK(zlk) upc_unlock(zlk)
#define INIT_LOCK(zlk) zlk=upc_global_lock_alloc()
#define INIT_SINGLE_LOCK(zlk) zlk=upc_all_lock_alloc()
#define SMEMCPY upc_memget
#define ALLOC upc_alloc
#define BARRIER upc_barrier;
/**** Shmem Definitions ****/
#elif defined(_SHMEM)
#include <mpp/shmem.h>
#define PARALLEL 1
#define COMPILER_TYPE 3
#define SHARED
#define SHARED_INDEF
#define VOLATILE volatile
#define MAX_THREADS 64
#define LOCK_T long
#define GET_NUM_THREADS shmem_n_pes()
#define GET_THREAD_NUM shmem_my_pe()
#define SET_LOCK(zlk) shmem_set_lock(zlk)
#define UNSET_LOCK(zlk) shmem_clear_lock(zlk)
#define INIT_LOCK(zlk) zlk = shmem_global_lock_alloc()
#define INIT_SINGLE_LOCK(zlk) zlk = shmem_global_lock_alloc()
#define SMEMCPY shmem_getmem
// Shmem's get has different semantics from memcpy():
// void shmem_getmem(void *target, const void *source, size_t len, int pe)
#define ALLOC shmalloc
#define BARRIER shmem_barrier_all();
// Shmem helper function to match UPC lock allocation semantics
LOCK_T * shmem_global_lock_alloc() {
  LOCK_T *lock = (LOCK_T *) shmalloc(sizeof(LOCK_T));
  if (lock == NULL) {
    /* shmalloc can fail; original wrote through NULL on OOM */
    fprintf(stderr, "shmem_global_lock_alloc: out of memory\n");
    exit(1);
  }
  *lock = 0;   /* 0 == unlocked for shmem locks */
  return lock;
}
#define GET(target,source,from_id) shmem_int_get(&(target),&(source),1,from_id)
#define PUT(target,source,to_id) shmem_int_put(&(target),&(source),1,to_id)
#define PUT_ALL(a,b) \
do { \
int _iter, _node; \
for (_iter = 1; _iter < GET_NUM_THREADS; _iter++) { \
_node = (GET_THREAD_NUM + _iter) % GET_NUM_THREADS; \
shmem_int_put((int *)&a,(int *)&b,1,_node); \
} \
} while(0)
/**** Pthreads Definitions ****/
#elif defined(__PTHREADS__)
#include <pthread.h>
#define PARALLEL 1
#define COMPILER_TYPE 4
#define SHARED
#define SHARED_INDEF
#define VOLATILE volatile
#define MAX_THREADS 128
#define LOCK_T pthread_mutex_t
#define GET_NUM_THREADS pthread_num_threads
#define GET_THREAD_NUM *(int*)pthread_getspecific(pthread_thread_num)
#define SET_LOCK(zlk) pthread_mutex_lock(zlk)
#define UNSET_LOCK(zlk) pthread_mutex_unlock(zlk)
#define INIT_LOCK(zlk) zlk = pthread_global_lock_alloc()
#define INIT_SINGLE_LOCK(zlk) zlk = pthread_global_lock_alloc()
#define SMEMCPY memcpy
#define ALLOC malloc
#define BARRIER
int pthread_num_threads = 1; // Command line parameter - default to 1
pthread_key_t pthread_thread_num; // Key to store each thread's ID
/* helper function to match UPC lock allocation semantics */
LOCK_T * pthread_global_lock_alloc() {
  LOCK_T *lock = (LOCK_T *) malloc(sizeof(LOCK_T));
  if (lock == NULL) {
    /* original passed NULL straight to pthread_mutex_init on OOM */
    fprintf(stderr, "pthread_global_lock_alloc: out of memory\n");
    exit(1);
  }
  pthread_mutex_init(lock, NULL);
  return lock;
}
/**** Default Sequential Definitions ****/
#else
#define PARALLEL 0
#define COMPILER_TYPE 0
#define SHARED
#define SHARED_INDEF
#define VOLATILE
#define MAX_THREADS 1
#define LOCK_T void
#define GET_NUM_THREADS 1
#define GET_THREAD_NUM 0
#define SET_LOCK(zlk)
#define UNSET_LOCK(zlk)
#define INIT_LOCK(zlk)
#define INIT_SINGLE_LOCK(zlk)
#define SMEMCPY memcpy
#define ALLOC malloc
#define BARRIER
#endif /* END Par. Model Definitions */
/***********************************************************
* Parallel execution parameters *
***********************************************************/
int doSteal = PARALLEL; // 1 => use work stealing
int chunkSize = 20; // number of nodes to move to/from shared area
int cbint = 1; // Cancellable barrier polling interval
int pollint = 1; // BUPC Polling interval
#ifdef __BERKELEY_UPC__
/* BUPC nonblocking I/O Handles */
bupc_handle_t cb_handle = BUPC_COMPLETE_HANDLE;
const int local_cb_cancel = 1;
#endif
/***********************************************************
* Tree statistics (if selected via UTS_STAT) *
* compute overall size and imbalance metrics *
* and histogram size and imbalance per level *
***********************************************************/
#ifdef UTS_STAT
/* Check that we are not being asked to compile parallel with stats.
* Parallel stats collection is presently not supported. */
#if PARALLEL
#error "ERROR: Parallel stats collection is not supported!"
#endif
#define MAXHISTSIZE 2000 // max tree depth in histogram
int stats = 1;
int unbType = 1;
int maxHeight = 0; // maximum depth of tree
double maxImb = 0; // maximum imbalance
double minImb = 1;
double treeImb =-1; // Overall imbalance, undefined
int hist[MAXHISTSIZE+1][2]; // average # nodes per level
double unbhist[MAXHISTSIZE+1][3]; // average imbalance per level
int *rootSize; // size of the root's children
double *rootUnb; // imbalance of root's children
/* Tseng statistics */
int totalNodes = 0;
double imb_max = 0; // % of work in largest child (ranges from 100/n to 100%)
double imb_avg = 0;
double imb_devmaxavg = 0; // ( % of work in largest child ) - ( avg work )
double imb_normdevmaxavg = 0; // ( % of work in largest child ) - ( avg work ) / ( 100% - avg work )
#else
int stats = 0;
int unbType = -1;
#endif
/***********************************************************
* Execution Tracing *
***********************************************************/
#define SS_WORK 0
#define SS_SEARCH 1
#define SS_IDLE 2
#define SS_OVH 3
#define SS_CBOVH 4
#define SS_NSTATES 5
/* session record for session visualization */
struct sessionRecord_t {
double startTime, endTime;
};
typedef struct sessionRecord_t SessionRecord;
/* steal record for steal visualization */
struct stealRecord_t {
long int nodeCount; /* count nodes generated during the session */
int victimThread; /* thread from which we stole the work */
};
typedef struct stealRecord_t StealRecord;
/* Store debugging and trace data */
struct metaData_t {
SessionRecord sessionRecords[SS_NSTATES][20000]; /* session time records */
StealRecord stealRecords[20000]; /* steal records */
};
typedef struct metaData_t MetaData;
/* holds text string for debugging info */
char debug_str[1000];
/***********************************************************
* StealStack types *
***********************************************************/
#define MAXSTACKDEPTH 500000
/* stack of nodes */
struct stealStack_t
{
int stackSize; /* total space avail (in number of elements) */
int workAvail; /* elements available for stealing */
int sharedStart; /* index of start of shared portion of stack */
int local; /* index of start of local portion */
int top; /* index of stack top */
int maxStackDepth; /* stack stats */
int nNodes, maxTreeDepth; /* tree stats */
int nLeaves;
int nAcquire, nRelease, nSteal, nFail; /* steal stats */
int wakeups, falseWakeups, nNodes_last;
double time[SS_NSTATES], timeLast; /* perf measurements */
int entries[SS_NSTATES], curState;
LOCK_T * stackLock; /* lock for manipulation of shared portion */
Node * stack; /* addr of actual stack of nodes in local addr space */
SHARED_INDEF Node * stack_g; /* addr of same stack in global addr space */
#ifdef TRACE
MetaData * md; /* meta data used for debugging and tracing */
#endif
};
typedef struct stealStack_t StealStack;
typedef SHARED StealStack * SharedStealStackPtr;
/***********************************************************
* Global shared state *
***********************************************************/
// shared access to each thread's stealStack
SHARED SharedStealStackPtr stealStack[MAX_THREADS];
// termination detection
VOLATILE SHARED int cb_cancel;
VOLATILE SHARED int cb_count;
VOLATILE SHARED int cb_done;
LOCK_T * cb_lock;
SHARED double startTime[MAX_THREADS];
/***********************************************************
* UTS Implementation Hooks *
***********************************************************/
// Return a string describing this implementation
// Return a human-readable name for the parallel model selected at
// compile time (indexed by COMPILER_TYPE).
char * impl_getName() {
  static char *impl_names[] =
    { "Sequential C", "C/OpenMP", "UPC", "SHMEM", "PThreads" };
  return impl_names[COMPILER_TYPE];
}
// construct string with all parameter settings
// Append a description of the execution strategy to strBuf starting at
// offset ind; returns the new end offset.
// NOTE(review): uses unbounded sprintf -- the caller must provide a
// buffer large enough for the longest configuration; verify callers.
int impl_paramsToStr(char *strBuf, int ind) {
ind += sprintf(strBuf+ind, "Execution strategy: ");
if (PARALLEL) {
ind += sprintf(strBuf+ind, "Parallel search using %d threads\n", GET_NUM_THREADS);
if (doSteal) {
ind += sprintf(strBuf+ind, "  Load balance by work stealing, chunk size = %d nodes\n",chunkSize);
ind += sprintf(strBuf+ind, "  CBarrier Interval: %d\n", cbint);
ind += sprintf(strBuf+ind, "  Polling Interval: %d\n", pollint);
}
else
ind += sprintf(strBuf+ind, "  No load balancing.\n");
}
else
ind += sprintf(strBuf+ind, "Iterative sequential search\n");
return ind;
}
// Parse one implementation-specific command-line option; param is the
// "-x" flag string, value its argument.  Returns 0 on a match, nonzero
// on error.  Which cases exist depends on the parallel model and
// feature macros selected at compile time.
int impl_parseParam(char *param, char *value) {
int err = 0; // Return 0 on a match, nonzero on an error
switch (param[1]) {
#if (PARALLEL == 1)
case 'c': // chunk size for work stealing
chunkSize = atoi(value); break;
case 's': // enable/disable work stealing (must be 0 or 1)
doSteal = atoi(value);
if (doSteal != 1 && doSteal != 0)
err = 1;
break;
case 'i': // cancellable-barrier polling interval
cbint = atoi(value); break;
#ifdef __BERKELEY_UPC__
case 'I': // BUPC bupc_poll() interval
pollint = atoi(value); break;
#endif
#ifdef __PTHREADS__
case 'T': // number of pthreads, clamped to MAX_THREADS
pthread_num_threads = atoi(value);
if (pthread_num_threads > MAX_THREADS) {
printf("Warning: Requested threads > MAX_THREADS. Truncated to %d threads\n", MAX_THREADS);
pthread_num_threads = MAX_THREADS;
}
break;
#endif
#else /* !PARALLEL */
#ifdef UTS_STAT
case 'u': // unbalance measure; <0 disables stats, >2 is invalid
unbType = atoi(value);
if (unbType > 2) {
err = 1;
break;
}
if (unbType < 0)
stats = 0;
else
stats = 1;
break;
#endif
#endif /* PARALLEL */
default:
err = 1;
break;
}
return err;
}
// Print the help text for the implementation-specific options accepted
// by impl_parseParam above; the set shown mirrors the compile-time
// configuration (parallel model, BUPC, pthreads, UTS_STAT).
void impl_helpMessage() {
if (PARALLEL) {
printf("   -s  int   zero/nonzero to disable/enable work stealing\n");
printf("   -c  int   chunksize for work stealing\n");
printf("   -i  int   set cancellable barrier polling interval\n");
#ifdef __BERKELEY_UPC__
printf("   -I  int   set working bupc_poll() interval\n");
#endif
#ifdef __PTHREADS__
printf("   -T  int   set number of threads\n");
#endif
} else {
#ifdef UTS_STAT
printf("   -u  int   unbalance measure (-1: none; 0: min/size; 1: min/n; 2: max/n)\n");
#else
printf("   none.\n");
#endif
}
}
// Terminate the whole run with status err, using the parallel model's
// global-exit primitive where one exists.
void impl_abort(int err) {
#if defined(__UPC__)
  upc_global_exit(err);
#else
  /* OpenMP, SHMEM, pthreads, and sequential builds all use exit() */
  exit(err);
#endif
}
/***********************************************************
* *
* FUNCTIONS *
* *
***********************************************************/
/*
* StealStack
* Stack of nodes with sharing at the bottom of the stack
* and exclusive access at the top for the "owning" thread
* which has affinity to the stack's address space.
*
* * All operations on the shared portion of the stack
* must be guarded using the stack-specific lock.
* * Elements move between the shared and exclusive
* portion of the stack solely under control of the
* owning thread. (ss_release and ss_acquire)
* * workAvail is the count of elements in the shared
* portion of the stack. It may be read without
* acquiring the stack lock, but of course its value
* may not be acurate. Idle threads read workAvail in
* this speculative fashion to minimize overhead to
* working threads.
* * Elements can be stolen from the bottom of the shared
* portion by non-owning threads. The values are
* reserved under lock by the stealing thread, and then
* copied without use of the lock (currently space for
* reserved values is never reclaimed).
*
*/
/* restore stack to empty state */
/* Reset the stack to its empty state: shared and local regions both
 * start at index 0 and nothing is available for stealing. */
void ss_mkEmpty(StealStack *s) {
  SET_LOCK(s->stackLock);
  s->sharedStart = s->local = s->top = 0;
  s->workAvail = 0;
  UNSET_LOCK(s->stackLock);
}
/* fatal error */
/* Print a fatal-error message tagged with the reporting thread, then
 * terminate the process with exit code 4.  Never returns; some callers
 * invoke this while holding a lock, which is tolerable only because
 * the process exits immediately. */
void ss_error(char *str) {
printf("*** [Thread %i] %s\n",GET_THREAD_NUM, str);
exit(4);
}
/* initialize the stack */
/* Initialize a StealStack: allocate backing storage for nelts nodes in
 * the shared address space (with affinity to the calling thread), record
 * a local alias for fast access, create the stack lock, and zero all
 * statistics counters.  Aborts via ss_error() on allocation failure. */
void ss_init(StealStack *s, int nelts) {
int nbytes = nelts * sizeof(Node);
if (debug & 1)
/* fixed typo in debug message: "intializing" -> "initializing" */
printf("Thread %d initializing stealStack %p, sizeof(Node) = %X\n",
GET_THREAD_NUM, s, (int)(sizeof(Node)));
// allocate stack in shared addr space with affinity to calling thread
// and record local addr for efficient access in sequel
s->stack_g = (SHARED_INDEF Node *) ALLOC (nbytes);
s->stack = (Node *) s->stack_g;
#ifdef TRACE
s->md = (MetaData *) ALLOC (sizeof(MetaData));
if (s->md == NULL)
ss_error("ss_init: out of memory");
#endif
if (s->stack == NULL) {
printf("Request for %d bytes for stealStack on thread %d failed\n",
nbytes, GET_THREAD_NUM);
ss_error("ss_init: unable to allocate space for stealstack");
}
INIT_LOCK(s->stackLock);
if (debug & 1)
printf("Thread %d init stackLock %p\n", GET_THREAD_NUM, (void *) s->stackLock);
/* zero all tree/steal/wakeup statistics */
s->stackSize = nelts;
s->nNodes = 0;
s->maxStackDepth = 0;
s->maxTreeDepth = 0;
s->nLeaves = 0;
s->nAcquire = 0;
s->nRelease = 0;
s->nSteal = 0;
s->nFail = 0;
s->wakeups = 0;
s->falseWakeups = 0;
s->nNodes_last = 0;
ss_mkEmpty(s);
}
/* local push */
/* Push a copy of node c onto the local portion of the stack, updating
 * the node count and depth statistics.  Aborts on overflow. */
void ss_push(StealStack *s, Node *c) {
  if (s->top >= s->stackSize)
    ss_error("ss_push: overflow");
  if (debug & 1)
    printf("ss_push: Thread %d, posn %d: node %s [%d]\n",
           GET_THREAD_NUM, s->top, rng_showstate(c->state.state, debug_str), c->height);
  /* copy the node into the next free slot, then grow the stack */
  memcpy(&s->stack[s->top], c, sizeof(Node));
  s->top += 1;
  s->nNodes += 1;
  s->maxStackDepth = max(s->top, s->maxStackDepth);
  s->maxTreeDepth = max(s->maxTreeDepth, c->height);
}
/* local top: get local addr of node at top */
/* Return the local address of the node on top of the stack.
 * Aborts if the local portion is empty. */
Node * ss_top(StealStack *s) {
  Node *node;
  if (s->top <= s->local)
    ss_error("ss_top: empty local stack");
  node = &s->stack[s->top - 1];
  if (debug & 1)
    printf("ss_top: Thread %d, posn %d: node %s [%d] nchild = %d\n",
           GET_THREAD_NUM, s->top - 1, rng_showstate(node->state.state, debug_str),
           node->height, node->numChildren);
  return node;
}
/* local pop */
/* Discard the top node of the local stack (the slot is merely
 * abandoned, not cleared).  Aborts if the local portion is empty. */
void ss_pop(StealStack *s) {
  Node *node;
  if (s->top <= s->local)
    ss_error("ss_pop: empty local stack");
  s->top -= 1;
  node = &s->stack[s->top];
  if (debug & 1)
    printf("ss_pop: Thread %d, posn %d: node %s [%d] nchild = %d\n",
           GET_THREAD_NUM, s->top, rng_showstate(node->state.state, debug_str),
           node->height, node->numChildren);
}
/* local top position: stack index of top element */
/* Stack index of the top element; aborts if the local stack is empty. */
int ss_topPosn(StealStack *s)
{
  if (s->top <= s->local)
    ss_error("ss_topPosn: empty local stack");
  return s->top - 1;
}
/* local depth */
/* Number of nodes in the local (non-stealable) portion of the stack. */
int ss_localDepth(StealStack *s) {
  return s->top - s->local;
}
/* release k values from bottom of local stack */
/* Move k values from the bottom of the local region into the shared
 * region, making them available for stealing.  Aborts if fewer than k
 * local values exist (ss_error exits, so the held lock is moot). */
void ss_release(StealStack *s, int k) {
  SET_LOCK(s->stackLock);
  if (s->top - s->local < k)
    ss_error("ss_release: do not have k vals to release");
  s->local += k;
  s->workAvail += k;
  s->nRelease++;
  UNSET_LOCK(s->stackLock);
}
/* move k values from top of shared stack into local stack
* return false if k vals are not avail on shared stack
*/
/* Reclaim k values from the top of this thread's shared region back
 * into the local region.  Returns nonzero on success, zero if fewer
 * than k shared values were available (in which case nothing moves). */
int ss_acquire(StealStack *s, int k) {
  int ok;
  SET_LOCK(s->stackLock);
  ok = (s->local - s->sharedStart) >= k;
  if (ok) {
    s->local -= k;
    s->workAvail -= k;
    s->nAcquire++;
  }
  UNSET_LOCK(s->stackLock);
  return ok;
}
/* steal k values from shared portion of victim thread's stealStack
* onto local portion of current thread's stealStack.
* return false if k vals are not avail in victim thread
*/
/* Steal k values from the shared portion of the victim thread's stack
 * onto this (empty) thread's stack.  Returns nonzero on success.
 * The reservation (moving sharedStart/workAvail) is done under the
 * victim's lock; the actual data copy happens AFTER the lock is
 * released, which is safe only because reserved slots are never
 * reclaimed (see the StealStack commentary above). */
int ss_steal(StealStack *s, int victim, int k) {
int victimLocal, victimShared, victimWorkAvail;
int ok;
if (s->sharedStart != s->top)
ss_error("ss_steal: thief attempts to steal onto non-empty stack");
if (s->top + k >= s->stackSize)
ss_error("ss_steal: steal will overflow thief's stack");
/* lock victim stack and try to reserve k elts */
if (debug & 32)
printf("Thread %d wants SS %d\n", GET_THREAD_NUM, victim);
SET_LOCK(stealStack[victim]->stackLock);
#ifdef _SHMEM
/* Get remote steal stack (refresh our local mirror of the victim) */
SMEMCPY(stealStack[victim], stealStack[victim], sizeof(StealStack), victim);
#endif
if (debug & 32)
printf("Thread %d acquires SS %d\n", GET_THREAD_NUM, victim);
victimLocal = stealStack[victim]->local;
victimShared = stealStack[victim]->sharedStart;
victimWorkAvail = stealStack[victim]->workAvail;
if (victimLocal - victimShared != victimWorkAvail)
ss_error("ss_steal: stealStack invariant violated");
ok = victimWorkAvail >= k;
if (ok) {
/* reserve a chunk: shrink the victim's shared region by k */
stealStack[victim]->sharedStart = victimShared + k;
stealStack[victim]->workAvail = victimWorkAvail - k;
#ifdef _SHMEM
// FIXME: These transfers ought to be combined. They can't be
// though because the data protected by the stacklock is not
// the only data in the StealStack structure.
PUT(stealStack[victim]->sharedStart, stealStack[victim]->sharedStart, victim);
PUT(stealStack[victim]->workAvail, stealStack[victim]->workAvail, victim);
#endif
}
UNSET_LOCK(stealStack[victim]->stackLock);
if (debug & 32)
printf("Thread %d releases SS %d\n", GET_THREAD_NUM, victim);
/* if k elts reserved, move them to local portion of our stack
 * (done outside the lock; the reserved slots cannot be reused) */
if (ok) {
SHARED_INDEF Node * victimStackBase = stealStack[victim]->stack_g;
SHARED_INDEF Node * victimSharedStart = victimStackBase + victimShared;
#ifdef _SHMEM
SMEMCPY(&(s->stack[s->top]), victimSharedStart, k * sizeof(Node), victim);
#else
SMEMCPY(&(s->stack[s->top]), victimSharedStart, k * sizeof(Node));
#endif
s->nSteal++;
if (debug & 4) {
int i;
for (i = 0; i < k; i ++) {
Node * r = &(s->stack[s->top + i]);
printf("ss_steal: Thread %2d posn %d (steal #%d) receives %s [%d] from thread %d posn %d (%p)\n",
GET_THREAD_NUM, s->top + i, s->nSteal,
rng_showstate(r->state.state, debug_str),
r->height, victim, victimShared + i,
(void *) victimSharedStart);
}
}
s->top += k;
#ifdef TRACE
/* update session record of thief */
s->md->stealRecords[s->entries[SS_WORK]].victimThread = victim;
#endif
}
else {
s->nFail++;
if (debug & 4) {
printf("Thread %d failed to steal %d nodes from thread %d, ActAv = %d, sh = %d, loc =%d\n",
GET_THREAD_NUM, k, victim, victimWorkAvail, victimShared, victimLocal);
}
}
return (ok);
}
/* search other threads for work to steal */
/* Scan the other threads (round-robin starting after ourselves) for a
 * victim advertising at least k stealable nodes.  The read of
 * workAvail is speculative (no lock); returns the victim id or -1. */
int findwork(int k) {
  int i, v;
  for (i = 1; i < GET_NUM_THREADS; i++) {
    v = (GET_THREAD_NUM + i) % GET_NUM_THREADS;
#ifdef _SHMEM
    GET(stealStack[v]->workAvail, stealStack[v]->workAvail, v);
#endif
    if (stealStack[v]->workAvail >= k)
      return v;
  }
  return -1;
}
/**
* Tracing functions
* Track changes in the search state for offline analysis.
**/
/* Reset the per-thread state-tracking machinery: zero all per-state
 * timers and entry counts and start the thread in SS_IDLE. */
void ss_initState(StealStack *s) {
  int i;
  s->timeLast = uts_wctime();
  for (i = SS_NSTATES; i-- > 0; ) {
    s->time[i] = 0.0;
    s->entries[i] = 0;
  }
  s->curState = SS_IDLE;
  if (debug & 8)
    printf("Thread %d start state %d (t = %f)\n",
           GET_THREAD_NUM, s->curState, s->timeLast);
}
/* Transition this thread to a new search state, charging the elapsed
 * wall-clock time since the last transition to the state being left.
 * No-op when already in `state`; aborts on an out-of-range state. */
void ss_setState(StealStack *s, int state){
double time;
if (state < 0 || state >= SS_NSTATES)
ss_error("ss_setState: thread state out of range");
if (state == s->curState)
return;
time = uts_wctime();
s->time[s->curState] += time - s->timeLast;
#ifdef TRACE
/* close out last session record */
s->md->sessionRecords[s->curState][s->entries[s->curState] - 1].endTime = time;
if (s->curState == SS_WORK)
{
/* nodeCount held the count at session start; convert it to the
 * number of nodes generated during the session */
s->md->stealRecords[s->entries[SS_WORK] - 1].nodeCount = s->nNodes
- s->md->stealRecords[s->entries[SS_WORK] - 1].nodeCount;
}
/* initialize new session record */
s->md->sessionRecords[state][s->entries[state]].startTime = time;
if (state == SS_WORK)
{
/* stash the current node count; finalized on the next transition */
s->md->stealRecords[s->entries[SS_WORK]].nodeCount = s->nNodes;
}
#endif
s->entries[state]++;
s->timeLast = time;
s->curState = state;
if(debug & 8)
printf("Thread %d enter state %d [#%d] (t = %f)\n",
GET_THREAD_NUM, state, s->entries[state], time);
}
#ifdef UTS_STAT
/*
* Statistics,
* : number of nodes per level
* : imbalanceness of nodes per level
*
*/
/* Reset the per-level histogram arrays.  The original loop stopped at
 * MAXHISTSIZE-1 and never touched unbhist[i][0], silently relying on
 * zero-initialized globals; that left the MAXHISTSIZE overflow bucket
 * (used by updateHist) and the imbalance sums stale if initHist were
 * ever called to re-initialize between runs. */
void initHist()
{
  int i;
  for (i = 0; i <= MAXHISTSIZE; i++) {
    hist[i][0] = 0;      /* total children at this depth */
    hist[i][1] = 0;      /* node count at this depth */
    unbhist[i][0] = 0;   /* running sum of imbalance */
    unbhist[i][1] = 1;   /* minimum imbalance seen */
    unbhist[i][2] = 0;   /* maximum imbalance seen */
  }
}
/* Fold node c into the per-level histogram: bump the node count and
 * child total for its depth and track the sum/min/max of the imbalance
 * metric.  Depths beyond the histogram share one overflow bucket. */
void updateHist(Node* c, double unb)
{
  int d = c->height;
  if (d >= MAXHISTSIZE) {
    /* overflow bucket: counts only, no imbalance tracking */
    hist[MAXHISTSIZE][1]++;
    hist[MAXHISTSIZE][0] += c->numChildren;
    return;
  }
  hist[d][1]++;
  hist[d][0] += c->numChildren;
  unbhist[d][0] += unb;
  if (unb < unbhist[d][1])
    unbhist[d][1] = unb;
  if (unb > unbhist[d][2])
    unbhist[d][2] = unb;
}
/* Print the per-depth histogram (levels with any recorded data) to fp. */
void showHist(FILE *fp)
{
  int i;
  fprintf(fp, "depth\tavgNumChildren\t\tnumChildren\t imb\t maxImb\t minImb\t\n");
  for (i = 0; i < MAXHISTSIZE; i++) {
    if (hist[i][0] == 0 || hist[i][1] == 0)
      continue;   /* nothing recorded at this depth */
    fprintf(fp, "%d\t%f\t%d\t %lf\t%lf\t%lf\n", i, (double)hist[i][0]/hist[i][1],
            hist[i][0], unbhist[i][0]/hist[i][1], unbhist[i][1], unbhist[i][2]);
  }
}
/* Compute the imbalance metric for node c from its children's recorded
 * subtree sizes and imbalances.  unbType selects the formula: <2 uses
 * the min size/avg ratio, otherwise max; >0 weights by child imbalance
 * only and normalizes by child count, otherwise also weights by child
 * size and normalizes by subtree size.
 * NOTE(review): the root-children branch tests the GLOBAL `type`
 * (tree type) while updateParStat's matching branch tests `c->type` --
 * presumably equivalent for children of the root, but confirm. */
double getImb(Node *c)
{
int i=0;
double avg=.0, tmp=.0;
double unb=0.0;
avg=(double)c->sizeChildren/c->numChildren;
for (i=0; i<c->numChildren; i++){
if ((type==BIN)&&(c->pp==NULL))
{
/* children of the root of a BIN tree live in rootSize/rootUnb */
if (unbType<2)
tmp=min((double)rootSize[i]/avg, avg/(double)rootSize[i]);
else
tmp=max((double)rootSize[i]/avg, avg/(double)rootSize[i]);
if (unbType>0)
unb+=tmp*rootUnb[i];
else
unb+=tmp*rootUnb[i]*rootSize[i];
}
else{
if (unbType<2)
tmp=min((double)c->size[i]/avg, avg/(double)c->size[i]);
else
tmp=max((double)c->size[i]/avg, avg/(double)c->size[i]);
if (unbType>0)
unb+=tmp*c->unb[i];
else
unb+=tmp*c->unb[i]*c->size[i];
}
}
/* normalize; degenerate nodes get imbalance 1.0 */
if (unbType>0){
if (c->numChildren>0)
unb=unb/c->numChildren;
else unb=1.0;
}
else {
if (c->sizeChildren>1)
unb=unb/c->sizeChildren;
else unb=1.0;
}
if ((debug & 1) && unb>1) printf("unb>1%lf\t%d\n", unb, c->numChildren);
return unb;
}
/* Accumulate Tseng-style imbalance statistics for node c into the
 * global imb_* accumulators: t_max is the fraction of the subtree's
 * work in the largest child, t_avg the ideal 1/numChildren share. */
void getImb_Tseng(Node *c)
{
double t_max, t_avg, t_devmaxavg, t_normdevmaxavg;
if (c->numChildren==0)
{
t_avg =0;
t_max =0;
}
else
{
/* sizeChildren-1 excludes c itself from the subtree total */
t_max = (double)c->maxSizeChildren/(c->sizeChildren-1);
t_avg = (double)1/c->numChildren;
}
t_devmaxavg = t_max-t_avg;
if (debug & 1)
printf("max\t%lf, %lf, %d, %d, %d\n", t_max, t_avg,
c->maxSizeChildren, c->sizeChildren, c->numChildren);
/* guard the normalized deviation against division by zero */
if (1-t_avg==0)
t_normdevmaxavg = 1;
else
t_normdevmaxavg = (t_max-t_avg)/(1-t_avg);
imb_max += t_max;
imb_avg += t_avg;
imb_devmaxavg += t_devmaxavg;
imb_normdevmaxavg +=t_normdevmaxavg;
}
/* Update all sequential tree statistics with completed node c: global
 * height/imbalance extremes, the per-level histogram, Tseng metrics,
 * and propagation of c's subtree size and imbalance into its parent's
 * records.  Children of a BIN tree's root are recorded in the
 * heap-allocated rootSize/rootUnb arrays rather than the parent's
 * fixed-size per-child arrays. */
void updateParStat(Node *c)
{
double unb;
totalNodes++;
if (maxHeight<c->height)
maxHeight=c->height;
unb=getImb(c);
maxImb=max(unb, maxImb);
minImb=min(unb, minImb);
updateHist(c, unb);
getImb_Tseng(c);
if (c->pp!=NULL){
if ((c->type==BIN)&&(c->pp->pp==NULL)){
/* child of a BIN root: record into the dedicated root arrays */
rootSize[c->pp->ind]=c->sizeChildren;
rootUnb[c->pp->ind]=unb;
}
else{
c->pp->size[c->pp->ind]=c->sizeChildren;
c->pp->unb[c->pp->ind]=unb;
}
/* update statistics per node*/
c->pp->ind++;
c->pp->sizeChildren+=c->sizeChildren;
if (c->pp->maxSizeChildren<c->sizeChildren)
c->pp->maxSizeChildren=c->sizeChildren;
}
else
/* c is the root: its imbalance is the whole tree's imbalance */
treeImb = unb;
}
#endif
/*
* Tree Implementation
*
*/
/* Initialize a node template: mark type/height/child count as "not yet
 * determined" and, when statistics are enabled, clear the per-child
 * bookkeeping arrays. */
void initNode(Node * child)
{
  child->type = -1;
  child->height = -1;
  child->numChildren = -1;   /* determined later by uts_numChildren */
#ifdef UTS_STAT
  if (stats) {
    int i;
    child->pp = NULL;
    child->ind = 0;
    child->sizeChildren = 1;
    child->maxSizeChildren = 0;
    for (i = 0; i < MAXNUMCHILDREN; i++) {
      child->size[i] = 0;
      child->unb[i] = 0.0;
    }
  }
#endif
}
/* Initialize the root node of the tree, plus (under UTS_STAT) its
 * statistics fields.  For BIN trees the root's children statistics go
 * into heap arrays sized by ceil(b_0); the original code never checked
 * those malloc results -- now aborts via ss_error on failure. */
void initRootNode(Node * root, int type)
{
  uts_initRoot(root, type);
#ifdef TRACE
  stealStack[0]->md->stealRecords[0].victimThread = 0; // first session is own "parent session"
#endif
#ifdef UTS_STAT
  if (stats) {
    int i;
    root->ind = 0;
    root->sizeChildren = 1;
    root->maxSizeChildren = 1;
    root->pp = NULL;
    if (type != BIN) {
      for (i = 0; i < MAXNUMCHILDREN; i++) {
        root->size[i] = 0;
        root->unb[i] = .0;
      }
    }
    else {
      /* BIN root branching factor comes from the b_0 parameter */
      int rbf = (int) ceil(b_0);
      rootSize = malloc(rbf * sizeof(int));
      rootUnb = malloc(rbf * sizeof(double));
      if (rootSize == NULL || rootUnb == NULL)
        ss_error("initRootNode: out of memory");
      for (i = 0; i < rbf; i++) {
        rootSize[i] = 0;
        rootUnb[i] = 0.0;
      }
    }
  }
#endif
}
// forward decl
void releaseNodes(StealStack *ss);
/*
* Generate all children of the parent
*
* details depend on tree type, node type and shape function
*
*/
/* Generate all children of parent and push each onto ss.  The single
 * `child` template is reused: rng_spawn overwrites its RNG state for
 * every child before ss_push copies it onto the stack.  releaseNodes
 * runs after every push so idle threads can start stealing early.
 * Leaves (numChildren == 0) only bump the leaf counter. */
void genChildren(Node * parent, Node * child, StealStack * ss) {
int parentHeight = parent->height;
int numChildren, childType;
numChildren = uts_numChildren(parent);
childType = uts_childType(parent);
// record number of children in parent
parent->numChildren = numChildren;
if (debug & 2) {
printf("Gen: Thread %d, posn %2d: node %s [%d] has %2d children\n",
GET_THREAD_NUM, ss_topPosn(ss),
rng_showstate(parent->state.state, debug_str),
parentHeight, numChildren);
}
// construct children and push onto stack
if (numChildren > 0) {
int i, j;
child->type = childType;
child->height = parentHeight + 1;
#ifdef UTS_STAT
if (stats)
child->pp = parent; // pointer to parent
#endif
for (i = 0; i < numChildren; i++) {
for (j = 0; j < computeGranularity; j++) {
// TBD: add parent height to spawn
// computeGranularity controls number of rng_spawn calls per node
rng_spawn(parent->state.state, child->state.state, i);
}
ss_push(ss, child);
releaseNodes(ss);
}
} else {
ss->nLeaves++;
}
}
/*
* Parallel tree traversal
*
*/
// cancellable barrier
// initialize lock: single thread under omp, all threads under upc
void cb_init(){
  /* allocated by one thread under omp, by every thread under upc */
  INIT_SINGLE_LOCK(cb_lock);
  if (debug & 4)
    printf("Thread %d, cb lock at %p\n", GET_THREAD_NUM, (void *) cb_lock);
  // fixme: no need for all upc threads to repeat this
  SET_LOCK(cb_lock);
  cb_done = 0;
  cb_cancel = 0;
  cb_count = 0;
  UNSET_LOCK(cb_lock);
}
// delay this thread until all threads arrive at barrier
// or until barrier is cancelled
/* Block this thread at the cancellable barrier until either every
 * thread has arrived (termination: cb_done set) or some thread made
 * new work available and cancelled the barrier.  Returns cb_done:
 * nonzero means the whole search is finished.  The flags are mutated
 * under cb_lock but spun on without it (they are declared VOLATILE);
 * under SHMEM every update is additionally pushed to all PEs. */
int cbarrier_wait() {
int l_count, l_done, l_cancel;
int pe = GET_THREAD_NUM;
SET_LOCK(cb_lock);
cb_count++;
#ifdef _SHMEM
PUT_ALL(cb_count, cb_count);
#endif
if (cb_count == GET_NUM_THREADS) {
/* last thread to arrive: everyone is idle, search is over */
cb_done = 1;
#ifdef _SHMEM
PUT_ALL(cb_done, cb_done);
#endif
}
l_count = cb_count;
l_done = cb_done;
/* a wakeup that found no new nodes since last time was "false" */
if (stealStack[pe]->nNodes_last == stealStack[pe]->nNodes) {
++stealStack[pe]->falseWakeups;
}
stealStack[GET_THREAD_NUM]->nNodes_last = stealStack[pe]->nNodes;
UNSET_LOCK(cb_lock);
if (debug & 16)
printf("Thread %d enter spin-wait, count = %d, done = %d\n",
GET_THREAD_NUM, l_count, l_done);
// spin
do {
#ifdef __BERKELEY_UPC__
bupc_poll();
#endif
l_count = cb_count;
l_cancel = cb_cancel;
l_done = cb_done;
}
while (!l_cancel && !l_done);
if (debug & 16)
printf("Thread %d exit spin-wait, count = %d, done = %d, cancel = %d\n",
GET_THREAD_NUM, l_count, l_done, l_cancel);
SET_LOCK(cb_lock);
cb_count--;
l_count = cb_count;
#ifdef _SHMEM
PUT_ALL(cb_count, cb_count);
#endif
/* consume the cancellation so the barrier can be reused */
cb_cancel = 0;
l_done = cb_done;
++stealStack[GET_THREAD_NUM]->wakeups;
UNSET_LOCK(cb_lock);
if (debug & 16)
printf("Thread %d exit idle state, count = %d, done = %d\n",
GET_THREAD_NUM, l_count, cb_done);
return cb_done;
}
// causes one or more threads waiting at barrier, if any,
// to be released
/* Wake any threads spinning in cbarrier_wait by raising cb_cancel.
 * Under BUPC this is an async remote put; the preceding waitsync
 * retires the previous put so the single handle can be reused. */
void cbarrier_cancel() {
#ifdef _SHMEM
cb_cancel = 1;
PUT_ALL(cb_cancel, cb_cancel);
#elif defined (__BERKELEY_UPC__)
bupc_waitsync(cb_handle);
cb_handle = bupc_memput_async((shared void*)&cb_cancel, (const void*)&local_cb_cancel, sizeof(int));
#else
cb_cancel = 1;
#endif /* _SHMEM */
}
/* If stealing is enabled and this thread has accumulated more than
 * 2*chunkSize local nodes, publish one chunk for stealing and cancel
 * the barrier so idle threads wake up and look for it. */
void releaseNodes(StealStack *ss){
  if (!doSteal)
    return;
  if (ss_localDepth(ss) <= 2 * chunkSize)
    return;
  // Attribute this time to runtime overhead
  ss_setState(ss, SS_OVH);
  ss_release(ss, chunkSize);
  // This has significant overhead on clusters!
  if (ss->nNodes % cbint == 0) {
    ss_setState(ss, SS_CBOVH);
    cbarrier_cancel();
  }
#ifdef __BERKELEY_UPC__
  if (ss->nNodes % pollint == 0) {
    ss_setState(ss, SS_OVH);
    bupc_poll();
  }
#endif
  ss_setState(ss, SS_WORK);
}
/*
* parallel search of UTS trees using work stealing
*
* Note: tree size is measured by the number of
* push operations
*/
/* Main per-thread search loop.  Repeatedly: (1) process local work --
 * a node seen for the first time (numChildren < 0) has its children
 * generated and pushed; a node seen again is popped (its subtree is
 * complete); (2) when local work runs out, re-acquire from this
 * thread's own shared region; (3) failing that, try to steal a chunk
 * from another thread; (4) failing that, idle at the cancellable
 * barrier until new work appears or all threads agree we are done. */
void parTreeSearch(StealStack *ss) {
int done = 0;
Node * parent;
Node child;
/* template for children */
initNode(&child);
/* tree search */
while (done == 0) {
/* local work */
while (ss_localDepth(ss) > 0) {
ss_setState(ss, SS_WORK);
/* examine node at stack top */
parent = ss_top(ss);
if (parent->numChildren < 0){
// first time visited, construct children and place on stack
genChildren(parent,&child,ss);
}
else {
// second time visit, process accumulated statistics and pop
#ifdef UTS_STAT
if (stats)
updateParStat(parent);
#endif
ss_pop(ss);
}
// release some nodes for stealing, if enough are available
// and wake up quiescent threads
releaseNodes(ss);
}
/* local work exhausted on this stack - resume tree search if able
 * to re-acquire work from shared portion of this thread's stack
 */
if (ss_acquire(ss, chunkSize))
continue;
/* no work left in this thread's stack */
/* try to steal work from another thread's stack */
if (doSteal) {
int goodSteal = 0;
int victimId;
ss_setState(ss, SS_SEARCH);
victimId = findwork(chunkSize);
while (victimId != -1 && !goodSteal) {
// some work detected, try to steal it
goodSteal = ss_steal(ss, victimId, chunkSize);
if (!goodSteal)
victimId = findwork(chunkSize);
}
if (goodSteal)
continue;
}
/* unable to steal work from shared portion of other stacks -
 * enter quiescent state waiting for termination (done != 0)
 * or cancellation because some thread has made work available
 * (done == 0).
 */
ss_setState(ss, SS_IDLE);
done = cbarrier_wait();
}
/* tree search complete ! */
}
#ifdef __PTHREADS__
/* Pthreads ParTreeSearch Arguments */
/* Pthreads ParTreeSearch Arguments */
struct pthread_args {
  StealStack *ss;
  int id;
};

/* Pthreads ParTreeSearch Wrapper: stashes the thread's logical id in
 * thread-specific storage, then runs the search on its steal stack. */
void * pthread_spawn_search(void *arg)
{
  struct pthread_args *targs = (struct pthread_args *) arg;

  pthread_setspecific(pthread_thread_num, &targs->id);
  parTreeSearch(targs->ss);
  return NULL;
}
#endif /* __PTHREADS__ */
#ifdef TRACE
// print session records for each thread (used when trace is enabled)
/* Dump one line per recorded session (thread, state, start, end), with
 * times rebased to thread 0's start time; SS_WORK sessions additionally
 * report the steal victim and node count. */
void printSessionRecords()
{
  int i, j, k;
  double offset;

  for (i = 0; i < GET_NUM_THREADS; i++) {
    /* rebase this thread's timestamps against thread 0 */
    offset = startTime[i] - startTime[0];

    for (j = 0; j < SS_NSTATES; j++)
      for (k = 0; k < stealStack[i]->entries[j]; k++) {
        printf ("%d %d %f %f", i, j,
                stealStack[i]->md->sessionRecords[j][k].startTime - offset,
                stealStack[i]->md->sessionRecords[j][k].endTime - offset);
        /* NOTE(review): stealRecords is indexed by the SS_WORK session
         * number k -- assumed to be recorded in lockstep with work
         * sessions; confirm against the recording code. */
        if (j == SS_WORK)
          printf (" %d %ld",
                  stealStack[i]->md->stealRecords[k].victimThread,
                  stealStack[i]->md->stealRecords[k].nodeCount);
        printf ("\n");
      }
  }
}
#endif
// display search statistics
// display search statistics
// Aggregates per-thread counters/timers into global totals, prints a
// summary (verbosity-dependent detail), and optionally appends tree
// statistics to stat.txt.
void showStats(double elapsedSecs) {
  int i;
  int tnodes = 0, tleaves = 0, trel = 0, tacq = 0, tsteal = 0, tfail= 0;
  int mdepth = 0, mheight = 0;
  double twork = 0.0, tsearch = 0.0, tidle = 0.0, tovh = 0.0, tcbovh = 0.0;

#ifdef _SHMEM
  {
    int pe;
    /* Assemble all of the stealstacks so we can gather some stats.
     * SMEMCPY with identical src/dst symmetric addresses is a remote
     * get: it pulls PE `pe`'s copy into our local symmetric buffer. */
    for (i = 1; i < GET_NUM_THREADS; i++) {
      pe = (GET_THREAD_NUM + i) % GET_NUM_THREADS;

      /* Collect up all of the StealStacks */
      SMEMCPY(stealStack[pe], stealStack[pe], sizeof(StealStack), pe);

#ifdef TRACE
      /* Get the MetaData as well
       * NOTE(review): size is sizeof(StealStack), not the metadata
       * struct's size -- looks suspicious; confirm against the md type. */
      SMEMCPY(stealStack[pe]->md, stealStack[pe]->md, sizeof(StealStack), pe);
#endif
    }
  }
#endif

  // combine measurements from all threads
  for (i = 0; i < GET_NUM_THREADS; i++) {
    tnodes += stealStack[i]->nNodes;
    tleaves += stealStack[i]->nLeaves;
    trel += stealStack[i]->nRelease;
    tacq += stealStack[i]->nAcquire;
    tsteal += stealStack[i]->nSteal;
    tfail += stealStack[i]->nFail;
    twork += stealStack[i]->time[SS_WORK];
    tsearch += stealStack[i]->time[SS_SEARCH];
    tidle += stealStack[i]->time[SS_IDLE];
    tovh += stealStack[i]->time[SS_OVH];
    tcbovh += stealStack[i]->time[SS_CBOVH];
    mdepth = max(mdepth, stealStack[i]->maxStackDepth);
    mheight = max(mheight, stealStack[i]->maxTreeDepth);
  }
  /* sanity: every released chunk must have been reacquired or stolen */
  if (trel != tacq + tsteal) {
    printf("*** error! total released != total acquired + total stolen\n");
  }

  uts_showStats(GET_NUM_THREADS, chunkSize, elapsedSecs, tnodes, tleaves, mheight);

  if (verbose > 1) {
    if (doSteal) {
      printf("Total chunks released = %d, of which %d reacquired and %d stolen\n",
             trel, tacq, tsteal);
      printf("Failed steal operations = %d, ", tfail);
    }

    printf("Max stealStack size = %d\n", mdepth);
    printf("Avg time per thread: Work = %.6f, Search = %.6f, Idle = %.6f\n", (twork / GET_NUM_THREADS),
           (tsearch / GET_NUM_THREADS), (tidle / GET_NUM_THREADS));
    printf("                     Overhead = %6f, CB_Overhead = %6f\n\n", (tovh / GET_NUM_THREADS),
           (tcbovh/GET_NUM_THREADS));
  }

  // per thread execution info
  if (verbose > 2) {
    for (i = 0; i < GET_NUM_THREADS; i++) {
      printf("** Thread %d\n", i);
      printf("  # nodes explored    = %d\n", stealStack[i]->nNodes);
      printf("  # chunks released   = %d\n", stealStack[i]->nRelease);
      printf("  # chunks reacquired = %d\n", stealStack[i]->nAcquire);
      printf("  # chunks stolen     = %d\n", stealStack[i]->nSteal);
      printf("  # failed steals     = %d\n", stealStack[i]->nFail);
      printf("  maximum stack depth = %d\n", stealStack[i]->maxStackDepth);
      printf("  work time           = %.6f secs (%d sessions)\n",
             stealStack[i]->time[SS_WORK], stealStack[i]->entries[SS_WORK]);
      printf("  overhead time       = %.6f secs (%d sessions)\n",
             stealStack[i]->time[SS_OVH], stealStack[i]->entries[SS_OVH]);
      printf("  search time         = %.6f secs (%d sessions)\n",
             stealStack[i]->time[SS_SEARCH], stealStack[i]->entries[SS_SEARCH]);
      printf("  idle time           = %.6f secs (%d sessions)\n",
             stealStack[i]->time[SS_IDLE], stealStack[i]->entries[SS_IDLE]);
      printf("  wakeups             = %d, false wakeups = %d (%.2f%%)",
             stealStack[i]->wakeups, stealStack[i]->falseWakeups,
             (stealStack[i]->wakeups == 0) ? 0.00 : ((((double)stealStack[i]->falseWakeups)/stealStack[i]->wakeups)*100.0));
      printf("\n");
    }
  }

#ifdef TRACE
  printSessionRecords();
#endif

  // tree statistics output to stat.txt, if requested
#ifdef UTS_STAT
  if (stats) {
    FILE *fp;
    char * tmpstr;
    char strBuf[5000];
    int  ind = 0;

    /* NOTE(review): "a+w" is not a standard fopen mode string (the 'w'
     * flag is implementation-defined junk) -- "a+" is what is meant. */
    fp = fopen("stat.txt", "a+w");
    fprintf(fp, "\n------------------------------------------------------------------------------------------------------\n");
    ind = uts_paramsToStr(strBuf, ind);
    ind = impl_paramsToStr(strBuf, ind);
    //showParametersStr(strBuf);
    fprintf(fp, "%s\n", strBuf);

    fprintf(fp, "\nTotal nodes = %d\n", totalNodes);
    fprintf(fp, "Max depth   = %d\n\n", maxHeight);
    fprintf(fp, "Tseng ImbMeasure(overall)\n max:\t\t%lf \n avg:\t\t%lf \n devMaxAvg:\t %lf\n normDevMaxAvg: %lf\t\t\n\n",
            imb_max/totalNodes, imb_avg/totalNodes, imb_devmaxavg/totalNodes,
            imb_normdevmaxavg/totalNodes);

    switch (unbType){
    case 0: tmpstr = "(min imb weighted by size)"; break;
    case 1: tmpstr = "(min imb not weighted by size)"; break;
    case 2: tmpstr = "(max imb not weighted by size)"; break;
    default: tmpstr = "(?unknown measure)"; break;
    }
    fprintf(fp, "ImbMeasure:\t%s\n Overall:\t %lf\n Max:\t\t%lf\n Min:\t\t%lf\n\n",
            tmpstr, treeImb, minImb, maxImb);
    showHist(fp);
    fprintf(fp, "\n------------------------------------------------------------------------------------------------------\n\n\n");
    fclose(fp);
  }
#endif
}
/* PThreads main() function:
* Pthreads is quite a bit different because of how global data has to be stored
* using setspecific() and getspecific(). So, many functions are not safe to call
* in the single-threaded context.
*/
#ifdef __PTHREADS__
/* Pthreads driver: parses parameters, builds the steal stacks, spawns
 * one search thread per logical thread, joins them, and reports stats.
 * Returns 0 on success; aborts via impl_abort() on fatal errors. */
int pthread_main(int argc, char *argv[]) {
  Node root;
  double t1, t2;
  int i, err;
  void *rval;
  struct pthread_args *args;
  pthread_t *thread_ids;

  uts_parseParams(argc, argv);
  uts_printParams();
  cb_init();

  /* allocate stealstacks */
  for (i = 0; i < GET_NUM_THREADS; i++) {
    stealStack[i] = ALLOC (sizeof(StealStack));
    ss_init(stealStack[i], MAXSTACKDEPTH);
  }

  /* initialize root node and push on thread 0 stack */
  uts_initRoot(&root, type);
  ss_push(stealStack[0], &root);

  thread_ids = malloc(sizeof(pthread_t)*GET_NUM_THREADS);
  args = malloc(sizeof(struct pthread_args)*GET_NUM_THREADS);
  /* bug fix: these mallocs were previously unchecked */
  if (thread_ids == NULL || args == NULL) {
    printf("FATAL: Out of memory allocating thread state\n");
    impl_abort(1);
  }
  pthread_key_create(&pthread_thread_num, NULL);

  /* start timing */
  t1 = uts_wctime();

  for (i = 0; i < GET_NUM_THREADS; i++) {
    ss_initState(stealStack[i]);
    args[i].ss = stealStack[i];
    args[i].id = i;
    err = pthread_create(&thread_ids[i], NULL, pthread_spawn_search, (void*)&args[i]);
    if (err != 0) {
      /* bug fix: pthread_create returns an error code, not a thread
       * index -- report both the failing thread and the code */
      printf("FATAL: Error %d spawning thread %d\n", err, i);
      impl_abort(1);
    }
  }

  for (i = 0; i < GET_NUM_THREADS; i++) {
    pthread_join(thread_ids[i], &rval);
  }

  /* stop timing */
  t2 = uts_wctime();

  showStats(t2-t1);

  /* bug fix: thread bookkeeping arrays were leaked */
  free(thread_ids);
  free(args);
  return 0;
}
#endif /* __PTHREADS__ */
/* Main() function for: Sequential, OpenMP, UPC, and Shmem
*
* Notes on execution model:
* - under openMP, global vars are all shared
* - under UPC, global vars are private unless explicitly shared
* - UPC is SPMD starting with main, OpenMP goes SPMD after
* parsing parameters
*/
/* Entry point for Sequential, OpenMP, UPC, and Shmem builds (Pthreads
 * builds delegate immediately to pthread_main).  Under OpenMP this goes
 * SPMD at the `#pragma omp parallel` below; under UPC/Shmem it is SPMD
 * from the start. */
int main(int argc, char *argv[]) {
  Node root;

#ifdef __PTHREADS__
  return pthread_main(argc, argv);
#endif

#ifdef _SHMEM
  start_pes(0);
#endif

  /* determine benchmark parameters (all PEs) */
  uts_parseParams(argc, argv);

#ifdef UTS_STAT
  if (stats)
    initHist();
#endif

  /* cancellable barrier initialization (single threaded under OMP) */
  cb_init();

/********** SPMD Parallel Region **********/
#pragma omp parallel
  {
    double t1, t2, et;
    StealStack * ss;

    /* show parameter settings */
    if (GET_THREAD_NUM == 0) {
      uts_printParams();
    }

    /* initialize stealstacks */
#ifdef _SHMEM
    {
      /* Shared allocation is a collective operation in Shmem. These
       * need to be done all at once and in the same order on each PE.
       *
       * Note: Only our own stealstack will contain valid data as UTS runs.
       * For stats, we'll need to gather everyone else's stealstacks
       */
      int i;
      for (i = 0; i < GET_NUM_THREADS; i++) {
        stealStack[i] = (SHARED StealStack *) ALLOC (sizeof(StealStack));
        ss = (StealStack *) stealStack[i];
        ss_init(ss, MAXSTACKDEPTH);
      }
      ss = stealStack[GET_THREAD_NUM];
    }
#else
    stealStack[GET_THREAD_NUM] = (SHARED StealStack *) ALLOC (sizeof(StealStack));
    ss = (StealStack *) stealStack[GET_THREAD_NUM];
    ss_init(ss, MAXSTACKDEPTH);
#endif /* _SHMEM */

    /* initialize root node and push on thread 0 stack
     * NOTE(review): pthread_main calls uts_initRoot() here instead of
     * initRootNode() -- confirm both names refer to the same helper. */
    if (GET_THREAD_NUM == 0) {
      initRootNode(&root, type);
      ss_push(ss, &root);
    }

    // line up for the start
#pragma omp barrier
    BARRIER

    /* time parallel search */
    ss_initState(ss);
    t1 = uts_wctime();
    parTreeSearch(ss);
    t2 = uts_wctime();
    et = t2 - t1;

#ifdef TRACE
    startTime[GET_THREAD_NUM] = t1;
    /* close out the final idle session with the true end time */
    ss->md->sessionRecords[SS_IDLE][ss->entries[SS_IDLE] - 1].endTime = t2;
#endif

#pragma omp barrier
    BARRIER

    /* display results */
    if (GET_THREAD_NUM == 0) {
      showStats(et);
    }
  }
/********** End Parallel Region **********/

  return 0;
}
/*
* kmp_csupport.c -- kfront linkage support for OpenMP.
*/
/* <copyright>
Copyright (c) 1997-2016 Intel Corporation. All Rights Reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
</copyright> */
#include "omp.h" /* extern "C" declarations of user-visible routines */
#include "kmp.h"
#include "kmp_i18n.h"
#include "kmp_itt.h"
#include "kmp_error.h"
#include "kmp_stats.h"
#if OMPT_SUPPORT
#include "ompt-internal.h"
#include "ompt-specific.h"
#endif
#define MAX_MESSAGE 512
/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */
/* flags will be used in future, e.g., to implement */
/* openmp_strict library restrictions */
/*!
* @ingroup STARTUP_SHUTDOWN
* @param loc in source location information
* @param flags in for future use (currently ignored)
*
* Initialize the runtime library. This call is optional; if it is not made then
* it will be implicitly called by attempts to use other library functions.
*
*/
void
__kmpc_begin(ident_t *loc, kmp_int32 flags)
{
    /* Initialize the runtime unless KMP_IGNORE_MPPBEG says not to
     * (__kmp_ignore_mppbeg() returns TRUE by default, so this is
     * normally a no-op; initialization then happens lazily). */
    if (!__kmp_ignore_mppbeg()) {
        __kmp_internal_begin();

        KC_TRACE( 10, ("__kmpc_begin: called\n" ) );
    }
}
/*!
* @ingroup STARTUP_SHUTDOWN
* @param loc source location information
*
* Shutdown the runtime library. This is also optional, and even if called will not
* do anything unless the `KMP_IGNORE_MPPEND` environment variable is set to zero.
*/
void
__kmpc_end(ident_t *loc)
{
    /* __kmp_ignore_mppend() returns TRUE by default, which makes this a
     * no-op.  Setting KMP_IGNORE_MPPEND=0 flips it to FALSE, and the
     * call then unregisters this root (possibly shutting the library
     * down). */
    if (!__kmp_ignore_mppend()) {
        KC_TRACE( 10, ("__kmpc_end: called\n" ) );
        KA_TRACE( 30, ("__kmpc_end\n" ));

        __kmp_internal_end_thread( -1 );
    }
}
/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return The global thread index of the active thread.
This function can be called in any context.
If the runtime has only been entered at the outermost level from a
single (necessarily non-OpenMP<sup>*</sup>) thread, then the thread number is that
which would be returned by omp_get_thread_num() in the outermost
active parallel construct. (Or zero if there is no active parallel
construct, since the master thread is necessarily thread zero).
If multiple non-OpenMP threads all enter an OpenMP construct then this
will be a unique thread identifier among all the threads created by
the OpenMP runtime (but the value cannot be defined in terms of
OpenMP thread ids returned by omp_get_thread_num()).
*/
kmp_int32
__kmpc_global_thread_num(ident_t *loc)
{
    /* Look up (registering the thread with the runtime if necessary)
     * the caller's global thread id. */
    const kmp_int32 global_tid = __kmp_entry_gtid();

    KC_TRACE( 10, ("__kmpc_global_thread_num: T#%d\n", global_tid ) );

    return global_tid;
}
/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return The number of threads under control of the OpenMP<sup>*</sup> runtime
This function can be called in any context.
It returns the total number of threads under the control of the OpenMP runtime. That is
not a number that can be determined by any OpenMP standard calls, since the library may be
called from more than one non-OpenMP thread, and this reflects the total over all such calls.
Similarly the runtime maintains underlying threads even when they are not active (since the cost
of creating and destroying OS threads is high), this call counts all such threads even if they are not
waiting for work.
*/
kmp_int32
__kmpc_global_num_threads(ident_t *loc)
{
    KC_TRACE( 10, ("__kmpc_global_num_threads: num_threads = %d\n", __kmp_nth ) );

    /* TCR_4 performs a coherent 4-byte read of the global counter. */
    kmp_int32 total = TCR_4(__kmp_nth);
    return total;
}
/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return The thread number of the calling thread in the innermost active parallel construct.
*/
kmp_int32
__kmpc_bound_thread_num(ident_t *loc)
{
    KC_TRACE( 10, ("__kmpc_bound_thread_num: called\n" ) );

    /* Map the caller's global id to its id within the current team. */
    kmp_int32 gtid = __kmp_entry_gtid();
    return __kmp_tid_from_gtid( gtid );
}
/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return The number of threads in the innermost active parallel construct.
*/
kmp_int32
__kmpc_bound_num_threads(ident_t *loc)
{
    KC_TRACE( 10, ("__kmpc_bound_num_threads: called\n" ) );

    /* Size of the innermost team the calling thread belongs to. */
    kmp_info_t *thr = __kmp_entry_thread();
    return thr->th.th_team->t.t_nproc;
}
/*!
* @ingroup DEPRECATED
* @param loc location description
*
* This function need not be called. It always returns TRUE.
*/
/* Decide whether a parallel region may fork.  In release builds this is
 * always TRUE.  In debug builds the KMP_PAR_RANGE filter can restrict
 * forking by filename / routine / line number, parsed out of
 * loc->psource (";file;routine;line;...").  Returns TRUE/FALSE. */
kmp_int32
__kmpc_ok_to_fork(ident_t *loc)
{
#ifndef KMP_DEBUG

    return TRUE;

#else

    const char *semi2;
    const char *semi3;
    int line_no;

    /* No KMP_PAR_RANGE filtering requested: always fork. */
    if (__kmp_par_range == 0) {
        return TRUE;
    }
    semi2 = loc->psource;
    if (semi2 == NULL) {
        return TRUE;
    }
    /* advance to the second ';' — the end of the filename field */
    semi2 = strchr(semi2, ';');
    if (semi2 == NULL) {
        return TRUE;
    }
    semi2 = strchr(semi2 + 1, ';');
    if (semi2 == NULL) {
        return TRUE;
    }
    if (__kmp_par_range_filename[0]) {
        /* compare against the basename (text after the last '/' or ';') */
        const char *name = semi2 - 1;
        while ((name > loc->psource) && (*name != '/') && (*name != ';')) {
            name--;
        }
        if ((*name == '/') || (*name == ';')) {
            name++;
        }
        if (strncmp(__kmp_par_range_filename, name, semi2 - name)) {
            return __kmp_par_range < 0;
        }
    }
    semi3 = strchr(semi2 + 1, ';');
    if (__kmp_par_range_routine[0]) {
        if ((semi3 != NULL) && (semi3 > semi2)
          && (strncmp(__kmp_par_range_routine, semi2 + 1, semi3 - semi2 - 1))) {
            return __kmp_par_range < 0;
        }
    }
    /* BUG FIX: psource may have no third ';' (semi3 == NULL); the old
     * code dereferenced semi3 + 1 unconditionally below. */
    if (semi3 == NULL) {
        return TRUE;
    }
    if (KMP_SSCANF(semi3 + 1, "%d", &line_no) == 1) {
        if ((line_no >= __kmp_par_range_lb) && (line_no <= __kmp_par_range_ub)) {
            return __kmp_par_range > 0;
        }
        return __kmp_par_range < 0;
    }
    return TRUE;

#endif /* KMP_DEBUG */

}
/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return 1 if this thread is executing inside an active parallel region, zero if not.
*/
kmp_int32
__kmpc_in_parallel( ident_t *loc )
{
    /* Nonzero iff the caller's root currently has an active parallel
     * region. */
    kmp_info_t *thr = __kmp_entry_thread();
    return thr->th.th_root->r.r_active;
}
/*!
@ingroup PARALLEL
@param loc source location information
@param global_tid global thread number
@param num_threads number of threads requested for this parallel construct
Set the number of threads to be used by the next fork spawned by this thread.
This call is only required if the parallel construct has a `num_threads` clause.
*/
void
__kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_threads )
{
    KA_TRACE( 20, ("__kmpc_push_num_threads: enter T#%d num_threads=%d\n",
      global_tid, num_threads ) );

    /* Record the num_threads clause value for the next fork; forwards
     * straight to the internal implementation. */
    __kmp_push_num_threads( loc, global_tid, num_threads );
}
void
__kmpc_pop_num_threads(ident_t *loc, kmp_int32 global_tid )
{
    KA_TRACE( 20, ("__kmpc_pop_num_threads: enter\n" ) );

    /* Intentionally empty: the pushed num_threads value is consumed
     * (popped) automatically by the fork itself. */
}
#if OMP_40_ENABLED
void
__kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid, kmp_int32 proc_bind )
{
    KA_TRACE( 20, ("__kmpc_push_proc_bind: enter T#%d proc_bind=%d\n",
      global_tid, proc_bind ) );

    /* Record the proc_bind clause value for the next fork. */
    __kmp_push_proc_bind( loc, global_tid, (kmp_proc_bind_t)proc_bind );
}
#endif /* OMP_40_ENABLED */
/*!
@ingroup PARALLEL
@param loc source location information
@param argc total number of arguments in the ellipsis
@param microtask pointer to callback routine consisting of outlined parallel construct
@param ... pointers to shared variables that aren't global
Do the actual fork and call the microtask in the relevant number of threads.
*/
/* Fork a parallel region: gather the shared-variable pointers from the
 * ellipsis, hand everything to __kmp_fork_call, run the region, then
 * join.  Interface per the doxygen comment above. */
void
__kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...)
{
  int         gtid = __kmp_entry_gtid();
#if (KMP_STATS_ENABLED)
  /* stats: count nested vs. top-level regions; the serial timer only
   * pauses for a top-level fork */
  int inParallel = __kmpc_in_parallel(loc);
  if (inParallel)
  {
      KMP_COUNT_BLOCK(OMP_NESTED_PARALLEL);
  }
  else
  {
      KMP_STOP_EXPLICIT_TIMER(OMP_serial);
      KMP_COUNT_BLOCK(OMP_PARALLEL);
  }
#endif

  // maybe to save thr_state is enough here
  {
    va_list     ap;
    va_start(   ap, microtask );

#if OMPT_SUPPORT
    int tid = __kmp_tid_from_gtid( gtid );
    kmp_info_t *master_th = __kmp_threads[ gtid ];
    kmp_team_t *parent_team = master_th->th.th_team;
    if (ompt_enabled) {
       /* publish the frame re-entering the runtime so tools can unwind */
       parent_team->t.t_implicit_task_taskdata[tid].
           ompt_task_info.frame.reenter_runtime_frame = __builtin_frame_address(0);
    }
#endif

#if INCLUDE_SSC_MARKS
    SSC_MARK_FORKING();
#endif
    __kmp_fork_call( loc, gtid, fork_context_intel,
            argc,
#if OMPT_SUPPORT
            VOLATILE_CAST(void *) microtask,      // "unwrapped" task
#endif
            VOLATILE_CAST(microtask_t) microtask, // "wrapped" task
            VOLATILE_CAST(launch_t)    __kmp_invoke_task_func,
/* TODO: revert workaround for Intel(R) 64 tracker #96 */
#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
            /* on these ABIs the va_list is passed by address */
            &ap
#else
            ap
#endif
            );
#if INCLUDE_SSC_MARKS
    SSC_MARK_JOINING();
#endif
    __kmp_join_call( loc, gtid
#if OMPT_SUPPORT
        , fork_context_intel
#endif
    );

    va_end( ap );

#if OMPT_SUPPORT
    if (ompt_enabled) {
       /* region is over: clear the published runtime frame */
       parent_team->t.t_implicit_task_taskdata[tid].
           ompt_task_info.frame.reenter_runtime_frame = 0;
    }
#endif
  }
#if (KMP_STATS_ENABLED)
  if (!inParallel)
  {
      KMP_START_EXPLICIT_TIMER(OMP_serial);
  }
#endif
}
#if OMP_40_ENABLED
/*!
@ingroup PARALLEL
@param loc source location information
@param global_tid global thread number
@param num_teams number of teams requested for the teams construct
@param num_threads number of threads per team requested for the teams construct
Set the number of teams to be used by the teams construct.
This call is only required if the teams construct has a `num_teams` clause
or a `thread_limit` clause (or both).
*/
void
__kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_teams, kmp_int32 num_threads )
{
    KA_TRACE( 20, ("__kmpc_push_num_teams: enter T#%d num_teams=%d num_threads=%d\n",
      global_tid, num_teams, num_threads ) );

    /* Record num_teams / thread_limit clause values for the coming
     * teams construct. */
    __kmp_push_num_teams( loc, global_tid, num_teams, num_threads );
}
/*!
@ingroup PARALLEL
@param loc source location information
@param argc total number of arguments in the ellipsis
@param microtask pointer to callback routine consisting of outlined teams construct
@param ... pointers to shared variables that aren't global
Do the actual fork and call the microtask in the relevant number of threads.
*/
/* Fork a teams construct: remember the teams entry point and nesting
 * level on the master thread, default the team sizes if the frontend
 * did not push them, then fork/join via the teams-master wrappers. */
void
__kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...)
{
    int         gtid = __kmp_entry_gtid();
    kmp_info_t *this_thr = __kmp_threads[ gtid ];
    va_list     ap;
    va_start(   ap, microtask );

    KMP_COUNT_BLOCK(OMP_TEAMS);

    // remember teams entry point and nesting level
    this_thr->th.th_teams_microtask = microtask;
    this_thr->th.th_teams_level = this_thr->th.th_team->t.t_level; // AC: can be >0 on host

#if OMPT_SUPPORT
    kmp_team_t *parent_team = this_thr->th.th_team;
    int tid = __kmp_tid_from_gtid( gtid );
    if (ompt_enabled) {
        parent_team->t.t_implicit_task_taskdata[tid].
            ompt_task_info.frame.reenter_runtime_frame = __builtin_frame_address(0);
    }
#endif

    // check if __kmpc_push_num_teams called, set default number of teams otherwise
    if ( this_thr->th.th_teams_size.nteams == 0 ) {
        __kmp_push_num_teams( loc, gtid, 0, 0 );
    }
    KMP_DEBUG_ASSERT(this_thr->th.th_set_nproc >= 1);
    KMP_DEBUG_ASSERT(this_thr->th.th_teams_size.nteams >= 1);
    KMP_DEBUG_ASSERT(this_thr->th.th_teams_size.nth >= 1);

    __kmp_fork_call( loc, gtid, fork_context_intel,
            argc,
#if OMPT_SUPPORT
            VOLATILE_CAST(void *)      microtask,           // "unwrapped" task
#endif
            VOLATILE_CAST(microtask_t) __kmp_teams_master,  // "wrapped" task
            VOLATILE_CAST(launch_t)    __kmp_invoke_teams_master,
#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
            &ap
#else
            ap
#endif
            );
    __kmp_join_call( loc, gtid
#if OMPT_SUPPORT
        , fork_context_intel
#endif
    );

#if OMPT_SUPPORT
    if (ompt_enabled) {
        parent_team->t.t_implicit_task_taskdata[tid].
            ompt_task_info.frame.reenter_runtime_frame = NULL;
    }
#endif

    /* teams construct done: reset the master's teams bookkeeping
     * (th_teams_size is cleared via a single 64-bit store) */
    this_thr->th.th_teams_microtask = NULL;
    this_thr->th.th_teams_level = 0;
    *(kmp_int64*)(&this_thr->th.th_teams_size) = 0L;
    va_end( ap );
}
#endif /* OMP_40_ENABLED */
//
// I don't think this function should ever have been exported.
// The __kmpc_ prefix was misapplied. I'm fairly certain that no generated
// openmp code ever called it, but it's been exported from the RTL for so
// long that I'm afraid to remove the definition.
//
int
__kmpc_invoke_task_func( int gtid )
{
    /* Legacy export kept only for binary compatibility; forwards
     * directly to the internal implementation. */
    return __kmp_invoke_task_func( gtid );
}
/*!
@ingroup PARALLEL
@param loc source location information
@param global_tid global thread number
Enter a serialized parallel construct. This interface is used to handle a
conditional parallel region, like this,
@code
#pragma omp parallel if (condition)
@endcode
when the condition is false.
*/
void
__kmpc_serialized_parallel(ident_t *loc, kmp_int32 global_tid)
{
    /* The implementation lives in kmp_runtime.c so it can share static
     * helpers with __kmp_fork_call; the tasks to be done are similar in
     * each case. */
    __kmp_serialized_parallel(loc, global_tid);
}
/*!
@ingroup PARALLEL
@param loc source location information
@param global_tid global thread number
Leave a serialized parallel construct.
*/
/* Leave a serialized parallel region: wait for proxy tasks, pop ICVs
 * and the serialized dispatch buffer, decrement the nesting count, and
 * — when leaving the outermost serialized level — restore the thread's
 * cached team state from the parent team. */
void
__kmpc_end_serialized_parallel(ident_t *loc, kmp_int32 global_tid)
{
    kmp_internal_control_t *top;
    kmp_info_t *this_thr;
    kmp_team_t *serial_team;

    KC_TRACE( 10, ("__kmpc_end_serialized_parallel: called by T#%d\n", global_tid ) );

    /* skip all this code for autopar serialized loops since it results in
       unacceptable overhead */
    if( loc != NULL && (loc->flags & KMP_IDENT_AUTOPAR ) )
        return;

    // Not autopar code
    if( ! TCR_4( __kmp_init_parallel ) )
        __kmp_parallel_initialize();

    this_thr    = __kmp_threads[ global_tid ];
    serial_team = this_thr->th.th_serial_team;

#if OMP_41_ENABLED
    kmp_task_team_t * task_team = this_thr->th.th_task_team;

    // we need to wait for the proxy tasks before finishing the thread
    if ( task_team != NULL && task_team->tt.tt_found_proxy_tasks )
        __kmp_task_team_wait(this_thr, serial_team, NULL ); // is an ITT object needed here?
#endif

    KMP_MB();
    KMP_DEBUG_ASSERT( serial_team );
    KMP_ASSERT(       serial_team -> t.t_serialized );
    KMP_DEBUG_ASSERT( this_thr -> th.th_team == serial_team );
    KMP_DEBUG_ASSERT( serial_team != this_thr->th.th_root->r.r_root_team );
    KMP_DEBUG_ASSERT( serial_team -> t.t_threads );
    KMP_DEBUG_ASSERT( serial_team -> t.t_threads[0] == this_thr );

    /* If necessary, pop the internal control stack values and replace the team values */
    top = serial_team -> t.t_control_stack_top;
    if ( top && top -> serial_nesting_level == serial_team -> t.t_serialized ) {
        copy_icvs( &serial_team -> t.t_threads[0] -> th.th_current_task -> td_icvs, top );
        serial_team -> t.t_control_stack_top = top -> next;
        __kmp_free(top);
    }

    //if( serial_team -> t.t_serialized > 1 )
    serial_team -> t.t_level--;

    /* pop dispatch buffers stack */
    KMP_DEBUG_ASSERT(serial_team->t.t_dispatch->th_disp_buffer);
    {
        dispatch_private_info_t * disp_buffer = serial_team->t.t_dispatch->th_disp_buffer;
        serial_team->t.t_dispatch->th_disp_buffer =
            serial_team->t.t_dispatch->th_disp_buffer->next;
        __kmp_free( disp_buffer );
    }

    -- serial_team -> t.t_serialized;
    if ( serial_team -> t.t_serialized == 0 ) {

        /* return to the parallel section */

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
        /* restore FP control state saved when the region was serialized */
        if ( __kmp_inherit_fp_control && serial_team->t.t_fp_control_saved ) {
            __kmp_clear_x87_fpu_status_word();
            __kmp_load_x87_fpu_control_word( &serial_team->t.t_x87_fpu_control_word );
            __kmp_load_mxcsr( &serial_team->t.t_mxcsr );
        }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

        this_thr -> th.th_team           = serial_team -> t.t_parent;
        this_thr -> th.th_info.ds.ds_tid = serial_team -> t.t_master_tid;

        /* restore values cached in the thread */
        this_thr -> th.th_team_nproc     = serial_team -> t.t_parent -> t.t_nproc;          /*  JPH */
        this_thr -> th.th_team_master    = serial_team -> t.t_parent -> t.t_threads[0];     /* JPH */
        this_thr -> th.th_team_serialized = this_thr -> th.th_team -> t.t_serialized;

        /* TODO the below shouldn't need to be adjusted for serialized teams */
        this_thr -> th.th_dispatch       = & this_thr -> th.th_team ->
            t.t_dispatch[ serial_team -> t.t_master_tid ];

        __kmp_pop_current_task_from_thread( this_thr );

        KMP_ASSERT( this_thr -> th.th_current_task -> td_flags.executing == 0 );
        this_thr -> th.th_current_task -> td_flags.executing = 1;

        if ( __kmp_tasking_mode != tskm_immediate_exec ) {
            // Copy the task team from the new child / old parent team to the thread.
            this_thr->th.th_task_team = this_thr->th.th_team->t.t_task_team[this_thr->th.th_task_state];
            KA_TRACE( 20, ( "__kmpc_end_serialized_parallel: T#%d restoring task_team %p / team %p\n",
                            global_tid, this_thr->th.th_task_team, this_thr->th.th_team ) );
        }
    } else {
        if ( __kmp_tasking_mode != tskm_immediate_exec ) {
            KA_TRACE( 20, ( "__kmpc_end_serialized_parallel: T#%d decreasing nesting depth of serial team %p to %d\n",
                            global_tid, serial_team, serial_team -> t.t_serialized ) );
        }
    }

    if ( __kmp_env_consistency_check )
        __kmp_pop_parallel( global_tid, NULL );
}
/*!
@ingroup SYNCHRONIZATION
@param loc source location information.
Execute <tt>flush</tt>. This is implemented as a full memory fence. (Though
depending on the memory ordering convention obeyed by the compiler
even that may not be necessary).
*/
/* Implement the OpenMP flush as a full memory fence, plus an
 * architecture-specific hardware fence where the plain KMP_MB() is not
 * sufficient (see per-arch notes below). */
void
__kmpc_flush(ident_t *loc)
{
    KC_TRACE( 10, ("__kmpc_flush: called\n" ) );

    /* need explicit __mf() here since use volatile instead in library */
    KMP_MB();               /* Flush all pending memory write invalidates.  */

    #if ( KMP_ARCH_X86 || KMP_ARCH_X86_64 )
        #if KMP_MIC
            // fence-style instructions do not exist, but lock; xaddl $0,(%rsp) can be used.
            // We shouldn't need it, though, since the ABI rules require that
            // * If the compiler generates NGO stores it also generates the fence
            // * If users hand-code NGO stores they should insert the fence
            // therefore no incomplete unordered stores should be visible.
        #else
            // C74404
            // This is to address non-temporal store instructions (sfence needed).
            // The clflush instruction is addressed either (mfence needed).
            // Probably the non-temporal load monvtdqa instruction should also be addressed.
            // mfence is a SSE2 instruction. Do not execute it if CPU is not SSE2.
            if ( ! __kmp_cpuinfo.initialized ) {
                __kmp_query_cpuid( & __kmp_cpuinfo );
            }; // if
            if ( ! __kmp_cpuinfo.sse2 ) {
                // CPU cannot execute SSE2 instructions.
            } else {
                #if KMP_COMPILER_ICC || KMP_COMPILER_MSVC
                _mm_mfence();
                #else
                __sync_synchronize();
                #endif // KMP_COMPILER_ICC
            }; // if
        #endif // KMP_MIC
    #elif (KMP_ARCH_ARM || KMP_ARCH_AARCH64)
        // Nothing to see here move along
    #elif KMP_ARCH_PPC64
        // Nothing needed here (we have a real MB above).
        #if KMP_OS_CNK
        // The flushing thread needs to yield here; this prevents a
        // busy-waiting thread from saturating the pipeline. flush is
        // often used in loops like this:
        // while (!flag) {
        //   #pragma omp flush(flag)
        // }
        // and adding the yield here is good for at least a 10x speedup
        // when running >2 threads per core (on the NAS LU benchmark).
        __kmp_yield(TRUE);
        #endif
    #else
        #error Unknown or unsupported architecture
    #endif
}
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid thread id.
Execute a barrier.
*/
/* Execute an explicit or work-sharing barrier for the calling thread,
 * with optional consistency checking of the construct nesting. */
void
__kmpc_barrier(ident_t *loc, kmp_int32 global_tid)
{
    KMP_COUNT_BLOCK(OMP_BARRIER);
    KMP_TIME_BLOCK(OMP_barrier);
    KC_TRACE( 10, ("__kmpc_barrier: called T#%d\n", global_tid ) );

    if (! TCR_4(__kmp_init_parallel))
        __kmp_parallel_initialize();

    if ( __kmp_env_consistency_check ) {
        if ( loc == 0 ) {
            KMP_WARNING( ConstructIdentInvalid ); // ??? What does it mean for the user?
        }; // if

        __kmp_check_barrier( global_tid, ct_barrier, loc );
    }

    /* record the location for tools/debuggers before blocking */
    __kmp_threads[ global_tid ]->th.th_ident = loc;
    // TODO: explicit barrier_wait_id:
    //   this function is called when 'barrier' directive is present or
    //   implicit barrier at the end of a worksharing construct.
    // 1) better to add a per-thread barrier counter to a thread data structure
    // 2) set to 0 when a new team is created
    // 4) no sync is required

    __kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );
}
/* The BARRIER for a MASTER section is always explicit */
/*!
@ingroup WORK_SHARING
@param loc source location information.
@param global_tid global thread number .
@return 1 if this thread should execute the <tt>master</tt> block, 0 otherwise.
*/
/* Returns 1 if the calling thread is the team master (and should run
 * the master block), 0 otherwise; fires OMPT callbacks and consistency
 * checks as configured. */
kmp_int32
__kmpc_master(ident_t *loc, kmp_int32 global_tid)
{
    KMP_COUNT_BLOCK(OMP_MASTER);
    int status = 0;

    KC_TRACE( 10, ("__kmpc_master: called T#%d\n", global_tid ) );

    if( ! TCR_4( __kmp_init_parallel ) )
        __kmp_parallel_initialize();

    if( KMP_MASTER_GTID( global_tid )) {
        /* only the master's timer runs while the block executes */
        KMP_START_EXPLICIT_TIMER(OMP_master);
        status = 1;
    }

#if OMPT_SUPPORT && OMPT_TRACE
    if (status) {
        if (ompt_enabled &&
            ompt_callbacks.ompt_callback(ompt_event_master_begin)) {
            kmp_info_t  *this_thr        = __kmp_threads[ global_tid ];
            kmp_team_t  *team            = this_thr -> th.th_team;

            int  tid = __kmp_tid_from_gtid( global_tid );
            ompt_callbacks.ompt_callback(ompt_event_master_begin)(
                team->t.ompt_team_info.parallel_id,
                team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id);
        }
    }
#endif

    if ( __kmp_env_consistency_check ) {
        /* master blocks have no explicit barrier, hence push_sync/check_sync */
#if KMP_USE_DYNAMIC_LOCK
        if (status)
            __kmp_push_sync( global_tid, ct_master, loc, NULL, 0 );
        else
            __kmp_check_sync( global_tid, ct_master, loc, NULL, 0 );
#else
        if (status)
            __kmp_push_sync( global_tid, ct_master, loc, NULL );
        else
            __kmp_check_sync( global_tid, ct_master, loc, NULL );
#endif
    }

    return status;
}
/*!
@ingroup WORK_SHARING
@param loc source location information.
@param global_tid global thread number .
Mark the end of a <tt>master</tt> region. This should only be called by the thread
that executes the <tt>master</tt> region.
*/
/* Mark the end of a master region; must be called only by the thread
 * that executed the region. */
void
__kmpc_end_master(ident_t *loc, kmp_int32 global_tid)
{
    KC_TRACE( 10, ("__kmpc_end_master: called T#%d\n", global_tid ) );

    KMP_DEBUG_ASSERT( KMP_MASTER_GTID( global_tid ));
    KMP_STOP_EXPLICIT_TIMER(OMP_master);

#if OMPT_SUPPORT && OMPT_TRACE
    /* NOTE(review): __kmp_threads is indexed here before the
     * global_tid < 0 sanity check below -- relies on the assert above
     * in debug builds; confirm callers never pass a negative tid. */
    kmp_info_t  *this_thr        = __kmp_threads[ global_tid ];
    kmp_team_t  *team            = this_thr -> th.th_team;
    if (ompt_enabled &&
        ompt_callbacks.ompt_callback(ompt_event_master_end)) {
        int  tid = __kmp_tid_from_gtid( global_tid );
        ompt_callbacks.ompt_callback(ompt_event_master_end)(
            team->t.ompt_team_info.parallel_id,
            team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id);
    }
#endif

    if ( __kmp_env_consistency_check ) {
        if( global_tid < 0 )
            KMP_WARNING( ThreadIdentInvalid );

        if( KMP_MASTER_GTID( global_tid ))
            __kmp_pop_sync( global_tid, ct_master, loc );
    }
}
/*!
@ingroup WORK_SHARING
@param loc source location information.
@param gtid global thread number.
Start execution of an <tt>ordered</tt> construct.
*/
void
__kmpc_ordered( ident_t * loc, kmp_int32 gtid )
{
int cid = 0;
kmp_info_t *th;
KMP_DEBUG_ASSERT( __kmp_init_serial );
KC_TRACE( 10, ("__kmpc_ordered: called T#%d\n", gtid ));
if (! TCR_4(__kmp_init_parallel))
__kmp_parallel_initialize();
#if USE_ITT_BUILD
__kmp_itt_ordered_prep( gtid );
// TODO: ordered_wait_id
#endif /* USE_ITT_BUILD */
th = __kmp_threads[ gtid ];
#if OMPT_SUPPORT && OMPT_TRACE
/* Report that this thread is about to wait for its turn in the
ordered region; the wait id doubles as the construct's identity. */
if (ompt_enabled) {
/* OMPT state update */
th->th.ompt_thread_info.wait_id = (uint64_t) loc;
th->th.ompt_thread_info.state = ompt_state_wait_ordered;
/* OMPT event callback */
if (ompt_callbacks.ompt_callback(ompt_event_wait_ordered)) {
ompt_callbacks.ompt_callback(ompt_event_wait_ordered)(
th->th.ompt_thread_info.wait_id);
}
}
#endif
/* Dispatch through the schedule-specific "deo" (dispatch-enter-ordered)
hook when one is installed; otherwise use the generic path. */
if ( th -> th.th_dispatch -> th_deo_fcn != 0 )
(*th->th.th_dispatch->th_deo_fcn)( & gtid, & cid, loc );
else
__kmp_parallel_deo( & gtid, & cid, loc );
#if OMPT_SUPPORT && OMPT_TRACE
if (ompt_enabled) {
/* OMPT state update */
th->th.ompt_thread_info.state = ompt_state_work_parallel;
th->th.ompt_thread_info.wait_id = 0;
/* OMPT event callback */
if (ompt_callbacks.ompt_callback(ompt_event_acquired_ordered)) {
ompt_callbacks.ompt_callback(ompt_event_acquired_ordered)(
th->th.ompt_thread_info.wait_id);
}
}
#endif
#if USE_ITT_BUILD
__kmp_itt_ordered_start( gtid );
#endif /* USE_ITT_BUILD */
}
/*!
@ingroup WORK_SHARING
@param loc source location information.
@param gtid global thread number.
End execution of an <tt>ordered</tt> construct.
*/
void
__kmpc_end_ordered( ident_t * loc, kmp_int32 gtid )
{
int cid = 0;
kmp_info_t *th;
KC_TRACE( 10, ("__kmpc_end_ordered: called T#%d\n", gtid ) );
#if USE_ITT_BUILD
__kmp_itt_ordered_end( gtid );
// TODO: ordered_wait_id
#endif /* USE_ITT_BUILD */
th = __kmp_threads[ gtid ];
/* Mirror of __kmpc_ordered: use the schedule-specific "dxo"
(dispatch-exit-ordered) hook when installed. */
if ( th -> th.th_dispatch -> th_dxo_fcn != 0 )
(*th->th.th_dispatch->th_dxo_fcn)( & gtid, & cid, loc );
else
__kmp_parallel_dxo( & gtid, & cid, loc );
#if OMPT_SUPPORT && OMPT_BLAME
if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_release_ordered)) {
ompt_callbacks.ompt_callback(ompt_event_release_ordered)(
th->th.ompt_thread_info.wait_id);
}
#endif
}
#if KMP_USE_DYNAMIC_LOCK
static __forceinline void
__kmp_init_indirect_csptr(kmp_critical_name * crit, ident_t const * loc, kmp_int32 gtid, kmp_indirect_locktag_t tag)
{
// Pointer to the allocated indirect lock is written to crit, while indexing is ignored.
void *idx;
kmp_indirect_lock_t **lck;
lck = (kmp_indirect_lock_t **)crit;
kmp_indirect_lock_t *ilk = __kmp_allocate_indirect_lock(&idx, gtid, tag);
KMP_I_LOCK_FUNC(ilk, init)(ilk->lock);
KMP_SET_I_LOCK_LOCATION(ilk, loc);
KMP_SET_I_LOCK_FLAGS(ilk, kmp_lf_critical_section);
KA_TRACE(20, ("__kmp_init_indirect_csptr: initialized indirect lock #%d\n", tag));
#if USE_ITT_BUILD
__kmp_itt_critical_creating(ilk->lock, loc);
#endif
/* Publish the lock with a single CAS; a losing race (status == 0)
means another thread already installed its own lock in crit. */
int status = KMP_COMPARE_AND_STORE_PTR(lck, 0, ilk);
if (status == 0) {
#if USE_ITT_BUILD
__kmp_itt_critical_destroyed(ilk->lock);
#endif
// We don't really need to destroy the unclaimed lock here since it will be cleaned up at program exit.
//KMP_D_LOCK_FUNC(&idx, destroy)((kmp_dyna_lock_t *)&idx);
}
KMP_DEBUG_ASSERT(*lck != NULL);
}
// Fast-path acquire tas lock: one CAS attempt, then spin with yield/backoff
// until the lock reads free and the CAS succeeds. Stores gtid+1 so that
// gtid 0 is distinguishable from the free value.
#define KMP_ACQUIRE_TAS_LOCK(lock, gtid) { \
kmp_tas_lock_t *l = (kmp_tas_lock_t *)lock; \
if (l->lk.poll != KMP_LOCK_FREE(tas) || \
! KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas))) { \
kmp_uint32 spins; \
KMP_FSYNC_PREPARE(l); \
KMP_INIT_YIELD(spins); \
if (TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) { \
KMP_YIELD(TRUE); \
} else { \
KMP_YIELD_SPIN(spins); \
} \
kmp_backoff_t backoff = __kmp_spin_backoff_params; \
while (l->lk.poll != KMP_LOCK_FREE(tas) || \
! KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas))) { \
__kmp_spin_backoff(&backoff); \
if (TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) { \
KMP_YIELD(TRUE); \
} else { \
KMP_YIELD_SPIN(spins); \
} \
} \
} \
KMP_FSYNC_ACQUIRED(l); \
}
// Fast-path test tas lock: single non-blocking CAS attempt; rc reports success.
#define KMP_TEST_TAS_LOCK(lock, gtid, rc) { \
kmp_tas_lock_t *l = (kmp_tas_lock_t *)lock; \
rc = l->lk.poll == KMP_LOCK_FREE(tas) && \
KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas)); \
}
// Fast-path release tas lock: plain store of the free value plus a fence.
#define KMP_RELEASE_TAS_LOCK(lock, gtid) { \
TCW_4(((kmp_tas_lock_t *)lock)->lk.poll, KMP_LOCK_FREE(tas)); \
KMP_MB(); \
}
#if KMP_USE_FUTEX
# include <unistd.h>
# include <sys/syscall.h>
# ifndef FUTEX_WAIT
# define FUTEX_WAIT 0
# endif
# ifndef FUTEX_WAKE
# define FUTEX_WAKE 1
# endif
// Fast-path acquire futex lock. Encoding: poll value is (gtid+1)<<1; the
// low bit marks "contended, sleepers may exist". On contention the holder's
// value is tagged with that bit before the thread sleeps in FUTEX_WAIT.
#define KMP_ACQUIRE_FUTEX_LOCK(lock, gtid) { \
kmp_futex_lock_t *ftx = (kmp_futex_lock_t *)lock; \
kmp_int32 gtid_code = (gtid+1) << 1; \
KMP_MB(); \
KMP_FSYNC_PREPARE(ftx); \
kmp_int32 poll_val; \
while ((poll_val = KMP_COMPARE_AND_STORE_RET32(&(ftx->lk.poll), KMP_LOCK_FREE(futex), \
KMP_LOCK_BUSY(gtid_code, futex))) != KMP_LOCK_FREE(futex)) { \
kmp_int32 cond = KMP_LOCK_STRIP(poll_val) & 1; \
if (!cond) { \
if (!KMP_COMPARE_AND_STORE_RET32(&(ftx->lk.poll), poll_val, poll_val | KMP_LOCK_BUSY(1, futex))) { \
continue; \
} \
poll_val |= KMP_LOCK_BUSY(1, futex); \
} \
kmp_int32 rc; \
if ((rc = syscall(__NR_futex, &(ftx->lk.poll), FUTEX_WAIT, poll_val, NULL, NULL, 0)) != 0) { \
continue; \
} \
gtid_code |= 1; \
} \
KMP_FSYNC_ACQUIRED(ftx); \
}
// Fast-path test futex lock: one CAS attempt, no blocking; rc reports success.
#define KMP_TEST_FUTEX_LOCK(lock, gtid, rc) { \
kmp_futex_lock_t *ftx = (kmp_futex_lock_t *)lock; \
if (KMP_COMPARE_AND_STORE_ACQ32(&(ftx->lk.poll), KMP_LOCK_FREE(futex), KMP_LOCK_BUSY(gtid+1, futex) << 1)) { \
KMP_FSYNC_ACQUIRED(ftx); \
rc = TRUE; \
} else { \
rc = FALSE; \
} \
}
// Fast-path release futex lock: swap in the free value; if the contended
// bit was set, wake one sleeper via FUTEX_WAKE.
#define KMP_RELEASE_FUTEX_LOCK(lock, gtid) { \
kmp_futex_lock_t *ftx = (kmp_futex_lock_t *)lock; \
KMP_MB(); \
KMP_FSYNC_RELEASING(ftx); \
kmp_int32 poll_val = KMP_XCHG_FIXED32(&(ftx->lk.poll), KMP_LOCK_FREE(futex)); \
if (KMP_LOCK_STRIP(poll_val) & 1) { \
syscall(__NR_futex, &(ftx->lk.poll), FUTEX_WAKE, KMP_LOCK_BUSY(1, futex), NULL, NULL, 0); \
} \
KMP_MB(); \
KMP_YIELD(TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)); \
}
#endif // KMP_USE_FUTEX
#else // KMP_USE_DYNAMIC_LOCK
/* Return the user lock backing a critical section, allocating and
publishing one on first use (double-checked with a CAS on *crit). */
static kmp_user_lock_p
__kmp_get_critical_section_ptr( kmp_critical_name * crit, ident_t const * loc, kmp_int32 gtid )
{
kmp_user_lock_p *lck_pp = (kmp_user_lock_p *)crit;
//
// Because of the double-check, the following load
// doesn't need to be volatile.
//
kmp_user_lock_p lck = (kmp_user_lock_p)TCR_PTR( *lck_pp );
if ( lck == NULL ) {
void * idx;
// Allocate & initialize the lock.
// Remember allocated locks in table in order to free them in __kmp_cleanup()
lck = __kmp_user_lock_allocate( &idx, gtid, kmp_lf_critical_section );
__kmp_init_user_lock_with_checks( lck );
__kmp_set_user_lock_location( lck, loc );
#if USE_ITT_BUILD
__kmp_itt_critical_creating( lck );
// __kmp_itt_critical_creating() should be called *before* the first usage of underlying
// lock. It is the only place where we can guarantee it. There are chances the lock will
// be destroyed with no usage, but it is not a problem, because this is not a real event
// seen by the user but rather setting a name for the object (lock). See more details in kmp_itt.h.
#endif /* USE_ITT_BUILD */
//
// Use a cmpxchg instruction to slam the start of the critical
// section with the lock pointer. If another thread beat us
// to it, deallocate the lock, and use the lock that the other
// thread allocated.
//
int status = KMP_COMPARE_AND_STORE_PTR( lck_pp, 0, lck );
if ( status == 0 ) {
// Deallocate the lock and reload the value.
#if USE_ITT_BUILD
__kmp_itt_critical_destroyed( lck );
// Let ITT know the lock is destroyed and the same memory location may be reused for
// another purpose.
#endif /* USE_ITT_BUILD */
__kmp_destroy_user_lock_with_checks( lck );
__kmp_user_lock_free( &idx, gtid, lck );
lck = (kmp_user_lock_p)TCR_PTR( *lck_pp );
KMP_DEBUG_ASSERT( lck != NULL );
}
}
return lck;
}
#endif // KMP_USE_DYNAMIC_LOCK
/*!
@ingroup WORK_SHARING
@param loc source location information.
@param global_tid global thread number.
@param crit identity of the critical section. This could be a pointer to a lock associated with the critical section, or
some other suitably unique value.
Enter code protected by a `critical` construct.
This function blocks until the executing thread can enter the critical section.
*/
void
__kmpc_critical( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit )
{
#if KMP_USE_DYNAMIC_LOCK
/* Dynamic-lock builds route through the hinted entry with no hint. */
__kmpc_critical_with_hint(loc, global_tid, crit, omp_lock_hint_none);
#else
KMP_COUNT_BLOCK(OMP_CRITICAL);
kmp_user_lock_p lck;
KC_TRACE( 10, ("__kmpc_critical: called T#%d\n", global_tid ) );
//TODO: add THR_OVHD_STATE
KMP_CHECK_USER_LOCK_INIT();
/* Small lock kinds (tas/futex) fit inside crit itself; anything
larger goes through the critical-section lock table. */
if ( ( __kmp_user_lock_kind == lk_tas )
&& ( sizeof( lck->tas.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
lck = (kmp_user_lock_p)crit;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
lck = (kmp_user_lock_p)crit;
}
#endif
else { // ticket, queuing or drdpa
lck = __kmp_get_critical_section_ptr( crit, loc, global_tid );
}
if ( __kmp_env_consistency_check )
__kmp_push_sync( global_tid, ct_critical, loc, lck );
/* since the critical directive binds to all threads, not just
* the current team we have to check this even if we are in a
* serialized team */
/* also, even if we are the uber thread, we still have to conduct the lock,
* as we have to contend with sibling threads */
#if USE_ITT_BUILD
__kmp_itt_critical_acquiring( lck );
#endif /* USE_ITT_BUILD */
// Value of 'crit' should be good for using as a critical_id of the critical section directive.
__kmp_acquire_user_lock_with_checks( lck, global_tid );
#if USE_ITT_BUILD
__kmp_itt_critical_acquired( lck );
#endif /* USE_ITT_BUILD */
KA_TRACE( 15, ("__kmpc_critical: done T#%d\n", global_tid ));
#endif // KMP_USE_DYNAMIC_LOCK
}
#if KMP_USE_DYNAMIC_LOCK
// Converts the given hint to an internal lock implementation.
// The checks are a priority chain: kmp-specific TSX hints first, then
// conflicting-hint rejection, then individual OpenMP hints. Order matters.
static __forceinline kmp_dyna_lockseq_t
__kmp_map_hint_to_lock(uintptr_t hint)
{
#if KMP_USE_TSX
# define KMP_TSX_LOCK(seq) lockseq_##seq
#else
// Without TSX support every speculative request degrades to the default.
# define KMP_TSX_LOCK(seq) __kmp_user_lock_seq
#endif
// Hints that do not require further logic
if (hint & kmp_lock_hint_hle)
return KMP_TSX_LOCK(hle);
if (hint & kmp_lock_hint_rtm)
return (__kmp_cpuinfo.rtm)? KMP_TSX_LOCK(rtm): __kmp_user_lock_seq;
if (hint & kmp_lock_hint_adaptive)
return (__kmp_cpuinfo.rtm)? KMP_TSX_LOCK(adaptive): __kmp_user_lock_seq;
// Rule out conflicting hints first by returning the default lock
if ((hint & omp_lock_hint_contended) && (hint & omp_lock_hint_uncontended))
return __kmp_user_lock_seq;
if ((hint & omp_lock_hint_speculative) && (hint & omp_lock_hint_nonspeculative))
return __kmp_user_lock_seq;
// Do not even consider speculation when it appears to be contended
if (hint & omp_lock_hint_contended)
return lockseq_queuing;
// Uncontended lock without speculation
if ((hint & omp_lock_hint_uncontended) && !(hint & omp_lock_hint_speculative))
return lockseq_tas;
// HLE lock for speculation
if (hint & omp_lock_hint_speculative)
return KMP_TSX_LOCK(hle);
return __kmp_user_lock_seq;
}
/*!
@ingroup WORK_SHARING
@param loc source location information.
@param global_tid global thread number.
@param crit identity of the critical section. This could be a pointer to a lock associated with the critical section,
or some other suitably unique value.
@param hint the lock hint.
Enter code protected by a `critical` construct with a hint. The hint value is used to suggest a lock implementation.
This function blocks until the executing thread can enter the critical section unless the hint suggests use of
speculative execution and the hardware supports it.
*/
void
__kmpc_critical_with_hint( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit, uintptr_t hint )
{
KMP_COUNT_BLOCK(OMP_CRITICAL);
kmp_user_lock_p lck;
KC_TRACE( 10, ("__kmpc_critical: called T#%d\n", global_tid ) );
kmp_dyna_lock_t *lk = (kmp_dyna_lock_t *)crit;
// Check if it is initialized.
if (*lk == 0) {
kmp_dyna_lockseq_t lckseq = __kmp_map_hint_to_lock(hint);
if (KMP_IS_D_LOCK(lckseq)) {
/* Direct lock: the tag lives inline in crit; CAS publishes it. */
KMP_COMPARE_AND_STORE_ACQ32((volatile kmp_int32 *)crit, 0, KMP_GET_D_TAG(lckseq));
} else {
__kmp_init_indirect_csptr(crit, loc, global_tid, KMP_GET_I_TAG(lckseq));
}
}
// Branch for accessing the actual lock object and set operation. This branching is inevitable since
// this lock initialization does not follow the normal dispatch path (lock table is not used).
if (KMP_EXTRACT_D_TAG(lk) != 0) {
lck = (kmp_user_lock_p)lk;
if (__kmp_env_consistency_check) {
__kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_map_hint_to_lock(hint));
}
# if USE_ITT_BUILD
__kmp_itt_critical_acquiring(lck);
# endif
# if KMP_USE_INLINED_TAS
// NOTE(review): the inlined fast path keys off the global
// __kmp_user_lock_seq, not the hint-mapped sequence stored in this
// lock — confirm that hinted non-default locks can't reach here.
if (__kmp_user_lock_seq == lockseq_tas && !__kmp_env_consistency_check) {
KMP_ACQUIRE_TAS_LOCK(lck, global_tid);
} else
# elif KMP_USE_INLINED_FUTEX
if (__kmp_user_lock_seq == lockseq_futex && !__kmp_env_consistency_check) {
KMP_ACQUIRE_FUTEX_LOCK(lck, global_tid);
} else
# endif
{
KMP_D_LOCK_FUNC(lk, set)(lk, global_tid);
}
} else {
kmp_indirect_lock_t *ilk = *((kmp_indirect_lock_t **)lk);
lck = ilk->lock;
if (__kmp_env_consistency_check) {
__kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_map_hint_to_lock(hint));
}
# if USE_ITT_BUILD
__kmp_itt_critical_acquiring(lck);
# endif
KMP_I_LOCK_FUNC(ilk, set)(lck, global_tid);
}
#if USE_ITT_BUILD
__kmp_itt_critical_acquired( lck );
#endif /* USE_ITT_BUILD */
KA_TRACE( 15, ("__kmpc_critical: done T#%d\n", global_tid ));
} // __kmpc_critical_with_hint
#endif // KMP_USE_DYNAMIC_LOCK
/*!
@ingroup WORK_SHARING
@param loc source location information.
@param global_tid global thread number.
@param crit identity of the critical section. This could be a pointer to a lock associated with the critical section, or
some other suitably unique value.
Leave a critical section, releasing any lock that was held during its execution.
*/
void
__kmpc_end_critical(ident_t *loc, kmp_int32 global_tid, kmp_critical_name *crit)
{
kmp_user_lock_p lck;
KC_TRACE( 10, ("__kmpc_end_critical: called T#%d\n", global_tid ));
#if KMP_USE_DYNAMIC_LOCK
// NOTE(review): direct-vs-indirect is decided from the global
// __kmp_user_lock_seq rather than from the tag stored in *crit —
// verify this matches the classification done on the acquire side.
if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) {
lck = (kmp_user_lock_p)crit;
KMP_ASSERT(lck != NULL);
if (__kmp_env_consistency_check) {
__kmp_pop_sync(global_tid, ct_critical, loc);
}
# if USE_ITT_BUILD
__kmp_itt_critical_releasing( lck );
# endif
# if KMP_USE_INLINED_TAS
if (__kmp_user_lock_seq == lockseq_tas && !__kmp_env_consistency_check) {
KMP_RELEASE_TAS_LOCK(lck, global_tid);
} else
# elif KMP_USE_INLINED_FUTEX
if (__kmp_user_lock_seq == lockseq_futex && !__kmp_env_consistency_check) {
KMP_RELEASE_FUTEX_LOCK(lck, global_tid);
} else
# endif
{
KMP_D_LOCK_FUNC(lck, unset)((kmp_dyna_lock_t *)lck, global_tid);
}
} else {
kmp_indirect_lock_t *ilk = (kmp_indirect_lock_t *)TCR_PTR(*((kmp_indirect_lock_t **)crit));
KMP_ASSERT(ilk != NULL);
lck = ilk->lock;
if (__kmp_env_consistency_check) {
__kmp_pop_sync(global_tid, ct_critical, loc);
}
# if USE_ITT_BUILD
__kmp_itt_critical_releasing( lck );
# endif
KMP_I_LOCK_FUNC(ilk, unset)(lck, global_tid);
}
#else // KMP_USE_DYNAMIC_LOCK
/* Same small-lock-inline-in-crit layout as __kmpc_critical. */
if ( ( __kmp_user_lock_kind == lk_tas )
&& ( sizeof( lck->tas.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
lck = (kmp_user_lock_p)crit;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
lck = (kmp_user_lock_p)crit;
}
#endif
else { // ticket, queuing or drdpa
lck = (kmp_user_lock_p) TCR_PTR(*((kmp_user_lock_p *)crit));
}
KMP_ASSERT(lck != NULL);
if ( __kmp_env_consistency_check )
__kmp_pop_sync( global_tid, ct_critical, loc );
#if USE_ITT_BUILD
__kmp_itt_critical_releasing( lck );
#endif /* USE_ITT_BUILD */
// Value of 'crit' should be good for using as a critical_id of the critical section directive.
__kmp_release_user_lock_with_checks( lck, global_tid );
#if OMPT_SUPPORT && OMPT_BLAME
if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_release_critical)) {
ompt_callbacks.ompt_callback(ompt_event_release_critical)(
(uint64_t) lck);
}
#endif
#endif // KMP_USE_DYNAMIC_LOCK
KA_TRACE( 15, ("__kmpc_end_critical: done T#%d\n", global_tid ));
}
/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid thread id.
@return one if the thread should execute the master block, zero otherwise
Start execution of a combined barrier and master. The barrier is executed inside this function.
*/
/* Combined barrier+master entry point: run the plain barrier here and
   report whether the calling thread is the one that should execute the
   master block (barrier returns 0 for that thread). */
kmp_int32
__kmpc_barrier_master(ident_t *loc, kmp_int32 global_tid)
{
    int rc;

    KC_TRACE( 10, ("__kmpc_barrier_master: called T#%d\n", global_tid ) );

    if (!TCR_4(__kmp_init_parallel))
        __kmp_parallel_initialize();

    if (__kmp_env_consistency_check)
        __kmp_check_barrier(global_tid, ct_barrier, loc);

#if USE_ITT_NOTIFY
    __kmp_threads[global_tid]->th.th_ident = loc;
#endif
    rc = __kmp_barrier( bs_plain_barrier, global_tid, TRUE, 0, NULL, NULL );

    return (rc == 0) ? 1 : 0;
}
/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid thread id.
Complete the execution of a combined barrier and master. This function should
only be called at the completion of the <tt>master</tt> code. Other threads will
still be waiting at the barrier and this call releases them.
*/
void
__kmpc_end_barrier_master(ident_t *loc, kmp_int32 global_tid)
{
KC_TRACE( 10, ("__kmpc_end_barrier_master: called T#%d\n", global_tid ));
/* Release the threads still waiting at the split barrier started by
__kmpc_barrier_master; called only by the master-block thread. */
__kmp_end_split_barrier ( bs_plain_barrier, global_tid );
}
/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid thread id.
@return one if the thread should execute the master block, zero otherwise
Start execution of a combined barrier and master(nowait) construct.
The barrier is executed inside this function.
There is no equivalent "end" function, since the barrier is handled
internally by this call and nothing remains to be done at the end of
the construct.
*/
kmp_int32
__kmpc_barrier_master_nowait( ident_t * loc, kmp_int32 global_tid )
{
kmp_int32 ret;
KC_TRACE( 10, ("__kmpc_barrier_master_nowait: called T#%d\n", global_tid ));
if (! TCR_4(__kmp_init_parallel))
__kmp_parallel_initialize();
if ( __kmp_env_consistency_check ) {
if ( loc == 0 ) {
KMP_WARNING( ConstructIdentInvalid ); // ??? What does it mean for the user?
}
__kmp_check_barrier( global_tid, ct_barrier, loc );
}
#if USE_ITT_NOTIFY
__kmp_threads[global_tid]->th.th_ident = loc;
#endif
/* Non-split barrier first, then the usual master selection. */
__kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );
ret = __kmpc_master (loc, global_tid);
if ( __kmp_env_consistency_check ) {
/* there's no __kmpc_end_master called; so the (stats) */
/* actions of __kmpc_end_master are done here */
if ( global_tid < 0 ) {
KMP_WARNING( ThreadIdentInvalid );
}
if (ret) {
/* only one thread should do the pop since only */
/* one did the push (see __kmpc_master()) */
__kmp_pop_sync( global_tid, ct_master, loc );
}
}
return (ret);
}
/* The BARRIER for a SINGLE process section is always explicit */
/*!
@ingroup WORK_SHARING
@param loc source location information
@param global_tid global thread number
@return One if this thread should execute the single construct, zero otherwise.
Test whether to execute a <tt>single</tt> construct.
There are no implicit barriers in the two "single" calls, rather the compiler should
introduce an explicit barrier if it is required.
*/
kmp_int32
__kmpc_single(ident_t *loc, kmp_int32 global_tid)
{
KMP_COUNT_BLOCK(OMP_SINGLE);
/* TRUE from __kmp_enter_single means this thread won the race and
executes the single block. */
kmp_int32 rc = __kmp_enter_single( global_tid, loc, TRUE );
if(rc == TRUE) {
KMP_START_EXPLICIT_TIMER(OMP_single);
}
#if OMPT_SUPPORT && OMPT_TRACE
kmp_info_t *this_thr = __kmp_threads[ global_tid ];
kmp_team_t *team = this_thr -> th.th_team;
int tid = __kmp_tid_from_gtid( global_tid );
if (ompt_enabled) {
if (rc) {
if (ompt_callbacks.ompt_callback(ompt_event_single_in_block_begin)) {
ompt_callbacks.ompt_callback(ompt_event_single_in_block_begin)(
team->t.ompt_team_info.parallel_id,
team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id,
team->t.ompt_team_info.microtask);
}
} else {
/* Losing threads report the "others" event and are considered
waiting until the construct completes. */
if (ompt_callbacks.ompt_callback(ompt_event_single_others_begin)) {
ompt_callbacks.ompt_callback(ompt_event_single_others_begin)(
team->t.ompt_team_info.parallel_id,
team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id);
}
this_thr->th.ompt_thread_info.state = ompt_state_wait_single;
}
}
#endif
return rc;
}
/*!
@ingroup WORK_SHARING
@param loc source location information
@param global_tid global thread number
Mark the end of a <tt>single</tt> construct. This function should
only be called by the thread that executed the block of code protected
by the `single` construct.
*/
void
__kmpc_end_single(ident_t *loc, kmp_int32 global_tid)
{
/* Called only by the thread that executed the single block. */
__kmp_exit_single( global_tid );
KMP_STOP_EXPLICIT_TIMER(OMP_single);
#if OMPT_SUPPORT && OMPT_TRACE
kmp_info_t *this_thr = __kmp_threads[ global_tid ];
kmp_team_t *team = this_thr -> th.th_team;
int tid = __kmp_tid_from_gtid( global_tid );
if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_single_in_block_end)) {
ompt_callbacks.ompt_callback(ompt_event_single_in_block_end)(
team->t.ompt_team_info.parallel_id,
team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id);
}
#endif
}
/*!
@ingroup WORK_SHARING
@param loc Source location
@param global_tid Global thread id
Mark the end of a statically scheduled loop.
*/
void
__kmpc_for_static_fini( ident_t *loc, kmp_int32 global_tid )
{
KE_TRACE( 10, ("__kmpc_for_static_fini called T#%d\n", global_tid));
#if OMPT_SUPPORT && OMPT_TRACE
if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_loop_end)) {
kmp_info_t *this_thr = __kmp_threads[ global_tid ];
kmp_team_t *team = this_thr -> th.th_team;
int tid = __kmp_tid_from_gtid( global_tid );
ompt_callbacks.ompt_callback(ompt_event_loop_end)(
team->t.ompt_team_info.parallel_id,
team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id);
}
#endif
/* Pop the workshare pushed by the matching static-init call. */
if ( __kmp_env_consistency_check )
__kmp_pop_workshare( global_tid, ct_pdo, loc );
}
/*
* User routines which take C-style arguments (call by value)
* different from the Fortran equivalent routines
*/
/* C-linkage wrapper for omp_set_num_threads(): forward the request to
   the internal setter on behalf of the calling thread. */
void
ompc_set_num_threads(int arg)
{
    // !!!!! TODO: check the per-task binding
    __kmp_set_num_threads(arg, __kmp_entry_gtid());
}
/* C-linkage wrapper for omp_set_dynamic(): store the dyn-var ICV in the
   calling thread's private internal controls. */
void
ompc_set_dynamic(int flag)
{
    kmp_info_t *thread = __kmp_entry_thread();
    __kmp_save_internal_controls(thread);
    set__dynamic(thread, flag ? TRUE : FALSE);
}
/* C-linkage wrapper for omp_set_nested(): store the nest-var ICV in the
   calling thread's private internal controls. */
void
ompc_set_nested(int flag)
{
    kmp_info_t *thread = __kmp_entry_thread();
    __kmp_save_internal_controls(thread);
    set__nested(thread, flag ? TRUE : FALSE);
}
/* C-linkage wrapper for omp_set_max_active_levels(); forwarded to the
   per-thread internal-controls implementation.
   TODO: we want a per-task implementation of this internal control. */
void
ompc_set_max_active_levels(int max_active_levels)
{
    __kmp_set_max_active_levels(__kmp_entry_gtid(), max_active_levels);
}
/* C-linkage wrapper for omp_set_schedule(): translate the public enum to
   the internal one and forward. */
void
ompc_set_schedule(omp_sched_t kind, int modifier)
{
    // !!!!! TODO: check the per-task binding
    __kmp_set_schedule(__kmp_entry_gtid(), (kmp_sched_t)kind, modifier);
}
/* C-linkage wrapper for omp_get_ancestor_thread_num(level). */
int
ompc_get_ancestor_thread_num(int level)
{
    return __kmp_get_ancestor_thread_num(__kmp_entry_gtid(), level);
}
/* C-linkage wrapper for omp_get_team_size(level). */
int
ompc_get_team_size(int level)
{
    return __kmp_get_team_size(__kmp_entry_gtid(), level);
}
/* kmp_set_stacksize() entry point (int variant).
   __kmp_aux_set_stacksize initializes the library if needed. */
void
kmpc_set_stacksize(int arg)
{
    __kmp_aux_set_stacksize(arg);
}
/* kmp_set_stacksize_s() entry point (size_t variant).
   __kmp_aux_set_stacksize initializes the library if needed. */
void
kmpc_set_stacksize_s(size_t arg)
{
    __kmp_aux_set_stacksize(arg);
}
/* kmp_set_blocktime() entry point: resolve the calling thread, then apply
   the requested blocktime to it. */
void
kmpc_set_blocktime(int arg)
{
    int gtid = __kmp_entry_gtid();
    int tid = __kmp_tid_from_gtid(gtid);
    kmp_info_t *thread = __kmp_thread_from_gtid(gtid);

    __kmp_aux_set_blocktime(arg, thread, tid);
}
/* kmp_set_library() entry point.
   __kmp_user_set_library initializes the library if needed. */
void
kmpc_set_library(int arg)
{
    __kmp_user_set_library((enum library_type)arg);
}
/* kmp_set_defaults() entry point.
   __kmp_aux_set_defaults initializes the library if needed. */
void
kmpc_set_defaults(char const *str)
{
    __kmp_aux_set_defaults(str, KMP_STRLEN(str));
}
/* kmp_set_disp_num_buffers() entry point. Effective only before serial
   initialization: teams allocate their dispatch buffers during startup,
   so requests arriving afterwards are ignored. */
void
kmpc_set_disp_num_buffers(int arg)
{
    if (__kmp_init_serial == 0 && arg > 0)
        __kmp_dispatch_num_buffers = arg;
}
/* kmp_set_affinity_mask_proc() entry point: add a processor to an
   affinity mask. Returns -1 when affinity is unsupported on this build. */
int
kmpc_set_affinity_mask_proc(int proc, void **mask)
{
#if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED
    return -1;
#else
    /* Middle initialization brings up the affinity machinery on demand. */
    if (!TCR_4(__kmp_init_middle)) {
        __kmp_middle_initialize();
    }
    return __kmp_aux_set_affinity_mask_proc(proc, mask);
#endif
}
/* kmp_unset_affinity_mask_proc() entry point: remove a processor from an
   affinity mask. Returns -1 when affinity is unsupported on this build. */
int
kmpc_unset_affinity_mask_proc(int proc, void **mask)
{
#if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED
    return -1;
#else
    /* Middle initialization brings up the affinity machinery on demand. */
    if (!TCR_4(__kmp_init_middle)) {
        __kmp_middle_initialize();
    }
    return __kmp_aux_unset_affinity_mask_proc(proc, mask);
#endif
}
/* kmp_get_affinity_mask_proc() entry point: query membership of a
   processor in an affinity mask. Returns -1 when affinity is unsupported. */
int
kmpc_get_affinity_mask_proc(int proc, void **mask)
{
#if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED
    return -1;
#else
    /* Middle initialization brings up the affinity machinery on demand. */
    if (!TCR_4(__kmp_init_middle)) {
        __kmp_middle_initialize();
    }
    return __kmp_aux_get_affinity_mask_proc(proc, mask);
#endif
}
/* -------------------------------------------------------------------------- */
/*!
@ingroup THREADPRIVATE
@param loc source location information
@param gtid global thread number
@param cpy_size size of the cpy_data buffer
@param cpy_data pointer to data to be copied
@param cpy_func helper function to call for copying data
@param didit flag variable: 1=single thread; 0=not single thread
__kmpc_copyprivate implements the interface for the private data broadcast needed for
the copyprivate clause associated with a single region in an OpenMP<sup>*</sup> program (both C and Fortran).
All threads participating in the parallel region call this routine.
One of the threads (called the single thread) should have the <tt>didit</tt> variable set to 1
and all other threads should have that variable set to 0.
All threads pass a pointer to a data buffer (cpy_data) that they have built.
The OpenMP specification forbids the use of nowait on the single region when a copyprivate
clause is present. However, @ref __kmpc_copyprivate implements a barrier internally to avoid
race conditions, so the code generation for the single region should avoid generating a barrier
after the call to @ref __kmpc_copyprivate.
The <tt>gtid</tt> parameter is the global thread id for the current thread.
The <tt>loc</tt> parameter is a pointer to source location information.
Internal implementation: The single thread will first copy its descriptor address (cpy_data)
to a team-private location, then the other threads will each call the function pointed to by
the parameter cpy_func, which carries out the copy by copying the data using the cpy_data buffer.
The cpy_func routine used for the copy and the contents of the data area defined by cpy_data
and cpy_size may be built in any fashion that will allow the copy to be done. For instance,
the cpy_data buffer can hold the actual data to be copied or it may hold a list of pointers
to the data. The cpy_func routine must interpret the cpy_data buffer appropriately.
The interface to cpy_func is as follows:
@code
void cpy_func( void *destination, void *source )
@endcode
where void *destination is the cpy_data pointer for the thread being copied to
and void *source is the cpy_data pointer for the thread being copied from.
*/
void
__kmpc_copyprivate( ident_t *loc, kmp_int32 gtid, size_t cpy_size, void *cpy_data, void(*cpy_func)(void*,void*), kmp_int32 didit )
{
void **data_ptr;
KC_TRACE( 10, ("__kmpc_copyprivate: called T#%d\n", gtid ));
KMP_MB();
/* Team-private slot used to publish the single thread's buffer. */
data_ptr = & __kmp_team_from_gtid( gtid )->t.t_copypriv_data;
if ( __kmp_env_consistency_check ) {
if ( loc == 0 ) {
KMP_WARNING( ConstructIdentInvalid );
}
}
/* ToDo: Optimize the following two barriers into some kind of split barrier */
/* The single thread (didit == 1) publishes its buffer address... */
if (didit) *data_ptr = cpy_data;
/* This barrier is not a barrier region boundary */
#if USE_ITT_NOTIFY
__kmp_threads[gtid]->th.th_ident = loc;
#endif
__kmp_barrier( bs_plain_barrier, gtid, FALSE , 0, NULL, NULL );
/* ...then every other thread copies out of it via cpy_func. */
if (! didit) (*cpy_func)( cpy_data, *data_ptr );
/* Consider next barrier the user-visible barrier for barrier region boundaries */
/* Nesting checks are already handled by the single construct checks */
#if USE_ITT_NOTIFY
__kmp_threads[gtid]->th.th_ident = loc; // TODO: check if it is needed (e.g. tasks can overwrite the location)
#endif
/* Second barrier keeps the source buffer alive until all copies finish. */
__kmp_barrier( bs_plain_barrier, gtid, FALSE , 0, NULL, NULL );
}
/* -------------------------------------------------------------------------- */
/* Map the generic lock entry points used below onto the "with_checks"
   user-lock implementations; consistency checking happens inside these. */
#define INIT_LOCK __kmp_init_user_lock_with_checks
#define INIT_NESTED_LOCK __kmp_init_nested_user_lock_with_checks
#define ACQUIRE_LOCK __kmp_acquire_user_lock_with_checks
#define ACQUIRE_LOCK_TIMED __kmp_acquire_user_lock_with_checks_timed
#define ACQUIRE_NESTED_LOCK __kmp_acquire_nested_user_lock_with_checks
#define ACQUIRE_NESTED_LOCK_TIMED __kmp_acquire_nested_user_lock_with_checks_timed
#define RELEASE_LOCK __kmp_release_user_lock_with_checks
#define RELEASE_NESTED_LOCK __kmp_release_nested_user_lock_with_checks
#define TEST_LOCK __kmp_test_user_lock_with_checks
#define TEST_NESTED_LOCK __kmp_test_nested_user_lock_with_checks
#define DESTROY_LOCK __kmp_destroy_user_lock_with_checks
#define DESTROY_NESTED_LOCK __kmp_destroy_nested_user_lock_with_checks
/*
* TODO: Make check abort messages use location info & pass it
* into with_checks routines
*/
#if KMP_USE_DYNAMIC_LOCK
// internal lock initializer
// internal lock initializer: direct locks are initialized in place,
// indirect locks through the lock table lookup.
static __forceinline void
__kmp_init_lock_with_hint(ident_t *loc, void **lock, kmp_dyna_lockseq_t seq)
{
if (KMP_IS_D_LOCK(seq)) {
KMP_INIT_D_LOCK(lock, seq);
#if USE_ITT_BUILD
__kmp_itt_lock_creating((kmp_user_lock_p)lock, NULL);
#endif
} else {
KMP_INIT_I_LOCK(lock, seq);
#if USE_ITT_BUILD
kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(lock);
__kmp_itt_lock_creating(ilk->lock, loc);
#endif
}
}
// internal nest lock initializer
// internal nest lock initializer: translate the flat sequence to its
// nested counterpart (nested locks are always indirect).
static __forceinline void
__kmp_init_nest_lock_with_hint(ident_t *loc, void **lock, kmp_dyna_lockseq_t seq)
{
#if KMP_USE_TSX
// Don't have nested lock implementation for speculative locks
if (seq == lockseq_hle || seq == lockseq_rtm || seq == lockseq_adaptive)
seq = __kmp_user_lock_seq;
#endif
switch (seq) {
case lockseq_tas:
seq = lockseq_nested_tas;
break;
#if KMP_USE_FUTEX
case lockseq_futex:
seq = lockseq_nested_futex;
break;
#endif
case lockseq_ticket:
seq = lockseq_nested_ticket;
break;
case lockseq_queuing:
seq = lockseq_nested_queuing;
break;
case lockseq_drdpa:
seq = lockseq_nested_drdpa;
break;
default:
/* Anything without a dedicated nested variant falls back to
nested queuing. */
seq = lockseq_nested_queuing;
}
KMP_INIT_I_LOCK(lock, seq);
#if USE_ITT_BUILD
kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(lock);
__kmp_itt_lock_creating(ilk->lock, loc);
#endif
}
/* initialize the lock with a hint */
/* omp_init_lock_with_hint() entry point: validate the lock pointer and
delegate to the internal initializer with the hint-mapped sequence. */
void
__kmpc_init_lock_with_hint(ident_t *loc, kmp_int32 gtid, void **user_lock, uintptr_t hint)
{
KMP_DEBUG_ASSERT(__kmp_init_serial);
if (__kmp_env_consistency_check && user_lock == NULL) {
KMP_FATAL(LockIsUninitialized, "omp_init_lock_with_hint");
}
__kmp_init_lock_with_hint(loc, user_lock, __kmp_map_hint_to_lock(hint));
}
/* initialize the lock with a hint */
/* omp_init_nest_lock_with_hint() entry point: validate the lock pointer
and delegate to the internal nested initializer. */
void
__kmpc_init_nest_lock_with_hint(ident_t *loc, kmp_int32 gtid, void **user_lock, uintptr_t hint)
{
KMP_DEBUG_ASSERT(__kmp_init_serial);
if (__kmp_env_consistency_check && user_lock == NULL) {
KMP_FATAL(LockIsUninitialized, "omp_init_nest_lock_with_hint");
}
__kmp_init_nest_lock_with_hint(loc, user_lock, __kmp_map_hint_to_lock(hint));
}
#endif // KMP_USE_DYNAMIC_LOCK
/* initialize the lock */
/* omp_init_lock() entry point. */
void
__kmpc_init_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
#if KMP_USE_DYNAMIC_LOCK
KMP_DEBUG_ASSERT(__kmp_init_serial);
if (__kmp_env_consistency_check && user_lock == NULL) {
KMP_FATAL(LockIsUninitialized, "omp_init_lock");
}
__kmp_init_lock_with_hint(loc, user_lock, __kmp_user_lock_seq);
#else // KMP_USE_DYNAMIC_LOCK
static char const * const func = "omp_init_lock";
kmp_user_lock_p lck;
KMP_DEBUG_ASSERT( __kmp_init_serial );
if ( __kmp_env_consistency_check ) {
if ( user_lock == NULL ) {
KMP_FATAL( LockIsUninitialized, func );
}
}
KMP_CHECK_USER_LOCK_INIT();
/* Small lock kinds fit inside the user's omp_lock_t storage itself;
larger ones are allocated and tracked for cleanup. */
if ( ( __kmp_user_lock_kind == lk_tas )
&& ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#endif
else {
lck = __kmp_user_lock_allocate( user_lock, gtid, 0 );
}
INIT_LOCK( lck );
__kmp_set_user_lock_location( lck, loc );
#if USE_ITT_BUILD
__kmp_itt_lock_creating( lck );
#endif /* USE_ITT_BUILD */
#endif // KMP_USE_DYNAMIC_LOCK
} // __kmpc_init_lock
/* initialize the lock */
// Entry point implementing omp_init_nest_lock() for compiler-generated code.
// Same structure as __kmpc_init_lock, but the in-place size test also
// accounts for the nesting depth counter, and the nested INIT is used.
void
__kmpc_init_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
#if KMP_USE_DYNAMIC_LOCK
KMP_DEBUG_ASSERT(__kmp_init_serial);
if (__kmp_env_consistency_check && user_lock == NULL) {
KMP_FATAL(LockIsUninitialized, "omp_init_nest_lock");
}
__kmp_init_nest_lock_with_hint(loc, user_lock, __kmp_user_lock_seq);
#else // KMP_USE_DYNAMIC_LOCK
static char const * const func = "omp_init_nest_lock";
kmp_user_lock_p lck;
KMP_DEBUG_ASSERT( __kmp_init_serial );
if ( __kmp_env_consistency_check ) {
if ( user_lock == NULL ) {
KMP_FATAL( LockIsUninitialized, func );
}
}
KMP_CHECK_USER_LOCK_INIT();
// Nested locks also need room for the recursion depth counter, so the size
// test includes depth_locked. lck only appears under sizeof here.
if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll )
+ sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked )
<= OMP_NEST_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#endif
else {
// Lock does not fit in omp_nest_lock_t storage: allocate it separately.
lck = __kmp_user_lock_allocate( user_lock, gtid, 0 );
}
INIT_NESTED_LOCK( lck );
__kmp_set_user_lock_location( lck, loc );
#if USE_ITT_BUILD
__kmp_itt_lock_creating( lck );
#endif /* USE_ITT_BUILD */
#endif // KMP_USE_DYNAMIC_LOCK
} // __kmpc_init_nest_lock
// Entry point implementing omp_destroy_lock() for compiler-generated code.
void
__kmpc_destroy_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
#if KMP_USE_DYNAMIC_LOCK
# if USE_ITT_BUILD
kmp_user_lock_p lck;
// A direct tag of 0 marks an indirect lock: fetch the real lock object so
// ITT gets the right address.
if (KMP_EXTRACT_D_TAG(user_lock) == 0) {
lck = ((kmp_indirect_lock_t *)KMP_LOOKUP_I_LOCK(user_lock))->lock;
} else {
lck = (kmp_user_lock_p)user_lock;
}
__kmp_itt_lock_destroyed(lck);
# endif
KMP_D_LOCK_FUNC(user_lock, destroy)((kmp_dyna_lock_t *)user_lock);
#else
kmp_user_lock_p lck;
// Locate the lock object: stored in place when it fits inside omp_lock_t,
// otherwise found via the lock table. lck only appears under sizeof in the
// size tests — it is not dereferenced while unassigned.
if ( ( __kmp_user_lock_kind == lk_tas )
&& ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#endif
else {
lck = __kmp_lookup_user_lock( user_lock, "omp_destroy_lock" );
}
#if USE_ITT_BUILD
__kmp_itt_lock_destroyed( lck );
#endif /* USE_ITT_BUILD */
DESTROY_LOCK( lck );
// Free the lock object only when it was allocated separately; the empty
// branches below cover locks stored in place, which have nothing to free.
if ( ( __kmp_user_lock_kind == lk_tas )
&& ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
;
}
#endif
else {
__kmp_user_lock_free( user_lock, gtid, lck );
}
#endif // KMP_USE_DYNAMIC_LOCK
} // __kmpc_destroy_lock
/* destroy the lock */
// Entry point implementing omp_destroy_nest_lock() for compiler-generated
// code. Mirrors __kmpc_destroy_lock, with the size tests including the
// nesting depth counter.
void
__kmpc_destroy_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
#if KMP_USE_DYNAMIC_LOCK
# if USE_ITT_BUILD
kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(user_lock);
__kmp_itt_lock_destroyed(ilk->lock);
# endif
KMP_D_LOCK_FUNC(user_lock, destroy)((kmp_dyna_lock_t *)user_lock);
#else // KMP_USE_DYNAMIC_LOCK
kmp_user_lock_p lck;
// Locate the lock object; lck only appears under sizeof in the size tests.
if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll )
+ sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked )
<= OMP_NEST_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#endif
else {
lck = __kmp_lookup_user_lock( user_lock, "omp_destroy_nest_lock" );
}
#if USE_ITT_BUILD
__kmp_itt_lock_destroyed( lck );
#endif /* USE_ITT_BUILD */
DESTROY_NESTED_LOCK( lck );
// Free the lock object only when it was allocated separately; the empty
// branches cover locks stored in place inside omp_nest_lock_t.
if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll )
+ sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked )
<= OMP_NEST_LOCK_T_SIZE ) ) {
;
}
#endif
else {
__kmp_user_lock_free( user_lock, gtid, lck );
}
#endif // KMP_USE_DYNAMIC_LOCK
} // __kmpc_destroy_nest_lock
// Entry point implementing omp_set_lock() (blocking acquire).
void
__kmpc_set_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
KMP_COUNT_BLOCK(OMP_set_lock);
#if KMP_USE_DYNAMIC_LOCK
int tag = KMP_EXTRACT_D_TAG(user_lock);
# if USE_ITT_BUILD
__kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock); // itt function will get to the right lock object.
# endif
// Inlined fast-path acquire for TAS/futex when consistency checking is
// off; otherwise dispatch through the direct-lock function table.
# if KMP_USE_INLINED_TAS
if (tag == locktag_tas && !__kmp_env_consistency_check) {
KMP_ACQUIRE_TAS_LOCK(user_lock, gtid);
} else
# elif KMP_USE_INLINED_FUTEX
if (tag == locktag_futex && !__kmp_env_consistency_check) {
KMP_ACQUIRE_FUTEX_LOCK(user_lock, gtid);
} else
# endif
{
__kmp_direct_set[tag]((kmp_dyna_lock_t *)user_lock, gtid);
}
# if USE_ITT_BUILD
__kmp_itt_lock_acquired((kmp_user_lock_p)user_lock);
# endif
#else // KMP_USE_DYNAMIC_LOCK
kmp_user_lock_p lck;
// Locate the lock object (in place vs. lock table); lck only appears under
// sizeof in the size tests.
if ( ( __kmp_user_lock_kind == lk_tas )
&& ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#endif
else {
lck = __kmp_lookup_user_lock( user_lock, "omp_set_lock" );
}
#if USE_ITT_BUILD
__kmp_itt_lock_acquiring( lck );
#endif /* USE_ITT_BUILD */
ACQUIRE_LOCK( lck, gtid );
#if USE_ITT_BUILD
__kmp_itt_lock_acquired( lck );
#endif /* USE_ITT_BUILD */
#endif // KMP_USE_DYNAMIC_LOCK
}
// Entry point implementing omp_set_nest_lock() (blocking, recursive acquire).
void
__kmpc_set_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) {
#if KMP_USE_DYNAMIC_LOCK
# if USE_ITT_BUILD
__kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock);
# endif
KMP_D_LOCK_FUNC(user_lock, set)((kmp_dyna_lock_t *)user_lock, gtid);
# if USE_ITT_BUILD
__kmp_itt_lock_acquired((kmp_user_lock_p)user_lock);
#endif
#else // KMP_USE_DYNAMIC_LOCK
kmp_user_lock_p lck;
// Locate the lock object; the size test includes the nesting depth
// counter. lck only appears under sizeof here.
if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll )
+ sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked )
<= OMP_NEST_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#endif
else {
lck = __kmp_lookup_user_lock( user_lock, "omp_set_nest_lock" );
}
#if USE_ITT_BUILD
__kmp_itt_lock_acquiring( lck );
#endif /* USE_ITT_BUILD */
ACQUIRE_NESTED_LOCK( lck, gtid );
#if USE_ITT_BUILD
__kmp_itt_lock_acquired( lck );
#endif /* USE_ITT_BUILD */
#endif // KMP_USE_DYNAMIC_LOCK
}
// Entry point implementing omp_unset_lock() (release).
void
__kmpc_unset_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
{
#if KMP_USE_DYNAMIC_LOCK
int tag = KMP_EXTRACT_D_TAG(user_lock);
# if USE_ITT_BUILD
__kmp_itt_lock_releasing((kmp_user_lock_p)user_lock);
# endif
// Inlined fast-path release for TAS/futex when consistency checking is
// off; otherwise dispatch through the direct-lock function table.
# if KMP_USE_INLINED_TAS
if (tag == locktag_tas && !__kmp_env_consistency_check) {
KMP_RELEASE_TAS_LOCK(user_lock, gtid);
} else
# elif KMP_USE_INLINED_FUTEX
if (tag == locktag_futex && !__kmp_env_consistency_check) {
KMP_RELEASE_FUTEX_LOCK(user_lock, gtid);
} else
# endif
{
__kmp_direct_unset[tag]((kmp_dyna_lock_t *)user_lock, gtid);
}
#else // KMP_USE_DYNAMIC_LOCK
kmp_user_lock_p lck;
/* Can't use serial interval since not block structured */
/* release the lock */
if ( ( __kmp_user_lock_kind == lk_tas )
&& ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
// "fast" path implemented to fix customer performance issue
#if USE_ITT_BUILD
__kmp_itt_lock_releasing( (kmp_user_lock_p)user_lock );
#endif /* USE_ITT_BUILD */
// Release by clearing the poll word directly; returns early, so the OMPT
// release callback at the bottom is skipped on this path.
TCW_4(((kmp_user_lock_p)user_lock)->tas.lk.poll, 0);
KMP_MB();
return;
#else
lck = (kmp_user_lock_p)user_lock;
#endif
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#endif
else {
lck = __kmp_lookup_user_lock( user_lock, "omp_unset_lock" );
}
#if USE_ITT_BUILD
__kmp_itt_lock_releasing( lck );
#endif /* USE_ITT_BUILD */
RELEASE_LOCK( lck, gtid );
#if OMPT_SUPPORT && OMPT_BLAME
if (ompt_enabled &&
ompt_callbacks.ompt_callback(ompt_event_release_lock)) {
ompt_callbacks.ompt_callback(ompt_event_release_lock)((uint64_t) lck);
}
#endif
#endif // KMP_USE_DYNAMIC_LOCK
}
/* release the lock */
// Entry point implementing omp_unset_nest_lock(): releases one nesting level.
void
__kmpc_unset_nest_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
{
#if KMP_USE_DYNAMIC_LOCK
# if USE_ITT_BUILD
__kmp_itt_lock_releasing((kmp_user_lock_p)user_lock);
# endif
KMP_D_LOCK_FUNC(user_lock, unset)((kmp_dyna_lock_t *)user_lock, gtid);
#else // KMP_USE_DYNAMIC_LOCK
kmp_user_lock_p lck;
/* Can't use serial interval since not block structured */
if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll )
+ sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
// "fast" path implemented to fix customer performance issue
kmp_tas_lock_t *tl = (kmp_tas_lock_t*)user_lock;
#if USE_ITT_BUILD
__kmp_itt_lock_releasing( (kmp_user_lock_p)user_lock );
#endif /* USE_ITT_BUILD */
// Drop one nesting level; clear the poll word only when the outermost
// level is released. Returns early, skipping the OMPT callbacks below.
if ( --(tl->lk.depth_locked) == 0 ) {
TCW_4(tl->lk.poll, 0);
}
KMP_MB();
return;
#else
lck = (kmp_user_lock_p)user_lock;
#endif
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked )
<= OMP_NEST_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#endif
else {
lck = __kmp_lookup_user_lock( user_lock, "omp_unset_nest_lock" );
}
#if USE_ITT_BUILD
__kmp_itt_lock_releasing( lck );
#endif /* USE_ITT_BUILD */
int release_status;
release_status = RELEASE_NESTED_LOCK( lck, gtid );
#if OMPT_SUPPORT && OMPT_BLAME
// Tell the tool whether this release dropped the last nesting level
// (KMP_LOCK_RELEASED) or only an inner one.
if (ompt_enabled) {
if (release_status == KMP_LOCK_RELEASED) {
if (ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_last)) {
ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_last)(
(uint64_t) lck);
}
} else if (ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_prev)) {
ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_prev)(
(uint64_t) lck);
}
}
#endif
#endif // KMP_USE_DYNAMIC_LOCK
}
/* try to acquire the lock */
// Entry point implementing omp_test_lock(): non-blocking acquire attempt.
// Returns FTN_TRUE when the lock was acquired, FTN_FALSE otherwise.
int
__kmpc_test_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
{
KMP_COUNT_BLOCK(OMP_test_lock);
#if KMP_USE_DYNAMIC_LOCK
int rc;
int tag = KMP_EXTRACT_D_TAG(user_lock);
# if USE_ITT_BUILD
__kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock);
# endif
// Inlined fast-path test for TAS/futex when consistency checking is off;
// otherwise dispatch through the direct-lock function table.
# if KMP_USE_INLINED_TAS
if (tag == locktag_tas && !__kmp_env_consistency_check) {
KMP_TEST_TAS_LOCK(user_lock, gtid, rc);
} else
# elif KMP_USE_INLINED_FUTEX
if (tag == locktag_futex && !__kmp_env_consistency_check) {
KMP_TEST_FUTEX_LOCK(user_lock, gtid, rc);
} else
# endif
{
rc = __kmp_direct_test[tag]((kmp_dyna_lock_t *)user_lock, gtid);
}
if (rc) {
# if USE_ITT_BUILD
__kmp_itt_lock_acquired((kmp_user_lock_p)user_lock);
# endif
return FTN_TRUE;
} else {
# if USE_ITT_BUILD
__kmp_itt_lock_cancelled((kmp_user_lock_p)user_lock);
# endif
return FTN_FALSE;
}
#else // KMP_USE_DYNAMIC_LOCK
kmp_user_lock_p lck;
int rc;
// Locate the lock object (in place vs. lock table); lck only appears under
// sizeof in the size tests.
if ( ( __kmp_user_lock_kind == lk_tas )
&& ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#endif
else {
lck = __kmp_lookup_user_lock( user_lock, "omp_test_lock" );
}
#if USE_ITT_BUILD
__kmp_itt_lock_acquiring( lck );
#endif /* USE_ITT_BUILD */
rc = TEST_LOCK( lck, gtid );
#if USE_ITT_BUILD
if ( rc ) {
__kmp_itt_lock_acquired( lck );
} else {
__kmp_itt_lock_cancelled( lck );
}
#endif /* USE_ITT_BUILD */
return ( rc ? FTN_TRUE : FTN_FALSE );
/* Can't use serial interval since not block structured */
#endif // KMP_USE_DYNAMIC_LOCK
}
/* try to acquire the lock */
// Entry point implementing omp_test_nest_lock(): non-blocking acquire attempt
// on a nestable lock. Returns the underlying nested-test result unchanged
// (nonzero means acquired; presumably the nesting count — confirm against
// TEST_NESTED_LOCK / KMP_D_LOCK_FUNC(test) implementations).
int
__kmpc_test_nest_lock( ident_t *loc, kmp_int32 gtid, void **user_lock )
{
#if KMP_USE_DYNAMIC_LOCK
int rc;
# if USE_ITT_BUILD
__kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock);
# endif
rc = KMP_D_LOCK_FUNC(user_lock, test)((kmp_dyna_lock_t *)user_lock, gtid);
# if USE_ITT_BUILD
if (rc) {
__kmp_itt_lock_acquired((kmp_user_lock_p)user_lock);
} else {
__kmp_itt_lock_cancelled((kmp_user_lock_p)user_lock);
}
# endif
return rc;
#else // KMP_USE_DYNAMIC_LOCK
kmp_user_lock_p lck;
int rc;
// Locate the lock object; the size test includes the nesting depth counter.
if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll )
+ sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
else if ( ( __kmp_user_lock_kind == lk_futex )
&& ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked )
<= OMP_NEST_LOCK_T_SIZE ) ) {
lck = (kmp_user_lock_p)user_lock;
}
#endif
else {
lck = __kmp_lookup_user_lock( user_lock, "omp_test_nest_lock" );
}
#if USE_ITT_BUILD
__kmp_itt_lock_acquiring( lck );
#endif /* USE_ITT_BUILD */
rc = TEST_NESTED_LOCK( lck, gtid );
#if USE_ITT_BUILD
if ( rc ) {
__kmp_itt_lock_acquired( lck );
} else {
__kmp_itt_lock_cancelled( lck );
}
#endif /* USE_ITT_BUILD */
return rc;
/* Can't use serial interval since not block structured */
#endif // KMP_USE_DYNAMIC_LOCK
}
/*--------------------------------------------------------------------------------------------------------------------*/
/*
* Interface to fast scalable reduce methods routines
*/
// Keep the selected reduction method in a thread-local structure for cross-function
// usage: it is read back by the __kmpc_end_reduce* functions. An alternative would be
// to re-determine the method inside the __kmpc_end_reduce* functions (which would
// require a new prototype). AT: which solution is better?
// Store the reduction method chosen for thread 'gtid' in its thread-local state.
#define __KMP_SET_REDUCTION_METHOD(gtid,rmethod) \
( ( __kmp_threads[ ( gtid ) ] -> th.th_local.packed_reduction_method ) = ( rmethod ) )
// Read back the reduction method previously stored for thread 'gtid'.
#define __KMP_GET_REDUCTION_METHOD(gtid) \
( __kmp_threads[ ( gtid ) ] -> th.th_local.packed_reduction_method )
// description of the packed_reduction_method variable: look at the macros in kmp.h
// used in a critical section reduce block
// Acquires the critical-section lock guarding a critical_reduce_block,
// lazily initializing the lock stored in/behind *crit on first use.
static __forceinline void
__kmp_enter_critical_section_reduce_block( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit ) {
// this lock was visible to a customer and to the threading profile tool as a serial overhead span
// (although it's used for an internal purpose only)
// why was it visible in previous implementation?
// should we keep it visible in new reduce block?
kmp_user_lock_p lck;
#if KMP_USE_DYNAMIC_LOCK
kmp_dyna_lock_t *lk = (kmp_dyna_lock_t *)crit;
// Check if it is initialized.
// A zero word means *crit is not yet a lock: initialize it in place for a
// direct lock, or install a pointer to an indirect lock object.
if (*lk == 0) {
if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) {
KMP_COMPARE_AND_STORE_ACQ32((volatile kmp_int32 *)crit, 0, KMP_GET_D_TAG(__kmp_user_lock_seq));
} else {
__kmp_init_indirect_csptr(crit, loc, global_tid, KMP_GET_I_TAG(__kmp_user_lock_seq));
}
}
// Branch for accessing the actual lock object and set operation. This branching is inevitable since
// this lock initialization does not follow the normal dispatch path (lock table is not used).
if (KMP_EXTRACT_D_TAG(lk) != 0) {
lck = (kmp_user_lock_p)lk;
KMP_DEBUG_ASSERT(lck != NULL);
if (__kmp_env_consistency_check) {
__kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_user_lock_seq);
}
KMP_D_LOCK_FUNC(lk, set)(lk, global_tid);
} else {
kmp_indirect_lock_t *ilk = *((kmp_indirect_lock_t **)lk);
lck = ilk->lock;
KMP_DEBUG_ASSERT(lck != NULL);
if (__kmp_env_consistency_check) {
__kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_user_lock_seq);
}
KMP_I_LOCK_FUNC(ilk, set)(lck, global_tid);
}
#else // KMP_USE_DYNAMIC_LOCK
// We know that the fast reduction code is only emitted by Intel compilers
// with 32 byte critical sections. If there isn't enough space, then we
// have to use a pointer.
if ( __kmp_base_user_lock_size <= INTEL_CRITICAL_SIZE ) {
lck = (kmp_user_lock_p)crit;
}
else {
lck = __kmp_get_critical_section_ptr( crit, loc, global_tid );
}
KMP_DEBUG_ASSERT( lck != NULL );
if ( __kmp_env_consistency_check )
__kmp_push_sync( global_tid, ct_critical, loc, lck );
__kmp_acquire_user_lock_with_checks( lck, global_tid );
#endif // KMP_USE_DYNAMIC_LOCK
}
// used in a critical section reduce block
// Releases the critical-section lock acquired by
// __kmp_enter_critical_section_reduce_block(); both functions must agree on
// whether the lock lives inside *crit or behind a pointer stored there.
static __forceinline void
__kmp_end_critical_section_reduce_block( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit ) {
    kmp_user_lock_p lck;
#if KMP_USE_DYNAMIC_LOCK
    if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) {
        lck = (kmp_user_lock_p)crit;
        if (__kmp_env_consistency_check)
            __kmp_pop_sync(global_tid, ct_critical, loc);
        KMP_D_LOCK_FUNC(lck, unset)((kmp_dyna_lock_t *)lck, global_tid);
    } else {
        kmp_indirect_lock_t *ilk = (kmp_indirect_lock_t *)TCR_PTR(*((kmp_indirect_lock_t **)crit));
        if (__kmp_env_consistency_check)
            __kmp_pop_sync(global_tid, ct_critical, loc);
        KMP_I_LOCK_FUNC(ilk, unset)(ilk->lock, global_tid);
    }
#else // KMP_USE_DYNAMIC_LOCK
    // We know that the fast reduction code is only emitted by Intel compilers with 32 byte critical
    // sections. If there isn't enough space, then we have to use a pointer.
    // Use the same INTEL_CRITICAL_SIZE threshold as the matching enter routine
    // (was a hard-coded 32, which would silently diverge from the enter path
    // if INTEL_CRITICAL_SIZE were ever changed).
    if ( __kmp_base_user_lock_size > INTEL_CRITICAL_SIZE ) {
        // Lock did not fit inside *crit: crit holds a pointer to the lock object.
        lck = *( (kmp_user_lock_p *) crit );
        KMP_ASSERT( lck != NULL );
    } else {
        // Lock object is stored directly in the critical name area.
        lck = (kmp_user_lock_p) crit;
    }
    if ( __kmp_env_consistency_check )
        __kmp_pop_sync( global_tid, ct_critical, loc );
    __kmp_release_user_lock_with_checks( lck, global_tid );
#endif // KMP_USE_DYNAMIC_LOCK
} // __kmp_end_critical_section_reduce_block
/* 2.a.i. Reduce Block without a terminating barrier */
/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread number
@param num_vars number of items (variables) to be reduced
@param reduce_size size of data in bytes to be reduced
@param reduce_data pointer to data to be reduced
@param reduce_func callback function providing reduction operation on two operands and returning result of reduction in lhs_data
@param lck pointer to the unique lock data structure
@result 1 for the master thread, 0 for all other team threads, 2 for all team threads if atomic reduction needed
The nowait version is used for a reduce clause with the nowait argument.
*/
kmp_int32
__kmpc_reduce_nowait(
ident_t *loc, kmp_int32 global_tid,
kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
kmp_critical_name *lck ) {
KMP_COUNT_BLOCK(REDUCE_nowait);
int retval = 0;
PACKED_REDUCTION_METHOD_T packed_reduction_method;
#if OMP_40_ENABLED
kmp_team_t *team;
kmp_info_t *th;
// task_state is only read when teams_swapped is set (assigned together below).
int teams_swapped = 0, task_state;
#endif
KA_TRACE( 10, ( "__kmpc_reduce_nowait() enter: called T#%d\n", global_tid ) );
// why do we need this initialization here at all?
// Reduction clause can not be used as a stand-alone directive.
// do not call __kmp_serial_initialize(), it will be called by __kmp_parallel_initialize() if needed
// possible detection of false-positive race by the threadchecker ???
if( ! TCR_4( __kmp_init_parallel ) )
__kmp_parallel_initialize();
// check correctness of reduce block nesting
#if KMP_USE_DYNAMIC_LOCK
if ( __kmp_env_consistency_check )
__kmp_push_sync( global_tid, ct_reduce, loc, NULL, 0 );
#else
if ( __kmp_env_consistency_check )
__kmp_push_sync( global_tid, ct_reduce, loc, NULL );
#endif
#if OMP_40_ENABLED
th = __kmp_thread_from_gtid(global_tid);
if( th->th.th_teams_microtask ) { // AC: check if we are inside the teams construct?
team = th->th.th_team;
if( team->t.t_level == th->th.th_teams_level ) {
// this is reduction at teams construct
KMP_DEBUG_ASSERT(!th->th.th_info.ds.ds_tid); // AC: check that tid == 0
// Let's swap teams temporarily for the reduction barrier
// (restored in the teams_swapped block at the end of this function).
teams_swapped = 1;
th->th.th_info.ds.ds_tid = team->t.t_master_tid;
th->th.th_team = team->t.t_parent;
th->th.th_team_nproc = th->th.th_team->t.t_nproc;
th->th.th_task_team = th->th.th_team->t.t_task_team[0];
task_state = th->th.th_task_state;
th->th.th_task_state = 0;
}
}
#endif // OMP_40_ENABLED
// packed_reduction_method value will be reused by __kmp_end_reduce* function, the value should be kept in a variable
// the variable should be either a construct-specific or thread-specific property, not a team specific property
// (a thread can reach the next reduce block on the next construct, reduce method may differ on the next construct)
// an ident_t "loc" parameter could be used as a construct-specific property (what if loc == 0?)
// (if both construct-specific and team-specific variables were shared, then unness extra syncs should be needed)
// a thread-specific variable is better regarding two issues above (next construct and extra syncs)
// a thread-specific "th_local.reduction_method" variable is used currently
// each thread executes 'determine' and 'set' lines (no need to execute by one thread, to avoid unness extra syncs)
packed_reduction_method = __kmp_determine_reduction_method( loc, global_tid, num_vars, reduce_size, reduce_data, reduce_func, lck );
__KMP_SET_REDUCTION_METHOD( global_tid, packed_reduction_method );
if( packed_reduction_method == critical_reduce_block ) {
__kmp_enter_critical_section_reduce_block( loc, global_tid, lck );
retval = 1;
} else if( packed_reduction_method == empty_reduce_block ) {
// usage: if team size == 1, no synchronization is required ( Intel platforms only )
retval = 1;
} else if( packed_reduction_method == atomic_reduce_block ) {
retval = 2;
// all threads should do this pop here (because __kmpc_end_reduce_nowait() won't be called by the code gen)
// (it's not quite good, because the checking block has been closed by this 'pop',
// but atomic operation has not been executed yet, will be executed slightly later, literally on next instruction)
if ( __kmp_env_consistency_check )
__kmp_pop_sync( global_tid, ct_reduce, loc );
} else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) {
//AT: performance issue: a real barrier here
//AT: (if master goes slow, other threads are blocked here waiting for the master to come and release them)
//AT: (it's not what a customer might expect specifying NOWAIT clause)
//AT: (specifying NOWAIT won't result in improvement of performance, it'll be confusing to a customer)
//AT: another implementation of *barrier_gather*nowait() (or some other design) might go faster
// and be more in line with sense of NOWAIT
//AT: TO DO: do epcc test and compare times
// this barrier should be invisible to a customer and to the threading profile tool
// (it's neither a terminating barrier nor customer's code, it's used for an internal purpose)
#if USE_ITT_NOTIFY
__kmp_threads[global_tid]->th.th_ident = loc;
#endif
retval = __kmp_barrier( UNPACK_REDUCTION_BARRIER( packed_reduction_method ), global_tid, FALSE, reduce_size, reduce_data, reduce_func );
// Map the barrier result to the documented return convention:
// 1 for master, 0 for all other workers.
retval = ( retval != 0 ) ? ( 0 ) : ( 1 );
// all other workers except master should do this pop here
// ( none of other workers will get to __kmpc_end_reduce_nowait() )
if ( __kmp_env_consistency_check ) {
if( retval == 0 ) {
__kmp_pop_sync( global_tid, ct_reduce, loc );
}
}
} else {
// should never reach this block
KMP_ASSERT( 0 ); // "unexpected method"
}
#if OMP_40_ENABLED
if( teams_swapped ) {
// Restore thread structure
th->th.th_info.ds.ds_tid = 0;
th->th.th_team = team;
th->th.th_team_nproc = team->t.t_nproc;
th->th.th_task_team = team->t.t_task_team[task_state];
th->th.th_task_state = task_state;
}
#endif
KA_TRACE( 10, ( "__kmpc_reduce_nowait() exit: called T#%d: method %08x, returns %08x\n", global_tid, packed_reduction_method, retval ) );
return retval;
}
/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread id.
@param lck pointer to the unique lock data structure
Finish the execution of a reduce nowait.
*/
void
__kmpc_end_reduce_nowait( ident_t *loc, kmp_int32 global_tid, kmp_critical_name *lck ) {
    KA_TRACE( 10, ( "__kmpc_end_reduce_nowait() enter: called T#%d\n", global_tid ) );

    // Reuse the method chosen by the matching __kmpc_reduce_nowait() call.
    PACKED_REDUCTION_METHOD_T method = __KMP_GET_REDUCTION_METHOD( global_tid );

    if ( method == critical_reduce_block ) {
        __kmp_end_critical_section_reduce_block( loc, global_tid, lck );
    } else if ( method == empty_reduce_block ) {
        // Team size == 1: nothing was synchronized, nothing to finish
        // (Intel platforms only).
    } else if ( method == atomic_reduce_block ) {
        // Code generation does not emit this call for the atomic case; the
        // branch is kept so a stray call does not trip the assert below.
    } else if ( TEST_REDUCTION_METHOD( method, tree_reduce_block ) ) {
        // Only the master thread reaches this point after a tree reduction.
    } else {
        // should never reach this block
        KMP_ASSERT( 0 ); // "unexpected method"
    }

    if ( __kmp_env_consistency_check )
        __kmp_pop_sync( global_tid, ct_reduce, loc );

    KA_TRACE( 10, ( "__kmpc_end_reduce_nowait() exit: called T#%d: method %08x\n", global_tid, method ) );
}
/* 2.a.ii. Reduce Block with a terminating barrier */
/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread number
@param num_vars number of items (variables) to be reduced
@param reduce_size size of data in bytes to be reduced
@param reduce_data pointer to data to be reduced
@param reduce_func callback function providing reduction operation on two operands and returning result of reduction in lhs_data
@param lck pointer to the unique lock data structure
@result 1 for the master thread, 0 for all other team threads, 2 for all team threads if atomic reduction needed
A blocking reduce that includes an implicit barrier.
*/
// Blocking reduce: like __kmpc_reduce_nowait but the tree-reduction barrier
// is a terminating one (TRUE passed to __kmp_barrier).
kmp_int32
__kmpc_reduce(
ident_t *loc, kmp_int32 global_tid,
kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
void (*reduce_func)(void *lhs_data, void *rhs_data),
kmp_critical_name *lck )
{
KMP_COUNT_BLOCK(REDUCE_wait);
int retval = 0;
PACKED_REDUCTION_METHOD_T packed_reduction_method;
KA_TRACE( 10, ( "__kmpc_reduce() enter: called T#%d\n", global_tid ) );
// why do we need this initialization here at all?
// Reduction clause can not be a stand-alone directive.
// do not call __kmp_serial_initialize(), it will be called by __kmp_parallel_initialize() if needed
// possible detection of false-positive race by the threadchecker ???
if( ! TCR_4( __kmp_init_parallel ) )
__kmp_parallel_initialize();
// check correctness of reduce block nesting
#if KMP_USE_DYNAMIC_LOCK
if ( __kmp_env_consistency_check )
__kmp_push_sync( global_tid, ct_reduce, loc, NULL, 0 );
#else
if ( __kmp_env_consistency_check )
__kmp_push_sync( global_tid, ct_reduce, loc, NULL );
#endif
// Choose the method and stash it thread-locally for __kmpc_end_reduce().
packed_reduction_method = __kmp_determine_reduction_method( loc, global_tid, num_vars, reduce_size, reduce_data, reduce_func, lck );
__KMP_SET_REDUCTION_METHOD( global_tid, packed_reduction_method );
if( packed_reduction_method == critical_reduce_block ) {
__kmp_enter_critical_section_reduce_block( loc, global_tid, lck );
retval = 1;
} else if( packed_reduction_method == empty_reduce_block ) {
// usage: if team size == 1, no synchronization is required ( Intel platforms only )
retval = 1;
} else if( packed_reduction_method == atomic_reduce_block ) {
retval = 2;
} else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) {
//case tree_reduce_block:
// this barrier should be visible to a customer and to the threading profile tool
// (it's a terminating barrier on constructs if NOWAIT not specified)
#if USE_ITT_NOTIFY
__kmp_threads[global_tid]->th.th_ident = loc; // needed for correct notification of frames
#endif
retval = __kmp_barrier( UNPACK_REDUCTION_BARRIER( packed_reduction_method ), global_tid, TRUE, reduce_size, reduce_data, reduce_func );
retval = ( retval != 0 ) ? ( 0 ) : ( 1 );
// all other workers except master should do this pop here
// ( none of other workers except master will enter __kmpc_end_reduce() )
if ( __kmp_env_consistency_check ) {
if( retval == 0 ) { // 0: all other workers; 1: master
__kmp_pop_sync( global_tid, ct_reduce, loc );
}
}
} else {
// should never reach this block
KMP_ASSERT( 0 ); // "unexpected method"
}
KA_TRACE( 10, ( "__kmpc_reduce() exit: called T#%d: method %08x, returns %08x\n", global_tid, packed_reduction_method, retval ) );
return retval;
}
/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread id.
@param lck pointer to the unique lock data structure
Finish the execution of a blocking reduce.
The <tt>lck</tt> pointer must be the same as that used in the corresponding start function.
*/
// Finish a blocking reduce started by __kmpc_reduce(): all non-tree methods
// execute a terminating plain barrier here; the tree method only releases
// workers from the split barrier (master executes this path alone).
void
__kmpc_end_reduce( ident_t *loc, kmp_int32 global_tid, kmp_critical_name *lck ) {
PACKED_REDUCTION_METHOD_T packed_reduction_method;
KA_TRACE( 10, ( "__kmpc_end_reduce() enter: called T#%d\n", global_tid ) );
// Recover the method chosen by the matching __kmpc_reduce() call.
packed_reduction_method = __KMP_GET_REDUCTION_METHOD( global_tid );
// this barrier should be visible to a customer and to the threading profile tool
// (it's a terminating barrier on constructs if NOWAIT not specified)
if( packed_reduction_method == critical_reduce_block ) {
__kmp_end_critical_section_reduce_block( loc, global_tid, lck );
// TODO: implicit barrier: should be exposed
#if USE_ITT_NOTIFY
__kmp_threads[global_tid]->th.th_ident = loc;
#endif
__kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );
} else if( packed_reduction_method == empty_reduce_block ) {
// usage: if team size == 1, no synchronization is required ( Intel platforms only )
// TODO: implicit barrier: should be exposed
#if USE_ITT_NOTIFY
__kmp_threads[global_tid]->th.th_ident = loc;
#endif
__kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );
} else if( packed_reduction_method == atomic_reduce_block ) {
// TODO: implicit barrier: should be exposed
#if USE_ITT_NOTIFY
__kmp_threads[global_tid]->th.th_ident = loc;
#endif
__kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );
} else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) {
// only master executes here (master releases all other workers)
__kmp_end_split_barrier( UNPACK_REDUCTION_BARRIER( packed_reduction_method ), global_tid );
} else {
// should never reach this block
KMP_ASSERT( 0 ); // "unexpected method"
}
if ( __kmp_env_consistency_check )
__kmp_pop_sync( global_tid, ct_reduce, loc );
KA_TRACE( 10, ( "__kmpc_end_reduce() exit: called T#%d: method %08x\n", global_tid, packed_reduction_method ) );
return;
}
#undef __KMP_GET_REDUCTION_METHOD
#undef __KMP_SET_REDUCTION_METHOD
/*-- end of interface to fast scalable reduce routines ---------------------------------------------------------------*/
// Return the unique id of the task currently executed by the calling thread,
// or 0 when the caller is not registered with the runtime (no valid gtid).
kmp_uint64
__kmpc_get_taskid() {
    kmp_int32 gtid = __kmp_get_gtid();
    if ( gtid < 0 )
        return 0;
    kmp_info_t *thread = __kmp_thread_from_gtid( gtid );
    return thread->th.th_current_task->td_task_id;
} // __kmpc_get_taskid
// Return the unique id of the parent of the calling thread's current task;
// 0 when there is no parent task or the caller has no valid gtid.
kmp_uint64
__kmpc_get_parent_taskid() {
    kmp_int32 gtid = __kmp_get_gtid();
    if ( gtid < 0 )
        return 0;
    kmp_info_t *thread = __kmp_thread_from_gtid( gtid );
    kmp_taskdata_t *parent_task = thread->th.th_current_task->td_parent;
    return parent_task ? parent_task->td_task_id : 0;
} // __kmpc_get_parent_taskid
// Record the requested thread placement — number of sockets/cores/threads per
// core plus socket and core offsets — in the global __kmp_place_* variables,
// initializing the serial part of the runtime first if that has not happened.
void __kmpc_place_threads(int nS, int sO, int nC, int cO, int nT)
{
    if ( !__kmp_init_serial )
        __kmp_serial_initialize();
    __kmp_place_num_sockets          = nS;
    __kmp_place_socket_offset        = sO;
    __kmp_place_num_cores            = nC;
    __kmp_place_core_offset          = cO;
    __kmp_place_num_threads_per_core = nT;
}
#if OMP_41_ENABLED
/*!
@ingroup WORK_SHARING
@param loc source location information.
@param gtid global thread number.
@param num_dims number of associated doacross loops.
@param dims info on loops bounds.
Initialize doacross loop information.
Expect the compiler to send us inclusive bounds,
e.g. for(i=2;i<9;i+=2) lo=2, up=8, st=2.
*/
/*
 * Initialize doacross loop bookkeeping for the calling thread.
 * Allocates this thread's private copy of the loop-nest description
 * (th_doacross_info) and, on the first thread to arrive, the shared
 * bit-array of per-iteration "done" flags used by wait/post.
 *
 * Layout of th_doacross_info (kmp_int64 entries):
 *   [0]          number of dimensions
 *   [1]          address of the shared doacross_num_done counter
 *   [2],[3],[4]  lo, up, st of dimension 0
 *   then 4 entries per remaining dimension j>=1: range_length, lo, up, st
 */
void
__kmpc_doacross_init(ident_t *loc, int gtid, int num_dims, struct kmp_dim * dims)
{
    int j, idx;
    kmp_int64 last, trace_count;
    kmp_info_t *th = __kmp_threads[gtid];
    kmp_team_t *team = th->th.th_team;
    kmp_uint32 *flags;
    kmp_disp_t *pr_buf = th->th.th_dispatch;
    dispatch_shared_info_t *sh_buf;
    KA_TRACE(20,("__kmpc_doacross_init() enter: called T#%d, num dims %d, active %d\n",
                 gtid, num_dims, !team->t.t_serialized));
    KMP_DEBUG_ASSERT(dims != NULL);
    KMP_DEBUG_ASSERT(num_dims > 0);
    if( team->t.t_serialized ) {
        KA_TRACE(20,("__kmpc_doacross_init() exit: serialized team\n"));
        return; // no dependencies if team is serialized
    }
    KMP_DEBUG_ASSERT(team->t.t_nproc > 1);
    idx = pr_buf->th_doacross_buf_idx++; // Increment index of shared buffer for the next loop
    sh_buf = &team->t.t_disp_buffer[idx % __kmp_dispatch_num_buffers];
    // Save bounds info into allocated private buffer
    KMP_DEBUG_ASSERT(pr_buf->th_doacross_info == NULL);
    pr_buf->th_doacross_info =
        (kmp_int64*)__kmp_thread_malloc(th, sizeof(kmp_int64)*(4 * num_dims + 1));
    KMP_DEBUG_ASSERT(pr_buf->th_doacross_info != NULL);
    pr_buf->th_doacross_info[0] = (kmp_int64)num_dims; // first element is number of dimensions
    // Save also address of num_done in order to access it later without knowing the buffer index
    pr_buf->th_doacross_info[1] = (kmp_int64)&sh_buf->doacross_num_done;
    pr_buf->th_doacross_info[2] = dims[0].lo;
    pr_buf->th_doacross_info[3] = dims[0].up;
    pr_buf->th_doacross_info[4] = dims[0].st;
    last = 5;
    for( j = 1; j < num_dims; ++j ) {
        kmp_int64 range_length; // To keep ranges of all dimensions but the first dims[0]
        if( dims[j].st == 1 ) { // most common case
            // AC: should we care of ranges bigger than LLONG_MAX? (not for now)
            range_length = dims[j].up - dims[j].lo + 1;
        } else {
            if( dims[j].st > 0 ) {
                KMP_DEBUG_ASSERT(dims[j].up > dims[j].lo);
                range_length = (kmp_uint64)(dims[j].up - dims[j].lo) / dims[j].st + 1;
            } else { // negative increment
                KMP_DEBUG_ASSERT(dims[j].lo > dims[j].up);
                range_length = (kmp_uint64)(dims[j].lo - dims[j].up) / (-dims[j].st) + 1;
            }
        }
        pr_buf->th_doacross_info[last++] = range_length;
        pr_buf->th_doacross_info[last++] = dims[j].lo;
        pr_buf->th_doacross_info[last++] = dims[j].up;
        pr_buf->th_doacross_info[last++] = dims[j].st;
    }
    // Compute total trip count.
    // Start with range of dims[0] which we don't need to keep in the buffer.
    if( dims[0].st == 1 ) { // most common case
        trace_count = dims[0].up - dims[0].lo + 1;
    } else if( dims[0].st > 0 ) {
        KMP_DEBUG_ASSERT(dims[0].up > dims[0].lo);
        trace_count = (kmp_uint64)(dims[0].up - dims[0].lo) / dims[0].st + 1;
    } else { // negative increment
        KMP_DEBUG_ASSERT(dims[0].lo > dims[0].up);
        trace_count = (kmp_uint64)(dims[0].lo - dims[0].up) / (-dims[0].st) + 1;
    }
    for( j = 1; j < num_dims; ++j ) {
        trace_count *= pr_buf->th_doacross_info[4 * j + 1]; // use kept ranges
    }
    KMP_DEBUG_ASSERT(trace_count > 0);
    // Check if shared buffer is not occupied by other loop (idx - __kmp_dispatch_num_buffers)
    if( idx != sh_buf->doacross_buf_idx ) {
        // Shared buffer is occupied, wait for it to be free
        __kmp_wait_yield_4( (kmp_uint32*)&sh_buf->doacross_buf_idx, idx, __kmp_eq_4, NULL );
    }
    // Check if we are the first thread. After the CAS the first thread gets 0,
    // others get 1 if initialization is in progress, allocated pointer otherwise.
    flags = (kmp_uint32*)KMP_COMPARE_AND_STORE_RET64(
        (kmp_int64*)&sh_buf->doacross_flags,NULL,(kmp_int64)1);
    if( flags == NULL ) {
        // we are the first thread, allocate the array of flags
        kmp_int64 size = trace_count / 8 + 8; // in bytes, use single bit per iteration
        sh_buf->doacross_flags = (kmp_uint32*)__kmp_thread_calloc(th, size, 1);
    } else if( (kmp_int64)flags == 1 ) {
        // initialization is still in progress, need to wait
        while( (volatile kmp_int64)sh_buf->doacross_flags == 1 ) {
            KMP_YIELD(TRUE);
        }
    }
    KMP_DEBUG_ASSERT((kmp_int64)sh_buf->doacross_flags > 1); // check value of pointer
    pr_buf->th_doacross_flags = sh_buf->doacross_flags; // save private copy in order to not
                                                        // touch shared buffer on each iteration
    KA_TRACE(20,("__kmpc_doacross_init() exit: T#%d\n", gtid));
}
/*
 * Block until the loop iteration identified by vec (one entry per doacross
 * dimension) has been flagged complete by __kmpc_doacross_post().
 * The vector is linearized into a single iteration number using the bounds
 * and strides recorded by __kmpc_doacross_init(); a source iteration that
 * falls outside the recorded bounds carries no dependence, so the wait
 * returns immediately in that case.
 */
void
__kmpc_doacross_wait(ident_t *loc, int gtid, long long *vec)
{
    kmp_int32 shft, num_dims, i;
    kmp_uint32 flag;
    kmp_int64 iter_number; // iteration number of "collapsed" loop nest
    kmp_info_t *th = __kmp_threads[gtid];
    kmp_team_t *team = th->th.th_team;
    kmp_disp_t *pr_buf;
    kmp_int64 lo, up, st;
    KA_TRACE(20,("__kmpc_doacross_wait() enter: called T#%d\n", gtid));
    if( team->t.t_serialized ) {
        KA_TRACE(20,("__kmpc_doacross_wait() exit: serialized team\n"));
        return; // no dependencies if team is serialized
    }
    // calculate sequential iteration number and check out-of-bounds condition
    pr_buf = th->th.th_dispatch;
    KMP_DEBUG_ASSERT(pr_buf->th_doacross_info != NULL);
    num_dims = pr_buf->th_doacross_info[0];
    lo = pr_buf->th_doacross_info[2];
    up = pr_buf->th_doacross_info[3];
    st = pr_buf->th_doacross_info[4];
    if( st == 1 ) { // most common case
        if( vec[0] < lo || vec[0] > up ) {
            KA_TRACE(20,(
                "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
                gtid, vec[0], lo, up));
            return;
        }
        iter_number = vec[0] - lo;
    } else if( st > 0 ) {
        if( vec[0] < lo || vec[0] > up ) {
            KA_TRACE(20,(
                "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
                gtid, vec[0], lo, up));
            return;
        }
        iter_number = (kmp_uint64)(vec[0] - lo) / st;
    } else { // negative increment
        if( vec[0] > lo || vec[0] < up ) {
            KA_TRACE(20,(
                "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
                gtid, vec[0], lo, up));
            return;
        }
        iter_number = (kmp_uint64)(lo - vec[0]) / (-st);
    }
    // Fold in the remaining dimensions (Horner-style):
    //   iter_number = iter + range_length * iter_number
    // using the 4 entries per dimension saved at init: range_length, lo, up, st.
    for( i = 1; i < num_dims; ++i ) {
        kmp_int64 iter, ln;
        kmp_int32 j = i * 4;
        ln = pr_buf->th_doacross_info[j + 1];
        lo = pr_buf->th_doacross_info[j + 2];
        up = pr_buf->th_doacross_info[j + 3];
        st = pr_buf->th_doacross_info[j + 4];
        if( st == 1 ) {
            if( vec[i] < lo || vec[i] > up ) {
                KA_TRACE(20,(
                    "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
                    gtid, vec[i], lo, up));
                return;
            }
            iter = vec[i] - lo;
        } else if( st > 0 ) {
            if( vec[i] < lo || vec[i] > up ) {
                KA_TRACE(20,(
                    "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
                    gtid, vec[i], lo, up));
                return;
            }
            iter = (kmp_uint64)(vec[i] - lo) / st;
        } else { // st < 0
            if( vec[i] > lo || vec[i] < up ) {
                KA_TRACE(20,(
                    "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n",
                    gtid, vec[i], lo, up));
                return;
            }
            iter = (kmp_uint64)(lo - vec[i]) / (-st);
        }
        iter_number = iter + ln * iter_number;
    }
    // Spin until the bit for this iteration appears in the shared flag array.
    shft = iter_number % 32; // use 32-bit granularity
    iter_number >>= 5; // divided by 32
    flag = 1 << shft;
    while( (flag & pr_buf->th_doacross_flags[iter_number]) == 0 ) {
        KMP_YIELD(TRUE);
    }
    KA_TRACE(20,("__kmpc_doacross_wait() exit: T#%d wait for iter %lld completed\n",
                 gtid, (iter_number<<5)+shft));
}
/*
 * Mark the loop iteration identified by vec as completed: linearize it
 * exactly as __kmpc_doacross_wait() does (but without bounds checks, since
 * a thread only posts iterations it actually executed) and set the
 * corresponding bit in the shared flag array with an atomic OR.
 */
void
__kmpc_doacross_post(ident_t *loc, int gtid, long long *vec)
{
    kmp_int32 shft, num_dims, i;
    kmp_uint32 flag;
    kmp_int64 iter_number; // iteration number of "collapsed" loop nest
    kmp_info_t *th = __kmp_threads[gtid];
    kmp_team_t *team = th->th.th_team;
    kmp_disp_t *pr_buf;
    kmp_int64 lo, st;
    KA_TRACE(20,("__kmpc_doacross_post() enter: called T#%d\n", gtid));
    if( team->t.t_serialized ) {
        KA_TRACE(20,("__kmpc_doacross_post() exit: serialized team\n"));
        return; // no dependencies if team is serialized
    }
    // calculate sequential iteration number (same as in "wait" but no out-of-bounds checks)
    pr_buf = th->th.th_dispatch;
    KMP_DEBUG_ASSERT(pr_buf->th_doacross_info != NULL);
    num_dims = pr_buf->th_doacross_info[0];
    lo = pr_buf->th_doacross_info[2];
    st = pr_buf->th_doacross_info[4];
    if( st == 1 ) { // most common case
        iter_number = vec[0] - lo;
    } else if( st > 0 ) {
        iter_number = (kmp_uint64)(vec[0] - lo) / st;
    } else { // negative increment
        iter_number = (kmp_uint64)(lo - vec[0]) / (-st);
    }
    // Fold in remaining dimensions using the saved range_length/lo/st entries.
    for( i = 1; i < num_dims; ++i ) {
        kmp_int64 iter, ln;
        kmp_int32 j = i * 4;
        ln = pr_buf->th_doacross_info[j + 1];
        lo = pr_buf->th_doacross_info[j + 2];
        st = pr_buf->th_doacross_info[j + 4];
        if( st == 1 ) {
            iter = vec[i] - lo;
        } else if( st > 0 ) {
            iter = (kmp_uint64)(vec[i] - lo) / st;
        } else { // st < 0
            iter = (kmp_uint64)(lo - vec[i]) / (-st);
        }
        iter_number = iter + ln * iter_number;
    }
    shft = iter_number % 32; // use 32-bit granularity
    iter_number >>= 5; // divided by 32
    flag = 1 << shft;
    // Only issue the atomic OR if the bit is not already set (cheap read first).
    if( (flag & pr_buf->th_doacross_flags[iter_number]) == 0 )
        KMP_TEST_THEN_OR32( (kmp_int32*)&pr_buf->th_doacross_flags[iter_number], (kmp_int32)flag );
    KA_TRACE(20,("__kmpc_doacross_post() exit: T#%d iter %lld posted\n",
                 gtid, (iter_number<<5)+shft));
}
/*
 * Finalize a doacross loop for the calling thread.  Atomically increments the
 * shared done-counter whose address was saved at th_doacross_info[1]; the
 * last thread of the team frees the shared iteration-flag array and advances
 * the shared dispatch-buffer index so the buffer can be re-used by a later
 * loop.  Every thread then frees its private loop description (the private
 * buffer index itself is kept forever).
 */
void
__kmpc_doacross_fini(ident_t *loc, int gtid)
{
    kmp_int64 num_done;
    kmp_info_t *th = __kmp_threads[gtid];
    kmp_team_t *team = th->th.th_team;
    kmp_disp_t *pr_buf = th->th.th_dispatch;
    KA_TRACE(20,("__kmpc_doacross_fini() enter: called T#%d\n", gtid));
    if( team->t.t_serialized ) {
        KA_TRACE(20,("__kmpc_doacross_fini() exit: serialized team %p\n", team));
        return; // nothing to do
    }
    num_done = KMP_TEST_THEN_INC64((kmp_int64*)pr_buf->th_doacross_info[1]) + 1;
    if( num_done == th->th.th_team_nproc ) {
        // we are the last thread, need to free shared resources
        int idx = pr_buf->th_doacross_buf_idx - 1; // undo the increment done at init
        dispatch_shared_info_t *sh_buf = &team->t.t_disp_buffer[idx % __kmp_dispatch_num_buffers];
        KMP_DEBUG_ASSERT(pr_buf->th_doacross_info[1] == (kmp_int64)&sh_buf->doacross_num_done);
        KMP_DEBUG_ASSERT(num_done == (kmp_int64)sh_buf->doacross_num_done);
        KMP_DEBUG_ASSERT(idx == sh_buf->doacross_buf_idx);
        __kmp_thread_free(th, (void*)sh_buf->doacross_flags);
        sh_buf->doacross_flags = NULL;
        sh_buf->doacross_num_done = 0;
        sh_buf->doacross_buf_idx += __kmp_dispatch_num_buffers; // free buffer for future re-use
    }
    // free private resources (need to keep buffer index forever)
    __kmp_thread_free(th, (void*)pr_buf->th_doacross_info);
    pr_buf->th_doacross_info = NULL;
    KA_TRACE(20,("__kmpc_doacross_fini() exit: T#%d\n", gtid));
}
#endif
// end of file //
|
core_cgemm.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zgemm.c, normal z -> c, Fri Sep 28 17:38:19 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"
/***************************************************************************//**
*
* @ingroup core_gemm
*
* Performs one of the matrix-matrix operations
*
* \f[ C = \alpha [op( A )\times op( B )] + \beta C, \f]
*
* where op( X ) is one of:
* \f[ op( X ) = X, \f]
* \f[ op( X ) = X^T, \f]
* \f[ op( X ) = X^H, \f]
*
* alpha and beta are scalars, and A, B and C are matrices, with op( A )
* an m-by-k matrix, op( B ) a k-by-n matrix and C an m-by-n matrix.
*
*******************************************************************************
*
* @param[in] transa
* - PlasmaNoTrans: A is not transposed,
* - PlasmaTrans: A is transposed,
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] transb
* - PlasmaNoTrans: B is not transposed,
* - PlasmaTrans: B is transposed,
* - PlasmaConjTrans: B is conjugate transposed.
*
* @param[in] m
* The number of rows of the matrix op( A ) and of the matrix C.
* m >= 0.
*
* @param[in] n
* The number of columns of the matrix op( B ) and of the matrix C.
* n >= 0.
*
* @param[in] k
* The number of columns of the matrix op( A ) and the number of rows
* of the matrix op( B ). k >= 0.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* An lda-by-ka matrix, where ka is k when transa = PlasmaNoTrans,
* and is m otherwise.
*
* @param[in] lda
* The leading dimension of the array A.
* When transa = PlasmaNoTrans, lda >= max(1,m),
* otherwise, lda >= max(1,k).
*
* @param[in] B
* An ldb-by-kb matrix, where kb is n when transb = PlasmaNoTrans,
* and is k otherwise.
*
* @param[in] ldb
* The leading dimension of the array B.
* When transb = PlasmaNoTrans, ldb >= max(1,k),
* otherwise, ldb >= max(1,n).
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] C
* An ldc-by-n matrix. On exit, the array is overwritten by the m-by-n
* matrix ( alpha*op( A )*op( B ) + beta*C ).
*
* @param[in] ldc
* The leading dimension of the array C. ldc >= max(1,m).
*
******************************************************************************/
// Sequential tile kernel: forwards directly to CBLAS cgemm, computing
// C = alpha*op(A)*op(B) + beta*C in column-major layout.  Declared weak so a
// tuned implementation can override it at link time.
__attribute__((weak))
void plasma_core_cgemm(plasma_enum_t transa, plasma_enum_t transb,
                       int m, int n, int k,
                       plasma_complex32_t alpha, const plasma_complex32_t *A, int lda,
                                                 const plasma_complex32_t *B, int ldb,
                       plasma_complex32_t beta,        plasma_complex32_t *C, int ldc)
{
    // PLASMA trans constants are numerically compatible with CBLAS_TRANSPOSE.
    CBLAS_TRANSPOSE ta = (CBLAS_TRANSPOSE)transa;
    CBLAS_TRANSPOSE tb = (CBLAS_TRANSPOSE)transb;
    cblas_cgemm(CblasColMajor, ta, tb,
                m, n, k,
                CBLAS_SADDR(alpha), A, lda, B, ldb,
                CBLAS_SADDR(beta),  C, ldc);
}
/******************************************************************************/
/******************************************************************************/
// OpenMP-task wrapper around plasma_core_cgemm(): registers A and B as inputs
// and C as input/output with the runtime dependence system, and skips the
// kernel when the sequence has already recorded an error.
void plasma_core_omp_cgemm(
    plasma_enum_t transa, plasma_enum_t transb,
    int m, int n, int k,
    plasma_complex32_t alpha, const plasma_complex32_t *A, int lda,
                              const plasma_complex32_t *B, int ldb,
    plasma_complex32_t beta,        plasma_complex32_t *C, int ldc,
    plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Stored column counts of A and B depend on whether they are transposed.
    int ak = (transa == PlasmaNoTrans) ? k : m;
    int bk = (transb == PlasmaNoTrans) ? n : k;
    #pragma omp task depend(in:A[0:lda*ak]) \
                     depend(in:B[0:ldb*bk]) \
                     depend(inout:C[0:ldc*n])
    {
        if (sequence->status == PlasmaSuccess)
            plasma_core_cgemm(transa, transb,
                              m, n, k,
                              alpha, A, lda,
                                     B, ldb,
                              beta,  C, ldc);
    }
}
|
level.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
//------------------------------------------------------------------------------------------------------------------------------
#ifdef USE_MPI
#include <mpi.h>
#endif
#ifdef _OPENMP
#include <omp.h>
#endif
//------------------------------------------------------------------------------------------------------------------------------
#include "level.h"
#include "operators.h"
//------------------------------------------------------------------------------------------------------------------------------
// Debug dump of one communicator for a given rank and level.  printSendRecv is
// a bit mask: 0x1 prints the send side (neighbor ranks, message sizes, buffer
// addresses and the pack block list blocks[0]); 0x2 prints the local-exchange
// block list blocks[1]; 0x4 prints the receive side (neighbor ranks, sizes,
// buffer addresses and the unpack block list blocks[2]).  Each block is shown
// as "IxJxK from <read i j k jStride kStride> to <write i j k jStride kStride>".
void print_communicator(int printSendRecv, int rank, int level, communicator_type *comm){
    int i;
    printf("rank=%2d level=%d ",rank,level);
    if(printSendRecv & 0x1){
        // send side: one entry per neighbor this rank sends to
        printf("num_sends=%2d ",comm->num_sends);
        printf("send_ranks=[ ");for(i=0;i<comm->num_sends;i++)printf("%2d ",comm->send_ranks[i]);printf("] ");
        printf("send_sizes=[ ");for(i=0;i<comm->num_sends;i++)printf("%2d ",comm->send_sizes[i]);printf("] ");
        printf("send_buffers=[ ");for(i=0;i<comm->num_sends;i++)printf("%08lx ",(uint64_t)comm->send_buffers[i]);printf("] ");
        for(i=0;i<comm->num_blocks[0];i++)printf("[ %dx%dx%d from %d %d %d %d %d to %d %d %d %d %d ] ",comm->blocks[0][i].dim.i,comm->blocks[0][i].dim.j,comm->blocks[0][i].dim.k,comm->blocks[0][i].read.i,comm->blocks[0][i].read.j,comm->blocks[0][i].read.k,comm->blocks[0][i].read.jStride,comm->blocks[0][i].read.kStride,comm->blocks[0][i].write.i,comm->blocks[0][i].write.j,comm->blocks[0][i].write.k,comm->blocks[0][i].write.jStride,comm->blocks[0][i].write.kStride);
        printf("\n");
    }
    if(printSendRecv & 0x2){
        // local exchanges: box-to-box copies that stay on this rank
        for(i=0;i<comm->num_blocks[1];i++)printf("[ %dx%dx%d from %d %d %d %d %d to %d %d %d %d %d ] ",comm->blocks[1][i].dim.i,comm->blocks[1][i].dim.j,comm->blocks[1][i].dim.k,comm->blocks[1][i].read.i,comm->blocks[1][i].read.j,comm->blocks[1][i].read.k,comm->blocks[1][i].read.jStride,comm->blocks[1][i].read.kStride,comm->blocks[1][i].write.i,comm->blocks[1][i].write.j,comm->blocks[1][i].write.k,comm->blocks[1][i].write.jStride,comm->blocks[1][i].write.kStride);
        printf("\n");
    }
    if(printSendRecv & 0x4){
        // receive side: one entry per neighbor this rank receives from
        printf("num_recvs=%2d ",comm->num_recvs);
        printf("recv_ranks=[ ");for(i=0;i<comm->num_recvs;i++)printf("%2d ",comm->recv_ranks[i]);printf("] ");
        printf("recv_sizes=[ ");for(i=0;i<comm->num_recvs;i++)printf("%2d ",comm->recv_sizes[i]);printf("] ");
        printf("recv_buffers=[ ");for(i=0;i<comm->num_recvs;i++)printf("%08lx ",(uint64_t)comm->recv_buffers[i]);printf("] ");
        for(i=0;i<comm->num_blocks[2];i++)printf("[ %dx%dx%d from %d %d %d %d %d to %d %d %d %d %d ] ",comm->blocks[2][i].dim.i,comm->blocks[2][i].dim.j,comm->blocks[2][i].dim.k,comm->blocks[2][i].read.i,comm->blocks[2][i].read.j,comm->blocks[2][i].read.k,comm->blocks[2][i].read.jStride,comm->blocks[2][i].read.kStride,comm->blocks[2][i].write.i,comm->blocks[2][i].write.j,comm->blocks[2][i].write.k,comm->blocks[2][i].write.jStride,comm->blocks[2][i].write.kStride);
        printf("\n");
    }
    fflush(stdout); // flush so per-rank output interleaves promptly
}
//------------------------------------------------------------------------------------------------------------------------------
// Describes one ghost-zone exchange pairing between a sending and a receiving
// box; used while assembling the MPI communication lists.
typedef struct {
    int sendRank;   // rank of the sending process
    int sendBoxID;  // global ID of the sending box
    int sendBox;    // local index of the sending box
    int sendDir;    // direction the data is sent in
    int recvRank;   // rank of the receiving process
    int recvBoxID;  // global ID of the receiving box
    int recvBox;    // local index of the receiving box
} GZ_type;
// qsort comparator: orders exchanges by sendRank, then sendBoxID, then sendDir
// (the convention used when packing MPI buffers).
int qsortGZ(const void *a, const void*b){
    const GZ_type *x = (const GZ_type*)a;
    const GZ_type *y = (const GZ_type*)b;
    if(x->sendRank  != y->sendRank )return (x->sendRank  < y->sendRank ) ? -1 : 1;
    if(x->sendBoxID != y->sendBoxID)return (x->sendBoxID < y->sendBoxID) ? -1 : 1;
    if(x->sendDir   != y->sendDir  )return (x->sendDir   < y->sendDir  ) ? -1 : 1;
    return(0);
}
// qsort comparator for ints, ascending order.
int qsortInt(const void *a, const void *b){
    int x = *(const int*)a;
    int y = *(const int*)b;
    if(x == y)return(0);
    return (x < y) ? -1 : 1;
}
//------------------------------------------------------------------------------------------------------------------------------
// should implement a 3D hilbert curve on non pow2 (but cubical) domain sizes
//void decompose_level_hilbert(int *rank_of_box, int jStride, int kStride, int ilo, int jlo, int klo, int idim, int jdim, int kdim, int rank_lo, int ranks){
//}
//------------------------------------------------------------------------------------------------------------------------------
// Simple lexicographical (i-j-k ordered) decomposition of an idim x jdim x kdim
// grid of boxes among `ranks` processes: box b is owned by floor(ranks*b/boxes),
// which gives each rank a contiguous, near-equal slab of boxes.
void decompose_level_lex(int *rank_of_box, int idim, int jdim, int kdim, int ranks){
    const int boxes = idim*jdim*kdim;
    int b;
    // Lexicographic box index b == k*jdim*idim + j*idim + i, so a flat loop
    // over b visits exactly the same boxes in the same order as the triple loop.
    // The product ranks*b can exceed INT_MAX, so scale in 64-bit arithmetic.
    for(b=0;b<boxes;b++){
        rank_of_box[b] = (int)( ((uint64_t)ranks*(uint64_t)b) / (uint64_t)boxes );
    }
}
//---------------------------------------------------------------------------------------------------------------------------------------------------
// Recursively decompose an idim x jdim x kdim sub-grid of boxes among `ranks`
// processes, writing owners into rank_of_box (global box index uses jStride/
// kStride).  First tries to split the longest dimension by a small prime that
// divides both that dimension and the rank count (perfectly balanced slabs);
// otherwise falls back to plain bisection with a proportional split of ranks.
void decompose_level_bisection_special(int *rank_of_box, int jStride, int kStride, int ilo, int jlo, int klo, int idim, int jdim, int kdim, int rank_lo, int ranks){
    // recursive bisection (or prime-section) of the domain
    // can lead to imbalance unless the number of processes and number of boxes per process are chosen well
    #define numPrimes 13
    //int primes[numPrimes] = {41,37,31,29,23,19,17,13,11,7,5,3,2};
    int primes[numPrimes] = {2,3,5,7,11,13,17,19,23,29,31,37,41};
    int i,j,k,p,f,ff;
    // base case, no further recursion... every box in this sub-grid gets rank_lo
    if( (ranks==1)|| ((idim==1)&&(jdim==1)&&(kdim==1)) ){
        for(i=ilo;i<ilo+idim;i++){
        for(j=jlo;j<jlo+jdim;j++){
        for(k=klo;k<klo+kdim;k++){
            int b = i + j*jStride + k*kStride;
            rank_of_box[b] = rank_lo;
        }}}
        return;
    }
    // special cases for perfectly matched problem sizes with numbers of processes (but not powers of 2)...
    // (split the longest dimension into f equal slabs, each slab getting ranks/f processes)
    for(p=0;p<numPrimes;p++){
        f=primes[p];
        if( (kdim>=idim)&&(kdim>=jdim) ){if( (kdim%f==0) && (ranks%f==0) ){for(ff=0;ff<f;ff++)decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo,jlo,klo+ff*kdim/f,idim,jdim,kdim/f,rank_lo+ff*ranks/f,ranks/f);return;}}
        if( (jdim>=idim)&&(jdim>=kdim) ){if( (jdim%f==0) && (ranks%f==0) ){for(ff=0;ff<f;ff++)decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo,jlo+ff*jdim/f,klo,idim,jdim/f,kdim,rank_lo+ff*ranks/f,ranks/f);return;}}
        if( (idim>=jdim)&&(idim>=kdim) ){if( (idim%f==0) && (ranks%f==0) ){for(ff=0;ff<f;ff++)decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo+ff*idim/f,jlo,klo,idim/f,jdim,kdim,rank_lo+ff*ranks/f,ranks/f);return;}}
    }
    // try and bisect the domain in the i-dimension
    if( (idim>=jdim)&&(idim>=kdim) ){
        int dim0 = (int)(0.5*(double)idim + 0.50); // half the dimension, rounded up for odd sizes
        int dim1 = idim-dim0;
        int r0 = (int)( 0.5 + (double)ranks*(double)dim0/(double)idim ); // ranks proportional to sub-volume
        int r1 = ranks-r0;
        decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo     ,jlo,klo,dim0,jdim,kdim,rank_lo   ,r0); // lo
        decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo+dim0,jlo,klo,dim1,jdim,kdim,rank_lo+r0,r1); // hi
        return;
    }
    // try and bisect the domain in the j-dimension
    if( (jdim>=idim)&&(jdim>=kdim) ){
        int dim0 = (int)(0.5*(double)jdim + 0.50);
        int dim1 = jdim-dim0;
        int r0 = (int)( 0.5 + (double)ranks*(double)dim0/(double)jdim );
        int r1 = ranks-r0;
        decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo,jlo     ,klo,idim,dim0,kdim,rank_lo   ,r0); // lo
        decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo,jlo+dim0,klo,idim,dim1,kdim,rank_lo+r0,r1); // hi
        return;
    }
    // try and bisect the domain in the k-dimension
    if( (kdim>=idim)&&(kdim>=jdim) ){
        int dim0 = (int)(0.5*(double)kdim + 0.50);
        int dim1 = kdim-dim0;
        int r0 = (int)( 0.5 + (double)ranks*(double)dim0/(double)kdim );
        int r1 = ranks-r0;
        decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo,jlo,klo     ,idim,jdim,dim0,rank_lo   ,r0); // lo
        decompose_level_bisection_special(rank_of_box,jStride,kStride,ilo,jlo,klo+dim0,idim,jdim,dim1,rank_lo+r0,r1); // hi
        return;
    }
    // unreachable for positive dimensions (one of the three cases above must hold)
    fprintf(stderr,"decompose_level_bisection_special failed !!!\n");exit(0);
}
//---------------------------------------------------------------------------------------------------------------------------------------------------
// Recursively bisect the (idim x jdim x kdim) box grid, always splitting the
// longest dimension, while tracking each box's position (sfc_offset) along the
// resulting space-filling-curve ordering.  At the single-box base case the box
// is assigned rank floor(ranks*sfc_offset/sfc_max_length), which partitions the
// curve into contiguous, near-equal pieces.
void decompose_level_bisection(int *rank_of_box, int jStride, int kStride, int ilo, int jlo, int klo, int idim, int jdim, int kdim, int ranks, int sfc_offset, int sfc_max_length){
    // base case: a single box
    if( (idim==1) && (jdim==1) && (kdim==1) ){
        int b = ilo + jlo*jStride + klo*kStride;
        // 64-bit arithmetic: ranks*sfc_offset may overflow a 32-bit int
        rank_of_box[b] = ((uint64_t)ranks*(uint64_t)sfc_offset)/(uint64_t)sfc_max_length;
        return;
    }
    // i is (one of) the longest dimension(s): split it roughly in half
    if( (idim>=jdim) && (idim>=kdim) ){
        int half = (int)(0.5*(double)idim + 0.50); // rounds up for odd sizes
        int rest = idim - half;
        int curve_advance = half*jdim*kdim; // curve positions consumed by the low half
        decompose_level_bisection(rank_of_box,jStride,kStride,ilo     ,jlo,klo,half,jdim,kdim,ranks,sfc_offset              ,sfc_max_length); // lo half
        decompose_level_bisection(rank_of_box,jStride,kStride,ilo+half,jlo,klo,rest,jdim,kdim,ranks,sfc_offset+curve_advance,sfc_max_length); // hi half
        return;
    }
    // j is the longest dimension
    if( (jdim>=idim) && (jdim>=kdim) ){
        int half = (int)(0.5*(double)jdim + 0.50);
        int rest = jdim - half;
        int curve_advance = idim*half*kdim;
        decompose_level_bisection(rank_of_box,jStride,kStride,ilo,jlo     ,klo,idim,half,kdim,ranks,sfc_offset              ,sfc_max_length); // lo half
        decompose_level_bisection(rank_of_box,jStride,kStride,ilo,jlo+half,klo,idim,rest,kdim,ranks,sfc_offset+curve_advance,sfc_max_length); // hi half
        return;
    }
    // k is the longest dimension
    if( (kdim>=idim) && (kdim>=jdim) ){
        int half = (int)(0.5*(double)kdim + 0.50);
        int rest = kdim - half;
        int curve_advance = idim*jdim*half;
        decompose_level_bisection(rank_of_box,jStride,kStride,ilo,jlo,klo     ,idim,jdim,half,ranks,sfc_offset              ,sfc_max_length); // lo half
        decompose_level_bisection(rank_of_box,jStride,kStride,ilo,jlo,klo+half,idim,jdim,rest,ranks,sfc_offset+curve_advance,sfc_max_length); // hi half
        return;
    }
    // unreachable for positive dimensions
    fprintf(stderr,"decompose_level_bisection failed !!!\n");exit(0);
}
//---------------------------------------------------------------------------------------------------------------------------------------------------
// Print (on rank 0 only) an ASCII picture of the box-to-rank assignment:
// one j-row per line, with each row indented by j spaces so the k-planes
// read as a skewed pseudo-3D stack; (i,j,k)=(0,0,0) is the bottom-left corner.
void print_decomposition(level_type *level){
    if(level->my_rank!=0)return; // only rank 0 prints
    printf("\n");
    int i,j,k;
    int jStride = level->boxes_in.i;                   // boxes per j-row
    int kStride = level->boxes_in.i*level->boxes_in.j; // boxes per k-plane
    for(k=level->boxes_in.k-1;k>=0;k--){ // (i,j,k)=(0,0,0) is bottom left corner
        for(j=level->boxes_in.j-1;j>=0;j--){ // (i,j)=(0,0) is bottom left corner
            for(i=0;i<j;i++)printf(" "); // skew each row by j to fake depth
            for(i=0;i<level->boxes_in.i;i++){
                int b = i + j*jStride + k*kStride;
                printf("%4d ",level->rank_of_box[b]);
            }printf("\n");
        }printf("\n\n");
    }
    fflush(stdout);
}
//------------------------------------------------------------------------------------------------------------------------------
#ifndef BLOCK_LIST_MIN_SIZE
#define BLOCK_LIST_MIN_SIZE 1000
#endif
// Append block-copy descriptors covering a dim_i x dim_j x dim_k region to a
// growable list (*blocks, *allocated_blocks, *num_blocks), tiling the region
// into pieces of at most blockcopy_tile_i/j/k in each dimension.  Smaller
// tiles mean more blockCopies and therefore more thread-level parallelism in
// the ghost-zone exchange.  The list doubles in capacity on demand, starting
// at BLOCK_LIST_MIN_SIZE entries.
void append_block_to_list(blockCopy_type ** blocks, int *allocated_blocks, int *num_blocks,
    int dim_i, int dim_j, int dim_k,
    int  read_box, double*  read_ptr, int  read_i, int  read_j, int  read_k, int  read_jStride, int  read_kStride, int  read_scale,
    int write_box, double* write_ptr, int write_i, int write_j, int write_k, int write_jStride, int write_kStride, int write_scale,
    int blockcopy_tile_i, int blockcopy_tile_j, int blockcopy_tile_k,
    int subtype
){
    int ii,jj,kk;
    // Take a dim_j x dim_k iteration space and tile it into smaller faces of size blockcopy_tile_j x blockcopy_tile_k
    // This increases the number of blockCopies in the ghost zone exchange and thereby increases the thread-level parallelism
    // FIX... move from lexicographical ordering of tiles to recursive (e.g. z-mort)
    // read_/write_scale are used to stride appropriately when read and write loop iterations spaces are different
    // ghostZone:     read_scale=1, write_scale=1
    // interpolation: read_scale=1, write_scale=2
    // restriction:   read_scale=2, write_scale=1
    // FIX... dim_i,j,k -> read_dim_i,j,k, write_dim_i,j,k
    for(kk=0;kk<dim_k;kk+=blockcopy_tile_k){
    for(jj=0;jj<dim_j;jj+=blockcopy_tile_j){
    for(ii=0;ii<dim_i;ii+=blockcopy_tile_i){
        // clip the last tile in each dimension to the remaining extent
        int dim_k_mod = dim_k-kk;if(dim_k_mod>blockcopy_tile_k)dim_k_mod=blockcopy_tile_k;
        int dim_j_mod = dim_j-jj;if(dim_j_mod>blockcopy_tile_j)dim_j_mod=blockcopy_tile_j;
        int dim_i_mod = dim_i-ii;if(dim_i_mod>blockcopy_tile_i)dim_i_mod=blockcopy_tile_i;
        // grow the list if full: first allocation gets BLOCK_LIST_MIN_SIZE, then double
        if(*num_blocks >= *allocated_blocks){
            int oldSize = *allocated_blocks;
            if(*allocated_blocks == 0){*allocated_blocks=BLOCK_LIST_MIN_SIZE;*blocks=(blockCopy_type*) malloc(                  (*allocated_blocks)*sizeof(blockCopy_type));}
            else{*allocated_blocks*=2;                                       *blocks=(blockCopy_type*)realloc((void*)(*blocks),(*allocated_blocks)*sizeof(blockCopy_type));}
            if(*blocks == NULL){fprintf(stderr,"realloc failed - append_block_to_list (%d -> %d)\n",oldSize,*allocated_blocks);exit(0);}
        }
        // record this tile; read/write offsets are scaled by read_/write_scale
        (*blocks)[*num_blocks].subtype       = subtype;
        (*blocks)[*num_blocks].dim.i         = dim_i_mod;
        (*blocks)[*num_blocks].dim.j         = dim_j_mod;
        (*blocks)[*num_blocks].dim.k         = dim_k_mod;
        (*blocks)[*num_blocks].read.box      = read_box;
        (*blocks)[*num_blocks].read.ptr      = read_ptr;
        (*blocks)[*num_blocks].read.i        = read_i + read_scale*ii;
        (*blocks)[*num_blocks].read.j        = read_j + read_scale*jj;
        (*blocks)[*num_blocks].read.k        = read_k + read_scale*kk;
        (*blocks)[*num_blocks].read.jStride  = read_jStride;
        (*blocks)[*num_blocks].read.kStride  = read_kStride;
        (*blocks)[*num_blocks].write.box     = write_box;
        (*blocks)[*num_blocks].write.ptr     = write_ptr;
        (*blocks)[*num_blocks].write.i       = write_i + write_scale*ii;
        (*blocks)[*num_blocks].write.j       = write_j + write_scale*jj;
        (*blocks)[*num_blocks].write.k       = write_k + write_scale*kk;
        (*blocks)[*num_blocks].write.jStride = write_jStride;
        (*blocks)[*num_blocks].write.kStride = write_kStride;
        (*num_blocks)++;
    }}}
}
//----------------------------------------------------------------------------------------------------------------------------------------------------
// create a mini program that traverses the domain boundary intersecting with this process's boxes
// This includes faces, corners, and edges
// Build a mini program (list of blockCopy descriptors) that traverses the
// domain-boundary ghost regions intersecting with this process's boxes.
// This includes faces, corners, and edges.  justFaces selects which of the 26
// neighbor regions are considered (faces only vs. all), and indexes the
// per-variant block lists in level->boundary_condition.
void build_boundary_conditions(level_type *level, int justFaces){
    level->boundary_condition.blocks[justFaces]           = NULL; // default for periodic (i.e. no BC's)
    level->boundary_condition.num_blocks[justFaces]       = 0;    // default for periodic (i.e. no BC's)
    level->boundary_condition.allocated_blocks[justFaces] = 0;    // default for periodic (i.e. no BC's)
    if(level->boundary_condition.type == BC_PERIODIC)return;
    // classification of the 26 neighbor directions (index 13 is the box itself)
    int   faces[27] = {0,0,0,0,1,0,0,0,0,  0,1,0,1,0,1,0,1,0,  0,0,0,0,1,0,0,0,0};
    int   edges[27] = {0,1,0,1,0,1,0,1,0,  1,0,1,0,0,0,1,0,1,  0,1,0,1,0,1,0,1,0};
    int corners[27] = {1,0,1,0,0,0,1,0,1,  0,0,0,0,0,0,0,0,0,  1,0,1,0,0,0,1,0,1};
    int box, di,dj,dk;
    for(box=0;box<level->num_my_boxes;box++){ // traverse my list of boxes...
    for(dk=-1;dk<=1;dk++){                    // for each box, examine its 26 neighbors...
    for(dj=-1;dj<=1;dj++){
    for(di=-1;di<=1;di++){
        int dir = 13+di+3*dj+9*dk; // linearized neighbor direction, 0..26
        // determine if this region (box's di,dj,dk ghost zone) is outside of the domain
        int regionIsOutside=0;
        int normal = 13; // normal effectively defines the normal vector to the domain for this region...
        // this addition is necessary for linearly interpolated BC's as a box's corner is not necessarily a domain's corner
        int myBox_i = level->my_boxes[box].low.i / level->box_dim;
        int myBox_j = level->my_boxes[box].low.j / level->box_dim;
        int myBox_k = level->my_boxes[box].low.k / level->box_dim;
        int neighborBox_i = ( myBox_i + di );
        int neighborBox_j = ( myBox_j + dj );
        int neighborBox_k = ( myBox_k + dk );
        // a neighbor index outside [0, boxes_in) means the region lies beyond
        // the domain boundary; accumulate the outward normal accordingly
        if( neighborBox_i <  0                ){regionIsOutside=1;normal-=1;}
        if( neighborBox_j <  0                ){regionIsOutside=1;normal-=3;}
        if( neighborBox_k <  0                ){regionIsOutside=1;normal-=9;}
        if( neighborBox_i >= level->boxes_in.i){regionIsOutside=1;normal+=1;}
        if( neighborBox_j >= level->boxes_in.j){regionIsOutside=1;normal+=3;}
        if( neighborBox_k >= level->boxes_in.k){regionIsOutside=1;normal+=9;}
        // calculate ghost zone region size and coordinates relative to the first non-ghost zone element (0,0,0)
        int block_i=-1,block_j=-1,block_k=-1;
        int   dim_i=-1,  dim_j=-1,  dim_k=-1;
        switch(di){
            case -1:dim_i=level->box_ghosts;block_i=0-level->box_ghosts;break;
            case  0:dim_i=level->box_dim;   block_i=0;                  break;
            case  1:dim_i=level->box_ghosts;block_i=0+level->box_dim;   break;
        }
        switch(dj){
            case -1:dim_j=level->box_ghosts;block_j=0-level->box_ghosts;break;
            case  0:dim_j=level->box_dim;   block_j=0;                  break;
            case  1:dim_j=level->box_ghosts;block_j=0+level->box_dim;   break;
        }
        switch(dk){
            case -1:dim_k=level->box_ghosts;block_k=0-level->box_ghosts;break;
            case  0:dim_k=level->box_dim;   block_k=0;                  break;
            case  1:dim_k=level->box_ghosts;block_k=0+level->box_dim;   break;
        }
        // in the faces-only variant, skip edge/corner regions
        if(justFaces && (faces[dir]==0))regionIsOutside=0;
        if(regionIsOutside){
            append_block_to_list(&(level->boundary_condition.blocks[justFaces]),&(level->boundary_condition.allocated_blocks[justFaces]),&(level->boundary_condition.num_blocks[justFaces]),
                /* dim.i         = */ dim_i,
                /* dim.j         = */ dim_j,
                /* dim.k         = */ dim_k,
                /* read.box      = */ box,
                /* read.ptr      = */ NULL,
                /* read.i        = */ block_i,
                /* read.j        = */ block_j,
                /* read.k        = */ block_k,
                /* read.jStride  = */ level->my_boxes[box].jStride,
                /* read.kStride  = */ level->my_boxes[box].kStride,
                /* read.scale    = */ 1,
                /* write.box     = */ box,
                /* write.ptr     = */ NULL,
                /* write.i       = */ block_i,
                /* write.j       = */ block_j,
                /* write.k       = */ block_k,
                /* write.jStride = */ level->my_boxes[box].jStride,
                /* write.kStride = */ level->my_boxes[box].kStride,
                /* write.scale   = */ 1,
                /* blockcopy_i   = */ BLOCKCOPY_TILE_I < level->box_ghosts ? level->box_ghosts : BLOCKCOPY_TILE_I, // BC's may never tile smaller than the ghost zone depth
                /* blockcopy_j   = */ BLOCKCOPY_TILE_J < level->box_ghosts ? level->box_ghosts : BLOCKCOPY_TILE_J, // BC's may never tile smaller than the ghost zone depth
                /* blockcopy_k   = */ BLOCKCOPY_TILE_K < level->box_ghosts ? level->box_ghosts : BLOCKCOPY_TILE_K, // BC's may never tile smaller than the ghost zone depth
                /* subtype       = */ normal
            );
    }}}}}
}
//----------------------------------------------------------------------------------------------------------------------------------------------------
// create a mini program that packs data into MPI send buffers, exchanges local data, and unpacks the MPI recv buffers
// broadly speaking...
// 1. traverse my list of Boxes and create a list of ghosts that must be sent
// 2. create a list of neighbors to send to
// 3. allocate and populate the pack list and allocate the send buffers
// 4. allocate and populate the local exchange list
// 5. traverse my list of Boxes and create a list of ghosts that must be received
// 6. create a list of neighbors to receive from
// 7. allocate and populate the unpack list and allocate the recv buffers
//
// thus a ghost zone exchange is
// 1. prepost a Irecv for each MPI recv buffer (1 per neighbor)
// 2. traverse the pack list
// 3. post the Isends for each MPI send buffer (1 per neighbor)
// 4. traverse the local copy list
// 5. waitall
// 6. traverse the unpack list
//
// / 24 25 26 /
// / 21 22 23 / (k+1)
// / 18 19 20 /
//
// / 15 16 17 /
// / 12 13 14 / (k)
// / 9 10 11 /
//
// / 6 7 8 /
// / 3 4 5 / (k-1)
// / 0 1 2 /
//
// Build the ghost-zone exchange communicator level->exchange_ghosts[justFaces]:
// the pack list (blocks[0]), local-exchange list (blocks[1]), unpack list
// (blocks[2]), the sorted send/recv rank lists, and the MPI send/recv buffers.
// justFaces==1 exchanges only the 6 face directions; justFaces==0 all 26.
void build_exchange_ghosts(level_type *level, int justFaces){
  int faces[27] = {0,0,0,0,1,0,0,0,0, 0,1,0,1,0,1,0,1,0, 0,0,0,0,1,0,0,0,0};
  int edges[27] = {0,1,0,1,0,1,0,1,0, 1,0,1,0,0,0,1,0,1, 0,1,0,1,0,1,0,1,0}; // NOTE(review): only faces[] is read in this routine
  int corners[27] = {1,0,1,0,0,0,1,0,1, 0,0,0,0,0,0,0,0,0, 1,0,1,0,0,0,1,0,1}; // NOTE(review): only faces[] is read in this routine
  // start from an empty communicator...
  level->exchange_ghosts[justFaces].num_recvs = 0;
  level->exchange_ghosts[justFaces].num_sends = 0;
  level->exchange_ghosts[justFaces].blocks[0] = NULL;
  level->exchange_ghosts[justFaces].blocks[1] = NULL;
  level->exchange_ghosts[justFaces].blocks[2] = NULL;
  level->exchange_ghosts[justFaces].num_blocks[0] = 0;
  level->exchange_ghosts[justFaces].num_blocks[1] = 0;
  level->exchange_ghosts[justFaces].num_blocks[2] = 0;
  level->exchange_ghosts[justFaces].allocated_blocks[0] = 0;
  level->exchange_ghosts[justFaces].allocated_blocks[1] = 0;
  level->exchange_ghosts[justFaces].allocated_blocks[2] = 0;
  // CommunicateThisDir[dir]==1 iff direction dir participates in the exchange
  // (never direction 13 == self; only faces when justFaces is set)
  int CommunicateThisDir[27];
  int n;for(n=0;n<27;n++)CommunicateThisDir[n]=1;CommunicateThisDir[13]=0;
  if(justFaces)for(n=0;n<27;n++)CommunicateThisDir[n]=faces[n];
  int sendBox,recvBox;
  int stage;
  int _rank;
  int ghost,numGhosts,numGhostsRemote;
  // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  // traverse my list of boxes and create lists of neighboring boxes and neighboring ranks
  GZ_type *ghostsToSend = (GZ_type*)malloc(26*level->num_my_boxes*sizeof(GZ_type)); // There are at most 26 neighbors per box.
  int *sendRanks = ( int*)malloc(26*level->num_my_boxes*sizeof( int)); // There are at most 26 neighbors per box.
  if(level->num_my_boxes>0){
  if(ghostsToSend == NULL){fprintf(stderr,"malloc failed - build_exchange_ghosts/ghostsToSend\n");exit(0);}
  if(sendRanks == NULL){fprintf(stderr,"malloc failed - build_exchange_ghosts/sendRanks \n");exit(0);}
  }
  numGhosts       = 0;
  numGhostsRemote = 0;
  for(sendBox=0;sendBox<level->num_my_boxes;sendBox++){
    int di,dj,dk;
    for(dk=-1;dk<=1;dk++){
    for(dj=-1;dj<=1;dj++){
    for(di=-1;di<=1;di++){
      int dir = 13+di+3*dj+9*dk;if(CommunicateThisDir[dir]){
      int myBoxID = level->my_boxes[sendBox].global_box_id;
      int myBox_i = level->my_boxes[sendBox].low.i / level->box_dim;
      int myBox_j = level->my_boxes[sendBox].low.j / level->box_dim;
      int myBox_k = level->my_boxes[sendBox].low.k / level->box_dim;
      int neighborBoxID = -1;
      if(level->boundary_condition.type == BC_PERIODIC){
        // periodic: neighbor indices wrap around the box grid
        int neighborBox_i = (  myBox_i + di + level->boxes_in.i) % level->boxes_in.i;
        int neighborBox_j = (  myBox_j + dj + level->boxes_in.j) % level->boxes_in.j;
        int neighborBox_k = (  myBox_k + dk + level->boxes_in.k) % level->boxes_in.k;
        neighborBoxID = neighborBox_i + neighborBox_j*level->boxes_in.i + neighborBox_k*level->boxes_in.i*level->boxes_in.j;
      }else{
        int neighborBox_i = (  myBox_i + di );
        int neighborBox_j = (  myBox_j + dj );
        int neighborBox_k = (  myBox_k + dk );
        if( (neighborBox_i>=0) && (neighborBox_i<level->boxes_in.i) &&
            (neighborBox_j>=0) && (neighborBox_j<level->boxes_in.j) &&
            (neighborBox_k>=0) && (neighborBox_k<level->boxes_in.k) ){  // i.e. the neighbor is a valid box
            neighborBoxID = neighborBox_i + neighborBox_j*level->boxes_in.i + neighborBox_k*level->boxes_in.i*level->boxes_in.j;
        }
      }
      if(neighborBoxID>=0){
      if( level->rank_of_box[neighborBoxID] != -1 ){ // i.e. the neighbor box actually exists (rank -1 marks an unassigned region)
        ghostsToSend[numGhosts].sendRank  = level->my_rank;
        ghostsToSend[numGhosts].sendBoxID = myBoxID;
        ghostsToSend[numGhosts].sendBox   = sendBox;
        ghostsToSend[numGhosts].sendDir   = dir;
        ghostsToSend[numGhosts].recvRank  = level->rank_of_box[neighborBoxID];
        ghostsToSend[numGhosts].recvBoxID = neighborBoxID;
        ghostsToSend[numGhosts].recvBox   = -1; // only resolved (below) when the receiver is on this rank
        if( level->rank_of_box[neighborBoxID] != level->my_rank ){
          sendRanks[numGhostsRemote++] = level->rank_of_box[neighborBoxID]; // remote receiver: remember its rank (deduplicated later)
        }else{
          int recvBox=0;while(level->my_boxes[recvBox].global_box_id!=neighborBoxID)recvBox++; // search my list of boxes for the appropriate recvBox index
          ghostsToSend[numGhosts].recvBox = recvBox;
        }
        numGhosts++;
      }}
    }}}}
  }
  // sort boxes by sendRank(==my rank) then by sendBoxID... ensures the sends and receive buffers are always sorted by sendBoxID...
  qsort(ghostsToSend,numGhosts       ,sizeof(GZ_type),qsortGZ );
  // sort the lists of neighboring ranks and remove duplicates...
  qsort(sendRanks   ,numGhostsRemote,sizeof(    int),qsortInt);
  int numSendRanks=0;_rank=-1;for(ghost=0;ghost<numGhostsRemote;ghost++)if(sendRanks[ghost] != _rank){_rank=sendRanks[ghost];sendRanks[numSendRanks++]=sendRanks[ghost];}
  // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  // in a two-stage process, traverse the list of ghosts and allocate the pack/local lists as well as the MPI buffers, and then populate the pack/local lists
  level->exchange_ghosts[justFaces].num_sends     =                  numSendRanks;
  level->exchange_ghosts[justFaces].send_ranks    =     (int*)malloc(numSendRanks*sizeof(int));
  level->exchange_ghosts[justFaces].send_sizes    =     (int*)malloc(numSendRanks*sizeof(int));
  level->exchange_ghosts[justFaces].send_buffers  = (double**)malloc(numSendRanks*sizeof(double*));
  if(numSendRanks>0){
  if(level->exchange_ghosts[justFaces].send_ranks  ==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].send_ranks\n",justFaces);exit(0);}
  if(level->exchange_ghosts[justFaces].send_sizes  ==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].send_sizes\n",justFaces);exit(0);}
  if(level->exchange_ghosts[justFaces].send_buffers==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].send_buffers\n",justFaces);exit(0);}
  }
  level->exchange_ghosts[justFaces].blocks[0] = NULL;
  level->exchange_ghosts[justFaces].blocks[1] = NULL;
  level->exchange_ghosts[justFaces].num_blocks[0] = 0;
  level->exchange_ghosts[justFaces].num_blocks[1] = 0;
  level->exchange_ghosts[justFaces].allocated_blocks[0] = 0;
  level->exchange_ghosts[justFaces].allocated_blocks[1] = 0;
  for(stage=0;stage<=1;stage++){
    // stage=0... traverse the list and calculate the buffer sizes
    // stage=1... allocate MPI send buffers, traverse the list, and populate the pack/local lists...
    // (send_sizes[] doubles as the running offset into each buffer during stage 1,
    //  so it is reset to 0 at the top of every stage)
    int neighbor;
    for(neighbor=0;neighbor<numSendRanks;neighbor++){
      if(stage==1){
             level->exchange_ghosts[justFaces].send_buffers[neighbor] = (double*)malloc(level->exchange_ghosts[justFaces].send_sizes[neighbor]*sizeof(double));
          if(level->exchange_ghosts[justFaces].send_sizes[neighbor]>0)
          if(level->exchange_ghosts[justFaces].send_buffers[neighbor]==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].send_buffers[neighbor]\n",justFaces);exit(0);}
      memset(level->exchange_ghosts[justFaces].send_buffers[neighbor],                0,level->exchange_ghosts[justFaces].send_sizes[neighbor]*sizeof(double));
      }
      level->exchange_ghosts[justFaces].send_ranks[neighbor]=sendRanks[neighbor];
      level->exchange_ghosts[justFaces].send_sizes[neighbor]=0;
    }
    for(ghost=0;ghost<numGhosts;ghost++){
      int dim_i=-1, dim_j=-1, dim_k=-1;
      int  send_i=-1,send_j=-1,send_k=-1;
      int  recv_i=-1,recv_j=-1,recv_k=-1;
      // decode ghostsToSend[ghost].sendDir (direction sent) into di/dj/dk
      int di = ((ghostsToSend[ghost].sendDir % 3)  )-1;
      int dj = ((ghostsToSend[ghost].sendDir % 9)/3)-1;
      int dk = ((ghostsToSend[ghost].sendDir / 9)  )-1;
      switch(di){ // direction relative to sender
        case -1:send_i=0;                              dim_i=level->box_ghosts;recv_i=  level->box_dim;   break;
        case  0:send_i=0;                              dim_i=level->box_dim;   recv_i=0;                  break;
        case  1:send_i=level->box_dim-level->box_ghosts;dim_i=level->box_ghosts;recv_i=0-level->box_ghosts;break;
      }
      switch(dj){ // direction relative to sender
        case -1:send_j=0;                              dim_j=level->box_ghosts;recv_j=  level->box_dim;   break;
        case  0:send_j=0;                              dim_j=level->box_dim;   recv_j=0;                  break;
        case  1:send_j=level->box_dim-level->box_ghosts;dim_j=level->box_ghosts;recv_j=0-level->box_ghosts;break;
      }
      switch(dk){ // direction relative to sender
        case -1:send_k=0;                              dim_k=level->box_ghosts;recv_k=  level->box_dim;   break;
        case  0:send_k=0;                              dim_k=level->box_dim;   recv_k=0;                  break;
        case  1:send_k=level->box_dim-level->box_ghosts;dim_k=level->box_ghosts;recv_k=0-level->box_ghosts;break;
      }
      // determine if this ghost requires a pack or local exchange
      int LocalExchange; // 0 = pack list, 1 = local exchange list
      if(ghostsToSend[ghost].recvRank != level->my_rank){
        LocalExchange=0; // pack
        neighbor=0;while(level->exchange_ghosts[justFaces].send_ranks[neighbor] != ghostsToSend[ghost].recvRank)neighbor++; // linear search of the (small, sorted) rank list
      }else{
        LocalExchange=1; // local
        neighbor=-1; // sentinel: suppresses the send_sizes accumulation below
      }
      if(stage==1){
        if(LocalExchange) // append to the local exchange list...
        append_block_to_list(&(level->exchange_ghosts[justFaces].blocks[1]),&(level->exchange_ghosts[justFaces].allocated_blocks[1]),&(level->exchange_ghosts[justFaces].num_blocks[1]),
          /* dim.i         = */ dim_i,
          /* dim.j         = */ dim_j,
          /* dim.k         = */ dim_k,
          /* read.box      = */ ghostsToSend[ghost].sendBox,
          /* read.ptr      = */ NULL,
          /* read.i        = */ send_i,
          /* read.j        = */ send_j,
          /* read.k        = */ send_k,
          /* read.jStride  = */ level->my_boxes[ghostsToSend[ghost].sendBox].jStride,
          /* read.kStride  = */ level->my_boxes[ghostsToSend[ghost].sendBox].kStride,
          /* read.scale    = */ 1,
          /* write.box     = */ ghostsToSend[ghost].recvBox,
          /* write.ptr     = */ NULL,
          /* write.i       = */ recv_i,
          /* write.j       = */ recv_j,
          /* write.k       = */ recv_k,
          /* write.jStride = */ level->my_boxes[ghostsToSend[ghost].recvBox].jStride,
          /* write.kStride = */ level->my_boxes[ghostsToSend[ghost].recvBox].kStride,
          /* write.scale   = */ 1,
          /* blockcopy_i   = */ BLOCKCOPY_TILE_I, // default
          /* blockcopy_j   = */ BLOCKCOPY_TILE_J, // default
          /* blockcopy_k   = */ BLOCKCOPY_TILE_K, // default
          /* subtype       = */ 0
        );
        else // append to the MPI pack list...
        append_block_to_list(&(level->exchange_ghosts[justFaces].blocks[0]),&(level->exchange_ghosts[justFaces].allocated_blocks[0]),&(level->exchange_ghosts[justFaces].num_blocks[0]),
          /* dim.i         = */ dim_i,
          /* dim.j         = */ dim_j,
          /* dim.k         = */ dim_k,
          /* read.box      = */ ghostsToSend[ghost].sendBox,
          /* read.ptr      = */ NULL,
          /* read.i        = */ send_i,
          /* read.j        = */ send_j,
          /* read.k        = */ send_k,
          /* read.jStride  = */ level->my_boxes[ghostsToSend[ghost].sendBox].jStride,
          /* read.kStride  = */ level->my_boxes[ghostsToSend[ghost].sendBox].kStride,
          /* read.scale    = */ 1,
          /* write.box     = */ -1,
          /* write.ptr     = */ level->exchange_ghosts[justFaces].send_buffers[neighbor], // NOTE, 1. count _sizes, 2. allocate _buffers, 3. populate blocks
          /* write.i       = */ level->exchange_ghosts[justFaces].send_sizes[neighbor], // current offset in the MPI send buffer
          /* write.j       = */ 0,
          /* write.k       = */ 0,
          /* write.jStride = */ dim_i,       // contiguous block
          /* write.kStride = */ dim_i*dim_j, // contiguous block
          /* write.scale   = */ 1,
          /* blockcopy_i   = */ BLOCKCOPY_TILE_I, // default
          /* blockcopy_j   = */ BLOCKCOPY_TILE_J, // default
          /* blockcopy_k   = */ BLOCKCOPY_TILE_K, // default
          /* subtype       = */ 0
        );}
      if(neighbor>=0)level->exchange_ghosts[justFaces].send_sizes[neighbor]+=dim_i*dim_j*dim_k; // advance this rank's buffer offset/size (skipped for local exchanges)
    } // ghost for-loop
  } // stage for-loop
  // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  // free temporary storage...
  free(ghostsToSend);
  free(sendRanks);
  // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  // traverse my list of boxes and create lists of neighboring boxes and neighboring ranks (receive side)
  GZ_type *ghostsToRecv = (GZ_type*)malloc(26*level->num_my_boxes*sizeof(GZ_type)); // There are at most 26 neighbors per box.
  int *recvRanks = ( int*)malloc(26*level->num_my_boxes*sizeof( int)); // There are at most 26 neighbors per box.
  if(level->num_my_boxes>0){
  if(ghostsToRecv == NULL){fprintf(stderr,"malloc failed - build_exchange_ghosts/ghostsToRecv\n");exit(0);}
  if(recvRanks == NULL){fprintf(stderr,"malloc failed - build_exchange_ghosts/recvRanks \n");exit(0);}
  }
  numGhosts       = 0;
  numGhostsRemote = 0;
  for(recvBox=0;recvBox<level->num_my_boxes;recvBox++){
    int di,dj,dk;
    for(dk=-1;dk<=1;dk++){
    for(dj=-1;dj<=1;dj++){
    for(di=-1;di<=1;di++){
      int dir = 13+di+3*dj+9*dk;if(CommunicateThisDir[dir]){
      int myBoxID = level->my_boxes[recvBox].global_box_id;
      int myBox_i = level->my_boxes[recvBox].low.i / level->box_dim;
      int myBox_j = level->my_boxes[recvBox].low.j / level->box_dim;
      int myBox_k = level->my_boxes[recvBox].low.k / level->box_dim;
      int neighborBoxID = -1;
      if(level->boundary_condition.type == BC_PERIODIC){
        int neighborBox_i = (  myBox_i + di + level->boxes_in.i) % level->boxes_in.i;
        int neighborBox_j = (  myBox_j + dj + level->boxes_in.j) % level->boxes_in.j;
        int neighborBox_k = (  myBox_k + dk + level->boxes_in.k) % level->boxes_in.k;
        neighborBoxID = neighborBox_i + neighborBox_j*level->boxes_in.i + neighborBox_k*level->boxes_in.i*level->boxes_in.j;
      }else{
        int neighborBox_i = (  myBox_i + di );
        int neighborBox_j = (  myBox_j + dj );
        int neighborBox_k = (  myBox_k + dk );
        if( (neighborBox_i>=0) && (neighborBox_i<level->boxes_in.i) &&
            (neighborBox_j>=0) && (neighborBox_j<level->boxes_in.j) &&
            (neighborBox_k>=0) && (neighborBox_k<level->boxes_in.k) ){  // i.e. the neighbor is a valid box
            neighborBoxID = neighborBox_i + neighborBox_j*level->boxes_in.i + neighborBox_k*level->boxes_in.i*level->boxes_in.j;
        }
      }
      if(neighborBoxID>=0){
      if( (level->rank_of_box[neighborBoxID] != -1) && (level->rank_of_box[neighborBoxID] != level->my_rank) ){ // only remote senders contribute to the recv/unpack lists
        ghostsToRecv[numGhosts].sendRank  = level->rank_of_box[neighborBoxID];
        ghostsToRecv[numGhosts].sendBoxID = neighborBoxID;
        ghostsToRecv[numGhosts].sendBox   = -1;
        ghostsToRecv[numGhosts].sendDir   = 26-dir; // the sender's direction is the reverse of my receive direction
        ghostsToRecv[numGhosts].recvRank  = level->my_rank;
        ghostsToRecv[numGhosts].recvBoxID = myBoxID;
        ghostsToRecv[numGhosts].recvBox   = recvBox;
                     numGhosts++;
        recvRanks[numGhostsRemote++] = level->rank_of_box[neighborBoxID];
      }}
    }}}}
  }
  // sort boxes by sendRank then by sendBoxID... ensures the recv buffers are always sorted by sendBoxID (matching the sender's pack order)...
  qsort(ghostsToRecv,numGhosts       ,sizeof(GZ_type),qsortGZ );
  // sort the lists of neighboring ranks and remove duplicates...
  qsort(recvRanks   ,numGhostsRemote,sizeof(    int),qsortInt);
  int numRecvRanks=0;_rank=-1;for(ghost=0;ghost<numGhostsRemote;ghost++)if(recvRanks[ghost] != _rank){_rank=recvRanks[ghost];recvRanks[numRecvRanks++]=recvRanks[ghost];}
  // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  // in a two-stage process, traverse the list of ghosts and allocate the unpack lists as well as the MPI buffers, and then populate the unpack list
  level->exchange_ghosts[justFaces].num_recvs     =                  numRecvRanks;
  level->exchange_ghosts[justFaces].recv_ranks    =     (int*)malloc(numRecvRanks*sizeof(int));
  level->exchange_ghosts[justFaces].recv_sizes    =     (int*)malloc(numRecvRanks*sizeof(int));
  level->exchange_ghosts[justFaces].recv_buffers  = (double**)malloc(numRecvRanks*sizeof(double*));
  if(numRecvRanks>0){
  if(level->exchange_ghosts[justFaces].recv_ranks  ==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].recv_ranks\n",justFaces);exit(0);}
  if(level->exchange_ghosts[justFaces].recv_sizes  ==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].recv_sizes\n",justFaces);exit(0);}
  if(level->exchange_ghosts[justFaces].recv_buffers==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].recv_buffers\n",justFaces);exit(0);}
  }
  level->exchange_ghosts[justFaces].blocks[2] = NULL;
  level->exchange_ghosts[justFaces].num_blocks[2] = 0;
  level->exchange_ghosts[justFaces].allocated_blocks[2] = 0;
  for(stage=0;stage<=1;stage++){
    // stage=0... traverse the list and calculate the buffer sizes
    // stage=1... allocate MPI recv buffers, traverse the list, and populate the unpack lists...
    // (recv_sizes[] doubles as the running offset into each buffer during stage 1)
    int neighbor;
    for(neighbor=0;neighbor<numRecvRanks;neighbor++){
      if(stage==1){
             level->exchange_ghosts[justFaces].recv_buffers[neighbor] = (double*)malloc(level->exchange_ghosts[justFaces].recv_sizes[neighbor]*sizeof(double));
          if(level->exchange_ghosts[justFaces].recv_sizes[neighbor]>0)
          if(level->exchange_ghosts[justFaces].recv_buffers[neighbor]==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].recv_buffers[neighbor]\n",justFaces);exit(0);}
      memset(level->exchange_ghosts[justFaces].recv_buffers[neighbor],                0,level->exchange_ghosts[justFaces].recv_sizes[neighbor]*sizeof(double));
      }
      level->exchange_ghosts[justFaces].recv_ranks[neighbor]=recvRanks[neighbor];
      level->exchange_ghosts[justFaces].recv_sizes[neighbor]=0;
    }
    for(ghost=0;ghost<numGhosts;ghost++){
      int dim_i=-1, dim_j=-1, dim_k=-1;
      //int  send_i=-1,send_j=-1,send_k=-1;
      int  recv_i=-1,recv_j=-1,recv_k=-1;
      // decode ghostsToRecv[ghost].sendDir (direction sent) into di/dj/dk
      int di = ((ghostsToRecv[ghost].sendDir % 3)  )-1;
      int dj = ((ghostsToRecv[ghost].sendDir % 9)/3)-1;
      int dk = ((ghostsToRecv[ghost].sendDir / 9)  )-1;
      switch(di){ // direction relative to sender
        case -1:dim_i=level->box_ghosts;recv_i=  level->box_dim;   break;
        case  0:dim_i=level->box_dim;   recv_i=0;                  break;
        case  1:dim_i=level->box_ghosts;recv_i=0-level->box_ghosts;break;
      }
      switch(dj){ // direction relative to sender
        case -1:dim_j=level->box_ghosts;recv_j=  level->box_dim;   break;
        case  0:dim_j=level->box_dim;   recv_j=0;                  break;
        case  1:dim_j=level->box_ghosts;recv_j=0-level->box_ghosts;break;
      }
      switch(dk){ // direction relative to sender
        case -1:dim_k=level->box_ghosts;recv_k=  level->box_dim;   break;
        case  0:dim_k=level->box_dim;   recv_k=0;                  break;
        case  1:dim_k=level->box_ghosts;recv_k=0-level->box_ghosts;break;
      }
      // every recv-side ghost is an unpack from an MPI buffer (local exchanges were handled on the send side)
      neighbor=0;while(level->exchange_ghosts[justFaces].recv_ranks[neighbor] != ghostsToRecv[ghost].sendRank)neighbor++;
      if(stage==1)append_block_to_list(&(level->exchange_ghosts[justFaces].blocks[2]),&(level->exchange_ghosts[justFaces].allocated_blocks[2]),&(level->exchange_ghosts[justFaces].num_blocks[2]),
        /*dim.i         = */ dim_i,
        /*dim.j         = */ dim_j,
        /*dim.k         = */ dim_k,
        /*read.box      = */ -1,
        /*read.ptr      = */ level->exchange_ghosts[justFaces].recv_buffers[neighbor], // NOTE, 1. count _sizes, 2. allocate _buffers, 3. populate blocks
        /*read.i        = */ level->exchange_ghosts[justFaces].recv_sizes[neighbor], // current offset in the MPI recv buffer
        /*read.j        = */ 0,
        /*read.k        = */ 0,
        /*read.jStride  = */ dim_i,       // contiguous block
        /*read.kStride  = */ dim_i*dim_j, // contiguous block
        /*read.scale    = */ 1,
        /*write.box     = */ ghostsToRecv[ghost].recvBox,
        /*write.ptr     = */ NULL,
        /*write.i       = */ recv_i,
        /*write.j       = */ recv_j,
        /*write.k       = */ recv_k,
        /*write.jStride = */ level->my_boxes[ghostsToRecv[ghost].recvBox].jStride,
        /*write.kStride = */ level->my_boxes[ghostsToRecv[ghost].recvBox].kStride,
        /*write.scale   = */ 1,
        /* blockcopy_i  = */ BLOCKCOPY_TILE_I, // default
        /* blockcopy_j  = */ BLOCKCOPY_TILE_J, // default
        /* blockcopy_k  = */ BLOCKCOPY_TILE_K, // default
        /* subtype      = */ 0
      );
      if(neighbor>=0)level->exchange_ghosts[justFaces].recv_sizes[neighbor]+=dim_i*dim_j*dim_k; // advance this rank's buffer offset/size
    } // ghost for-loop
  } // stage for-loop
  // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  // free temporary storage...
  free(ghostsToRecv);
  free(recvRanks);
  // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  // malloc MPI requests/status arrays (one request per send plus one per recv)
  #ifdef USE_MPI
  level->exchange_ghosts[justFaces].requests = (MPI_Request*)malloc((level->exchange_ghosts[justFaces].num_sends+level->exchange_ghosts[justFaces].num_recvs)*sizeof(MPI_Request));
  level->exchange_ghosts[justFaces].status   = (MPI_Status *)malloc((level->exchange_ghosts[justFaces].num_sends+level->exchange_ghosts[justFaces].num_recvs)*sizeof(MPI_Status ));
  if((level->exchange_ghosts[justFaces].num_sends+level->exchange_ghosts[justFaces].num_recvs)>0){
  if(level->exchange_ghosts[justFaces].requests==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].requests\n",justFaces);exit(0);}
  if(level->exchange_ghosts[justFaces].status ==NULL){fprintf(stderr,"malloc failed - exchange_ghosts[%d].status\n",justFaces);exit(0);}
  }
  #endif
  // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  //print_communicator(4,level->my_rank,0,&level->exchange_ghosts[justFaces]);
}
//---------------------------------------------------------------------------------------------------------------------------------------------------
// create the pointers in level_type to the contiguous vector FP data (useful for bulk copies to/from accelerators)
// create the pointers in each box to their respective segment of the level's vector FP data (useful for box-relative operators)
// if( (level->numVectors > 0) && (numVectors > level->numVectors) ) then allocate additional space for (numVectors-level->numVectors) vectors and copy the old level->numVectors of data
void create_vectors(level_type *level, int numVectors){
  // Grow the level's vector storage to hold numVectors components, preserving
  // any existing FP data, and (re)build the per-box pointers into that storage.
  if(numVectors <= level->numVectors)return; // already have enough space
  double * old_vectors_base = level->vectors_base; // save a pointer to the originally allocated data for subsequent free()
  double * old_vector0 = NULL;
  if(level->numVectors>0)old_vector0 = level->vectors[0]; // save a pointer to old FP data to copy
  // calculate the size of each box... each stride is padded up to the next multiple of its alignment constant
  level->box_jStride = (level->box_dim+2*level->box_ghosts);while(level->box_jStride % BOX_ALIGN_JSTRIDE)level->box_jStride++; // pencil
  level->box_kStride = level->box_jStride*(level->box_dim+2*level->box_ghosts);while(level->box_kStride % BOX_ALIGN_KSTRIDE)level->box_kStride++; // plane
  level->box_volume = level->box_kStride*(level->box_dim+2*level->box_ghosts);while(level->box_volume % BOX_ALIGN_VOLUME )level->box_volume++; // volume
  #define VECTOR_MALLOC_BULK
  #ifdef VECTOR_MALLOC_BULK
  // allocate one aligned, double-precision array and divide it among vectors...
  // the extra 4096 bytes provide slack for the pointer-bumping alignment loop below
  uint64_t malloc_size = (uint64_t)numVectors*level->num_my_boxes*level->box_volume*sizeof(double) + 4096;
  level->vectors_base = (double*)malloc(malloc_size);
  level->memory_allocated += malloc_size;
  if((numVectors>0)&&(level->vectors_base==NULL)){fprintf(stderr,"malloc failed - level->vectors_base\n");exit(0);}
  double * tmpbuf = level->vectors_base;
  while( (uint64_t)(tmpbuf+level->box_ghosts*(1+level->box_jStride+level->box_kStride)) & 0xff ){tmpbuf++;} // align first *non-ghost* zone element of first component to a 256-Byte boundary
  memset(tmpbuf, 0,(uint64_t)( numVectors)*level->num_my_boxes*level->box_volume*sizeof(double)); // zero to avoid 0.0*NaN or 0.0*Inf // FIX... omp thread ???
  // if there is existing FP data... copy it, then free old data and pointer array
  if(level->numVectors>0){
    memcpy(tmpbuf,old_vector0,(uint64_t)(level->numVectors)*level->num_my_boxes*level->box_volume*sizeof(double)); // FIX... omp thread ???
    if(old_vectors_base)free(old_vectors_base); // free old data...
  }
  // allocate an array of pointers which point to the union of boxes for each vector
  // NOTE, this requires just one copyin per vector to an accelerator rather than requiring one copyin per box per vector
  if(level->numVectors>0)free(level->vectors); // free any previously allocated vector array
  level->vectors = (double **)malloc(numVectors*sizeof(double*));
  if((numVectors>0)&&(level->vectors==NULL)){fprintf(stderr,"malloc failed - level->vectors\n");exit(0);}
  int c;for(c=0;c<numVectors;c++){level->vectors[c] = tmpbuf + c*level->num_my_boxes*level->box_volume;} // vector c spans all of my boxes contiguously
  #else
  // allocate vectors individually (simple, but may cause conflict misses)
  double ** old_vectors = level->vectors;
  level->vectors = (double **)malloc(numVectors*sizeof(double*));
  int c;
  for(c= 0;c<level->numVectors;c++){level->vectors[c] = old_vectors[c];} // keep existing vectors in place
  for(c=level->numVectors;c< numVectors;c++){level->vectors[c] = (double*)malloc(level->num_my_boxes*level->box_volume*sizeof(double));}
  for(c=level->numVectors;c< numVectors;c++){memset(level->vectors[c],0,level->num_my_boxes*level->box_volume*sizeof(double));}
  free(old_vectors);
  #endif
  // build the list of boxes... walk the global box grid in lexicographic order and (re)initialize each box this rank owns
  int box=0;
  int i,j,k;
  for(k=0;k<level->boxes_in.k;k++){
  for(j=0;j<level->boxes_in.j;j++){
  for(i=0;i<level->boxes_in.i;i++){
    int jStride = level->boxes_in.i;
    int kStride = level->boxes_in.i*level->boxes_in.j;
    int b=i + j*jStride + k*kStride;
    if(level->rank_of_box[b]==level->my_rank){
      if(level->numVectors>0)free(level->my_boxes[box].vectors); // free previously allocated vector array
      level->my_boxes[box].vectors = (double **)malloc(numVectors*sizeof(double*));
      if((numVectors>0)&&(level->my_boxes[box].vectors==NULL)){fprintf(stderr,"malloc failed - level->my_boxes[box].vectors\n");exit(0);}
      int c;for(c=0;c<numVectors;c++){level->my_boxes[box].vectors[c] = level->vectors[c] + box*level->box_volume;} // this box's slice of vector c
      level->my_boxes[box].numVectors = numVectors;
      level->my_boxes[box].dim = level->box_dim;
      level->my_boxes[box].ghosts = level->box_ghosts;
      level->my_boxes[box].jStride = level->box_jStride;
      level->my_boxes[box].kStride = level->box_kStride;
      level->my_boxes[box].volume = level->box_volume;
      level->my_boxes[box].low.i = i*level->box_dim;
      level->my_boxes[box].low.j = j*level->box_dim;
      level->my_boxes[box].low.k = k*level->box_dim;
      level->my_boxes[box].global_box_id = b;
      box++;
  }}}}
  // level now has created/initialized vector FP data
  level->numVectors = numVectors;
}
//---------------------------------------------------------------------------------------------------------------------------------------------------
void create_level(level_type *level, int boxes_in_i, int box_dim, int box_ghosts, int numVectors, int domain_boundary_condition, int my_rank, int num_ranks){
int box;
int TotalBoxes = boxes_in_i*boxes_in_i*boxes_in_i;
if(my_rank==0){
if(domain_boundary_condition==BC_DIRICHLET)fprintf(stdout,"\nattempting to create a %d^3 level (with Dirichlet BC) using a %d^3 grid of %d^3 boxes and %d tasks...\n",box_dim*boxes_in_i,boxes_in_i,box_dim,num_ranks);
if(domain_boundary_condition==BC_PERIODIC )fprintf(stdout,"\nattempting to create a %d^3 level (with Periodic BC) using a %d^3 grid of %d^3 boxes and %d tasks...\n", box_dim*boxes_in_i,boxes_in_i,box_dim,num_ranks);
}
int omp_threads = 1;
int omp_nested = 0;
#ifdef _OPENMP
#pragma omp parallel
{
#pragma omp master
{
omp_threads = omp_get_num_threads();
omp_nested = omp_get_nested();
}
}
#endif
if(box_ghosts < stencil_get_radius() ){
if(my_rank==0)fprintf(stderr,"ghosts(%d) must be >= stencil_get_radius(%d)\n",box_ghosts,stencil_get_radius());
exit(0);
}
level->memory_allocated = 0;
level->box_dim = box_dim;
level->box_ghosts = box_ghosts;
level->numVectors = 0; // no vectors have been allocated yet
level->vectors_base = NULL; // pointer returned by bulk malloc
level->vectors = NULL; // pointers to individual vectors
level->boxes_in.i = boxes_in_i;
level->boxes_in.j = boxes_in_i;
level->boxes_in.k = boxes_in_i;
level->dim.i = box_dim*level->boxes_in.i;
level->dim.j = box_dim*level->boxes_in.j;
level->dim.k = box_dim*level->boxes_in.k;
level->active = 1;
level->my_rank = my_rank;
level->num_ranks = num_ranks;
level->boundary_condition.type = domain_boundary_condition;
level->alpha_is_zero = -1;
level->num_threads = omp_threads;
// intra-box threading...
level->threads_per_box = omp_threads;
level->concurrent_boxes = 1;
// inter-box threading...
//level->threads_per_box = 1;
//level->concurrent_boxes = omp_threads;
level->my_blocks = NULL;
level->num_my_blocks = 0;
level->allocated_blocks = 0;
level->tag = log2(level->dim.i);
// allocate 3D array of integers to hold the MPI rank of the corresponding box and initialize to -1 (unassigned)
level->rank_of_box = (int*)malloc(level->boxes_in.i*level->boxes_in.j*level->boxes_in.k*sizeof(int));
if(level->rank_of_box==NULL){fprintf(stderr,"malloc of level->rank_of_box failed\n");exit(0);}
level->memory_allocated += (level->boxes_in.i*level->boxes_in.j*level->boxes_in.k*sizeof(int));
for(box=0;box<level->boxes_in.i*level->boxes_in.j*level->boxes_in.k;box++){level->rank_of_box[box]=-1;} // -1 denotes that there is no actual box assigned to this region
// parallelize the grid (i.e. assign a process rank to each box)...
#ifdef DECOMPOSE_LEX
decompose_level_lex(level->rank_of_box,level->boxes_in.i,level->boxes_in.j,level->boxes_in.k,num_ranks);
#elif DECOMPOSE_BISECTION_SPECIAL
decompose_level_bisection_special(level->rank_of_box,level->boxes_in.i,level->boxes_in.i*level->boxes_in.j,0,0,0,level->boxes_in.i,level->boxes_in.j,level->boxes_in.k,0,num_ranks);
#else
decompose_level_bisection(level->rank_of_box,level->boxes_in.i,level->boxes_in.i*level->boxes_in.j,0,0,0,level->boxes_in.i,level->boxes_in.j,level->boxes_in.k,num_ranks,0,level->boxes_in.i*level->boxes_in.j*level->boxes_in.k);
#endif
//print_decomposition(level);// for debug purposes only
// calculate how many boxes I own...
level->num_my_boxes=0;
for(box=0;box<level->boxes_in.i*level->boxes_in.j*level->boxes_in.k;box++){if(level->rank_of_box[box]==level->my_rank)level->num_my_boxes++;}
level->my_boxes = (box_type*)malloc(level->num_my_boxes*sizeof(box_type));
if((level->num_my_boxes>0)&&(level->my_boxes==NULL)){fprintf(stderr,"malloc failed - create_level/level->my_boxes\n");exit(0);}
// allocate flattened vector FP data and create pointers...
create_vectors(level,numVectors);
// Build an auxiliary data structure that flattens boxes into blocks...
for(box=0;box<level->num_my_boxes;box++){
append_block_to_list(&(level->my_blocks),&(level->allocated_blocks),&(level->num_my_blocks),
/* dim.i = */ level->my_boxes[box].dim,
/* dim.j = */ level->my_boxes[box].dim,
/* dim.k = */ level->my_boxes[box].dim,
/* read.box = */ box,
/* read.ptr = */ NULL,
/* read.i = */ 0,
/* read.j = */ 0,
/* read.k = */ 0,
/* read.jStride = */ level->my_boxes[box].jStride,
/* read.kStride = */ level->my_boxes[box].kStride,
/* read.scale = */ 1,
/* write.box = */ box,
/* write.ptr = */ NULL,
/* write.i = */ 0,
/* write.j = */ 0,
/* write.k = */ 0,
/* write.jStride = */ level->my_boxes[box].jStride,
/* write.kStride = */ level->my_boxes[box].kStride,
/* write.scale = */ 1,
/* blockcopy_i = */ BLOCKCOPY_TILE_I, // default
/* blockcopy_j = */ BLOCKCOPY_TILE_J, // default
/* blockcopy_k = */ BLOCKCOPY_TILE_K, // default
/* subtype = */ 0
);
}
// Tune the OpenMP style of parallelism...
if(omp_nested){
#ifndef OMP_STENCILS_PER_THREAD
#define OMP_STENCILS_PER_THREAD 64
#endif
level->concurrent_boxes = level->num_my_boxes;
if(level->concurrent_boxes > omp_threads)level->concurrent_boxes = omp_threads;
if(level->concurrent_boxes < 1)level->concurrent_boxes = 1;
level->threads_per_box = omp_threads / level->concurrent_boxes;
if(level->threads_per_box > level->box_dim*level->box_dim)
level->threads_per_box = level->box_dim*level->box_dim; // JK collapse
if(level->threads_per_box > level->box_dim*level->box_dim*level->box_dim/OMP_STENCILS_PER_THREAD )
level->threads_per_box = level->box_dim*level->box_dim*level->box_dim/OMP_STENCILS_PER_THREAD;
if(level->threads_per_box<1)level->threads_per_box = 1;
}else{
if(level->num_my_boxes>8){level->concurrent_boxes=omp_threads;level->threads_per_box=1;}
}
if(my_rank==0){
if(omp_nested)fprintf(stdout," OMP_NESTED=TRUE OMP_NUM_THREADS=%d ... %d teams of %d threads\n",omp_threads,level->concurrent_boxes,level->threads_per_box);
else fprintf(stdout," OMP_NESTED=FALSE OMP_NUM_THREADS=%d ... %d teams of %d threads\n",omp_threads,level->concurrent_boxes,level->threads_per_box);
}
// build an assist data structure which specifies which cells are within the domain (used with STENCIL_FUSE_BC)
initialize_valid_region(level);
// build an assist structure for Gauss Seidel Red Black that would facilitate unrolling and SIMDization...
if(level->num_my_boxes){
int i,j;
int kStride = level->my_boxes[0].kStride;
int jStride = level->my_boxes[0].jStride;
//posix_memalign((void**)&(level->RedBlack_FP[0] ),64,kStride*sizeof(double )); // even planes
//posix_memalign((void**)&(level->RedBlack_FP[1] ),64,kStride*sizeof(double ));
// FIX... align RedBlack_FP the same as elements within a plane (i.e. BOX_SIMD_ALIGNMENT)
level->RedBlack_FP[0] = (double*)malloc(kStride*sizeof(double));
level->RedBlack_FP[1] = (double*)malloc(kStride*sizeof(double));
memset(level->RedBlack_FP[0],0,kStride*sizeof(double));
memset(level->RedBlack_FP[1],0,kStride*sizeof(double));
level->memory_allocated += kStride*sizeof(double);
level->memory_allocated += kStride*sizeof(double);
for(j=0-level->box_ghosts;j<level->box_dim+level->box_ghosts;j++){
for(i=0-level->box_ghosts;i<level->box_dim+level->box_ghosts;i++){
int ij = (i+level->box_ghosts) + (j+level->box_ghosts)*jStride;
// if((i^j)&0x1)level->RedBlack_64bMask[ij]= ~0;else level->RedBlack_64bMask[ij]= 0; // useful for blend instructions
if((i^j^1)&0x1)level->RedBlack_FP[ 0][ij]=1.0;else level->RedBlack_FP[ 0][ij]=0.0;
if((i^j^1)&0x1)level->RedBlack_FP[ 1][ij]=0.0;else level->RedBlack_FP[ 1][ij]=1.0;
}}
}
// create mini programs that affect ghost zone exchanges
level->exchange_ghosts[0].num_recvs =0;
level->exchange_ghosts[0].num_sends =0;
level->exchange_ghosts[0].num_blocks[0]=0;
level->exchange_ghosts[0].num_blocks[1]=0;
level->exchange_ghosts[0].num_blocks[2]=0;
level->exchange_ghosts[1].num_recvs =0;
level->exchange_ghosts[1].num_sends =0;
level->exchange_ghosts[1].num_blocks[0]=0;
level->exchange_ghosts[1].num_blocks[1]=0;
level->exchange_ghosts[1].num_blocks[2]=0;
build_exchange_ghosts(level,0); // faces, edges, corners
build_exchange_ghosts(level,1); // justFaces
build_boundary_conditions(level,0); // faces, edges, corners
build_boundary_conditions(level,1); // just faces
// duplicate MPI_COMM_WORLD to be the communicator for each level
#ifdef USE_MPI
if(my_rank==0){fprintf(stdout," Duplicating MPI_COMM_WORLD...");fflush(stdout);}
double time_start = MPI_Wtime();
MPI_Comm_dup(MPI_COMM_WORLD,&level->MPI_COMM_ALLREDUCE);
double time_end = MPI_Wtime();
double time_in_comm_dup = 0;
double time_in_comm_dup_send = time_end-time_start;
MPI_Allreduce(&time_in_comm_dup_send,&time_in_comm_dup,1,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD);
if(my_rank==0){fprintf(stdout,"done (%0.6f seconds)\n",time_in_comm_dup);fflush(stdout);}
#endif
// report on potential load imbalance
int BoxesPerProcess = level->num_my_boxes;
#ifdef USE_MPI
int BoxesPerProcessSend = level->num_my_boxes;
MPI_Allreduce(&BoxesPerProcessSend,&BoxesPerProcess,1,MPI_INT,MPI_MAX,MPI_COMM_WORLD);
#endif
if(my_rank==0){fprintf(stdout," Calculating boxes per process... target=%0.3f, max=%d\n",(double)TotalBoxes/(double)num_ranks,BoxesPerProcess);}
}
//---------------------------------------------------------------------------------------------------------------------------------------------------
/* Reset every per-level cycle counter and solver-event counter to zero so a
   fresh timing run starts from a clean slate.  Only the counters listed here
   are touched; all other level state is left intact. */
void reset_level_timers(level_type *level){
  // cycle counters information...
  level->cycles.smooth                  = 0;
  level->cycles.apply_op                = 0;
  level->cycles.residual                = 0;
  level->cycles.blas1                   = 0;
  level->cycles.blas3                   = 0;
  level->cycles.boundary_conditions     = 0;
  // restriction: total plus its pack/local/unpack/recv/send/wait phases...
  level->cycles.restriction_total       = 0;
  level->cycles.restriction_pack        = 0;
  level->cycles.restriction_local       = 0;
  level->cycles.restriction_unpack      = 0;
  level->cycles.restriction_recv        = 0;
  level->cycles.restriction_send        = 0;
  level->cycles.restriction_wait        = 0;
  // interpolation: same phase breakdown as restriction...
  level->cycles.interpolation_total     = 0;
  level->cycles.interpolation_pack      = 0;
  level->cycles.interpolation_local     = 0;
  level->cycles.interpolation_unpack    = 0;
  level->cycles.interpolation_recv      = 0;
  level->cycles.interpolation_send      = 0;
  level->cycles.interpolation_wait      = 0;
  // ghost-zone exchange: same phase breakdown...
  level->cycles.ghostZone_total         = 0;
  level->cycles.ghostZone_pack          = 0;
  level->cycles.ghostZone_local         = 0;
  level->cycles.ghostZone_unpack        = 0;
  level->cycles.ghostZone_recv          = 0;
  level->cycles.ghostZone_send          = 0;
  level->cycles.ghostZone_wait          = 0;
  level->cycles.collectives             = 0;
  level->cycles.Total                   = 0;
  // solver events information...
  level->Krylov_iterations              = 0;
  level->CAKrylov_formations_of_G       = 0;
  level->vcycles_from_this_level        = 0;
}
//---------------------------------------------------------------------------------------------------------------------------------------------------
/* Replace every per-level cycle counter with its maximum over all MPI ranks.
   Without MPI this routine is a no-op. */
void max_level_timers(level_type *level){
  #ifdef USE_MPI
  // A scratch copy is required because MPI_Allreduce may not alias its send
  // and receive buffers (MPI_IN_PLACE aside).
  #define MAX_OVER_RANKS(field)                                            \
  {                                                                        \
    uint64_t scratch_ = level->cycles.field;                               \
    MPI_Allreduce(&scratch_, &level->cycles.field, 1, MPI_UINT64_T,        \
                  MPI_MAX, MPI_COMM_WORLD);                                \
  }
  MAX_OVER_RANKS(smooth)
  MAX_OVER_RANKS(apply_op)
  MAX_OVER_RANKS(residual)
  MAX_OVER_RANKS(blas1)
  MAX_OVER_RANKS(blas3)
  MAX_OVER_RANKS(boundary_conditions)
  MAX_OVER_RANKS(restriction_total)
  MAX_OVER_RANKS(restriction_pack)
  MAX_OVER_RANKS(restriction_local)
  MAX_OVER_RANKS(restriction_unpack)
  MAX_OVER_RANKS(restriction_recv)
  MAX_OVER_RANKS(restriction_send)
  MAX_OVER_RANKS(restriction_wait)
  MAX_OVER_RANKS(interpolation_total)
  MAX_OVER_RANKS(interpolation_pack)
  MAX_OVER_RANKS(interpolation_local)
  MAX_OVER_RANKS(interpolation_unpack)
  MAX_OVER_RANKS(interpolation_recv)
  MAX_OVER_RANKS(interpolation_send)
  MAX_OVER_RANKS(interpolation_wait)
  MAX_OVER_RANKS(ghostZone_total)
  MAX_OVER_RANKS(ghostZone_pack)
  MAX_OVER_RANKS(ghostZone_local)
  MAX_OVER_RANKS(ghostZone_unpack)
  MAX_OVER_RANKS(ghostZone_recv)
  MAX_OVER_RANKS(ghostZone_send)
  MAX_OVER_RANKS(ghostZone_wait)
  MAX_OVER_RANKS(collectives)
  MAX_OVER_RANKS(Total)
  #undef MAX_OVER_RANKS
  #endif
}
//---------------------------------------------------------------------------------------------------------------------------------------------------
/* Release the memory owned by a level.
   NOTE(review): only rank_of_box and the vector storage are released here.
   The original "FIX !!!" marker is kept because my_boxes, my_blocks, the
   RedBlack_FP masks, the exchange_ghosts metadata, and (with MPI) the
   duplicated communicator are still not freed by this routine. */
void destroy_level(level_type *level){
  // FIX !!!
  free(level->rank_of_box);       // free(NULL) is a no-op; no guard needed
  level->rank_of_box = NULL;      // defend against a double destroy_level()
  #ifdef VECTOR_MALLOC_BULK
  free(level->vectors_base);
  level->vectors_base = NULL;
  #else
  int c;
  for(c=0;c<level->numVectors;c++){free(level->vectors[c]);level->vectors[c]=NULL;}
  #endif
  free(level->vectors);           // the array of per-vector pointers itself
  level->vectors = NULL;
}
|
otbSampleAugmentation.h | /*
* Copyright (C) 2005-2019 Centre National d'Etudes Spatiales (CNES)
*
* This file is part of Orfeo Toolbox
*
* https://www.orfeo-toolbox.org/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef otbSampleAugmentation_h
#define otbSampleAugmentation_h
#ifdef _OPENMP
#include <omp.h>
#endif
#include <algorithm>
#include <cassert>
#include <cmath>
#include <ctime>
#include <random>
#include <vector>
namespace otb
{
namespace sampleAugmentation
{
using SampleType = std::vector<double>;
using SampleVectorType = std::vector<SampleType>;
/**
Estimate standard deviations of the components in one pass using
Welford's algorithm
*/
SampleType EstimateStds(const SampleVectorType& samples)
{
  assert(!samples.empty()); // samples[0] below requires at least one sample
  const auto nbSamples     = samples.size();
  const long nbComponents  = static_cast<long>(samples[0].size());
  SampleType stds(nbComponents, 0.0);
  SampleType means(nbComponents, 0.0);
  for (size_t i = 0; i < nbSamples; ++i)
  {
    // Welford update: means[j] holds the running mean and stds[j] the
    // running sum of squared deviations (M2).  Components are independent,
    // so the inner loop parallelizes safely.
    auto norm_factor = 1.0 / (i + 1);
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (long j = 0; j < nbComponents; ++j)
    {
      const auto mu    = means[j];
      const auto x     = samples[i][j];
      auto       muNew = mu + (x - mu) * norm_factor;
      stds[j] += (x - mu) * (x - muNew);
      means[j] = muNew;
    }
  }
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (long j = 0; j < nbComponents; ++j)
  {
    // Population standard deviation (divide by N, not N-1).
    stds[j] = std::sqrt(stds[j] / nbSamples);
  }
  return stds;
}
/** Create new samples by replicating input samples. We loop through
* the input samples and add them to the new data set until nbSamples
* are added. The elements of newSamples are removed before proceeding.
*/
void ReplicateSamples(const SampleVectorType& inSamples, const size_t nbSamples, SampleVectorType& newSamples)
{
newSamples.resize(nbSamples);
const long long nbSamplesLL = static_cast<long long>(nbSamples);
size_t imod{0};
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (long long i = 0; i < nbSamplesLL; ++i)
{
if (imod == inSamples.size())
imod = 0;
newSamples[i] = inSamples[imod++];
}
}
/** Create new samples by adding noise to existing samples. Gaussian
* noise is added to randomly selected samples. The standard deviation
* of the noise added to each component is the same as the one of the
* input variables divided by stdFactor (defaults to 10). The
* elements of newSamples are removed before proceeding.
*/
void JitterSamples(const SampleVectorType& inSamples, const size_t nbSamples, SampleVectorType& newSamples, float stdFactor = 10,
                   const int seed = std::time(nullptr))
{
  newSamples.resize(nbSamples);
  const long nbComponents = static_cast<long>(inSamples[0].size());
  // Seed the Gaussian generator from the caller-supplied seed so results are
  // reproducible (it was previously seeded from std::random_device, which
  // ignored the `seed` parameter entirely).
  std::mt19937 gen(seed);
  // The input samples are selected randomly with replacement
  std::srand(seed);
  // We use one gaussian distribution per component since they may
  // have different stds
  auto stds = EstimateStds(inSamples);
  std::vector<std::normal_distribution<double>> gaussDis(nbComponents);
  for (long i = 0; i < nbComponents; ++i)
    gaussDis[i] = std::normal_distribution<double>{0.0, stds[i] / stdFactor};
  for (size_t i = 0; i < nbSamples; ++i)
  {
    newSamples[i] = inSamples[std::rand() % inSamples.size()];
    // This inner loop must stay sequential: all components draw from the one
    // shared mt19937 state, so the original `omp parallel for` here was a
    // data race (undefined behavior) on the generator.
    for (long j = 0; j < nbComponents; ++j)
      newSamples[i][j] += gaussDis[j](gen);
  }
}
// A neighbor of a query sample: its position in the input sample vector and
// its (normalized squared) distance to the query, as computed by
// ComputeSquareDistance().
struct NeighborType
{
  size_t index;
  double distance;
};
// Comparison functor ordering neighbors by increasing distance
// (a precedes b when a.distance < b.distance), for use with std::partial_sort.
struct NeighborSorter
{
  constexpr bool operator()(const NeighborType& a, const NeighborType& b) const
  {
    return b.distance > a.distance;
  }
};
double ComputeSquareDistance(const SampleType& x, const SampleType& y)
{
assert(x.size() == y.size());
double dist{0};
for (size_t i = 0; i < x.size(); ++i)
{
dist += (x[i] - y[i]) * (x[i] - y[i]);
}
return dist / (x.size() * x.size());
}
using NNIndicesType = std::vector<NeighborType>;
using NNVectorType = std::vector<NNIndicesType>;
/** Returns the indices of the nearest neighbors for each input sample
*/
void FindKNNIndices(const SampleVectorType& inSamples, const size_t nbNeighbors, NNVectorType& nnVector)
{
  const long long nbSamples = static_cast<long long>(inSamples.size());
  nnVector.resize(nbSamples);
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (long long sampleIdx = 0; sampleIdx < nbSamples; ++sampleIdx)
  {
    // Collect the distances from this sample to every other sample.
    NNIndicesType nns;
    for (long long neighborIdx = 0; neighborIdx < nbSamples; ++neighborIdx)
    {
      if (sampleIdx != neighborIdx)
        nns.push_back({static_cast<size_t>(neighborIdx), ComputeSquareDistance(inSamples[sampleIdx], inSamples[neighborIdx])});
    }
    // Clamp: asking for more neighbors than exist would place partial_sort's
    // middle iterator past end(), which is undefined behavior.
    const size_t nbKept = std::min(nbNeighbors, nns.size());
    std::partial_sort(nns.begin(), nns.begin() + nbKept, nns.end(), NeighborSorter{});
    nns.resize(nbKept);
    nnVector[sampleIdx] = std::move(nns);
  }
}
/** Generate the new sample in the line linking s1 and s2
*/
// Linear interpolation between s1 and s2: component-wise
// s1 + position*(s2 - s1), so position=0 yields s1 and position=1 yields s2.
SampleType SmoteCombine(const SampleType& s1, const SampleType& s2, double position)
{
  SampleType mixed(s1);
  const size_t dim = s1.size();
  for (size_t c = 0; c < dim; ++c)
  {
    mixed[c] = s1[c] + (s2[c] - s1[c]) * position;
  }
  return mixed;
}
/** Create new samples using the SMOTE algorithm
Chawla, N. V., Bowyer, K. W., Hall, L. O., & Kegelmeyer, W. P., Smote:
synthetic minority over-sampling technique, Journal of artificial
intelligence research, 16(), 321–357 (2002).
http://dx.doi.org/10.1613/jair.953
*/
void Smote(const SampleVectorType& inSamples, const size_t nbSamples, SampleVectorType& newSamples, const int nbNeighbors, const int seed = std::time(nullptr))
{
newSamples.resize(nbSamples);
const long long nbSamplesLL = static_cast<long long>(nbSamples);
NNVectorType nnVector;
FindKNNIndices(inSamples, nbNeighbors, nnVector);
// The input samples are selected randomly with replacement
std::srand(seed);
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (long long i = 0; i < nbSamplesLL; ++i)
{
const auto sampleIdx = std::rand() % (inSamples.size());
const auto sample = inSamples[sampleIdx];
const auto neighborIdx = nnVector[sampleIdx][std::rand() % nbNeighbors].index;
const auto neighbor = inSamples[neighborIdx];
newSamples[i] = SmoteCombine(sample, neighbor, std::rand() / double{RAND_MAX});
}
}
} // end namespaces sampleAugmentation
} // end namespace otb
#endif
|
SumaVectoresCParallelFor.c | /*
SumaVectoresC.c
---------------
Suma de dos vectores: v3 = v1 + v2
Para compilar usar (-lrt: real time library): gcc -fopenmp -O2 SumaVectoresCParallelFor.c -o SumaVectoresCParallelFor -lrt
Para ejecutar use: SumaVectoresC longitud
Para obtener el ensamblador compilar con : gcc -O2 SumaVectoresCParallelFor.c -S -o SumaVectoresCParallelFor.s -lrt
*/
#include <stdlib.h> // biblioteca con funciones atoi(), malloc() y free()
#include <stdio.h> // biblioteca donde se encuentra la función printf()
#include <time.h> // biblioteca donde se encuentra la función clock_gettime()
#include <omp.h> // biblioteca donde se encuentran las directivas de openmp
//#define PRINTF_ALL // comentar para quitar el printf que imprime todos los componentes
//Sólo puede estar definida una de las tres constantes VECTOR_ (sólo uno de los tres defines siguientes puede estar descomentado):
//#define VECTOR_LOCAL // descomentar para que los vectores sean variables locales (si se supera el tamaño de la pila se generará el error "Violación de Segmento")
//#define VECTOR_GLOBAL// descomentar para que los vectores sean variables globales (su longitud no estará limitada por el tamaño de la pila del programa)
#define VECTOR_DYNAMIC // descomentar para que los vectores sean variables dinámicas (memoria reutilizable durante la ejecución)
#ifdef VECTOR_GLOBAL
#define MAX 33554432 //=2^25
//#define MAX 4294967295 //=2^32-1
double v1[MAX], v2[MAX], v3[MAX];
#endif
int main(int argc, char** argv){
  unsigned int i;          // loop index: unsigned to match N (OpenMP 3.0+ accepts unsigned loop variables)
  double cgt1, cgt2, ncgt; // execution-time bookkeeping
  //Leer argumento de entrada (no de componentes del vector)
  if (argc<2){
    printf("Faltan no componentes del vector\n");
    exit(-1);
  }
  // strtoul instead of atoi: atoi overflows (undefined behavior) for values
  // above INT_MAX, but the comment promises support up to 2^32-1.
  unsigned int N = (unsigned int)strtoul(argv[1], NULL, 10); // Máximo N =2^32-1=4294967295 (sizeof(unsigned int) = 4 B)
  if (N == 0){
    // N==0 would later read v1[N-1] = v1[4294967295] (out of bounds)
    printf("Faltan no componentes del vector\n");
    exit(-1);
  }
  printf("================================================\n");
  printf("Tamaño en bytes: %zu \n", N*sizeof(double)); // %zu: the product has type size_t
#ifdef VECTOR_LOCAL
  double v1[N], v2[N], v3[N]; // VLA (C99); limited by stack size
#endif
#ifdef VECTOR_GLOBAL
  if (N>MAX) N=MAX;
#endif
#ifdef VECTOR_DYNAMIC
  double *v1, *v2, *v3;
  v1 = (double*) malloc(N*sizeof(double));
  v2 = (double*) malloc(N*sizeof(double));
  v3 = (double*) malloc(N*sizeof(double));
  if ( (v1==NULL) || (v2==NULL) || (v3==NULL) ){
    printf("Error en la reserva de espacio para los vectores\n");
    exit(-2);
  }
#endif
  // Initialize the input vectors (values depend on N)
#pragma omp parallel for
  for(i=0; i<N; i++){
    v1[i] = N*0.1+i*0.1; v2[i] = N*0.1-i*0.1;
  }
  cgt1 = omp_get_wtime();
  // Timed region: element-wise vector sum v3 = v1 + v2
#pragma omp parallel for
  for(i=0; i<N; i++)
    v3[i] = v1[i] + v2[i];
  cgt2 = omp_get_wtime();
  ncgt = (double)(cgt2 - cgt1);
  // Report the elapsed time (and optionally every component)
#ifdef PRINTF_ALL
  printf("Tiempo(seg.): %11.9f\t Tamaño Vectores: %u\n",ncgt,N);
  for(i=0; i<N; i++)
    printf("V1[%u]+V2[%u]=V3[%u](%8.6f+%8.6f=%8.6f) \n",i,i,i,v1[i],v2[i],v3[i]); // %u: i is unsigned
#else
  printf("Tiempo(seg.):%11.9f\t Tamaño Vectores:%u\nV1[0]+V2[0]=V3[0](%8.6f+%8.6f=%8.6f) \nV1[%u]+V2[%u]=V3[%u](%8.6f+%8.6f=%8.6f) \n\n",ncgt,N,v1[0],v2[0],v3[0],N-1,N-1,N-1,v1[N-1],v2[N-1],v3[N-1]);
#endif
#ifdef VECTOR_DYNAMIC
  free(v1);
  free(v2);
  free(v3);
#endif
  return 0;
}
spmv_int.c | ////Example of sparse matrix-vector multiply, using CSR (compressed sparse row format).
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// Add timing support
#include <sys/timeb.h>
/* Wall-clock time in seconds (millisecond resolution), built from ftime(). */
double read_timer() {
    struct timeb now;
    ftime(&now);
    return now.time + now.millitm * 1e-3;
}
//#define DEFAULT_DIMSIZE 256
/* Print an n-by-m integer matrix stored row-major in A, one row per line,
   each element labelled "name[r][c]:value".  A title line precedes the dump
   and a blank line follows it. */
void print_array(char *title, char *name, int *A, int n, int m) {
    printf("%s:\n", title);
    int r, c;
    for (r = 0; r < n; r++) {
        for (c = 0; c < m; c++) {
            printf("%s[%d][%d]:%d ", name, r, c, A[r * m + c]);
        }
        printf("\n");
    }
    printf("\n");
}
/* subroutine error_check (n,m,alpha,dx,dy,u,f)
implicit none
************************************************************
* Checks error between numerical and exact solution
*
************************************************************/
/* Build a 5-point 2D Laplacian-like matrix in CSR form, multiply it by a
   vector of ones, and sanity-check that no result component is negative. */
int main(int argc, char *argv[]) {
    int *ia, *ja;
    int *a, *x, *y;
    int row, i, j, idx, n, nnzMax, nnz, nrows;
    n = 10240;
    //n = 24;
    if (argc > 1) n = atoi(argv[1]);
    nrows = n * n;
    nnzMax = nrows * 5;
    /* ia holds row offsets and needs nrows+1 entries: the trailing
       ia[nrows] = nnz store closes the last row.  The original allocated
       only nrows ints, so that store was a heap buffer overflow. */
    ia = (int*)malloc((nrows+1)*sizeof(int));
    ja = (int*)malloc(nnzMax*sizeof(int));
    a = (int*)malloc(nnzMax*sizeof(int));
    /* Allocate the source and result vectors */
    x = (int*)malloc(nrows*sizeof(int));
    y = (int*)malloc(nrows*sizeof(int));
    if (!ia || !ja || !a || !x || !y) {
        fprintf(stderr, "malloc failed\n");
        free(ia); free(ja); free(a); free(x); free(y);
        return 1;
    }
    /* Fill the matrix: -1 couplings to the four grid neighbors, 4 on the
       diagonal (integer matrix, so the -1.0/4.0 literals become ints). */
    row = 0;
    nnz = 0;
    for (i=0; i<n; i++) {
        for (j=0; j<n; j++) {
            ia[row] = nnz;
            if (i>0)   { ja[nnz] = row - n; a[nnz] = -1; nnz++; }
            if (j>0)   { ja[nnz] = row - 1; a[nnz] = -1; nnz++; }
            ja[nnz] = row; a[nnz] = 4; nnz++;
            if (j<n-1) { ja[nnz] = row + 1; a[nnz] = -1; nnz++; }
            if (i<n-1) { ja[nnz] = row + n; a[nnz] = -1; nnz++; }
            row++;
        }
    }
    ia[row] = nnz;
    /* Create the source (x) vector */
    for (i=0; i<nrows; i++) x[i] = 1;
    double elapsed = read_timer();
    /* long long: the flop count (~2*nnz, about 1e9 at the default size)
       sits right at the edge of int range and overflows for larger n. */
    long long flops = 0;
    for (row=0; row<nrows; row++) {
        int sum = 0;
        #pragma omp simd reduction(+:sum,flops)
        for (idx=ia[row]; idx<ia[row+1]; idx++) {
            sum += a[idx] * x[ja[idx]];
            flops += 2;
        }
        y[row] = sum;
    }
    elapsed = read_timer() - elapsed;
    double gflops = (double)flops / (1.0e9 * elapsed);
    printf("seq elasped time(s): %.4f\n", elapsed);
    printf("GFlops: %.4f\n", gflops);
    /* Row sums of this stencil are non-negative for x == 1, so a negative
       y component indicates a corrupted multiply. */
    int errors = 0;
    for (row=0; row<nrows; row++) {
        if (y[row] < 0) {
            //fprintf(stderr,"y[%d]=%f, fails consistency test\n", row, y[row]);
            ++errors;
        }
    }
    printf("Errors: %d\n", errors);
    free(ia); free(ja); free(a); free(x); free(y);
    return 0;
}
|
GB_unaryop__lnot_int64_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_int64_int16
// op(A') function: GB_tran__lnot_int64_int16
// C type: int64_t
// A type: int16_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
int64_t z = (int64_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT64 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary op to every entry: Cx [p] = !((int64_t) Ax [p] != 0),
// i.e. typecast int16 -> int64 then logical-not (via the GB_* macros above).
GrB_Info GB_unop__lnot_int64_int16
(
    int64_t *Cx,        // Cx and Ax may be aliased
    int16_t *Ax,        // input values
    int64_t anz,        // number of entries to process
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time;
    // the caller falls back to the generic implementation
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // entries are independent, so a static schedule divides them evenly
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;   // Cx [p] = op (cast (Ax [p]))
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast int16 -> int64, and apply the
// logical-not operator, all in one pass via the shared transpose template.
GrB_Info GB_tran__lnot_int64_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice                         // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the included template does the work, using the GB_* macros defined above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
|
csr.c | /*!
* \file
*
* \brief Various routines with dealing with CSR matrices
*
* \author George Karypis
*/
#include <GKlib.h>
#define OMPMINOPS 50000
/*************************************************************************/
/*! Allocate memory for a CSR matrix and initializes it
\returns the allocated matrix. The various fields are set to NULL.
*/
/**************************************************************************/
/* Allocate a CSR matrix and initialize it (all fields NULL, dims -1).
   Returns the newly allocated matrix. */
gk_csr_t *gk_csr_Create()
{
  gk_csr_t *mat = (gk_csr_t *)gk_malloc(sizeof(gk_csr_t), "gk_csr_Create: mat");
  gk_csr_Init(mat);
  return mat;
}
/*************************************************************************/
/*! Initializes the matrix
\param mat is the matrix to be initialized.
*/
/*************************************************************************/
/* Zero every field of the matrix and mark the dimensions as unset (-1). */
void gk_csr_Init(gk_csr_t *mat)
{
  memset(mat, 0, sizeof(gk_csr_t));
  mat->nrows = -1;
  mat->ncols = -1;
}
/*************************************************************************/
/*! Frees all the memory allocated for matrix.
\param mat is the matrix to be freed.
*/
/*************************************************************************/
/* Free the matrix's contents and the matrix struct itself; *mat is set to
   NULL by gk_free.  A NULL *mat is a no-op. */
void gk_csr_Free(gk_csr_t **mat)
{
  if (*mat != NULL) {
    gk_csr_FreeContents(*mat);
    gk_free((void **)mat, LTERM);
  }
}
/*************************************************************************/
/*! Frees only the memory allocated for the matrix's different fields and
sets them to NULL.
\param mat is the matrix whose contents will be freed.
*/
/*************************************************************************/
/* Free every dynamically allocated field of the matrix (row and column
   structures, norms, sums, sizes, volumes, weights) in one variadic
   gk_free call, which also sets each freed pointer to NULL.  The struct
   itself is not freed. */
void gk_csr_FreeContents(gk_csr_t *mat)
{
  gk_free((void *)&mat->rowptr, &mat->rowind, &mat->rowval, &mat->rowids,
          &mat->colptr, &mat->colind, &mat->colval, &mat->colids,
          &mat->rnorms, &mat->cnorms, &mat->rsums, &mat->csums,
          &mat->rsizes, &mat->csizes, &mat->rvols, &mat->cvols,
          &mat->rwgts, &mat->cwgts,
          LTERM);
}
/*************************************************************************/
/*! Returns a copy of a matrix.
\param mat is the matrix to be duplicated.
\returns the newly created copy of the matrix.
*/
/**************************************************************************/
/* Deep-copy a CSR matrix.  Each optional field (row/column pointers,
   indices, values, ids, norms) is duplicated only if present in the
   source; absent fields stay NULL in the copy.
   Returns the newly created copy. */
gk_csr_t *gk_csr_Dup(gk_csr_t *mat)
{
  gk_csr_t *nmat;

  nmat = gk_csr_Create();

  nmat->nrows = mat->nrows;
  nmat->ncols = mat->ncols;

  /* copy the row structure */
  if (mat->rowptr)
    nmat->rowptr = gk_zcopy(mat->nrows+1, mat->rowptr,
                            gk_zmalloc(mat->nrows+1, "gk_csr_Dup: rowptr"));
  if (mat->rowids)
    nmat->rowids = gk_icopy(mat->nrows, mat->rowids,
                            gk_imalloc(mat->nrows, "gk_csr_Dup: rowids"));
  if (mat->rnorms)
    nmat->rnorms = gk_fcopy(mat->nrows, mat->rnorms,
                            gk_fmalloc(mat->nrows, "gk_csr_Dup: rnorms"));
  /* rowptr[nrows] is the total number of stored non-zeros */
  if (mat->rowind)
    nmat->rowind = gk_icopy(mat->rowptr[mat->nrows], mat->rowind,
                            gk_imalloc(mat->rowptr[mat->nrows], "gk_csr_Dup: rowind"));
  if (mat->rowval)
    nmat->rowval = gk_fcopy(mat->rowptr[mat->nrows], mat->rowval,
                            gk_fmalloc(mat->rowptr[mat->nrows], "gk_csr_Dup: rowval"));

  /* copy the col structure */
  if (mat->colptr)
    nmat->colptr = gk_zcopy(mat->ncols+1, mat->colptr,
                            gk_zmalloc(mat->ncols+1, "gk_csr_Dup: colptr"));
  if (mat->colids)
    nmat->colids = gk_icopy(mat->ncols, mat->colids,
                            gk_imalloc(mat->ncols, "gk_csr_Dup: colids"));
  if (mat->cnorms)
    nmat->cnorms = gk_fcopy(mat->ncols, mat->cnorms,
                            gk_fmalloc(mat->ncols, "gk_csr_Dup: cnorms"));
  if (mat->colind)
    nmat->colind = gk_icopy(mat->colptr[mat->ncols], mat->colind,
                            gk_imalloc(mat->colptr[mat->ncols], "gk_csr_Dup: colind"));
  if (mat->colval)
    nmat->colval = gk_fcopy(mat->colptr[mat->ncols], mat->colval,
                            gk_fmalloc(mat->colptr[mat->ncols], "gk_csr_Dup: colval"));

  return nmat;
}
/*************************************************************************/
/*! Returns a submatrix containing a set of consecutive rows.
\param mat is the original matrix.
\param rstart is the starting row.
\param nrows is the number of rows from rstart to extract.
\returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
/* Extract a submatrix of nrows consecutive rows starting at rstart.
   Returns the new matrix (row structure only), or NULL if the requested
   range extends past the end of mat. */
gk_csr_t *gk_csr_ExtractSubmatrix(gk_csr_t *mat, int rstart, int nrows)
{
  ssize_t i;
  gk_csr_t *nmat;

  if (rstart+nrows > mat->nrows)
    return NULL;

  nmat = gk_csr_Create();

  nmat->nrows  = nrows;
  nmat->ncols  = mat->ncols;

  /* copy the row structure; the offset adjustments and assertions below
     dereference rowptr, so they must stay inside this guard (the original
     ran them unconditionally, crashing when rowptr was absent even though
     every other field is guarded). */
  if (mat->rowptr) {
    nmat->rowptr = gk_zcopy(nrows+1, mat->rowptr+rstart,
                            gk_zmalloc(nrows+1, "gk_csr_ExtractSubmatrix: rowptr"));
    /* renumber the offsets to start at 0; iterate downward so rowptr[0]
       (the base being subtracted) is adjusted last */
    for (i=nrows; i>=0; i--)
      nmat->rowptr[i] -= nmat->rowptr[0];
    ASSERT(nmat->rowptr[0] == 0);
  }

  if (mat->rowids)
    nmat->rowids = gk_icopy(nrows, mat->rowids+rstart,
                            gk_imalloc(nrows, "gk_csr_ExtractSubmatrix: rowids"));
  if (mat->rnorms)
    nmat->rnorms = gk_fcopy(nrows, mat->rnorms+rstart,
                            gk_fmalloc(nrows, "gk_csr_ExtractSubmatrix: rnorms"));
  if (mat->rsums)
    nmat->rsums = gk_fcopy(nrows, mat->rsums+rstart,
                           gk_fmalloc(nrows, "gk_csr_ExtractSubmatrix: rsums"));

  if (mat->rowptr) {
    ASSERT(nmat->rowptr[nrows] == mat->rowptr[rstart+nrows]-mat->rowptr[rstart]);
    if (mat->rowind)
      nmat->rowind = gk_icopy(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                              mat->rowind+mat->rowptr[rstart],
                              gk_imalloc(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                                         "gk_csr_ExtractSubmatrix: rowind"));
    if (mat->rowval)
      nmat->rowval = gk_fcopy(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                              mat->rowval+mat->rowptr[rstart],
                              gk_fmalloc(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                                         "gk_csr_ExtractSubmatrix: rowval"));
  }

  return nmat;
}
}
/*************************************************************************/
/*! Returns a submatrix containing a certain set of rows.
\param mat is the original matrix.
\param nrows is the number of rows to extract.
\param rind is the set of row numbers to extract.
\returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
/* Extract the nrows rows whose indices are listed in rind into a new
   matrix (row structure only).  Returns the newly created submatrix. */
gk_csr_t *gk_csr_ExtractRows(gk_csr_t *mat, int nrows, int *rind)
{
  ssize_t r, cur, k, tot;
  gk_csr_t *nmat;

  nmat = gk_csr_Create();
  nmat->nrows = nrows;
  nmat->ncols = mat->ncols;

  /* first pass: count the non-zeros of the selected rows */
  tot = 0;
  for (cur=0; cur<nrows; cur++)
    tot += mat->rowptr[rind[cur]+1]-mat->rowptr[rind[cur]];

  nmat->rowptr = gk_zmalloc(nmat->nrows+1, "gk_csr_ExtractPartition: rowptr");
  nmat->rowind = gk_imalloc(tot, "gk_csr_ExtractPartition: rowind");
  nmat->rowval = gk_fmalloc(tot, "gk_csr_ExtractPartition: rowval");

  /* second pass: copy each selected row and rebuild the offsets */
  nmat->rowptr[0] = 0;
  tot = 0;
  k = 0;
  for (cur=0; cur<nrows; cur++) {
    r = rind[cur];
    gk_icopy(mat->rowptr[r+1]-mat->rowptr[r], mat->rowind+mat->rowptr[r], nmat->rowind+tot);
    gk_fcopy(mat->rowptr[r+1]-mat->rowptr[r], mat->rowval+mat->rowptr[r], nmat->rowval+tot);
    tot += mat->rowptr[r+1]-mat->rowptr[r];
    nmat->rowptr[++k] = tot;
  }
  ASSERT(k == nmat->nrows);

  return nmat;
}
/*************************************************************************/
/*! Returns a submatrix corresponding to a specified partitioning of rows.
\param mat is the original matrix.
\param part is the partitioning vector of the rows.
\param pid is the partition ID that will be extracted.
\returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
/* Extract the rows whose partition id (part[i]) equals pid into a new
   matrix (row structure only).  Two passes: the first counts rows and
   non-zeros so the exact sizes can be allocated; the second copies the
   data and rebuilds the offset array.  Returns the submatrix. */
gk_csr_t *gk_csr_ExtractPartition(gk_csr_t *mat, int *part, int pid)
{
  ssize_t i, j, nnz;
  gk_csr_t *nmat;

  nmat = gk_csr_Create();

  nmat->nrows = 0;
  nmat->ncols = mat->ncols;

  /* pass 1: count the rows and non-zeros belonging to partition pid */
  for (nnz=0, i=0; i<mat->nrows; i++) {
    if (part[i] == pid) {
      nmat->nrows++;
      nnz += mat->rowptr[i+1]-mat->rowptr[i];
    }
  }

  nmat->rowptr = gk_zmalloc(nmat->nrows+1, "gk_csr_ExtractPartition: rowptr");
  nmat->rowind = gk_imalloc(nnz, "gk_csr_ExtractPartition: rowind");
  nmat->rowval = gk_fmalloc(nnz, "gk_csr_ExtractPartition: rowval");

  /* pass 2: copy the matching rows, accumulating the new offsets in nnz */
  nmat->rowptr[0] = 0;
  for (nnz=0, j=0, i=0; i<mat->nrows; i++) {
    if (part[i] == pid) {
      gk_icopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowind+mat->rowptr[i], nmat->rowind+nnz);
      gk_fcopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowval+mat->rowptr[i], nmat->rowval+nnz);
      nnz += mat->rowptr[i+1]-mat->rowptr[i];
      nmat->rowptr[++j] = nnz;
    }
  }
  ASSERT(j == nmat->nrows);

  return nmat;
}
/*************************************************************************/
/*! Splits the matrix into multiple sub-matrices based on the provided
color array.
\param mat is the original matrix.
\param color is an array of size equal to the number of non-zeros
in the matrix (row-wise structure). The matrix is split into
as many parts as the number of colors. For meaningful results,
the colors should be numbered consecutively starting from 0.
\returns an array of matrices for each supplied color number.
*/
/**************************************************************************/
/* Split the matrix into one submatrix per color: non-zero j goes to
   submatrix color[j].  Every submatrix keeps the full nrows x ncols shape;
   only the non-zeros are partitioned.  Returns an array of ncolors
   matrices, where ncolors = max(color)+1. */
gk_csr_t **gk_csr_Split(gk_csr_t *mat, int *color)
{
  ssize_t i, j;
  int nrows, ncolors;
  ssize_t *rowptr;
  int *rowind;
  float *rowval;
  gk_csr_t **smats;

  nrows  = mat->nrows;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  /* colors are assumed to be numbered consecutively from 0 */
  ncolors = gk_imax(rowptr[nrows], color)+1;

  smats = (gk_csr_t **)gk_malloc(sizeof(gk_csr_t *)*ncolors, "gk_csr_Split: smats");
  for (i=0; i<ncolors; i++) {
    smats[i] = gk_csr_Create();
    smats[i]->nrows  = mat->nrows;
    smats[i]->ncols  = mat->ncols;
    smats[i]->rowptr = gk_zsmalloc(nrows+1, 0, "gk_csr_Split: smats[i]->rowptr");
  }

  /* count, per color, the number of non-zeros in each row */
  for (i=0; i<nrows; i++) {
    for (j=rowptr[i]; j<rowptr[i+1]; j++)
      smats[color[j]]->rowptr[i]++;
  }
  /* turn the per-row counts into CSR offset arrays */
  for (i=0; i<ncolors; i++)
    MAKECSR(j, nrows, smats[i]->rowptr);

  for (i=0; i<ncolors; i++) {
    smats[i]->rowind = gk_imalloc(smats[i]->rowptr[nrows], "gk_csr_Split: smats[i]->rowind");
    smats[i]->rowval = gk_fmalloc(smats[i]->rowptr[nrows], "gk_csr_Split: smats[i]->rowval");
  }

  /* scatter each non-zero into its color's submatrix; rowptr[i] is used as
     the insertion cursor and is advanced past the row in the process... */
  for (i=0; i<nrows; i++) {
    for (j=rowptr[i]; j<rowptr[i+1]; j++) {
      smats[color[j]]->rowind[smats[color[j]]->rowptr[i]] = rowind[j];
      smats[color[j]]->rowval[smats[color[j]]->rowptr[i]] = rowval[j];
      smats[color[j]]->rowptr[i]++;
    }
  }
  /* ...and SHIFTCSR restores the offset arrays afterwards */
  for (i=0; i<ncolors; i++)
    SHIFTCSR(j, nrows, smats[i]->rowptr);

  return smats;
}
/**************************************************************************/
/*! Reads a CSR matrix from the supplied file and stores it the matrix's
forward structure.
\param filename is the file that stores the data.
\param format is either GK_CSR_FMT_METIS, GK_CSR_FMT_CLUTO,
GK_CSR_FMT_CSR, GK_CSR_FMT_BINROW, GK_CSR_FMT_BINCOL
specifying the type of the input format.
The GK_CSR_FMT_CSR does not contain a header
line, whereas the GK_CSR_FMT_BINROW is a binary format written
by gk_csr_Write() using the same format specifier.
\param readvals is either 1 or 0, indicating if the CSR file contains
values or it does not. It only applies when GK_CSR_FMT_CSR is
used.
\param numbering is either 1 or 0, indicating if the numbering of the
indices start from 1 or 0, respectively. If they start from 1,
they are automatically decremented during input so that they
will start from 0. It only applies when GK_CSR_FMT_CSR is
used.
\returns the matrix that was read.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Read(char *filename, int format, int readvals, int numbering)
{
  ssize_t i, k, l;
  size_t nfields, nrows, ncols, nnz, fmt, ncon;
  size_t lnlen;
  ssize_t *rowptr;
  int *rowind, ival;
  float *rowval=NULL, fval;
  int readsizes, readwgts;
  char *line=NULL, *head, *tail, fmtstr[256];
  FILE *fpin;
  gk_csr_t *mat=NULL;

  if (!gk_fexists(filename))
    gk_errexit(SIGERR, "File %s does not exist!\n", filename);

  /* binary row-based dump: nrows, ncols, rowptr, rowind, [rowval] */
  if (format == GK_CSR_FMT_BINROW) {
    mat = gk_csr_Create();

    fpin = gk_fopen(filename, "rb", "gk_csr_Read: fpin");
    if (fread(&(mat->nrows), sizeof(int32_t), 1, fpin) != 1)
      gk_errexit(SIGERR, "Failed to read the nrows from file %s!\n", filename);
    if (fread(&(mat->ncols), sizeof(int32_t), 1, fpin) != 1)
      gk_errexit(SIGERR, "Failed to read the ncols from file %s!\n", filename);
    mat->rowptr = gk_zmalloc(mat->nrows+1, "gk_csr_Read: rowptr");
    if (fread(mat->rowptr, sizeof(ssize_t), mat->nrows+1, fpin) != mat->nrows+1)
      gk_errexit(SIGERR, "Failed to read the rowptr from file %s!\n", filename);
    mat->rowind = gk_imalloc(mat->rowptr[mat->nrows], "gk_csr_Read: rowind");
    if (fread(mat->rowind, sizeof(int32_t), mat->rowptr[mat->nrows], fpin) != mat->rowptr[mat->nrows])
      gk_errexit(SIGERR, "Failed to read the rowind from file %s!\n", filename);
    if (readvals == 1) {
      mat->rowval = gk_fmalloc(mat->rowptr[mat->nrows], "gk_csr_Read: rowval");
      if (fread(mat->rowval, sizeof(float), mat->rowptr[mat->nrows], fpin) != mat->rowptr[mat->nrows])
        gk_errexit(SIGERR, "Failed to read the rowval from file %s!\n", filename);
    }

    gk_fclose(fpin);
    return mat;
  }

  /* binary column-based dump: nrows, ncols, colptr, colind, [colval] */
  if (format == GK_CSR_FMT_BINCOL) {
    mat = gk_csr_Create();

    fpin = gk_fopen(filename, "rb", "gk_csr_Read: fpin");
    if (fread(&(mat->nrows), sizeof(int32_t), 1, fpin) != 1)
      gk_errexit(SIGERR, "Failed to read the nrows from file %s!\n", filename);
    if (fread(&(mat->ncols), sizeof(int32_t), 1, fpin) != 1)
      gk_errexit(SIGERR, "Failed to read the ncols from file %s!\n", filename);
    mat->colptr = gk_zmalloc(mat->ncols+1, "gk_csr_Read: colptr");
    if (fread(mat->colptr, sizeof(ssize_t), mat->ncols+1, fpin) != mat->ncols+1)
      gk_errexit(SIGERR, "Failed to read the colptr from file %s!\n", filename);
    mat->colind = gk_imalloc(mat->colptr[mat->ncols], "gk_csr_Read: colind");
    if (fread(mat->colind, sizeof(int32_t), mat->colptr[mat->ncols], fpin) != mat->colptr[mat->ncols])
      gk_errexit(SIGERR, "Failed to read the colind from file %s!\n", filename);
    if (readvals) {
      mat->colval = gk_fmalloc(mat->colptr[mat->ncols], "gk_csr_Read: colval");
      if (fread(mat->colval, sizeof(float), mat->colptr[mat->ncols], fpin) != mat->colptr[mat->ncols])
        gk_errexit(SIGERR, "Failed to read the colval from file %s!\n", filename);
    }

    gk_fclose(fpin);
    return mat;
  }

  /* text formats: parse the header (if any) and set the parsing flags */
  if (format == GK_CSR_FMT_CLUTO) {
    fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
    /* skip leading '%' comment lines */
    do {
      if (gk_getline(&line, &lnlen, fpin) <= 0)
        gk_errexit(SIGERR, "Premature end of input file: file:%s\n", filename);
    } while (line[0] == '%');

    if (sscanf(line, "%zu %zu %zu", &nrows, &ncols, &nnz) != 3)
      gk_errexit(SIGERR, "Header line must contain 3 integers.\n");

    readsizes = 0;
    readwgts  = 0;
    readvals  = 1;  /* CLUTO files always carry values and use 1-based ids */
    numbering = 1;
  }
  else if (format == GK_CSR_FMT_METIS) {
    fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
    /* skip leading '%' comment lines */
    do {
      if (gk_getline(&line, &lnlen, fpin) <= 0)
        gk_errexit(SIGERR, "Premature end of input file: file:%s\n", filename);
    } while (line[0] == '%');

    fmt = ncon = 0;
    nfields = sscanf(line, "%zu %zu %zu %zu", &nrows, &nnz, &fmt, &ncon);
    if (nfields < 2)
      gk_errexit(SIGERR, "Header line must contain at least 2 integers (#vtxs and #edges).\n");

    ncols = nrows;  /* METIS graphs are square */
    nnz *= 2;       /* the header counts undirected edges; each is stored twice */

    if (fmt > 111)
      gk_errexit(SIGERR, "Cannot read this type of file format [fmt=%zu]!\n", fmt);

    /* decode the 3-digit fmt field into sizes/weights/values flags */
    sprintf(fmtstr, "%03zu", fmt%1000);
    readsizes = (fmtstr[0] == '1');
    readwgts  = (fmtstr[1] == '1');
    readvals  = (fmtstr[2] == '1');
    numbering = 1;  /* METIS files are 1-based */
    ncon      = (ncon == 0 ? 1 : ncon);
  }
  else {
    /* headerless CSR: derive nrows/nnz by scanning the whole file */
    readsizes = 0;
    readwgts  = 0;

    gk_getfilestats(filename, &nrows, &nnz, NULL, NULL);

    if (readvals == 1 && nnz%2 == 1)
      gk_errexit(SIGERR, "Error: The number of numbers (%zd %d) in the input file is not even.\n", nnz, readvals);
    if (readvals == 1)
      nnz = nnz/2;  /* each non-zero is an (index, value) pair */
    fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
  }

  mat = gk_csr_Create();

  mat->nrows = nrows;

  rowptr = mat->rowptr = gk_zmalloc(nrows+1, "gk_csr_Read: rowptr");
  rowind = mat->rowind = gk_imalloc(nnz, "gk_csr_Read: rowind");
  if (readvals != 2)
    rowval = mat->rowval = gk_fsmalloc(nnz, 1.0, "gk_csr_Read: rowval");

  if (readsizes)
    mat->rsizes = gk_fsmalloc(nrows, 0.0, "gk_csr_Read: rsizes");

  if (readwgts)
    mat->rwgts = gk_fsmalloc(nrows*ncon, 0.0, "gk_csr_Read: rwgts");

  /*----------------------------------------------------------------------
   * Read the sparse matrix file
   *---------------------------------------------------------------------*/
  /* convert the numbering flag into an index offset (-1 for 1-based input) */
  numbering = (numbering ? - 1 : 0);
  for (ncols=0, rowptr[0]=0, k=0, i=0; i<nrows; i++) {
    /* one line per row; '%' comment lines are skipped */
    do {
      if (gk_getline(&line, &lnlen, fpin) == -1)
        gk_errexit(SIGERR, "Premature end of input file: file while reading row %d\n", i);
    } while (line[0] == '%');

    head = line;
    tail = NULL;

    /* Read vertex sizes */
    if (readsizes) {
#ifdef __MSC__
      mat->rsizes[i] = (float)strtod(head, &tail);
#else
      mat->rsizes[i] = strtof(head, &tail);
#endif
      if (tail == head)
        gk_errexit(SIGERR, "The line for vertex %zd does not have size information\n", i+1);
      if (mat->rsizes[i] < 0)
        errexit("The size for vertex %zd must be >= 0\n", i+1);
      head = tail;
    }

    /* Read vertex weights */
    if (readwgts) {
      for (l=0; l<ncon; l++) {
#ifdef __MSC__
        mat->rwgts[i*ncon+l] = (float)strtod(head, &tail);
#else
        mat->rwgts[i*ncon+l] = strtof(head, &tail);
#endif
        if (tail == head)
          errexit("The line for vertex %zd does not have enough weights "
                  "for the %d constraints.\n", i+1, ncon);
        if (mat->rwgts[i*ncon+l] < 0)
          errexit("The weight vertex %zd and constraint %zd must be >= 0\n", i+1, l);
        head = tail;
      }
    }

    /* Read the rest of the row: (column [value]) pairs until the line ends */
    while (1) {
      ival = (int)strtol(head, &tail, 0);
      if (tail == head)
        break;  /* no more tokens on this line */
      head = tail;

      if ((rowind[k] = ival + numbering) < 0)
        gk_errexit(SIGERR, "Error: Invalid column number %d at row %zd.\n", ival, i);

      /* track the largest column index seen to derive ncols at the end */
      ncols = gk_max(rowind[k], ncols);

      if (readvals == 1) {
#ifdef __MSC__
        fval = (float)strtod(head, &tail);
#else
        fval = strtof(head, &tail);
#endif
        if (tail == head)
          gk_errexit(SIGERR, "Value could not be found for column! Row:%zd, NNZ:%zd\n", i, k);
        head = tail;

        rowval[k] = fval;
      }
      k++;
    }
    rowptr[i+1] = k;
  }

  if (format == GK_CSR_FMT_METIS) {
    ASSERT(ncols+1 == mat->nrows);
    mat->ncols = mat->nrows;
  }
  else {
    mat->ncols = ncols+1;  /* ncols held the max 0-based column index */
  }

  if (k != nnz)
    gk_errexit(SIGERR, "gk_csr_Read: Something wrong with the number of nonzeros in "
                       "the input file. NNZ=%zd, ActualNNZ=%zd.\n", nnz, k);

  gk_fclose(fpin);

  gk_free((void **)&line, LTERM);

  return mat;
}
/**************************************************************************/
/*! Writes the row-based structure of a matrix into a file.
\param mat is the matrix to be written,
\param filename is the name of the output file.
\param format is one of: GK_CSR_FMT_CLUTO, GK_CSR_FMT_CSR,
GK_CSR_FMT_BINROW, GK_CSR_FMT_BINCOL.
\param writevals is either 1 or 0 indicating if the values will be
written or not. This is only applicable when GK_CSR_FMT_CSR
is used.
\param numbering is either 1 or 0 indicating if the internal 0-based
numbering will be shifted by one or not during output. This
is only applicable when GK_CSR_FMT_CSR is used.
*/
/**************************************************************************/
void gk_csr_Write(gk_csr_t *mat, char *filename, int format, int writevals, int numbering)
{
  ssize_t row, jj;
  int offset;
  FILE *fpout;

  /* binary row-based dump: nrows, ncols, rowptr, rowind, [rowval] */
  if (format == GK_CSR_FMT_BINROW) {
    if (filename == NULL)
      gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
    fpout = gk_fopen(filename, "wb", "gk_csr_Write: fpout");

    fwrite(&(mat->nrows), sizeof(int32_t), 1, fpout);
    fwrite(&(mat->ncols), sizeof(int32_t), 1, fpout);
    fwrite(mat->rowptr, sizeof(ssize_t), mat->nrows+1, fpout);
    fwrite(mat->rowind, sizeof(int32_t), mat->rowptr[mat->nrows], fpout);
    if (writevals)
      fwrite(mat->rowval, sizeof(float), mat->rowptr[mat->nrows], fpout);

    gk_fclose(fpout);
    return;
  }

  /* binary column-based dump: nrows, ncols, colptr, colind, [colval] */
  if (format == GK_CSR_FMT_BINCOL) {
    if (filename == NULL)
      gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
    fpout = gk_fopen(filename, "wb", "gk_csr_Write: fpout");

    fwrite(&(mat->nrows), sizeof(int32_t), 1, fpout);
    fwrite(&(mat->ncols), sizeof(int32_t), 1, fpout);
    fwrite(mat->colptr, sizeof(ssize_t), mat->ncols+1, fpout);
    fwrite(mat->colind, sizeof(int32_t), mat->colptr[mat->ncols], fpout);
    if (writevals)
      fwrite(mat->colval, sizeof(float), mat->colptr[mat->ncols], fpout);

    gk_fclose(fpout);
    return;
  }

  /* text output; a NULL filename means stdout */
  fpout = (filename ? gk_fopen(filename, "w", "gk_csr_Write: fpout") : stdout);

  if (format == GK_CSR_FMT_CLUTO) {
    /* CLUTO files carry a header, values, and 1-based indices */
    fprintf(fpout, "%d %d %zd\n", mat->nrows, mat->ncols, mat->rowptr[mat->nrows]);
    writevals = 1;
    numbering = 1;
  }

  offset = (numbering ? 1 : 0);
  for (row=0; row<mat->nrows; row++) {
    for (jj=mat->rowptr[row]; jj<mat->rowptr[row+1]; jj++) {
      fprintf(fpout, " %d", mat->rowind[jj]+offset);
      if (writevals)
        fprintf(fpout, " %f", mat->rowval[jj]);
    }
    fprintf(fpout, "\n");
  }

  if (filename)
    gk_fclose(fpout);
}
/*************************************************************************/
/*! Prunes certain rows/columns of the matrix. The pruning is performed
by analyzing the row structure of the matrix. It removes
rows/columns but it does not affect the numbering of the
remaining rows/columns.
\param mat the matrix to be pruned,
\param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
of the matrix will be pruned,
\param minf is the minimum number of rows (columns) that a column (row) must
be present in order to be kept,
\param maxf is the maximum number of rows (columns) that a column (row) must
be present at in order to be kept.
\returns the pruned matrix consisting only of its row-based structure.
The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Prune(gk_csr_t *mat, int what, int minf, int maxf)
{
  ssize_t i, j, nnz;
  int nrows, ncols;
  ssize_t *rowptr, *nrowptr;
  int *rowind, *nrowind, *collen;
  float *rowval, *nrowval;
  gk_csr_t *nmat;

  nmat = gk_csr_Create();

  nrows = nmat->nrows = mat->nrows;
  ncols = nmat->ncols = mat->ncols;

  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  /* output arrays are sized for the worst case (nothing pruned) */
  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_Prune: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_Prune: nrowind");
  nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_Prune: nrowval");

  switch (what) {
    case GK_CSR_COL:
      /* count the number of rows each column appears in */
      collen = gk_ismalloc(ncols, 0, "gk_csr_Prune: collen");

      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          ASSERT(rowind[j] < ncols);
          collen[rowind[j]]++;
        }
      }
      /* turn collen into a keep (1) / drop (0) flag per column */
      for (i=0; i<ncols; i++)
        collen[i] = (collen[i] >= minf && collen[i] <= maxf ? 1 : 0);

      /* copy only the entries that belong to retained columns; the
         column numbering itself is not changed */
      nrowptr[0] = 0;
      for (nnz=0, i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          if (collen[rowind[j]]) {
            nrowind[nnz] = rowind[j];
            nrowval[nnz] = rowval[j];
            nnz++;
          }
        }
        nrowptr[i+1] = nnz;
      }
      gk_free((void **)&collen, LTERM);
      break;

    case GK_CSR_ROW:
      /* keep only the rows whose length falls within [minf, maxf];
         dropped rows become empty but retain their row number */
      nrowptr[0] = 0;
      for (nnz=0, i=0; i<nrows; i++) {
        if (rowptr[i+1]-rowptr[i] >= minf && rowptr[i+1]-rowptr[i] <= maxf) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++, nnz++) {
            nrowind[nnz] = rowind[j];
            nrowval[nnz] = rowval[j];
          }
        }
        nrowptr[i+1] = nnz;
      }
      break;

    default:
      gk_csr_Free(&nmat);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }

  return nmat;
}
/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The
filtering takes place by keeping only the highest weight entries whose
sum accounts for a certain fraction of the overall weight of the
row/column.
\param mat the matrix to be prunned,
\param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
of the matrix will be prunned,
\param norm indicates the norm that will be used to aggregate the weights
and possible values are 1 or 2,
\param fraction is the fraction of the overall norm that will be retained
by the kept entries.
\returns the filtered matrix consisting only of its row-based structure.
The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_LowFilter(gk_csr_t *mat, int what, int norm, float fraction)
{
  ssize_t i, j, nnz;
  int nrows, ncols, ncand, maxlen=0;
  ssize_t *rowptr, *colptr, *nrowptr;
  int *rowind, *colind, *nrowind;
  float *rowval, *colval, *nrowval, rsum, tsum;
  gk_csr_t *nmat;
  gk_fkv_t *cand;

  nmat = gk_csr_Create();

  nrows = nmat->nrows = mat->nrows;
  ncols = nmat->ncols = mat->ncols;

  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;
  colptr = mat->colptr;
  colind = mat->colind;
  colval = mat->colval;

  /* output arrays are sized for the worst case (nothing filtered) */
  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_LowFilter: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_LowFilter: nrowind");
  nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_LowFilter: nrowval");

  switch (what) {
    case GK_CSR_COL:
      if (mat->colptr == NULL)
        gk_errexit(SIGERR, "Cannot filter columns when column-based structure has not been created.\n");

      /* nrowptr starts as a copy of rowptr and is advanced as kept entries
         are scattered into their rows; the compaction + SHIFTCSR below
         restore it to a proper CSR pointer array */
      gk_zcopy(nrows+1, rowptr, nrowptr);

      for (i=0; i<ncols; i++)
        maxlen = gk_max(maxlen, colptr[i+1]-colptr[i]);

      #pragma omp parallel private(i, j, ncand, rsum, tsum, cand)
      {
        /* per-thread scratch buffer of the longest column */
        cand = gk_fkvmalloc(maxlen, "gk_csr_LowFilter: cand");

        #pragma omp for schedule(static)
        for (i=0; i<ncols; i++) {
          /* gather the column as (value, rowid) pairs and total its norm */
          for (tsum=0.0, ncand=0, j=colptr[i]; j<colptr[i+1]; j++, ncand++) {
            cand[ncand].val = colind[j];
            cand[ncand].key = colval[j];
            tsum += (norm == 1 ? colval[j] : colval[j]*colval[j]);
          }
          gk_fkvsortd(ncand, cand);

          /* keep the heaviest entries until the requested fraction of the
             norm has been accumulated */
          for (rsum=0.0, j=0; j<ncand && rsum<=fraction*tsum; j++) {
            rsum += (norm == 1 ? cand[j].key : cand[j].key*cand[j].key);
            nrowind[nrowptr[cand[j].val]] = i;
            nrowval[nrowptr[cand[j].val]] = cand[j].key;
            nrowptr[cand[j].val]++;
          }
        }

        gk_free((void **)&cand, LTERM);
      }

      /* compact the nrowind/nrowval */
      for (nnz=0, i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<nrowptr[i]; j++, nnz++) {
          nrowind[nnz] = nrowind[j];
          nrowval[nnz] = nrowval[j];
        }
        nrowptr[i] = nnz;
      }
      SHIFTCSR(i, nrows, nrowptr);

      break;

    case GK_CSR_ROW:
      if (mat->rowptr == NULL)
        gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");

      for (i=0; i<nrows; i++)
        maxlen = gk_max(maxlen, rowptr[i+1]-rowptr[i]);

      #pragma omp parallel private(i, j, ncand, rsum, tsum, cand)
      {
        /* per-thread scratch buffer of the longest row */
        cand = gk_fkvmalloc(maxlen, "gk_csr_LowFilter: cand");

        #pragma omp for schedule(static)
        for (i=0; i<nrows; i++) {
          /* gather the row as (value, colid) pairs and total its norm */
          for (tsum=0.0, ncand=0, j=rowptr[i]; j<rowptr[i+1]; j++, ncand++) {
            cand[ncand].val = rowind[j];
            cand[ncand].key = rowval[j];
            tsum += (norm == 1 ? rowval[j] : rowval[j]*rowval[j]);
          }
          gk_fkvsortd(ncand, cand);

          /* keep the heaviest entries until the requested fraction of the
             norm has been accumulated; entries are written at the row's
             original offset, leaving gaps compacted below */
          for (rsum=0.0, j=0; j<ncand && rsum<=fraction*tsum; j++) {
            rsum += (norm == 1 ? cand[j].key : cand[j].key*cand[j].key);
            nrowind[rowptr[i]+j] = cand[j].val;
            nrowval[rowptr[i]+j] = cand[j].key;
          }
          nrowptr[i+1] = rowptr[i]+j;
        }

        gk_free((void **)&cand, LTERM);
      }

      /* compact nrowind/nrowval */
      nrowptr[0] = nnz = 0;
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<nrowptr[i+1]; j++, nnz++) {
          nrowind[nnz] = nrowind[j];
          nrowval[nnz] = nrowval[j];
        }
        nrowptr[i+1] = nnz;
      }
      break;

    default:
      gk_csr_Free(&nmat);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }

  return nmat;
}
/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The
filtering takes place by keeping only the highest weight top-K entries
along each row/column and those entries whose weight is greater than
a specified value.
\param mat the matrix to be prunned,
\param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
of the matrix will be prunned,
\param topk is the number of the highest weight entries to keep.
\param keepval is the weight of a term above which will be kept. This
is used to select additional terms past the first topk.
\returns the filtered matrix consisting only of its row-based structure.
The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_TopKPlusFilter(gk_csr_t *mat, int what, int topk, float keepval)
{
  ssize_t i, j, k, nnz;
  int nrows, ncols, ncand;
  ssize_t *rowptr, *colptr, *nrowptr;
  int *rowind, *colind, *nrowind;
  float *rowval, *colval, *nrowval;
  gk_csr_t *nmat;
  gk_fkv_t *cand;

  nmat = gk_csr_Create();

  nrows = nmat->nrows = mat->nrows;
  ncols = nmat->ncols = mat->ncols;

  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;
  colptr = mat->colptr;
  colind = mat->colind;
  colval = mat->colval;

  /* output arrays are sized for the worst case (nothing filtered).
     [fix] the allocation diagnostic tags previously said
     "gk_csr_LowFilter" (copy/paste), misattributing any OOM report */
  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_TopKPlusFilter: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_TopKPlusFilter: nrowind");
  nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_TopKPlusFilter: nrowval");

  switch (what) {
    case GK_CSR_COL:
      if (mat->colptr == NULL)
        gk_errexit(SIGERR, "Cannot filter columns when column-based structure has not been created.\n");

      cand = gk_fkvmalloc(nrows, "gk_csr_TopKPlusFilter: cand");

      /* nrowptr starts as a copy of rowptr and is advanced as kept entries
         are scattered into their rows; the compaction + SHIFTCSR below
         restore it to a proper CSR pointer array */
      gk_zcopy(nrows+1, rowptr, nrowptr);

      for (i=0; i<ncols; i++) {
        /* gather the column as (value, rowid) pairs, heaviest first */
        for (ncand=0, j=colptr[i]; j<colptr[i+1]; j++, ncand++) {
          cand[ncand].val = colind[j];
          cand[ncand].key = colval[j];
        }
        gk_fkvsortd(ncand, cand);

        /* unconditionally keep the top-k entries... */
        k = gk_min(topk, ncand);
        for (j=0; j<k; j++) {
          nrowind[nrowptr[cand[j].val]] = i;
          nrowval[nrowptr[cand[j].val]] = cand[j].key;
          nrowptr[cand[j].val]++;
        }
        /* ...plus any further entries whose weight is at least keepval */
        for (; j<ncand; j++) {
          if (cand[j].key < keepval)
            break;
          nrowind[nrowptr[cand[j].val]] = i;
          nrowval[nrowptr[cand[j].val]] = cand[j].key;
          nrowptr[cand[j].val]++;
        }
      }

      /* compact the nrowind/nrowval */
      for (nnz=0, i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<nrowptr[i]; j++, nnz++) {
          nrowind[nnz] = nrowind[j];
          nrowval[nnz] = nrowval[j];
        }
        nrowptr[i] = nnz;
      }
      SHIFTCSR(i, nrows, nrowptr);

      gk_free((void **)&cand, LTERM);
      break;

    case GK_CSR_ROW:
      if (mat->rowptr == NULL)
        gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");

      cand = gk_fkvmalloc(ncols, "gk_csr_TopKPlusFilter: cand");

      nrowptr[0] = 0;
      for (nnz=0, i=0; i<nrows; i++) {
        /* gather the row as (value, colid) pairs, heaviest first */
        for (ncand=0, j=rowptr[i]; j<rowptr[i+1]; j++, ncand++) {
          cand[ncand].val = rowind[j];
          cand[ncand].key = rowval[j];
        }
        gk_fkvsortd(ncand, cand);

        /* unconditionally keep the top-k entries... */
        k = gk_min(topk, ncand);
        for (j=0; j<k; j++, nnz++) {
          nrowind[nnz] = cand[j].val;
          nrowval[nnz] = cand[j].key;
        }
        /* ...plus any further entries whose weight is at least keepval */
        for (; j<ncand; j++, nnz++) {
          if (cand[j].key < keepval)
            break;
          nrowind[nnz] = cand[j].val;
          nrowval[nnz] = cand[j].key;
        }
        nrowptr[i+1] = nnz;
      }
      gk_free((void **)&cand, LTERM);
      break;

    default:
      gk_csr_Free(&nmat);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }

  return nmat;
}
/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The
filtering takes place by keeping only the terms whose contribution to
the total length of the document is greater than a user-supplied multiple
over the average.
This routine assumes that the vectors are normalized to be unit length.
\param mat the matrix to be prunned,
\param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
of the matrix will be prunned,
\param zscore is the multiplicative factor over the average contribution
to the length of the document.
\returns the filtered matrix consisting only of its row-based structure.
The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_ZScoreFilter(gk_csr_t *mat, int what, float zscore)
{
  ssize_t ii, jj, nz;
  int nrows;
  ssize_t *rowptr, *nrowptr;
  int *rowind, *nrowind;
  float *rowval, *nrowval, cutoff;
  gk_csr_t *nmat;

  nmat = gk_csr_Create();
  nmat->nrows = mat->nrows;
  nmat->ncols = mat->ncols;

  nrows  = mat->nrows;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  /* output arrays are sized for the worst case (nothing filtered) */
  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_ZScoreFilter: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_ZScoreFilter: nrowind");
  nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_ZScoreFilter: nrowval");

  switch (what) {
    case GK_CSR_COL:
      gk_errexit(SIGERR, "This has not been implemented yet.\n");
      break;

    case GK_CSR_ROW:
      if (mat->rowptr == NULL)
        gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");

      nrowptr[0] = 0;
      for (nz=0, ii=0; ii<nrows; ii++) {
        /* keep entries whose weight exceeds zscore times the mean
           per-entry contribution (1/rowlen, for unit-length rows) */
        cutoff = zscore/(rowptr[ii+1]-rowptr[ii]);
        for (jj=rowptr[ii]; jj<rowptr[ii+1]; jj++) {
          if (rowval[jj] > cutoff) {
            nrowind[nz] = rowind[jj];
            nrowval[nz] = rowval[jj];
            nz++;
          }
        }
        nrowptr[ii+1] = nz;
      }
      break;

    default:
      gk_csr_Free(&nmat);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }

  return nmat;
}
/*************************************************************************/
/*! Compacts the column-space of the matrix by removing empty columns.
As a result of the compaction, the column numbers are renumbered.
The compaction operation is done in place and only affects the row-based
representation of the matrix.
The new columns are ordered in decreasing frequency.
\param mat the matrix whose empty columns will be removed.
*/
/**************************************************************************/
void gk_csr_CompactColumns(gk_csr_t *mat)
{
  ssize_t jj;
  int nrows, ncols, nkeep;
  ssize_t *rowptr;
  int *rowind, *colmap;
  gk_ikv_t *clens;

  nrows  = mat->nrows;
  ncols  = mat->ncols;
  rowptr = mat->rowptr;
  rowind = mat->rowind;

  colmap = gk_imalloc(ncols, "gk_csr_CompactColumns: colmap");

  /* count how many non-zeros fall in each column */
  clens = gk_ikvmalloc(ncols, "gk_csr_CompactColumns: clens");
  for (jj=0; jj<ncols; jj++) {
    clens[jj].key = 0;
    clens[jj].val = jj;
  }
  for (jj=0; jj<rowptr[nrows]; jj++)
    clens[rowind[jj]].key++;

  /* sort by decreasing frequency; the non-empty columns come first */
  gk_ikvsortd(ncols, clens);

  /* assign new ids to the non-empty columns in frequency order */
  nkeep = 0;
  for (jj=0; jj<ncols && clens[jj].key > 0; jj++)
    colmap[clens[jj].val] = nkeep++;

  /* renumber the column indices in place */
  for (jj=0; jj<rowptr[nrows]; jj++)
    rowind[jj] = colmap[rowind[jj]];

  mat->ncols = nkeep;

  gk_free((void **)&colmap, &clens, LTERM);
}
/*************************************************************************/
/*! Sorts the indices in increasing order
\param mat the matrix itself,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating which set of
indices to sort.
*/
/**************************************************************************/
void gk_csr_SortIndices(gk_csr_t *mat, int what)
{
  int n, nn=0;
  ssize_t *ptr;
  int *ind;
  float *val;

  /* select the row-based or column-based view to sort */
  switch (what) {
    case GK_CSR_ROW:
      if (!mat->rowptr)
        gk_errexit(SIGERR, "Row-based view of the matrix does not exists.\n");

      n   = mat->nrows;
      ptr = mat->rowptr;
      ind = mat->rowind;
      val = mat->rowval;
      break;

    case GK_CSR_COL:
      if (!mat->colptr)
        gk_errexit(SIGERR, "Column-based view of the matrix does not exists.\n");

      n   = mat->ncols;
      ptr = mat->colptr;
      ind = mat->colind;
      val = mat->colval;
      break;

    default:
      gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
      return;
  }

  #pragma omp parallel if (n > 100)
  {
    ssize_t i, j, k;
    gk_ikv_t *cand;
    float *tval;

    /* nn = length of the longest row/column; computed by one thread,
       the single construct's implicit barrier publishes it to all */
    #pragma omp single
    for (i=0; i<n; i++)
      nn = gk_max(nn, ptr[i+1]-ptr[i]);

    /* per-thread scratch buffers */
    cand = gk_ikvmalloc(nn, "gk_csr_SortIndices: cand");
    tval = gk_fmalloc(nn, "gk_csr_SortIndices: tval");
    /* NOTE(review): val is dereferenced unconditionally below; this assumes
       the selected view has a value array -- confirm with callers */

    #pragma omp for schedule(static)
    for (i=0; i<n; i++) {
      /* copy the row/column into the scratch buffers; k records whether
         any inversion was found, so already-sorted spans skip the sort */
      for (k=0, j=ptr[i]; j<ptr[i+1]; j++) {
        if (j > ptr[i] && ind[j] < ind[j-1])
          k = 1; /* an inversion */
        cand[j-ptr[i]].val = j-ptr[i];
        cand[j-ptr[i]].key = ind[j];
        tval[j-ptr[i]] = val[j];
      }
      if (k) {
        gk_ikvsorti(ptr[i+1]-ptr[i], cand);
        /* write back the indices in sorted order and permute the values
           through the saved original positions */
        for (j=ptr[i]; j<ptr[i+1]; j++) {
          ind[j] = cand[j-ptr[i]].key;
          val[j] = tval[cand[j-ptr[i]].val];
        }
      }
    }

    gk_free((void **)&cand, &tval, LTERM);
  }
}
/*************************************************************************/
/*! Creates a row/column index from the column/row data.
\param mat the matrix itself,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating which index
will be created.
*/
/**************************************************************************/
void gk_csr_CreateIndex(gk_csr_t *mat, int what)
{
  /* 'f' stands for forward, 'r' stands for reverse */
  ssize_t i, j, k, nf, nr;
  ssize_t *fptr, *rptr;
  int *find, *rind;
  float *fval, *rval;

  switch (what) {
    case GK_CSR_COL:
      /* build the column index from the existing row structure */
      nf   = mat->nrows;
      fptr = mat->rowptr;
      find = mat->rowind;
      fval = mat->rowval;

      /* discard any stale column structure before rebuilding it */
      if (mat->colptr) gk_free((void **)&mat->colptr, LTERM);
      if (mat->colind) gk_free((void **)&mat->colind, LTERM);
      if (mat->colval) gk_free((void **)&mat->colval, LTERM);

      nr   = mat->ncols;
      rptr = mat->colptr = gk_zsmalloc(nr+1, 0, "gk_csr_CreateIndex: rptr");
      rind = mat->colind = gk_imalloc(fptr[nf], "gk_csr_CreateIndex: rind");
      rval = mat->colval = (fval ? gk_fmalloc(fptr[nf], "gk_csr_CreateIndex: rval") : NULL);
      break;

    case GK_CSR_ROW:
      /* build the row index from the existing column structure */
      nf   = mat->ncols;
      fptr = mat->colptr;
      find = mat->colind;
      fval = mat->colval;

      /* discard any stale row structure before rebuilding it */
      if (mat->rowptr) gk_free((void **)&mat->rowptr, LTERM);
      if (mat->rowind) gk_free((void **)&mat->rowind, LTERM);
      if (mat->rowval) gk_free((void **)&mat->rowval, LTERM);

      nr   = mat->nrows;
      rptr = mat->rowptr = gk_zsmalloc(nr+1, 0, "gk_csr_CreateIndex: rptr");
      rind = mat->rowind = gk_imalloc(fptr[nf], "gk_csr_CreateIndex: rind");
      rval = mat->rowval = (fval ? gk_fmalloc(fptr[nf], "gk_csr_CreateIndex: rval") : NULL);
      break;

    default:
      gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
      return;
  }

  /* count the entries of each reverse row, then convert counts to offsets */
  for (i=0; i<nf; i++) {
    for (j=fptr[i]; j<fptr[i+1]; j++)
      rptr[find[j]]++;
  }
  MAKECSR(i, nr, rptr);

  /* NOTE(review): the threshold below selects between two scatter
     strategies based on average row density -- presumably a cache-behavior
     heuristic; both branches produce identical results */
  if (rptr[nr] > 6*nr) {
    /* scatter indices and (optionally) values in two separate passes;
       each pass advances rptr and restores it with SHIFTCSR */
    for (i=0; i<nf; i++) {
      for (j=fptr[i]; j<fptr[i+1]; j++)
        rind[rptr[find[j]]++] = i;
    }
    SHIFTCSR(i, nr, rptr);

    if (fval) {
      for (i=0; i<nf; i++) {
        for (j=fptr[i]; j<fptr[i+1]; j++)
          rval[rptr[find[j]]++] = fval[j];
      }
      SHIFTCSR(i, nr, rptr);
    }
  }
  else {
    /* scatter indices and values together in a single pass */
    if (fval) {
      for (i=0; i<nf; i++) {
        for (j=fptr[i]; j<fptr[i+1]; j++) {
          k = find[j];
          rind[rptr[k]] = i;
          rval[rptr[k]++] = fval[j];
        }
      }
    }
    else {
      for (i=0; i<nf; i++) {
        for (j=fptr[i]; j<fptr[i+1]; j++)
          rind[rptr[find[j]]++] = i;
      }
    }
    SHIFTCSR(i, nr, rptr);
  }
}
/*************************************************************************/
/*! Normalizes the rows/columns of the matrix to be unit
length.
\param mat the matrix itself,
\param what indicates what will be normalized and is obtained by
specifying GK_CSR_ROW, GK_CSR_COL, GK_CSR_ROW|GK_CSR_COL.
\param norm indicates what norm is to normalize to, 1: 1-norm, 2: 2-norm
*/
/**************************************************************************/
void gk_csr_Normalize(gk_csr_t *mat, int what, int norm)
{
  ssize_t i, j;
  int n;
  ssize_t *ptr;
  float *val, sum;

  /* normalize the rows (silently skipped when there is no value array) */
  if (what&GK_CSR_ROW && mat->rowval) {
    n   = mat->nrows;
    ptr = mat->rowptr;
    val = mat->rowval;

    #pragma omp parallel if (ptr[n] > OMPMINOPS)
    {
      #pragma omp for private(j,sum) schedule(static)
      for (i=0; i<n; i++) {
        /* accumulate the row's 1-norm or squared 2-norm */
        for (sum=0.0, j=ptr[i]; j<ptr[i+1]; j++){
          if (norm == 2)
            sum += val[j]*val[j];
          else if (norm == 1)
            sum += val[j]; /* assume val[j] > 0 */
        }
        if (sum > 0) {
          /* reuse sum as the inverse scaling factor */
          if (norm == 2)
            sum=1.0/sqrt(sum);
          else if (norm == 1)
            sum=1.0/sum;
          for (j=ptr[i]; j<ptr[i+1]; j++)
            val[j] *= sum;
        }
      }
    }
  }

  /* normalize the columns (silently skipped when there is no value array) */
  if (what&GK_CSR_COL && mat->colval) {
    n   = mat->ncols;
    ptr = mat->colptr;
    val = mat->colval;

    #pragma omp parallel if (ptr[n] > OMPMINOPS)
    {
      #pragma omp for private(j,sum) schedule(static)
      for (i=0; i<n; i++) {
        /* accumulate the column's 1-norm or squared 2-norm */
        for (sum=0.0, j=ptr[i]; j<ptr[i+1]; j++)
          if (norm == 2)
            sum += val[j]*val[j];
          else if (norm == 1)
            sum += val[j];
        if (sum > 0) {
          /* reuse sum as the inverse scaling factor */
          if (norm == 2)
            sum=1.0/sqrt(sum);
          else if (norm == 1)
            sum=1.0/sum;
          for (j=ptr[i]; j<ptr[i+1]; j++)
            val[j] *= sum;
        }
      }
    }
  }
}
/*************************************************************************/
/*! Applies different row scaling methods.
\param mat the matrix itself,
\param type indicates the type of row scaling. Possible values are:
GK_CSR_MAXTF, GK_CSR_SQRT, GK_CSR_LOG, GK_CSR_IDF, GK_CSR_MAXTF2.
*/
/**************************************************************************/
void gk_csr_Scale(gk_csr_t *mat, int type)
{
ssize_t i, j;
int nrows, ncols, nnzcols, bgfreq;
ssize_t *rowptr;
int *rowind, *collen;
float *rowval, *cscale, maxtf;
nrows = mat->nrows;
rowptr = mat->rowptr;
rowind = mat->rowind;
rowval = mat->rowval;
switch (type) {
case GK_CSR_MAXTF: /* TF' = .5 + .5*TF/MAX(TF) */
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
{
#pragma omp for private(j, maxtf) schedule(static)
for (i=0; i<nrows; i++) {
maxtf = fabs(rowval[rowptr[i]]);
for (j=rowptr[i]; j<rowptr[i+1]; j++)
maxtf = (maxtf < fabs(rowval[j]) ? fabs(rowval[j]) : maxtf);
for (j=rowptr[i]; j<rowptr[i+1]; j++)
rowval[j] = .5 + .5*rowval[j]/maxtf;
}
}
break;
case GK_CSR_MAXTF2: /* TF' = .1 + .9*TF/MAX(TF) */
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
{
#pragma omp for private(j, maxtf) schedule(static)
for (i=0; i<nrows; i++) {
maxtf = fabs(rowval[rowptr[i]]);
for (j=rowptr[i]; j<rowptr[i+1]; j++)
maxtf = (maxtf < fabs(rowval[j]) ? fabs(rowval[j]) : maxtf);
for (j=rowptr[i]; j<rowptr[i+1]; j++)
rowval[j] = .1 + .9*rowval[j]/maxtf;
}
}
break;
case GK_CSR_SQRT: /* TF' = .1+SQRT(TF) */
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
{
#pragma omp for private(j) schedule(static)
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++) {
if (rowval[j] != 0.0)
rowval[j] = .1+sign(rowval[j], sqrt(fabs(rowval[j])));
}
}
}
break;
case GK_CSR_POW25: /* TF' = .1+POW(TF,.25) */
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
{
#pragma omp for private(j) schedule(static)
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++) {
if (rowval[j] != 0.0)
rowval[j] = .1+sign(rowval[j], sqrt(sqrt(fabs(rowval[j]))));
}
}
}
break;
case GK_CSR_POW65: /* TF' = .1+POW(TF,.65) */
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
{
#pragma omp for private(j) schedule(static)
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++) {
if (rowval[j] != 0.0)
rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .65));
}
}
}
break;
case GK_CSR_POW75: /* TF' = .1+POW(TF,.75) */
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
{
#pragma omp for private(j) schedule(static)
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++) {
if (rowval[j] != 0.0)
rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .75));
}
}
}
break;
case GK_CSR_POW85: /* TF' = .1+POW(TF,.85) */
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
{
#pragma omp for private(j) schedule(static)
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++) {
if (rowval[j] != 0.0)
rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .85));
}
}
}
break;
case GK_CSR_LOG: /* TF' = 1+log_2(TF) */
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
{
double logscale = 1.0/log(2.0);
#pragma omp for schedule(static,32)
for (i=0; i<rowptr[nrows]; i++) {
if (rowval[i] != 0.0)
rowval[i] = 1+(rowval[i]>0.0 ? log(rowval[i]) : -log(-rowval[i]))*logscale;
}
#ifdef XXX
#pragma omp for private(j) schedule(static)
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++) {
if (rowval[j] != 0.0)
rowval[j] = 1+(rowval[j]>0.0 ? log(rowval[j]) : -log(-rowval[j]))*logscale;
//rowval[j] = 1+sign(rowval[j], log(fabs(rowval[j]))*logscale);
}
}
#endif
}
break;
case GK_CSR_IDF: /* TF' = TF*IDF */
ncols = mat->ncols;
cscale = gk_fmalloc(ncols, "gk_csr_Scale: cscale");
collen = gk_ismalloc(ncols, 0, "gk_csr_Scale: collen");
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++)
collen[rowind[j]]++;
}
#pragma omp parallel if (ncols > OMPMINOPS)
{
#pragma omp for schedule(static)
for (i=0; i<ncols; i++)
cscale[i] = (collen[i] > 0 ? log(1.0*nrows/collen[i]) : 0.0);
}
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
{
#pragma omp for private(j) schedule(static)
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++)
rowval[j] *= cscale[rowind[j]];
}
}
gk_free((void **)&cscale, &collen, LTERM);
break;
case GK_CSR_IDF2: /* TF' = TF*IDF */
ncols = mat->ncols;
cscale = gk_fmalloc(ncols, "gk_csr_Scale: cscale");
collen = gk_ismalloc(ncols, 0, "gk_csr_Scale: collen");
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++)
collen[rowind[j]]++;
}
nnzcols = 0;
#pragma omp parallel if (ncols > OMPMINOPS)
{
#pragma omp for schedule(static) reduction(+:nnzcols)
for (i=0; i<ncols; i++)
nnzcols += (collen[i] > 0 ? 1 : 0);
bgfreq = gk_max(10, (ssize_t)(.5*rowptr[nrows]/nnzcols));
printf("nnz: %zd, nnzcols: %d, bgfreq: %d\n", rowptr[nrows], nnzcols, bgfreq);
#pragma omp for schedule(static)
for (i=0; i<ncols; i++)
cscale[i] = (collen[i] > 0 ? log(1.0*(nrows+2*bgfreq)/(bgfreq+collen[i])) : 0.0);
}
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
{
#pragma omp for private(j) schedule(static)
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++)
rowval[j] *= cscale[rowind[j]];
}
}
gk_free((void **)&cscale, &collen, LTERM);
break;
default:
gk_errexit(SIGERR, "Unknown scaling type of %d\n", type);
}
}
/*************************************************************************/
/*! Computes the sums of the rows/columns
\param mat the matrix itself,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating which
sums to compute.
*/
/**************************************************************************/
void gk_csr_ComputeSums(gk_csr_t *mat, int what)
{
  ssize_t k;
  int nvecs;
  ssize_t *xptr;
  float *xval, *xsums;

  /* Select the view (row or column) whose sums are requested; any
     previously computed sums for that view are released first. */
  if (what == GK_CSR_ROW) {
    nvecs = mat->nrows;
    xptr  = mat->rowptr;
    xval  = mat->rowval;
    if (mat->rsums)
      gk_free((void **)&mat->rsums, LTERM);
    xsums = mat->rsums = gk_fsmalloc(nvecs, 0, "gk_csr_ComputeSums: sums");
  }
  else if (what == GK_CSR_COL) {
    nvecs = mat->ncols;
    xptr  = mat->colptr;
    xval  = mat->colval;
    if (mat->csums)
      gk_free((void **)&mat->csums, LTERM);
    xsums = mat->csums = gk_fsmalloc(nvecs, 0, "gk_csr_ComputeSums: sums");
  }
  else {
    gk_errexit(SIGERR, "Invalid sum type of %d.\n", what);
    return;
  }

  /* Each row/column sum is independent, so the loop is trivially
     parallel; only bother with threads for sufficiently large nnz. */
  #pragma omp parallel for if (xptr[nvecs] > OMPMINOPS) schedule(static)
  for (k=0; k<nvecs; k++)
    xsums[k] = gk_fsum(xptr[k+1]-xptr[k], xval+xptr[k], 1);
}
/*************************************************************************/
/*! Computes the squared of the norms of the rows/columns
\param mat the matrix itself,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating which
squared norms to compute.
*/
/**************************************************************************/
void gk_csr_ComputeSquaredNorms(gk_csr_t *mat, int what)
{
  ssize_t i;
  int n;
  ssize_t *ptr;
  float *val, *norms;

  switch (what) {
    case GK_CSR_ROW:
      n   = mat->nrows;
      ptr = mat->rowptr;
      val = mat->rowval;

      if (mat->rnorms) gk_free((void **)&mat->rnorms, LTERM);

      /* FIX: allocation tag previously said "gk_csr_ComputeSums" (copy-paste
         from the sums routine), which misattributed allocation failures. */
      norms = mat->rnorms = gk_fsmalloc(n, 0, "gk_csr_ComputeSquaredNorms: norms");
      break;
    case GK_CSR_COL:
      n   = mat->ncols;
      ptr = mat->colptr;
      val = mat->colval;

      if (mat->cnorms) gk_free((void **)&mat->cnorms, LTERM);

      norms = mat->cnorms = gk_fsmalloc(n, 0, "gk_csr_ComputeSquaredNorms: norms");
      break;
    default:
      gk_errexit(SIGERR, "Invalid norm type of %d.\n", what);
      return;
  }

  /* Squared 2-norm of each row/column via a self dot-product. */
  #pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)
  for (i=0; i<n; i++)
    norms[i] = gk_fdot(ptr[i+1]-ptr[i], val+ptr[i], 1, val+ptr[i], 1);
}
/*************************************************************************/
/*! Computes the similarity between two rows/columns
\param mat the matrix itself. The routine assumes that the indices
are sorted in increasing order.
\param i1 is the first row/column,
\param i2 is the second row/column,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating the type of
objects between which the similarity will be computed,
\param simtype is the type of similarity and is one of GK_CSR_COS,
GK_CSR_JAC, GK_CSR_MIN, GK_CSR_AMIN
\returns the similarity between the two rows/columns.
*/
/**************************************************************************/
float gk_csr_ComputeSimilarity(gk_csr_t *mat, int i1, int i2, int what, int simtype)
{
  int nind1, nind2;
  int *ind1, *ind2;
  float *val1, *val2, stat1, stat2, sim;

  /* Locate the two sparse vectors in the requested view. */
  switch (what) {
    case GK_CSR_ROW:
      if (!mat->rowptr)
        gk_errexit(SIGERR, "Row-based view of the matrix does not exists.\n");
      nind1 = mat->rowptr[i1+1]-mat->rowptr[i1];
      nind2 = mat->rowptr[i2+1]-mat->rowptr[i2];
      ind1  = mat->rowind + mat->rowptr[i1];
      ind2  = mat->rowind + mat->rowptr[i2];
      val1  = mat->rowval + mat->rowptr[i1];
      val2  = mat->rowval + mat->rowptr[i2];
      break;
    case GK_CSR_COL:
      if (!mat->colptr)
        gk_errexit(SIGERR, "Column-based view of the matrix does not exists.\n");
      nind1 = mat->colptr[i1+1]-mat->colptr[i1];
      nind2 = mat->colptr[i2+1]-mat->colptr[i2];
      ind1  = mat->colind + mat->colptr[i1];
      ind2  = mat->colind + mat->colptr[i2];
      val1  = mat->colval + mat->colptr[i1];
      val2  = mat->colval + mat->colptr[i2];
      break;
    default:
      gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
      return 0.0;
  }

  /* Merge the two sorted index lists, accumulating the dot product (sim)
     and the per-vector statistics (stat1/stat2) in a single pass.
     BUGFIX: the loop condition was `i1<nind1 && i2<nind2`, which made the
     `i1 == nind1` / `i2 == nind2` branches below unreachable and dropped
     the tail of the longer vector from stat1/stat2, overestimating the
     similarity.  The `||` form processes both tails as intended. */
  switch (simtype) {
    case GK_CSR_COS:
    case GK_CSR_JAC:
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      while (i1<nind1 || i2<nind2) {
        if (i1 == nind1) {                  /* vector 1 exhausted */
          stat2 += val2[i2]*val2[i2];
          i2++;
        }
        else if (i2 == nind2) {             /* vector 2 exhausted */
          stat1 += val1[i1]*val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {     /* index only in vector 1 */
          stat1 += val1[i1]*val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {     /* index only in vector 2 */
          stat2 += val2[i2]*val2[i2];
          i2++;
        }
        else {                              /* common index */
          sim   += val1[i1]*val2[i2];
          stat1 += val1[i1]*val1[i1];
          stat2 += val2[i2]*val2[i2];
          i1++;
          i2++;
        }
      }
      if (simtype == GK_CSR_COS)
        sim = (stat1*stat2 > 0.0 ? sim/sqrt(stat1*stat2) : 0.0);
      else
        /* extended Jaccard: dot / (||v1||^2 + ||v2||^2 - dot) */
        sim = (stat1+stat2-sim > 0.0 ? sim/(stat1+stat2-sim) : 0.0);
      break;

    case GK_CSR_MIN:
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      while (i1<nind1 || i2<nind2) {
        if (i1 == nind1) {
          stat2 += val2[i2];
          i2++;
        }
        else if (i2 == nind2) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2];
          i2++;
        }
        else {
          sim   += gk_min(val1[i1],val2[i2]);
          stat1 += val1[i1];
          stat2 += val2[i2];
          i1++;
          i2++;
        }
      }
      sim = (stat1+stat2-sim > 0.0 ? sim/(stat1+stat2-sim) : 0.0);
      break;

    case GK_CSR_AMIN:
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      while (i1<nind1 || i2<nind2) {
        if (i1 == nind1) {
          stat2 += val2[i2];
          i2++;
        }
        else if (i2 == nind2) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2];
          i2++;
        }
        else {
          sim   += gk_min(val1[i1],val2[i2]);
          stat1 += val1[i1];
          stat2 += val2[i2];
          i1++;
          i2++;
        }
      }
      /* asymmetric min: normalized by the mass of the first vector only */
      sim = (stat1 > 0.0 ? sim/stat1 : 0.0);
      break;

    default:
      gk_errexit(SIGERR, "Unknown similarity measure %d\n", simtype);
      return -1;
  }

  return sim;
}
/*************************************************************************/
/*! Finds the n most similar rows (neighbors) to the query using cosine
similarity.
\param mat the matrix itself
\param nqterms is the number of columns in the query
\param qind is the list of query columns
\param qval is the list of corresponding query weights
\param simtype is the type of similarity and is one of GK_CSR_COS,
GK_CSR_JAC, GK_CSR_MIN, GK_CSR_AMIN
\param nsim is the maximum number of requested most similar rows.
If -1 is provided, then everything is returned unsorted.
\param minsim is the minimum similarity of the requested most
similar rows
\param hits is the result set. This array should be at least
of length nsim.
\param i_marker is an array of size equal to the number of rows
whose values are initialized to -1. If NULL is provided
then this array is allocated and freed internally.
\param i_cand is an array of size equal to the number of rows.
If NULL is provided then this array is allocated and freed
internally.
\returns the number of identified most similar rows, which can be
smaller than the requested number of nnbrs in those cases
in which there are no sufficiently many neighbors.
*/
/**************************************************************************/
int gk_csr_GetSimilarRows(gk_csr_t *mat, int nqterms, int *qind,
        float *qval, int simtype, int nsim, float minsim, gk_fkv_t *hits,
        int *i_marker, gk_fkv_t *i_cand)
{
  ssize_t i, ii, j, k;
  int nrows, ncols, ncand;
  ssize_t *colptr;
  int *colind, *marker;
  float *colval, *rnorms, mynorm, *rsums, mysum;
  gk_fkv_t *cand;

  /* An empty query has no neighbors. */
  if (nqterms == 0)
    return 0;

  /* This routine works off the column-based (inverted-index) view. */
  nrows  = mat->nrows;
  ncols  = mat->ncols;
  colptr = mat->colptr;
  colind = mat->colind;
  colval = mat->colval;

  /* marker[row] is the candidate slot of that row, or -1 if not yet seen.
     Caller-supplied scratch arrays (i_marker/i_cand) are used when given,
     otherwise allocated here and freed at the end. */
  marker = (i_marker ? i_marker : gk_ismalloc(nrows, -1, "gk_csr_SimilarRows: marker"));
  cand   = (i_cand ? i_cand : gk_fkvmalloc(nrows, "gk_csr_SimilarRows: cand"));

  switch (simtype) {
    case GK_CSR_COS:
      /* Accumulate dot products of the query with every row that shares a
         column.  NOTE(review): no normalization by row norms is done here,
         so this is cosine only if the rows are pre-normalized — confirm
         with callers. */
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k] = ncand++;
            }
            cand[marker[k]].key += colval[j]*qval[ii];
          }
        }
      }
      break;

    case GK_CSR_JAC:
      /* Same dot-product accumulation as COS... */
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k] = ncand++;
            }
            cand[marker[k]].key += colval[j]*qval[ii];
          }
        }
      }
      /* ...then extended-Jaccard normalization: dot/(|r|^2+|q|^2-dot).
         NOTE(review): assumes gk_csr_ComputeSquaredNorms(mat, GK_CSR_ROW)
         was called beforehand; rnorms is dereferenced unchecked. */
      rnorms = mat->rnorms;
      mynorm = gk_fdot(nqterms, qval, 1, qval, 1);
      for (i=0; i<ncand; i++)
        cand[i].key = cand[i].key/(rnorms[cand[i].val]+mynorm-cand[i].key);
      break;

    case GK_CSR_MIN:
      /* Accumulate sum of element-wise minimums over shared columns. */
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k] = ncand++;
            }
            cand[marker[k]].key += gk_min(colval[j], qval[ii]);
          }
        }
      }
      /* Normalize by min/(sum_r + sum_q - min).
         NOTE(review): assumes gk_csr_ComputeSums(mat, GK_CSR_ROW) was
         called beforehand; rsums is dereferenced unchecked. */
      rsums = mat->rsums;
      mysum = gk_fsum(nqterms, qval, 1);
      for (i=0; i<ncand; i++)
        cand[i].key = cand[i].key/(rsums[cand[i].val]+mysum-cand[i].key);
      break;

    /* Asymmetric MIN similarity */
    case GK_CSR_AMIN:
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k] = ncand++;
            }
            cand[marker[k]].key += gk_min(colval[j], qval[ii]);
          }
        }
      }
      /* Normalize by the query mass only. */
      mysum = gk_fsum(nqterms, qval, 1);
      for (i=0; i<ncand; i++)
        cand[i].key = cand[i].key/mysum;
      break;

    default:
      gk_errexit(SIGERR, "Unknown similarity measure %d\n", simtype);
      return -1;
  }

  /* go and prune the hits that are below minsim; this pass also resets
     the marker array so caller-supplied scratch can be reused */
  for (j=0, i=0; i<ncand; i++) {
    marker[cand[i].val] = -1;
    if (cand[i].key >= minsim)
      cand[j++] = cand[i];
  }
  ncand = j;

  /* nsim == -1 means "return everything, unsorted"; otherwise select and
     sort the top-nsim candidates in decreasing similarity. */
  if (nsim == -1 || nsim >= ncand) {
    nsim = ncand;
  }
  else {
    nsim = gk_min(nsim, ncand);
    gk_dfkvkselect(ncand, nsim, cand);
    gk_fkvsortd(nsim, cand);
  }

  gk_fkvcopy(nsim, cand, hits);

  if (i_marker == NULL)
    gk_free((void **)&marker, LTERM);
  if (i_cand == NULL)
    gk_free((void **)&cand, LTERM);

  return nsim;
}
|
GB_unaryop__abs_int32_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int32_int64
// op(A') function: GB_tran__abs_int32_int64
// C type: int32_t
// A type: int64_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CASTING(z, aij) \
int32_t z = (int32_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT32 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__abs_int32_int64
(
    int32_t *Cx,        // Cx and Ax may be aliased
    int64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [k] = abs ((int32_t) Ax [k]) for all anz entries, in parallel.
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // inline expansion of GB_CAST_OP (k, k):
        GB_GETA (aij, Ax, k) ;          // aij = Ax [k]
        GB_CASTING (z, aij) ;           // z = (int32_t) aij
        GB_OP (GB_CX (k), z) ;          // Cx [k] = GB_IABS (z)
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__abs_int32_int64
(
    GrB_Matrix C,       // output matrix
    const GrB_Matrix A, // input matrix
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The whole kernel body comes from this shared template, specialized
    // via the GB_GETA / GB_CASTING / GB_OP macros defined earlier in this
    // file (cast int64_t -> int32_t, then absolute value).
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
sc35.c | /*TODO in future s
- Non-equilibrium candidate moves
- check scaling of particles of different sizes - should scale with contact area!
- cell list - divide simulation box in cells where
particles interact with each other and outside is definitely 0 - safe time
better scaling with system size, possibly long spherovylinders could be in several
celles to keep good scaling
- better cluster algorithm - put in wang-landau
- cluster list work for spherocylinders only now
*/
/*------------------------------------------------------------------------------
Version 3.5
- linear bond at spherocylinders, where the second spherocylinder is harmonically
attached to a point that is in distance of bondlength from the first spherocylinder
and it follows the direction of spherocylinder
- bonded particles belong to the same cluster
- print energy at statistical reports
- have particles of different lengths
- interaction scaling back to v1+v2 (no addition of 1.0) - more physical
*/
/*------------------------------------------------------------------------------
Version 3.4
- New handling of the option file
- reaction coordinate radius around z axis for a pore calculations
- reaction coordinate as number of particles in contact (defined by distance of CMs)
- 2D Wang-Landau method
- New Wang-Landau coordinate - radius pore in vesicle around begining of xy plane
- New models TPSC, TCPSC, TCHPSC, TCHCPSC- models with two patches
note that switch function on sides of patch are linear in cos angle not in angle
as a results two patches with overlaping sides do not compensate easily to a flat profile
- FIX chirality was doubled (angle twice as large)
- Added possibility of excluded interactions [EXCLUDE] in topology file
- MPI replica exchange with different temperatures and pressure (paraltemp paralpress)
input configuration is #{number of process}config.init, if it does not exist config.init is used
each replica is with different random seed = seed+mpirank
- config.init can look like movie snapshot
- MPI exchange with Wang-Landau
- added angular interaction between neighboring spherocylinders (in chain)
angle1 is angle between sc directions and angle2 ins angle between the patches
*/
/*-------------------------------------------------------------------------------
Version 3.3
-external potential can be added as a part of topology - it can be a hard or attractive wall
*/
/**
* Changes made by Noah S. Bieler and Robert Vacha:
*
* New version 3.2
*
* - The length has now to be specified in the topology file, but they are not
* allowed to differ from each other. The option file shall no longer contain
* a length option.
* - The particles can now switch their type based on the chemical potential
* delta_mu (= energy difference from state 2 to state 1).
* - For that a new option was introduced: Average attempts per sweep to switch
* a type.
* - A lot of variables are now combined in either topo, sim or conf. The rule
* should be:
* > topo: Everything that belongs to the topology and that should not change
* during the game.
* > sim: Options and stuff, that has to do with the simulation. (Maybe the
* current target and so should be saved in there as well)
* > conf: What changes every step concerning the particles and the box or
* in other words: what has been read from conf.init
* - added a cluster determing routine => sim->clusterlist + sim->clusters
* - added macros for TRUE and FALSE
* - Added Option for the random seed
* - Basic Neighbour list implemented
* - New types: chiral CPSC (CHCPSC) and chiral PSC (CHPSC) and their interactions
*/
/*--------------------------------------------------------------------------------
sc31.c
Patchy Spherocylinder Version 3.1
Wang-Landau method of free energy calculations
It is set in options file as:
O = none, 1 = z-distance of 1st paticle from system CM, 2 = hole in xyplane of SCA = membrane hole
It reads a file wl.dat and write wl-new at the end. There is value of alpha at the first line and then
there are three columns:
1- order parameter, 2- weights, 3- histogram
Interaction of spherocylinders is scaled based on the volume of attractive patch, the unit of one
is that two spheres of diameter sigma =1.0 are attracting each other by 1.0. Using this in interaction
among lipids and spherocylinders should be consistent.
Start up configuration "config.init" file has a box size at the first line now.
(I tested performance: compilation with optimization -O2 speed up 10%
rest has negligible effect including usage of static arrays instead of dynamic
most of the time consumes paire function.
6,519,638,177 :simulate
6,492,411,300 :energyone
5,705,685,593 :paire
542,561,887 :bondenergy
489,463,361 :eattractive11
450,443,970 :image
115,126,519 :erepulsive
*/
/* --------------------------------------------------------------------------------
sc3.c
Patchy Spherocylinder Version 3.0
Beads were added to the particle list.
bead(10) - repulsive
bead(11) - isotropocally attractive
-It is necessary to provide also a topology file (top.init)
-Particles are placed in chains according to the topology order including connections
-Particle arryas are allocated dynamicly on heap now
-dispacement and rotation are optimized for highest RMSD performace
-NPT ensemble with isotropic and anisotropic couplings, in pressure moves all
particles are rescaled with their center (chains are not rescaled with CM)
0 - anisotropic coupling, 1 - isotropic coupling, 2 - isotropic in xy z=const
bead types and their interactions
repulsive(10) purely repulsive shpere with WCA potential on closest distance
parameters: Patch repulsion sigma - defined where repulsion reaches zero
isotropic(11) - isotropic cos^2 potential is acting isotropicaly dependent only on
closest distance between obejcts.
Parameters: distance of attractivity (should be at least
sigma*2^(1/6)) defines how far is attraction constant -e. After this distance
follows switch length on which attraction goes to zero as cos^2.
Rest as repulsive model.
sc2.c
Patchy Spherocylinder Version 2.0
It is possible to make chains of spherocylinders that are connected through
hemispherical caps by harmonic bond. There are two parameters eq distance and
strength of harmonic spring, note that units are in 1 kT/e, the MC strength of bond
is changing with parameter temperature..
Patchy Spherocylinder Version 1.0
Includes different types of possible interactions:
repulsive(0) - purely repulsive spherocylinder with WCA potential on closest distance.
parameters: Patch repulsion sigma - defined where repulsion reaches zero.
isotropic(1) - isotropic cos^2 potential is acting isotropicaly dependent only on
closest distance between spherocylinders.
Parameters: distance of patch, Interaction distance of patch (should be at least
sigma*2^(1/6)) defines how far is attraction constant -e. After this distance
follows Switch length on which attraction goes to zero as cos^2. Rest as repulsive model.
patchy(2) - Attractive potential in limited to an angular wedge on spherocylinder. Patch
goes all the way through, making also hemispherical caps on end attractive.
Parameters:Anglular part has a parameter defining it size "Angular size of patch
(degrees)" and witdh of switch function "Angular switch off of patch (degrees)" on which
attraction reaches zero - it is a linear function. Rest as isotropic model.
cylindrical(3) - Attractive potential in limited to an angular wedge on cylindrical part
of spherocylinders. The hemispherical caps on ends are repulsive. Rest as
patchy model.
Note particles are inside numbered from 0, there is prealocated size of particles MAXN
because in future there can be grand canonical ensamble and number of particles may vary
Follows mc of hard wall spherocylinder version 7 by Mark Miller -description below
sc.c
Version 1
Performs basic constant volume MC simulation of hard spherocylinders with rigid
cuboidal boundary conditions.
Run parameters are read in from the file "options". The template for this file
appears at the end of the code. The values must be inserted before the colons.
The initial configuration is read from the file "config.init". The first line contain size
of box The format for the file is nine columns: three for the positions and three for the
direction vector and three for direction of pathc. The direction vectors are normalised
after being read in. The configuration is checked for particle overlaps.
The unit of length is taken as the spherocylinder diameter. Hence the ratio
L/D is equal to the length of the cylinder.
Order parameters for nematic and smectic order are evaluated. The nematic order
parameter is related to the coefficient of the quadratic term in the Legendre
expansion of the orientational distribution function. Any smectic order is
assumed to be directed along the z axis, and is detected by the coefficients
of the Fourier expansion of the position distribution function.
MM 12.vii.01
..................................................................................
Version 2
The aspect ratio of the box may now fluctuate, keeping the volume constant.
Two new parameters are required in the options file to specify the average number
of attempted shape changes per sweep, and the initial maximum trial change in
a box dimension.
Shape changes are made by picking one of the three box lengths at random,
making a random change, evenly distributed between plus and minus a finite
interval, choosing a second direction and doing the same, then determining
the new length in the remaining direction from the condition of constant
volume.
The step-size equilibration period is now split into three parts: displacement,
rotation, and shape change.
The most important change to the code is that the particle coordinates are
now stored as fractions of the box dimensions. However, input and output
configurations are still communicated in units of the cylinder diameter, D=1.
Note that the displacement maximum step size is now specified as a fraction of
the box length, not as an absolute distance.
MM 18.vii.01
..................................................................................
Version 3
Constant pressure MC. The volume may fluctuate. Volume changes are attempted
by altering just one box length at a time, chosen at random. The running
average of the density is calculated and reported.
MM 24.vii.01
..................................................................................
Version 7
The composite translation-plus-rotation moves have been split into separate
move types, each of which is attempted with equal probability. This enables
acceptance ratios to be accumulated separately for these degrees of freedom, so
that maximum step sizes can be adjusted more sensibly.
A few other things have been tidied up, such as defining structures for the
book-keeping of statistics and acceptance ratios.
MM 9.v.02
--------------------------------------------------------------------------------*/
#ifndef _GNU_SOURCE
# define _GNU_SOURCE
#endif
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <stdbool.h>
#ifdef MACOS
# include "getline.h"
#endif
#ifdef MPI
# include <mpi.h>
#endif
/* Macros for DEBUG messages */
#ifdef DEBUGGING_INIT
#define DEBUG_INIT(...) fprintf(stderr, "DB in INIT: "); fprintf(stderr, __VA_ARGS__); fprintf(stderr, "\n"); fflush(stderr);
#else
#define DEBUG_INIT(...)
#endif
#ifdef DEBUGGING_SIM
#define DEBUG_SIM(...) fprintf(stderr, "DB in SIM: "); fprintf(stderr, __VA_ARGS__); fprintf(stderr, "\n"); fflush(stderr);
#else
#define DEBUG_SIM(...)
#endif
#ifdef DEBUGGING
#define DEBUG(...) fprintf(stderr, "DB: "); fprintf(stderr, __VA_ARGS__); fprintf(stderr, "\n"); fflush(stderr);
#else
#define DEBUG(...)
#endif
/* End of DEBUG macros */
/* With pairlist ? */
#define WITH_PAIRLIST
/* Boolean Macros */
#define BOOL int
#define TRUE 1
#define FALSE 0
/* End of Boolean Macros */
#define MAXF 20 /* Maximum number of Fourier terms */
#define MAXN 14000 /* Maximum number of particles */
#define MAXCHL 10 /* Maximum length of chain */
#define ZEROTOL 1.0e-12 /* Dot products below ZEROTOL are deemed zero */
#define ZEROTOL2 1.0e-8 /* numbers below ZEROTOL are deemed zero */
#define PI 3.141592653589793238462643383279 /* pi */
#define PIH 1.57079632679489661923132169163975 /* pi half*/
/*Particle types*/
#define SC 10 /*spherocylinder*/
#define SCN SC+0 /*spherocylinder non-attractive*/
#define SCA SC+1 /*spherocylinder isotropicaly attractive*/
#define PSC SC+2 /*spherocylinder with patchy attraction*/
#define CPSC SC+3 /*spherocylinder with cylindrical patchy attraction*/
#define CHPSC SC+4 /* chiral psc */
#define CHCPSC SC+5 /* chiral cpsc */
#define TPSC SC+6 /*spherocylinder with two patches*/
#define TCPSC SC+7 /*spherocylinder with two cylindrical patches*/
#define TCHPSC SC+8 /* chiral 2psc */
#define TCHCPSC SC+9 /* chiral 2cpsc */
#define SP 30 /*sphere - should be over all apherocylinders*/
#define SPN SP+0 /* sphere non-attractive*/
#define SPA SP+1 /* spherocylinder isotropicaly attractive*/
#define MAXT 30 /* Maximum number of types we have*/
#define MAXMT 100 /* Maximum number of molecular types */
/*Reading topology*/
#define SMSTR 64 /* Small string length */
#define STRLEN 400 /* maximum length of line*/
#define CONTINUE '\\' /* symbol for line continue*/
#define COMMENTSIGN '#' /* symbol for comment*/
#define OPENKEY '[' /* starting sign for keyword*/
#define CLOSEKEY ']' /* ending sign for keyword*/
#define SEPARATOR ':' /* sign for separator*/
#define OPENMOL '{' /* starting sign for molecules*/
#define CLOSEMOL '}' /* ending sign for molecules*/
#define BOXSEP 'x' /* extraction of box*/
/* Wang Landau method */
#define WL_GERR 0.0001 /* Max roughnes in histogram */
#define WL_ALPHATOL 0.000001 /* Covergence crietria for detailed balance */
#define WL_MINHIST 1000 /* Minimum histogram sampling for considering roughness */
#define WL_ZERO 0.000000000000 /* Zero for histogram with current weights*/
#define WL_CONTACTS 36.0 /* Square distance under which are particles in contact */
/* Math */
#define DOT(a,b) ((a).x * (b).x + (a).y * (b).y + (a).z * (b).z) /* Dot product */
/* NOTE(review): AVER does not parenthesize its arguments; passing an
   expression such as AVER(x, y+z) expands correctly only by luck of
   precedence — prefer fully parenthesized arguments at call sites. */
#define AVER(a,b) ((a+b)*0.5) /* Arithmetic average*/
/* NOTE(review): ROUND carries a trailing semicolon and evaluates `a`
   three times; it breaks inside `if/else` without braces and misbehaves
   for arguments with side effects.  Left untouched because call sites
   may rely on the embedded semicolon — TODO audit callers before fixing. */
#define ROUND(a) (a > 0.0) ? floor(a + 0.5) : ceil(a - 0.5); /* Round double*/
/* NOTE(review): PMONE's argument is unparenthesized; only safe for a
   plain 0/1 variable or literal. */
#define PMONE(a) (1 - 2 * a) /* Takes 1 or 0, return +-1 */
/* Acceptance ratio */
#define RATIO(a) ( ((a).acc+(a).rej) > 0 ? 1.0*(a).acc/((a).acc+(a).rej) : 0.0 )
/* NOTE(review): INBOX evaluates `a` twice and writes the integral part
   into `b`; intended for mapping a coordinate into [0,1). */
#define INBOX(a,b) ( a > 0 ? modf(a,&b) : modf(a,&b)+1 )
/*................................................................
Structure definitions
*/
struct vector { /* Define a 3D vector structure */
    double x;   /* x component */
    double y;   /* y component */
    double z;   /* z component */
};
struct quat {   /* Define a quaternion structure (w + xi + yj + zk) */
    double w;   /* scalar part */
    double x;   /* i component of the vector part */
    double y;   /* j component of the vector part */
    double z;   /* k component of the vector part */
};
struct particles {              /* Define a particle */
    struct vector pos;          /* Position vector */
    struct vector dir;          /* Unit direction vector of axis */
    struct vector patchdir[2];  /* Vector defining orientation of patch */
    struct vector patchsides[4];/* Vector defining sides of patch */
    struct vector chdir[2];     /* Direction for chirality - keep in memory to increase speed */
    long chaint;                /* Chain type */
    long chainn;                /* Chain number */
    int type;                   /* Type of the particle */
    int switchtype;             /* With which kind of particle do you want to switch? */
    double delta_mu;            /* Chemical potential for the switch */
    int switched;               /* 0: in the initial state; 1: in the switched state */
};
struct ia_param{                /* Contains properties and parameters of particle types */
    char name[SMSTR];           /* The name of the particle type */
    char other_name[SMSTR];     /* The name of the particle type */
    int geotype[2];             /* The geometrical type: spherocylinder (0-repulsive, 1-isotropic, 2-patchy, 3-cylindrical)
                                   or sphere (0-repulsive, 1-isotropic) */
    double sigma;               /* Repulsion wca */
    double epsilon;             /* Repulsion strength */
    double pdis;                /* Interaction distance of patch */
    double pswitch;             /* Switch of distance of patch */
    double pangl[4];            /* angular size of patch as was specified in input */
    double panglsw[4];          /* angular size of patchswitch as was specified in input */
    double pcangl[4];           /* cosine of half size angle - rotation from patch direction to side */
    double pcanglsw[4];         /* cosine of half size angle plus switch - rotation from patch direction to side */
    double rcut;                /* Cutoff for attraction */
    double rcutwca;             /* Cutoff for repulsion */
    double pcoshalfi[4];        /* Cosine of half angle going to side of interaction */
    double psinhalfi[4];        /* Sine of half angle going to side of interaction - useful for quaternion rotation */
    double csecpatchrot[2];     /* Cosine of rotation of second patches in 2psc models */
    double ssecpatchrot[2];     /* Sine of rotation of second patches in 2psc models */
    double volume;              /* Volume of particle for geometrical center calculations */
    double pvolscale;           /* Scale of patch volume size */
    double len[2];              /* Length of the PSC */
    double half_len[2];         /* Half length of the PSC */
    double chiral_cos[2];       /* Contains the cosine for the chiral rotation of the patch */
    double chiral_sin[2];       /* Contains the sine for the chiral rotation of the patch */
};
struct interacts { /* Parameters passed to functions that compute pair interactions */
double dist; /* Closest distance between the two particles */
struct vector distvec; /* Vector of closest distance */
struct particles * part1; /* Particle 1 */
struct particles * part2; /* Particle 2 */
struct vector box; /* Box size */
struct ia_param * param; /* Interaction parameters for this particle-type pair */
struct vector r_cm; /* Vector connecting centers of mass */
double distcm; /* Distance between centers of mass */
double dotrcm; /* Squared size of r_cm */
double contt; /* Closest point on spherocylinder to sphere */
};
struct chainparams { /* Parameters for the intra-chain (bonded) interactions */
double bond1eq; /* Equilibrium distance of harmonic bond between nearest neighbours */
double bond1c; /* Spring constant for harmonic bond between nearest neighbours */
double bond2eq; /* Equilibrium distance of harmonic bond between second-nearest neighbours */
double bond2c; /* Spring constant for harmonic bond between second-nearest neighbours */
double bonddeq; /* Equilibrium distance of directional harmonic bond between nearest neighbours */
double bonddc; /* Spring constant for directional harmonic bond between nearest neighbours */
double angle1eq; /* Equilibrium angle between two spherocylinders - nearest neighbours */
double angle1c; /* Spring constant for angle between two spherocylinders - nearest neighbours */
double angle2eq; /* Equilibrium angle between two spherocylinder patches - nearest neighbours */
double angle2c; /* Spring constant for angle between two spherocylinder patches - nearest neighbours */
};
struct molecule { /* Molecule description used only by the I/O (topology-reading) code */
char * name; /* The name of the molecule */
long * type; /* The type of each particle */
long * switchtype; /* The switch type of each particle */
double * delta_mu; /* The chemical potential for the switch */
};
struct disp { /* Step size and acceptance-ratio statistics for one move type */
double mx; /* Maximum value: displacement, cos(angle), etc. */
double angle; /* Maximum angle, since in .mx cos(angle) is saved */
long acc; /* Number of accepted steps */
long rej; /* Number of rejected steps */
double oldrmsd; /* Averaged mx value in previous equilibration round */
double oldmx; /* Change in mx in last equilibration step */
};
struct stat { /* Accumulators for running statistics.
NOTE(review): the tag shadows POSIX `struct stat` from <sys/stat.h>; fine as long as that header is never included here. */
double sum; /* Sum of samples */
double sum2; /* Sum of squared samples */
long samples; /* Number of samples accumulated */
double mean; /* Sample mean */
double rms; /* Root-mean-square fluctuation */
};
struct meshs { /* Mesh for the hole order parameter */
int dim[2]; /* Mesh dimensions */
int *data; /* Mesh data */
int *tmp; /* Temporary list for hole search */
};
struct wls { /* State of the Wang-Landau (WL) method */
double *weights; /* Array of weights for the WL method */
long *hist; /* Histogram array for the WL method */
long length[2]; /* Length of the above arrays (one per WL dimension) */
double dorder[2]; /* Increments of the order parameter */
double minorder[2]; /* Minimum of the order parameter */
double alpha; /* Current modification factor for the weights */
long currorder[2]; /* Value of the current order parameter */
long neworder[2]; /* WL order parameter in the new (trial) step */
long max; /* WL maximum of the histogram */
long min; /* WL minimum of the histogram */
double wmin; /* Weights minimum (subtracted to renormalize) */
int wlmdim; /* Dimensionality of the Wang-Landau sampling (1 or 2) */
int wlmtype; /* Atom type used by the Wang-Landau method */
double wl_meshsize; /* Size of a mesh bin for the hole order parameter */
struct meshs mesh; /* Mesh for the hole order parameter */
struct meshs origmesh; /* Mesh backup used to restore rejected moves */
long * radiushole; /* Array for hole radius around the origin */
long * radiusholeold; /* Array for hole radius around the origin - backup for big moves */
long radiusholemax; /* Size of the hole-radius arrays */
long partincontact; /* Number of particles in contact */
long partincontactold; /* Number of particles in contact - backup for moves */
};
struct pairs{ /* Holds the particle numbers of the pairs and the number of pairs */
long num_pairs; /* The number of pairs */
long * pairs; /* The particle numbers of the pairs */
};
struct pairlist{ /* NOTE(review, kept from original): this indirection may be unnecessary; sim->pairs[npart] should be enough */
struct pairs * list; /* Contains the pairlist of all particles */
};
struct cluster{ /* Contains all the particles of one cluster */
long npart; /* Number of particles in the cluster */
long * particles; /* Indices of the member particles */
};
struct exters{ /* External (wall) potential description */
BOOL exist; /* Existence of the external potential */
double thickness; /* External wall thickness */
double epsilon; /* Depth of attraction */
double attraction; /* Distance of attraction */
double sqmaxcut; /* Squared distance beyond which nothing can interact */
struct ia_param interactions[MAXT]; /* Interaction parameters with particle types, generated from the params above */
};
struct topo{ /* Static topology of the system (intended to eventually hold all topology data) */
long * switchlist; /* List containing the indices of all particles with switch types */
long n_switch_part; /* Number of particles with a switch type */
double sqmaxcut; /* Square of the distance over which even spherocylinders cannot interact (distance between CM) */
double maxcut; /* Distance over which even spherocylinders cannot interact (distance between CM) */
long conlist[MAXN][4]; /* Connectivity list: connections to tail, head and second neighbours so far */
long chainlist[MAXN][MAXCHL]; /* List of chains */
long chainnum; /* Number of chains */
struct chainparams chainparam[MAXMT]; /* Parameters for chains */
struct ia_param ia_params[MAXT][MAXT]; /* Parametrization of particles for all pair interactions */
long npart; /* Number of particles */
struct exters exter; /* External potential - wall */
};
struct sim{ /* Holds the simulation options and variables that can change every step. */
double press; /* Pressure */
double paralpress; /* Parallel pressure for replica exchange */
double dpress; /* Pressure change for replica exchange */
double shave; /* Average number of volume changes to attempt per sweep */
double shprob; /* Probability of attempting a volume change */
double chainprob; /* Average number of chain-move attempts per sweep */
double switchprob; /* Average number of type-switch attempts per sweep */
int pairlist_update; /* Number of sweeps between pairlist updates */
double temper; /* Temperature */
double paraltemper; /* Temperature for parallel tempering */
double dtemp; /* Temperature step */
int ptype; /* Type of pressure coupling */
long adjust; /* Number of sweeps between step size adjustments */
long movie; /* Number of sweeps between movie frames */
long nequil; /* Number of equilibration sweeps */
long nsweeps; /* Number of production sweeps */
long paramfrq; /* Number of sweeps between order parameter samples */
long report; /* Number of sweeps between statistics reports */
// long terms; /* Number of Fourier terms as smectic order parameters */
long nrepchange; /* Number of sweeps between replica exchanges */
int wlm[2]; /* Wang-Landau method (WL) mode per dimension */
struct disp edge; /* Maximum box length change and statistics */
struct disp rot[MAXT]; /* Maximum rotation and statistics */
struct disp trans[MAXT]; /* Maximum translation and statistics */
struct disp chainm[MAXMT]; /* Maximum translation for chain and statistics */
struct disp chainr[MAXMT]; /* Maximum rotation for chain and statistics */
struct disp mpiexch; /* MPI replica-exchange statistics */
struct pairs * pairlist; /* The pairlist */
long write_cluster; /* Number of sweeps between writing out cluster info */
long * clusterlist; /* clusterlist[i] = cluster index of particle i */
struct cluster * clusters; /* Information about the individual clusters */
double *clustersenergy; /* List of energies of clusters */
long num_cluster; /* Number of individual clusters */
long * clusterstat; /* Statistics about the size of clusters */
long max_clust; /* Maximal cluster size */
struct wls wl; /* Wang-Landau data */
int mpirank; /* MPI rank of this process */
int mpinprocs; /* MPI number of processes */
};
typedef enum { /* Holds the type of a variable in struct Option */
Int, /* single int */
Int2, /* pair of ints */
Long, /* long */
Double /* double */
} Type;
typedef struct { /* Descriptor for reading one entry of the options file */
char *id; /* The name of the value in the options file */
Type type; /* The type (Int, Int2, Long or Double) */
BOOL set; /* Whether the variable has been set */
void *var; /* Pointer to the variable to fill */
} Option;
struct conf{ /* Configuration of the system */
struct particles * particle; /* All particles */
struct vector box; /* Box size */
double sysvolume; /* Something like total mass (sum of particle volumes) -- TODO confirm */
struct vector syscm; /* System center of mass */
};
struct filenames { /* All file names used by the program; filled in main() and possibly rank-prefixed under MPI */
/* input files */
char configurationinfile[30]; /* initial configuration */
char topologyfile[30]; /* topology description */
char optionsfile[30]; /* simulation options */
char wlinfile[30]; /* Wang-Landau input weights */
/* output files */
char configurationoutfile[30]; /* final configuration */
char moviefile[30]; /* trajectory movie */
char wloutfile[30]; /* Wang-Landau output weights */
char statfile[30]; /* volume statistics */
char clusterfile[30]; /* cluster listing */
char clusterstatfile[30]; /* cluster size statistics */
char energyfile[30]; /* energy samples */
};
struct mpiexchangedata{ /* Payload exchanged between replicas over MPI */
struct vector box; /* Box of the configuration */
double energy; /* Energy of the configuration */
double volume; /* Volume of the configuration */
int accepted; /* Boolean: whether the exchange was accepted */
struct vector syscm; /* System CM of the configuration */
long radiusholemax; /* Size of the WL hole-radius array */
long wl_order[2]; /* Wang-Landau order parameters */
};
#ifdef MPI
MPI_Datatype MPI_vector, MPI_Particle, MPI_exchange; /* MPI datatypes for exchanging configurations between replicas */
#endif
const struct stat nullstat = {0.0, 0.0, 0, 0.0, 0.0}; /* All-zero statistics record used for resetting accumulators */
long seed = 6; /* Global seed/state for the ran2 random number generator */
/*..............................................................................*/
/* Program entry point.
 * Reads options, topology and the initial configuration, optionally runs an
 * equilibration phase (with step-size adjustment in its first half), then the
 * production run, and finally writes the last configuration.
 * Returns 0 on success; exits with status 1 on fatal errors.
 * Fixes vs. original: the movie file is truncated via files.moviefile (the
 * original opened the literal "movie", ignoring the MPI rank prefix), and
 * fopen results are checked before use (fprintf/fclose on NULL is UB). */
int main(int argc, char **argv)
{
    DEBUG("start");
    FILE *outfile,*mov;                                /* Handles for writing configuration and movie */
    double (* intfce[MAXT][MAXT])(struct interacts *); /* Array of interaction functions per type pair */
    struct topo topo;                                  /* Topology (static system description) */
    struct sim sim;                                    /* Simulation options, filled by read_options() */
    struct conf conf;                                  /* Fast-changing particle and box information */
    struct filenames files;                            /* All input/output file names */
    int memoryalloc(struct conf * conf);
    int memorydealloc(struct conf * conf, struct topo * topo, struct sim * sim);
    void read_options(struct sim* sim, char filename[30]);
    void init_top(struct topo *, struct conf * conf, struct sim * sim, char filename[30]);
    void init_config(struct topo * topo, struct conf * conf, struct sim * sim, char filename[30]);
    void init_intfce(double (* intfce[MAXT][MAXT])(struct interacts *), struct topo * topo);
    void draw(FILE *, struct conf * conf, struct topo * topo);
    void printeqstat(struct disp *, double, int);
    void simulate(long nsweeps, long adjust, long paramfrq, long report,
                  double (* intfce[MAXT][MAXT])(struct interacts *),
                  struct topo * topo, struct sim * sim, struct conf * conf, struct filenames *files);
    void init_pairlist(struct topo * topo, struct sim * sim);
    void gen_pairlist(struct topo * topo, struct sim * sim, struct conf * conf);
    void print_pairlist(FILE * stream, struct sim * sim, struct topo * topo);
    int gen_clusterlist(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *) );
    int print_clusterlist(FILE * stream, BOOL decor, struct topo * topo, struct sim * sim, struct conf * conf);
    int print_clusters(FILE * stream, BOOL decor, struct sim * sim);
    int print_clusterstat(FILE * stream, BOOL decor, struct sim * sim);
    int sort_clusterlist(struct topo * topo, struct sim * sim);

    printf ("\nPatchy Spherocylinders version 3.5 ");
    /* Default file names; the MPI branch below prefixes most of them with the rank. */
    sprintf(files.configurationinfile, "config.init");
    sprintf(files.configurationoutfile, "config.last");
    sprintf(files.optionsfile, "options");
    sprintf(files.topologyfile, "top.init");
    sprintf(files.moviefile, "movie");
    sprintf(files.wlinfile, "wl.dat");
    sprintf(files.wloutfile, "wl-new.dat");
    sprintf(files.statfile, "stat.dat");
    sprintf(files.clusterfile, "cluster.dat");
    sprintf(files.clusterstatfile, "cluster_stat.dat");
    sprintf(files.energyfile, "energy.dat");
#ifdef MPI
    FILE *infile;
    printf(" MPI version");
    MPI_Init(&argc,&argv);
    MPI_Comm_size(MPI_COMM_WORLD, &(sim.mpinprocs) );
    MPI_Comm_rank(MPI_COMM_WORLD, &(sim.mpirank) );
    /* Per-rank output files so parallel replicas do not clobber each other */
    sprintf(files.configurationoutfile, "%dconfig.last", sim.mpirank);
    sprintf(files.moviefile, "%dmovie", sim.mpirank);
    sprintf(files.wloutfile, "%dwl-new.dat", sim.mpirank);
    sprintf(files.clusterfile, "%dcluster.dat", sim.mpirank);
    sprintf(files.clusterstatfile, "%dcluster_stat.dat", sim.mpirank);
    sprintf(files.energyfile, "%denergy.dat", sim.mpirank);
    sprintf(files.statfile, "%dstat.dat", sim.mpirank);
    /* Test whether there is a rank-specific input configuration for this MPI run */
    sprintf(files.configurationinfile, "%dconfig.init", sim.mpirank);
    infile = fopen(files.configurationinfile, "r");
    if (infile != NULL)
        fclose (infile);
    else sprintf(files.configurationinfile, "config.init");
    /* Test whether there is a rank-specific Wang-Landau input for this MPI run */
    sprintf(files.wlinfile, "%dwl.dat", sim.mpirank);
    infile = fopen(files.wlinfile, "r");
    if (infile != NULL)
        fclose (infile);
    else sprintf(files.wlinfile, "wl.dat");
#endif
    printf ("\n-------------------------------------\n");
    printf ("Reading options...\n");
    read_options(&sim,files.optionsfile);
    init_top(&topo, &conf, &sim,files.topologyfile);
    if (topo.chainnum ==0) {
        /* No chains: make the probability of moving them 0 */
        if (sim.chainprob > 0)
            printf ("No chains... chain move probability set to 0.\n");
        sim.chainprob = 0;
    }
    printf ("\nReading configuration...\n");
    init_config(&topo, &conf, &sim, files.configurationinfile);
    printf ("Equilibration of maximum step sizes: %ld sweeps\n", sim.nequil/2);
    fflush (stdout);
    if ( sim.wlm[0] > 0 ) {
        /* Fail early if the Wang-Landau input file is missing */
        outfile = fopen(files.wlinfile, "r");
        if (outfile == NULL) {
            printf ("ERROR: Cannot open file for Wang-Landau method (%s).\n",files.wlinfile);
            memorydealloc(&conf, &topo, &sim);
            exit(1);
        }
        fclose (outfile);
    }
    /* Empty (truncate) the movie file.
       BUGFIX: use files.moviefile instead of the literal "movie" so the
       MPI per-rank prefix set above is honoured. */
    mov = fopen(files.moviefile, "w");
    if (mov != NULL)
        fclose (mov);
    printf ("\nInitializing energy functions...\n");
    init_intfce(intfce, &topo);
    if (sim.pairlist_update) {
        init_pairlist(&topo, &sim);
    }
    if (sim.nequil) {
        printf("\nStart equilibration...\n");
        /* First half adjusts step sizes, second half equilibrates with fixed steps */
        simulate(sim.nequil/2, sim.adjust, 0, 0, intfce, &topo, &sim, &conf,&files);
        simulate(sim.nequil/2, 0, 0, 0, intfce, &topo, &sim, &conf,&files);
        printf (" Equilibrated maximum displacement / acceptance ratio: \n");
        printeqstat(sim.trans,2.0,MAXT);
        printf (" Equilibrated maximum rotation / acceptance ratio: \n");
        printeqstat(sim.rot,1.0,MAXT);
        printf (" Equilibrated maximum box length change / acceptance ratio: \n");
        printf (" %.6le / %.6le\n", sim.edge.mx/2.0,RATIO(sim.edge));
        printf (" Equilibrated maximum displacement of chain / acceptance ratio: \n");
        printeqstat(sim.chainm,2.0,MAXMT);
        printf (" Equilibrated maximum rotation of chain / acceptance ratio: \n");
        printeqstat(sim.chainr,1.0,MAXMT);
        printf ("\n");
        printf ("Further equilibration of configuration: %ld sweeps\n", sim.nequil/2);
        fflush (stdout);
        outfile = fopen("config.eq", "w");
        if (outfile != NULL) { /* guard: fprintf/fclose on NULL is undefined behaviour */
            fprintf (outfile, "%15.8le %15.8le %15.8le\n", conf.box.x, conf.box.y, conf.box.z);
            draw (outfile, &conf, &topo);
            fclose (outfile);
            printf (" Equilibrated configuration written to config.eq\n");
        }
        printf (" Box dimensions: %.10lf, %.10lf, %.10lf\n\n", conf.box.x, conf.box.y, conf.box.z);
    }
    printf ("Production run: %ld sweeps\n\n", sim.nsweeps);
    fflush (stdout);
    simulate(sim.nsweeps, 0, sim.paramfrq, sim.report, intfce, &topo, &sim, &conf,&files);
#ifdef MPI
    printf (" MPI replica changeT / changeP / acceptance ratio: \t %.6lf / %.6lf / %.6lf\n\n", sim.mpiexch.mx,sim.mpiexch.angle,RATIO(sim.mpiexch));
#endif
    outfile = fopen(files.configurationoutfile, "w");
    if (outfile != NULL) { /* guard against failed open of the final-configuration file */
#ifdef TESTING
        fprintf (outfile, "%15.6le %15.6le %15.6le\n", conf.box.x, conf.box.y, conf.box.z);
#else
        fprintf (outfile, "%15.8le %15.8le %15.8le\n", conf.box.x, conf.box.y, conf.box.z);
#endif
        draw (outfile, &conf, &topo);
        fclose (outfile);
    }
    // For testing the pairlist
    //gen_pairlist(&topo, &sim, &conf);
    //FILE * fpairlist;
    //fpairlist = fopen("pairlist.dat", "w");
    //print_pairlist(fpairlist, &sim, &topo);
    //fclose(fpairlist);
    //printf("sqmaxcut = %lf\n", topo.sqmaxcut);
    //// For testing the cluster algorithm
    //gen_clusterlist(&topo, &sim, &conf);
    //print_clusterlist(stdout, TRUE, &topo, &sim, &conf);
    //sort_clusterlist(&topo, &sim);
    //print_clusters(stdout, TRUE, &sim);
    //print_clusterstat(stdout, TRUE, &sim);
    if (memorydealloc(&conf, &topo, &sim))
        exit(1);
#ifdef MPI
    MPI_Finalize();
#endif
    printf ("\nDone\n\n");
    return 0;
}
/*..............................................................................*/
/*.........................SIMULATION RUN.......................................*/
/*..............................................................................*/
/* Run nsweeps Monte Carlo sweeps.
 * adjust   - sweeps between step-size adjustments (0 = never; equilibration only)
 * paramfrq - sweeps between order-parameter samples (sampling code currently disabled)
 * report   - sweeps between statistics/energy dumps
 * Side effects: updates sim (move statistics, WL data) and conf (particles, box),
 * and writes movie/energy/stat/cluster/WL files as configured.
 * Fixes vs. original: the energy/stat files are opened when report <= nsweeps
 * (the original used '<', so report == nsweeps dumped to NULL handles); the two
 * early returns in the WL-initialization section close already-opened files;
 * the movie handle is checked for NULL before use. */
void simulate(long nsweeps, long adjust, long paramfrq, long report,
              double (* intfce[MAXT][MAXT])(struct interacts *),
              struct topo * topo, struct sim * sim, struct conf * conf, struct filenames *files)
{
    long i,j,wli;
    long next_adjust;  /* Next sweep number for step size adjustment */
    long next_calc;    /* Next sweep number for order parameter calculation */
    long next_dump;    /* Next sweep number for reporting statistics */
    long next_frame;   /* Next sweep number for dumping a movie frame */
    long step;         /* Step number within a given sweep */
    long sweep;        /* Current sweep number */
    //struct stat nem;                    /* Nematic order parameter */
    //struct stat vol;                    /* Volume statistics */
    //struct stat shapex, shapey, shapez; /* Box shape statistics */
    //struct stat smec[MAXF];             /* Smectic order parameters (Fourier coefficients) */
    FILE *mf;                      /* Handle for movie file */
    FILE *cl_stat, *cl, *cl_list;  /* Handles for cluster statistics */
    FILE *ef, *statf;              /* Handles for energy file and statistics file */
    double edriftstart;   /* Energy drift calculation - start */
    double edriftchanges; /* Energy drift calculation - accumulate all changes through moves */
    double edriftend;     /* Energy drift calculation - end */
    double pvdriftstart;  /* PV drift calculation - start */
    double pvdriftend;    /* PV drift calculation - end */
    double volume;        /* Volume of box */
    double moveprobab;    /* Random number selecting the move */
    /* function declarations */
    //double nematic(long, struct particles *);
    double ran2(long *);
    //double smectic(long, struct particles *, long);
    double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *),
                       int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn);
    void accumulate(struct stat *, double);
    void draw(FILE *, struct conf * conf, struct topo * topo);
    void optimizestep(struct disp *, double, double);
    void optimizerot(struct disp *, double, double);
    void partvecinit(struct topo * topo, struct sim * sim, struct conf * conf );
    int wlinit(struct wls *, char filename[30]);
    int wlwrite(struct wls *, char filename[30]);
    int wlend(struct wls *);
    int mesh_init(struct meshs *, double, long, struct conf * conf, struct sim * sim);
    int mesh_end(struct meshs *);
    long z_order(struct wls *, struct conf * conf,int);
    long twopartdist(struct wls *, struct conf * conf,int);
    void mesh_print (struct meshs *);
    void masscenter(long, struct ia_param [MAXT][MAXT], struct conf * conf);
    void gen_pairlist(struct topo * topo, struct sim * sim, struct conf * conf);
    int write_cluster(FILE * cl_stat, FILE * cl, FILE * cl_list,
                      BOOL decor, long sweep, struct sim * sim, struct topo * topo,
                      struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *));
    double particlemove(struct topo * topo, struct sim * sim, struct conf * conf,
                        double (* intfce[MAXT][MAXT])(struct interacts *));
    double chainmove(struct topo * topo, struct sim * sim, struct conf * conf,
                     double (* intfce[MAXT][MAXT])(struct interacts *));
    double switchtypemove(struct topo * topo, struct sim * sim, struct conf * conf,
                          double (* intfce[MAXT][MAXT])(struct interacts *));
    double pressuremove(struct topo * topo, struct sim * sim, struct conf * conf,
                        double (* intfce[MAXT][MAXT])(struct interacts *));
    double replicaexchangemove(struct topo * topo, struct sim * sim, struct conf * conf,
                               double (* intfce[MAXT][MAXT])(struct interacts *), long sweep);
    long radiushole_all(struct topo *topo, struct conf *conf, struct sim * sim,int, struct vector *);
    long radiushole_position(double, struct sim *,int);
    long contparticles_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli);
    double alignment_order(struct conf * conf, struct topo * topo);
    /* Opening files for cluster statistics */
    cl_stat = cl = cl_list = ef = statf = NULL;
    mf = NULL;
    if(sim->write_cluster){
        // Empty file
        cl_stat = fopen(files->clusterstatfile, "w");
        fclose(cl_stat);
        cl_stat = fopen(files->clusterstatfile, "a");
        // Empty file
        cl = fopen(files->clusterfile, "w");
        fclose(cl);
        cl = fopen(files->clusterfile, "a");
    }
    /* Open the energy and statistics files.
       BUGFIX: use <= so that report == nsweeps (dump on the last sweep)
       does not write through NULL handles below. */
    if (report <= nsweeps){
        // Empty file
        ef = fopen(files->energyfile, "w");
        fclose(ef);
        ef = fopen(files->energyfile, "a");
        fprintf (ef, "# sweep    energy\n");
        statf = fopen(files->statfile, "w");
        fclose(statf);
        statf = fopen(files->statfile, "a");
        fprintf (statf, "# sweep    volume\n");
    }
    /*=== Initialise counters etc. ===*/
    // double pvolume;    /* Volume of all particles */
    /* pvolume =0.0;
    for (i=0;i < topo->npart;i++) {
        if (conf->particle[i].type>=0 )
            pvolume += topo->ia_params[conf->particle[i].type][conf->particle[i].type].volume;
    }*/
    sim->shprob = sim->shave/(double)topo->npart;
    for (i=0;i<MAXT;i++){
        sim->rot[i].acc = 0;
        sim->rot[i].rej = 0;
        sim->rot[i].oldrmsd = 0;
        sim->rot[i].oldmx = 0;
        sim->trans[i].acc = 0;
        sim->trans[i].rej = 0;
        sim->trans[i].oldrmsd = 0;
        sim->trans[i].oldmx = 0;
    }
    for (i=0;i<MAXMT;i++){
        sim->chainm[i].acc = 0;
        sim->chainm[i].rej = 0;
        sim->chainm[i].oldrmsd = 0;
        sim->chainm[i].oldmx = 0;
        sim->chainr[i].acc = 0;
        sim->chainr[i].rej = 0;
        sim->chainr[i].oldrmsd = 0;
        sim->chainr[i].oldmx = 0;
    }
    //(*edge).acc = (*edge).rej = (*edge).oldrmsd = (*edge).oldmx = 0;
    sim->edge.acc = sim->edge.rej = sim->edge.oldrmsd = sim->edge.oldmx = 0;
    sim->mpiexch.acc = sim->mpiexch.rej = sim->mpiexch.oldrmsd = sim->mpiexch.oldmx = 0;
    /* Initialize some values at the beginning */
    partvecinit(topo,sim,conf);
    next_adjust = adjust;
    next_calc = paramfrq;
    next_dump = report;
    next_frame = sim->movie;
    //nem = vol = shapex = shapey = shapez = nullstat;
    //for (i=0; i<MAXF; i++) smec[i] = nullstat;
    if (sim->movie > 0) {
        mf = fopen(files->moviefile, "a");
    } else {
        mf = NULL;
    }
    sim->wl.wl_meshsize = 0;
    sim->wl.radiushole = NULL;
    sim->wl.radiusholeold = NULL;
    sim->wl.radiusholemax = 0;
    sim->wl.partincontactold = 0;
    sim->wl.partincontact = 0;
    sim->wl.wlmdim = 0;
    sim->wl.length[0]=0;
    sim->wl.length[1]=0;
    sim->wl.currorder[0]=0;
    sim->wl.currorder[1]=0;
    sim->wl.neworder[0]=0;
    sim->wl.neworder[1]=0;
    sim->wl.weights = NULL;
    sim->wl.hist = NULL;
    masscenter(topo->npart,topo->ia_params, conf);
    /* Initialization of the Wang-Landau method */
    if ( sim->wlm[0] >0 ) {
        if (wlinit(&sim->wl,files->wlinfile) != 0) {
            /* BUGFIX: close already-opened files before bailing out (the
               original leaked mf/cl_stat/cl/ef/statf on this path) */
            if (mf != NULL) fclose(mf);
            if (sim->write_cluster) { fclose(cl_stat); fclose(cl); }
            if (report <= nsweeps) { fclose(ef); fclose(statf); }
            return;
        }
        sim->wl.wlmdim = 1 ;
        if ( sim->wlm[1] > 0 )
            sim->wl.wlmdim = 2 ;
        for (wli=0;wli<sim->wl.wlmdim;wli++) {
            switch (sim->wlm[wli]) {
                case 1:
                    masscenter(topo->npart,topo->ia_params, conf);
                    sim->wl.currorder[wli] = z_order(&sim->wl,conf,wli);
                    break;
                case 2:
                    sim->wl.wl_meshsize = (topo->ia_params[sim->wl.wlmtype][sim->wl.wlmtype].sigma) / 3.0; // TODO
                    sim->wl.mesh.data = NULL;
                    sim->wl.mesh.tmp = NULL;
                    sim->wl.origmesh.data = NULL;
                    sim->wl.origmesh.tmp = NULL;
                    sim->wl.currorder[wli] = (long) (mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize, topo->npart, conf, sim) - sim->wl.minorder[wli]);
                    break;
                case 3:
                    sim->wl.currorder[wli] = (long) floor( (conf->particle[0].dir.z - sim->wl.minorder[wli])/ sim->wl.dorder[wli] );
                    break;
                case 4:
                    sim->wl.currorder[wli] = twopartdist(&sim->wl,conf,wli);
                    break;
                case 5:
                    masscenter(topo->npart,topo->ia_params, conf);
                    sim->wl.radiusholemax = 0;
                    sim->wl.radiushole = NULL;
                    sim->wl.radiusholeold = NULL;
                    sim->wl.currorder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm));
                    break;
                case 6:
                    sim->wl.radiusholemax = 0;
                    sim->wl.radiushole = NULL;
                    sim->wl.radiusholeold = NULL;
                    sim->wl.currorder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos));
                    break;
                case 7:
                    sim->wl.currorder[wli] = contparticles_all(topo,conf,sim,wli);
                    break;
                default:
                    sim->wl.currorder[wli] = 0;
                    break;
            }
            if ( (sim->wl.currorder[wli] >= sim->wl.length[wli] ) || (sim->wl.currorder[wli] < 0) ) {
                printf("Error: starting Wang-Landau method with order parameter %f out of range(%f - %f)\n\n", sim->wl.dorder[wli]*sim->wl.currorder[wli] + \
                       sim->wl.minorder[wli], sim->wl.minorder[wli], sim->wl.minorder[wli]+sim->wl.dorder[wli]*sim->wl.length[wli] );
                wlend(&sim->wl);
                /* BUGFIX: same cleanup as above to avoid leaking open handles */
                if (mf != NULL) fclose(mf);
                if (sim->write_cluster) { fclose(cl_stat); fclose(cl); }
                if (report <= nsweeps) { fclose(ef); fclose(statf); }
                return;
            }
        }
        if (sim->wl.alpha < WL_ALPHATOL/100) sim->wl.alpha = WL_ZERO;
        fflush (stdout);
    }
    /*do moves - START OF REAL MC*/
    if(sim->pairlist_update){
        gen_pairlist(topo, sim, conf); // Does that solve the problem?
    }
    /* Energy drift check - starting values */
    volume = conf->box.x * conf->box.y * conf->box.z;
    edriftstart = calc_energy(0, intfce, 0, topo, conf, sim,0);
    pvdriftstart = sim->press * volume - (double)topo->npart * log(volume) / sim->temper;
    //printf("starting energy: %.15f \n",calc_energy(0, intfce, 0, topo, conf, sim,0));
    //printf("press: %.15f\n",sim->press * volume - (double)topo->npart * log(volume) / sim->temper);
    edriftchanges = 0.0;
    for (sweep=1; sweep <= nsweeps; sweep++) {
        // Try replica exchange
        if((sim->nrepchange) && (sweep % sim->nrepchange == 0)){
            edriftchanges += replicaexchangemove(topo,sim,conf,intfce,sweep);
        }
        // Generate the pairlist
        if((sim->pairlist_update) && (sweep % sim->pairlist_update == 0)){
            gen_pairlist(topo, sim, conf);
        }
        // Normal moves: one attempted move per particle per sweep on average
        for (step=1; step <= topo->npart; step++) {
            moveprobab = ran2(&seed);
            if ( moveprobab < sim->shprob) {
                /* pressure moves */
                edriftchanges += pressuremove(topo,sim,conf,intfce);
            } else {
                if (moveprobab < sim->shprob + sim->chainprob) {
                    /* chain moves */
                    edriftchanges += chainmove(topo,sim,conf,intfce);
                }
                else if (moveprobab < sim->shprob + sim->chainprob + sim->switchprob){
                    /*=== This is an attempt to switch a type ===*/
                    edriftchanges += switchtypemove(topo,sim,conf,intfce);
                } else {
                    /* single particle moves */
                    edriftchanges += particlemove(topo,sim,conf,intfce);
                } /* end of else next to chain moves */
            } /* end of else next to volume moves */
        }
        /**** End of step loop for this sweep ****/
        /*=== Start of end-of-sweep housekeeping ===*/
        /* Adjustment of maximum step sizes during equilibration */
        if (sweep == next_adjust) {
            for (i = 0; i < MAXT ;i++) {
                if ((sim->trans[i].acc > 0)||(sim->trans[i].rej >0))
                    optimizestep (sim->trans + i, 1.5, 0.0);
                if ((sim->rot[i].acc > 0)||(sim->rot[i].rej >0))
                    optimizerot (sim->rot + i, 5.0, 0.01);
            }
            for (i = 0; i < MAXMT; i++) {
                if ((sim->chainm[i].acc > 0)||(sim->chainm[i].rej > 0))
                    optimizestep (sim->chainm + i, 1.5, 0.0);
                if ((sim->chainr[i].acc > 0)||(sim->chainr[i].rej > 0))
                    optimizerot (sim->chainr + i, 5.0, 0.01);
            }
            optimizestep (&(sim->edge), 1.0, 0.0);
            next_adjust += adjust;
        }
        /* Wang-Landau: every 1000 sweeps test histogram flatness and halve alpha */
        if ( (sim->wlm[0] > 0) && (sim->wl.alpha > WL_ZERO) && !(sweep % 1000) ) {
            /* recalculate system CM to be sure there is no accumulation of errors by +- rejection moves */
            /* BUG - not used any longer: caused problems with PBC normal moves; systemCM movement
               can be calculated from CM movements of individual particles.
               The present center of mass calculation uses PBC and thus particles that moved across the box
               are in this calculation used in the primary box, but in other moves in the particle's position.
            if ( (sim->wlm[0] == 1) || (sim->wlm[1] == 1) )
                masscenter(topo->npart,topo->ia_params, conf);
            */
            sim->wl.min = sim->wl.hist[0];
            sim->wl.max = sim->wl.hist[0];
            for (i=0;i < sim->wl.length[0];i++) {
                j=0;
                if ( sim->wl.hist[i+j*sim->wl.length[0]] > sim->wl.max ) sim->wl.max = sim->wl.hist[i+j*sim->wl.length[0]];
                if ( sim->wl.hist[i+j*sim->wl.length[0]] < sim->wl.min ) sim->wl.min = sim->wl.hist[i+j*sim->wl.length[0]];
                for (j=1;j < sim->wl.length[1];j++) {
                    if ( sim->wl.hist[i+j*sim->wl.length[0]] > sim->wl.max ) sim->wl.max = sim->wl.hist[i+j*sim->wl.length[0]];
                    if ( sim->wl.hist[i+j*sim->wl.length[0]] < sim->wl.min ) sim->wl.min = sim->wl.hist[i+j*sim->wl.length[0]];
                }
            }
            if ( sim->wl.min > WL_MINHIST ) {
                if ( sim->temper * log(sim->wl.max/sim->wl.min) < WL_GERR ) {
                    /*DEBUG
                    for (i=1;i<wl.length;i++) {
                        printf (" %15.8le %15ld %15.8f\n",sim->wl.weights[i],sim->wl.hist[i],particle[0].pos.z);
                        fflush(stdout);
                    }
                    */
                    if ( sim->wl.alpha < WL_ALPHATOL) break;
                    sim->wl.alpha/=2;
                    printf("%f \n", sim->wl.alpha);
                    fflush (stdout);
                    /* reset histogram and renormalize weights to their minimum */
                    sim->wl.wmin = sim->wl.weights[0];
                    for (i=0;i < sim->wl.length[0];i++) {
                        j=0;
                        sim->wl.hist[i+j*sim->wl.length[0]] = 0;
                        sim->wl.weights[i+j*sim->wl.length[0]] -= sim->wl.wmin;
                        for (j=1;j < sim->wl.length[1];j++) {
                            sim->wl.hist[i+j*sim->wl.length[0]] = 0;
                            sim->wl.weights[i+j*sim->wl.length[0]] -= sim->wl.wmin;
                        }
                    }
                }
            }
        }
        if (!(sweep % 10000)) {
            /* reinitialize patch vectors to avoid accumulation of errors */
            partvecinit(topo,sim,conf);
        }
        /* Sampling of statistics */
        if (sweep == next_calc)
        {
            /*s2 = nematic(npart, particle);
            accumulate (&nem, s2);
            for (i=0; i<terms; i++) {
                ci = smectic(npart, particle, i+1);
                accumulate (&smec[i], ci);
            }
            accumulate (&shapex, (*box).x);
            accumulate (&shapey, (*box).y);
            accumulate (&shapez, (*box).z);
            volume = (*box).x * (*box).y * (*box).z;
            accumulate (&vol, volume);
            next_calc += paramfrq;
            */
        }
        /* Writing of statistics */
        if (sweep == next_dump) {
            /*printf ("Statistics after %ld sweeps:\n", sweep);
            printf ("   Mean and RMS fluctuation of S2:  %13.8lf %13.8lf\n",
                    nem.mean, nem.rms);
            for (i=0; i<terms; i++) {
                printf ("   Mean & fluc. Fourier coeff. %3ld: %13.8lf %13.8lf\n",
                        i+1, smec[i].mean, smec[i].rms);
            }
            printf ("   Mean & fluc box dimensions:  x   %13.8lf %13.8lf\n",
                    shapex.mean, shapex.rms);
            printf ("                                y   %13.8lf %13.8lf\n",
                    shapey.mean, shapey.rms);
            printf ("                                z   %13.8lf %13.8lf\n",
                    shapez.mean, shapez.rms);
            printf ("   Mean & fluctuation volume:       %13.8lf %13.8lf\n",
                    vol.mean, vol.rms);
            printf ("   Mean & fluc. volume over volume of particles:    %13.8lf %13.8lf\n",
                    vol.mean/pvolume, vol.rms/pvolume);
            printf ("\n");
            fflush (stdout);
            */
            fprintf (statf, " %ld; %.10lf\n", sweep, conf->box.x * conf->box.y * conf->box.z);
            fprintf (ef, " %ld; %.10lf  %f \n", sweep, calc_energy(0, intfce, 0, topo, conf, sim,0), alignment_order(conf,topo));
            if (sim->wlm[0] > 0) {
                wlwrite(&sim->wl,files->wloutfile);
            }
            next_dump += report;
        }
        /* Writing of movie frame */
        if (sweep == next_frame) {
            if (mf != NULL) { /* guard: the movie fopen above may have failed */
                fprintf (mf, "%ld\n", topo->npart);
                fprintf (mf, "sweep %ld; box %.10lf %.10lf %.10lf\n", sweep, conf->box.x, conf->box.y, conf->box.z);
                draw (mf, conf, topo);
                fflush (mf);
            }
            next_frame += sim->movie;
        }
        /* Writing out cluster statistics */
        if(sim->write_cluster && (sweep % sim->write_cluster == 0)){
            write_cluster(cl_stat, cl, cl_list, FALSE, sweep, sim, topo, conf, intfce);
        }
        /*=== End of housekeeping ===*/
    }
    /**** End of sweeps loop ****/
    /* Energy drift check - final values */
    volume = conf->box.x * conf->box.y * conf->box.z;
    edriftend = calc_energy(0, intfce, 0, topo, conf, sim,0);
    pvdriftend =  sim->press * volume - (double)topo->npart * log(volume) / sim->temper;
    printf("Energy drift: %.15lf \n",edriftend - edriftstart - edriftchanges +pvdriftend -pvdriftstart);
    printf("Starting energy+pv: %.8lf \n",edriftstart+pvdriftstart);
    printf("Starting energy: %.8lf \n",edriftstart);
    fflush(stdout);
    /* End Wang-Landau: renormalize weights, write them out, free resources */
    if (sim->wlm[0] > 0) {
        sim->wl.min = sim->wl.hist[0];
        for (i=0;i < sim->wl.length[0];i++) {
            j=0;
            if ( sim->wl.hist[i+j*sim->wl.length[0]] < sim->wl.min ) sim->wl.min = sim->wl.hist[i+j*sim->wl.length[0]];
            for (j=1;j < sim->wl.length[1];j++) {
                if ( sim->wl.hist[i+j*sim->wl.length[0]] < sim->wl.min ) sim->wl.min = sim->wl.hist[i+j*sim->wl.length[0]];
            }
        }
        sim->wl.wmin = sim->wl.weights[0];
        for (i=0;i < sim->wl.length[0];i++) {
            j=0;
            sim->wl.weights[i+j*sim->wl.length[0]] -= sim->wl.wmin;
            for (j=1;j < sim->wl.length[1];j++) {
                sim->wl.weights[i+j*sim->wl.length[0]] -= sim->wl.wmin;
            }
        }
        wlwrite(&sim->wl,files->wloutfile);
        wlend(&sim->wl);
        if ( (sim->wlm[0] == 2)||(sim->wlm[1] == 2) ) {
            mesh_end(&sim->wl.mesh);
            mesh_end(&sim->wl.origmesh);
        }
        if ( (sim->wlm[0] == 5)||(sim->wlm[1] == 5)||(sim->wlm[0] == 6)||(sim->wlm[1] == 6) ) {
            if ( sim->wl.radiushole != NULL ) free(sim->wl.radiushole);
            if ( sim->wl.radiusholeold != NULL ) free(sim->wl.radiusholeold);
        }
    }
    /* Close movie file */
    if (sim->movie > 0 && mf != NULL)
        fclose (mf);
    /* Close cluster files */
    if(sim->write_cluster){
        fclose(cl_stat);
        fclose(cl);
    }
    /* BUGFIX: must mirror the <= condition used when opening above */
    if (report <= nsweeps) {
        fclose(ef);
        fclose(statf);
    }
}
/*..................................MOVES.........................................*/
/*................................................................................*/
/*..............................PARTICLE MOVES....................................*/
/* Perform one single-particle Monte Carlo step: choose a random particle,
 * then attempt either a translational displacement or a rotation (50/50),
 * delegating to partdisplace()/partrotate().  Returns the resulting
 * energy-drift bookkeeping change (0.0 if the sub-move was rejected). */
double particlemove(struct topo * topo, struct sim * sim, struct conf * conf,
    double (* intfce[MAXT][MAXT])(struct interacts *))
{
    /* forward declarations of helpers defined elsewhere in this file */
    double ran2(long *);
    double partdisplace(struct topo * topo, struct sim * sim, struct conf * conf,
        double (* intfce[MAXT][MAXT])(struct interacts *),long target);
    double partrotate(struct topo * topo, struct sim * sim, struct conf * conf,
        double (* intfce[MAXT][MAXT])(struct interacts *),long target);
    double dE = 0.0;
    long picked;

    /*=== This is a particle move step ===*/
    picked = ran2(&seed) * topo->npart;

    /* Wang-Landau method 3 pins particle 0 to rotations only.  Evaluate this
     * first so that, exactly as in the original short-circuit, ran2() is NOT
     * consumed when particle 0 is pinned (keeps the RNG stream identical). */
    int zpinned = ( (sim->wlm[0] == 3) || (sim->wlm[1] == 3) ) && (picked == 0);
    if ( !zpinned && \
         ((ran2(&seed) < 0.5) || (topo->ia_params[conf->particle[picked].type][conf->particle[picked].type].geotype[0] >= SP)) ) {
        /* spheres (geotype >= SP) never rotate, so they always displace */
        dE = partdisplace(topo,sim,conf,intfce,picked);
    } else {
        /*=== Rotation step ===*/
        dE = partrotate(topo,sim,conf,intfce,picked);
    }
    /*=== End particle move step ===*/
    return dE;
}
/*................................................................................*/
/* Attempt a translational displacement of particle `target`.
 *
 * Sequence: record old energy and position, draw a random trial vector scaled
 * by the per-type step size, update any active Wang-Landau order parameters,
 * then accept/reject via movetry() (Metropolis at sim->temper, with the WL
 * weight difference folded into `energy`).  On rejection the particle position
 * (and, for WL methods 1/5, the system centre of mass) is restored.
 *
 * Returns the energy-drift change: new - old energy + WL weight change when
 * accepted, 0.0 when rejected. */
double partdisplace(struct topo * topo, struct sim * sim, struct conf * conf,
    double (* intfce[MAXT][MAXT])(struct interacts *),long target)
{
    double edriftchanges,energy,enermove,wlener;
    struct vector orig, dr, origsyscm;
    int reject=0,wli;
    double radiusholemax_orig=0;
    /* forward declarations of helpers defined elsewhere in this file */
    double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *),
        int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn);
    int movetry(double, double, double);
    void wlreject(struct sim *, long);
    void wlaccept(int, struct wls *);
    long meshorder_moveone(struct vector, struct vector, struct meshs *, long, long, \
        struct conf * conf, struct sim * sim, int wli);
    int mesh_cpy(struct meshs *, struct meshs *);
    long z_order(struct wls *, struct conf * conf, int wli);
    long twopartdist(struct wls *, struct conf *conf, int wli);
    struct vector ranvec(void);
    int longarray_cpy (long **, long **, long, long);
    long radiusholeorder_moveone(struct vector *oldpos, struct conf *conf, struct sim * sim, long target, int wli,struct vector *);
    long radiushole_all(struct topo *topo, struct conf *conf, struct sim * sim,int, struct vector *);
    long contparticles_moveone(struct vector *oldpos, struct conf *conf, struct sim * sim, long target,int wli);
    long contparticles_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli);

    /*=== Displacement step ===*/
    edriftchanges =0.0;
    origsyscm.x = 0;
    origsyscm.y = 0;
    origsyscm.z = 0;
    /* energy of `target` in the OLD configuration (mode 1 = single particle) */
    energy = calc_energy(target, intfce, 1, topo, conf, sim,0);
    orig = conf->particle[target].pos;
    /* random unit vector scaled by the per-type maximum displacement;
     * division by the box converts to fractional (box) coordinates */
    dr = ranvec();
    dr.x *= sim->trans[conf->particle[target].type].mx/conf->box.x;
    dr.y *= sim->trans[conf->particle[target].type].mx/conf->box.y;
    dr.z *= sim->trans[conf->particle[target].type].mx/conf->box.z;
    /* WL method 3 pins particle 0: zero the displacement entirely */
    if ( ((sim->wlm[0] == 3)||(sim->wlm[1] == 3)) && (target == 0) ) {
        dr.z = 0;
        dr.y = 0;
        dr.x = 0;
    }
    conf->particle[target].pos.x += dr.x;
    conf->particle[target].pos.y += dr.y;
    conf->particle[target].pos.z += dr.z;
    reject = 0;
    wlener = 0.0;
    if (sim->wlm[0] > 0) { /* get new neworder for Wang-Landau */
        for (wli=0;wli<sim->wl.wlmdim;wli++) {
            switch (sim->wlm[wli]) {
                /* method 1: z position of the system centre of mass.
                 * syscm is updated incrementally (volume-weighted); the old
                 * value is kept in origsyscm for restoration on rejection. */
                case 1: origsyscm = conf->syscm;
                    conf->syscm.x += dr.x * topo->ia_params[conf->particle[target].type][conf->particle[target].type].volume / conf->sysvolume;
                    conf->syscm.y += dr.y * topo->ia_params[conf->particle[target].type][conf->particle[target].type].volume / conf->sysvolume;
                    conf->syscm.z += dr.z * topo->ia_params[conf->particle[target].type][conf->particle[target].type].volume / conf->sysvolume;
                    sim->wl.neworder[wli] = z_order(&sim->wl, conf,wli);
                    break;
                /* method 2: mesh-based order parameter; back up the mesh,
                 * then incrementally update it for the single moved particle */
                case 2: mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh);
                    sim->wl.neworder[wli] = meshorder_moveone(orig, conf->particle[target].pos, &sim->wl.mesh, topo->npart, target, conf, sim,wli);
                    break;
                /* method 4: distance between two tagged particles */
                case 4:
                    sim->wl.neworder[wli] = twopartdist(&sim->wl,conf,wli);
                    break;
                /* method 5: radius of hole around the system centre of mass;
                 * full recomputation after updating syscm */
                case 5:
                    radiusholemax_orig = sim->wl.radiusholemax;
                    origsyscm = conf->syscm;
                    conf->syscm.x += dr.x * topo->ia_params[conf->particle[target].type][conf->particle[target].type].volume / conf->sysvolume;
                    conf->syscm.y += dr.y * topo->ia_params[conf->particle[target].type][conf->particle[target].type].volume / conf->sysvolume;
                    conf->syscm.z += dr.z * topo->ia_params[conf->particle[target].type][conf->particle[target].type].volume / conf->sysvolume;
                    longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
                    sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm));
                    break;
                /* method 6: radius of hole around particle 0; full recompute
                 * if particle 0 itself moved, incremental update otherwise */
                case 6:
                    radiusholemax_orig = sim->wl.radiusholemax;
                    longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
                    if ( target == 0 )
                        sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos));
                    else
                        sim->wl.neworder[wli] = radiusholeorder_moveone(&orig, conf, sim,target,wli,&(conf->particle[0].pos));
                    break;
                /* method 7: number of particles in contact */
                case 7:
                    sim->wl.partincontactold = sim->wl.partincontact;
                    if ( target == 0 )
                        sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli);
                    else
                        sim->wl.neworder[wli] = contparticles_moveone(&orig,conf,sim,target,wli);
                    break;
                default:
                    sim->wl.neworder[wli] = sim->wl.currorder[wli];
                    break;
            }
            /* reject outright if the new order parameter left the WL window */
            if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1;
        }
        if (!reject) {
            /* bias the acceptance by the WL weight difference (2D-indexed) */
            wlener += sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]];
            energy += wlener;
        }
    }
    if (!reject) { /* Wang-Landau ok, try move - calculate energy */
        enermove = calc_energy(target, intfce, 1, topo, conf, sim,0);
    }
    /* NOTE: `enermove` is only read when !reject, thanks to short-circuiting */
    if ( reject || movetry(energy, enermove, sim->temper) ) { /* probability acceptance */
        conf->particle[target].pos = orig;
        sim->trans[conf->particle[target].type].rej++;
        if ( (sim->wlm[0] == 1) || (sim->wlm[0] == 5) || (sim->wlm[1] == 1) || (sim->wlm[1] == 5) )
            conf->syscm = origsyscm;
        wlreject(sim,radiusholemax_orig);
    } else { /* move was accepted */
        sim->trans[conf->particle[target].type].acc++;
        wlaccept(sim->wlm[0],&sim->wl);
        edriftchanges = enermove - energy + wlener;
    }
    return edriftchanges;
}
/*................................................................................*/
/* Attempt a rotation of particle `target`.
 *
 * The whole particle record is backed up, psc_rotate() applies a random
 * rotation scaled by the per-type angular step, and the direction/patch
 * vectors are re-normalised/re-orthogonalised for numerical safety.  Only
 * Wang-Landau method 3 (z component of particle 0's direction) is affected
 * by a pure rotation; all other WL orders carry over unchanged.
 *
 * Returns the energy-drift change (accepted: new - old + WL weight change;
 * rejected: 0.0). */
double partrotate(struct topo * topo, struct sim * sim, struct conf * conf,
    double (* intfce[MAXT][MAXT])(struct interacts *),long target)
{
    double edriftchanges,energy,enermove,wlener;
    struct particles origpart;
    int reject=0,wli;
    /* forward declarations of helpers defined elsewhere in this file */
    double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *),
        int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn);
    int movetry(double, double, double);
    void wlreject(struct sim *,long);
    void wlaccept(int, struct wls *);
    void normalise(struct vector *);
    void ortogonalise(struct vector *,struct vector);
    void psc_rotate(struct particles *,double,int);

    /*=== Rotation step ===*/
    /* energy of `target` in the OLD orientation (mode 1 = single particle) */
    energy = calc_energy(target, intfce, 1, topo, conf, sim,0);
    origpart = conf->particle[target];
    psc_rotate(&conf->particle[target],sim->rot[conf->particle[target].type].angle, topo->ia_params[conf->particle[target].type][conf->particle[target].type].geotype[0]);
    /* should already be normalised and orthogonal, but redo it for safety */
    normalise (&conf->particle[target].dir);
    ortogonalise(&conf->particle[target].patchdir[0],conf->particle[target].dir);
    reject = 0;
    edriftchanges =0.0;
    wlener = 0.0;
    if (sim->wlm[0] > 0) { /* get new neworder for Wang-Landau */
        for (wli=0;wli<sim->wl.wlmdim;wli++) {
            switch (sim->wlm[wli]) {
                /* method 3: bin the z component of particle 0's direction */
                case 3:
                    if (target == 0) sim->wl.neworder[wli] = (long) floor( (conf->particle[0].dir.z - sim->wl.minorder[wli])/ sim->wl.dorder[wli] );
                    else sim->wl.neworder[wli] = sim->wl.currorder[wli];
                    /* only rotation changes the direction */
                    break;
                default:
                    sim->wl.neworder[wli] = sim->wl.currorder[wli];
                    break;
            }
            /* reject outright if the new order parameter left the WL window */
            if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1;
        }
        if (!reject) {
            /* bias the acceptance by the WL weight difference (2D-indexed) */
            wlener += sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]];
            energy += wlener;
        }
    }
    if (!reject) { /* Wang-Landau ok, try move - calculate energy */
        enermove = calc_energy(target, intfce, 1, topo, conf, sim,0);
    }
    /* NOTE: `enermove` is only read when !reject, thanks to short-circuiting */
    if ( reject || movetry(energy,enermove,sim->temper) ) { /* probability acceptance */
        conf->particle[target] = origpart;
        sim->rot[conf->particle[target].type].rej++;
        wlreject(sim,sim->wl.radiusholemax);
    } else { /* move was accepted */
        sim->rot[conf->particle[target].type].acc++;
        wlaccept(sim->wlm[0],&sim->wl);
        edriftchanges = enermove - energy + wlener;
    }
    return edriftchanges;
}
/*..................... This is an attempt to switch a type.................................*/
/* Attempt to switch the type of a randomly chosen switchable particle
 * (semi-grand-canonical style move).
 *
 * The particle's `type` and `switchtype` fields are swapped and `switched`
 * toggled via PMONE; the trial energy additionally includes the chemical
 * potential difference delta_mu * pmone.  On rejection the swap is undone.
 * Returns the energy-drift change (0.0 on rejection). */
double switchtypemove(struct topo * topo, struct sim * sim, struct conf * conf,
    double (* intfce[MAXT][MAXT])(struct interacts *) )
{
    double edriftchanges,energy,enermove,wlener;
    int reject=0,wli;
    long target;
    double radiusholemax_orig=0;
    /* forward declarations of helpers defined elsewhere in this file */
    double ran2(long *);
    double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *),
        int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn);
    int movetry(double, double, double);
    void wlreject(struct sim *,long);
    void wlaccept(int, struct wls *);
    void int_partvec(long, struct ia_param *, struct conf *);
    int mesh_init(struct meshs *, double, long, struct conf * conf, struct sim * sim);
    int mesh_cpy(struct meshs *, struct meshs *);
    long z_order(struct wls *, struct conf * conf,int);
    long twopartdist(struct wls *, struct conf *conf,int);
    long radiushole_all(struct topo *topo, struct conf *conf, struct sim * sim,int, struct vector *);
    int longarray_cpy (long **target, long **source,long,long);
    long contparticles_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli);

    /*=== This is an attempt to switch a type ===*/
    edriftchanges =0.0;
    wlener = 0.0;
    /* pick one of the switchable particles via the switch list */
    target = ran2(&seed) * topo->n_switch_part;
    target = topo->switchlist[target];
    DEBUG_SIM("Switching the particle type");
    DEBUG_SIM("PARTICLE: %ld", target);
    energy = calc_energy(target, intfce, 1, topo, conf, sim,0);
    /* Start switching the type: swap type <-> switchtype, toggle `switched` */
    int switched = conf->particle[target].switched;
    int pmone = PMONE(switched);
    DEBUG_SIM("switched = %d", switched);
    DEBUG_SIM("pmone = %d", pmone);
    int tmp_type = conf->particle[target].type;
    conf->particle[target].type = conf->particle[target].switchtype;
    conf->particle[target].switchtype = tmp_type;
    conf->particle[target].switched += pmone;
    /* refresh the particle's cached interaction vectors for its new type */
    int_partvec(target,&(topo->ia_params[conf->particle[target].type][conf->particle[target].type]),conf);
    DEBUG_SIM("Particle %ld is %d switched", target, switched);
#ifdef DEBUGGING_SIM
    /* sanity check: the swap must actually have changed the type */
    if ((abs(pmone) != 1) || (conf->particle[target].type == conf->particle[target].switchtype)){
        fprintf(stderr, "ERROR: Something went wrong, when switching the type of particle %ld\n", target);
        exit(1);
    }
#endif
    if (sim->wlm[0] > 0) { /* get new neworder for Wang-Landau */
        for (wli=0;wli<sim->wl.wlmdim;wli++) {
            switch (sim->wlm[wli]) {
                /* methods 1 and 4 are intentionally disabled for this move
                 * (a type switch does not move any particle) */
                /*case 1: sim->wl.neworder = z_order(&sim->wl, conf,wli);
                    break;*/
                /* method 2: a size change may alter the mesh, so rebuild it */
                case 2: mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh);
                    sim->wl.neworder[wli] = (long) (mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize,topo->npart, conf, sim) - sim->wl.minorder[wli]);
                    break;
                /*case 4:
                    sim->wl.neworder = twopartdist(&sim->wl,conf,wli);
                    break;*/
                case 5:
                    radiusholemax_orig = sim->wl.radiusholemax;
                    longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
                    sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm));
                    break;
                case 6:
                    radiusholemax_orig = sim->wl.radiusholemax;
                    longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
                    sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos));
                    break;
                case 7:
                    sim->wl.partincontactold = sim->wl.partincontact;
                    sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli);
                    break;
                default:
                    sim->wl.neworder[wli] = sim->wl.currorder[wli];
                    break;
            }
            /* reject outright if the new order parameter left the WL window */
            if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1;
        }
        if (!reject) {
            /* bias the acceptance by the WL weight difference (2D-indexed) */
            wlener += sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]];
            energy += wlener;
        }
    }
    if (!reject) {
        /* chemical-potential contribution of the switch, then the new
         * configurational energy */
        enermove = conf->particle[target].delta_mu * pmone;
        enermove += calc_energy( target, intfce, 1, topo, conf, sim,0);
    }
    /* If not accepted: switch back (enermove only read when !reject) */
    if ( reject || movetry(energy,enermove,sim->temper) ) { /* probability acceptance */
        DEBUG_SIM("Did NOT switch it\n");
        conf->particle[target].switchtype = conf->particle[target].type;
        conf->particle[target].type = tmp_type;
        conf->particle[target].switched -= pmone;
        int_partvec(target,&(topo->ia_params[conf->particle[target].type][conf->particle[target].type]),conf);
        wlreject(sim,radiusholemax_orig);
    } else { /* move was accepted */
        wlaccept(sim->wlm[0],&sim->wl);
        edriftchanges = enermove - energy + wlener;
    }
    return edriftchanges;
}
/*.................................CHAIN MOVES....................................*/
/*................................................................................*/
/* Perform one chain Monte Carlo step: choose a random chain, then attempt
 * either a rigid displacement or a rigid rotation of the whole chain (50/50),
 * delegating to chaindisplace()/chainrotate().  Returns the resulting
 * energy-drift bookkeeping change (0.0 if the sub-move was rejected). */
double chainmove(struct topo * topo, struct sim * sim, struct conf * conf,
    double (* intfce[MAXT][MAXT])(struct interacts *))
{
    /* forward declarations of helpers defined elsewhere in this file */
    double ran2(long *);
    double chaindisplace(struct topo * topo, struct sim * sim, struct conf * conf,
        double (* intfce[MAXT][MAXT])(struct interacts *), long target);
    double chainrotate(struct topo * topo, struct sim * sim, struct conf * conf,
        double (* intfce[MAXT][MAXT])(struct interacts *), long target);
    long chain;

    /*=== This is a chain move step ===*/
    chain = ran2(&seed) * topo->chainnum;
    /* 50/50 split between displacement and rotation of the cluster/chain */
    return (ran2(&seed) < 0.5)
        ? chaindisplace(topo,sim,conf,intfce,chain)
        : chainrotate(topo,sim,conf,intfce,chain);
    /* ==== END OF CHAIN MOVES ===== */
}
/*................................................................................*/
/* Attempt a rigid translational displacement of chain `target` (an index
 * into topo->chainlist; each chain is a -1 terminated list of particle
 * indices).
 *
 * Old positions are saved in chorig[], the whole chain is translated by a
 * random vector, Wang-Landau order parameters are updated, and the move is
 * accepted/rejected via movetry().  On rejection all positions (and, for WL
 * methods 1/5, the system centre of mass) are restored.
 *
 * NOTE(review): `target` is a CHAIN index but is used below to index
 * `conf->particle[target].chaint` for the step size and counters — this
 * assumes chain i's parameters can be read via particle i; confirm against
 * how chainlist/chaint are built.
 *
 * Returns the energy-drift change (0.0 on rejection). */
double chaindisplace(struct topo * topo, struct sim * sim, struct conf * conf,
    double (* intfce[MAXT][MAXT])(struct interacts *), long target)
{
    double edriftchanges,energy,enermove,wlener;
    struct vector dr, origsyscm;
    int reject=0,wli;
    struct vector cluscm;
    long current,i;
    struct particles chorig[MAXCHL];
    double radiusholemax_orig=0;
    /* forward declarations of helpers defined elsewhere in this file */
    double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *),
        int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn);
    int movetry(double, double, double);
    void wlreject(struct sim *,long);
    void wlaccept(int, struct wls *);
    long meshorder_movechain(long [MAXN], struct meshs *, long, struct conf * conf, \
        struct sim * sim, struct particles chorig[MAXCHL],int);
    int mesh_cpy(struct meshs *, struct meshs *);
    long z_order(struct wls *, struct conf * conf,int);
    long twopartdist(struct wls *, struct conf *conf,int);
    struct vector ranvec(void);
    int longarray_cpy (long **target, long **source,long,long);
    long radiusholeorder_movechain(long chain[MAXN], struct conf * conf, \
        struct sim * sim,struct particles chorig[MAXCHL],int,struct vector *);
    long radiushole_all(struct topo *topo, struct conf *conf, struct sim * sim,int, struct vector *);
    long contparticles_movechain(long chain[MAXN], struct conf * conf, struct sim * sim,struct particles chorig[MAXCHL],int wli);
    long contparticles_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli);

    /*=== Displacement step of cluster/chain ===*/
    energy =0.0;
    wlener = 0.0;
    edriftchanges=0.0;
    i=0;
    current = topo->chainlist[target][0];
    cluscm.x = 0;
    cluscm.y = 0;
    cluscm.z = 0;
    origsyscm.x = 0;
    origsyscm.y = 0;
    origsyscm.z = 0;
    /* store old configuration and accumulate the OLD chain energy
     * (mode 2 = chain member; `target` passed so intra-chain pairs count once) */
    while (current >=0 ) {
        chorig[i].pos = conf->particle[current].pos;
        energy += calc_energy(current, intfce, 2, topo, conf, sim, target);
        i++;
        current = topo->chainlist[target][i];
    }
    /* random trial vector in fractional (box) coordinates */
    dr = ranvec();
    dr.x *= sim->chainm[conf->particle[target].chaint].mx/conf->box.x;
    dr.y *= sim->chainm[conf->particle[target].chaint].mx/conf->box.y;
    dr.z *= sim->chainm[conf->particle[target].chaint].mx/conf->box.z;
    i=0;
    /* WL method 3 pins chain 0: zero the displacement entirely */
    if ( ((sim->wlm[0] == 3)||(sim->wlm[1] == 3)) && (target == 0) ) {
        dr.z = 0;
        dr.y = 0;
        dr.x = 0;
    }
    current = topo->chainlist[target][0];
    while (current >=0 ) { /* move chain to new position */
        if ( (sim->wlm[0] == 1) || (sim->wlm[0] == 5) || (sim->wlm[1] == 1) || (sim->wlm[1] == 5) ) { /* accumulate volume-weighted shift of centre of mass */
            cluscm.x += dr.x*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume;
            cluscm.y += dr.y*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume;
            cluscm.z += dr.z*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume;
        }
        conf->particle[current].pos.x += dr.x;
        conf->particle[current].pos.y += dr.y;
        conf->particle[current].pos.z += dr.z;
        i++;
        current = topo->chainlist[target][i];
    }
    enermove = 0.0;
    reject = 0;
    if (sim->wlm[0] > 0) { /* get new neworder for Wang-Landau */
        for (wli=0;wli<sim->wl.wlmdim;wli++) {
            switch (sim->wlm[wli]) {
                /* method 1: z of system centre of mass, updated incrementally */
                case 1: origsyscm = conf->syscm;
                    conf->syscm.x += cluscm.x / conf->sysvolume;
                    conf->syscm.y += cluscm.y / conf->sysvolume;
                    conf->syscm.z += cluscm.z / conf->sysvolume;
                    sim->wl.neworder[wli] = z_order(&sim->wl, conf,wli);
                    break;
                /* method 2: mesh order; incremental update for the moved chain */
                case 2:
                    mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh);
                    sim->wl.neworder[wli] = meshorder_movechain(topo->chainlist[target], &sim->wl.mesh, topo->npart, conf, sim, chorig,wli);
                    break;
                case 4:
                    sim->wl.neworder[wli] = twopartdist(&sim->wl,conf,wli);
                    break;
                /* method 5: hole radius around the (updated) centre of mass */
                case 5:
                    radiusholemax_orig = sim->wl.radiusholemax;
                    origsyscm = conf->syscm;
                    conf->syscm.x += cluscm.x / conf->sysvolume;
                    conf->syscm.y += cluscm.y / conf->sysvolume;
                    conf->syscm.z += cluscm.z / conf->sysvolume;
                    longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
                    sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm));
                    break;
                /* method 6: hole radius around particle 0; full recompute if
                 * chain 0 moved, incremental otherwise */
                case 6:
                    radiusholemax_orig = sim->wl.radiusholemax;
                    longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
                    if ( target == 0 )
                        sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos));
                    else
                        sim->wl.neworder[wli] = radiusholeorder_movechain(topo->chainlist[target], conf, sim, chorig,wli,&(conf->particle[0].pos));
                    break;
                case 7:
                    sim->wl.partincontactold = sim->wl.partincontact;
                    if ( target == 0 )
                        sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli);
                    else
                        sim->wl.neworder[wli] = contparticles_movechain(topo->chainlist[target],conf,sim,chorig,wli);
                    break;
                default:
                    sim->wl.neworder[wli] = sim->wl.currorder[wli];
                    break;
            }
            /* reject outright if the new order parameter left the WL window */
            if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1;
        }
        if (!reject) {
            /* bias the acceptance by the WL weight difference (2D-indexed) */
            wlener += sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]]
                - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]];
            energy += wlener;
        }
    }
    if (!reject) { /* Wang-Landau ok, try move - calculate energy */
        i=0;
        current = topo->chainlist[target][0];
        while (current >=0 ) {
            enermove += calc_energy(current, intfce, 2, topo, conf, sim,target);
            i++;
            current = topo->chainlist[target][i];
        }
    }
    if ( reject || movetry(energy, enermove, sim->temper) ) { /* probability acceptance */
        /* restore all chain member positions */
        i=0;
        current = topo->chainlist[target][0];
        while (current >=0 ) {
            conf->particle[current].pos = chorig[i].pos;
            i++;
            current = topo->chainlist[target][i];
        }
        sim->chainm[conf->particle[target].chaint].rej++;
        if ( (sim->wlm[0] == 1) || (sim->wlm[0] == 5) || (sim->wlm[1] == 1) || (sim->wlm[1] == 5) )
            conf->syscm = origsyscm;
        wlreject(sim,radiusholemax_orig);
    } else { /* move was accepted */
        sim->chainm[conf->particle[target].chaint].acc++;
        wlaccept(sim->wlm[0],&sim->wl);
        edriftchanges = enermove - energy + wlener;
    }
    return edriftchanges;
}
/*................................................................................*/
/* Attempt a rigid rotation of chain `target` (an index into topo->chainlist)
 * around its volume-weighted geometric centre.
 *
 * Old particle records are saved in chorig[], the chain's centre (cluscm)
 * is accumulated while the OLD energy is computed, cluster_rotate() applies
 * the random rotation, Wang-Landau orders are updated, and the move is
 * accepted/rejected via movetry().  Chains are stored "whole" (unwrapped),
 * so no periodic-boundary correction is needed when accumulating cluscm.
 *
 * NOTE(review): as in chaindisplace(), `target` is a CHAIN index but is used
 * to read `conf->particle[target].chaint` for the angular step and counters —
 * confirm this indexing assumption.
 *
 * Returns the energy-drift change (0.0 on rejection). */
double chainrotate(struct topo * topo, struct sim * sim, struct conf * conf,
    double (* intfce[MAXT][MAXT])(struct interacts *), long target)
{
    double edriftchanges,energy,enermove,wlener;
    int reject=0,wli;
    struct vector cluscm;
    double chainvolume;
    long current, i;
    struct particles chorig[MAXCHL];
    double radiusholemax_orig=0;
    /* forward declarations of helpers defined elsewhere in this file */
    double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *),
        int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn);
    int movetry(double, double, double);
    void wlreject(struct sim *,long);
    void wlaccept(int, struct wls *);
    long meshorder_movechain(long [MAXN], struct meshs *, long, struct conf * conf, \
        struct sim * sim, struct particles chorig[MAXCHL],int);
    int mesh_cpy(struct meshs *, struct meshs *);
    void cluster_rotate(long, struct vector, double, struct topo * topo, struct conf * conf);
    long z_order(struct wls *, struct conf * conf,int);
    long twopartdist(struct wls *, struct conf *conf,int);
    int longarray_cpy (long **target, long **source,long,long);
    long radiusholeorder_movechain(long chain[MAXN], struct conf * conf, struct sim * sim,\
        struct particles chorig[MAXCHL],int,struct vector *);
    long radiushole_all(struct topo *topo, struct conf *conf, struct sim * sim,int, struct vector *);
    long contparticles_movechain(long chain[MAXN], struct conf * conf, struct sim * sim,struct particles chorig[MAXCHL],int wli);
    long contparticles_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli);

    /*=== Rotation step of cluster/chain ===*/
    energy=0.0; /* set values to zero */
    edriftchanges=0.0;
    wlener = 0.0;
    /* first chain member: seed the volume-weighted centre accumulation */
    current = topo->chainlist[target][0];
    cluscm.x = conf->particle[current].pos.x*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume;
    cluscm.y = conf->particle[current].pos.y*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume;
    cluscm.z = conf->particle[current].pos.z*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume;
    chorig[0] = conf->particle[current];
    chainvolume = topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume;
    energy += calc_energy(current, intfce, 2, topo, conf, sim,target);
    i=1;
    current = topo->chainlist[target][i];
    while (current >=0 ) { /* store old configuration, accumulate centre and OLD energy */
        chorig[i] = conf->particle[current];
        /* Chains are kept whole, so no PBC minimum-image correction here */
        cluscm.x += conf->particle[current].pos.x*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume;
        cluscm.y += conf->particle[current].pos.y*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume;
        cluscm.z += conf->particle[current].pos.z*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume;
        chainvolume += topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume;
        energy += calc_energy(current, intfce, 2, topo, conf, sim,target);
        i++;
        current = topo->chainlist[target][i];
    }
    /* normalise to the volume-weighted geometric centre */
    cluscm.x = cluscm.x/chainvolume;
    cluscm.y = cluscm.y/chainvolume;
    cluscm.z = cluscm.z/chainvolume;
    /* do the actual rotation around the geometric centre */
    cluster_rotate(target, cluscm, sim->chainr[conf->particle[target].chaint].angle, topo, conf);
    enermove=0.0;
    reject = 0;
    if (sim->wlm[0] > 0) { /* get new neworder for Wang-Landau */
        for (wli=0;wli<sim->wl.wlmdim;wli++) {
            switch (sim->wlm[wli]) {
                /* method 1: rotation about the chain's own centre leaves the
                 * system centre of mass unchanged — only recompute for chain 0 */
                case 1:
                    if (target == 0) sim->wl.neworder[wli] = z_order(&sim->wl, conf,wli);
                    else sim->wl.neworder[wli] = sim->wl.currorder[wli];
                    break;
                case 2:
                    mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh);
                    sim->wl.neworder[wli] = meshorder_movechain(topo->chainlist[target], &sim->wl.mesh, topo->npart, conf, sim, chorig,wli);
                    break;
                /* method 3: only a rotation changes particle 0's direction */
                case 3:
                    if (target == 0) sim->wl.neworder[wli] = (long) floor( (conf->particle[0].dir.z - sim->wl.minorder[wli])/ sim->wl.dorder[wli] );
                    else sim->wl.neworder[wli] = sim->wl.currorder[wli];
                    break;
                case 4:
                    sim->wl.neworder[wli] = twopartdist(&sim->wl,conf,wli);
                    break;
                case 5:
                    radiusholemax_orig = sim->wl.radiusholemax;
                    longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
                    sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm));
                    break;
                case 6:
                    radiusholemax_orig = sim->wl.radiusholemax;
                    longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
                    if ( target == 0 )
                        sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos));
                    else
                        sim->wl.neworder[wli] = radiusholeorder_movechain(topo->chainlist[target], conf, sim, chorig,wli,&(conf->particle[0].pos));
                    break;
                case 7:
                    sim->wl.partincontactold = sim->wl.partincontact;
                    if ( target == 0 )
                        sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli);
                    else
                        sim->wl.neworder[wli] = contparticles_movechain(topo->chainlist[target],conf,sim,chorig,wli);
                    break;
                default:
                    sim->wl.neworder[wli] = sim->wl.currorder[wli];
                    break;
            }
            /* reject outright if the new order parameter left the WL window */
            if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1;
        }
        if (!reject) {
            /* bias the acceptance by the WL weight difference (2D-indexed) */
            wlener += sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]];
            energy += wlener;
        }
    }
    if (!reject) { /* Wang-Landau ok, try move - calculate energy */
        i=0;
        current = topo->chainlist[target][0];
        while (current >=0 ) {
            enermove += calc_energy(current, intfce, 2, topo, conf, sim,target);
            i++;
            current = topo->chainlist[target][i];
        }
    }
    if ( reject || movetry(energy, enermove, sim->temper) ) { /* probability acceptance */
        /* restore full particle records (positions AND orientations) */
        i=0;
        current = topo->chainlist[target][0];
        while (current >=0 ) {
            conf->particle[current] = chorig[i];
            i++;
            current = topo->chainlist[target][i];
        }
        sim->chainr[conf->particle[target].chaint].rej++;
        wlreject(sim,radiusholemax_orig);
    } else { /* move was accepted */
        sim->chainr[conf->particle[target].chaint].acc++;
        wlaccept(sim->wlm[0],&sim->wl);
        edriftchanges = enermove - energy + wlener;
    }
    return edriftchanges;
}
/*..............................PRESSURE MOVES....................................*/
/*................................................................................*/
/*
 * pressuremove: attempt one Monte Carlo box-change (volume/shape) move.
 *
 * topo   - system topology (particle count used for the N*ln(V'/V) term)
 * sim    - simulation state: pressure, temperature, edge move size,
 *          Wang-Landau bookkeeping and edge acceptance counters
 * conf   - configuration (box dimensions, particles, system CM)
 * intfce - pair-interaction function table indexed by particle types
 *
 * sim->ptype selects the coupling:
 *   0 - anisotropic: one randomly chosen box edge is changed
 *   1 - isotropic:   all three edges changed by the same amount
 *   2 - isotropic in xy, z kept constant
 *   3 - xy changed, z rescaled so the total volume stays fixed
 *
 * Returns the energy drift change caused by the move (0.0 when the move
 * is rejected and the old box is restored).
 */
double pressuremove(struct topo * topo, struct sim * sim, struct conf * conf,
double (* intfce[MAXT][MAXT])(struct interacts *))
{
double edriftchanges,energy,enermove,wlener;
int reject=0,wli;
double old_side; /* Box length before attempted change */
double *side; /* Box dimension to try changing */
double psch; /* Size of a box change during pressure */
double pvol; /* Size of a volume during pressure */
double pvoln; /* Size of a new volume during pressure */
double rsave; /* Saved random number */
double area;
double radiusholemax_orig=0;
/* local prototypes of helpers defined elsewhere in this file */
double ran2(long *);
double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *),
int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn);
int movetry(double, double, double);
void wlreject(struct sim *,long);
void wlaccept(int, struct wls *);
int mesh_init(struct meshs *, double, long, struct conf * conf, struct sim * sim);
int mesh_cpy(struct meshs *, struct meshs *);
long z_order(struct wls *, struct conf * conf,int);
long twopartdist(struct wls *, struct conf *conf,int);
long radiushole_all(struct topo *topo, struct conf *conf, struct sim * sim,int, struct vector *);
int longarray_cpy (long **target, long **source,long,long);
long contparticles_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli);
/*=== This is a volume change step ===*/
/*calculate energy*/
edriftchanges=0.0;
wlener = 0.0;
/* total system energy of the current (old) box */
energy = calc_energy(0, intfce, 0, topo, conf, sim,0);
/* Choose an edge */
switch (sim->ptype) {
case 0:
/* Anisotropic pressure coupling */
/* pick x, y or z edge with equal probability */
rsave = ran2(&seed);
if (rsave < 1.0/3.0) {
side = &(conf->box.x);
area = conf->box.y * conf->box.z;
} else if (rsave < 2.0/3.0) {
side = &(conf->box.y);
area = conf->box.x * conf->box.z;
} else {
side = &(conf->box.z);
area = conf->box.x * conf->box.y;
}
old_side = *side;
*side += sim->edge.mx * (ran2(&seed) - 0.5);
reject = 0;
if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */
/* recompute each Wang-Landau order parameter for the trial box;
any out-of-range order rejects the move outright */
for (wli=0;wli<sim->wl.wlmdim;wli++) {
switch (sim->wlm[wli]) {
case 1:
sim->wl.neworder[wli] = z_order(&sim->wl, conf,wli);
break;
case 2:
mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh);
sim->wl.neworder[wli] = (long) (mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize, topo->npart, conf, sim) - sim->wl.minorder[wli]);
break;
case 4:
sim->wl.neworder[wli] = twopartdist(&sim->wl,conf,wli);
break;
case 5:
radiusholemax_orig = sim->wl.radiusholemax;
longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm));
break;
case 6:
radiusholemax_orig = sim->wl.radiusholemax;
longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos));
break;
case 7:
sim->wl.partincontactold = sim->wl.partincontact;
sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli);
break;
default:
sim->wl.neworder[wli] = sim->wl.currorder[wli];
break;
}
if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1;
}
if (!reject) {
/* Wang-Landau bias: weight difference between new and current orders */
wlener = sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]];
energy += wlener;
}
}
if (!reject) { /* wang-landaou ok, try move - calculate energy */
/* NPT work term: P*dV plus the -N*ln(L'/L)/T ideal-gas volume entropy term */
enermove = sim->press * area * (*side - old_side) - (double)topo->npart * log(*side/old_side) / sim->temper;
enermove += calc_energy(0, intfce, 0, topo, conf, sim,0);
}
/* movetry() nonzero => reject; also reject a non-positive box edge */
if ( reject || *side <= 0.0 || ( movetry(energy,enermove,sim->temper) ) ) { /* probability acceptance */
*side = old_side;
sim->edge.rej++;
wlreject(sim,radiusholemax_orig);
} else { /* move was accepted */
sim->edge.acc++;
wlaccept(sim->wlm[0],&sim->wl);
edriftchanges = enermove - energy + wlener;
}
break;
case 1:
/* Isotropic pressure coupling */
/* same displacement psch applied to all three edges */
psch = sim->edge.mx * (ran2(&seed) - 0.5);
pvol = conf->box.x * conf->box.y * conf->box.z;
conf->box.x += psch;
conf->box.y += psch;
conf->box.z += psch;
pvoln = conf->box.x * conf->box.y * conf->box.z;
reject = 0;
if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */
for (wli=0;wli<sim->wl.wlmdim;wli++) {
switch (sim->wlm[wli]) {
case 1: sim->wl.neworder[wli] = z_order(&sim->wl,conf,wli);
break;
case 2: mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh);
sim->wl.neworder[wli] = (long) (mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize, topo->npart, conf, sim) - sim->wl.minorder[wli]);
break;
case 4:
sim->wl.neworder[wli] = twopartdist(&sim->wl,conf,wli);
break;
case 5:
radiusholemax_orig = sim->wl.radiusholemax;
longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm));
break;
case 6:
radiusholemax_orig = sim->wl.radiusholemax;
longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos));
break;
case 7:
sim->wl.partincontactold = sim->wl.partincontact;
sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli);
break;
default:
sim->wl.neworder[wli] = sim->wl.currorder[wli];
break;
}
if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1;
}
if (!reject) {
wlener = sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]];
energy += wlener;
}
}
if (!reject) { /* wang-landaou ok, try move - calculate energy */
enermove = sim->press * (pvoln - pvol) - (double)topo->npart * log(pvoln/pvol) / sim->temper;
enermove += calc_energy(0, intfce, 0, topo, conf, sim,0);
}
/* NOTE(review): unlike case 0, negative box edges are not guarded
against here -- presumably edge.mx is kept small; confirm */
if ( reject || movetry(energy,enermove,sim->temper) ) { /* probability acceptance */
conf->box.x -= psch;
conf->box.y -= psch;
conf->box.z -= psch;
sim->edge.rej++;
wlreject(sim,radiusholemax_orig);
} else { /* move was accepted */
sim->edge.acc++;
wlaccept(sim->wlm[0],&sim->wl);
edriftchanges = enermove - energy + wlener;
}
break;
case 2:
/* Isotropic pressure coupling in xy, z constant */
psch = sim->edge.mx * (ran2(&seed) - 0.5);
pvol = conf->box.x * conf->box.y;
conf->box.x += psch;
conf->box.y += psch;
pvoln = conf->box.x * conf->box.y;
reject = 0;
if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */
for (wli=0;wli<sim->wl.wlmdim;wli++) {
switch (sim->wlm[wli]) {
/*no change in case 1, it does not change box.z*/
case 2: mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh);
sim->wl.neworder[wli] = (long) (mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize,topo->npart, conf, sim) - sim->wl.minorder[wli]);
break;
case 4:
sim->wl.neworder[wli] = twopartdist(&sim->wl,conf,wli);
break;
case 5:
radiusholemax_orig = sim->wl.radiusholemax;
longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm));
break;
case 6:
radiusholemax_orig = sim->wl.radiusholemax;
longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos));
break;
case 7:
sim->wl.partincontactold = sim->wl.partincontact;
sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli);
break;
default:
sim->wl.neworder[wli] = sim->wl.currorder[wli];
break;
}
if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1;
}
if (!reject) {
wlener = sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]];
energy += wlener;
}
}
if (!reject) { /* wang-landaou ok, try move - calculate energy */
/* pvol/pvoln are xy areas here, so multiply by box.z for the volume change */
enermove = sim->press * conf->box.z * (pvoln - pvol) - (double)topo->npart * log(pvoln/pvol) / sim->temper;
enermove += calc_energy(0, intfce, 0, topo, conf, sim,0);
}
if ( reject || movetry(energy,enermove,sim->temper) ) { /* probability acceptance */
conf->box.x -= psch;
conf->box.y -= psch;
sim->edge.rej++;
wlreject(sim,radiusholemax_orig);
} else { /* move was accepted */
sim->edge.acc++;
wlaccept(sim->wlm[0],&sim->wl);
edriftchanges = enermove - energy + wlener;
}
break;
case 3:
/* Isotropic pressure coupling in xy, z coupled to have fixed volume */
psch = sim->edge.mx * (ran2(&seed) - 0.5);
pvol = conf->box.x * conf->box.y * conf->box.z;
conf->box.x += psch;
conf->box.y += psch;
/* rescale z so x*y*z stays equal to the original volume pvol */
conf->box.z = pvol / conf->box.x / conf->box.y;
reject = 0;
if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */
for (wli=0;wli<sim->wl.wlmdim;wli++) {
switch (sim->wlm[wli]) {
case 1: sim->wl.neworder[wli] = z_order(&sim->wl, conf,wli);
break;
case 2: mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh);
sim->wl.neworder[wli] = (long) (mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize,topo->npart, conf, sim) - sim->wl.minorder[wli]);
break;
case 4:
sim->wl.neworder[wli] = twopartdist(&sim->wl,conf,wli);
break;
case 5:
radiusholemax_orig = sim->wl.radiusholemax;
longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm));
break;
case 6:
radiusholemax_orig = sim->wl.radiusholemax;
longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax);
sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos));
break;
case 7:
sim->wl.partincontactold = sim->wl.partincontact;
sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli);
break;
default:
sim->wl.neworder[wli] = sim->wl.currorder[wli];
break;
}
if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1;
}
if (!reject) {
wlener = sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]];
energy += wlener;
}
}
if (!reject) { /* wang-landaou ok, try move - calculate energy */
/* volume is unchanged, so no PdV and no N*ln(V'/V) terms here */
enermove = calc_energy(0, intfce, 0, topo, conf, sim,0);
}
if ( reject || movetry(energy,enermove,sim->temper) ) { /* probability acceptance */
conf->box.x -= psch;
conf->box.y -= psch;
conf->box.z = pvol / conf->box.x / conf->box.y;
sim->edge.rej++;
wlreject(sim,radiusholemax_orig);
} else { /* move was accepted */
sim->edge.acc++;
wlaccept(sim->wlm[0],&sim->wl);
edriftchanges = enermove - energy + wlener;
}
break;
default:
fprintf (stderr, "ERROR: unknown type of pressure coupling %d",sim->ptype);
exit(1);
}
/*=== End volume change step ===*/
return edriftchanges;
}
/*..................... Switch replicas move in MPI ..............................*/
/*.................................................................................*/
/*
 * replicaexchangemove: attempt a replica-exchange (parallel tempering)
 * swap of configurations with a neighbouring MPI rank.
 *
 * Ranks are paired alternately (odd-with-lower / even-with-lower) based
 * on the sweep counter. Within a pair, the lower rank receives the
 * partner's data, evaluates the Metropolis-like criterion, and sends its
 * decision back; the higher rank just obeys the received decision. On an
 * accepted swap the full particle arrays, box, system CM and the
 * Wang-Landau state are exchanged.
 *
 * Returns the energy drift change for this replica; compiled without MPI
 * this is a no-op returning 0.0.
 */
double replicaexchangemove(struct topo * topo, struct sim * sim, struct conf * conf,
double (* intfce[MAXT][MAXT])(struct interacts *), long sweep )
{
double edriftchanges=0.0;
#ifdef MPI
double change, *recwlweights;
MPI_Status status;
int oddoreven,count,wli,sizewl = 0;
struct mpiexchangedata localmpi,receivedmpi;
BOOL reject;
long localwl,receivedwl;
double ran2(long *);
void gen_pairlist(struct topo * topo, struct sim * sim, struct conf * conf);
int longarray_cpy (long **target, long **source,long,long);
int mesh_init(struct meshs *, double, long, struct conf * conf, struct sim * sim);
double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *),
int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn);
void wlaccept(int, struct wls *);
//int mpi_newdatatypes();
//mpi_newdatatypes();
int i;
struct vector vec;
struct particles part;
struct mpiexchangedata exch;
MPI_Aint dispstart;
/* Build the MPI derived datatypes used for the exchange.
NOTE(review): MPI_Address/MPI_Type_struct are MPI-1 calls removed in
MPI-3; modern MPIs need MPI_Get_address/MPI_Type_create_struct. */
MPI_Datatype MPI_vector;
MPI_Datatype type[3] = {MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE};
int blocklen[3] = {1, 1, 1};
MPI_Aint disp[3];
MPI_Address( &vec, &dispstart);
MPI_Address( &(vec.x), &disp[0]);
MPI_Address( &(vec.y), &disp[1]);
MPI_Address( &(vec.z), &disp[2]);
for (i=0; i <3; i++) disp[i] -= dispstart;
MPI_Type_struct( 3, blocklen, disp, type, &MPI_vector);
MPI_Type_commit( &MPI_vector);
/* datatype mirroring struct particles, field by field */
MPI_Datatype MPI_Particle;
MPI_Datatype type2[11] = {MPI_vector,MPI_vector,MPI_vector,MPI_vector,MPI_vector, MPI_LONG, MPI_LONG, MPI_INT,MPI_INT,MPI_DOUBLE, MPI_INT};
int blocklen2[11] = {1, 1, 2,4,2,1,1,1,1,1,1,};
MPI_Aint disp2[11];
MPI_Address( &part, &dispstart);
MPI_Address( &(part.pos), &disp2[0]);
MPI_Address( &(part.dir), &disp2[1]);
MPI_Address( &(part.patchdir), &disp2[2]);
MPI_Address( &(part.patchsides), &disp2[3]);
MPI_Address( &(part.chdir), &disp2[4]);
MPI_Address( &(part.chaint), &disp2[5]);
MPI_Address( &(part.chainn), &disp2[6]);
MPI_Address( &(part.type), &disp2[7]);
MPI_Address( &(part.switchtype), &disp2[8]);
MPI_Address( &(part.delta_mu), &disp2[9]);
MPI_Address( &(part.switched), &disp2[10]);
for (i=0; i <11; i++) disp2[i] -= dispstart;
MPI_Type_struct( 11, blocklen2, disp2, type2, &MPI_Particle);
MPI_Type_commit( &MPI_Particle);
/* total number of Wang-Landau weight bins (1D or 2D histogram) */
if (sim->wl.length[1] > 0) {
sizewl = sim->wl.length[1] * sim->wl.length[0];
} else {
sizewl = sim->wl.length[0];
}
/* datatype mirroring struct mpiexchangedata */
MPI_Datatype MPI_exchange;
MPI_Datatype type3[7] = {MPI_vector, MPI_DOUBLE, MPI_DOUBLE, MPI_INT, MPI_vector, MPI_LONG, MPI_LONG};
int blocklen3[7] = {1, 1, 1, 1, 1, 1, 2};
MPI_Aint disp3[7];
MPI_Address( &exch, &dispstart);
MPI_Address( &(exch.box), &disp3[0]);
MPI_Address( &(exch.energy), &disp3[1]);
MPI_Address( &(exch.volume), &disp3[2]);
MPI_Address( &(exch.accepted), &disp3[3]);
MPI_Address( &(exch.syscm), &disp3[4]);
MPI_Address( &(exch.radiusholemax), &disp3[5]);
MPI_Address( &(exch.wl_order), &disp3[6]);
for (i=0; i <7; i++) disp3[i] -= dispstart;
MPI_Type_struct(7, blocklen3, disp3, type3, &MPI_exchange);
MPI_Type_commit( &MPI_exchange);
/*=== This is an attempt to switch replicas ===*/
/* snapshot of this replica's state to offer to the partner */
localmpi.box = conf->box;
localmpi.energy = calc_energy(0, intfce, 0, topo, conf, sim,0);
localmpi.volume = conf->box.x * conf->box.y * conf->box.z;
localmpi.accepted = 0;
localmpi.syscm = conf->syscm;
localmpi.radiusholemax = sim->wl.radiusholemax;
/* NOTE(review): malloc result is not checked before use */
recwlweights = malloc( sizeof(double) * sizewl );
for (wli=0;wli<2;wli++) {
localmpi.wl_order[wli] = 0;
receivedmpi.wl_order[wli] = 0;
}
for (wli=0;wli<sim->wl.wlmdim;wli++) {
localmpi.wl_order[wli] = sim->wl.currorder[wli];
//fprintf(stdout,"wli %d %ld %ld\n\n", wli, localmpi.wl_order[wli], sim->wl.currorder[wli] );
}
/* alternate the pairing each exchange period so every neighbour pair is tried */
if ( (sweep % (2*sim->nrepchange)) == 0)
/* exchange odd ones with even ones*/
oddoreven=1;
else
/* exchange even ones with odd ones*/
oddoreven=0;
if (sim->mpinprocs == 2)
oddoreven=1;
count = 1;
if (sim->mpirank % 2 == oddoreven) {
/* upper rank of the pair: send our state down and obey the decision */
if (sim->mpirank > 0) {
MPI_Send(&localmpi, 1, MPI_exchange, sim->mpirank-1, count, MPI_COMM_WORLD);
MPI_Send(sim->wl.weights, sizewl, MPI_DOUBLE, sim->mpirank-1, count, MPI_COMM_WORLD);
//printf("send data: rank: %d energy: %f volume: %f pressure: %f \n",sim->mpirank,localmpi.energy,localmpi.volume,localmpi.pressure);
MPI_Recv(&receivedmpi, 1, MPI_exchange, sim->mpirank-1, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
/*decision of accepting or rejecting the exchange was done on other process
here we took received configuration (if move was accepted))*/
//printf("received data: rank: %d energy: %f volume: %f pressure: %f \n",sim->mpirank,receivedmpi.energy,receivedmpi.volume,receivedmpi.pressure);
if (receivedmpi.accepted == 1) {
sim->mpiexch.acc++;
struct particles *temppart;
temppart = malloc(topo->npart*sizeof(struct particles));
MPI_Recv(temppart, topo->npart, MPI_Particle, sim->mpirank-1, MPI_ANY_TAG, MPI_COMM_WORLD,&status);
/* printf("received data: rank: %d\n", sim->mpirank);
printf("part0 x %f y %f z %f\n",temppart[0].pos.x, temppart[0].pos.y, temppart[0].pos.z);
printf("part1 x %f y %f z %f\n",temppart[1].pos.x, temppart[1].pos.y, temppart[1].pos.z);
printf("part0 chaint %ld chainn %ld type %d\n",temppart[0].chaint,temppart[0].chainn,temppart[0].type);
*/
MPI_Send(conf->particle, topo->npart, MPI_Particle, sim->mpirank-1, count, MPI_COMM_WORLD);
/* printf("send data: rank: %d\n",sim->mpirank);
printf("part0 x %f y %f z %f\n",conf->particle[0].pos.x,conf->particle[0].pos.y,conf->particle[0].pos.z);
printf("part1 x %f y %f z %f\n",conf->particle[1].pos.x,conf->particle[1].pos.y,conf->particle[1].pos.z);
printf("part0 chaint %ld chainn %ld type %d\n",conf->particle[0].chaint,conf->particle[0].chainn,conf->particle[0].type);
*/
/* adopt the partner's configuration and account the energy drift */
localmpi.accepted = receivedmpi.accepted;
conf->box = receivedmpi.box;
conf->syscm = receivedmpi.syscm;
memcpy(conf->particle,temppart,topo->npart*sizeof(struct particles));
edriftchanges = receivedmpi.energy - localmpi.energy;
edriftchanges += sim->press * (receivedmpi.volume - localmpi.volume) - (double)topo->npart * log(receivedmpi.volume / localmpi.volume) / sim->temper;
if ( sim->wlm[0] >0 ) {
for (wli=0;wli<sim->wl.wlmdim;wli++) {
sim->wl.neworder[wli] = receivedmpi.wl_order[wli];
}
wlaccept(sim->wlm[0],&sim->wl);
//exchange wl data mesh size and radius hole s
for (wli=0;wli<sim->wl.wlmdim;wli++) {
switch (sim->wlm[wli]) {
case 2:
/*it is complicated to send because of different sizes
we would have to send sizes first and realocate corrrect mesh size and then send data
it is better to recalculate (a bit slower though)*/
mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize, topo->npart, conf, sim);
break;
case 5:
//radiushole_all(topo,conf,sim,wli,&(conf->syscm));
sim->wl.radiusholeold = (long*) realloc(sim->wl.radiusholeold,sizeof(long)*receivedmpi.radiusholemax);
MPI_Recv(sim->wl.radiusholeold,receivedmpi.radiusholemax, MPI_LONG, sim->mpirank-1, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
MPI_Send(sim->wl.radiushole,sim->wl.radiusholemax, MPI_LONG, sim->mpirank-1, count, MPI_COMM_WORLD);
longarray_cpy(&sim->wl.radiushole,&sim->wl.radiusholeold,sim->wl.radiusholemax,receivedmpi.radiusholemax);
sim->wl.radiusholemax=receivedmpi.radiusholemax;
break;
case 6:
//radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos));
sim->wl.radiusholeold = (long*) realloc(sim->wl.radiusholeold,sizeof(long)*receivedmpi.radiusholemax);
MPI_Recv(sim->wl.radiusholeold,receivedmpi.radiusholemax, MPI_LONG, sim->mpirank-1, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
MPI_Send(sim->wl.radiushole,sim->wl.radiusholemax, MPI_LONG, sim->mpirank-1, count, MPI_COMM_WORLD);
longarray_cpy(&sim->wl.radiushole,&sim->wl.radiusholeold,sim->wl.radiusholemax,receivedmpi.radiusholemax);
sim->wl.radiusholemax=receivedmpi.radiusholemax;
break;
case 7:
//contparticles_all(topo,conf,sim,wli);
MPI_Recv(&(sim->wl.partincontactold),1, MPI_LONG, sim->mpirank-1, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
MPI_Send(&(sim->wl.partincontact),1, MPI_LONG, sim->mpirank-1, count, MPI_COMM_WORLD);
sim->wl.partincontact=sim->wl.partincontactold;
break;
}
}
}
free(temppart);
} else {
/* swap rejected by the partner: penalize current WL bin as usual */
sim->mpiexch.rej++;
if ( sim->wlm[0] > 0 ) {
sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]] -= sim->wl.alpha;
sim->wl.hist[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]]++;
}
}
}
} else {
/* lower rank of the pair: receive partner data and decide the swap */
if (sim->mpirank+1 < sim->mpinprocs) {
/*there is above process*/
MPI_Recv(&receivedmpi, 1, MPI_exchange, sim->mpirank+1, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
MPI_Recv(recwlweights, sizewl, MPI_DOUBLE, sim->mpirank+1, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
/*we got new configuration*/
//printf("received data: rank: %d energy: %f volume: %f \n",sim->mpirank,receivedmpi.energy,receivedmpi.volume);
/*evaluate if accepte or reject the configuration*/
/*acc = exp( (1/sim->temper - 1/(sim->temper + sim.dtemp)) * (E_here - E_received) +
(sim->press /sim->temper - pressure_received /(sim.temper + sim->dtemp)) * (V_here - V_received)
if pressure the same it it simplier*/
reject = FALSE;
change = (1/sim->temper - 1/(sim->temper + sim->dtemp)) * (localmpi.energy - receivedmpi.energy);
//printf("acceptance decision: change: %f localE: %f receivedE: %f tempf: %f \n",change,localmpi.energy,receivedmpi.energy,(1/sim->temper - 1/(sim->temper + sim->dtemp)));
change += (sim->press/sim->temper - (sim->press + sim->dpress)/(sim->temper + sim->dtemp)) * (localmpi.volume - receivedmpi.volume);
//printf("pressf: %f \n",(sim->press/sim->temper - (sim->press + sim->dpress)/(sim->temper + sim->dtemp)));
if (sim->wlm[0] > 0) {
/* add the Wang-Landau weight difference seen from both replicas */
localwl = sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0];
receivedwl = receivedmpi.wl_order[0] + receivedmpi.wl_order[1]*sim->wl.length[0];
//fprintf(stdout,"decide wl %ld %ld %ld energychange: %f \n", receivedmpi.wl_order[0], receivedmpi.wl_order[1], receivedwl, change );
//fprintf(stdout,"local weights %ld %f %ld %f \n",localwl,sim->wl.weights[localwl],receivedwl,sim->wl.weights[receivedwl]);
change += (-sim->wl.weights[localwl] + sim->wl.weights[receivedwl] )/sim->temper + ( -recwlweights[receivedwl] + recwlweights[localwl])/(sim->temper + sim->dtemp) ;
//fprintf(stdout,"wlchange %f \n\n",change);
}
if ( (!(reject)) && ( (change > 0) || (ran2(&seed) < exp(change)) ) ) {
/* Exchange ACCEPTED send local stuff*/
//printf("exchange accepted \n");
sim->mpiexch.acc++;
localmpi.accepted = 1;
conf->box = receivedmpi.box;
conf->syscm = receivedmpi.syscm;
edriftchanges = receivedmpi.energy - localmpi.energy;
edriftchanges += sim->press * (receivedmpi.volume - localmpi.volume) - (double)topo->npart * log(receivedmpi.volume / localmpi.volume) / sim->temper;
//printf("edrift %f\n",edriftchanges);
if ( sim->wlm[0] > 0 ) {
for (wli=0;wli<sim->wl.wlmdim;wli++) {
sim->wl.neworder[wli] = receivedmpi.wl_order[wli];
}
wlaccept(sim->wlm[0],&sim->wl);
}
MPI_Send(&localmpi, 1, MPI_exchange, sim->mpirank+1, count, MPI_COMM_WORLD);
//printf("send data: rank: %d energy: %f volume: %f pressure: %f \n",sim->mpirank,localmpi.energy,localmpi.volume,localmpi.pressure);
/*send and receive configuration*/
MPI_Send(conf->particle, topo->npart, MPI_Particle, sim->mpirank+1, count, MPI_COMM_WORLD);
/* printf("send data: rank: %d\n",sim->mpirank);
printf("part0 x %f y %f z %f\n",conf->particle[0].pos.x,conf->particle[0].pos.y,conf->particle[0].pos.z);
printf("part1 x %f y %f z %f\n",conf->particle[1].pos.x,conf->particle[1].pos.y,conf->particle[1].pos.z);
printf("part0 chaint %ld chainn %ld type %d\n",conf->particle[0].chaint,conf->particle[0].chainn,conf->particle[0].type);
*/
MPI_Recv(conf->particle, topo->npart, MPI_Particle, sim->mpirank+1, MPI_ANY_TAG, MPI_COMM_WORLD,&status);
/* printf("recieved data: rank: %d\n",sim->mpirank);
printf("part0 x %f y %f z %f\n",conf->particle[0].pos.x,conf->particle[0].pos.y,conf->particle[0].pos.z);
printf("part1 x %f y %f z %f\n",conf->particle[1].pos.x,conf->particle[1].pos.y,conf->particle[1].pos.z);
printf("part0 chaint %ld chainn %ld type %d\n",conf->particle[0].chaint,conf->particle[0].chainn,conf->particle[0].type);
*/
if ( sim->wlm[0] > 0 ) {
//exchange wl data mesh size and radius hole s
for (wli=0;wli<sim->wl.wlmdim;wli++) {
switch (sim->wlm[wli]) {
case 2:
/*it is complicated to send because of different sizes
we would have to send sizes first and realocate corrrect mesh size and then send data
it is better to recalculate (a bit slower though)*/
mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize, topo->npart, conf, sim);
break;
case 5:
//radiushole_all(topo,conf,sim,wli,&(conf->syscm));
sim->wl.radiusholeold = (long*) realloc(sim->wl.radiusholeold,sizeof(long)*receivedmpi.radiusholemax);
MPI_Send(sim->wl.radiushole,sim->wl.radiusholemax, MPI_LONG, sim->mpirank+1, count, MPI_COMM_WORLD);
MPI_Recv(sim->wl.radiusholeold,receivedmpi.radiusholemax, MPI_LONG, sim->mpirank+1, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
longarray_cpy(&sim->wl.radiushole,&sim->wl.radiusholeold,sim->wl.radiusholemax,receivedmpi.radiusholemax);
sim->wl.radiusholemax=receivedmpi.radiusholemax;
break;
case 6:
//radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos));
sim->wl.radiusholeold = (long*) realloc(sim->wl.radiusholeold,sizeof(long)*receivedmpi.radiusholemax);
MPI_Send(sim->wl.radiushole,sim->wl.radiusholemax, MPI_LONG, sim->mpirank+1, count, MPI_COMM_WORLD);
MPI_Recv(sim->wl.radiusholeold,receivedmpi.radiusholemax, MPI_LONG, sim->mpirank+1, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
longarray_cpy(&sim->wl.radiushole,&sim->wl.radiusholeold,sim->wl.radiusholemax,receivedmpi.radiusholemax);
sim->wl.radiusholemax=receivedmpi.radiusholemax;
break;
case 7:
//contparticles_all(topo,conf,sim,wli);
MPI_Send(&(sim->wl.partincontact),1, MPI_LONG, sim->mpirank+1, count, MPI_COMM_WORLD);
MPI_Recv(&(sim->wl.partincontact),1, MPI_LONG, sim->mpirank+1, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
break;
}
}
}
} else {
/*if exchange rejected send back info */
//printf("exchange rejected\n");
sim->mpiexch.rej++;
MPI_Send(&localmpi, 1, MPI_exchange, sim->mpirank+1, count, MPI_COMM_WORLD);
if ( sim->wlm[0] > 0 ) {
sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]] -= sim->wl.alpha;
sim->wl.hist[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]]++;
}
}
}
}
/* pair list is stale after adopting a foreign configuration */
if ( (localmpi.accepted) && (sim->pairlist_update) )
gen_pairlist(topo, sim, conf);
MPI_Type_free(&MPI_exchange);
MPI_Type_free(&MPI_Particle);
MPI_Type_free(&MPI_vector);
free(recwlweights);
#endif
return edriftchanges;
}
/*int mpi_newdatatypes()
{
int i;
struct vector vec;
struct particles part;
struct mpiexchangedata exch;
MPI_Aint dispstart;
MPI_Datatype MPI_vector;
MPI_Datatype type[3] = {MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE};
int blocklen[3] = {1, 1, 1};
MPI_Aint disp[3];
MPI_Address( &vec, &dispstart);
MPI_Address( &(vec.x), &disp[0]);
MPI_Address( &(vec.y), &disp[1]);
MPI_Address( &(vec.z), &disp[2]);
for (i=0; i <3; i++) disp[i] -= dispstart;
MPI_Type_struct( 3, blocklen, disp, type, &MPI_vector);
MPI_Type_commit( &MPI_vector);
MPI_Datatype MPI_Particle;
MPI_Datatype type2[11] = {MPI_vector,MPI_vector,MPI_vector,MPI_vector,MPI_vector, MPI_LONG, MPI_LONG, MPI_INT,MPI_INT,MPI_DOUBLE, MPI_INT};
int blocklen2[11] = {1, 1, 2,4,2,1,1,1,1,1,1,};
MPI_Aint disp2[11];
MPI_Address( &part, &dispstart);
MPI_Address( &(part.pos), &disp2[0]);
MPI_Address( &(part.dir), &disp2[1]);
MPI_Address( &(part.patchdir), &disp2[2]);
MPI_Address( &(part.patchsides), &disp2[3]);
MPI_Address( &(part.chdir), &disp2[4]);
MPI_Address( &(part.chaint), &disp2[5]);
MPI_Address( &(part.chainn), &disp2[6]);
MPI_Address( &(part.type), &disp2[7]);
MPI_Address( &(part.switchtype), &disp2[8]);
MPI_Address( &(part.delta_mu), &disp2[9]);
MPI_Address( &(part.switched), &disp2[10]);
for (i=0; i <11; i++) disp2[i] -= dispstart;
MPI_Type_struct( 11, blocklen2, disp2, type2, &MPI_Particle);
MPI_Type_commit( &MPI_Particle);
MPI_Datatype MPI_exchange;
MPI_Datatype type3[5] = {MPI_vector, MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE, MPI_INT};
int blocklen3[5] = {1, 1, 1, 1, 1};
MPI_Aint disp3[5];
MPI_Address( &exch, &dispstart);
MPI_Address( &(exch.box), &disp3[0]);
MPI_Address( &(exch.energy), &disp3[1]);
MPI_Address( &(exch.volume), &disp3[2]);
MPI_Address( &(exch.pressure), &disp3[3]);
MPI_Address( &(exch.accepted), &disp3[4]);
for (i=0; i <5; i++) disp3[i] -= dispstart;
MPI_Type_struct( 5, blocklen3, disp3, type3, &MPI_exchange);
MPI_Type_commit( &MPI_exchange);
return 0;
}*/
/*................................................................................*/
/*................................................................................*/
/*....................END OF MOVES, INTERACTION FUNCTIONS FOLLOW..................*/
/*................................................................................*/
/*..............................................................................*/
/*
Determines total energy of two spherocylinders type PSC PSC.
Repulsive part comes from erepulsive(); the attractive part is computed
only when the pair is within rcut and epsilon != 0. For chiral geotypes
(CHPSC/TCHPSC) the particle axes are temporarily replaced by the chiral
patch axes (chdir) and restored before returning. Geotypes with a second
patch (TPSC/TCHPSC) accumulate the additional patch-patch combinations.
*/
double e_psc_psc(struct interacts * interact)
{
double atrenergy, repenergy;
void closestdist(struct interacts *);
double erepulsive(struct interacts *);
double eattractive_psc_psc(struct interacts *,int,int);
closestdist(interact);
repenergy = erepulsive(interact);
if ( ( interact->dist > interact->param->rcut ) || ( interact->param->epsilon == 0.0 ) )
atrenergy = 0.0;
else {
BOOL firstCH=FALSE, secondCH=FALSE;
/* remember the true axes so they can be restored after chiral swaps */
struct vector olddir1 = interact->part1->dir;
struct vector olddir2 = interact->part2->dir;
if ( (interact->param->geotype[0] == CHPSC)||(interact->param->geotype[0] == TCHPSC) )
firstCH = TRUE;
if ( (interact->param->geotype[1] == CHPSC)||(interact->param->geotype[1] == TCHPSC) )
secondCH = TRUE;
if (firstCH)
interact->part1->dir = interact->part1->chdir[0];
if (secondCH)
interact->part2->dir = interact->part2->chdir[0];
if ((firstCH) || (secondCH) ) {
/* axes changed -> closest distance must be recomputed */
closestdist(interact);
}
/* patch 0 - patch 0 contribution */
atrenergy = eattractive_psc_psc(interact,0,0);
/*addition of interaction of second patches*/
if ( (interact->param->geotype[0] == TPSC) || (interact->param->geotype[0] == TCHPSC) ||
(interact->param->geotype[1] == TPSC) ||(interact->param->geotype[1] == TCHPSC) ) {
BOOL firstT=FALSE, secondT=FALSE;
if ( (interact->param->geotype[0] == TPSC) || (interact->param->geotype[0] == TCHPSC) )
firstT = TRUE;
if ( (interact->param->geotype[1] == TPSC) ||(interact->param->geotype[1] == TCHPSC) )
secondT = TRUE;
if (firstT) {
if (firstCH) {
interact->part1->dir = interact->part1->chdir[1];
closestdist(interact);
}
atrenergy += eattractive_psc_psc(interact,1,0);
}
if ( (firstT) && (secondT) ) {
if (secondCH) {
interact->part2->dir = interact->part2->chdir[1];
closestdist(interact);
}
atrenergy += eattractive_psc_psc(interact,1,1);
}
if (secondT) {
/* NOTE(review): when only the second particle is two-patch and
chiral (secondT && secondCH && !firstT), part2->dir is still
chdir[0] for this (0,1) call -- verify this is intended */
if (firstT && firstCH ) {
interact->part1->dir = interact->part1->chdir[0];
closestdist(interact);
}
atrenergy += eattractive_psc_psc(interact,0,1);
}
}
/* restore the true particle axes */
if (firstCH)
interact->part1->dir = olddir1;
if (secondCH)
interact->part2->dir = olddir2;
}
return repenergy+atrenergy;
}
/*
Determines attractive energy of two spherocylinders type PSC PSC for one
patch pair: patchnum1 on particle 1 and patchnum2 on particle 2.
Returns 0.0 as soon as either spherocylinder lies entirely outside the
other's patch wedge; otherwise combines a distance-dependent well with
three scaling factors (overlap length f0 and the two angular factors
f1, f2 from fanglscale).
*/
double eattractive_psc_psc(struct interacts * interact,int patchnum1,int patchnum2)
{
int i, intrs;
double rcut, atrenergy, ndist;
double v1, v2, f0, f1, f2, T1, T2, S1, S2, a;
double intersections[5];
struct vector vec1, vec2, vec_intrs, vec_mindist;
struct vector vec_sub(struct vector, struct vector);
struct vector vec_sum(struct vector, struct vector);
struct vector vec_create(double, double, double);
struct vector vec_scale(struct vector, double);
struct vector vec_perpproject(struct vector *, struct vector*);
struct vector mindist_segments(struct vector, double, struct vector, double, struct vector);
void normalise(struct vector *);
int psc_intersect(struct particles *, struct particles *,
double, double, struct vector, double *,double, struct ia_param *, int which, int patchnum);
double fanglscale(double, struct ia_param *, int which);
rcut = interact->param->rcut;
//interact->halfl = interact->param->half_len[0];
//DEBUG_SIM("halfl = %lf", interact->halfl);
for(i=0;i<5;i++)
intersections[i]=0;
//cospatch = param.pcanglsw;
//cospatchinr = param.pcangl;
/*1- do intersections of spherocylinder2 with patch of spherocylinder1 at.
cut distance C*/
//DEBUG_SIM("first intersection");
intrs=psc_intersect(interact->part1,interact->part2,interact->param->half_len[0],interact->param->half_len[1],interact->r_cm, intersections, rcut, interact->param,0, patchnum1);
if (intrs <2){
//DEBUG_SIM("No intersection :(");
return 0.0; /*sc is all outside patch, attractive energy is 0*/
}
T1=intersections[0]; /*points on sc2*/
T2=intersections[1];
/*2- now do the same oposite way psc1 in patch of psc2*/
for(i=0;i<5;i++)
intersections[i]=0;
//DEBUG_SIM("get vector");
/* center-to-center vector reversed for the opposite-direction test */
vec1=vec_scale(interact->r_cm,-1.0);
//DEBUG_SIM("second intersection");
intrs=psc_intersect(interact->part2,interact->part1,interact->param->half_len[1],interact->param->half_len[0],vec1, intersections, rcut, interact->param,1, patchnum2);
if (intrs <2)
return 0.0; /*sc is all outside patch, attractive energy is 0*/
S1=intersections[0]; /*points on sc1*/
S2=intersections[1];
/*3- scaling function1: dependence on the length of intersetions*/
v1=fabs(S1-S2);
v2=fabs(T1-T2);
f0=0.5*(v1+v2);
/*4a- with two intersection pices calculate vector between their CM
-this is for angular orientation*/
vec1=vec_scale(interact->part1->dir,(S1+S2)*0.5);
vec2=vec_scale(interact->part2->dir,(T1+T2)*0.5);
vec_intrs.x=vec2.x-vec1.x-interact->r_cm.x;
vec_intrs.y=vec2.y-vec1.y-interact->r_cm.y;
vec_intrs.z=vec2.z-vec1.z-interact->r_cm.z;
/*vec_intrs should be from sc1 to sc2*/
//fprintf (stderr, "segments_CM: %.8f %.8f %.8f \n",vec_intrs.x,vec_intrs.y,vec_intrs.z);
/*4b - calculate closest distance attractive energy from it*/
vec_mindist = mindist_segments(interact->part1->dir,v1,interact->part2->dir,v2,vec_intrs);
//fprintf (stderr, "segments closest dist: %.8f %.8f %.8f \n",vec_mindist.x,vec_mindist.y,vec_mindist.z);
ndist=sqrt(DOT(vec_mindist,vec_mindist));
//dist=DOT(vec_intrs,vec_intrs);
/* square-well at contact, cosine-squared switch out to pdis+pswitch */
if (ndist < interact->param->pdis)
atrenergy = -interact->param->epsilon;
//atrenergy = -1.0;
else {
atrenergy = cos(PIH*(ndist-interact->param->pdis)/interact->param->pswitch);
atrenergy *= -atrenergy*interact->param->epsilon ;
}
/*5- scaling function2: angular dependence of patch1*/
vec1=vec_scale(vec_intrs,1.0);
//vec1=vec_scale(vec_mindist,-1.0);
vec1=vec_perpproject(&vec1, &interact->part1->dir);
normalise(&vec1);
a = DOT(vec1,interact->part1->patchdir[patchnum1]);
f1 = fanglscale(a,interact->param, 0+2*patchnum1);
/*6- scaling function3: angular dependence of patch2*/
vec1=vec_scale(vec_intrs,-1.0);
//vec1=vec_scale(vec_mindist,1.0);
vec1=vec_perpproject(&vec1, &interact->part2->dir);
normalise(&vec1);
a = DOT(vec1,interact->part2->patchdir[patchnum2]);
f2 = fanglscale(a,interact->param, 1+2*patchnum2);
//printf("v1: %f v2: %f f0: %f f1: %f f2: %f ener: %f\n",v1,v2,f0,f1,f2,atrenergy);
/*7- put it all together*/
atrenergy *=f0*f1*f2;
//if (atrenergy < 0) printf ("atraction %f\n",atrenergy);
// fprintf (stderr, "attraction %.8f \n",atrenergy);
// exit(1);
return atrenergy;
}
/* a = r_ij * n_i */
/* Angular scaling of the patch attraction for patch-angle entry `which`.
 * `a` is the cosine between the (projected) inter-particle vector and the
 * patch direction.  Returns 0 outside the switching cosine pcanglsw, 1 inside
 * the full-strength cosine pcangl, and a linear ramp in between. */
/* TODO: distinguish different particle types */
double fanglscale(double a, struct ia_param * param, int which)
{
    double outer = param->pcanglsw[which];  /* cosine where the patch ends   */
    double inner = param->pcangl[which];    /* cosine of full patch strength */

    if (a <= outer)
        return 0.0;
    if (a >= inner)
        return 1.0;
    /* linear switch between the outer and inner patch-angle cosines */
    return 0.5 - ((outer + inner)*0.5 - a )/(inner - outer);
}
/*CPSC..............................................................................*/
/*
Determines total energy of two spherocylinders of type 3 -cylindrical psc -CPSC
*/
/*
 * Total pair energy of two cylindrical patchy spherocylinders (CPSC, type 3):
 * WCA repulsion plus patch-patch attraction.  Chiral variants (CHCPSC/TCHCPSC)
 * temporarily replace part->dir with the per-patch chiral axis chdir[] and
 * recompute the closest distance; two-patch variants (TCPSC/TCHCPSC) add the
 * second-patch contributions.  Directions are restored before returning.
 */
double e_cpsc_cpsc(struct interacts * interact)
{
double atrenergy, repenergy;
void closestdist(struct interacts *);
double erepulsive(struct interacts *);
double eattractive_cpsc_cpsc(struct interacts *,int,int);
//DEBUG_SIM("do energy 33") ;
closestdist(interact);
repenergy = erepulsive(interact);
//DEBUG_SIM("got the rep. energy");
/* beyond the cutoff or with zero well depth there is no attraction */
if ( ( interact->dist > interact->param->rcut ) || ( interact->param->epsilon == 0.0 ) )
atrenergy = 0.0;
else {
BOOL firstCH=FALSE, secondCH=FALSE;
/* save the real axes; chiral particles use chdir[] while evaluating patches */
struct vector olddir1 = interact->part1->dir;
struct vector olddir2 = interact->part2->dir;
if ( (interact->param->geotype[0] == CHCPSC)||(interact->param->geotype[0] == TCHCPSC) )
firstCH = TRUE;
if ( (interact->param->geotype[1] == CHCPSC)||(interact->param->geotype[1] == TCHCPSC) )
secondCH = TRUE;
if(firstCH)
interact->part1->dir = interact->part1->chdir[0];
if(secondCH)
interact->part2->dir = interact->part2->chdir[0];
if ((firstCH) || (secondCH) ) {
closestdist(interact);
}
/* patch 0 of part1 with patch 0 of part2 */
atrenergy = eattractive_cpsc_cpsc(interact,0,0);
/*addition of interaction of second patches*/
if ( (interact->param->geotype[0] == TCPSC) || (interact->param->geotype[0] == TCHCPSC) ||
(interact->param->geotype[1] == TCPSC) ||(interact->param->geotype[1] == TCHCPSC) ) {
BOOL firstT=FALSE, secondT=FALSE;
if ( (interact->param->geotype[0] == TCPSC) || (interact->param->geotype[0] == TCHCPSC) )
firstT = TRUE;
if ( (interact->param->geotype[1] == TCPSC) ||(interact->param->geotype[1] == TCHCPSC) )
secondT = TRUE;
if (firstT) {
if (firstCH) {
interact->part1->dir = interact->part1->chdir[1];
closestdist(interact);
}
atrenergy += eattractive_cpsc_cpsc(interact,1,0);
}
if ( (firstT) && (secondT) ) {
if (secondCH) {
interact->part2->dir = interact->part2->chdir[1];
closestdist(interact);
}
atrenergy += eattractive_cpsc_cpsc(interact,1,1);
}
/* NOTE(review): in the secondT-only case (secondT && !firstT) with a chiral
   part2, part2->dir still holds chdir[0] while patch index 1 is evaluated --
   confirm chdir[1] should not be installed here as in the firstT&&secondT case */
if (secondT) {
if (firstT && firstCH ) {
interact->part1->dir = interact->part1->chdir[0];
closestdist(interact);
}
atrenergy += eattractive_cpsc_cpsc(interact,0,1);
}
}
/* restore the true particle axes */
if (firstCH)
interact->part1->dir = olddir1;
if (secondCH)
interact->part2->dir = olddir2;
}
return repenergy+atrenergy;
}
/*
Determines attractive energy of two spherocylinders of type 3 -cylindrical psc -CPSC
*/
/*
 * Patch-patch attractive energy between two CPSCs for the patch pair
 * (patchnum1 on part1, patchnum2 on part2).  Steps:
 *   1,2 - intersection interval of each cylinder with the other's patch
 *         region at the cutoff (cpsc_intersect); fewer than 2 intersection
 *         points on either side means no attraction;
 *   3   - length weight f0 from the two interval lengths;
 *   4   - closest distance between the intersected segments gives the
 *         distance-dependent well (flat to pdis, cos^2 switch beyond);
 *   5,6 - angular weights f1, f2 from each patch direction;
 *   7   - product of all factors.
 */
double eattractive_cpsc_cpsc(struct interacts * interact, int patchnum1, int patchnum2)
{
int i, intrs;
double rcut, atrenergy, v1, v2, f0, f1, f2, T1, T2, S1, S2, a, ndist;
double intersections[5];
struct vector vec1, vec2, vec_intrs, vec_mindist;
struct vector vec_sub(struct vector, struct vector);
struct vector vec_sum(struct vector, struct vector);
struct vector vec_create(double, double, double);
struct vector vec_scale(struct vector, double);
struct vector vec_perpproject(struct vector*, struct vector*);
struct vector mindist_segments(struct vector, double, struct vector, double, struct vector);
void normalise(struct vector *);
int cpsc_intersect(struct particles *, struct particles *,
double, double, struct vector, double *,double, struct ia_param *, int which, int patchnum);
double fanglscale(double, struct ia_param *, int which);
rcut = interact->param->rcut;
// interact->halfl = interact->param->half_len[0];
for(i=0;i<5;i++)
intersections[i]=0;
/*1- do intersections of spherocylinder2 with patch of spherocylinder1 at.
cut distance C*/
intrs=cpsc_intersect(interact->part1,interact->part2,interact->param->half_len[0],interact->param->half_len[1],interact->r_cm, intersections, rcut, interact->param,0, patchnum1);
if (intrs <2)
return 0.0; /*sc is all outside patch, attractive energy is 0*/
T1=intersections[0]; /*points on sc2*/
T2=intersections[1];
/*2- now do the same oposite way psc1 in patch of psc2*/
for(i=0;i<5;i++)
intersections[i]=0;
/* r_cm reversed because the roles of the particles are swapped */
vec1=vec_scale(interact->r_cm,-1.0);
intrs=cpsc_intersect(interact->part2,interact->part1,interact->param->half_len[1],interact->param->half_len[0],vec1, intersections, rcut, interact->param,1, patchnum2);
if (intrs <2)
return 0.0; /*sc is all outside patch, attractive energy is 0*/
S1=intersections[0]; /*points on sc1*/
S2=intersections[1];
/*3- scaling function1: dependence on the length of intersetions*/
v1=fabs(S1-S2);
v2=fabs(T1-T2);
f0=0.5*(v1+v2);
/*4a- with two intersection pices calculate vector between their CM
-this is for angular orientation*/
vec1=vec_scale(interact->part1->dir,(S1+S2)*0.5);
vec2=vec_scale(interact->part2->dir,(T1+T2)*0.5);
vec_intrs.x=vec2.x-vec1.x-interact->r_cm.x;
vec_intrs.y=vec2.y-vec1.y-interact->r_cm.y;
vec_intrs.z=vec2.z-vec1.z-interact->r_cm.z;
/*vec_intrs should be from sc1 to sc2*/
// fprintf (stderr, "segments_CM: %.8f %.8f %.8f \n",vec_intrs.x,vec_intrs.y,vec_intrs.z);
/*4b - calculate closest distance attractive energy from it*/
vec_mindist = mindist_segments(interact->part1->dir,v1,interact->part2->dir,v2,vec_intrs);
// fprintf (stderr, "segments closest dist: %.8f %.8f %.8f \n",vec_mindist.x,vec_mindist.y,vec_mindist.z);
ndist=sqrt(DOT(vec_mindist,vec_mindist));
//dist=DOT(vec_intrs,vec_intrs);
/* square-well to pdis, then cos^2 switching to zero over pswitch */
if (ndist < interact->param->pdis)
atrenergy = -interact->param->epsilon;
else {
atrenergy = cos(PIH*(ndist-interact->param->pdis)/interact->param->pswitch);
atrenergy *= -atrenergy*interact->param->epsilon ;
}
/*5- scaling function2: angular dependence of patch1*/
vec1=vec_scale(vec_intrs,1.0);
//vec1=vec_scale(vec_mindist,-1.0);
vec1=vec_perpproject(&vec1, &interact->part1->dir);
normalise(&vec1);
a = DOT(vec1,interact->part1->patchdir[patchnum1]);
/* pcangl/pcanglsw are indexed particle + 2*patch, hence 0+2*patchnum1 */
f1 = fanglscale(a,interact->param, 0+2*patchnum1);
/*6- scaling function3: angular dependence of patch2*/
vec1=vec_scale(vec_intrs,-1.0);
//vec1=vec_scale(vec_mindist,1.0);
vec1=vec_perpproject(&vec1, &interact->part2->dir);
normalise(&vec1);
a = DOT(vec1,interact->part2->patchdir[patchnum2]);
f2 = fanglscale(a,interact->param, 1+2*patchnum2);
/*7- put it all together*/
atrenergy *=f0*f1*f2;
//if (atrenergy < 0) printf ("atraction %f\n",atrenergy);
// fprintf (stderr, "attraction %.8f \n",atrenergy);
// exit(1);
return atrenergy;
}
/*..............................................................................*/
/*
Determines total energy of spherocylinders type PSC and CPSC
*/
/*
 * Total pair energy of a PSC with a CPSC (mixed patchy spherocylinders):
 * WCA repulsion plus patch-patch attraction.  Chiral variants (CH*)
 * temporarily replace part->dir with the chiral axis chdir[] and recompute
 * the closest distance; two-patch variants (T*) add second-patch terms.
 * Directions are restored before returning.
 */
double e_psc_cpsc(struct interacts * interact)
{
double atrenergy, repenergy;
void closestdist(struct interacts *);
double erepulsive(struct interacts *);
double eattractive_psc_cpsc(struct interacts *,int,int);
//DEBUG_SIM("do energy 23") ;
closestdist(interact);
repenergy = erepulsive(interact);
//DEBUG_SIM("got the rep. energy");
/* beyond the cutoff or with zero well depth there is no attraction */
if ( ( interact->dist > interact->param->rcut ) || ( interact->param->epsilon == 0.0 ) )
atrenergy = 0.0;
else {
BOOL firstCH=FALSE, secondCH=FALSE;
/* save the real axes; chiral particles use chdir[] while evaluating patches */
struct vector olddir1 = interact->part1->dir;
struct vector olddir2 = interact->part2->dir;
if ((interact->param->geotype[0] == CHPSC) || (interact->param->geotype[0] == CHCPSC)||
(interact->param->geotype[0] == TCHPSC) || (interact->param->geotype[0] == TCHCPSC) )
firstCH = TRUE;
if ((interact->param->geotype[1] == CHPSC) || (interact->param->geotype[1] == CHCPSC)||
(interact->param->geotype[1] == TCHPSC) || (interact->param->geotype[1] == TCHCPSC) )
secondCH = TRUE;
if(firstCH)
interact->part1->dir = interact->part1->chdir[0];
if(secondCH)
interact->part2->dir = interact->part2->chdir[0];
if ((firstCH) || (secondCH) ) {
closestdist(interact);
}
/* patch 0 of part1 with patch 0 of part2 */
atrenergy = eattractive_psc_cpsc(interact,0,0);
/*addition of interaction of second patches*/
if ( (interact->param->geotype[0] == TCPSC) || (interact->param->geotype[0] == TCHCPSC) ||
(interact->param->geotype[0] == TPSC) || (interact->param->geotype[0] == TCHPSC) ||
(interact->param->geotype[1] == TCPSC) ||(interact->param->geotype[1] == TCHCPSC) ||
(interact->param->geotype[1] == TPSC) || (interact->param->geotype[1] == TCHPSC) ) {
BOOL firstT=FALSE, secondT=FALSE;
if ( (interact->param->geotype[0] == TCPSC) || (interact->param->geotype[0] == TCHCPSC) ||
(interact->param->geotype[0] == TPSC) || (interact->param->geotype[0] == TCHPSC) )
firstT = TRUE;
if ( (interact->param->geotype[1] == TCPSC) || (interact->param->geotype[1] == TCHCPSC) ||
(interact->param->geotype[1] == TPSC) || (interact->param->geotype[1] == TCHPSC) )
secondT = TRUE;
if (firstT) {
if (firstCH) {
interact->part1->dir = interact->part1->chdir[1];
closestdist(interact);
}
atrenergy += eattractive_psc_cpsc(interact,1,0);
}
if ( (firstT) && (secondT) ) {
if (secondCH) {
interact->part2->dir = interact->part2->chdir[1];
closestdist(interact);
}
atrenergy += eattractive_psc_cpsc(interact,1,1);
}
/* NOTE(review): in the secondT-only case (secondT && !firstT) with a chiral
   part2, part2->dir still holds chdir[0] while patch index 1 is evaluated --
   confirm chdir[1] should not be installed here as in the firstT&&secondT case */
if (secondT) {
if (firstT && firstCH ) {
interact->part1->dir = interact->part1->chdir[0];
closestdist(interact);
}
atrenergy += eattractive_psc_cpsc(interact,0,1);
}
}
/* restore the true particle axes */
if (firstCH)
interact->part1->dir = olddir1;
if (secondCH)
interact->part2->dir = olddir2;
}
return repenergy+atrenergy;
}
/*
Determines attractive energy of spherocylinders type PSC and CPSC
*/
/*
 * Patch-patch attractive energy between a PSC and a CPSC for the patch pair
 * (patchnum1 on part1, patchnum2 on part2).  Identical scheme to
 * eattractive_cpsc_cpsc, except the intersection routine used for each
 * particle depends on whether it is the PSC (psc_intersect, patch wraps the
 * caps) or the CPSC (cpsc_intersect, cylindrical section only).
 */
double eattractive_psc_cpsc(struct interacts * interact,int patchnum1,int patchnum2)
{
int i, intrs;
double rcut, atrenergy, ndist;
double v1, v2, f0, f1, f2, T1, T2, S1, S2, a;
double intersections[5];
struct vector vec1, vec2, vec_intrs, vec_mindist;
struct vector vec_sub(struct vector, struct vector);
struct vector vec_sum(struct vector, struct vector);
struct vector vec_create(double, double, double);
struct vector vec_scale(struct vector, double);
struct vector vec_perpproject(struct vector*, struct vector*);
struct vector mindist_segments(struct vector, double, struct vector, double, struct vector);
void normalise(struct vector *);
int psc_intersect(struct particles *, struct particles *,
double, double, struct vector, double *,double, struct ia_param *, int which,int patchnum);
int cpsc_intersect(struct particles *, struct particles *,
double, double, struct vector, double *,double, struct ia_param *, int which,int patchnum);
double fanglscale(double, struct ia_param *, int which);
rcut = interact->param->rcut;
//interact->halfl = interact->param->half_len[0];
//DEBUG_SIM("halfl = %lf", interact->halfl);
for(i=0;i<5;i++)
intersections[i]=0;
/* TRUE when part1 is the PSC of the pair (part2 is then the CPSC) */
BOOL first;
if ( (interact->param->geotype[0] == PSC)||(interact->param->geotype[0] == CHPSC)||(interact->param->geotype[0] == TPSC)||(interact->param->geotype[0] == TCHPSC) ){
first = TRUE;
} else {
first = FALSE;
}
//cospatch = param.pcanglsw;
//cospatchinr = param.pcangl;
/*1- do intersections of spherocylinder2 with patch of spherocylinder1 at.
cut distance C*/
//DEBUG_SIM("first intersection");
if (first) {
intrs=psc_intersect(interact->part1,interact->part2,interact->param->half_len[0],interact->param->half_len[1],interact->r_cm, intersections, rcut, interact->param,0, patchnum1);
} else {
intrs=cpsc_intersect(interact->part1,interact->part2,interact->param->half_len[0],interact->param->half_len[1],interact->r_cm, intersections, rcut, interact->param,0, patchnum1);
}
//DEBUG_SIM("first intersection: done");
if (intrs <2){
//DEBUG_SIM("No intersection :(");
return 0.0; /*sc is all outside patch, attractive energy is 0*/
}
T1=intersections[0]; /*points on sc2*/
T2=intersections[1];
/*2- now do the same oposite way psc1 in patch of psc2*/
for(i=0;i<5;i++)
intersections[i]=0;
//DEBUG_SIM("get vector");
/* r_cm reversed because the roles of the particles are swapped;
   the opposite intersection routine is used for the other particle */
vec1=vec_scale(interact->r_cm,-1.0);
//DEBUG_SIM("second intersection");
if (first) {
intrs=cpsc_intersect(interact->part2,interact->part1,interact->param->half_len[1],interact->param->half_len[0],vec1, intersections, rcut, interact->param,1, patchnum2);
} else {
intrs=psc_intersect(interact->part2,interact->part1,interact->param->half_len[1],interact->param->half_len[0],vec1, intersections, rcut, interact->param,1, patchnum2);
}
if (intrs <2)
return 0.0; /*sc is all outside patch, attractive energy is 0*/
S1=intersections[0]; /*points on sc1*/
S2=intersections[1];
/*3- scaling function1: dependence on the length of intersetions*/
v1=fabs(S1-S2);
v2=fabs(T1-T2);
f0=0.5*(v1+v2);
/*4a- with two intersection pices calculate vector between their CM
-this is for angular orientation*/
vec1=vec_scale(interact->part1->dir,(S1+S2)*0.5);
vec2=vec_scale(interact->part2->dir,(T1+T2)*0.5);
vec_intrs.x=vec2.x-vec1.x-interact->r_cm.x;
vec_intrs.y=vec2.y-vec1.y-interact->r_cm.y;
vec_intrs.z=vec2.z-vec1.z-interact->r_cm.z;
/*vec_intrs should be from sc1 to sc2*/
// fprintf (stderr, "segments_CM: %.8f %.8f %.8f \n",vec_intrs.x,vec_intrs.y,vec_intrs.z);
/*4b - calculate closest distance attractive energy from it*/
vec_mindist = mindist_segments(interact->part1->dir,v1,interact->part2->dir,v2,vec_intrs);
// fprintf (stderr, "segments closest dist: %.8f %.8f %.8f \n",vec_mindist.x,vec_mindist.y,vec_mindist.z);
ndist=sqrt(DOT(vec_mindist,vec_mindist));
//dist=DOT(vec_intrs,vec_intrs);
/* square-well to pdis, then cos^2 switching to zero over pswitch */
if (ndist < interact->param->pdis)
atrenergy = -interact->param->epsilon;
//atrenergy = -1.0;
else {
atrenergy = cos(PIH*(ndist-interact->param->pdis)/interact->param->pswitch);
atrenergy *= -atrenergy*interact->param->epsilon ;
}
/*5- scaling function2: angular dependence of patch1*/
vec1=vec_scale(vec_intrs,1.0);
//vec1=vec_scale(vec_mindist,-1.0);
vec1=vec_perpproject(&vec1, &interact->part1->dir);
normalise(&vec1);
a = DOT(vec1,interact->part1->patchdir[patchnum1]);
/* pcangl/pcanglsw are indexed particle + 2*patch */
f1 = fanglscale(a,interact->param, 0+2*patchnum1);
/*6- scaling function3: angular dependence of patch2*/
vec1=vec_scale(vec_intrs,-1.0);
//vec1=vec_scale(vec_mindist,1.0);
vec1=vec_perpproject(&vec1, &interact->part2->dir);
normalise(&vec1);
a = DOT(vec1,interact->part2->patchdir[patchnum2]);
f2 = fanglscale(a,interact->param, 1+2*patchnum2);
/*7- put it all together*/
atrenergy *=f0*f1*f2;
//if (atrenergy < 0) printf ("atraction %f\n",atrenergy);
// fprintf (stderr, "attraction %.8f \n",atrenergy);
// exit(1);
return atrenergy;
}
/*..............................................................................*/
/*
* Determines total energy of spherocylinder type 1 and sphere type 11
*/
/*
 * Total energy of an attractive sphere (type 11) with a non-patchy attractive
 * spherocylinder: WCA repulsion plus an isotropic attraction weighted by the
 * length of the spherocylinder axis lying inside the cutoff sphere.
 */
double e_spa_sca(struct interacts * interact)
{
    double repenergy, atrenergy, reach, top, bottom, halfl;
    void closestdist(struct interacts *);
    double erepulsive(struct interacts *);

    closestdist(interact);
    repenergy = erepulsive(interact);
    if ( ( interact->dist > interact->param->rcut ) || ( interact->param->epsilon == 0.0 ) ) {
        atrenergy = 0.0;
    } else {
        /* flat well up to pdis, cos^2 switch out to pdis+pswitch */
        if (interact->dist < interact->param->pdis) {
            atrenergy = -interact->param->epsilon;
        } else {
            atrenergy = cos(PIH*(interact->dist-interact->param->pdis)/interact->param->pswitch);
            atrenergy *= -atrenergy*interact->param->epsilon ;
        }
        /* half length of whichever particle of the pair is the spherocylinder */
        if (interact->param->geotype [0] < SP)
            halfl = interact->param->half_len[0];
        else
            halfl = interact->param->half_len[1];
        /* half-chord of the cutoff sphere along the axis, clipped to the ends */
        reach = sqrt(interact->param->rcut*interact->param->rcut-interact->dist*interact->dist);
        top = (interact->contt + reach > halfl) ? halfl : interact->contt + reach;
        bottom = (interact->contt - reach < -halfl) ? -halfl : interact->contt - reach;
        atrenergy *= top - bottom;
    }
    return repenergy+atrenergy;
}
/*..............................................................................*/
/*
* Determines total energy of spherocylinder type 2 and sphere type 11
*/
/*
 * Total pair energy of a patchy spherocylinder (PSC, type 2) with an
 * attractive sphere (SPA, type 11): WCA repulsion plus the patch-sphere
 * attraction.  Chiral variants (CHPSC/TCHPSC) temporarily use chdir[];
 * two-patch variants (TPSC/TCHPSC) add the second-patch term.  A pair where
 * both particles are two-patch spherocylinders is an internal error here.
 */
double e_psc_spa(struct interacts * interact)
{
double atrenergy, repenergy;
void closestdist(struct interacts *);
double erepulsive(struct interacts *);
double eattractive_psc_spa(struct interacts *, int);
//DEBUG_SIM("do energy 211") ;
closestdist(interact);
repenergy = erepulsive(interact);
//DEBUG_SIM("got the rep. energy");
/* beyond the cutoff or with zero well depth there is no attraction */
if ( ( interact->dist > interact->param->rcut ) || ( interact->param->epsilon == 0.0 ) )
atrenergy = 0.0;
else {
BOOL firstCH=FALSE, secondCH=FALSE;
/* save the real axes; chiral particles use chdir[] while evaluating patches */
struct vector olddir1 = interact->part1->dir;
struct vector olddir2 = interact->part2->dir;
if ( (interact->param->geotype[0] == CHPSC) || (interact->param->geotype[0] == TCHPSC) )
firstCH = TRUE;
if ( (interact->param->geotype[1] == CHPSC) || (interact->param->geotype[1] == TCHPSC) )
secondCH = TRUE;
if(firstCH)
interact->part1->dir = interact->part1->chdir[0];
if(secondCH)
interact->part2->dir = interact->part2->chdir[0];
if ((firstCH) || (secondCH) ) {
closestdist(interact);
}
/* first patch of the spherocylinder with the sphere */
atrenergy = eattractive_psc_spa(interact,0);
/*addition of interaction of second patches*/
if ( (interact->param->geotype[0] == TPSC) || (interact->param->geotype[0] == TCHPSC) ||
(interact->param->geotype[1] == TPSC) ||(interact->param->geotype[1] == TCHPSC) ) {
BOOL firstT=FALSE, secondT=FALSE;
if ( (interact->param->geotype[0] == TPSC) || (interact->param->geotype[0] == TCHPSC) )
firstT = TRUE;
if ( (interact->param->geotype[1] == TPSC) ||(interact->param->geotype[1] == TCHPSC) )
secondT = TRUE;
/* only one of the pair can be the spherocylinder; patch index 1 is its
   second patch regardless of which side it is on */
if (firstT) {
if (firstCH) {
interact->part1->dir = interact->part1->chdir[1];
closestdist(interact);
}
atrenergy += eattractive_psc_spa(interact,1);
}
if (secondT) {
if(secondCH) {
interact->part2->dir = interact->part2->chdir[1];
closestdist(interact);
}
atrenergy += eattractive_psc_spa(interact,1);
}
if ( (firstT) && (secondT) ) {
fprintf (stderr, "ERROR PSC should interact s SPA but got two PSC \n");
exit(1);
}
}
/* restore the true particle axes */
if (firstCH)
interact->part1->dir = olddir1;
if (secondCH)
interact->part2->dir = olddir2;
}
return repenergy+atrenergy;
}
/*
* Determines attractive energy of spherocylinder type 2 and sphere type 11
*/
double eattractive_psc_spa(struct interacts * interact, int patchnum1)
{
double atrenergy, a, b, f0, halfl;
struct vector vec1;
struct vector vec_perpproject(struct vector *, struct vector*);
void normalise(struct vector *);
double fanglscale(double, struct ia_param *, int which);
int which;
/*calculate closest distance attractive energy*/
if (interact->dist < interact->param->pdis)
atrenergy = -interact->param->epsilon;
else {
atrenergy = cos(PIH*(interact->dist-interact->param->pdis)/interact->param->pswitch);
atrenergy *= -atrenergy*interact->param->epsilon ;
}
/*scaling function: angular dependence of patch1*/
if (interact->param->geotype[0] < SP) {
which = 0;
vec1=vec_perpproject(&interact->distvec, &interact->part1->dir);
normalise(&vec1);
a = DOT(vec1,interact->part1->patchdir[patchnum1]);
halfl=interact->param->half_len[0];
} else {
which = 1;
vec1=vec_perpproject(&interact->distvec, &interact->part2->dir);
normalise(&vec1);
a = DOT(vec1,interact->part2->patchdir[patchnum1]);
halfl=interact->param->half_len[1];
}
/*scaling function for the length of spherocylinder within cutoff*/
b = sqrt(interact->param->rcut*interact->param->rcut-interact->dist*interact->dist);
if ( interact->contt + b > halfl )
f0 = halfl;
else
f0 = interact->contt + b;
if ( interact->contt - b < -halfl )
f0 -= -halfl;
else
f0 -= interact->contt - b;
atrenergy *= fanglscale(a,interact->param, which)*f0;
//if (atrenergy < 0) printf ("atraction %f\n",atrenergy);
//fprintf (stderr, "attraction211 %.8f x: %.8f y: %.8f z: %.8f \n",atrenergy,vec1.x,vec1.y,vec1.z);
//exit(1);
return atrenergy;
}
/*..............................................................................*/
/*
Determines total energy of spherocylinder type 3 and sphere type 11
*/
double e_cpsc_spa(struct interacts * interact)
{
double atrenergy, repenergy, halfl;
void closestdist(struct interacts *);
double erepulsive(struct interacts *);
double eattractive_cpsc_spa(struct interacts *,int);
//DEBUG_SIM("do energy 311") ;
closestdist(interact);
repenergy = erepulsive(interact);
//DEBUG_SIM("got the rep. energy");
if (interact->param->geotype[0] < SP) {
halfl=interact->param->half_len[0];
} else {
halfl=interact->param->half_len[1];
}
if ( ( interact->dist > interact->param->rcut ) || ( interact->param->epsilon == 0.0 ) || ( interact->dist > interact->param->rcut )
|| (interact->contt > halfl) || (interact->contt < -halfl) )
atrenergy = 0.0;
else {
BOOL firstCH=FALSE, secondCH=FALSE;
struct vector olddir1 = interact->part1->dir;
struct vector olddir2 = interact->part2->dir;
if ( (interact->param->geotype[0] == CHCPSC) || (interact->param->geotype[0] == TCHCPSC) )
firstCH = TRUE;
if ( (interact->param->geotype[1] == CHCPSC) || (interact->param->geotype[1] == TCHCPSC) )
secondCH = TRUE;
if(firstCH)
interact->part1->dir = interact->part1->chdir[0];
if(secondCH)
interact->part2->dir = interact->part2->chdir[0];
if ((firstCH) || (secondCH) ) {
closestdist(interact);
}
atrenergy = eattractive_cpsc_spa(interact,0);
/*addition of interaction of second patches*/
if ( (interact->param->geotype[0] == TCPSC) || (interact->param->geotype[0] == TCHCPSC) ||
(interact->param->geotype[1] == TCPSC) ||(interact->param->geotype[1] == TCHCPSC) ) {
BOOL firstT=FALSE, secondT=FALSE;
if ( (interact->param->geotype[0] == TCPSC) || (interact->param->geotype[0] == TCHCPSC) )
firstT = TRUE;
if ( (interact->param->geotype[1] == TCPSC) ||(interact->param->geotype[1] == TCHCPSC) )
secondT = TRUE;
if (firstT) {
if (firstCH) {
interact->part1->dir = interact->part1->chdir[1];
closestdist(interact);
}
atrenergy += eattractive_cpsc_cpsc(interact,1,0);
}
if (secondT) {
if(secondCH) {
interact->part2->dir = interact->part2->chdir[1];
closestdist(interact);
}
atrenergy += eattractive_cpsc_cpsc(interact,0,1);
}
if ( (firstT) && (secondT) ) {
fprintf (stderr, "ERROR PSC should interact s SPA but got two PSC \n");
exit(1);
}
}
if (firstCH)
interact->part1->dir = olddir1;
if (secondCH)
interact->part2->dir = olddir2;
}
return repenergy+atrenergy;
}
/*
Determines attractive energy of spherocylinder type 3 and sphere type 11
*/
double eattractive_cpsc_spa(struct interacts * interact,int patchnum1)
{
double atrenergy, a, b, f0, halfl;
struct vector vec1;
int which;
struct vector vec_perpproject(struct vector *, struct vector*);
void normalise(struct vector *);
double fanglscale(double, struct ia_param *, int which);
/*if it is in cylindrical part c>-halfl and c<halfl*/
/*calculate closest distance attractive energy*/
if (interact->dist < interact->param->pdis)
atrenergy = -interact->param->epsilon;
else {
atrenergy = cos(PIH*(interact->dist-interact->param->pdis)/interact->param->pswitch);
atrenergy *= -atrenergy*interact->param->epsilon ;
}
/*scaling function: angular dependence of patch1*/
if (interact->param->geotype[0] < SP) {
which = 0;
vec1=vec_perpproject(&interact->distvec, &interact->part1->dir);
normalise(&vec1);
a = DOT(vec1,interact->part1->patchdir[patchnum1]);
halfl = interact->param->half_len[0];
} else {
which = 1;
vec1=vec_perpproject(&interact->distvec, &interact->part2->dir);
normalise(&vec1);
a = DOT(vec1,interact->part2->patchdir[patchnum1]);
halfl = interact->param->half_len[1];
}
/*scaling function for the length of spherocylinder within cutoff*/
b = sqrt(interact->param->rcut*interact->param->rcut-interact->dist*interact->dist);
if ( interact->contt + b > halfl )
f0 = halfl;
else
f0 = interact->contt + b;
if ( interact->contt - b < -halfl )
f0 -= -halfl;
else
f0 -= interact->contt - b;
atrenergy *= fanglscale(a,interact->param, which)*f0;
//if (atrenergy < 0) printf ("atraction %f\n",atrenergy);
//fprintf (stderr, "attraction311 %.8f a: %.8f\n",atrenergy,a);
//exit(1);
return atrenergy;
}
/*..............................................................................*/
/*
Determines total energy of two spherocylinders type 11
*/
/*
 * Total energy of two attractive spheres or spherocylinders of type 11:
 * WCA repulsion plus an isotropic, distance-switched attraction.
 */
double e_2sca_or_2spa(struct interacts * interact)
{
    double repenergy, atrenergy;
    double erepulsive(struct interacts *);
    void closestdist(struct interacts *);

    closestdist(interact);
    repenergy = erepulsive(interact);
    if ( ( interact->dist > interact->param->rcut ) || ( interact->param->epsilon == 0.0 ) ) {
        /* outside the cutoff or no attractive well */
        atrenergy = 0.0;
    } else if (interact->dist < interact->param->pdis) {
        /* flat part of the well */
        atrenergy = -interact->param->epsilon;
    } else {
        /* cos^2 switching region between pdis and pdis+pswitch */
        atrenergy = cos(PIH*(interact->dist-interact->param->pdis)/interact->param->pswitch);
        atrenergy *= -atrenergy*interact->param->epsilon ;
    }
    return repenergy+atrenergy;
}
/*..............................................................................*/
/*
Determines total energy with purely repulsive types
*/
/*
 * Total energy for purely repulsive particle types: only the WCA term.
 */
double e_spn_or_scn(struct interacts * interact)
{
    double erepulsive(struct interacts *);
    void closestdist(struct interacts *);

    closestdist(interact);
    return erepulsive(interact);
}
/*..............................................................................*/
/*
Determines repulsive energy of two spherocylinders
*/
/*
 * WCA (truncated-and-shifted Lennard-Jones) repulsion between the two
 * particles at the precomputed closest distance interact->dist; zero beyond
 * the repulsive cutoff rcutwca.
 */
double erepulsive(struct interacts * interact)
{
    double en6;

    if (interact->dist > interact->param->rcutwca)
        return 0.0;
    /* (sigma/r)^6; shifted by +1 so the potential vanishes at rcutwca */
    en6 = pow((interact->param->sigma/interact->dist),6);
    return 4*en6*(en6-1) + 1.0;
}
/*..............................................................................*/
/*
Indicates not yet programmed interaction
*/
/*
 * Aborts the program for particle-type pairs whose interaction is not
 * implemented.  Never actually returns; the return statement only satisfies
 * the double signature shared by the energy functions.
 */
double enoexist(struct interacts * interact)
{
    fprintf (stderr, "ERROR: We have not programed interaction of types %d and %d\n",
             interact->part1->type,interact->part2->type);
    exit (1);
    return 0.0;
}
/* function for calculation of harmonic potential*/
/* Harmonic potential 0.5*k*(x - x0)^2 for bonds and angles. */
double harmonic(double aktualvalue, double eqvalue, double springconst)
{
    double delta = aktualvalue - eqvalue;
    return springconst*delta*delta*0.5;
}
/*..............................................................................*/
/*
Determines bond energy
*/
double bondenergy(long num1, long num2, struct interacts * interact, struct topo * topo, struct conf * conf)
{
double energy=0.0, bondlength, halfl;
struct vector vec1, vec2, vecbond;
int * geotype = interact->param->geotype;
struct vector image(struct vector, struct vector, struct vector);
double harmonic(double, double, double);
/*interaction with nearest neighbours -harmonic*/
if ((topo->chainparam[conf->particle[num1].chaint]).bond1c >= 0) {
if (num2 == topo->conlist[num1][1]) {
/*num1 is connected to num2 by tail*/
if ( (geotype[0] >= SP) && (geotype[1] >= SP) )
energy = harmonic(interact->distcm,topo->chainparam[conf->particle[num1].chaint].bond1eq,topo->chainparam[conf->particle[num1].chaint].bond1c);
else {
if (geotype[0] < SP)
halfl=interact->param->half_len[0];
else
halfl = 0.0;
vec1.x=conf->particle[num1].pos.x - conf->particle[num1].dir.x * halfl /conf->box.x;
vec1.y=conf->particle[num1].pos.y - conf->particle[num1].dir.y * halfl /conf->box.y;
vec1.z=conf->particle[num1].pos.z - conf->particle[num1].dir.z * halfl /conf->box.z;
if (geotype[1] < SP)
halfl=interact->param->half_len[1];
else
halfl = 0.0;
vec2.x=conf->particle[num2].pos.x + conf->particle[num2].dir.x * halfl /conf->box.x;
vec2.y=conf->particle[num2].pos.y + conf->particle[num2].dir.y * halfl /conf->box.y;
vec2.z=conf->particle[num2].pos.z + conf->particle[num2].dir.z * halfl /conf->box.z;
vecbond = image(vec1, vec2, conf->box);
bondlength = sqrt(DOT(vecbond,vecbond));
energy = harmonic(bondlength,topo->chainparam[conf->particle[num1].chaint].bond1eq,topo->chainparam[conf->particle[num1].chaint].bond1c);
}
} else {
if (num2 == topo->conlist[num1][0]) {
/*num1 is connected to num2 by head*/
if ( (geotype[0] >= SP) && (geotype[1] >= SP) )
energy = harmonic(interact->distcm,topo->chainparam[conf->particle[num1].chaint].bond1eq,topo->chainparam[conf->particle[num1].chaint].bond1c);
else {
if (geotype[0] < SP)
halfl=interact->param->half_len[0];
else
halfl = 0.0;
vec1.x=conf->particle[num1].pos.x + conf->particle[num1].dir.x * halfl /conf->box.x;
vec1.y=conf->particle[num1].pos.y + conf->particle[num1].dir.y * halfl /conf->box.y;
vec1.z=conf->particle[num1].pos.z + conf->particle[num1].dir.z * halfl /conf->box.z;
if (geotype[0] < SP)
halfl=interact->param->half_len[0];
else
halfl = 0.0;
vec2.x=conf->particle[num2].pos.x - conf->particle[num2].dir.x * halfl /conf->box.x;
vec2.y=conf->particle[num2].pos.y - conf->particle[num2].dir.y * halfl /conf->box.y;
vec2.z=conf->particle[num2].pos.z - conf->particle[num2].dir.z * halfl /conf->box.z;
vecbond = image(vec1, vec2, conf->box);
bondlength = sqrt(DOT(vecbond,vecbond));
energy = harmonic(bondlength,topo->chainparam[conf->particle[num1].chaint].bond1eq,topo->chainparam[conf->particle[num1].chaint].bond1c);
}
}
}
}
/*interaction with second nearest neighbours -harmonic*/
if (topo->chainparam[conf->particle[num1].chaint].bond2c >= 0) {
if (num2 == topo->conlist[num1][2]) {
/*num1 is connected to num2 by tail*/
if ( (geotype[0] >= SP) && (geotype[1] >= SP) )
energy = harmonic(interact->distcm,topo->chainparam[conf->particle[num1].chaint].bond2eq,topo->chainparam[conf->particle[num1].chaint].bond2c);
else {
vecbond = image(conf->particle[num1].pos, conf->particle[num2].pos, conf->box);
bondlength = sqrt(DOT(vecbond,vecbond));
energy = harmonic(bondlength,topo->chainparam[conf->particle[num1].chaint].bond2eq,topo->chainparam[conf->particle[num1].chaint].bond2c);
}
} else {
if (num2 == topo->conlist[num1][3]) {
/*num1 is connected to num2 by head*/
if ( (geotype[0] >= SP) && (geotype[1] >= SP) )
energy = harmonic(interact->distcm,topo->chainparam[conf->particle[num1].chaint].bond2eq,topo->chainparam[conf->particle[num1].chaint].bond2c);
else {
vecbond = image(conf->particle[num1].pos, conf->particle[num2].pos, conf->box);
bondlength = sqrt(DOT(vecbond,vecbond));
energy = harmonic(bondlength,topo->chainparam[conf->particle[num1].chaint].bond2eq,topo->chainparam[conf->particle[num1].chaint].bond2c);
}
}
}
}
/*interaction with nearest neighbours - direct harmonic bond*/
if ((topo->chainparam[conf->particle[num1].chaint]).bonddc > 0) {
if (num2 == topo->conlist[num1][1]) {
/*num1 is connected to num2 by tail*/
if ( (geotype[0] >= SP) && (geotype[1] >= SP) )
energy = harmonic(interact->distcm,topo->chainparam[conf->particle[num1].chaint].bonddeq,topo->chainparam[conf->particle[num1].chaint].bonddc);
else {
if (geotype[0] < SP)
halfl=interact->param->half_len[0];
else
halfl = 0.0;
vec1.x=conf->particle[num1].pos.x - conf->particle[num1].dir.x * halfl /conf->box.x;
vec1.y=conf->particle[num1].pos.y - conf->particle[num1].dir.y * halfl /conf->box.y;
vec1.z=conf->particle[num1].pos.z - conf->particle[num1].dir.z * halfl /conf->box.z;
if (geotype[1] < SP)
halfl=interact->param->half_len[1];
else
halfl = 0.0;
vec2.x=conf->particle[num2].pos.x + conf->particle[num2].dir.x * (halfl + topo->chainparam[conf->particle[num1].chaint].bonddeq) /conf->box.x ;
vec2.y=conf->particle[num2].pos.y + conf->particle[num2].dir.y * (halfl + topo->chainparam[conf->particle[num1].chaint].bonddeq) /conf->box.y ;
vec2.z=conf->particle[num2].pos.z + conf->particle[num2].dir.z * (halfl + topo->chainparam[conf->particle[num1].chaint].bonddeq) /conf->box.z ;
vecbond = image(vec1, vec2, conf->box);
bondlength = sqrt(DOT(vecbond,vecbond));
energy = harmonic(bondlength,0.0,topo->chainparam[conf->particle[num1].chaint].bonddc);
}
} else {
if (num2 == topo->conlist[num1][0]) {
/*num1 is connected to num2 by head*/
if ( (geotype[0] >= SP) && (geotype[1] >= SP) )
energy = harmonic(interact->distcm,topo->chainparam[conf->particle[num1].chaint].bond1eq,topo->chainparam[conf->particle[num1].chaint].bond1c);
else {
if (geotype[0] < SP)
halfl=interact->param->half_len[0];
else
halfl = 0.0;
vec1.x=conf->particle[num1].pos.x + conf->particle[num1].dir.x * (halfl + topo->chainparam[conf->particle[num1].chaint].bonddeq) /conf->box.x ;
vec1.y=conf->particle[num1].pos.y + conf->particle[num1].dir.y * (halfl + topo->chainparam[conf->particle[num1].chaint].bonddeq) /conf->box.y ;
vec1.z=conf->particle[num1].pos.z + conf->particle[num1].dir.z * (halfl + topo->chainparam[conf->particle[num1].chaint].bonddeq) /conf->box.z ;
if (geotype[0] < SP)
halfl=interact->param->half_len[0];
else
halfl = 0.0;
vec2.x=conf->particle[num2].pos.x - conf->particle[num2].dir.x * halfl /conf->box.x;
vec2.y=conf->particle[num2].pos.y - conf->particle[num2].dir.y * halfl /conf->box.y;
vec2.z=conf->particle[num2].pos.z - conf->particle[num2].dir.z * halfl /conf->box.z;
vecbond = image(vec1, vec2, conf->box);
bondlength = sqrt(DOT(vecbond,vecbond));
energy = harmonic(bondlength,0.0,topo->chainparam[conf->particle[num1].chaint].bonddc);
}
}
}
}
//printf("bondlength: %f\n",bondlength);
// printf("bondener: %f\n",energy);
return energy;
}
/*..............................................................................*/
/*
 Determines angle energy between spherocylinders

 Harmonic bending terms between bonded particles num1 and num2. Two
 contributions, each enabled by its force constant being >= 0:
   angle1 (axis bending): angle between the two particle axes; when one
     partner is a sphere, its "axis" is replaced by the vector from the
     sphere to the near end of the bonded spherocylinder.
   angle2 (patch twist): angle between the patch orientations of two
     bonded spherocylinders (spheres have no patch, so skipped).
 Returns the sum of the applicable harmonic energies, 0.0 if none apply.
 Relies on: conlist[num1][0] = tail neighbour, conlist[num1][1] = head
 neighbour (same convention as bondenergy above).
*/
double angleenergy(long num1, long num2, struct interacts * interact, struct topo * topo, struct conf * conf)
{
double energy=0.0, currangle, halfl;
struct vector vec1, vec2;
int * geotype = interact->param->geotype;
/* helpers defined elsewhere in this file */
struct vector image(struct vector, struct vector, struct vector);
void normalise(struct vector *);
double harmonic(double, double, double);
/*angle interaction with nearest neighbours -harmonic*/
if ((topo->chainparam[conf->particle[num1].chaint]).angle1c >= 0) {
if (num2 == topo->conlist[num1][0]) {
/*num1 is connected to num2 by tail*/
if ( (geotype[0] >= SP) && (geotype[1] >= SP) )
/*spheres do not have this interaction*/
energy += 0.0;
else {
if (geotype[0] < SP)
vec1 = conf->particle[num1].dir;
else {
/* NOTE(review): half_len[1] is used with particle num2's coordinates here
   while the branch tests geotype[0]; presumably [0]/[1] index the ordered
   type pair of interact->param, not num1/num2 directly — confirm. */
halfl=interact->param->half_len[1];
//sphere angle is defined versus the end of spherocylinder
vec1.x=conf->particle[num2].pos.x - conf->particle[num2].dir.x * halfl /conf->box.x;
vec1.y=conf->particle[num2].pos.y - conf->particle[num2].dir.y * halfl /conf->box.y;
vec1.z=conf->particle[num2].pos.z - conf->particle[num2].dir.z * halfl /conf->box.z;
vec1 = image(vec1, conf->particle[num1].pos, conf->box);
}
if (geotype[1] < SP)
vec2 = conf->particle[num2].dir;
else {
halfl=interact->param->half_len[0];
vec2.x=conf->particle[num1].pos.x + conf->particle[num1].dir.x * halfl /conf->box.x;
vec2.y=conf->particle[num1].pos.y + conf->particle[num1].dir.y * halfl /conf->box.y;
vec2.z=conf->particle[num1].pos.z + conf->particle[num1].dir.z * halfl /conf->box.z;
vec2 = image(vec2, conf->particle[num2].pos, conf->box);
}
normalise(&vec1);
normalise(&vec2);
currangle = acos(DOT(vec1,vec2));
energy += harmonic(currangle,topo->chainparam[conf->particle[num1].chaint].angle1eq,topo->chainparam[conf->particle[num1].chaint].angle1c);
}
} else {
if (num2 == topo->conlist[num1][1]) {
/*num1 is connected to num2 by head*/
if ( (geotype[0] >= SP) && (geotype[1] >= SP) )
/*spheres do not have this interaction*/
energy += 0.0;
else {
if (geotype[0] < SP)
vec1 = conf->particle[num1].dir;
else {
halfl=interact->param->half_len[1];
//sphere angle is defined versus the end of spherocylinder
vec1.x=conf->particle[num2].pos.x + conf->particle[num2].dir.x * halfl /conf->box.x;
vec1.y=conf->particle[num2].pos.y + conf->particle[num2].dir.y * halfl /conf->box.y;
vec1.z=conf->particle[num2].pos.z + conf->particle[num2].dir.z * halfl /conf->box.z;
vec1 = image(vec1, conf->particle[num1].pos, conf->box);
}
if (geotype[1] < SP)
vec2 = conf->particle[num2].dir;
else {
halfl=interact->param->half_len[0];
vec2.x=conf->particle[num1].pos.x - conf->particle[num1].dir.x * halfl /conf->box.x;
vec2.y=conf->particle[num1].pos.y - conf->particle[num1].dir.y * halfl /conf->box.y;
vec2.z=conf->particle[num1].pos.z - conf->particle[num1].dir.z * halfl /conf->box.z;
vec2 = image(vec2, conf->particle[num2].pos, conf->box);
}
normalise(&vec1);
normalise(&vec2);
currangle = acos(DOT(vec1,vec2));
/* head connection attributes the energy to num2's chain parameters */
energy += harmonic(currangle,topo->chainparam[conf->particle[num2].chaint].angle1eq,topo->chainparam[conf->particle[num2].chaint].angle1c);
}
}
}
}
/*interaction between the orientation of spherocylinders patches -harmonic*/
if (topo->chainparam[conf->particle[num1].chaint].angle2c >= 0) {
if (num2 == topo->conlist[num1][0]) {
/*num1 is connected to num2 by tail*/
if ( (geotype[0] < SP) && (geotype[1] < SP) ) {
/* NOTE(review): the acos argument is a difference of two dot products and
   can in principle fall outside [-1,1], producing NaN — confirm the inputs
   guarantee the range, or clamp. */
currangle = acos(DOT(conf->particle[num1].patchdir[0],conf->particle[num2].patchdir[0]) - DOT(conf->particle[num1].dir,conf->particle[num2].patchdir[0]) );
energy += harmonic(currangle,topo->chainparam[conf->particle[num1].chaint].angle2eq,topo->chainparam[conf->particle[num1].chaint].angle2c);
} else {
energy += 0.0;
}
} else {
if (num2 == topo->conlist[num1][1]) {
/*num1 is connected to num2 by head*/
if ( (geotype[0] < SP) && (geotype[1] < SP) ) {
currangle = acos(DOT(conf->particle[num2].patchdir[0],conf->particle[num1].patchdir[0]) - DOT(conf->particle[num2].dir,conf->particle[num1].patchdir[0]) );
energy += harmonic(currangle,topo->chainparam[conf->particle[num2].chaint].angle2eq,topo->chainparam[conf->particle[num2].chaint].angle2c);
} else {
energy += 0.0;
}
}
}
}
// printf("angleener: %f\n",energy);
return energy;
}
/* cluses distance calculation*/
void closestdist(struct interacts * interact)
{
double c, d, halfl;
struct vector mindist_segments(struct vector dir1, double halfl1,
struct vector dir2, double halfl2, struct vector r_cm);
double linemin(double, double);
//printf("we have %d %d ",interact->param->geotype[0],interact->param->geotype[1] );
if ((interact->param->geotype[0] >= SP) && (interact->param->geotype[1] >= SP)) { /*we have two spheres - most common, do nothing*/
//printf("we have two spheres ");
interact->distvec = interact->r_cm;
interact->dist = sqrt(interact->dotrcm);
interact->distcm = interact->dist;
} else {
if ((interact->param->geotype[0] < SP) && (interact->param->geotype[1] < SP)) { /*we have two spherocylinders*/
interact->distvec = mindist_segments(interact->part1->dir,interact->param->half_len[0],
interact->part2->dir, interact->param->half_len[1], interact->r_cm);
interact->dist=sqrt(DOT(interact->distvec,interact->distvec));
} else {
if (interact->param->geotype[0] < SP) { /*We have one spherocylinder -it is first one*/
halfl=interact->param->half_len[0];/*finding closest vector from sphyrocylinder to sphere*/
c = DOT(interact->part1->dir,interact->r_cm);
if (c >= halfl) d = halfl;
else {
if (c > -halfl) d = c;
else d = -halfl;
}
interact->contt = c;
interact->distvec.x = - interact->r_cm.x + interact->part1->dir.x * d;
interact->distvec.y = - interact->r_cm.y + interact->part1->dir.y * d;
interact->distvec.z = - interact->r_cm.z + interact->part1->dir.z * d;
interact->dist=sqrt(DOT(interact->distvec,interact->distvec));
} else { /*lst option first one is sphere second one spherocylinder*/
halfl=interact->param->half_len[1]; /*finding closest vector from sphyrocylinder to sphere*/
c = DOT(interact->part2->dir,interact->r_cm);
if (c >= halfl) d = halfl;
else {
if (c > -halfl) d = c;
else d = -halfl;
}
interact->contt = -c;
interact->distvec.x = interact->r_cm.x - interact->part2->dir.x * d;
interact->distvec.y = interact->r_cm.y - interact->part2->dir.y * d;
interact->distvec.z = interact->r_cm.z - interact->part2->dir.z * d;
interact->dist=sqrt(DOT(interact->distvec,interact->distvec));
}
}
}
}
/*..............................................................................*/
/*
 Determines energy of two particles

 Total pair energy of particles num1 and num2: the type-pair potential
 dispatched through intfce[][], plus bond and angle terms for bonded pairs.
 Returns 0.0 when the pair is beyond every cutoff, or when no interaction
 function is registered for the type pair (after reporting to stderr).
 */
double paire(long num1, long num2, double (* intfce[MAXT][MAXT])(struct interacts *),
             struct topo * topo, struct conf * conf)
{
    double energy=0.0;              /* energy*/
    struct vector r_cm;             /* Vector between centres of mass from part2 to part1*/
    struct interacts interact;      /*interaction parameters*/
    double bondenergy(long, long, struct interacts *, struct topo * topo, struct conf * conf);
    double angleenergy(long, long, struct interacts *, struct topo * topo, struct conf * conf);

    /*Placing interactin particle in unit box and finding vector connecting CM*/
    /*r_cm = image(part1.pos, part2.pos, box); explicit statement below for performance optimization*/
    /* nearest-image convention per component: shift the fractional separation
       into [-0.5,0.5) by truncating (x -+ 0.5), then scale by the box length */
    r_cm.x = conf->particle[num1].pos.x - conf->particle[num2].pos.x;
    r_cm.y = conf->particle[num1].pos.y - conf->particle[num2].pos.y;
    r_cm.z = conf->particle[num1].pos.z - conf->particle[num2].pos.z;
    if ( r_cm.x < 0 )
        r_cm.x = conf->box.x * (r_cm.x - (double)( (long)(r_cm.x-0.5) ) );
    else
        r_cm.x = conf->box.x * (r_cm.x - (double)( (long)(r_cm.x+0.5) ) );
    if ( r_cm.y < 0 )
        r_cm.y = conf->box.y * (r_cm.y - (double)( (long)(r_cm.y-0.5) ) );
    else
        r_cm.y = conf->box.y * (r_cm.y - (double)( (long)(r_cm.y+0.5) ) );
    if ( r_cm.z < 0 )
        r_cm.z = conf->box.z * (r_cm.z - (double)( (long)(r_cm.z-0.5) ) );
    else
        r_cm.z = conf->box.z * (r_cm.z - (double)( (long)(r_cm.z+0.5) ) );
    interact.dotrcm = DOT(r_cm,r_cm);
    if ( interact.dotrcm > topo->sqmaxcut) return 0.0; /* distance so far that even spherocylinders cannot be within cutoff */

    interact.r_cm=r_cm;
    interact.contt = 0;
    interact.distvec.x = 0;
    interact.distvec.y = 0;
    interact.distvec.z = 0;
    interact.box = conf->box;
    interact.part1 = &conf->particle[num1];
    interact.part2 = &conf->particle[num2];
    interact.param = topo->ia_params[conf->particle[num1].type] + conf->particle[num2].type;

    if(intfce[conf->particle[num1].type][conf->particle[num2].type] == NULL){
        fprintf(stderr, "interaction function for type %d and %d not defined!\n",
                conf->particle[num1].type, conf->particle[num2].type);
        /* BUGFIX: previously execution fell through and called the NULL
           function pointer below - undefined behavior (crash). Report the
           error and skip this pair instead. */
        return 0.0;
    }
    energy = (*intfce[conf->particle[num1].type][conf->particle[num2].type])( &interact);
    //printf("num: %ld %ld e: %f dist: %f",num1,num2,energy,interact.dist);
    energy += bondenergy ( num1, num2, &interact, topo, conf);
    energy += angleenergy ( num1, num2, &interact, topo, conf);
    //printf(" e: %f\n",energy);
    return energy;
}
/*...........................................................................*/
/*Calculates interaction of target particle and external field version 2
 calculate projection of spherocylinder in direction of patch and calculate
 interacting line segment within cutoff

 Returns repulsive + attractive wall energy for the particle `target`.
 Handles plain, chiral (CH*), and two-patch (T*) spherocylinder geometries by
 temporarily swapping the particle direction to each chiral direction and
 re-evaluating the closest distance.
*/
double extere2 (long target, struct topo * topo, struct conf * conf)
{
double repenergy=0.0,atrenergy=0.0; /* energy*/
double rcmz; /* z distance between*/
double ndist; /* distance for CM of interacting line segment*/
double interendz; /* z coordinate of interaction end*/
struct interacts interact; /* interaction parameters*/
double orient;
double halfl;
BOOL positive, orientin;
struct vector olddir;
struct vector project; /*vector for projection down to plane */
/* helpers defined elsewhere in this file */
double erepulsive(struct interacts *);
// struct vector vec_perpproject(struct vector*, struct vector*);
// void normalise(struct vector *);
double fanglscale(double, struct ia_param *, int which);
void exter2_closestdist(struct interacts * interact, BOOL *positive, BOOL *orientin, double *orient,
double *rcmz,double *interendz, struct vector *project);
/* NOTE(review): this prototype declares orientin as int* while the variable
   passed is BOOL - harmless only if BOOL is typedef'd to int; confirm. */
double exter2_atre(struct interacts * interact,int *orientin, double *rcmz, double *interendz, BOOL *positive,
double orient,struct vector *project, double *ndist,int, double );
/* calcualte distance to center of mass (nearest-image z component) */
if ( conf->particle[target].pos.z < 0 ) {
rcmz = conf->box.z * (conf->particle[target].pos.z - (double)( (long)(conf->particle[target].pos.z - 0.5) ) );
} else {
rcmz = conf->box.z * (conf->particle[target].pos.z - (double)( (long)(conf->particle[target].pos.z + 0.5) ) );
}
/* projection direction points from the particle towards the wall plane */
project.x=0;
project.y=0;
if (rcmz < 0) {
interact.dist = -rcmz;
positive = FALSE;
interendz = -1.0;
project.z = 1.0;
} else {
interact.dist = rcmz;
positive = TRUE;
interendz = 1.0;
project.z = -1.0;
}
interact.dotrcm = rcmz * rcmz;
if ( interact.dotrcm > topo->exter.sqmaxcut) return 0.0; /* distance so far that even spherocylinders cannot be within cutoff */
/* NOTE(review): interact.r_cm is never assigned in this function, so this
   reads an indeterminate value (undefined behavior); likely rcmz was
   intended - TODO confirm against the original exter_atre usage. */
interact.distvec.z = interact.r_cm.z;
interact.distcm = interact.dist;
interact.box = conf->box;
interact.part1 = &conf->particle[target];
interact.param = &topo->exter.interactions[conf->particle[target].type];
halfl = 0.5* topo->exter.interactions[conf->particle[target].type].len[0];
ndist = interact.dist;
orientin = TRUE;
orient = 0.0;
exter2_closestdist(&interact,&positive,&orientin,&orient,&rcmz,&interendz,&project);
/* now we have closest distance so we can calculate repulsion*/
repenergy = erepulsive(&interact);
//printf("dist: %f",interact.dist);
/*save chiral stuff*/
olddir = interact.part1->dir;
if ((interact.param->geotype[0] == CHCPSC)||(interact.param->geotype[0] == CHPSC)) {
interact.part1->dir = interact.part1->chdir[0];
exter2_closestdist(&interact,&positive,&orientin,&orient,&rcmz,&interendz,&project);
}
/* attraction of patch 0: zero outside cutoff, with zero epsilon, or when the
   patch faces away from the wall */
if (( interact.dist > interact.param->rcut ) || (interact.param->epsilon == 0.0 ) ||
( (interact.part1->patchdir[0].z >0)&&(positive) ) || ( (interact.part1->patchdir[0].z <0)&&(!(positive)) ) )
atrenergy = 0.0;
else {
atrenergy = exter2_atre(&interact,&orientin,&rcmz,&interendz,&positive,orient,&project,&ndist,0,halfl);
}
/* second patch for two-patch (T*) geometries */
if ((interact.param->geotype[0] == TCPSC)||(interact.param->geotype[0] == TPSC)||
(interact.param->geotype[0] == TCHCPSC)||(interact.param->geotype[0] == TCHPSC)) {
if ((interact.param->geotype[0] == TCHCPSC)||(interact.param->geotype[0] == TCHPSC)) {
interact.part1->dir = interact.part1->chdir[1];
exter2_closestdist(&interact,&positive,&orientin,&orient,&rcmz,&interendz,&project);
}
/* NOTE(review): for TCH* types this repeats the call just above with
   unchanged inputs - redundant but idempotent; confirm intent. */
exter2_closestdist(&interact,&positive,&orientin,&orient,&rcmz,&interendz,&project);
if (( interact.dist > interact.param->rcut ) || (interact.param->epsilon == 0.0 ) ||
( (interact.part1->patchdir[1].z >0)&&(positive) ) || ( (interact.part1->patchdir[1].z <0)&&(!(positive)) ) )
atrenergy += 0.0;
else {
atrenergy += exter2_atre(&interact,&orientin,&rcmz,&interendz,&positive,orient,&project,&ndist,1,halfl);
}
}
/* restore the direction that was overwritten for chiral evaluation */
if ((interact.param->geotype[0] == CHCPSC)||(interact.param->geotype[0] == CHPSC)||
(interact.param->geotype[0] == TCHCPSC)||(interact.param->geotype[0] == TCHPSC) ) {
interact.part1->dir = olddir;
}
//printf("%f %f \n",conf->particle[target].pos.z*conf->box.z,repenergy+atrenergy);
return repenergy+atrenergy;
}
/*
 Attractive wall energy for patch `numpatch` of a patchy spherocylinder
 (version 2: length-based scaling, no area integration).
 Computes the line segment of the spherocylinder that lies within the wall
 cutoff, projects it onto the wall, and scales the distance-dependent
 attraction by segment lengths and patch tilt. For non-patchy geometries the
 else-branch reuses *ndist from the caller and applies a wall/sphere area
 correction instead.
 Returns the (negative) attraction energy contribution.
*/
double exter2_atre(struct interacts * interact,int *orientin, double *rcmz, double *interendz, BOOL *positive, double orient,struct vector *project, double *ndist,int numpatch, double halfl)
{
struct vector pbeg,pend; /* projected spherocylinder begining and end*/
double a,length1,length2, f0,f1;
struct vector cm1,cm2; /* centrar of interacting segments */
int line;
struct vector partbeg,partend; /*closest and furthest point of particle*/
struct vector inters;
double atrenergy=0.0;
/* wall-projection helpers defined elsewhere in this file */
int cpsc_wall(struct vector* pbeg, struct vector* pend, struct vector* projectdir,
struct vector* partdir, double* halfl,BOOL* orientin,BOOL* positive, double* rcmz,
double * cut, struct vector* partbeg, struct vector* partend);
int psc_wall(struct vector* pbeg, struct vector* pend, struct vector* projectdir,
struct vector* partdir, BOOL* positive, double * cut, struct vector* partbeg, struct vector* partend);
/*interaction with PATCHY SPHEROCYLINDERS*/
if ((interact->param->geotype[0] < SP)&&(interact->param->geotype[0] > SCA)) {
//printf("partdir: %f %f %f \n ",interact->part1->dir.x,interact->part1->dir.y,interact->part1->dir.z);
//printf("patchdir: %f %f %f \n ",interact->part1->patchdir[0].x,interact->part1->patchdir[0].y,interact->part1->patchdir[0].z);
/* calculate position of closest and furthest point (begining and end of spherocylinder)*/
a = (*orientin-0.5)*2; /*if orientin a =1 else a=-1 */
partbeg.x = a * interact->part1->dir.x * halfl;
partbeg.y = a * interact->part1->dir.y * halfl;
partbeg.z = *rcmz + a * interact->part1->dir.z *halfl;
partend.x = - a * interact->part1->dir.x * halfl;
partend.y = - a * interact->part1->dir.y * halfl;
partend.z = *rcmz - a * interact->part1->dir.z * halfl;
//printf("partbeg %f %f %f partend %f %f %f \n",partbeg.x,partbeg.y,partbeg.z,partend.x,partend.y,partend.z);
/*calculate interacting line segment and its cm of spherocylinder*/
/*calculate end point z*/
if ( (interact->param->rcut - interact->dist)/fabs(interact->part1->dir.z) < 2.0*halfl ){
/*if cutoff goes through spherocylinder the end point is at cutoff*/
*interendz *= interact->param->rcut;
} else {
/*endpoint is at the end of spherocylinders*/
*interendz = partend.z;
}
/*calculate CM of interacting line segment of spherocylinder*/
if (*positive) {
cm1.z = AVER(*interendz,interact->dist);
} else {
cm1.z = AVER(*interendz,-interact->dist);
}
if (interact->part1->dir.z != 0.0 ) {
a = (*interendz - cm1.z ) / interact->part1->dir.z;
length1= -orient*2.0*a;
a = a + orient*halfl;
} else {
a = 0.0;
length1 = 2.0*halfl;
}
//printf("len1: %f rcm %f interz %f cutoff %f \n",length1,rcmz, interendz,interact.dist);
cm1.x = interact->part1->dir.x * a;
cm1.y = interact->part1->dir.y * a;
/* we have interacting segment*/
if ((interact->param->geotype[0] == CPSC)||(interact->param->geotype[0] == CHCPSC)) {
/*CPSC type*/
if ( ((*interendz >= interact->dist)&&(*positive)) || ((*interendz <= -interact->dist)&&(!(*positive))) ){
/*test if projection is not all out of interaction*/
line = cpsc_wall(&pbeg,&pend,project,&interact->part1->dir, \
&interact->param->half_len[0],orientin,positive,rcmz,&interact->param->rcut,&partbeg,&partend);
//printf("line: %d beg %f %f end %f %f \n",line,pbeg.x,pbeg.y,pend.x,pend.y);
} else {
line = 0;
}
} else {
/*PSC and CHPSC interaction with wall */
line = psc_wall(&pbeg,&pend,project,&interact->part1->dir, \
positive,&interact->param->rcut,&partbeg,&partend);
//printf("line: %d beg %f %f end %f %f \n",line,pbeg.x,pbeg.y,pend.x,pend.y);
}
if (line > 0) {
/*cm2 by average begining and end*/
cm2.x = AVER(pbeg.x,pend.x);
cm2.y = AVER(pbeg.y,pend.y);
cm2.z = 0.0;
/*length by size of end-benining*/
length2 = sqrt( (pend.x-pbeg.x)*(pend.x-pbeg.x)+(pend.y-pbeg.y)*(pend.y-pbeg.y) );
inters.x = cm2.x - cm1.x;
inters.y = cm2.y - cm1.y;
inters.z = cm2.z - cm1.z;
//printf("cm2 %f %f %f inters %f %f %f \n",cm2.x,cm2.y,cm2.z,inters.x,inters.y,inters.z);
*ndist = sqrt(DOT(inters,inters));
/* flat attraction within pdis, cosine-squared switch out to the cutoff */
if (*ndist < interact->param->pdis) {
atrenergy = -interact->param->epsilon;
}
else {
atrenergy= cos(PIH*(*ndist-interact->param->pdis)/interact->param->pswitch);
atrenergy *= -atrenergy*interact->param->epsilon;
}
/* scaling function1: dependence on the length of intersetions plus*/
f0=(length1 + length2)*0.5;
/*scaling with angle*/
f1 = fabs(interact->part1->patchdir[numpatch].z);
atrenergy *= f0*f1;
//printf(" %f %f %f %f %f %f %f \n",conf->particle[target].pos.z*conf->box.z,atrenergy, area, length1, length2,f0,ndist);
//printf("%f %f %f %f\n",pbeg.x,pbeg.y,pend.x,pend.y);
} else {
atrenergy = 0.0;
}
} else {
/* non-patchy geometry: *ndist carries the closest distance from the caller */
if (*ndist < interact->param->pdis)
atrenergy = -interact->param->epsilon;
else {
atrenergy= cos(PIH*(*ndist-interact->param->pdis)/interact->param->pswitch);
atrenergy *= -atrenergy*interact->param->epsilon;
}
/*add wall scaling wall area/ particle arear.. to reflect that we have a wall not sphere */
atrenergy *= (interact->param->rcut*interact->param->rcut - (*ndist)*(*ndist))/(interact->param->sigma*interact->param->sigma) ;
}
return atrenergy;
}
/* Closest distance of particle 1 to the wall (external field), plus the
 * orientation bookkeeping used by the attraction routines.
 * Outputs: interact->dist (closest approach), *positive (particle above the
 * wall plane), *interendz / project->z (interaction end sign and projection
 * direction), and for spherocylinders *orientin / *orient (which end of the
 * axis is closer to the wall). */
void exter2_closestdist(struct interacts * interact, BOOL *positive, BOOL *orientin, double *orient, double *rcmz,double *interendz, struct vector *project)
{
    /* which side of the wall the centre of mass sits on */
    if (*rcmz < 0) {
        *positive = FALSE;
        interact->dist = -(*rcmz);
        *interendz = -1.0;
        project->z = 1.0;
    } else {
        *positive = TRUE;
        interact->dist = (*rcmz);
        *interendz = 1.0;
        project->z = -1.0;
    }
    /*psc closest is allways end closer to wall*/
    if (interact->param->geotype[0] < SP) {
        /* axial half-length projected on the wall normal */
        double zproj = interact->part1->dir.z * interact->param->half_len[0];
        /* the end opposite the wall-facing axis direction is the close one */
        if ((interact->part1->dir.z > 0) == (*positive != FALSE)) {
            *orientin = FALSE;
            *orient = -1.0;
        } else {
            *orientin = TRUE;
            *orient = 1.0;
        }
        /* distance from the closest spherocylinder end to the wall; the sign
         * of orient selects the end, the outer sign restores a magnitude */
        if (*positive)
            interact->dist = *rcmz + (*orient) * zproj;
        else
            interact->dist = -(*rcmz + (*orient) * zproj);
    }
}
/*...........................................................................*/
/*Calculates interaction of target particle and external field
 calculate projection of patch of spherocylinder on wall
 evaluate intersection area and calculate interaction from that

 Attractive wall energy for patch `numpatch` of a patchy spherocylinder,
 version 1: the patch and the cutoff are projected onto the wall plane and
 the resulting intersection AREA (built from up to eight polygon vertices
 plus circle-segment corrections for PSC ends) scales the attraction.
 The many areaeightpoints() branches enumerate which projected points
 (B=pbeg, B1/B2=patch-side begins, E/E1/E2=ends, 1..4=cutoff extremes) bound
 the polygon; the trailing vertex-order comments name each case.
 Note: the `project` parameter is unused here (kept for signature parity with
 exter2_atre); the patch direction itself is used as projection direction.
*/
double exter_atre(struct interacts * interact,int *orientin, double *rcmz, double *interendz, BOOL *positive, double orient,struct vector *project, double *ndist,int numpatch,double halfl)
{
double area,a,b,c,r2;
double atrenergy=0.0; /* energy*/
BOOL countend;
struct vector cm1,cm2; /* centrar of interacting segments */
struct vector pbeg,pend; /* projected spherocylinder begining and end*/
struct vector inters,newdir;
struct vector pbeg1,pend1,pbeg2,pend2,pextr1,pextr2,pextr3,pextr4; /*additinal point of projected patch for calculation of area */
double length1, cuttoproject, f0;
int line, line1, line2,extra;
struct vector partbeg,partend; /*closest and furthest point of particle*/
/* helpers defined elsewhere in this file */
double erepulsive(struct interacts *);
struct vector vec_perpproject(struct vector*, struct vector*);
void normalise(struct vector *);
double fanglscale(double, struct ia_param *, int which);
struct vector vec_create(double, double, double);
double areaeightpoints(struct vector*,struct vector*,struct vector*,struct vector*,struct vector*,struct vector*,struct vector*,struct vector*);
int cpsc_wall(struct vector* pbeg, struct vector* pend, struct vector* projectdir,
struct vector* partdir, double* halfl,BOOL* orientin,BOOL* positive, double* rcmz,
double * cut, struct vector* partbeg, struct vector* partend);
int psc_wall(struct vector* pbeg, struct vector* pend, struct vector* projectdir,
struct vector* partdir, BOOL* positive, double * cut, struct vector* partbeg, struct vector* partend);
int cutprojectatwall(struct vector* pextr1, struct vector* pextr2, struct vector* pextr3, struct vector* pextr4,
struct vector* projectdir, struct vector* partdir, double * cutdist, struct vector *partbeg,
struct vector *partend, struct vector *pend, double *cuttoproject, BOOL* orientin);
void exter2_closestdist(struct interacts * interact, BOOL *positive, BOOL *orientin, double *orient, double *rcmz,double *interendz, struct vector *project);
/*interaction with PATCHY SPHEROCYLINDERS*/
if ((interact->param->geotype[0] < SP)&&(interact->param->geotype[0] > SCA)) {
//printf("partdir: %f %f %f \n ",interact->part1->dir.x,interact->part1->dir.y,interact->part1->dir.z);
//printf("patchdir: %f %f %f \n ",interact->part1->patchdir[numpatch].x,interact->part1->patchdir[numpatch].y,interact->part1->patchdir[numpatch].z);
/* calculate position of closest and furthest point (begining and end of spherocylinder)*/
a = (*orientin-0.5)*2; /*if orientin a =1 else a=-1 */
partbeg.x = a * interact->part1->dir.x * halfl;
partbeg.y = a * interact->part1->dir.y * halfl;
partbeg.z = *rcmz + a * interact->part1->dir.z * halfl;
partend.x = - a * interact->part1->dir.x * halfl;
partend.y = - a * interact->part1->dir.y * halfl;
partend.z = *rcmz - a * interact->part1->dir.z * halfl;
//printf("partbeg %f %f %f partend %f %f %f \n",partbeg.x,partbeg.y,partbeg.z,partend.x,partend.y,partend.z);
/*calculate interacting line segment and its cm of spherocylinder*/
/*calculate end point z*/
if ( (interact->param->rcut - interact->dist)/fabs(interact->part1->dir.z) < halfl*2.0 ){
/*if cutoff goes through spherocylinder the end point is at cutoff*/
*interendz *= interact->param->rcut;
} else {
/*endpoint is at the end of spherocylinders*/
*interendz = partend.z;
}
/*calculate CM of interacting line segment of spherocylinder*/
if (*positive) {
cm1.z = AVER(*interendz,interact->dist);
} else {
cm1.z = AVER(*interendz,-interact->dist);
}
if (interact->part1->dir.z != 0.0 ) {
a = (*interendz - cm1.z ) / interact->part1->dir.z;
length1= -orient*2.0*a;
a = a + orient*halfl;
} else {
a = 0.0;
length1 = 2.0*halfl;
}
//printf("len1: %f rcm %f interz %f cutoff %f \n",length1,rcmz, interendz,interact->dist);
cm1.x = interact->part1->dir.x * a;
cm1.y = interact->part1->dir.y * a;
/*calculate projection on wall as infinite line and make it interacting segment*/
if (interact->part1->patchdir[numpatch].z != 0) {
cuttoproject = -interact->param->rcut*interact->part1->patchdir[numpatch].z; /*z coordinate of point where projection is in cut distance*/
if ( ((partend.z < cuttoproject)&&(*positive)) || ((cuttoproject < partend.z)&&(!(*positive))) ){
cuttoproject = partend.z;
}
} else {
cuttoproject = partbeg.z;
}
//printf("cutproject %f \n",cuttoproject);
//printf("cm1 %f %f %f \n",cm1.x, cm1.y,cm1.z );
/* we have interacting segment*/
if ((interact->param->geotype[0] == CPSC)||(interact->param->geotype[0] == CHCPSC)) {
/*CPSC type*/
if ( ((cuttoproject >= interact->dist)&&(*positive)) || ((cuttoproject <= -interact->dist)&&(!(*positive))) ){
/*test if projection is not all out of interaction*/
line = cpsc_wall(&pbeg,&pend,&interact->part1->patchdir[numpatch],&interact->part1->dir, \
&interact->param->half_len[0],orientin,positive,rcmz,&interact->param->rcut,&partbeg,&partend);
//printf("line: %d beg %f %f end %f %f \n",line,pbeg.x,pbeg.y,pend.x,pend.y);
} else {
line = 0;
}
} else {
/*PSC and CHPSC interaction with wall */
line = psc_wall(&pbeg,&pend,&interact->part1->patchdir[numpatch],&interact->part1->dir, \
positive,&interact->param->rcut,&partbeg,&partend);
//printf("line: %d beg %f %f end %f %f \n",line,pbeg.x,pbeg.y,pend.x,pend.y);
}
if (line > 0) {
area = 0.0;
/*project cutoff boudaries*/
if (line == 2 ) {
/*if projection end is on sphere of begining don't care about cylinder cutoff*/
extra = 0;
} else {
extra = cutprojectatwall(&pextr1, &pextr2, &pextr3, &pextr4, &interact->part1->patchdir[numpatch], \
&interact->part1->dir, &interact->param->rcut, &partbeg, &partend,&pend,&cuttoproject,orientin);
}
//printf("extr1: %d %f %f extr2 %f %f extr3 %f %f extr4 %f %f \n",extra,pextr1.x,pextr1.y,pextr2.x,pextr2.y,pextr3.x,pextr3.y,pextr4.x,pextr4.y);
/*project patch boundaries on the first side*/
newdir=interact->part1->patchsides[0+2*numpatch];
line1 = cpsc_wall(&pbeg1,&pend1,&newdir,&interact->part1->dir, \
&interact->param->half_len[0],orientin,positive,rcmz,&interact->param->rcut,&partbeg,&partend);
if ( ((interact->param->geotype[0] == PSC)||(interact->param->geotype[0] == CHPSC)) ) {
line1 = psc_wall(&pbeg1,&pend1,&newdir,&interact->part1->dir, \
positive,&interact->param->rcut,&partbeg,&partend);
}
//printf("line1: %d beg1 %f %f end1 %f %f \n",line1,pbeg1.x,pbeg1.y,pend1.x,pend1.y);
/*project patch boundaries on the second side*/
newdir=interact->part1->patchsides[1+2*numpatch];
line2 = cpsc_wall(&pbeg2,&pend2,&newdir,&interact->part1->dir, \
&interact->param->half_len[0],orientin,positive,rcmz,&interact->param->rcut,&partbeg,&partend);
if ( ((interact->param->geotype[0] == PSC)||(interact->param->geotype[0] == CHPSC)) ) {
line2 = psc_wall(&pbeg2,&pend2,&newdir,&interact->part1->dir, \
positive,&interact->param->rcut,&partbeg,&partend);
}
//printf("line2: %d beg2 %f %f end2 %f %f \n",line2,pbeg2.x,pbeg2.y,pend2.x,pend2.y);
/*calculate area*/
/* NOTE(review): the circle-segment formulas below evaluate sqrt(c/r2) and
   acos of it; r2 == 0 or c > r2 would produce NaN - presumably excluded by
   the geometry, confirm. */
if (extra == 0) {
/*thish should only happen when there is PSC interacting only with end*/
if (line1 == 0) {
if (line2==0) {
/*circle around middle-pbeg*/
area = PI*( (partbeg.x-pbeg.x)*(partbeg.x-pbeg.x) + (partbeg.y-pbeg.y)*(partbeg.y-pbeg.y));
}
else{
/* circle around middle-pbeg minus circle segment*/
a = AVER(pbeg2.x,pend2.x);
b = AVER(pbeg2.y,pend2.y);
c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/
r2 = ( (partbeg.x-pbeg.x)*(partbeg.x-pbeg.x) + (partbeg.y-pbeg.y)*(partbeg.y-pbeg.y)); /*radius squared*/
area = r2*(PI-acos(sqrt(c/r2))) + sqrt(r2*c-c*c);
}
} else {
if (line2==0) {
/* circle around middle-pbeg minus circle segment*/
a = AVER(pbeg1.x,pend1.x);
b = AVER(pbeg1.y,pend1.y);
c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/
r2 = ( (partbeg.x-pbeg.x)*(partbeg.x-pbeg.x) + (partbeg.y-pbeg.y)*(partbeg.y-pbeg.y)); /*radius squared*/
area = r2*(PI-acos(sqrt(c/r2))) + sqrt(r2*c-c*c);
} else {
//area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend,&pend1,&pbeg1,NULL,NULL); /* B B2 E2 E E1 B1 */
/*circle minus two circle segments*/
r2 = ( (partbeg.x-pbeg.x)*(partbeg.x-pbeg.x) + (partbeg.y-pbeg.y)*(partbeg.y-pbeg.y)); /*radius squared*/
area = r2*PI;
a = AVER(pbeg1.x,pend1.x);
b = AVER(pbeg1.y,pend1.y);
c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/
area += -r2*acos(sqrt(c/r2)) + sqrt(r2*c-c*c);
a = AVER(pbeg2.x,pend2.x);
b = AVER(pbeg2.y,pend2.y);
c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/
area += -r2*acos(sqrt(c/r2)) + sqrt(r2*c-c*c);
}
}
} else {
/* cutoff extremes present: decide whether the projected end point itself is
   a polygon vertex (countend) by testing collinearity with edges 4-2 and 1-3 */
b = fabs((pextr4.x-pextr2.x)*(pextr2.y-pend.y)- (pextr2.x-pend.x)*(pextr4.y-pextr2.y));/*pend on 42*/
c = fabs((pextr1.x-pextr3.x)*(pextr3.y-pend.y)- (pextr3.x-pend.x)*(pextr1.y-pextr3.y));/*pend on 13*/
if ( ( b< ZEROTOL) || ( c< ZEROTOL) )
countend = FALSE;
else
countend = TRUE;
if (line1 == 0) {
if (line2 == 0) {
if ( countend ) {
area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pend,&pextr3,&pextr1,NULL,NULL);/* B 2 4 E 3 1 */
} else
area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pextr3,&pextr1,NULL,NULL,NULL);/* B 2 4 3 1 */
} else {
a = fabs((pextr4.x-pextr2.x)*(pextr2.y-pend2.y)- (pextr2.x-pend2.x)*(pextr4.y-pextr2.y));
if ( a< ZEROTOL) /*pend2 on 42*/ {
if ( countend ) {
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend,&pextr3,&pextr1,NULL); /* B B2 E2 4 E 3 1 */
} else {
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pextr3,&pextr1,NULL,NULL); /* B B2 E2 4 3 1 */
}
} else {
a = fabs((pextr1.x-pextr3.x)*(pextr3.y-pend2.y)- (pextr3.x-pend2.x)*(pextr1.y-pextr3.y));
if ( a< ZEROTOL) /*pend2 on 13*/ {
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr1,NULL,NULL,NULL,NULL); /* B B2 E2 1 */
} else { /*pend2 on 34 or on begining sphere of psc*/
if (line2 == 2) {
if ( countend ) {
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend,&pextr3,&pextr1,NULL); /* B B2 E2 4 E 3 1 */
} else {
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pextr3,&pextr1,NULL,NULL); /* B B2 E2 4 3 1 */
}
} else {
if ( countend ) {
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend,&pextr3,&pextr1,NULL,NULL); /* B B2 E2 E 3 1 */
} else {
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr3,&pextr1,NULL,NULL,NULL); /* B B2 E2 3 1 */
}
}
}
}
}
} else {
a = fabs((pextr4.x-pextr2.x)*(pextr2.y-pend1.y)- (pextr2.x-pend1.x)*(pextr4.y-pextr2.y));
if ( a< ZEROTOL) /*pend1 on 42*/ {
if (line2 == 0) {
area = areaeightpoints(&pbeg,&pextr2,&pend1,&pbeg1,NULL,NULL,NULL,NULL); /* B 2 E1 B1 */
} else {
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend1,&pbeg1,NULL,NULL,NULL); /* B B2 E2 E1 B1 */
}
} else {
a = fabs((pextr1.x-pextr3.x)*(pextr3.y-pend1.y)- (pextr3.x-pend1.x)*(pextr1.y-pextr3.y));
if ( a< ZEROTOL) /*pend1 on 13*/ {
if (line2 == 0) {
if (countend) {
area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pend,&pextr3,&pend1,&pbeg1,NULL); /* B 2 4 E 3 E1 B1 */
} else {
area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pextr3,&pend1,&pbeg1,NULL,NULL); /* B 2 4 3 E1 B1 */
}
} else {
a = fabs((pextr4.x-pextr2.x)*(pextr2.y-pend2.y)- (pextr2.x-pend2.x)*(pextr4.y-pextr2.y));
if ( a< ZEROTOL) /*pend2 on 42*/ {
if (countend)
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend,&pextr3,&pend1,&pbeg1); /* B B2 E2 4 E 3 E1 B1 */
else
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pextr3,&pend1,&pbeg1,NULL); /* B B2 E2 4 3 E1 B1 */
} else {
a = fabs((pextr3.x-pextr1.x)*(pextr1.y-pend2.y)- (pextr1.x-pend2.x)*(pextr3.y-pextr1.y));
if ( a< ZEROTOL) /*pend2 on 31*/ {
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend1,&pbeg1,NULL,NULL,NULL); /* B B2 E2 E1 B1 */
} else { /*pend2 close to 34 or on begining sphere of psc*/
if (line2 == 2) {
if (countend)
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend,&pextr3,&pend1,&pbeg1); /* B B2 E2 4 E 3 E1 B1 */
else
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pextr3,&pend1,&pbeg1,NULL); /* B B2 E2 4 3 E1 B1 */
} else {
if (countend)
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend,&pextr3,&pend1,&pbeg1,NULL); /* B B2 E2 E 3 E1 B1 */
else
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr3,&pend1,&pbeg1,NULL,NULL); /* B B2 E2 3 E1 B1 */
}
}
}
}
} else {/*pend1 close to 34 or on beging sphere for psc*/
if (line2 == 0) {
if (line1 ==2) {
if (countend)
area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pend,&pextr3,&pend1,&pbeg1,NULL); /* B 2 4 E 3 E1 B1*/
else {
area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pextr3,&pend1,&pbeg1,NULL,NULL); /* B 2 4 3 E1 B1*/
}
} else {
if (countend)
area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pend,&pend1,&pbeg1,NULL,NULL); /* B 2 4 E E1 B1*/
else {
area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pend1,&pbeg1,NULL,NULL,NULL); /* B 2 4 E1 B1*/
}
}
} else {
a = fabs((pextr4.x-pextr2.x)*(pextr2.y-pend2.y)- (pextr2.x-pend2.x)*(pextr4.y-pextr2.y));
if ( a< ZEROTOL) /* pend2 on 42 */ {
if (countend)
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend,&pend1,&pbeg1,NULL); /* B B2 E2 4 E E1 B1 */
else
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend1,&pbeg1,NULL,NULL); /* B B2 E2 4 E1 B1 */
} else { /*pend1 and pend2 close to 34 or on beging sphere for psc*/
if (line2 == 2) {
if (line1 == 2) {
if (countend)
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend,&pextr3,&pend1,&pbeg1); /* B B2 E2 4 E 3 E1 B1 */
else
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pextr3,&pend1,&pbeg1,NULL); /* B B2 E2 4 3 E1 B1 */
} else {
if (countend)
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend,&pend1,&pbeg1,NULL); /* B B2 E2 4 E E1 B1 */
else
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend1,&pbeg1,NULL,NULL); /* B B2 E2 4 E1 B1 */
}
} else {
if (line1 == 2) {
if (countend)
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend,&pextr3,&pend1,&pbeg1,NULL); /* B B2 E2 E 3 E1 B1 */
else
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr3,&pend1,&pbeg1,NULL,NULL); /* B B2 E2 3 E1 B1 */
} else {
if (countend)
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend,&pend1,&pbeg1,NULL,NULL); /* B B2 E2 E E1 B1 */
else
area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend1,&pbeg1,NULL,NULL,NULL); /* B B2 E2 E1 B1 */
}
}
}
}
}
}
} /*extra != 0*/
/* PSC types: add/remove the circular end-cap segments of the projection */
if ((interact->param->geotype[0] == PSC)||(interact->param->geotype[0] == CHPSC)) {
if (line1==2) {
/* add circle segment*/
a = AVER(pextr1.x,pend1.x); /*end to cutoff - pextr1 ,pend1 */
b = AVER(pextr1.y,pend1.y);
c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/
r2 = ( (partbeg.x-pend1.x)*(partbeg.x-pend1.x) + (partbeg.y-pend1.y)*(partbeg.y-pend1.y)); /*radius squared*/
area += r2*acos(sqrt(c/r2)) - sqrt(r2*c-c*c);
a = AVER(pbeg.x,pbeg1.x); /* between beginings - pbeg ,pbeg1 */
b = AVER(pbeg.y,pbeg1.y);
c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/
area += r2*acos(sqrt(c/r2)) - sqrt(r2*c-c*c);
} else {
if (line1==0) {
/* add circle segment*/
a = AVER(pextr1.x,pbeg.x); /* begining to cutoff*/
b = AVER(pextr1.y,pbeg.y);
c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/
r2 = ( (partbeg.x-pbeg.x)*(partbeg.x-pbeg.x) + (partbeg.y-pbeg.y)*(partbeg.y-pbeg.y)); /*radius squared*/
area += r2*acos(sqrt(c/r2)) - sqrt(r2*c-c*c);
}
}
if (line2==2) {
/* add circle segment*/
a = AVER(pextr3.x,pend2.x); /*end to cutoff - pextr3 ,pend2 */
b = AVER(pextr3.y,pend2.y);
c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/
r2 = ( (partbeg.x-pend2.x)*(partbeg.x-pend2.x) + (partbeg.y-pend2.y)*(partbeg.y-pend2.y)); /*radius squared*/
area += r2*acos(sqrt(c/r2)) - sqrt(r2*c-c*c);
a = AVER(pbeg.x,pbeg2.x); /* between beginings - pbeg ,pbeg2 */
b = AVER(pbeg.y,pbeg2.y);
c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/
area += r2*acos(sqrt(c/r2)) - sqrt(r2*c-c*c);
} else {
if (line2==0) {
/* add circle segment*/
a = AVER(pextr3.x,pbeg.x); /* begining to cutoff*/
b = AVER(pextr3.y,pbeg.y);
c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/
r2 = ( (partbeg.x-pbeg.x)*(partbeg.x-pbeg.x) + (partbeg.y-pbeg.y)*(partbeg.y-pbeg.y)); /*radius squared*/
area += r2*acos(sqrt(c/r2)) - sqrt(r2*c-c*c);
}
}
}
}
/*area finished*/
/*cm2 by average begining and end*/
cm2.x = AVER(pbeg.x,pend.x);
cm2.y = AVER(pbeg.y,pend.y);
cm2.z = 0.0;
/*length by size of end-benining*/
//length2 = sqrt( (pend.x-pbeg.x)*(pend.x-pbeg.x)+(pend.y-pbeg.y)*(pend.y-pbeg.y) );
inters.x = cm2.x - cm1.x;
inters.y = cm2.y - cm1.y;
inters.z = cm2.z - cm1.z;
//printf("cm2 %f %f %f inters %f %f %f \n",cm2.x,cm2.y,cm2.z,inters.x,inters.y,inters.z);
*ndist = sqrt(DOT(inters,inters));
/* flat attraction within pdis, cosine-squared switch out to the cutoff */
if (*ndist < interact->param->pdis) {
atrenergy = -interact->param->epsilon;
}
else {
atrenergy= cos(PIH*(*ndist-interact->param->pdis)/interact->param->pswitch);
atrenergy *= -atrenergy*interact->param->epsilon;
}
/* scaling function1: dependence on the length of intersetions plus SCALING WITH AREA*/
f0=(length1 + area / interact->param->sigma)*0.5;
atrenergy *= f0;
//printf(" %f %f %f %f %f %f %f %d %d %d \n",conf->particle[target].pos.z*conf->box.z,atrenergy, area, length1, length2,f0,ndist,extra,line1,line2);
//printf("%f %f %f %f\n",pbeg.x,pbeg.y,pend.x,pend.y);
//printf("%f %f %f %f %f %f\n",pbeg2.x,pend2.y,pextr2.x,pextr2.y,pextr1.x,pextr1.y);
} else {
atrenergy = 0.0;
}
} else {
/* non-patchy geometry: *ndist carries the closest distance from the caller */
if (*ndist < interact->param->pdis)
atrenergy = -interact->param->epsilon;
else {
atrenergy= cos(PIH*(*ndist-interact->param->pdis)/interact->param->pswitch);
atrenergy *= -atrenergy*interact->param->epsilon;
}
/*add wall scaling wall area/ particle arear.. to reflect that we have a wall not sphere */
atrenergy *= (interact->param->rcut*interact->param->rcut - (*ndist)*(*ndist))/(interact->param->sigma*interact->param->sigma) ;
}
//printf("%f %f \n",conf->particle[target].pos.z*conf->box.z,atrenergy);
return atrenergy;
}
/*..............................................................................*/
/* Initializes the array with the pointers to the energy function
*/
/* Local predicate: geotype belongs to the cylindrical patchy spherocylinder family. */
static int geo_is_cpsc(long g)
{
    return (g == CHCPSC || g == CPSC || g == TCHCPSC || g == TCPSC);
}

/* Local predicate: geotype belongs to the (spherically capped) patchy spherocylinder family. */
static int geo_is_psc(long g)
{
    return (g == CHPSC || g == PSC || g == TCHPSC || g == TPSC);
}

/* Fill intfce[][] with the pair-energy routine matching every combination of
   particle types; combinations with no known routine keep the sentinel
   &enoexist. */
void init_intfce(double (* intfce[MAXT][MAXT])(struct interacts *), struct topo * topo){
    long ga, gb;
    int ti, tj;

    for (ti = 0; ti < MAXT; ti++) {
        for (tj = 0; tj < MAXT; tj++) {
            /* start from "no such interaction"; matching cases overwrite below */
            intfce[ti][tj] = &enoexist;
            ga = topo->ia_params[ti][tj].geotype[0];
            gb = topo->ia_params[ti][tj].geotype[1];
            /* mixed cylindrical / spherically-capped patchy pair */
            if ((geo_is_cpsc(ga) && geo_is_psc(gb)) ||
                (geo_is_psc(ga) && geo_is_cpsc(gb))) {
                intfce[ti][tj] = &e_psc_cpsc;
            }
            /* both cylindrical patchy */
            if (geo_is_cpsc(ga) && geo_is_cpsc(gb)) {
                intfce[ti][tj] = &e_cpsc_cpsc;
            }
            /* both spherically-capped patchy */
            if (geo_is_psc(ga) && geo_is_psc(gb)) {
                intfce[ti][tj] = &e_psc_psc;
            }
            /* an SCN or SPN type on either side */
            if (ga == SCN || ga == SPN || gb == SCN || gb == SPN) {
                intfce[ti][tj] = &e_spn_or_scn;
            }
            /* two SCA or two SPA */
            if ((ga == SCA && gb == SCA) || (ga == SPA && gb == SPA)) {
                intfce[ti][tj] = &e_2sca_or_2spa;
            }
            /* SCA paired with SPA, either order */
            if ((ga == SCA && gb == SPA) || (ga == SPA && gb == SCA)) {
                intfce[ti][tj] = &e_spa_sca;
            }
            /* patchy spherocylinder with SPA, either order */
            if ((geo_is_psc(ga) && gb == SPA) || (ga == SPA && geo_is_psc(gb))) {
                intfce[ti][tj] = &e_psc_spa;
            }
            /* cylindrical patchy spherocylinder with SPA, either order */
            if ((geo_is_cpsc(ga) && gb == SPA) || (ga == SPA && geo_is_cpsc(gb))) {
                intfce[ti][tj] = &e_cpsc_spa;
            }
        }
    }
}
/*..............................................................................*/
/*
Compare energy change to temperature and based on Boltzmann probability
return either 0 to accept or 1 to reject the move
*/
/* Metropolis acceptance test: compare the energy change with temperature and
   return 0 to accept the move or 1 to reject it. */
int movetry(double energyold, double energynew, double temperature)
{
    double ran2(long *);

    /* downhill (or equal-energy) moves are always accepted */
    if (energynew <= energyold) {
        return 0;
    }
    /* uphill moves are accepted with Boltzmann probability exp(-dE/T) */
    return (exp(-1.0*(energynew-energyold)/temperature) > ran2(&seed)) ? 0 : 1;
}
/*..............................................................................*/
/*
* Calculate the different energy contributions. This is a merge of the different
* energy calculation functions (energyone, -chain, -all)
* 0: all
* 1: one
* 2: chain
*/
/*
 * Calculate the energy contribution selected by `mode`:
 *   0 - total energy over all particle pairs,
 *   1 - energy of particle `target` with every other particle,
 *   2 - energy of `target` with every particle NOT in chain `chainnum`
 *       (the chain list must be sorted in ascending particle index).
 * Pair energies are dispatched through the intfce function-pointer table;
 * an external-potential term (extere2) is added when topo->exter.exist.
 * Returns the summed energy, or 0.0 with a message on stderr for an
 * unknown mode.
 */
double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *),
    int mode, struct topo * topo, struct conf * conf, struct sim * sim, int chainnum)
{
    long i=0,j=0;
    /* local prototypes for helpers defined elsewhere in this file */
    double paire(long, long, double (* intfce[MAXT][MAXT])(struct interacts *),
        struct topo * topo, struct conf * conf);
    // double extere(long, struct topo * topo, struct conf * conf);
    double extere2(long, struct topo * topo, struct conf * conf);
    //DEBUG_SIM("Calculate the energy with mode %d", mode)
    double energy = 0;
    /* Calculates energy between particle "target" and the rest. Returns energy */
    if(mode == 1){
        if (sim->pairlist_update) {
            /* a neighbour (pair) list is available: only loop over it */
#ifdef OMP
#pragma omp parallel for private(i) reduction (+:energy) schedule (dynamic)
#endif
            for (i = 0; i < sim->pairlist[target].num_pairs; i++){
                energy+= paire(target, sim->pairlist[target].pairs[i], intfce, topo, conf);
            }
        }
        else{
            /* no pair list: visit every particle except target itself */
#ifdef OMP
#pragma omp parallel for private(i) reduction (+:energy) schedule (dynamic)
#endif
            for (i = 0; i < target; i++) {
                energy+= paire(target, i, intfce, topo, conf);
            }
#ifdef OMP
#pragma omp parallel for private(i) reduction (+:energy) schedule (dynamic)
#endif
            for (i = target + 1; i < topo->npart; i++) {
                energy+= paire(target, i, intfce, topo, conf);
            }
        }
        /*add interaction with external potential*/
        if (topo->exter.exist)
            energy+= extere2(target,topo,conf);
    }
    /*
     * Calculates energy between particle "target" and the rest, skipping
     * particles of the given chain - particles have to be sorted in the chain!!
     * Similar to mode 1 but with the chain exception.
     */
    else if(mode == 2){
        /* j walks through topo->chainlist[chainnum]; chain members smaller
           than target are skipped as they are met (relies on sorted order).
           NOTE(review): this assumes target itself appears in the chain list
           at position j once the first loop finishes - confirm with callers. */
        //#ifdef OMP
        //#pragma omp parallel for private(i) reduction (+:energy) schedule (dynamic)
        //#endif
        for (i = 0; i < target; i++) {
            if (i != topo->chainlist[chainnum][j]) {
                energy+= paire(target, i, intfce, topo, conf);
            }
            else {
                j++;
            }
        }
        j++;   /* step over target's own entry in the chain list */
        //#ifdef OMP
        //#pragma omp parallel for private(i) reduction (+:energy) schedule (dynamic)
        //#endif
        for (i = target + 1; i < topo->npart; i++) {
            if (i != topo->chainlist[chainnum][j]) {
                energy+= paire(target, i, intfce, topo, conf);
            }
            else {
                j++;
            }
        }
        /*add interaction with external potential*/
        if (topo->exter.exist)
            energy+= extere2(target,topo,conf);
    }
    /* Calculates energy between all pairs. Returns energy */
    else if(mode == 0){
#ifdef OMP
#pragma omp parallel for private(i,j) reduction (+:energy) schedule (dynamic)
#endif
        for (i = 0; i < topo->npart - 1; i++) {
            for (j = i + 1; j < topo->npart; j++) {
                energy+= paire(i, j, intfce, topo, conf);
            }
            /*for every particle add interaction with external potential*/
            if (topo->exter.exist)
                energy+= extere2(i,topo,conf);
        }
        /*add interaction of last particle with external potential
          (the outer loop above stops at npart-2)*/
        if (topo->exter.exist)
            energy+= extere2(topo->npart-1,topo,conf);
    }
    else {
        fprintf(stderr, "ERROR: Wrong mode (%d) was given to calc_energy!", mode);
        return 0.0;
    }
    // DEBUG_SIM("Will return energy from calc_energy")
    //printf("energymove %f\n",energy);
    return energy;
}
/*..............................................................................*/
/*
Checks for overlaps between particle "target" and the rest. Returns 1 if overlap
detected, 0 otherwise.
*/
/* Check particle "target" against every other particle for hard overlap.
   Returns 1 as soon as an overlap is found, 0 otherwise. */
int forbidden(long npart, struct particles *particle,
    long target, struct vector box, struct ia_param ia_params[MAXT][MAXT])
{
    int overlap(struct particles, struct particles, struct vector,struct ia_param [MAXT][MAXT]);
    long other;

    for (other = 0; other < npart; other++) {
        if (other == target)
            continue;  /* a particle never overlaps itself */
        if (overlap(particle[target], particle[other], box, ia_params))
            return 1;
    }
    return 0;
}
/*..............................................................................*/
/*
Checks for overlaps between all pairs of particles. Returns 1 if overlap
detected, 0 otherwise.
*/
/* Check every pair of particles for hard overlap.
   Returns 1 as soon as an overlap is found, 0 otherwise. */
int checkall(long npart, struct particles *particle,
    struct vector box, struct ia_param ia_params[MAXT][MAXT])
{
    int overlap(struct particles, struct particles, struct vector,
        struct ia_param [MAXT][MAXT]);
    long first, second;

    for (first = 0; first < npart - 1; first++) {
        for (second = first + 1; second < npart; second++) {
            if (overlap(particle[first], particle[second], box, ia_params))
                return 1;
        }
    }
    return 0;
}
/*..............................................................................*/
/*
Optimize the maximum displacement within the specified limits and resets the
acceptance counters to zero.
*/
/* Adapt the maximum displacement x->mx toward a better acceptance ratio,
   clamp it to [lo, hi], and reset the acceptance/rejection counters. */
void optimizestep(struct disp *x, double hi, double lo)
{
    double newrmsd;

    newrmsd = x->mx * RATIO(*x);
    if (x->oldrmsd > 0) {
        /* Shrink when the last adjustment direction made things worse, grow
           otherwise.  The two symmetric branches of the original collapse to
           one equality test: shrink iff (rmsd dropped) == (we grew last time). */
        if ((newrmsd < x->oldrmsd) == (x->oldmx > 1)) {
            x->mx /= 1.05;
            x->oldmx = 0.95;
        } else {
            x->mx *= 1.05;
            x->oldmx = 1.05;
        }
    }
    if (newrmsd > 0) {
        x->oldrmsd = newrmsd;
    } else {
        /* nothing was accepted: reset the history and shrink the step */
        x->oldrmsd = 0.0;
        x->mx /= 1.05;
        x->oldmx = 0.95;
    }
    /* keep the displacement inside the allowed window */
    if (x->mx > hi) x->mx = hi;
    if (x->mx < lo) x->mx = lo;
    x->acc = x->rej = 0;
}
/*..............................................................................*/
/*
Optimize the maximum rotation within the specified limits and resets the
acceptance counters to zero. Rotation is given by cos of angle
larger rotation = smaller cos
*/
/* Adapt the maximum rotation toward a better acceptance ratio, clamp it to
   [lo, hi], and reset the counters.  The rotation is stored as a cosine, so a
   LARGER x->mx means a SMALLER rotation. */
void optimizerot(struct disp *x, double hi, double lo)
{
    double newrmsd;
    double factor;

    newrmsd = x->mx * RATIO(*x);
    if (x->oldrmsd > 0) {
        /* One multiplicative direction applied to both mx and oldmx.  The four
           branches of the original reduce to: use 0.99 iff (rmsd rose) ==
           (oldmx grew last time), 1.01 otherwise. */
        factor = ((newrmsd > x->oldrmsd) == (x->oldmx > 1)) ? 0.99 : 1.01;
        x->mx *= factor;
        x->oldmx *= factor;
    }
    if (newrmsd > 0) {
        x->oldrmsd = newrmsd;
    } else {
        /* nothing was accepted: reset the history and loosen slightly */
        x->oldrmsd = 0.0;
        x->mx *= 1.01;
        x->oldmx = 1.01;
    }
    /* keep the cosine inside the allowed window */
    if (x->mx > hi) x->mx = hi;
    if (x->mx < lo) x->mx = lo;
    x->acc = x->rej = 0;
}
/*................................................................................*/
/*
Accumulate a value into the statistics and update the mean and rms values.
*/
/* Fold a new sample x into the running statistics q and refresh the derived
   mean and rms values. */
void accumulate(struct stat *q, double x)
{
    q->sum += x;
    q->sum2 += x * x;
    q->samples++;
    q->mean = q->sum / q->samples;
    /* rms = sqrt(<x^2> - <x>^2); fabs() guards against tiny negative
       floating-point round-off under the square root */
    q->rms = sqrt(fabs(q->sum2 / q->samples -
                       q->sum * q->sum / q->samples / q->samples));
}
/* Print, for every type with a nonzero acceptance ratio, the maximum step
   (divided by scale) and the ratio itself. */
void printeqstat(struct disp *dat, double scale, int length)
{
    int t;

    for (t = 0; t < length; t++) {
        if (RATIO(dat[t]) > 0) {
            printf (" TYPE %d %.6lf / %.6lf\n", t, dat[t].mx/scale,RATIO(dat[t]));
        }
    }
}
/* Allocate the particle array (MAXN entries) inside conf.
   Returns 0 on success, 1 when the allocation fails. */
int memoryalloc(struct conf * conf)
{
    printf ("Allocating memory...\n");
    /* sizeof *ptr instead of sizeof(struct ...) ties the element size to the
       pointer's declared type, so the two cannot drift apart */
    conf->particle = malloc( sizeof *conf->particle * MAXN );
    if(conf->particle == NULL){
        return 1;
    }
    return 0;
}
/* Release all heap memory owned by conf, topo and sim.
   Returns 0 on success, 1 when the pairlist cleanup fails. */
int memorydealloc(struct conf * conf, struct topo * topo, struct sim * sim)
{
    int dealloc_pairlist(struct topo * topo, struct sim * sim);

    printf ("Deallocating memory...\n");
    /* free(NULL) is a no-op, so the previous NULL guards were redundant;
       null every pointer after freeing so later code cannot use it after free */
    free(conf->particle);
    conf->particle = NULL;
    free(sim->clusterlist);
    sim->clusterlist = NULL;
    free(sim->clustersenergy);
    sim->clustersenergy = NULL;
    free(topo->switchlist);
    topo->switchlist = NULL;
    if (sim->pairlist_update) {
        if (dealloc_pairlist(topo, sim)) {
            return 1;
        }
    }
    return 0;
}
/*............................................................................*/
/**
* nice malloc, which does the error checking for us
*/
/**
 * malloc wrapper that never returns NULL: on allocation failure it prints a
 * message to stderr and terminates the program with exit code 1.
 */
void * xmalloc (size_t num){
    void *ptr = malloc (num);

    if (ptr == NULL) {
        fprintf(stderr, "Couldn't allocate any memory!\n");
        exit(1);
    }
    return ptr;
}
/*............................................................................*/
/* *********************** GEOMETRICAL FUNCTIONS **************************** */
/*.........................PATCHY SPHEROCYLINDERS INTERACTION....................*/
/*................................................................................*/
/*
Calculate intersections of sc2 with a patch of sc1 and return them in
*/
/*
 * Calculate the intersections of spherocylinder part2 with the angular patch
 * (index patchnum) of spherocylinder part1 at cutoff distance rcut.  Line
 * parameters along part2->dir of the intersection points are appended to
 * intersections[] (the value 0 terminates the list).  Returns the number of
 * intersections added.
 * NOTE(review): because 0.0 is the list terminator, an intersection exactly
 * at line parameter 0 cannot be stored - confirm callers accept this.
 */
int psc_intersect(struct particles * part1, struct particles * part2,
    double halfl1, double halfl2, struct vector r_cm, double intersections[5], double rcut,
    struct ia_param * param, int which, int patchnum)
{
    int intrs;
    double a, b, c, d, e, x1, x2, rcut2;
    struct vector cm21, vec1, vec2, vec3, vec4;
    /* local prototypes for geometry helpers defined elsewhere in this file */
    struct vector vec_crossproduct(struct vector, struct vector);
    struct vector vec_sub(struct vector, struct vector);
    struct vector vec_create(double, double, double);
    struct vector vec_scale(struct vector, double);
    struct vector vec_perpproject(struct vector*, struct vector*);
    struct quat quat_create(struct vector, double, double);
    void vec_rotate(struct vector *, struct quat);
    void normalise(struct vector *);
    int find_intersect_plane(struct particles *, struct particles *, double,
        struct vector, struct vector, double, double, double *);
    int test_intrpatch(struct particles *, struct vector, double, double, double *,int);

    intrs=0;
    rcut2=rcut*rcut;
    /*1- do intersections of spherocylinder2 with patch of spherocylinder1 at
      cut distance C*/
    /*1a- test intersection with half planes of patch and look how far they are
      from spherocylinder. If closer then C we got itersection*/
    /* plane1 */
    /* find intersections of part2 with the plane spanned by part1's axis and patchsides[0] */
    intrs+=find_intersect_plane(part1,part2,halfl2,r_cm,part1->patchsides[0+2*patchnum],rcut,param->pcanglsw[which+2*patchnum],intersections);
    // printf("plane1 %d\n", intrs);
    /* plane2 */
    /* find intersections of part2 with the plane spanned by part1's axis and patchsides[1] */
    intrs+=find_intersect_plane(part1,part2,halfl2,r_cm,part1->patchsides[1+2*patchnum],rcut,param->pcanglsw[which+2*patchnum],intersections);
    /* a patch wider than 180 degrees (negative cos) with two plane hits would
       yield two separate segments - unsupported, abort */
    if ( (intrs == 2 ) && (param->pcanglsw[which+2*patchnum] <0) ) {
        fprintf (stderr, "ERROR: Patch is larger than 180 degrees and we are getting two segments - this hasnot been programed yet.\n\n");
        exit (1);
    }
    // printf("plane2 %d\n", intrs);
    /*1b- test intersection with cylinder - it is at distance C*/
    if (intrs < 2 ) {
        /* quadratic a*x^2 + b*x + c = 0 for the line of part2 against the
           infinite cylinder of radius rcut around part1's axis */
        cm21=vec_scale(r_cm,-1.0);
        vec1=vec_crossproduct(cm21,part1->dir);
        vec2=vec_crossproduct(part2->dir,part1->dir);
        a = DOT(vec2,vec2);
        b = 2*DOT(vec1,vec2);
        c = -rcut*rcut + DOT(vec1,vec1);
        d = b*b - 4*a*c;
        if ( d >= 0) { /*there is intersection with infinite cylinder */
            x1 = (-b+sqrt(d))*0.5/a;/*parameter on line of SC2 determining intersection*/
            if ((x1 >=halfl2) || (x1 <= -halfl2)) intrs+=0; /*intersection is outside sc2*/
            else {
                /* vectors from center os sc1 to intersection with infinite cylinder*/
                vec1.x=part2->dir.x*x1-r_cm.x;
                vec1.y=part2->dir.y*x1-r_cm.y;
                vec1.z=part2->dir.z*x1-r_cm.z;
                e = DOT(part1->dir,vec1);
                if ((e >=halfl1) || (e <= -halfl1)) intrs+=0; /*intersection is outside sc1*/
                else {
                    intrs+=test_intrpatch(part1,vec1,param->pcanglsw[which+2*patchnum],x1,intersections,patchnum);
                }
            }
            if ( d > 0 ){
                /* second root of the quadratic */
                x2 = (-b-sqrt(d))*0.5/a;/*parameter on line of SC2 determining intersection*/
                if ((x2 >=halfl2) || (x2 <= -halfl2)) intrs+=0; /*intersection is outside sc2*/
                else {
                    vec2.x = part2->dir.x*x2-r_cm.x;
                    vec2.y = part2->dir.y*x2-r_cm.y;
                    vec2.z = part2->dir.z*x2-r_cm.z;
                    e = DOT(part1->dir,vec2);
                    if ((e >=halfl1) || (e <= -halfl1)) intrs+=0; /*intersection is outside sc1*/
                    else {
                        intrs+=test_intrpatch(part1,vec2,param->pcanglsw[which+2*patchnum],x2,intersections,patchnum);
                    }
                }
            }
        }
    }
    // printf ("cylinder %d x1 %f x2 %f e %f\n", intrs, x1, x2, e);
    /*1c- test intersection with spheres at the end - it is at distace C*/
    if (intrs < 2 ) {
        /*centers of spheres*/
        /*relative to the CM of sc2*/
        vec1.x = part1->dir.x*halfl1 - r_cm.x;
        vec1.y = part1->dir.y*halfl1 - r_cm.y;
        vec1.z = part1->dir.z*halfl1 - r_cm.z;
        vec2.x = -part1->dir.x*halfl1 - r_cm.x;
        vec2.y = -part1->dir.y*halfl1 - r_cm.y;
        vec2.z = -part1->dir.z*halfl1 - r_cm.z;
        /*sphere1: quadratic for part2's line against the rcut sphere at cap 1*/
        a = DOT(part2->dir,part2->dir);
        b = 2.0*DOT(vec1,part2->dir);
        c = DOT(vec1,vec1)-rcut*rcut;
        d = b*b-4*a*c;
        if (d >= 0) { /*if d<0 there are no intersections*/
            x1= (-b + sqrt(d))*0.5/a; /*parameter on line of SC2 determining intersection*/
            if ((x1 >=halfl2) || (x1 <= -halfl2)) intrs+=0; /*intersection is outside sc2*/
            else {
                vec3.x = part2->dir.x*x1-r_cm.x;
                vec3.y = part2->dir.y*x1-r_cm.y;
                vec3.z = part2->dir.z*x1-r_cm.z;
                e = DOT(part1->dir,vec3);
                /* only points beyond the cylindrical section belong to the cap */
                if ((e >= halfl1) || (e <= -halfl1)) { /*if not intersection is inside sc1*/
                    intrs+=test_intrpatch(part1,vec3,param->pcanglsw[which+2*patchnum],x1,intersections,patchnum);
                }
            }
            if ( d > 0) {
                x2= (-b - sqrt(d))*0.5/a; /*parameter on line of SC2 determining intersection*/
                if ((x2 >=halfl2) || (x2 <= -halfl2)) intrs+=0; /*intersection is outside sc2*/
                else {
                    vec4.x = part2->dir.x*x2 - r_cm.x;
                    vec4.y = part2->dir.y*x2 - r_cm.y;
                    vec4.z = part2->dir.z*x2 - r_cm.z;
                    e = DOT(part1->dir,vec4);
                    if ((e >=halfl1) || (e <= -halfl1)) { /*if not intersection is inside sc1*/
                        intrs+=test_intrpatch(part1,vec4,param->pcanglsw[which+2*patchnum],x2,intersections,patchnum);
                    }
                }
            }
        }
        // printf ("sphere1 %d x1 %f x2 %f e %f\n", intrs, x1, x2, e);
        /*sphere2: same quadratic against the rcut sphere at cap 2*/
        a = DOT(part2->dir,part2->dir);
        b = 2.0*DOT(vec2,part2->dir);
        c = DOT(vec2,vec2)-rcut*rcut;
        d = b*b-4*a*c;
        if (d >= 0) { /*if d<0 there are no intersections*/
            x1= (-b + sqrt(d))*0.5/a; /*parameter on line of SC2 determining intersection*/
            if ((x1 >=halfl2) || (x1 <= -halfl2)) intrs+=0; /*intersection is outside sc2*/
            else {
                vec3.x = part2->dir.x*x1 - r_cm.x;
                vec3.y = part2->dir.y*x1 - r_cm.y;
                vec3.z = part2->dir.z*x1 - r_cm.z;
                e = DOT(part1->dir,vec3);
                if ((e >=halfl1) || (e <= -halfl1)) { /*if not intersection is inside sc1*/
                    intrs+=test_intrpatch(part1,vec3,param->pcanglsw[which+2*patchnum],x1,intersections,patchnum);
                }
            }
            if ( d > 0 ) {
                x2= (-b - sqrt(d))*0.5/a; /*parameter on line of SC2 determining intersection*/
                if ((x2 >=halfl2) || (x2 <= -halfl2)) intrs+=0; /*intersection is outside sc2*/
                else {
                    vec4.x = part2->dir.x*x2 - r_cm.x;
                    vec4.y = part2->dir.y*x2 - r_cm.y;
                    vec4.z = part2->dir.z*x2 - r_cm.z;
                    e = DOT(part1->dir,vec4);
                    if ((e >=halfl1) || (e <= -halfl1)) { /*if not intersection is inside sc1*/
                        intrs+=test_intrpatch(part1,vec4,param->pcanglsw[which+2*patchnum],x2,intersections,patchnum);
                    }
                }
            }
        }
        // printf ("sphere2 %d\n", intrs);
    }
    /*1d- if there is only one itersection shperocylinder ends within patch wedge
      set as second intersection end inside patch*/
    if (intrs < 2 ) {
        /*whole spherocylinder is in or all out if intrs ==0*/
        vec1.x = part2->dir.x*halfl2 - r_cm.x;
        vec1.y = part2->dir.y*halfl2 - r_cm.y;
        vec1.z = part2->dir.z*halfl2 - r_cm.z;
        /*vector from CM of sc1 to end of sc2*/
        /*check if it is inside sc1 (distance from axis / cap center)*/
        a=DOT(vec1,part1->dir);
        vec3.x = vec1.x - part1->dir.x*a;
        vec3.y = vec1.y - part1->dir.y*a;
        vec3.z = vec1.z - part1->dir.z*a;
        b=DOT(vec3,vec3);
        d = fabs(a)-halfl1;
        if ( d <= 0)
            c = b; /*is inside cylindrical part*/
        else
            c = d*d + b; /*is inside caps*/
        /*c is distance squared from line or end to test if is inside sc*/
        if (c < rcut2)
            intrs+=test_intrpatch(part1,vec1,param->pcanglsw[which+2*patchnum],halfl2,intersections,patchnum);
        if (intrs < 2 ) {
            /* same test for the opposite end of sc2 */
            vec2.x = -part2->dir.x*halfl2 - r_cm.x;
            vec2.y = -part2->dir.y*halfl2 - r_cm.y;
            vec2.z = -part2->dir.z*halfl2 - r_cm.z;
            /*check if it is inside sc1*/
            a=DOT(vec2,part1->dir);
            vec4.x = vec2.x - part1->dir.x*a;
            vec4.y = vec2.y - part1->dir.y*a;
            vec4.z = vec2.z - part1->dir.z*a;
            b=DOT(vec4,vec4);
            d = fabs(a) -halfl1;
            if (d <= 0)
                c = b; /*is inside cylindrical part*/
            else
                c = d*d + b; /*is inside caps*/
            /*c is distance squared from line or end to test if is inside sc*/
            if (c < rcut2)
                intrs+=test_intrpatch(part1,vec2,param->pcanglsw[which+2*patchnum],-1.0*halfl2,intersections,patchnum);
        }
        // printf ("ends %d\n", intrs);
    }
    return intrs;
}
/*................................................................................*/
/*
Find if vector vec has angular intersection with patch of sc1
*/
/* Test whether vector vec (from part1's axis) falls inside the angular patch
   patchnum of part1.  If it does and line parameter ti is not already stored,
   ti is appended to intersections[] (0 terminates the list).  Returns the
   number of intersections added (0 or 1). */
int test_intrpatch(struct particles * part1, struct vector vec, double cospatch,
    double ti, double intersections[5],int patchnum)
{
    struct vector vec_perpproject(struct vector*, struct vector*);
    void normalise(struct vector *);
    double cosang;
    int slot;

    /* project vec into the plane perpendicular to the particle axis */
    vec = vec_perpproject(&vec, &part1->dir);
    normalise(&vec);

    /* angular distance of the projected vector from the patch direction */
    cosang = DOT(part1->patchdir[patchnum], vec);
    if (cosang < cospatch)
        return 0;                  /* outside the patch wedge */

    /* scan for a duplicate (a boundary point already recorded) while walking
       to the first free slot */
    for (slot = 0; intersections[slot] != 0; slot++) {
        if (ti == intersections[slot])
            return 0;              /* already have this one - it is at a boundary */
    }
    intersections[slot] = ti;
    return 1;
}
/*................................................................................*/
/*
Find intersections of SC and plane defined by vector w_vec.and returns number of them
*/
/*
 * Find the intersection of spherocylinder part2 with the plane spanned by
 * part1's axis and the patch-side vector w_vec, on the patch side of the
 * plane and within distance rcut of part1.  A valid intersection's line
 * parameter ti (along part2->dir) is appended to intersections[] (0 is the
 * list terminator).  Returns the number of intersections added (0 or 1).
 */
int find_intersect_plane(struct particles * part1, struct particles * part2, double halfl2,
    struct vector r_cm, struct vector w_vec, double rcut, double cospatch, double intersections[5])
{
    int i, intrs;
    double a, c, d, ti, disti;
    struct vector nplane, d_vec;
    void normalise(struct vector *);
    struct vector vec_crossproduct(struct vector, struct vector);

    /* normal of the half-plane */
    nplane=vec_crossproduct(part1->dir,w_vec);
    normalise(&nplane);
    normalise(&w_vec);
    a = DOT(nplane, part2->dir);
    if (a == 0.0) intrs=0; /* no intersection: plane and sc are parallel */
    else {
        /* line parameter on part2 where its axis crosses the plane */
        ti = DOT(nplane,r_cm)/a;
        if ((ti > halfl2 ) || (ti < -halfl2)) intrs=0; /* no intersection: sc is too short */
        else {
            d_vec.x = ti * part2->dir.x - r_cm.x; /*vector from intersection point to CM*/
            d_vec.y = ti * part2->dir.y - r_cm.y;
            d_vec.z = ti * part2->dir.z - r_cm.z;
            c = DOT (d_vec, w_vec);
            if ( c * cospatch < 0) intrs=0; /* the intersection in plane is on other side of patch */
            else {
                /* NOTE(review): the axial extent along part1->dir is compared
                   against halfl2 (part2's half length); the CPSC variant
                   find_intersect_planec uses one halfl for both roles -
                   confirm halfl2 rather than part1's half length is intended */
                d = fabs(DOT (d_vec, part1->dir)) - halfl2;
                if (d <= 0) disti = c*c; /*is inside cylinder*/
                else disti = d*d + c*c; /*is inside patch*/
                if (disti > rcut*rcut) intrs=0; /* the intersection is outside sc */
                else {
                    intrs=1;
                    i=0;
                    /* 0 terminates the list; a duplicate ti means a boundary hit */
                    while (intersections[i] !=0) {
                        if (ti == intersections[i]) intrs=0; /* found intersection we already have -it is at boundary*/
                        i++;
                    }
                    if (intrs > 0) {
                        intersections[i]=ti;
                    }
                }
            }
        }
    }
    return intrs;
}
/*CPSC................................................................................*/
/*
Calculate intersections of sc2 with a patch of sc1 and return them in this works for cylindrical psc -CPSC
*/
/*
 * Calculate the intersections of spherocylinder part2 with the angular patch
 * (index patchnum) of CYLINDRICAL patchy spherocylinder part1 (CPSC: flat end
 * plates instead of spherical caps) at cutoff distance rcut.  Line parameters
 * along part2->dir are appended to intersections[] (0 terminates the list).
 * Returns the number of intersections added.
 */
int cpsc_intersect(struct particles * part1, struct particles * part2,
    double halfl1, double halfl2, struct vector r_cm, double intersections[5], double rcut,
    struct ia_param * param, int which, int patchnum)
{
    int intrs;
    double a, b, c, d, e, x1, x2, rcut2;
    struct vector cm21, vec1, vec2, vec3, vec4;
    /* local prototypes for geometry helpers defined elsewhere in this file */
    struct vector vec_crossproduct(struct vector, struct vector);
    struct vector vec_sub(struct vector, struct vector);
    struct vector vec_create(double, double, double);
    struct vector vec_scale(struct vector, double);
    struct vector vec_perpproject(struct vector*, struct vector*);
    struct quat quat_create(struct vector, double, double);
    void vec_rotate(struct vector *, struct quat);
    void normalise(struct vector *);
    int find_intersect_planec(struct particles *, struct particles *, double,
        struct vector, struct vector, double, double, double *);
    int test_intrpatch(struct particles *, struct vector, double, double, double *, int);

    intrs=0;
    rcut2=rcut*rcut;
    /*1- do intersections of spherocylinder2 with patch of spherocylinder1 at
      cut distance C*/
    /*1a- test intersection with half planes of patch and look how far they are
      from spherocylinder. If closer then C we got itersection*/
    /* plane1 */
    /* find intersections of part2 with the plane spanned by part1's axis and patchsides[0] */
    intrs+=find_intersect_planec(part1,part2,halfl2,r_cm,part1->patchsides[0+2*patchnum],rcut,param->pcanglsw[which+2*patchnum],intersections);
    // printf("plane1 %d\n", intrs);
    /* plane2 */
    /* find intersections of part2 with the plane spanned by part1's axis and patchsides[1] */
    intrs+=find_intersect_planec(part1,part2,halfl2,r_cm,part1->patchsides[1+2*patchnum],rcut,param->pcanglsw[which+2*patchnum],intersections);
    /* a patch wider than 180 degrees (negative cos) with two plane hits would
       yield two separate segments - unsupported, abort */
    if ( (intrs == 2 ) && (param->pcanglsw[which+2*patchnum] < 0) ) {
        fprintf (stderr, "ERROR: Patch is larger than 180 degrees and we are getting two segments - this hasnot been programed yet.\n\n");
        exit (1);
    }
    // printf("plane2 %d\n", intrs);
    /*1b- test intersection with cylinder - it is at distance C*/
    if (intrs < 2 ) {
        /* quadratic a*x^2 + b*x + c = 0 for the line of part2 against the
           infinite cylinder of radius rcut around part1's axis */
        cm21=vec_scale(r_cm,-1.0);
        vec1=vec_crossproduct(cm21,part1->dir);
        vec2=vec_crossproduct(part2->dir,part1->dir);
        a = DOT(vec2,vec2);
        b = 2*DOT(vec1,vec2);
        c = -rcut*rcut + DOT(vec1,vec1);
        d = b*b - 4*a*c;
        if ( d >= 0) { /*there is intersection with infinite cylinder */
            x1 = (-b+sqrt(d))*0.5/a; /*parameter on line of SC2 determining intersection*/
            if ((x1 >=halfl2) || (x1 <= -halfl2)) intrs+=0; /*intersection is outside sc2*/
            else {
                /* vectors from center os sc1 to intersection with infinite cylinder*/
                vec1.x=part2->dir.x*x1-r_cm.x;
                vec1.y=part2->dir.y*x1-r_cm.y;
                vec1.z=part2->dir.z*x1-r_cm.z;
                e = DOT(part1->dir,vec1);
                if ((e >=halfl1) || (e <= -halfl1)) intrs+=0; /*intersection is outside sc1*/
                else {
                    intrs+=test_intrpatch(part1,vec1,param->pcanglsw[which+2*patchnum],x1,intersections,patchnum);
                }
            }
            if ( d > 0 ){
                /* second root of the quadratic */
                x2 = (-b-sqrt(d))*0.5/a; /*parameter on line of SC2 determining intersection*/
                if ((x2 >=halfl2) || (x2 <= -halfl2)) intrs+=0; /*intersection is outside sc2*/
                else {
                    vec2.x = part2->dir.x*x2-r_cm.x;
                    vec2.y = part2->dir.y*x2-r_cm.y;
                    vec2.z = part2->dir.z*x2-r_cm.z;
                    e = DOT(part1->dir,vec2);
                    if ((e >=halfl1) || (e <= -halfl1)) intrs+=0; /*intersection is outside sc1*/
                    else {
                        intrs+=test_intrpatch(part1,vec2,param->pcanglsw[which+2*patchnum],x2,intersections,patchnum);
                    }
                }
            }
        }
    }
    // printf ("cylinder %d x1 %f x2 %f e %f\n", intrs, x1, x2, e);
    /*1c- test intersection with plates at the end - it is at distace C and in wedge*/
    if (intrs < 2 ) {
        a = DOT(part1->dir, part2->dir);
        /* NOTE(review): when the axes are perpendicular (a == 0) this RESETS
           intrs to 0, discarding any intersection found above; the analogous
           branches elsewhere use "intrs+=0" - confirm the reset is intended */
        if (a == 0.0) intrs=0; /* there is no intersection plane and sc are paralel*/
        else {
            /*plane cap1: end plate at +halfl1 along part1's axis*/
            vec1.x= r_cm.x + halfl1*part1->dir.x;
            vec1.y= r_cm.y + halfl1*part1->dir.y;
            vec1.z= r_cm.z + halfl1*part1->dir.z;
            x1 = DOT(part1->dir,vec1)/a; /*parameter on line of SC2 determining intersection*/
            if ((x1 > halfl2 ) || (x1 < -halfl2)) intrs+=0; /* there is no intersection plane sc is too short*/
            else {
                vec2.x = x1*part2->dir.x - vec1.x; /*vector from ENDPOINT to intersection point */
                vec2.y = x1*part2->dir.y - vec1.y;
                vec2.z = x1*part2->dir.z - vec1.z;
                b = DOT (vec2, vec2);
                if (b > rcut*rcut) intrs+=0; /* the intersection is outside sc */
                else {
                    intrs+=test_intrpatch(part1,vec2,param->pcanglsw[which+2*patchnum],x1,intersections,patchnum);
                }
            }
            // printf ("plane cap1 %d %f\n", intrs, x1);
            /*plane cap2: end plate at -halfl1 along part1's axis*/
            vec1.x= r_cm.x - halfl1*part1->dir.x;
            vec1.y= r_cm.y - halfl1*part1->dir.y;
            vec1.z= r_cm.z - halfl1*part1->dir.z;
            x2 = DOT(part1->dir,vec1)/a; /*parameter on line of SC2 determining intersection*/
            if ((x2 > halfl2 ) || (x2 < -halfl2)) intrs+=0; /* there is no intersection plane sc is too short*/
            else {
                vec2.x = x2*part2->dir.x - vec1.x; /*vector from ENDPOINT to intersection point */
                vec2.y = x2*part2->dir.y - vec1.y;
                vec2.z = x2*part2->dir.z - vec1.z;
                b = DOT (vec2, vec2);
                if (b > rcut*rcut) intrs+=0; /* the intersection is outside sc */
                else {
                    intrs+=test_intrpatch(part1,vec2,param->pcanglsw[which+2*patchnum],x2,intersections,patchnum);
                }
            }
            // printf ("plane cap2 %d %f\n", intrs,x2);
        }
    }
    /*1d- if there is only one itersection shperocylinder ends within patch wedge
      set as second intersection end inside patch*/
    if (intrs < 2 ) {
        /*whole spherocylinder is in or all out if intrs ==0*/
        vec1.x = part2->dir.x*halfl2 - r_cm.x;
        vec1.y = part2->dir.y*halfl2 - r_cm.y;
        vec1.z = part2->dir.z*halfl2 - r_cm.z;
        /*vector from CM of sc1 to end of sc2*/
        /*check if it is inside sc1 (only the cylindrical section counts for CPSC)*/
        a=DOT(vec1,part1->dir);
        vec3.x = vec1.x - part1->dir.x*a;
        vec3.y = vec1.y - part1->dir.y*a;
        vec3.z = vec1.z - part1->dir.z*a;
        b=DOT(vec3,vec3);
        d = fabs(a)-halfl1;
        if ( d <= 0) { /*is in cylindrical part*/
            /*b is distance squared from axis to test if is inside sc*/
            if (b < rcut2) intrs+=test_intrpatch(part1,vec1,param->pcanglsw[which+2*patchnum],halfl2,intersections,patchnum);
        }
        if (intrs < 2 ) {
            /* same test for the opposite end of sc2 */
            vec2.x = -part2->dir.x*halfl2 - r_cm.x;
            vec2.y = -part2->dir.y*halfl2 - r_cm.y;
            vec2.z = -part2->dir.z*halfl2 - r_cm.z;
            /*check if it is inside sc1*/
            a=DOT(vec2,part1->dir);
            vec4.x = vec2.x - part1->dir.x*a;
            vec4.y = vec2.y - part1->dir.y*a;
            vec4.z = vec2.z - part1->dir.z*a;
            b=DOT(vec4,vec4);
            d = fabs(a) -halfl1;
            if (d <= 0) {
                /*b is distance squared from axis to test if is inside sc*/
                if (b < rcut2) intrs+=test_intrpatch(part1,vec2,param->pcanglsw[which+2*patchnum],-1.0*halfl2,intersections,patchnum);
            }
        }
        // printf ("ends %d\n", intrs);
    }
    return intrs;
}
/*CPSC................................................................................*/
/*
Find intersections of plane defined by vector w_vec.and returns number of them - for cylindrical psc -CPSC
*/
/*
 * CPSC variant of find_intersect_plane: find the intersection of part2 with
 * the plane spanned by part1's axis and the patch-side vector w_vec.  Unlike
 * the PSC version, only intersections within the cylindrical section of
 * part1 (no end caps) are accepted.  A valid intersection's line parameter
 * ti (along part2->dir) is appended to intersections[] (0 terminates the
 * list).  Returns the number of intersections added (0 or 1).
 */
int find_intersect_planec(struct particles * part1, struct particles * part2, double halfl,
    struct vector r_cm, struct vector w_vec, double rcut, double cospatch, double intersections[5])
{
    int i, intrs=0;
    double a, c, d, ti, disti;
    struct vector nplane, d_vec;
    void normalise(struct vector *);
    struct vector vec_crossproduct(struct vector, struct vector);

    /* normal of the half-plane */
    nplane=vec_crossproduct(part1->dir,w_vec);
    normalise(&nplane);
    normalise(&w_vec);
    a = DOT(nplane, part2->dir);
    if (a == 0.0) intrs=0; /* no intersection: plane and sc are parallel */
    else {
        /* line parameter on part2 where its axis crosses the plane */
        ti = DOT(nplane,r_cm)/a;
        if ((ti > halfl ) || (ti < -halfl)) intrs=0; /* no intersection: sc is too short */
        else {
            d_vec.x = ti*part2->dir.x - r_cm.x; /*vector from intersection point to CM*/
            d_vec.y = ti*part2->dir.y - r_cm.y;
            d_vec.z = ti*part2->dir.z - r_cm.z;
            c = DOT (d_vec, w_vec);
            if ( c *cospatch < 0) intrs=0; /* the intersection in plane is on other side of patch */
            else {
                d = fabs(DOT (d_vec, part1->dir)) - halfl;
                if (d <= 0) {
                    /* inside the cylindrical section only (flat-ended CPSC) */
                    disti= c*c; /*is inside cylinder*/
                    if (disti > rcut*rcut) intrs=0; /* the intersection is outside sc */
                    else {
                        intrs=1;
                        i=0;
                        /* 0 terminates the list; a duplicate ti means a boundary hit */
                        while (intersections[i] !=0) {
                            if (ti == intersections[i]) intrs=0; /* found intersection we already have -it is at boundary*/
                            i++;
                        }
                        if (intrs > 0) intersections[i]=ti;
                    }
                }
            }
        }
    }
    return intrs;
}
/*..................................................................................*/
/*
Find the projection of a cpsc onto the plane with normal (0,0,1), including the
cutoff, and return vectors to its beginning, end, and center of mass
*/
int psc_wall(struct vector* pbeg, struct vector* pend, struct vector* projectdir,
struct vector* partdir, BOOL* positive, double * cutdist,
struct vector *partbeg, struct vector *partend)
{
struct vector vec1;
double k,x1,x2,y1,y2,a,b,c,e,d;
void projectinz( struct vector* vec1, struct vector * projectdir, struct vector* result);
void normalise(struct vector*);
if (( (*positive)&& (projectdir->z > 0) ) || ( (!(*positive))&& (projectdir->z < 0) ))
return 0;
if ( fabs(partbeg->z) > (*cutdist) )
return 0;
/* we might have interacting segment*/
x2 = 0.0;
y2 = 0.0;
/*begining point*/
/*if begining projected along particle direction is within cutoff */
if (fabs(partdir->z) > ZEROTOL2) {
projectinz(partbeg,partdir,pbeg);
a=0;
}
else {
/*we need some starting point*/
vec1.x = 2.0*partbeg->x - partend->x;
vec1.y = 2.0*partbeg->y - partend->y;
vec1.z = 2.0*partbeg->z - partend->z;
projectinz(&vec1,projectdir,pbeg);
a=1;
}
if (partdir->z != 0) {
b = fabs(partbeg->z / partdir->z);
} else {
b = (*cutdist)+1.0;
}
if ( (b > (*cutdist)) || (a==1)) {
/*else beginig is at sphere, find intersections with sphere of cutoff radius*/
if ( fabs(projectdir->z) > ZEROTOL2) {
projectinz(partbeg,projectdir,pend);
} else {
pend->x = pbeg->x + projectdir->x;
pend->y = pbeg->y + projectdir->y;
}
if (pend->y == pbeg->y) {
y1=pbeg->y;
y2=pbeg->y;
a=sqrt( (*cutdist)*(*cutdist) - partbeg->z*partbeg->z - (pbeg->y-partbeg->y)*(pbeg->y-partbeg->y) );
x1 = partbeg->x + a;
x2 = partbeg->x - a;
if (pend->x > pbeg->x) {/*select the right intersection*/
pbeg->x = x2;
x2 = x1;
} else {
pbeg->x = x1;
}
pbeg->y = y1;
} else {
k = (pend->x - pbeg->x)/ (pend->y - pbeg->y);
a = k*k +1;
b = partbeg->y + k*k*pbeg->y - k*pbeg->x + k*partbeg->x;
c = partbeg->y*partbeg->y + partbeg->z*partbeg->z - (*cutdist)*(*cutdist) + (k*pbeg->y - pbeg->x + partbeg->x)*(k*pbeg->y - pbeg->x + partbeg->x);
e = b*b-a*c;
if (e < 0) {
return 0; /*tehre might be no intersection with sphere*/
}
d = sqrt(e);
if (pend->y > pbeg->y) {/*select the right intersection*/
y1 = (b - d ) /a;
y2 = (b + d ) /a;
}
else {
y1 = (b + d ) /a;
y2 = (b - d ) /a;
}
x1 = k * (y1 - pbeg->y) + pbeg->x;
x2 = k * (y2 - pbeg->y) + pbeg->x;
pbeg->x = x1;
pbeg->y = y1;
pbeg->z = 0.0;
}
}
//printf("pscwall beg %f %f \n",pbeg->x,pbeg->y);
/*end point*/
a = -(*cutdist) * projectdir->z; /*z coordinate of point where projection is in cut distance*/
//printf("sphere end %f %f ",a,partend->z);
if ( ((partend->z < a)&&(*positive)) || ((a < partend->z)&&(!(*positive))) ){
/*end is within cut off - second sphere*/
/*if this is the case vec1 is end of pherocylinder and pend is its projection*/
if (projectdir->z != 0) {
projectinz(partend,projectdir,pend);
} else {
pend->x = pbeg->x + projectdir->x;
pend->y = pbeg->y + projectdir->y;
}
if (pend->y == pbeg->y) {
y1=pend->y;
y2=pend->y;
a=sqrt( (*cutdist)*(*cutdist) - partend->z*partend->z - (pend->y-partend->y)*(pend->y-partend->y) );
x1 = partend->x + a;
x2 = partend->x - a;
if (pbeg->x > pend->x) {/*select the right intersection*/
pend->x = x2;
} else {
pend->x = x1;
}
pend->y = y1;
} else {
k = (pbeg->x - pend->x)/ (pbeg->y - pend->y);
a = k*k +1;
b = partend->y + k*k*pend->y - k*pend->x + k*partend->x;
c = partend->y*partend->y + partend->z*partend->z - (*cutdist)*(*cutdist) + (k*pend->y - pend->x + partend->x)*(k*pend->y - pend->x + partend->x);
e = b*b-a*c;
if (e < 0) {
return 0; /*there might be no intersection with sphere*/
}
d = sqrt(e);
if (pbeg->y > pend->y) {/*select the right intersection*/
y1 = (b - d ) /a;
y2 = (b + d ) /a;
}
else {
y1 = (b + d ) /a;
y2 = (b - d ) /a;
}
x1 = k * (y1 - pend->y) + pend->x;
x2 = k * (y2 - pend->y) + pend->x;
pend->x = x1;
pend->y = y1;
pend->z = 0.0;
}
} else {
if ( ((partbeg->z < a)&&(*positive)) || ((a < partbeg->z)&&(!(*positive))) ) {
/*end is at cutoff going through cylindrical part*/
//printf("cylinder ");
b = (a - partbeg->z)/ partdir->z;
vec1.x = partbeg->x + b * partdir->x;
vec1.y = partbeg->y + b * partdir->y;
vec1.z = a;
projectinz(&vec1,projectdir,pend);
} else {
/* also projected end is within the same sphere as begining- no contribution from cylinder*/
if (x2 == 0.0 ) {
//printf("sphere beg ");
if (projectdir->z != 0) {
projectinz(partbeg,projectdir,pend);
} else {
pend->x = pbeg->x + projectdir->x;
pend->y = pbeg->y + projectdir->y;
}
if (pend->y == pbeg->y) {
y1=pbeg->y;
y2=pbeg->y;
a=sqrt( (*cutdist)*(*cutdist) - partbeg->z*partbeg->z - (pbeg->y-partbeg->y)*(pbeg->y-partbeg->y) );
x1 = partbeg->x + a;
x2 = partbeg->x - a;
if (pend->x > pbeg->x) {/*select the right intersection*/
pend->x = x1;
} else {
pend->x = x2;
}
pend->y = y1;
} else {
k = (pend->x - pbeg->x)/ (pend->y - pbeg->y);
a = k*k +1;
b = partbeg->y + k*k*pbeg->y - k*pbeg->x + k*partbeg->x;
c = partbeg->y*partbeg->y + partbeg->z*partbeg->z - (*cutdist)*(*cutdist) + (k*pbeg->y - pbeg->x + partbeg->x)*(k*pbeg->y - pbeg->x + partbeg->x);
e = b*b-a*c;
if (e < 0) {
return 0; /*tehre might be no intersection with sphere*/
}
d = sqrt(e);
if (pend->y > pbeg->y) {/*select the right intersection*/
y1 = (b - d ) /a;
y2 = (b + d ) /a;
}
else {
y1 = (b + d ) /a;
y2 = (b - d ) /a;
}
x1 = k * (y1 - pbeg->y) + pbeg->x;
x2 = k * (y2 - pbeg->y) + pbeg->x;
pend->x = x1;
pend->y = y1;
pend->z = 0.0;
}
} else {
pend->x = x2;
pend->y = y2;
pend->z = 0.0;
}
return 2; /*line end is on sphere of particle begining = no cylindrical cutoff*/
}
}
return 1;
}
int cpsc_wall(struct vector* pbeg, struct vector* pend, struct vector* projectdir,
              struct vector* partdir, double* halfl, BOOL* orientin,
              BOOL* positive, double* rcmz, double * cutdist,
              struct vector *partbeg, struct vector *partend)
{
    struct vector shadow;
    double t;
    void projectinz( struct vector* vec1, struct vector * projectdir, struct vector* result);

    /* the projection direction must point towards the wall for the chosen side,
       otherwise there is no interaction at all */
    if ( (*positive) ? (projectdir->z >= 0) : (projectdir->z <= 0) )
        return 0;

    /* project the beginning of the spherocylinder; if its projected closer
       point lies beyond the cutoff there is no interaction */
    shadow = *partbeg;
    if ( !(-shadow.z/projectdir->z < (*cutdist)) )
        return 0;
    projectinz(&shadow,projectdir,pbeg);

    /* we have an interacting segment: find the z where it stops interacting */
    if ( -partend->z/projectdir->z < (*cutdist) ) {
        shadow.z = partend->z;                 /* whole segment interacts */
    } else {
        shadow.z = -(*cutdist)*projectdir->z;  /* clip at the cutoff distance */
    }

    /* parameter along the particle axis corresponding to that z */
    if (partdir->z != 0.0) {
        t = (shadow.z - (*rcmz)) / partdir->z;
    } else {
        /* axis parallel to the wall: take the appropriate end */
        t = (*orientin) ? -(*halfl) : (*halfl);
    }
    shadow.x = partdir->x * t;
    shadow.y = partdir->y * t;
    projectinz(&shadow,projectdir,pend);

    return 1;
}
/* Compute extreme points (pextr1..pextr4) where the cutoff region around the
   spherocylinder (partbeg..partend, axis partdir) meets the z=0 wall plane,
   projected along projectdir.
   Returns 0 when there is no cutoff contribution from the cylindrical part,
   2 when the end had no sphere intersection and a projected substitute point
   was stored in pextr3, and 1 otherwise.
   NOTE(review): the geometric meaning of the four extreme points is inferred
   from the quadratic-intersection algebra below — confirm against callers. */
int cutprojectatwall(struct vector* pextr1, struct vector* pextr2, struct vector* pextr3, struct vector* pextr4,
struct vector* projectdir, struct vector* partdir, double * cutdist,
struct vector *partbeg, struct vector *partend, struct vector *pend,
double *cuttoproject, BOOL* orientin)
{
double y1,y2,O2z,det,a,b,dirydirz,dir2x,dir2y,dir2z,dirzldiry;
void projectinz( struct vector* vec1, struct vector * projectdir, struct vector* result);
/* precompute products of the axis-direction components */
dirydirz = partdir->y * partdir->z;
dir2x = partdir->x * partdir->x;
dir2y = partdir->y * partdir->y;
dir2z = partdir->z * partdir->z;
a = 1/(dir2x+dir2y);
if (partdir->x != 0) {
/* general case: axis has an x component; solve the quadratic in y for the
   intersection of the cutoff sphere around the beginning with the plane */
O2z = partbeg->z * partbeg->z;
b=dir2y*dir2z*O2z - (dir2x+dir2y) * (O2z*(dir2x+dir2z)- (*cutdist)*(*cutdist)*dir2x);
if (b < 0 ) {
/*no cutoff from cylindrical part*/
return 0;
}
det = sqrt(b);
y1 = partbeg->y + (dirydirz*partbeg->z + det )*a;
y2 = partbeg->y + (dirydirz*partbeg->z - det )*a;
/* orientation decides which root belongs to which extreme point */
if (( (partdir->x > 0)&&(!(*orientin)) ) || ( (partdir->x < 0)&&(*orientin) )) {
pextr1->y = y1;
pextr2->y = y2;
} else {
pextr1->y = y2;
pextr2->y = y1;
}
pextr1->x = partbeg->x + (partbeg->z*partdir->z - (pextr1->y - partbeg->y)*partdir->y) / partdir->x;
pextr2->x = partbeg->x + (partbeg->z*partdir->z - (pextr2->y - partbeg->y)*partdir->y) / partdir->x;
/* same quadratic for the cutoff sphere around the end of the particle */
O2z = partend->z * partend->z;
b= dir2y*dir2z*O2z - (dir2x+dir2y) * (O2z*(dir2x+dir2z)- (*cutdist)*(*cutdist)*dir2x);
if (b >= 0) { /*we have intersections from end*/
det = sqrt(b);
y1 = partend->y + (dirydirz * partend->z + det )*a;
y2 = partend->y + (dirydirz * partend->z - det )*a;
//printf("det %f y1 %f y2 %f \n", det,y1,y2);
if (( (partdir->x > 0)&&(!(*orientin)) ) || ( (partdir->x < 0)&&(*orientin) )) {
pextr3->y = y1;
pextr4->y = y2;
} else {
pextr3->y = y2;
pextr4->y = y1;
}
pextr3->x = partend->x + (partend->z*partdir->z - (pextr3->y - partend->y)*partdir->y) / partdir->x;
pextr4->x = partend->x + (partend->z*partdir->z - (pextr4->y - partend->y)*partdir->y) / partdir->x;
} else {
/*no intersection at the end the cutoff intersects the plane
in the perpendicular projection of line segemnt, so we have to use that point */
if (partdir->z == 0) {
/* axis parallel to wall but no end intersection: geometrically impossible
   here, treat as internal error */
fprintf (stderr, "\nERROR: Something went wrong in calculation of projection.\n\n");
exit (1);
} else {
/* substitute: point on the axis at z = *cuttoproject, projected to the plane */
a = ((*cuttoproject) - partbeg->z)/ partdir->z;
//if ( projectdir->y * partdir->x < 0 )
pextr3->x = partbeg->x + a * partdir->x;
pextr3->y = partbeg->y + a * partdir->y;
pextr3->z = (*cuttoproject);
//printf("before proj %f %f dir %f %f %f ",pextr3->x,pextr3->y,projectdir->x,projectdir->y,projectdir->z);
projectinz(pextr3,projectdir,pextr4);
pextr3->x = pextr4->x;
pextr3->y = pextr4->y;
pextr3->z = 0.0;
//printf("after proj %f %f \n",pextr3->x,pextr3->y);
return 2;
}
}
} else {
if (partdir->y != 0) {
/* axis has no x component: intersections are symmetric in x around the axis */
dirzldiry = partdir->z/partdir->y;
y1 = partbeg->y + partbeg->z * dirzldiry;
det = sqrt( (*cutdist)*(*cutdist) - partbeg->z * partbeg->z * (1+dirzldiry*dirzldiry) );
if (( (partdir->y > 0)&&(!(*orientin)) ) || ( (partdir->y < 0)&&(*orientin) )) {
pextr1->x = partbeg->x + det;
pextr2->x = partbeg->x - det;
} else {
pextr1->x = partbeg->x - det;
pextr2->x = partbeg->x + det;
}
pextr1->y = y1;
pextr2->y = y1;
/* same construction for the end sphere */
y1 = partend->y + partend->z * dirzldiry;
b = (*cutdist)*(*cutdist) - partend->z * partend->z * (1+dirzldiry*dirzldiry);
if (b >= 0) { /*we have intersections from end*/
det = sqrt(b);
if (( (partdir->y > 0)&&(!(*orientin)) ) || ( (partdir->y < 0)&&(*orientin) )) {
pextr3->x = partend->x + det;
pextr4->x = partend->x - det;
} else {
pextr3->x = partend->x - det;
pextr4->x = partend->x + det;
}
pextr3->y = y1;
pextr4->y = y1;
} else {
/*no intersection at the end the cutoff intersects the plane
in the perpendicular projection of line segemnt, so we have to use that point */
if (partdir->z == 0) {
fprintf (stderr, "\nERROR: Something went wrong in calculation of projection.\n\n");
exit (1);
} else {
/* substitute point on the axis at the cutoff z, the other corner is
   taken from the previously computed projected end (pend) */
a = ((*cutdist) - partbeg->z)/ partdir->z;
y1 = a * partdir->y + partbeg->y;
if ( projectdir->x * partdir->y > 0 ) {
pextr3->x = a * partdir->x + partbeg->x;
pextr3->y = y1;
pextr4->x = pend->x;
pextr4->y = pend->y;
}else {
pextr3->x = pend->x;
pextr3->y = pend->y;
pextr4->x = a * partdir->x + partbeg->x;
pextr4->y = y1;
}
}
}
} else {
return 0; /* if perpendicular to plane we don't have any intersections*/
}
}
return 1;
}
/* Project point *vec1 along direction *projectdir onto the plane z = 0.
   The caller must guarantee projectdir->z != 0. */
void projectinz(struct vector* vec1, struct vector* projectdir,struct vector * projection)
{
    projection->z = 0;
    /* per-component evaluation kept in the original order */
    projection->x = vec1->x - vec1->z * projectdir->x/projectdir->z;
    projection->y = vec1->y - vec1->z * projectdir->y/projectdir->z;
}
/* Area of the quadrilateral given by four points in the z=0 plane,
   computed as the sum of two triangle areas via 2D cross products. */
double areafourpoints(struct vector * pbeg, struct vector * pend, struct vector * pbeg1, struct vector * pend1 )
{
    double ax, ay, bx, by;
    double area;

    /* triangle 1: |(pbeg1 - pbeg) x (pend - pbeg)| / 2 */
    ax = pbeg1->x - pbeg->x;
    ay = pbeg1->y - pbeg->y;
    bx = pend->x - pbeg->x;
    by = pend->y - pbeg->y;
    area = fabs(ax*by - ay*bx)*0.5;

    /* triangle 2: |(pend - pend1) x (pbeg1 - pend1)| / 2 */
    ax = pend->x - pend1->x;
    ay = pend->y - pend1->y;
    bx = pbeg1->x - pend1->x;
    by = pbeg1->y - pend1->y;
    area += fabs(ax*by - ay*bx)*0.5;

    return area;
}
/* Area of a polygon given by up to eight points in the z=0 plane.
   p1..p3 are mandatory; p4..p8 may be NULL, and the first NULL ends the
   polygon (the nested original stopped at the first missing point too).
   The area is accumulated as a triangle fan anchored at p1. */
double areaeightpoints(struct vector * p1, struct vector * p2, struct vector * p3, struct vector * p4,
                       struct vector * p5, struct vector * p6,struct vector * p7, struct vector * p8)
{
    struct vector *poly[8];
    double v1x, v1y, v2x, v2y;
    double area = 0.0;
    int i;

    poly[0] = p1; poly[1] = p2; poly[2] = p3; poly[3] = p4;
    poly[4] = p5; poly[5] = p6; poly[6] = p7; poly[7] = p8;

    /* first triangle (p1,p2,p3) is unconditional */
    v1x = poly[1]->x - poly[0]->x;
    v1y = poly[1]->y - poly[0]->y;
    v2x = poly[2]->x - poly[1]->x;
    v2y = poly[2]->y - poly[1]->y;
    area += fabs(v1x*v2y - v1y*v2x)*0.5;

    /* fan out from p1 while further points are supplied */
    for (i = 3; i < 8; i++) {
        if (poly[i] == NULL)
            break;
        v1x = poly[i-1]->x - poly[0]->x;
        v1y = poly[i-1]->y - poly[0]->y;
        v2x = poly[i]->x - poly[i-1]->x;
        v2y = poly[i]->y - poly[i-1]->y;
        area += fabs(v1x*v2y - v1y*v2x)*0.5;
    }

    return area;
}
/*..............................................................................*/
/*........................INPUT OUTPUT..........................................*/
/*..............................................................................*/
/*..............................................................................*/
/**
 * Convert string num into two integers, stored in value[0] and value[1].
 * When only one number is present, value[1] is set to 0.
 * Exits with an error message if the remainder is not a valid integer.
 */
void readii(char * num, int value[2]){
    char *rest, *tail;
    void trim (char *);

    value[0] = strtol(num, &rest, 10);
    trim(rest);
    if ((int)strlen(rest) == 0) {
        /* only one number supplied */
        value[1] = 0;
        return;
    }
    value[1] = strtol(rest, &tail, 10);
    if (*tail) {
        fprintf(stderr, "Could not convert %s into two integers\n", num);
        exit(1);
    }
}
/**
 * Convert string num into an int.
 * Exits with an error message if num is not a valid integer or does not
 * fit into an int (the original silently truncated out-of-range values).
 */
int readi(char * num){
    char *end;
    long i;

    errno = 0;                      /* strtol reports range errors via errno */
    i = strtol(num, &end, 10);
    /* reject trailing garbage, empty input, and values outside int range */
    if (*end || end == num || errno == ERANGE || i > INT_MAX || i < INT_MIN) {
        fprintf(stderr, "Could not convert %s into integer\n", num);
        exit(1);
    }
    return (int) i;
}
/**
 * convert string num into long
 */
long readl(char * num){
    char *stop;
    long result = strtol(num, &stop, 10);

    /* any trailing non-numeric characters make the whole token invalid */
    if (*stop != '\0') {
        fprintf(stderr, "Could not convert %s into long\n", num);
        exit(1);
    }
    return result;
}
/**
 * convert string num into double
 */
double readd(char * num){
    char *stop;
    double result = strtod(num, &stop);

    /* any trailing non-numeric characters make the whole token invalid */
    if (*stop != '\0') {
        fprintf(stderr, "Could not convert %s into double\n", num);
        exit(1);
    }
    return result;
}
/*
   Reads the run parameters from the external file "options". See the end of the
   code for a template. All comments starting with '#' are stripped out. The
   options are summarised on standard output and checked for validity of range.
*/
void read_options(struct sim* sim,char filename[30])
{
    int i;
    int num_options = -1;
    double transmx, rotmx, chainmmx, chainrmx;
    double angle, chain_angle;
    char *id, *value, *tokLine, *line;
    FILE *infile;
    void strip_comment (char *);
    void trim (char *);
    void readii(char * num, int value[2]);
    double readd(char *);
    long readl(char *);
    int readi(char *);

    /* for new options add before the last line */
    Option options[] = {
        {"write_cluster", Long, FALSE, &sim->write_cluster},
        {"adjust", Long, FALSE, &sim->adjust},
        {"movie", Long, FALSE, &sim->movie},
        {"nequil", Long, FALSE, &sim->nequil},
        {"nsweeps", Long, FALSE, &sim->nsweeps},
        {"nrepchange", Long, FALSE, &sim->nrepchange},
        {"paramfrq", Long, FALSE, &sim->paramfrq},
        {"report", Long, FALSE, &sim->report},
        {"seed", Long, FALSE, &seed},
        {"pairlist_update", Int, FALSE, &sim->pairlist_update},
        {"ptype", Int, FALSE, &sim->ptype},
        {"wlm", Int2, FALSE, &sim->wlm},
        {"wlmtype", Int, FALSE, &sim->wl.wlmtype},
        {"press", Double, FALSE, &sim->press},
        {"paralpress", Double, FALSE, &sim->paralpress},
        {"edge_mx", Double, FALSE, &sim->edge.mx},
        {"shave", Double, FALSE, &sim->shave},
        {"chainprob", Double, FALSE, &sim->chainprob},
        {"switchprob", Double, FALSE, &sim->switchprob},
        {"temper", Double, FALSE, &sim->temper},
        {"paraltemper", Double, FALSE, &sim->paraltemper},
        {"transmx", Double, FALSE, &transmx},
        {"rotmx", Double, FALSE, &rotmx},
        {"chainmmx", Double, FALSE, &chainmmx},
        {"chainrmx", Double, FALSE, &chainrmx},
        {"last", Int, FALSE, NULL}
    };
    /* count options; the {"last", ..., NULL} sentinel terminates the table */
    while(options[++num_options].var != NULL)
        ;

    /*--- 1. Read in values ---*/
    size_t line_size = (STRLEN + 1) * sizeof(char);
    line = (char *) malloc(line_size);
    infile = fopen(filename, "r");
    if (infile == NULL) {
        fprintf (stderr, "\nERROR: Could not open options file.\n\n");
        exit (1);
    }
    while(getline(&line, &line_size, infile) != -1){
        // strip comments
        strip_comment(line);
        trim(line);
        if(strlen(line) == 0){
            continue;
        }
        // tokenize "id = value"
        tokLine = line;
        id = strtok(tokLine, "=");
        if(id == NULL){
            fprintf(stderr, "error parsing Configuration line (%s)", line);
            free(line);
            exit(1);
        }
        trim(id);
        tokLine = NULL;
        value = strtok(tokLine, "=");
        /* BUGFIX: the NULL check must come before trim(value); a line
           without '=' made strtok return NULL and trim dereferenced it */
        if(value == NULL){
            fprintf(stderr, "error parsing Configuration line (%s)", line);
            free(line);
            exit(1);
        }
        trim(value);
        //printf("id: %s; value: %s\n", id, value);
        /* dispatch on the option's declared type */
        for(i = 0; i < num_options; i++){
            if(strcmp(id, options[i].id) == 0){
                if(options[i].type == Int2){
                    readii(value,*((int (*)[2]) options[i].var));
                    options[i].set = TRUE;
                    break;
                }
                if(options[i].type == Int){
                    *((int *) options[i].var) = readi(value);
                    options[i].set = TRUE;
                    break;
                }
                else if(options[i].type == Long){
                    *((long *) options[i].var) = readl(value);
                    options[i].set = TRUE;
                    break;
                }
                else if(options[i].type == Double){
                    *((double *) options[i].var) = readd(value);
                    options[i].set = TRUE;
                    break;
                }
                else {
                    fprintf(stderr, "Could not determine type of %s!\n", id);
                    free(line);
                    exit(1);
                }
            }
        }
        if(i == num_options){
            fprintf(stderr, "Unknown identifier %s!\nWill procede.\n", id);
        }
    }
    fclose (infile);
    free(line);

    /* Check whether all options have been read in */
    for(i = 0; i < num_options; i++){
        if(!options[i].set){
            fprintf(stderr, "option '%s' is not set!\n", options[i].id);
            exit(1);
        }
    }

    /*--- 2. Summarize results on standard output ---*/
    /* Density of close-packed spherocylinders */
    //  rho_cp = 2.0/(sqrt(2.0) + *length * sqrt(3.0));
    printf (" Pressure coupling type:                             %d\n", sim->ptype);
    printf (" Pressure:                                           %.8lf\n", sim->press);
    printf (" Replica exchange pressure:                          %.8lf\n", sim->paralpress);
    printf (" Average volume change attempts per sweep:           %.8lf\n", sim->shave);
    printf (" Equilibration sweeps:                               %ld\n", sim->nequil);
    printf (" Sweeps between step size adjustments:               %ld\n", sim->adjust);
    printf (" Production sweeps:                                  %ld\n", sim->nsweeps);
    printf (" Sweeps between statistics samples:                  %ld\n", sim->paramfrq);
    printf (" Sweeps between statistics reports:                  %ld\n", sim->report);
    printf (" Average chain move attempts per sweep:              %.8lf\n", sim->chainprob);
    printf (" Initial maximum displacement:                       %.8lf\n", transmx);
    printf (" Inititial maximum angular change (degrees):         %.8lf\n", rotmx);
    printf (" Inititial maximum box edge change:                  %.8lf\n", sim->edge.mx);
    printf (" Initial maximum chain displacement:                 %.8lf\n", chainmmx);
    printf (" Inititial maximum chain angular change (degrees):   %.8lf\n", chainrmx);
    printf (" Temperature in kT/e:                                %.8lf\n", sim->temper);
    printf (" Parallel tempering temperature in kT/e:             %.8lf\n", sim->paraltemper);
    printf (" Sweeps between replica exchange:                    %ld\n", sim->nrepchange);
    printf (" Wang-Landau method:                                 %d %d\n", sim->wlm[0],sim->wlm[1]);
    printf (" Calculate the Wang-Landau method for atom type:     %d\n", sim->wl.wlmtype);
    printf (" Average type switch attempts per sweep:             %.8lf\n", sim->switchprob);
    printf (" Number of Sweeps per pairlist update:               %d\n", sim->pairlist_update);
    printf (" Random number seed:                                 %ld\n", seed);
    printf (" Number of sweeps per writing out cluster info:      %ld\n", sim->write_cluster);
    if (sim->movie > 0) {
        printf (" Sweeps between movie frames:                      %ld\n", sim->movie);
    } else {
        printf (" No movie\n");
    }
    printf ("\n");

    if(sim->pairlist_update){
        printf(" A pairlist will be generated every %d steps. This is a greedy"
               " algorithm; make sure you don't have big chains etc.!\n",
               sim->pairlist_update);
    }

    /*--- 3. Validity checks ---*/
    if (rotmx < 0.0 || rotmx > 180) {
        fprintf (stderr, "ERROR: Maximum orientation change must be in range 0 to 180.\n\n");
        exit (1);
    }
    if (chainrmx < 0.0 || chainrmx > 180) {
        fprintf (stderr, "ERROR: Maximum orientation change for chains must be in range 0 to 180.\n\n");
        exit (1);
    }
    if ( (sim->ptype <0) || (sim->ptype>3) ) {
        fprintf (stderr, "ERROR: Unknown pressure coupling %d. Program only knows: 0 - anisotropic coupling, \
1 - isotropic coupling, 2 - isotropic in xy z=const, 3 - isotropic xy V=const.\n\n",sim->ptype);
        exit (1);
    }
    if ( (sim->wlm[0] <0) || (sim->wlm[0] > 7) || (sim->wlm[1] <0) || (sim->wlm[1] > 7) ) {
        fprintf (stderr, "ERROR: Unknown Wang-Landau method %d %d. Program only knows: 0 - none, \
1 - z-direction od 1st particle, 2 - pore in membrane, 3 - zorientation of 0th particle,\
4 - distance of fist two particles, 5 - pore around z-axis above CM,\
6 - pore around z-axis above 0th particle, 7 - number of particles in contact \n\n",sim->wlm[0],sim->wlm[1]);
        exit (1);
    }
    if ( (sim->wlm[0] == 0) && (sim->wlm[1] > 0) ) {
        fprintf (stderr, "ERROR: Wang-Landau method has to be set for first order parameter and then for second order parameter\n\n");
        exit (1);
    }
    if ( (sim->wlm[0] == 2) || (sim->wlm[0] == 5) || (sim->wlm[0] == 6) ) {
        if(sim->wl.wlmtype < 1){
            fprintf (stderr, "ERROR: Atom type for the Wang-Landau Method (%d) was false defined.\n\n",sim->wl.wlmtype);
            exit (1);
        }
        if ( (sim->wlm[1] == 2) || (sim->wlm[1] == 5) || (sim->wlm[1] == 6) ) {
            fprintf (stderr, "ERROR: Simulaneous use of two pore order parameters has not been implemented yet.\n\n");
            exit (1);
        }
    }

    /* we store maximum rotation as half angle - useful for quaterions*/
    angle = rotmx / 180.0 * PIH *0.5;
    rotmx = cos((rotmx)/180.0*PIH);
    chain_angle = chainrmx / 180.0 * PIH;
    chainrmx = cos((chainrmx)/180.0*PIH);
    sim->edge.mx *= 2.0;   /* The full range is -maxl to +maxl, i.e. spanning 2*maxl */
    transmx *= 2.0;   /* The full range is -maxr to +maxr, i.e. spanning 2*maxr */
    chainmmx *= 2.0;   /* The full range is -maxr to +maxr, i.e. spanning 2*maxr */

    /* apply the common step sizes to every particle / chain type */
    for (i=0;i<MAXT;i++) {
        sim->trans[i].mx = transmx;
        sim->rot[i].mx = rotmx;
        sim->rot[i].angle = angle;
    }
    for (i=0;i<MAXMT;i++) {
        sim->chainm[i].mx = chainmmx;
        sim->chainr[i].mx = chainrmx;
        sim->chainr[i].angle = chain_angle;
    }

    //parallel tempering
#ifdef MPI
    if ( (sim->temper != sim->paraltemper) && (sim->mpinprocs <2) ) {
        printf("ERROR: Paralllel tempering at single core does not work.\n\n");
        exit(1);
    }
    sim->dtemp = (sim->paraltemper - sim->temper )/(sim->mpinprocs-1);
    sim->temper += sim->dtemp * sim->mpirank;
    if ( (sim->press != sim->paralpress) && (sim->mpinprocs <2) ) {
        printf("ERROR: Pressure replica exchange at single core does not work.\n\n");
        exit(1);
    }
    sim->dpress = (sim->paralpress - sim->press )/(sim->mpinprocs-1);
    sim->press += sim->dpress * sim->mpirank;
    seed += sim->mpirank;
    sim->mpiexch.mx = sim->dtemp;
    sim->mpiexch.angle = sim->dpress;
#endif
}
/*..............................................................................*/
/*
Used by read_options to read a long integer with error checking.
NOT USED ANYMORE
*/
/*
   Read one line from infile and parse a long integer with error checking.
   On EOF or parse failure, print an error naming `error` and exit.
*/
long read_long(FILE *infile, char *error) {
    char line[500];
    long value;

    /* BUGFIX: check fgets success BEFORE parsing; on failure the buffer
       contents are indeterminate and must not be passed to sscanf */
    if (fgets(line, sizeof(line), infile) == NULL ||
        sscanf(line, "%ld", &value) != 1) {
        fprintf (stdout, "\nERROR reading %s from options file.\n\n", error);
        exit (1);
    }
    return value;
}
/*
   Read one line from infile and parse an int with error checking.
   On EOF or parse failure, print an error naming `error` and exit.
*/
int read_int(FILE *infile, char *error) {
    char line[500];
    int value;

    /* BUGFIX: check fgets success BEFORE parsing; on failure the buffer
       contents are indeterminate and must not be passed to sscanf */
    if (fgets(line, sizeof(line), infile) == NULL ||
        sscanf(line, "%d", &value) != 1) {
        fprintf (stdout, "\nERROR reading %s from options file.\n\n", error);
        exit (1);
    }
    return value;
}
/*..............................................................................*/
/*
Used by read_options to read a double precision with error checking.
NOT USED ANYMORE
*/
/*
   Read one line from infile and parse a double with error checking.
   On EOF or parse failure, print an error naming `error` and exit.
*/
double read_double(FILE *infile, char *error) {
    char line[500];
    double value;

    /* BUGFIX: check fgets success BEFORE parsing; on failure the buffer
       contents are indeterminate and must not be passed to sscanf */
    if (fgets(line, sizeof(line), infile) == NULL ||
        sscanf(line, "%le", &value) != 1) {
        fprintf (stdout, "\nERROR reading %s from options file.\n\n", error);
        exit (1);
    }
    return value;
}
/*..............................................................................*/
/****************************************************************************
* CONFIG INITIALIZATION
*****************************************************************************/
/*
Reads in the initial configuration from the file "config.init". Each line
contains the three components of the position vector and three components of
the direction vector and three components of patch direction for a spherocylinder.
The direction vector is normalised
after being read in. The configuration is checked for particle overlaps.
*/
/* Read the initial configuration from `filename` (config.init):
   first the box size, then one line per particle with position, direction,
   patch direction and (optionally) a switched flag. Positions are scaled to
   the unit cube, direction/patch vectors are normalised, and chains are made
   whole across periodic boundaries. Exits on any parse error. */
void init_config(struct topo * topo, struct conf * conf, struct sim * sim, char filename[30])
{
int err,fields,tmp_type;
long i,j,current,first;
FILE * infile;
char * line, line2[STRLEN];
size_t line_size = (STRLEN + 1) * sizeof(char);
line = (char *) malloc(line_size);
struct particles chorig[MAXCHL];
int overlap(struct particles, struct particles, struct vector,
struct ia_param [MAXT][MAXT]);
void normalise(struct vector *);
void ortogonalise(struct vector *, struct vector);
void usepbc(struct vector *, struct vector);
double anint(double);
void strip_comment (char *);
void trim (char *);
void aftercommand(char *, char *, char);
/* longest spherocylinder length among the self-interaction parameters,
   used only for the box-size sanity warnings below */
double maxlength = 0;
for(i = 0; i < MAXT; i++){
if(maxlength < topo->ia_params[i][i].len[0])
maxlength = topo->ia_params[i][i].len[0];
}
infile = fopen(filename, "r");
if (infile == NULL) {
fprintf (stderr, "\nERROR: Could not open config.init file.\n\n");
exit (1);
}
/* read the box size; if the first line does not parse, try the next line
   after stripping everything before the BOXSEP command character */
if(getline(&line, &line_size, infile) == -1){
fprintf (stderr, "ERROR: Could not read box size.\n\n");
exit (1);
}
strip_comment(line);
trim(line);
if (sscanf(line, "%le %le %le", &(conf->box.x), &(conf->box.y), &(conf->box.z)) != 3) {
if(getline(&line, &line_size, infile) == -1){
fprintf (stderr, "ERROR: Could not read box size.\n\n");
exit (1);
}
aftercommand(line2,line,BOXSEP);
strip_comment(line2);
trim(line2);
if (sscanf(line2, "%le %le %le", &(conf->box.x), &(conf->box.y), &(conf->box.z)) != 3) {
fprintf (stderr, "ERROR: Could not read box size.\n\n");
exit (1);
}
}
/* warn (but do not abort) when the box is small relative to the particles */
if (conf->box.x < maxlength * 2.0 + 2.0) {
printf ("WARNING: x box length is less than two spherocylinders long.\n\n");
}
if (conf->box.y < maxlength * 2.0 + 2.0) {
printf ("WARNING: y box length is less than two spherocylinders long.\n\n");
}
if (conf->box.z < maxlength * 2.0 + 2.0) {
printf ("WARNING: z box length is less than two spherocylinders long.\n\n");
}
DEBUG_INIT("Position of the particle");
/* one line per particle: pos(3) dir(3) patchdir(3) [switched] */
for (i=0; i < topo->npart; i++) {
if(getline(&line, &line_size, infile) == -1){
break;
}
strip_comment(line);
trim(line);
fields = sscanf(line, "%le %le %le %le %le %le %le %le %le %d",
&conf->particle[i].pos.x, &conf->particle[i].pos.y, &conf->particle[i].pos.z,
&conf->particle[i].dir.x, &conf->particle[i].dir.y, &conf->particle[i].dir.z,
&conf->particle[i].patchdir[0].x, &conf->particle[i].patchdir[0].y, &conf->particle[i].patchdir[0].z,
&conf->particle[i].switched);
/* secondary patch and chain directions are not read from file; zero them */
conf->particle[i].patchdir[1].x = conf->particle[i].patchdir[1].y = conf->particle[i].patchdir[1].z =0;
conf->particle[i].chdir[0].x = conf->particle[i].chdir[0].y = conf->particle[i].chdir[0].z =0;
conf->particle[i].chdir[1].x = conf->particle[i].chdir[1].y = conf->particle[i].chdir[1].z =0;
DEBUG_INIT("Line: %s\nNumber of Fields: %d", line, fields);
/* 9 fields means the switched flag was omitted: default to not switched */
if (fields == 9){
conf->particle[i].switched = 0;
fprintf(stdout, "WARNING: Particle %ld is assumed to be not switched!\n", i+1);
fields++;
}
if (fields != 10) {
fprintf (stderr, "ERROR: Could not read coordinates for particle %ld.\n \
Did you specify box size at the begining?\n\n", i+1);
free(line);
exit (1);
}
/* Scale position vector to the unit cube */
usepbc(&conf->particle[i].pos, conf->box );
conf->particle[i].pos.x /= conf->box.x;
conf->particle[i].pos.y /= conf->box.y;
conf->particle[i].pos.z /= conf->box.z;
/* non-spherical geotypes need a valid direction vector */
if ((topo->ia_params[conf->particle[i].type][conf->particle[i].type].geotype[0]<SP)&&( DOT(conf->particle[i].dir, conf->particle[i].dir) < ZEROTOL )) {
//DEBUG_INIT("Geotype = %d < %d", conf->particle[i].geotype,SP);
fprintf (stderr,
"ERROR: Null direction vector supplied for particle %ld.\n\n", i+1);
free(line);
exit (1);
} else {
normalise(&conf->particle[i].dir);
}
/* ...and a valid patch vector, made orthogonal to the axis */
if ((topo->ia_params[conf->particle[i].type][conf->particle[i].type].geotype[0]<SP)&&( DOT(conf->particle[i].patchdir[0], conf->particle[i].patchdir[0]) < ZEROTOL )) {
fprintf (stderr,
"ERROR: Null patch vector supplied for particle %ld.\n\n", i+1);
free(line);
exit (1);
} else {
ortogonalise(&conf->particle[i].patchdir[0],conf->particle[i].dir);
normalise(&conf->particle[i].patchdir[0]);
}
// Switch the type
if(conf->particle[i].switched){
if(conf->particle[i].switchtype == 0){
fprintf(stderr, "ERROR: Particle %ld switched even though it has no switchtype", i);
free(line);
exit(1);
}
tmp_type = conf->particle[i].type;
conf->particle[i].type = conf->particle[i].switchtype;
conf->particle[i].switchtype = tmp_type;
}
DEBUG_INIT("%ld:\t%lf\t%lf\t%lf", i, conf->particle[i].pos.x, conf->particle[i].pos.y, conf->particle[i].pos.z);
}
free(line);
/*Make chains WHOLE: shift each chain member relative to the chain's first
  particle, wrap into the original box, then shift back, so a chain is not
  split across the periodic boundary */
for (i=0;i<topo->chainnum;i++){
j=0;
current = topo->chainlist[i][0];
first = current;
chorig[0].pos = conf->particle[first].pos;
while (current >=0 ) {
/*shift the chain particle by first one*/
conf->particle[current].pos.x -= chorig[0].pos.x;
conf->particle[current].pos.y -= chorig[0].pos.y;
conf->particle[current].pos.z -= chorig[0].pos.z;
/*put it in orig box*/
conf->particle[current].pos.x -= anint(conf->particle[current].pos.x);
conf->particle[current].pos.y -= anint(conf->particle[current].pos.y);
conf->particle[current].pos.z -= anint(conf->particle[current].pos.z);
//printf("ant: %f %f %f\n",conf->particle[current].pos.x,conf->particle[current].pos.y,conf->particle[current].pos.z);
/*shot it back*/
conf->particle[current].pos.x += chorig[0].pos.x;
conf->particle[current].pos.y += chorig[0].pos.y;
conf->particle[current].pos.z += chorig[0].pos.z;
//printf("posstart: %f %f %f\n",conf->particle[current].pos.x,conf->particle[current].pos.y,conf->particle[current].pos.z);
j++;
current = topo->chainlist[i][j];
}
}
/* overlap check of the initial configuration is currently disabled */
err = 0;
//for (i=0; i < topo->npart-1; i++) {
//    for (j=i+1; j < topo->npart; j++) {
//        if ( overlap(conf->particle[i], conf->particle[j], conf->box, topo->ia_params) ) {
//            fprintf (stderr,
//                    "ERROR: Overlap in initial coniguration between particles %ld and %ld.\n",
//                    i+1, j+1);
//            err = 1;
//        }
//    }
//}
if (err) {
printf ("\n");
exit (1);
}
fclose (infile);
fflush (stdout);
}
/*..............................................................................*/
/****************************************************************************
* TOPOLOGY INITIALIZATION
*****************************************************************************/
/*
Create lists for chain operations: Connectivity list where it is written for each sc
with which sc it is connected. The order is important because spherocylinders have direction
First is interacting tail then head. Chain list where particles are assigned to chains to
which they belong
*/
void init_top(struct topo * topo, struct conf * conf, struct sim * sim,char filename[30])
{
long i,j,k,mol,maxch,maxpart;
FILE *infile;
char *pline=NULL, *dummy=NULL, *sysnames[MAXN];
char line[STRLEN], keystr[STRLEN], molname[STRLEN];
unsigned size;
long *sysmoln /*[MAXN]*/;
BOOL exclusions[MAXT][MAXT];
char *fgets2(char *, int , FILE *);
void strip_comment (char *);
void trim(char *);
int continuing(char *);
void upstring (char *);
int filltypes(char **, struct topo * topo);
int fillexter(char **, struct topo * topo);
int fillexclusions(char **, BOOL (*exclusions)[MAXT][MAXT]);
void beforecommand(char *, char *, char);
int fillmol(char *, char *, struct molecule * molecules, struct topo * topo);
int fillsystem(char *, char *[MAXN], long **);
void initparams(struct topo * topo);
void genparampairs(struct topo * topo, BOOL (*exclusions)[MAXT][MAXT]);
int topdealoc(char **, char *[MAXN], long **, struct molecule *);
struct molecule molecules[MAXMT];
if ((infile = fopen(filename, "r")) == NULL) {
fprintf (stderr, "\nTOPOLOGY ERROR: Could not open top.init file.\n\n");
exit (1);
}
fprintf (stdout, "Initialize chainlist...\n");
fflush(stdout);
sysmoln = malloc( sizeof(long)*MAXN);
if(sysmoln == NULL){
fprintf(stderr, "\nTOPOLOGY ERROR: Could not allocate memory for sysmoln");
exit(1);
}
struct particles tmp_particles[MAXN];
for (i=0;i<MAXN;i++) {
if (i < MAXMT) {
topo->chainparam[i].bond1eq = -1;
topo->chainparam[i].bond1c = -1;
topo->chainparam[i].bond2eq = -1;
topo->chainparam[i].bond2c = -1;
topo->chainparam[i].bonddc = -1;
topo->chainparam[i].angle1eq = -1;
topo->chainparam[i].angle1c = -1;
topo->chainparam[i].angle2eq = -1;
topo->chainparam[i].angle2c = -1;
molecules[i].name = NULL;
molecules[i].type = malloc(sizeof(long)*MAXN);
molecules[i].switchtype = malloc(sizeof(long)*MAXN);
molecules[i].delta_mu = malloc(sizeof(double)*MAXN);
for (j=0;j<MAXN;j++) {
molecules[i].type[j] = -1;
}
}
for (j = 0; j < MAXCHL; j++){
topo->chainlist[i][j] = -1;
}
sysnames[i]=NULL;
}
for (i=0;i<MAXT;i++) {
for (j=0;j<MAXT;j++) {
exclusions[i][j]=FALSE;
}
}
topo->exter.exist = FALSE;
topo->exter.thickness = 0.0;
topo->exter.epsilon = 0.0;
topo->exter.attraction = 0.0;
topo->exter.sqmaxcut = 0.0;
for(i = 0; i < MAXT; i++){
for(j = 0; j < MAXT; j++){
for(k = 0; k < 2; k++){
topo->ia_params[i][j].geotype[k] = 0;
}
}
}
fprintf (stdout, "Reading topology...\n");
fflush(stdout);
molname[0] = ' ';
initparams(topo);
pline=malloc((size_t)STRLEN);
while (fgets2(line,STRLEN-2,infile) != NULL) {
strcpy(pline,line);
if (!pline) fprintf (stderr, "\nTOPOLOGY ERROR: Empty line in topology.\n\n");
/* build one long line from several fragments */
while (continuing(line) && (fgets2(line,STRLEN-1,infile) != NULL)) {
size=strlen(pline)+strlen(line)+1;
free(pline);
pline=malloc((size_t)size);
strcat(pline,line);
}
/* skip trailing and leading spaces and comment text */
strip_comment (pline);
trim (pline);
/* if there is something left... */
if ((int)strlen(pline) > 0) {
// get the [COMMAND] key
if (pline[0] == OPENKEY) {
pline[0] = ' ';
beforecommand(keystr,pline,CLOSEKEY);
upstring (keystr);
} else {
//DEBUG fprintf (stdout, "Topology read type:%s, %s \n",keystr,pline);
if (!strcmp(keystr,"TYPES")) {
fflush(stdout);
if (!filltypes(&pline, topo)) {
DEBUG_INIT("Something went wrong with filltypes");
fprintf (stderr, "\nTOPOLOGY ERROR: in reading types\n\n");
topdealoc(&pline,sysnames,&sysmoln, molecules);
exit (1);
}
DEBUG_INIT("back in init_top");
} else{
if (!strcmp(keystr,"MOLECULES")){
DEBUG_INIT("Let's go to the molecules");
if (molname[0] == ' ') {
beforecommand(molname,pline,SEPARATOR);
i=0;
while (molecules[i].name != NULL)
i++;
DEBUG_INIT("in the middle of getting to fillmol");
molecules[i].name = malloc(strlen(molname)+1);
strcpy(molecules[i].name, molname);
fprintf (stdout, "Topology read for molecule: %s \n",molname);
}
if (!fillmol(molname, pline, molecules, topo)) {
fprintf (stderr, "\nTOPOLOGY ERROR: in reading molecules\n\n");
topdealoc(&pline,sysnames,&sysmoln, molecules);
exit (1);
}
if ((dummy = strchr (pline,CLOSEMOL)) != NULL)
molname[0] = ' ';
} else {
if (!strcmp(keystr,"SYSTEM")) {
if (!fillsystem(pline,sysnames,&sysmoln)) {
fprintf (stderr, "\nTOPOLOGY ERROR: in reading system\n\n");
topdealoc(&pline,sysnames,&sysmoln, molecules);
exit (1);
}
} else {
if (!strcmp(keystr,"EXTER")) {
fflush(stdout);
if (!fillexter(&pline, topo)) {
DEBUG_INIT("Something went wrong with external potential");
fprintf (stderr, "\nTOPOLOGY ERROR: in reading external potential\n\n");
topdealoc(&pline,sysnames,&sysmoln, molecules);
exit (1);
}
} else {
if (!strcmp(keystr,"EXCLUDE")) {
fflush(stdout);
if (!fillexclusions(&pline,&exclusions)) {
DEBUG_INIT("Something went wrong with exclusions potential");
fprintf (stderr, "\nTOPOLOGY ERROR: in reading exclusions\n\n");
topdealoc(&pline,sysnames,&sysmoln, molecules);
exit (1);
}
} else {
fprintf (stderr, "\nTOPOLOGY ERROR: invalid keyword:%s.\n\n", keystr);
topdealoc(&pline,sysnames,&sysmoln, molecules);
exit (1);
}
}
}
}
}
}
}
}
/*we have sucessfully read topology*/
if (pline !=NULL) free(pline);
pline=NULL;
fclose (infile);
fflush (stdout);
/*fill ia_params combinations*/
fprintf (stdout, "\nTopology succesfully read. Generating pair interactions...\n");
genparampairs(topo,&exclusions);
double maxlength = 0;
for(i = 0; i < MAXT; i++){
if(maxlength < topo->ia_params[i][i].len[0])
maxlength = topo->ia_params[i][i].len[0];
}
topo->sqmaxcut += maxlength+2;
topo->sqmaxcut *= 1.1;
topo->maxcut = topo->sqmaxcut;
topo->sqmaxcut = topo->sqmaxcut*topo->sqmaxcut;
topo->exter.sqmaxcut += maxlength;
topo->exter.sqmaxcut *= topo->exter.sqmaxcut*1.1;
/*TODO fill chain list and maxch, park particle type*/
fprintf (stdout, "Generating chainlist...\n");
maxch=0;
maxpart=0;
i=0;
while (sysnames[i]!=NULL) {
mol=0;
while (strcmp(molecules[mol].name,sysnames[i])) {
mol++;
if (molecules[mol].name == NULL) {
fprintf (stderr, "TOPOLOGY ERROR: molecules %s is not defined.\n\n",sysnames[i]);
topdealoc(&pline,sysnames,&sysmoln, molecules);
exit(1);
}
}
for (j=0;j<sysmoln[i];j++) {
//DEBUG fprintf (stdout, "molnames %s sysname %s sysnum %ld \n",molnames[mol],sysnames[i],sysmoln[i]);
k=0;
while (molecules[mol].type[k] != -1) {
tmp_particles[maxpart].type = molecules[mol].type[k];
tmp_particles[maxpart].switchtype = molecules[mol].switchtype[k];
tmp_particles[maxpart].delta_mu = molecules[mol].delta_mu[k];
tmp_particles[maxpart].chaint = mol;
tmp_particles[maxpart].chainn = maxch;
if (k > MAXCHL) {
fprintf (stderr, "TOPOLOGY ERROR: more particles in chan (%ld) than allowed(%d).\n",k,MAXCHL);
fprintf (stderr, "Change MAXCHL in source and recompile the program. \n\n");
topdealoc(&pline,sysnames,&sysmoln, molecules);
exit(1);
}
if (molecules[mol].type[1] != -1) {
topo->chainlist[maxch][k] = maxpart;
}
k++;
maxpart++;
if (maxpart > MAXN) {
fprintf (stderr, "TOPOLOGY ERROR: more particles(%ld) than allowed(%d).\n",maxpart,MAXN);
fprintf (stderr, "Change MAXN in source and recompile the program. \n\n");
topdealoc(&pline,sysnames,&sysmoln, molecules);
exit(1);
}
}
if (molecules[mol].type[1] != -1) {
maxch++;
}
}
i++;
}
topo->npart = maxpart;
/* write the particles from the temporary to the "permanent" conf */
conf->particle = malloc(sizeof(struct particles) * topo->npart);
if(conf->particle == NULL){
fprintf(stderr, "\nTOPOLOGY ERROR: Could not allocate memory for conf->particle");
exit(1);
}
for(i = 0; i < topo->npart; i++){
conf->particle[i].type = tmp_particles[i].type;
conf->particle[i].switchtype = tmp_particles[i].switchtype;
conf->particle[i].delta_mu = tmp_particles[i].delta_mu;
conf->particle[i].chaint = tmp_particles[i].chaint;
conf->particle[i].chainn = tmp_particles[i].chainn;
}
/* Initialize the clusterlist */
sim->clusterlist = malloc(sizeof(long) * topo->npart);
if(sim->clusterlist == NULL){
fprintf(stderr, "\nTOPOLOGY ERROR: Could not allocate memory for sim->clusterlist!");
exit(1);
}
sim->clustersenergy = malloc(sizeof(double) * topo->npart);
if(sim->clustersenergy== NULL){
fprintf(stderr, "\nTOPOLOGY ERROR: Could not allocate memory for sim->clustersenergy!");
exit(1);
}
sim->clusters = NULL;
/* get all the particles with switch type */
long switchlist[topo->npart];
long n_switch_part = 0;
for(i = 0; i < topo->npart; i++){
if(conf->particle[i].type != conf->particle[i].switchtype){
switchlist[n_switch_part] = i;
n_switch_part++;
}
}
topo->n_switch_part = n_switch_part;
if (n_switch_part == 0 && sim->switchprob > 0){
fprintf(stderr, "TOPOLOGY WARNING: No switchable particles found, but probability for a switch is not zero!\n");
sim->switchprob = 0;
fprintf(stderr, "TOPOLOGY WARNING: We changed Switch Probability to zero in this run!\n");
}
topo->switchlist=NULL;
if (n_switch_part > 0){
topo->switchlist = malloc(sizeof(long) * n_switch_part);
for(i = 0; i < n_switch_part; i++){
topo->switchlist[i] = switchlist[i];
//DEBUG
//printf("%ld is in switchlist\n", switchlist[i]);
}
}
j = 0;
while (topo->chainlist[j][0] >= 0) {
j++;
}
topo->chainnum = j;
if (topo->chainnum != maxch) {
fprintf (stderr, "TOPOLOGY ERROR: Maximum number of chains(%ld) does not agree with number of chains (%ld)\n\n",maxch,topo->chainnum);
topdealoc(&pline,sysnames,&sysmoln, molecules);
exit (1);
}
k=0;
/*clear connectivity and then fill it from chain list*/
fprintf (stdout, "Generating connectivity...\n");
for (i=0; i<MAXN; i++) {
topo->conlist[i][0] = -1;
topo->conlist[i][1] = -1;
topo->conlist[i][2] = -1;
topo->conlist[i][3] = -1;
}
conf->sysvolume = 0;
for (i=0; i<maxpart; i++) {
for (j=0; j<MAXCHL; j++) {
if (topo->chainlist[i][j] >= 0) {
k = topo->chainlist[i][j];
if ((j+1 < MAXCHL)&&(topo->chainlist[i][j+1] >= 0))
topo->conlist[k][1] = topo->chainlist[i][j+1]; /*if there is a next particle fill it to head bond*/
if (j > 0)
topo->conlist[k][0] = topo->chainlist[i][j-1]; /*if this is not first particle fill tail bond*/
if ((j+2 < MAXCHL)&& (topo->chainlist[i][j+2] >= 0))
topo->conlist[k][3] = topo->chainlist[i][j+2]; /*if there is a second next particle fill it second neighbour*/
if (j > 1)
topo->conlist[k][2] = topo->chainlist[i][j-2]; /*if this is not second or first particle fill second tail bond*/
}
}
conf->sysvolume += topo->ia_params[conf->particle[i].type][conf->particle[i].type].volume;
}
/*DEBUG
for (i=0; i<MAXN; i++) {
for (j=0; j<MAXCHL; j++) {
fprintf (stderr, " %d",chainlist[i][j]);
}
fprintf (stderr, " \n");
}
for (i=0; i<MAXN; i++) {
printf (" %ld %ld %ld %ld\n",conlist[i][0],conlist[i][1],conlist[i][2],conlist[i][3]);
}
*/
// Mark particles as not switched
for(i = 0; i < maxpart; i++){
conf->particle[i].switched = 0;
}
topdealoc(&pline,sysnames,&sysmoln, molecules);
DEBUG_INIT("Finished with reading the topology");
/* Parallel tempering check */
#ifdef MPI
// probability to switch replicas = exp ( -0.5 * dT*dT * N / (1 + dT) )
printf("Probability to switch replicas is roughly: %f\n",exp(-0.5 * maxpart * sim->dtemp * sim->dtemp / (1.0 + sim->dtemp)) );
#endif
}
/*..........................................................................*/
/*dealocting memory for init_top*/
/* Release all memory allocated during topology initialization: the working
 * line buffer, the per-system molecule counts, the per-molecule-type arrays
 * (name/type/switchtype/delta_mu) and all system-name strings.
 *
 * pline     - address of the line buffer pointer; freed and reset to NULL
 * sysnames  - array of MAXN system-name strings; each freed and reset to NULL
 * sysmoln   - address of the molecule-count array pointer; freed and reset to NULL
 * molecules - per-type molecule records; their heap members are freed
 *
 * free(NULL) is a no-op, so no NULL guards are needed; every freed pointer
 * that remains reachable is reset to NULL so a second call (or a later
 * error path) cannot double-free. Always returns 0. */
int topdealoc(char **pline,char *sysnames[MAXN], long **sysmoln, struct molecule * molecules)
{
    long i;

    free(*pline);
    *pline = NULL;
    free(*sysmoln);
    *sysmoln = NULL;
    for (i = 0; i < MAXN; i++) {
        if (i < MAXMT) {
            /* these members may be NULL when the molecule slot was never filled */
            free(molecules[i].name);
            molecules[i].name = NULL;
            free(molecules[i].type);
            molecules[i].type = NULL;
            free(molecules[i].switchtype);
            molecules[i].switchtype = NULL;
            free(molecules[i].delta_mu);
            molecules[i].delta_mu = NULL;
        }
        free(sysnames[i]);
        sysnames[i] = NULL;
    }
    return 0;
}
/* initiate vectors of a single particle*/
/* Precompute the orientation-derived vectors cached on a single particle
 (patch directions, patch side vectors and chiral axes) so that energy
 routines can reuse them instead of rebuilding them on every evaluation.
 target    - index of the particle in conf->particle
 ia_parami - self-interaction parameters of that particle's type
 conf      - configuration holding the particle array (modified in place) */
void int_partvec(long target, struct ia_param * ia_parami, struct conf * conf )
{
struct quat quatrot;
/* old-style local prototypes for geometry helpers defined elsewhere in the project */
struct quat quat_create(struct vector, double, double);
void vec_rotate(struct vector *, struct quat);
void normalise(struct vector *);
void ortogonalise(struct vector *,struct vector);
if ( (ia_parami->geotype[0] == SCA) || (ia_parami->geotype[0] == SCN) ){
/*SCA and SCN are isotropic... nothing to initialize*/
return;
}
/* main axis must be a unit vector; the patch direction is made perpendicular to it */
normalise (&conf->particle[target].dir);
ortogonalise(&conf->particle[target].patchdir[0],conf->particle[target].dir);
/*calculate patch sides*/
if ( (ia_parami->geotype[0] == PSC) || (ia_parami->geotype[0] == CPSC)
|| (ia_parami->geotype[0] == TPSC) || (ia_parami->geotype[0] == TCPSC) ){
/* rotate patch vector by half size of patch*/
/* pcoshalfi/psinhalfi are the precomputed cos/sin of the half patch angle;
 the two sides are the patch direction rotated +/- that angle about the axis */
conf->particle[target].patchsides[0] = conf->particle[target].patchdir[0];
quatrot=quat_create(conf->particle[target].dir, ia_parami->pcoshalfi[0], ia_parami->psinhalfi[0]);
vec_rotate(&(conf->particle[target].patchsides[0]),quatrot);
/*second side*/
conf->particle[target].patchsides[1] = conf->particle[target].patchdir[0];
quatrot=quat_create(conf->particle[target].dir, ia_parami->pcoshalfi[0], -1.0*ia_parami->psinhalfi[0]);
vec_rotate(&(conf->particle[target].patchsides[1]),quatrot);
}
/*calculate second patchdir*/
if ( (ia_parami->geotype[0] == TPSC) || (ia_parami->geotype[0] == TCPSC) ||
(ia_parami->geotype[0] == TCHPSC) || (ia_parami->geotype[0] == TCHCPSC)){
/* second patch = first patch rotated about the main axis by the second-patch
 rotation angle, then re-orthogonalised against the axis */
conf->particle[target].patchdir[1] = conf->particle[target].patchdir[0];
quatrot=quat_create(conf->particle[target].dir, ia_parami->csecpatchrot[0], ia_parami->ssecpatchrot[0]);
vec_rotate(&(conf->particle[target].patchdir[1]),quatrot);
ortogonalise(&conf->particle[target].patchdir[1],conf->particle[target].dir);
}
/*calculate second patch sides*/
if ( (ia_parami->geotype[0] == TPSC) || (ia_parami->geotype[0] == TCPSC) ){
/* rotate patch vector by half size of patch*/
/* slots [2]/[3] of patchsides and pcoshalfi/psinhalfi belong to the second patch */
conf->particle[target].patchsides[2] = conf->particle[target].patchdir[1];
quatrot=quat_create(conf->particle[target].dir, ia_parami->pcoshalfi[2], ia_parami->psinhalfi[2]);
vec_rotate(&(conf->particle[target].patchsides[2]),quatrot);
/*second side*/
conf->particle[target].patchsides[3] = conf->particle[target].patchdir[1];
quatrot=quat_create(conf->particle[target].dir, ia_parami->pcoshalfi[2], -1.0*ia_parami->psinhalfi[2]);
vec_rotate(&(conf->particle[target].patchsides[3]),quatrot);
}
/*calculate chdir vector*/
if ( (ia_parami->geotype[0] == CHPSC) || (ia_parami->geotype[0] == CHCPSC)
|| (ia_parami->geotype[0] == TCHPSC) || (ia_parami->geotype[0] == TCHCPSC)){
/* chiral axis = main axis tilted about the patch direction; for chiral types
 the patch sides computed above are recomputed about this tilted axis */
conf->particle[target].chdir[0] = conf->particle[target].dir;
quatrot = quat_create(conf->particle[target].patchdir[0], ia_parami->chiral_cos[0], ia_parami->chiral_sin[0]);
vec_rotate(&(conf->particle[target].chdir[0]), quatrot);
/* rotate patch vector by half size of patch*/
conf->particle[target].patchsides[0] = conf->particle[target].patchdir[0];
quatrot=quat_create(conf->particle[target].chdir[0], ia_parami->pcoshalfi[0], ia_parami->psinhalfi[0]);
vec_rotate(&(conf->particle[target].patchsides[0]),quatrot);
/*second side*/
conf->particle[target].patchsides[1] = conf->particle[target].patchdir[0];
quatrot=quat_create(conf->particle[target].chdir[0], ia_parami->pcoshalfi[0], -1.0*ia_parami->psinhalfi[0]);
vec_rotate(&(conf->particle[target].patchsides[1]),quatrot);
}
/*calculate chdir vector for seond patch*/
if ( (ia_parami->geotype[0] == TCHPSC) || (ia_parami->geotype[0] == TCHCPSC) ){
/* same chiral construction for the second patch; note the tilt angle reuses
 chiral_cos/sin[0] — presumably one chirality per type; verify if two-patch
 types ever need independent chiral angles */
conf->particle[target].chdir[1] = conf->particle[target].dir;
quatrot = quat_create(conf->particle[target].patchdir[1], ia_parami->chiral_cos[0], ia_parami->chiral_sin[0]);
vec_rotate(&(conf->particle[target].chdir[1]), quatrot);
/* rotate patch vector by half size of patch to get sides*/
conf->particle[target].patchsides[2] = conf->particle[target].patchdir[1];
quatrot=quat_create(conf->particle[target].chdir[1], ia_parami->pcoshalfi[2], ia_parami->psinhalfi[2]);
vec_rotate(&(conf->particle[target].patchsides[2]),quatrot);
/*second side*/
conf->particle[target].patchsides[3] = conf->particle[target].patchdir[1];
quatrot=quat_create(conf->particle[target].chdir[1], ia_parami->pcoshalfi[2], -1.0*ia_parami->psinhalfi[2]);
vec_rotate(&(conf->particle[target].patchsides[3]),quatrot);
}
}
/* calculate vectors on particles for speedup*/
void partvecinit(struct topo * topo, struct sim * sim, struct conf * conf )
{
long i;
void int_partvec(long target, struct ia_param *, struct conf * conf );
for(i = 0; i < topo->npart; i++){
if ( topo->ia_params[conf->particle[i].type][conf->particle[i].type].geotype[0] < SP)
int_partvec(i,&(topo->ia_params[conf->particle[i].type][conf->particle[i].type]),conf);
}
}
/*generate interations pairs*/
/* Build the off-diagonal entries of the MAXT x MAXT pair-interaction table
 from the per-type (diagonal) entries read from the topology, set up the
 wall (external potential) interaction for each defined type, and finally
 zero the attraction (epsilon) of explicitly excluded pairs.
 NOTE(review): topo->sqmaxcut holds a plain distance at this point; the
 caller squares it afterwards. */
void genparampairs(struct topo * topo, BOOL (*exclusions)[MAXT][MAXT])
{
int i,j,k;
int a[2];
int len;
double length = 0; // The length of a PSC, currently only one is allow, ie implemented
for (i=0;i<MAXT;i++) {
for (j=0;j<MAXT;j++) {
if (i!=j) {
/* combine only types that were actually defined (geotype 0 = unused slot) */
if((topo->ia_params[j][j].geotype[0] != 0) && (topo->ia_params[i][i].geotype[0] != 0)){
a[0] = i;
a[1] = j;
/* slot k of the pair record carries the single-type parameters of
 member k of the pair (k=0 -> type i, k=1 -> type j) */
for(k = 0; k < 2; k++){
topo->ia_params[i][j].geotype[k] = topo->ia_params[a[k]][a[k]].geotype[0];
topo->ia_params[i][j].len[k] = topo->ia_params[a[k]][a[k]].len[0];
/* enforce one global spherocylinder length: mixed lengths are unsupported */
if (topo->ia_params[a[k]][a[k]].len[0] > 0){
if (length == 0){
length = topo->ia_params[a[k]][a[k]].len[0];
}
else if (length > 0){
if (length != topo->ia_params[a[k]][a[k]].len[0]){
fprintf(stderr, "Error: ");
fprintf(stderr, "Different lengths for spherocylinders have not been implemented yet!\n");
fprintf(stderr, "\tCheck the length of type %d!\n", a[k]);
exit(1);
}
}
}
topo->ia_params[i][j].half_len[k] = topo->ia_params[a[k]][a[k]].half_len[0];
/* Handle angles only, when geotype is a patchs sphero cylinder */
if(topo->ia_params[i][j].geotype[k] >= PSC && topo->ia_params[i][j].geotype[k] < SP){
topo->ia_params[i][j].pangl[k] = topo->ia_params[a[k]][a[k]].pangl[0];
topo->ia_params[i][j].panglsw[k] = topo->ia_params[a[k]][a[k]].panglsw[0];
/* precompute cosines of the half patch angle and of the half angle
 including the switching width; angles are given in degrees */
topo->ia_params[i][j].pcangl[k] = cos(topo->ia_params[i][j].pangl[k]/2.0/180*PI);
topo->ia_params[i][j].pcanglsw[k] = cos((topo->ia_params[i][j].pangl[k]/2.0+topo->ia_params[i][j].panglsw[k])/180*PI);
topo->ia_params[i][j].pcoshalfi[k] = cos((topo->ia_params[i][j].pangl[k]/2.0+topo->ia_params[i][j].panglsw[k])/2.0/180*PI);
topo->ia_params[i][j].psinhalfi[k] = sqrt(1.0 - topo->ia_params[i][j].pcoshalfi[k] * topo->ia_params[i][j].pcoshalfi[k]);
}
/* Only when the PSC is chiral */
if( (topo->ia_params[i][j].geotype[k] == CHCPSC) || (topo->ia_params[i][j].geotype[k] == CHPSC) \
|| (topo->ia_params[i][j].geotype[k] == TCHCPSC) || (topo->ia_params[i][j].geotype[k] == TCHPSC) ){
topo->ia_params[i][j].chiral_cos[k] = topo->ia_params[a[k]][a[k]].chiral_cos[0];
topo->ia_params[i][j].chiral_sin[k] = topo->ia_params[a[k]][a[k]].chiral_sin[0];
}
/* Information of two patches */
/* second-patch data live in slots k+2 (2 and 3) of the angle arrays */
if( (topo->ia_params[i][j].geotype[k] == TCPSC) || (topo->ia_params[i][j].geotype[k] == TPSC) \
|| (topo->ia_params[i][j].geotype[k] == TCHCPSC) || (topo->ia_params[i][j].geotype[k] == TCHPSC) ){
topo->ia_params[i][j].csecpatchrot[k] = topo->ia_params[a[k]][a[k]].csecpatchrot[0];
topo->ia_params[i][j].ssecpatchrot[k] = topo->ia_params[a[k]][a[k]].ssecpatchrot[0];
topo->ia_params[i][j].pangl[k+2] = topo->ia_params[a[k]][a[k]].pangl[2];
topo->ia_params[i][j].panglsw[k+2] = topo->ia_params[a[k]][a[k]].panglsw[2];
topo->ia_params[i][j].pcangl[k+2] = cos(topo->ia_params[i][j].pangl[k+2]/2.0/180*PI);
topo->ia_params[i][j].pcanglsw[k+2] = cos((topo->ia_params[i][j].pangl[k+2]/2.0+topo->ia_params[i][j].panglsw[k+2])/180*PI);
topo->ia_params[i][j].pcoshalfi[k+2] = cos((topo->ia_params[i][j].pangl[k+2]/2.0+topo->ia_params[i][j].panglsw[k+2])/2.0/180*PI);
topo->ia_params[i][j].psinhalfi[k+2] = sqrt(1.0 - topo->ia_params[i][j].pcoshalfi[k+2] * topo->ia_params[i][j].pcoshalfi[k+2]);
}
}
/* copying len+1 bytes includes the terminating NUL (strncpy used as strcpy) */
len = strlen(topo->ia_params[i][i].name);
strncpy(topo->ia_params[i][j].name, topo->ia_params[i][i].name, len + 1);
len = strlen(topo->ia_params[i][i].other_name);
strncpy(topo->ia_params[i][j].other_name, topo->ia_params[i][i].other_name, len + 1);
/* standard combining rules: arithmetic mean for sizes/switches, geometric
 mean (Lorentz-Berthelot style) for the well depth */
topo->ia_params[i][j].sigma = AVER(topo->ia_params[i][i].sigma,topo->ia_params[j][j].sigma);
topo->ia_params[i][j].epsilon = sqrt(topo->ia_params[i][i].epsilon * topo->ia_params[j][j].epsilon);
topo->ia_params[i][j].pswitch = AVER(topo->ia_params[i][i].pswitch,topo->ia_params[j][j].pswitch);
topo->ia_params[i][j].rcutwca = (topo->ia_params[i][j].sigma)*pow(2.0,1.0/6.0);
// Averaging of the flat part of attraction
topo->ia_params[i][j].pdis = AVER(topo->ia_params[i][i].pdis - topo->ia_params[i][i].rcutwca, \
topo->ia_params[j][j].pdis - topo->ia_params[j][j].rcutwca) + topo->ia_params[i][j].rcutwca;
topo->ia_params[i][j].rcut = topo->ia_params[i][j].pswitch+topo->ia_params[i][j].pdis;
// if not non-attractive == if attractive
if (!((topo->ia_params[i][j].geotype[0] % 10 == 0) || (topo->ia_params[i][j].geotype[1] % 10 == 0))){
/* warning only: the run continues with an inconsistent cutoff */
if (topo->ia_params[i][j].rcutwca > topo->ia_params[i][j].rcut){
fprintf(stderr, "Error: Repulsive cutoff is larger than the attractive cutoff!\n");
fprintf(stderr, " between %d and %d: %lf > %lf\n", i, j, topo->ia_params[i][j].rcutwca, topo->ia_params[i][j].rcut);
}
}
if ( topo->ia_params[i][j].rcutwca > topo->sqmaxcut )
topo->sqmaxcut = topo->ia_params[i][j].rcutwca;
if ( topo->ia_params[i][j].rcut > topo->sqmaxcut )
topo->sqmaxcut = topo->ia_params[i][j].rcut;
}
}
}
/*filling interaction with external potential*/
if( (topo->exter.exist) && (topo->ia_params[i][i].geotype[0] != 0)){
/*use everything like for given particles except distance and attraction, which is generated as for other interactions*/
topo->exter.interactions[i] = topo->ia_params[i][i];
topo->exter.interactions[i].sigma = AVER(topo->ia_params[i][i].sigma, topo->exter.thickness);
topo->exter.interactions[i].rcutwca = (topo->exter.interactions[i].sigma)*pow(2.0,1.0/6.0);
topo->exter.interactions[i].epsilon = sqrt(topo->ia_params[i][i].epsilon * topo->exter.epsilon);
topo->exter.interactions[i].pswitch = AVER(topo->ia_params[i][i].pswitch, topo->exter.attraction);
topo->exter.interactions[i].pdis = AVER(topo->ia_params[i][i].pdis - topo->ia_params[i][i].rcutwca, 0.0) + topo->exter.interactions[i].rcutwca;
topo->exter.interactions[i].rcut = topo->exter.interactions[i].pswitch + topo->exter.interactions[i].pdis;
if (topo->exter.interactions[i].rcut > topo->exter.sqmaxcut ) topo->exter.sqmaxcut = topo->exter.interactions[i].rcut;
}
}
/* excluded pairs keep their geometry but lose all attraction */
for (i=0;i<MAXT;i++) {
for (j=0;j<MAXT;j++) {
if ( (*exclusions)[i][j] )
topo->ia_params[i][j].epsilon = 0.0;
}
}
}
/*initialize parameters for interactions*/
/* Reset every entry of the MAXT x MAXT pair-interaction table to zero so
 * that genparampairs() can later distinguish defined types (geotype != 0)
 * from unused slots. Also clears the running maximum cutoff. */
void initparams(struct topo * topo)
{
    int i,j,k;
    for (i=0;i<MAXT;i++) {
        for (j=0;j<MAXT;j++) {
            /* slots 0/1 of these arrays hold the two members of the pair */
            for(k = 0; k < 2; k++){
                topo->ia_params[i][j].geotype[k] = 0;
                topo->ia_params[i][j].len[k] = 0.0;
                topo->ia_params[i][j].half_len[k] = 0.0;
                topo->ia_params[i][j].chiral_cos[k] = 0.0;
                topo->ia_params[i][j].chiral_sin[k] = 0.0;
                topo->ia_params[i][j].csecpatchrot[k] = 0.0;
                topo->ia_params[i][j].ssecpatchrot[k] = 0.0;
            }
            /* Clear all four patch-angle slots (0,1: first patch; 2,3: second
             * patch). The previous version cleared only slots 2-3, leaving
             * 0-1 indeterminate unless topo happened to be pre-zeroed. */
            for(k = 0; k < 4; k++){
                topo->ia_params[i][j].pangl[k] = 0.0;
                topo->ia_params[i][j].panglsw[k] = 0.0;
                topo->ia_params[i][j].pcangl[k] = 0.0;
                topo->ia_params[i][j].pcanglsw[k] = 0.0;
                topo->ia_params[i][j].pcoshalfi[k] = 0.0;
                topo->ia_params[i][j].psinhalfi[k] = 0.0;
            }
            topo->ia_params[i][j].sigma = 0.0;
            topo->ia_params[i][j].epsilon = 0.0;
            topo->ia_params[i][j].rcutwca = 0.0;
            topo->ia_params[i][j].pdis = 0.0;
            topo->ia_params[i][j].pswitch = 0.0;
            topo->ia_params[i][j].rcut = 0.0;
            topo->ia_params[i][j].volume = 0.0;
            topo->ia_params[i][j].pvolscale = 0.0;
        }
    }
    topo->sqmaxcut = 0;
}
/*...........................................................................*/
/*filling the system parameters*/
/* Parse one line of the [SYSTEM] block: "<molecule-name> <count>".
 * Appends the name to the first free slot of sysnames and the count to
 * (*sysmoln) at the same index. Returns 1 on success, 0 on any error.
 * NOTE: the count must be >= 1. */
int fillsystem(char *pline, char *sysnames[MAXN], long **sysmoln)
{
    int i,fields;
    char zz[STRLEN];
    void trim (char *);

    /* check for NULL before trim(), which dereferences its argument
       (the original checked only after calling trim) */
    if (!pline) {
        fprintf (stderr, "TOPOLOGY ERROR: obtained empty line in fil system.\n\n");
        return 0;
    }
    trim(pline);
    /* find the first unused system slot */
    i=0;
    while (sysnames[i]!=NULL) i++;
    fields = sscanf(pline, "%s %ld", zz, &(*sysmoln)[i]);
    /* validate the parse BEFORE touching zz: on a failed sscanf, zz is
       uninitialized and strlen/strcpy on it would be undefined behavior */
    if (fields != 2) {
        fprintf (stderr, "TOPOLOGY ERROR: failed reading system from (%s).\n\n", pline);
        return 0;
    }
    if ((*sysmoln)[i] < 1) {
        fprintf (stderr, "TOPOLOGY ERROR: cannot have %ld number of molecules.\n\n", (*sysmoln)[i]);
        return 0;
    }
    sysnames[i]=malloc(strlen(zz)+1);
    strcpy(sysnames[i],zz);
    fprintf (stdout, "system: %s %ld\n",sysnames[i],(*sysmoln)[i]);
    return 1;
}
/*filling the parameters for molecules*/
/* Parse one line inside a [MOLECULES] { ... } block for molecule `molname`:
 * either a PARTICLES entry (type [switchtype delta_mu]) appended to the
 * molecule's particle list, or one of the chain parameters BOND1/BOND2/
 * BONDD/ANGLE1/ANGLE2 ("<strength> <equilibrium>"). Angles are converted
 * from degrees to radians. Returns 1 on success, 0 on any parse error. */
int fillmol(char *molname, char *pline, struct molecule * molecules, struct topo * topo)
{
    DEBUG_INIT("fillmol just has been called!");
    char str[STRLEN],str2[STRLEN],molcommand[STRLEN],molparams[STRLEN];
    int i,j,fields;
    double bondk,bonddist;
    void trim (char *);
    void upstring(char *);
    void beforecommand(char *, char *, char);
    void aftercommand(char *, char *, char);

    /* strip the molecule block's braces, then split "COMMAND: params" */
    beforecommand(str2, pline, CLOSEMOL);
    aftercommand(str, str2, OPENMOL);
    trim(str);
    if (strlen(str) == 0) return 1;   /* line held only the braces */
    beforecommand(molcommand,str,SEPARATOR);
    aftercommand(molparams,str,SEPARATOR);
    trim(molcommand);
    trim(molparams);
    upstring (molcommand);
    DEBUG_INIT("molcommand: %s", molcommand);
    DEBUG_INIT("molparams: %s", molparams);

    /* locate this molecule's slot and its first unused particle entry */
    i=0;
    while (strcmp(molecules[i].name, molname))
        i++;
    j=0;
    while (molecules[i].type[j] != -1)
        j++;

    if (!strcmp(molcommand,"PARTICLES")) {
        fprintf (stdout, "particle %d: \t", j + 1);
        fields = sscanf(molparams,"%ld %ld %lf",molecules[i].type + j, molecules[i].switchtype + j, molecules[i].delta_mu + j);
        /* bail out before printing anything sscanf did not actually fill:
           with fields < 1 type[j] is unset, with fields == 2 delta_mu[j] is */
        if (fields < 1) {
            fprintf (stderr, "TOPOLOGY ERROR: could not read a pacticle.\n\n");
            return 0;
        }
        fprintf (stdout, "%ld ",molecules[i].type[j]);
        if (fields == 1){
            /* no switch data given: particle cannot switch (switchtype == type) */
            (molecules[i].switchtype[j]) = (molecules[i].type[j]);
            (molecules[i].delta_mu[j]) = 0;
            fields = 3;
        } else if (fields == 3){
            fprintf(stdout, "(with switchtype: %ld and delta_mu: %lf)", molecules[i].switchtype[j], molecules[i].delta_mu[j]);
        }
        if (fields != 3) {
            fprintf (stderr, "TOPOLOGY ERROR: could not read a pacticle.\n\n");
            return 0;
        }
        fflush(stdout);
        if (molecules[i].type[j] < 0) {
            fprintf (stderr, "TOPOLOGY ERROR: pacticles include negative type.\n\n");
            return 0;
        }
        /* type indexes topo->ia_params[MAXT][MAXT], so MAXT itself is already
           out of range (the original test used '>' and let MAXT through) */
        if (molecules[i].type[j] >= MAXT) {
            fprintf (stderr, "TOPOLOGY ERROR: pacticles include type out of range 0-%ld.\n\n",(long)MAXT);
            return 0;
        }
        fprintf (stdout, "\n");
        return 1;
    }
    if (!strcmp(molcommand,"BOND1")) {
        /* harmonic bond between nearest neighbours in the chain */
        fields = sscanf(molparams, "%le %le ", &bondk, &bonddist);
        if (fields < 2) {
            fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for bond1, should be 2.\n\n");
            return 0;
        }
        if (bonddist < 0) {
            fprintf (stderr, "TOPOLOGY ERROR: bonddist cannot be negative: %f \n\n",bonddist);
            return 0;
        }
        topo->chainparam[i].bond1c = bondk;
        topo->chainparam[i].bond1eq = bonddist;
        fprintf (stdout, "bond1: %f %f \n",topo->chainparam[i].bond1c,topo->chainparam[i].bond1eq);
        return 1;
    }
    if (!strcmp(molcommand,"BOND2")) {
        /* harmonic bond between second-nearest neighbours */
        fields = sscanf(molparams, "%le %le ", &bondk, &bonddist);
        if (fields < 2) {
            fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for bond2, should be 2.\n\n");
            return 0;
        }
        if (bonddist < 0) {
            fprintf (stderr, "TOPOLOGY ERROR: bonddist cannot be negative: %f \n\n",bonddist);
            return 0;
        }
        topo->chainparam[i].bond2c = bondk;
        topo->chainparam[i].bond2eq = bonddist;
        fprintf (stdout, "bond2: %f %f \n",topo->chainparam[i].bond2c,topo->chainparam[i].bond2eq);
        return 1;
    }
    if (!strcmp(molcommand,"BONDD")) {
        /* directional bond variant */
        fields = sscanf(molparams, "%le %le ", &bondk, &bonddist);
        if (fields < 2) {
            fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for bondd, should be 2.\n\n");
            return 0;
        }
        if (bonddist < 0) {
            fprintf (stderr, "TOPOLOGY ERROR: bonddist cannot be negative: %f \n\n",bonddist);
            return 0;
        }
        topo->chainparam[i].bonddc = bondk;
        topo->chainparam[i].bonddeq = bonddist;
        fprintf (stdout, "bondd: %f %f \n",topo->chainparam[i].bonddc,topo->chainparam[i].bonddeq);
        return 1;
    }
    if (!strcmp(molcommand,"ANGLE1")) {
        /* equilibrium angle is read in degrees and stored in radians */
        fields = sscanf(molparams, "%le %le ", &bondk, &bonddist);
        if (fields < 2) {
            fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for angle1, should be 2.\n\n");
            return 0;
        }
        if (bonddist < 0) {
            fprintf (stderr, "TOPOLOGY ERROR: equilibrium angle cannot be negative: %f \n\n",bonddist);
            return 0;
        }
        topo->chainparam[i].angle1c = bondk;
        topo->chainparam[i].angle1eq = bonddist/180.0*PI;
        fprintf (stdout, "angle1: %f %f \n",topo->chainparam[i].angle1c,topo->chainparam[i].angle1eq);
        return 1;
    }
    if (!strcmp(molcommand,"ANGLE2")) {
        fields = sscanf(molparams, "%le %le ", &bondk, &bonddist);
        if (fields < 2) {
            fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for angle2, should be 2.\n\n");
            return 0;
        }
        if (bonddist < 0) {
            fprintf (stderr, "TOPOLOGY ERROR: equilibrium angle cannot be negative: %f \n\n",bonddist);
            return 0;
        }
        topo->chainparam[i].angle2c = bondk;
        topo->chainparam[i].angle2eq = bonddist/180.0*PI;
        fprintf (stdout, "angle2: %f %f \n",topo->chainparam[i].angle2c,topo->chainparam[i].angle2eq);
        return 1;
    }
    fprintf (stderr, "TOPOLOGY ERROR: unknown parameter: %s.\n\n",molcommand);
    return 0;
}
/* Converts the geometrical type string into a number */
/* Translate a geometry-type keyword from the topology file into its numeric
 * geotype constant. Returns 0 when the keyword is unknown. */
int convert_geotype(char * geotype){
    static const struct { const char *key; int value; } geo_map[] = {
        { "CPSC",    CPSC    },
        { "CHCPSC",  CHCPSC  },
        { "SCA",     SCA     },
        { "PSC",     PSC     },
        { "CHPSC",   CHPSC   },
        { "TCPSC",   TCPSC   },
        { "TCHCPSC", TCHCPSC },
        { "TPSC",    TPSC    },
        { "TCHPSC",  TCHPSC  },
        { "SPN",     SPN     },
        { "SPA",     SPA     },
    };
    size_t entry;

    for (entry = 0; entry < sizeof geo_map / sizeof geo_map[0]; entry++) {
        if (strcmp(geotype, geo_map[entry].key) == 0)
            return geo_map[entry].value;
    }
    return 0;   /* unknown keyword */
}
/*filling the parameters of external potentail - wall. Returns 1 on succes.*/
/* Parse the [EXTER] line: up to three numbers — wall thickness, epsilon and
 * attraction range. Fewer values leave the remaining wall fields at their
 * defaults; no values disables the wall. Returns 1 on success, 0 when more
 * than three values were supplied. */
int fillexter(char **pline, struct topo * topo)
{
int fields;
double param[3];
/* 0: thickness
 * 1: epsilon
 * 2: attraction
 */
char typestr[STRLEN], paramstr[STRLEN];
void trim (char *);
void beforecommand(char *, char *, char);
void aftercommand(char *, char *, char);

beforecommand(typestr, *pline, SEPARATOR);
aftercommand(paramstr, *pline, SEPARATOR);
fields = sscanf(paramstr, "%le %le %le", &param[0], &param[1], &param[2]);
if (fields >3) {
fprintf (stderr, "TOPOLOGY ERROR: too many parameters for external potential. We have \
thickness, epsilon, and attraction distance so far.\n\n");
return 0;
}
if (fields < 1) {
/* nothing parsed (also covers sscanf returning EOF): no wall */
topo->exter.exist = FALSE;
fprintf(stdout, "No external potential ");
} else {
topo->exter.exist = TRUE;
topo->exter.thickness = param[0];
fprintf(stdout, "External potential with thickness: %le ",topo->exter.thickness);
if (fields > 1) {
topo->exter.epsilon = param[1];
fprintf(stdout, "epsilon: %le ",topo->exter.epsilon);
}
if (fields > 2) {
topo->exter.attraction = param[2];
fprintf(stdout, "and range of attraction: %le ",topo->exter.attraction);
}
}
fprintf(stdout, " \n");
DEBUG_INIT("Finished filling external potential");
return 1;
}
/*filling pair for which we exlude attraction interaction. Returns 1 on succes.*/
/* Parse one [EXCLUDE] line: a whitespace-separated list of type PAIRS whose
 * attractive interaction is switched off. Each pair is recorded symmetrically
 * in the exclusion table. Returns 1 on success, 0 on an odd number of values,
 * non-numeric input, or a type outside the table.
 *
 * The original duplicated the parse code for the first pair and looped
 * forever on non-numeric tokens (strtol consumed nothing); it also wrote
 * out of bounds for types >= MAXT. All three issues are fixed here. */
int fillexclusions(char **pline, BOOL (*exlusions)[MAXT][MAXT])
{
    long num1,num2;
    char *cursor, *after1, *after2;
    void trim (char *);

    cursor = *pline;
    do {
        num1 = strtol(cursor, &after1, 10);
        trim(after1);
        /* a pair needs a second number on the line */
        if (after1 == cursor || (int)strlen(after1) == 0) {
            fprintf(stderr, "Error in readin Topology exclusions, probably there is not even number of types \n");
            return 0;
        }
        num2 = strtol(after1, &after2, 10);
        trim(after2);
        if (after2 == after1) {
            fprintf(stderr, "Error in readin Topology exclusions, probably there is not even number of types \n");
            return 0;
        }
        /* both types must index the MAXT x MAXT exclusion table */
        if (num1 < 0 || num1 >= MAXT || num2 < 0 || num2 >= MAXT) {
            fprintf(stderr, "TOPOLOGY ERROR: exclusion type out of range 0-%d \n", MAXT - 1);
            return 0;
        }
        (*exlusions)[num1][num2]=TRUE;
        (*exlusions)[num2][num1]=TRUE;
        fprintf(stderr, "Exclusions %ld %ld \n", num1, num2);
        cursor = after2;
    } while ((int)strlen(cursor) > 0);
    return 1;
}
/*filing the parameters for types from given strings. Returns 1 on succes.*/
int filltypes(char **pline, struct topo * topo)
{
int type;
int geotype_i;
int fields;
char name[SMSTR];
char geotype[SMSTR];
double param[11];
/* 0: epsilon
* 1: sigma
* 2: attraction dist
* 3: sttraction switch
* 4: patch angle
* 5: patch switch
* 6: length
* 7(optional): second patche rotation
* 8(optional): second patch angle
* 9(optional): second patch angle switch
* +1: chirality
*/
char typestr[STRLEN], paramstr[STRLEN];
void trim (char *);
void beforecommand(char *, char *, char);
void aftercommand(char *, char *, char);
beforecommand(typestr, *pline, SEPARATOR);
aftercommand(paramstr, *pline, SEPARATOR);
fields = sscanf(paramstr, "%s %d %s %le %le %le %le %le %le %le %le %le %le %le", name, &type, geotype, ¶m[0], ¶m[1], ¶m[2], ¶m[3], ¶m[4], ¶m[5], ¶m[6], ¶m[7], ¶m[8], ¶m[9], ¶m[10]);
fields -= 5; // number of parameter fields => I am too lazy to adjust everywhere below the numbers
//DEBUG fprintf (stdout, "Topology read geotype: %ld with parameters fields %d, str:%s and %s in pline %s\n",geotype,fields,geotypestr,paramstr,pline);
geotype_i = convert_geotype(geotype);
if(!geotype_i){
fprintf(stderr, "TOPOLOGY ERROR: Unknown GEOTYPE: %s!", geotype);
return 0;
}
DEBUG_INIT("geotype_i: %d; fields = %d", geotype_i, fields);
if (( (geotype_i == SCN) || (geotype_i == SPN) ) && (fields != 0)) {
fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for %s geotype, should be 1.\n\n", geotype);
return 0;
}
if (( (geotype_i == SCA) || (geotype_i == SPA)) && (fields != 2)) {
fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for %s geotype, should be 3.\n\n", geotype);
return 0;
}
if (( (geotype_i == PSC) || (geotype_i == CPSC) ) && (fields != 5)) {
fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for %s geotype, should be 5.\n\n", geotype);
return 0;
}
if (( (geotype_i == CHCPSC) || (geotype_i == CHCPSC) )&& ( fields != 6)) {
fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for %s geotype, should be 6.\n\n", geotype);
return 0;
}
if (( (geotype_i == TPSC) || (geotype_i == TCPSC) ) && (fields != 8)) {
fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for %s geotype, should be 8.\n\n", geotype);
return 0;
}
if (( (geotype_i == TCHCPSC) || (geotype_i == TCHCPSC) )&& ( fields != 9)) {
fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for %s geotype, should be 9.\n\n", geotype);
return 0;
}
if ((geotype_i < 0) || (geotype_i > (MAXT + 10))) {
fprintf (stderr, "TOPOLOGY ERROR: geotype (%s) is out of range: 0 - %d.\n\n", geotype, MAXT + 10);
return 0;
}
strcpy(topo->ia_params[type][type].name, name);
strcpy(topo->ia_params[type][type].other_name, name);
topo->ia_params[type][type].geotype[0] = geotype_i;
topo->ia_params[type][type].geotype[1] = geotype_i;
topo->ia_params[type][type].epsilon = param[0];
topo->ia_params[type][type].sigma = param[1];
topo->ia_params[type][type].rcutwca = (topo->ia_params[type][type].sigma)*pow(2.0,1.0/6.0);
fprintf(stdout, "Topology read of %d: %s (geotype: %s, %d) with parameters %lf %lf", type, name, geotype, geotype_i, topo->ia_params[type][type].epsilon, topo->ia_params[type][type].sigma);
if (fields > 0) {
topo->ia_params[type][type].pdis = param[2];
topo->ia_params[type][type].pswitch = param[3];
topo->ia_params[type][type].rcut = topo->ia_params[type][type].pswitch+topo->ia_params[type][type].pdis;
fprintf(stdout, " %f %f",topo->ia_params[type][type].pdis,topo->ia_params[type][type].pswitch);
}
if (fields > 2) {
int i;
for(i = 0; i < 2; i++){
topo->ia_params[type][type].len[i] = param[6];
topo->ia_params[type][type].half_len[i] = param[6] / 2;
topo->ia_params[type][type].pangl[i] = param[4];
topo->ia_params[type][type].panglsw[i] = param[5];
topo->ia_params[type][type].pcangl[i] = cos(param[4]/2.0/180*PI); // C1
topo->ia_params[type][type].pcanglsw[i] = cos((param[4]/2.0+param[5])/180*PI); // C2
//topo->ia_params[type][type].pcangl[i] = topo->ia_params[type][type].pcangl[i];
//topo->ia_params[type][type].pcanglsw[i] = topo->ia_params[type][type].pcanglsw[i];
topo->ia_params[type][type].pcoshalfi[i] = cos((param[4]/2.0+param[5])/2.0/180*PI);
topo->ia_params[type][type].psinhalfi[i] = sqrt(1.0 - topo->ia_params[type][type].pcoshalfi[i] * topo->ia_params[type][type].pcoshalfi[i]);
}
fprintf(stdout, " %f %f", topo->ia_params[type][type].pangl[0], topo->ia_params[type][type].panglsw[0]);
}
if(fields == 6){
int i;
for(i = 0; i < 2; i++){
topo->ia_params[type][type].chiral_cos[i] = cos(param[7] / 360 * PI);
topo->ia_params[type][type].chiral_sin[i] = sqrt(1 - topo->ia_params[type][type].chiral_cos[i] * topo->ia_params[type][type].chiral_cos[i]);
fprintf(stdout, " %f ", param[7]);
}
}
if ((fields == 8)||(fields == 9)) {
int i;
for(i = 0; i < 2; i++){
topo->ia_params[type][type].csecpatchrot[i] = cos(param[7] / 360 * PI);
topo->ia_params[type][type].ssecpatchrot[i] = sqrt(1 - topo->ia_params[type][type].csecpatchrot[i] * topo->ia_params[type][type].csecpatchrot[i]);
//fprintf(stdout, " %f %f", topo->ia_params[type][type].csecpatchrot[0], topo->ia_params[type][type].ssecpatchrot[0]);
topo->ia_params[type][type].pangl[i+2] = param[8];
topo->ia_params[type][type].panglsw[i+2] = param[9];
topo->ia_params[type][type].pcangl[i+2] = cos(param[8]/2.0/180*PI); // C1
topo->ia_params[type][type].pcanglsw[i+2] = cos((param[8]/2.0+param[9])/180*PI); // C2
//topo->ia_params[type][type].pcangl[i] = topo->ia_params[type][type].pcangl[i];
//topo->ia_params[type][type].pcanglsw[i] = topo->ia_params[type][type].pcanglsw[i];
topo->ia_params[type][type].pcoshalfi[i+2] = cos((param[8]/2.0+param[9])/2.0/180*PI);
topo->ia_params[type][type].psinhalfi[i+2] = sqrt(1.0 - topo->ia_params[type][type].pcoshalfi[i+2] * topo->ia_params[type][type].pcoshalfi[i+2]);
}
fprintf(stdout, " %f %f %f", param[7], topo->ia_params[type][type].pangl[2], topo->ia_params[type][type].panglsw[2]);
}
if(fields == 9){
int i;
for(i = 0; i < 2; i++){
topo->ia_params[type][type].chiral_cos[i] = cos(param[10] / 360 * PI);
topo->ia_params[type][type].chiral_sin[i] = sqrt(1 - topo->ia_params[type][type].chiral_cos[i] * topo->ia_params[type][type].chiral_cos[i]);
fprintf(stdout, " %f ", param[9]);
}
}
// Volume
if (geotype_i < SP)
topo->ia_params[type][type].volume = 4.0/3.0*PI*pow((topo->ia_params[type][type].sigma)/2.0,3.0) + PI/2.0*topo->ia_params[type][type].len[0]*pow((topo->ia_params[type][type].sigma)/2.0,2.0) ;
else
topo->ia_params[type][type].volume = 4.0/3.0*PI*pow((topo->ia_params[type][type].sigma)/2.0,3.0);
if ( topo->ia_params[type][type].rcutwca > topo->sqmaxcut )
topo->sqmaxcut = topo->ia_params[type][type].rcutwca;
if ( topo->ia_params[type][type].rcut > topo->sqmaxcut )
topo->sqmaxcut = topo->ia_params[type][type].rcut;
fprintf(stdout, " \n");
DEBUG_INIT("Finished filltypes");
return 1;
}
/************************************************
* String Manipulation stuff for parsing files
************************************************/
/* return string that goes before comand character*/
/* Copy pline into str, truncate it at the first occurrence of the
   command character, and trim surrounding whitespace. */
void beforecommand(char *str,char *pline,char commandc)
{
    void trim(char *);
    char *mark;

    strcpy(str, pline);
    mark = strchr(str, commandc);
    if (mark != NULL)
        *mark = '\0';
    trim(str);
}
/* return string that goes after command character */
/* Copy pline into str, blank out everything up to and including the
   first command character with spaces, then trim whitespace, leaving
   only the text that follows the command character. */
void aftercommand(char *str, char *pline,char commandc)
{
    void trim(char *);
    char *mark;

    strcpy(str, pline);
    mark = strchr(str, commandc);
    if (mark != NULL) {
        size_t k;
        /* strchr found the FIRST occurrence, so every character before
           it differs from commandc; overwrite them and the command
           character itself with spaces for trim() to strip. */
        for (k = 0; &str[k] != mark; k++)
            str[k] = ' ';
        str[k] = ' ';
    }
    trim(str);
}
/* reads a string from stream of max length n */
/* Read one line of at most n-1 characters from stream into line and
   strip the trailing newline, if present.
   Returns line, or NULL on end-of-file / read error. */
char *fgets2(char *line, int n, FILE *stream)
{
    char *nl;

    if (fgets(line, n, stream) == NULL)
        return NULL;
    nl = strchr(line, '\n');
    if (nl != NULL)
        *nl = '\0';
    return line;
}
/* remove comments */
/* Cut a line off at the comment sign by writing a terminator there;
   NULL input is tolerated. */
void strip_comment (char *line)
{
    char *mark;

    if (line == NULL)
        return;
    mark = strchr(line, COMMENTSIGN);
    if (mark != NULL)
        *mark = '\0';
}
/*test whether there is still something left in the string*/
/* After right-trimming s, report whether it ends with the line
   continuation character.  If so, the character is removed and 1 is
   returned; otherwise 0. */
int continuing(char *s)
{
    void rtrim (char *str);
    size_t sl;

    rtrim(s);
    sl = strlen(s);
    if (sl > 0 && s[sl-1] == CONTINUE) {
        s[sl-1] = '\0';
        return 1; /*true*/
    }
    return 0; /*false*/
}
/*make string uppercase*/
/* Convert str to upper case in place.
 * Fixes: pass each char through (unsigned char) before toupper() —
 * calling toupper() with a negative char value is undefined behaviour
 * — and hoist strlen() out of the loop condition (was O(n^2)). */
void upstring (char *str)
{
    size_t i;
    size_t n = strlen(str);

    for (i = 0; i < n; i++)
        str[i] = (char) toupper((unsigned char) str[i]);
}
/*trim string from left*/
/* Trim leading spaces, tabs and newlines from str in place.
 * Fixes: the original strdup()'d the whole string and never checked
 * the result (NULL dereference on allocation failure); shifting in
 * place with memmove needs no allocation and handles the overlapping
 * copy correctly. */
void ltrim (char *str)
{
    size_t c = 0;

    if (!str) return;
    while ((str[c] == ' ') || (str[c] == '\n') || (str[c] == '\t'))
        c++;
    if (c > 0)
        memmove(str, str + c, strlen(str + c) + 1); /* +1 keeps the NUL */
}
/*trim string from right*/
/* Trim trailing spaces, tabs and newlines from str in place.
 * Fix: the loop condition was `nul > 0`, which never examined
 * str[0], so a string consisting entirely of whitespace kept its
 * first character; `nul >= 0` trims it to the empty string. */
void rtrim (char *str)
{
    int nul;

    if (!str) return;
    nul = strlen(str)-1;
    while ((nul >= 0) && ((str[nul] == ' ') || (str[nul] == '\t') || (str[nul] == '\n')) ) {
        str[nul] = '\0';
        nul--;
    }
}
/*trim string from left and right*/
/* Trim whitespace from both ends of str in place. */
void trim (char *str)
{
    void ltrim (char *str);
    void rtrim (char *str);

    ltrim(str);
    rtrim(str);
}
/**
* Dumps a configuration to the supplied file handle.
*/
void draw(FILE *outfile, /*struct vector box, long npart,
struct particles *particle,*/ struct conf * conf, struct topo * topo)
{
long i;
double anint(double); /* nearest-integer helper, defined elsewhere in this file */
//fprintf (outfile, "%15.8le %15.8le %15.8le\n", box.x, box.y, box.z);
/* One line per particle: position wrapped into the central periodic
   image and scaled by the box, direction vector, first patch direction
   and the switched flag.  TESTING builds differ only in precision. */
#ifdef TESTING
for (i = 0; i < topo->npart; i++) {
fprintf (outfile, "%15.6le %15.6le %15.6le %15.6le %15.6le %15.6le %15.6le %15.6le %15.6le %d\n",
conf->box.x * ((conf->particle[i].pos.x) - anint(conf->particle[i].pos.x)),
conf->box.y * ((conf->particle[i].pos.y) - anint(conf->particle[i].pos.y)),
conf->box.z * ((conf->particle[i].pos.z) - anint(conf->particle[i].pos.z)),
conf->particle[i].dir.x, conf->particle[i].dir.y, conf->particle[i].dir.z,
conf->particle[i].patchdir[0].x, conf->particle[i].patchdir[0].y, conf->particle[i].patchdir[0].z,
conf->particle[i].switched);
}
#else
for (i = 0; i < topo->npart; i++) {
fprintf (outfile, "%15.8le %15.8le %15.8le %15.8le %15.8le %15.8le %15.8le %15.8le %15.8le %d\n",
conf->box.x * ((conf->particle[i].pos.x) - anint(conf->particle[i].pos.x)),
conf->box.y * ((conf->particle[i].pos.y) - anint(conf->particle[i].pos.y)),
conf->box.z * ((conf->particle[i].pos.z) - anint(conf->particle[i].pos.z)),
conf->particle[i].dir.x, conf->particle[i].dir.y, conf->particle[i].dir.z,
conf->particle[i].patchdir[0].x, conf->particle[i].patchdir[0].y, conf->particle[i].patchdir[0].z,
conf->particle[i].switched);
}
#endif
}
/*............................................................................*/
/****************************************************************************/
/* Pairlist stuff */
/****************************************************************************/
/**
* Initializes the pairlist and allocates memory
*/
/* Fix: the per-particle pairs arrays were allocated with a bare,
 * unchecked malloc(); use xmalloc() like the rest of this file so an
 * allocation failure cannot lead to a NULL dereference later. */
void init_pairlist(struct topo * topo, struct sim * sim){
    long i;

    printf("\nAllocating memory for pairlist...\n");
    sim->pairlist = xmalloc(sizeof(struct pairs) * topo->npart);
    /* Highest guess: every particle interacts with all the others.
       TODO: make it more sophisticated. */
    for(i = 0; i < topo->npart; i++){
        sim->pairlist[i].pairs = xmalloc(sizeof(long) * topo->npart);
        sim->pairlist[i].num_pairs = 0;
    }
}
/*............................................................................*/
/**
* Cleans up: deallocates the memory for the pairlist
*/
/* Deallocate the pairlist.
 * Fixes: NULL out the freed pointers so a second call (or later
 * accidental use) cannot double-free or touch freed memory; the
 * redundant `if (ptr != NULL)` guard before free() was dropped —
 * free(NULL) is a no-op. */
int dealloc_pairlist(struct topo * topo, struct sim * sim){
    long i;

    if (sim->pairlist != NULL) {
        for (i = 0; i < topo->npart; i++) {
            free(sim->pairlist[i].pairs);
            sim->pairlist[i].pairs = NULL;
        }
        free(sim->pairlist);
        sim->pairlist = NULL;
    }
    return 0;
}
/*............................................................................*/
/**
* Generates a pairlist with a very basic algorithm
*/
/* Minimum-image component: map a coordinate difference r (in box
 * fractions) to its closest periodic image and scale by the box
 * length.  Reproduces the original truncation-based rounding exactly. */
static double pairlist_min_image(double r, double box)
{
    if (r < 0)
        return box * (r - (double)((long)(r - 0.5)));
    return box * (r - (double)((long)(r + 0.5)));
}

/* O(N^2) pairlist build: two particles become pairs when their
 * center-of-mass distance is within maxcut plus a "skin" sized by the
 * maximal displacement both may make before the next update
 * (sim->pairlist_update is presumably the number of sweeps between
 * updates — TODO confirm).
 * Refactor: the per-axis minimum-image code, previously duplicated
 * three times, is hoisted into pairlist_min_image(); the dead
 * commented-out overflow check was removed.  Behaviour unchanged. */
void gen_simple_pairlist(struct topo * topo, struct sim * sim, struct conf * conf){
    struct vector r_cm;
    double r_cm2;
    double max_dist;
    long i, j;
    long nj = topo->npart;
    long ni = nj - 1;

    /* reset all pair counts */
    for (i = 0; i < topo->npart; i++)
        sim->pairlist[i].num_pairs = 0;

    for (i = 0; i < ni; i++) {
        for (j = i + 1; j < nj; j++) {
            r_cm.x = pairlist_min_image(conf->particle[i].pos.x - conf->particle[j].pos.x, conf->box.x);
            r_cm.y = pairlist_min_image(conf->particle[i].pos.y - conf->particle[j].pos.y, conf->box.y);
            r_cm.z = pairlist_min_image(conf->particle[i].pos.z - conf->particle[j].pos.z, conf->box.z);
            r_cm2 = DOT(r_cm, r_cm);
            /* factor 2: both partners may move toward each other */
            max_dist = AVER(sim->trans[conf->particle[i].type].mx,
                            sim->trans[conf->particle[j].type].mx);
            max_dist *= (1 + sim->pairlist_update) * 2;
            max_dist += topo->maxcut;
            max_dist *= max_dist; /* squared */
            if (r_cm2 <= max_dist) {
                sim->pairlist[i].pairs[sim->pairlist[i].num_pairs++] = j;
                sim->pairlist[j].pairs[sim->pairlist[j].num_pairs++] = i;
            }
        }
    }
}
/*.............................................................................*/
/**
* Interface for the generation of the pairlist. Define other pairlist
* algorithms above.
*/
/* Dispatch point for pairlist generation; currently always uses the
   simple O(N^2) algorithm defined above. */
void gen_pairlist(struct topo * topo, struct sim * sim, struct conf * conf){
gen_simple_pairlist(topo, sim, conf);
}
/*.............................................................................*/
/**
* Print out the pairlist
*/
/* Write the pairlist to stream: one "index (count): partners..." line
   per particle. */
void print_pairlist(FILE * stream, struct sim * sim, struct topo * topo){
    long part, k;

    for (part = 0; part < topo->npart; part++) {
        fprintf(stream, "%ld (%ld):", part, sim->pairlist[part].num_pairs);
        for (k = 0; k < sim->pairlist[part].num_pairs; k++)
            fprintf(stream, " %ld", sim->pairlist[part].pairs[k]);
        fprintf(stream, "\n");
    }
}
/*..........................................................................*/
/****************************************************************************/
/* Cluster statistics stuff */
/****************************************************************************/
/**
* determines whether two particles are in the same cluster
*/
/* Decide whether particles fst and snd belong to the same cluster:
 * TRUE when they are directly bonded neighbours within a chain, or
 * when their pair interaction energy is attractive enough.
 * NOTE(review): the -0.10 energy threshold looks like an empirical
 * cutoff for "attractively interacting" — confirm its origin. */
int same_cluster(struct topo * topo, struct conf * conf,
long fst, long snd, double (* intfce[MAXT][MAXT])(struct interacts *) ){
/*if two particles are bonded they belong to the same cluster*/
if ( ((topo->chainparam[conf->particle[fst].chaint]).bond1c >= 0) ||
((topo->chainparam[conf->particle[fst].chaint]).bonddc >= 0) ){
if ( (snd == topo->conlist[fst][1]) || (snd == topo->conlist[fst][0]) ) {
return TRUE;
}
}
/* symmetric check: snd's chain may hold the bond to fst */
if ( ((topo->chainparam[conf->particle[snd].chaint]).bond1c >= 0) ||
((topo->chainparam[conf->particle[snd].chaint]).bonddc >= 0) ){
if ( (fst == topo->conlist[snd][1]) || (fst == topo->conlist[snd][0]) ) {
return TRUE;
}
}
/*cluster is made of particles closer than some distance*/
/* struct vector image(struct vector r1, struct vector r2, struct vector box);
struct vector r_cm = image(conf->particle[fst].pos,
conf->particle[snd].pos,
conf->box);
double dist2 = DOT(r_cm, r_cm);
* TODO: Make it much more efficient => define cluster_dist!!! *
if(dist2 > topo->ia_params[conf->particle[fst].type][conf->particle[snd].type].sigma * topo->ia_params[conf->particle[fst].type][conf->particle[snd].type].sigma*4.0){
return FALSE;
}
else {
return TRUE;
}*/
/*cluster is made of attractively interacting particles*/
double paire(long, long, double (* intfce[MAXT][MAXT])(struct interacts *),
struct topo * topo, struct conf * conf);
if(paire(fst, snd, intfce, topo, conf) > -0.10 ){
return FALSE;
}
else {
return TRUE;
}
}
/*............................................................................*/
/**
* generate the clusterlist
*/
/* Build sim->clusterlist: for every particle, the smallest particle
 * index belonging to its cluster.  Iterative label propagation — keep
 * sweeping until no label changes; each change restarts the outer
 * loop via the break cascade.  Membership is decided by
 * same_cluster(), and only spherocylinder geotypes (< SP) take part. */
int gen_clusterlist(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *)){
int change = TRUE; /* does it still change? */
//long neighbour;
long i, j, fst, snd, tmp, minnumber, maxnumber;
int same_cluster(struct topo * topo, struct conf * conf,
long fst, long snd, double (* intfce[MAXT][MAXT])(struct interacts *));
// Set clusterindex to the corresponding index
for( i = 0; i < topo->npart; i++){
sim->clusterlist[i] = i;
}
// Start determining the cluster
while(change){
change = FALSE;
for(i = 0; i < topo->npart; i++){
/* without a pairlist, scan all particles above i; with one, scan
   the precomputed neighbours of i */
maxnumber = topo->npart;
minnumber = i ;
if (sim->pairlist_update) {
maxnumber = sim->pairlist[i].num_pairs;
minnumber=0;
}
/* Go over pairs to see if they are in the cluster */
for(j = minnumber; j < maxnumber; j++){
fst = i;
snd = j;
if (sim->pairlist_update) {
snd = sim->pairlist[i].pairs[j];
}
/*do cluster analysis only for spherocylinders*/
if ( (topo->ia_params[conf->particle[fst].type][conf->particle[snd].type].geotype[0] < SP) && \
(topo->ia_params[conf->particle[fst].type][conf->particle[snd].type].geotype[1] < SP) ) {
/* if they are close to each other */
if(same_cluster(topo, conf, fst, snd, intfce)){
if(fst > snd){
tmp = snd;
snd = fst;
fst = tmp;
}
/* propagate the smaller cluster label to the partner */
if(sim->clusterlist[fst] < sim->clusterlist[snd]){
sim->clusterlist[snd] = sim->clusterlist[fst];
change = TRUE;
break;
/* => will eventually start the i loop from new */
}
if(sim->clusterlist[snd] < sim->clusterlist[fst]){
sim->clusterlist[fst] = sim->clusterlist[snd];
change = TRUE;
break;
/* => will eventually start the i loop from new */
}
}
}
}
if(change){
break;
}
}
}
return 0;
}
/*............................................................................*/
/**
* sort the clusterlist
*/
/* Turn sim->clusterlist into per-cluster particle lists and build the
 * cluster-size statistics.
 * Fix: sim->clusterstat was xmalloc'd on every call without freeing
 * the previous array — a memory leak once per sweep; it is freed
 * first now, mirroring the existing handling of sim->clusters.
 * NOTE(review): this assumes sim->clusterstat starts out NULL or
 * valid, the same assumption the original already makes for
 * sim->clusters — confirm the sim struct is zero-initialized. */
int sort_clusterlist(struct topo * topo, struct sim * sim){
    long cluster_indices[topo->npart]; /* distinct cluster ids in order of
                                          appearance (over-sized on purpose) */
    long num_cluster = 0;              /* number of clusters, temporary needed */
    long i, j;
    long max_index = -1;

    /* Count clusters: clusterlist[i] holds the smallest member index of
       i's cluster, so every new cluster id shows up as a new maximum. */
    for(i = 0; i < topo->npart; i++){
        if(max_index < sim->clusterlist[i]){
            max_index = sim->clusterlist[i];
            cluster_indices[num_cluster++] = max_index;
        }
    }
    /* free the memory from the old clusters (free(NULL) is a no-op) */
    if(sim->clusters){
        for(i = 0; i < sim->num_cluster; i++){
            free(sim->clusters[i].particles);
        }
        free(sim->clusters);
    }
    /* Allocate memory for the clusters */
    sim->clusters = xmalloc(sizeof(struct cluster) * num_cluster);
    for(i = 0; i < num_cluster; i++){
        /* maximal possible size for each cluster; not shrunk afterwards */
        sim->clusters[i].particles = xmalloc(sizeof(long) * topo->npart);
        sim->clusters[i].npart = 0;
    }
    /* fill in the particles belonging to one cluster */
    for(i = 0; i < num_cluster; i++){
        for(j = 0; j < topo->npart; j++){
            if(sim->clusterlist[j] == cluster_indices[i]){
                sim->clusters[i].particles[sim->clusters[i].npart++] = j;
            }
        }
    }
    sim->num_cluster = num_cluster;
    /* Find the biggest size */
    sim->max_clust = 0;
    for(i = 0; i < num_cluster; i++){
        if(sim->clusters[i].npart > sim->max_clust){
            sim->max_clust = sim->clusters[i].npart;
        }
    }
    /* Reset statistics — release the previous array first (leak fix) */
    free(sim->clusterstat);
    sim->clusterstat = xmalloc(sizeof(long) * sim->max_clust);
    for(i = 0; i < sim->max_clust; i++){
        sim->clusterstat[i] = 0;
    }
    /* Do the statistics: bin index = cluster size - 1 */
    for(i = 0; i < num_cluster; i++){
        sim->clusterstat[sim->clusters[i].npart - 1]++;
    }
    return 0;
}
/*............................................................................*/
/**
* calculate energies of clusters
* */
/* Sum the pair interaction energies of all particle pairs inside each
   cluster and store the totals in sim->clustersenergy. */
int calc_clusterenergies(struct topo * topo, struct sim * sim, struct conf * conf,
    double (* intfce[MAXT][MAXT])(struct interacts *)){
    double paire(long, long, double (* intfce[MAXT][MAXT])(struct interacts *),
        struct topo * topo, struct conf * conf);
    long ci, a, b;

    for (ci = 0; ci < sim->num_cluster; ci++) {
        double total = 0.0;
        for (a = 0; a < sim->clusters[ci].npart; a++) {
            for (b = a + 1; b < sim->clusters[ci].npart; b++) {
                total += paire(sim->clusters[ci].particles[a],
                               sim->clusters[ci].particles[b], intfce, topo, conf);
            }
        }
        sim->clustersenergy[ci] = total;
    }
    return 0;
}
/*............................................................................*/
/**
* print the clusterlist
* */
/* Print each particle's 1-based index, its 1-based cluster id and its
   position; decor frames the listing with a header and footer. */
int print_clusterlist(FILE * stream, BOOL decor,
    struct topo * topo, struct sim * sim, struct conf * conf){
    long i;

    if (decor) {
        fprintf(stream, "\n"
            "-----------------------------------------------------\n"
            " The Cluster List\n"
            " (Index starts with 1)\n"
            "-----------------------------------------------------\n");
    }
    for (i = 0; i < topo->npart; i++) {
        fprintf(stream, "%3ld %3ld %8.4lf %8.4lf %8.4lf", i + 1,
            sim->clusterlist[i] + 1,
            conf->particle[i].pos.x,
            conf->particle[i].pos.y,
            conf->particle[i].pos.z);
        fprintf(stream, "\n");
    }
    if (decor) {
        fprintf(stream, "-----------------------------------------------------\n");
    }
    fflush(stream);
    return 0;
}
/*............................................................................*/
/**
* print the clusters
* */
/* Print every cluster (1-based index plus its energy) followed by the
   1-based indices of its member particles. */
int print_clusters(FILE * stream, BOOL decor, struct sim * sim){
    long ci, k;

    if (decor) {
        fprintf(stream, "\n"
            "-----------------------------------------------------\n"
            " The Clusters\n"
            " (Index starts with 1)\n"
            "-----------------------------------------------------\n");
    }
    for (ci = 0; ci < sim->num_cluster; ci++) {
        fprintf(stream, "%3ld(%f):", ci + 1, sim->clustersenergy[ci]);
        for (k = 0; k < sim->clusters[ci].npart; k++)
            fprintf(stream, "%5ld", sim->clusters[ci].particles[k] + 1);
        fprintf(stream, "\n");
    }
    if (decor) {
        fprintf(stream, "---------------------------------------------------\n");
    }
    fflush(stream);
    return 0;
}
/*............................................................................*/
/**
* print a statistics for the clusters
*/
/* Print the cluster-size distribution: one "size count" line per
   possible cluster size, starting at monomers. */
int print_clusterstat(FILE * stream, BOOL decor, struct sim * sim){
    long size;

    if (decor) {
        fprintf(stream, "\n"
            "-----------------------------------------------------\n"
            " Cluster Distribution\n"
            "-----------------------------------------------------\n");
    }
    for (size = 0; size < sim->max_clust; size++)
        fprintf(stream, "%5ld\t%5ld\n", size + 1, sim->clusterstat[size]);
    if (decor) {
        fprintf(stream, "--------------------------------------------------\n");
    }
    fflush(stream);
    return 0;
}
/*............................................................................*/
/**
* Alternative way of printing the cluster statistics: everything is on
* one line. First monomers, then dimers etc.
*/
/* One-line variant of the cluster statistics: "sweep: n1 n2 ..."
   with monomer counts first, then dimers, and so on. */
int print_clstat_oneline(FILE * stream, long sweep, struct sim * sim){
    long k;

    fprintf(stream, "%ld: ", sweep);
    for (k = 0; k < sim->max_clust; k++)
        fprintf(stream, "%5ld\t", sim->clusterstat[k]);
    fprintf(stream, "\n");
    fflush(stream);
    return 0;
}
/**
* write out all the cluster stat in files, if file name is given
*/
/* Regenerate the cluster data and write statistics, clusters, and the
 * cluster list to whichever of the three file handles is non-NULL.
 * Fix: the cluster list was written to the clusters handle (cl)
 * instead of the cluster-list handle (cl_list) — and would crash if
 * only cl_list was supplied (cl == NULL). */
int write_cluster(FILE * cl_stat, FILE * cl, FILE * cl_list, BOOL decor, long sweep,
    struct sim * sim, struct topo * topo, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *)){
    int gen_clusterlist(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *));
    int sort_clusterlist(struct topo * topo, struct sim * sim);
    int print_clusters(FILE * stream, BOOL decor, struct sim * sim);
    int calc_clusterenergies(struct topo * topo, struct sim * sim, struct conf * conf,
        double (* intfce[MAXT][MAXT])(struct interacts *));

    gen_clusterlist(topo, sim, conf, intfce);
    sort_clusterlist(topo, sim);
    calc_clusterenergies(topo, sim, conf, intfce);
    if (cl_stat) {
        if (decor == FALSE) {
            /* no decor usually means file output: announce how many
               lines belong to this frame */
            fprintf(cl_stat, "Sweep: %ld | Maximal size: %ld\n",
                    sweep, sim->max_clust);
        }
        print_clusterstat(cl_stat, decor, sim);
    }
    if (cl) {
        if (decor == FALSE) {
            fprintf(cl, "Sweep: %ld | Number of clusters: %ld\n",
                    sweep, sim->num_cluster);
        }
        print_clusters(cl, decor, sim);
    }
    if (cl_list) {
        if (decor == FALSE) {
            fprintf(cl_list, "Sweep: %ld | Number of particles: %ld\n",
                    sweep, topo->npart);
        }
        /* FIX: previously print_clusterlist(cl, ...) — wrong handle */
        print_clusterlist(cl_list, decor, topo, sim, conf);
    }
    return 0;
}
/*............................................................................*/
/****************************************************************************/
/* Wang-Landau stuff */
/****************************************************************************/
/*
Initiate Wang-Landau calculation.
*/
int wlinit(struct wls *wl, char filename[30])
{
long i,length,fields=0;
double field[5];
FILE *infile;
char line[STRLEN];
int wlend(struct wls *);
void trim(char *);
void strip_comment(char *);
infile = fopen(filename, "r");
if (infile == NULL) {
fprintf (stderr, "\nERROR: Could not open %s file.\n\n",filename);
return 1;
}
length=0;
while (fgets2(line,STRLEN-2,infile) != NULL) {
strip_comment (line);
trim (line);
/* if there is something left... */
if ((int)strlen(line) > 0) {
length++;
}
}
length--; /*there is alpha at the first line*/
(*wl).weights = malloc( sizeof(double) * length );
(*wl).hist = malloc( sizeof(long) * length );
(*wl).length[1] = 0;
(*wl).dorder[1] = 0;
fseek(infile,0,SEEK_SET);
i=0;
while (fgets2(line,STRLEN-2,infile) != NULL) {
strip_comment (line);
trim (line);
/* if there is something left... */
if ((int)strlen(line) > 0) {
if (i == 0) {
if (sscanf(line, "%le",&(*wl).alpha)!= 1) {
fprintf (stderr, "ERROR: Could not read alpha at the begining.\n\n");
wlend(wl);
return 1;
} else i++;
} else {
fields = sscanf(line, "%le %le %le %le",&field[0],&field[1],&field[2],&field[3]);
if ( fields == 3 ) {
if (i==1)
(*wl).minorder[0] = field[0];
(*wl).weights[i-1] = field[1];
(*wl).hist[i-1] = field[2];
(*wl).length[0]++;
i++;
} else if (fields == 4 ) {
if (i==1) {
(*wl).minorder[0] = field[0];
(*wl).minorder[1] = field[1];
}
if ( (*wl).minorder[1] == field[1] )
(*wl).length[0]++;
(*wl).weights[i-1] = field[2];
(*wl).hist[i-1] = field[3];
i++;
} else {
fprintf (stderr, "ERROR: Could not read order parameter at line %ld.\n\n", i);
wlend(wl);
return 1;
}
}
}
}
if (fields == 4 ) {
(*wl).length[1] = length / (*wl).length[0];
(*wl).dorder[1] = (field[1] - (*wl).minorder[1])/((*wl).length[1]-1);
}
(*wl).dorder[0] = (field[0] - (*wl).minorder[0])/((*wl).length[0]-1);
if ( ( (i-1) != (*wl).length[0] ) && (fields==3) ) {
fprintf (stderr, "ERROR: In reading order parameters length %ld does not fit number of lines %ld.\n\n", (*wl).length[0],i-1);
wlend(wl);
return 1;
}
if ( ( (i-1) != (*wl).length[0]*(*wl).length[1] ) && (fields==4) ) {
fprintf (stderr, "ERROR: In reading order parameters lengths %ld %ld does not fit number of lines %ld.\n\n", (*wl).length[0],(*wl).length[1],i-1);
wlend(wl);
return 1;
}
/*DEBUG*/
printf("Wang-Landau method init:\n");
printf("alpha: %f\n",(*wl).alpha);
/*int j=0;
if ((*wl).length[1] == 0) {
for (i=0; i<(*wl).length[0]; i++) {
printf ("%15.8le %15.8le %ld \n",(*wl).minorder[0] + i * (*wl).dorder[0], (*wl).weights[i], (*wl).hist[i]);
}
} else {
for (j=0; j<(*wl).length[1]; j++) {
for (i=0; i<(*wl).length[0]; i++) {
printf ("%15.8le %15.8le %15.8le %ld \n",(*wl).minorder[0] + i * (*wl).dorder[0], (*wl).minorder[1]+j*(*wl).dorder[1], (*wl).weights[i+(*wl).length[0]*j], (*wl).hist[i+(*wl).length[0]*j]);
}
printf (" \n");
}
}*/
fclose(infile);
fflush(stdout);
/**/
return 0;
}
/* Write the Wang-Landau state to filename: alpha first, then one line
   per grid point (order parameter(s), weight, histogram count); the
   2-D layout separates rows of the second dimension by a blank line.
   Returns 0 on success, 1 when the file cannot be opened. */
int wlwrite(struct wls *wl, char filename[30])
{
    long i, j;
    FILE *outfile;

    outfile = fopen(filename, "w");
    if (outfile == NULL) {
        fprintf (stderr, "\nERROR: Could not open %s file.\n\n",filename);
        return 1;
    }
    fprintf (outfile, "%15.8le \n", wl->alpha);
    if (wl->length[1] == 0) {
        /* 1-D order parameter */
        for (i = 0; i < wl->length[0]; i++)
            fprintf (outfile, "%15.8le %15.8le %ld \n",
                     wl->minorder[0] + i * wl->dorder[0],
                     wl->weights[i], wl->hist[i]);
    } else {
        /* 2-D order parameter; second dimension in the outer loop */
        for (j = 0; j < wl->length[1]; j++) {
            for (i = 0; i < wl->length[0]; i++)
                fprintf (outfile, "%15.8le %15.8le %15.8le %ld \n",
                         wl->minorder[0] + i * wl->dorder[0],
                         wl->minorder[1] + j * wl->dorder[1],
                         wl->weights[i + wl->length[0]*j],
                         wl->hist[i + wl->length[0]*j]);
            fprintf (outfile, " \n");
        }
    }
    fflush(outfile);
    fclose(outfile);
    return 0;
}
/* Release the Wang-Landau weight and histogram arrays.
 * Fix: NULL the pointers after free() — wlend() is called both from
 * wlinit() error paths and at shutdown, so a repeated call could
 * otherwise double-free. */
int wlend(struct wls *wl)
{
    free(wl->weights);
    wl->weights = NULL;
    free(wl->hist);
    wl->hist = NULL;
    return 0;
}
/* Wang-Landau bookkeeping after a REJECTED move: apply the standard
 * WL update (penalise weight, bump histogram) to the bin the system
 * stays in, and restore the order-parameter auxiliaries the trial
 * move may have touched.
 * NOTE(review): wlm codes 2 (mesh) and 5/6 (radius-hole) select which
 * auxiliary to roll back — confirm against the wlm definitions. */
void wlreject(struct sim *sim, long oldlength)
{
int mesh_cpy(struct meshs *, struct meshs *);
int longarray_cpy (long **target, long **source,long,long);
if ( sim->wlm[0] > 0 ) {
/* system remains in the current bin: update weight and histogram there */
sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]] -= sim->wl.alpha;
sim->wl.hist[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]]++;
if ( (sim->wlm[0] == 2) || (sim->wlm[1] == 2) )
mesh_cpy(&sim->wl.mesh,&sim->wl.origmesh);
if ( (sim->wlm[0] == 5) || (sim->wlm[1] == 5)||(sim->wlm[0] == 6) || (sim->wlm[1] == 6) ) {
longarray_cpy(&sim->wl.radiushole,&sim->wl.radiusholeold,sim->wl.radiusholemax,oldlength);
sim->wl.radiusholemax = oldlength;
}
/* restore the contact counter unconditionally */
sim->wl.partincontact = sim->wl.partincontactold;
}
}
/* Wang-Landau bookkeeping after an ACCEPTED move: adopt the new bin
   as current and apply the standard weight/histogram update there. */
void wlaccept(int wlm,struct wls *wl)
{
    int k;

    if (wlm > 0) {
        for (k = 0; k < 2; k++)
            wl->currorder[k] = wl->neworder[k];
        wl->weights[wl->currorder[0] + wl->currorder[1] * wl->length[0]] -= wl->alpha;
        wl->hist[wl->currorder[0] + wl->currorder[1] * wl->length[0]]++;
    }
}
/*..............................................................................*/
/*........................NEMATIC ORDER.........................................*/
/*..............................................................................*/
/*
Calculates the instantaneous value of the nematic order parameter for the
specified configuration. The nematic director is determined by diagonalisation
of the tensor order parameter Q (see Allen & Tildesley p305). The order
parameter is the corresponding eigenvalue. However, it is equivalent to take
minus two times the middle eigenvalue (see Eppenga & Frenkel, Mol Phys vol.
52, p.1303-1334 [1984]), and this is more reliable for comparing the isotropic
phase. This is the approach taken in this implementation.
Routines from Numerical Recipes are used to perform the diagonalisation. Note
that these routines expect an n*n matrix to be stored in elements [1...n][1...n],
rather than [0...n-1][0...n-1], so the arrays must be declared with one more
element in each dimension.
*/
/* Nematic order parameter of npart particle directions: build the
 * 3x3 tensor order parameter Q, diagonalise it with the Numerical
 * Recipes routines tred2/tqli, and return minus two times the middle
 * eigenvalue (the Eppenga & Frenkel convention; see the long comment
 * above).  Arrays are indexed [1..3] per the NR 1-based convention,
 * hence the size-4 declarations. */
double nematic(long npart, struct particles *p)
{
double q[4][4] = {{0.0, 0.0, 0.0, 0.0}, {0.0, 0.0, 0.0, 0.0},
{0.0, 0.0, 0.0, 0.0}, {0.0, 0.0, 0.0, 0.0}};
double d[4], e[4]; /* eigenvalues / off-diagonal workspace (1-based) */
long i;
void tred2(double [4][4], double [4], double [4]);
void tqli(double [4], double [4]);
/* accumulate the direction outer products */
for (i=0; i<npart; i++) {
q[1][1] += p[i].dir.x * p[i].dir.x;
q[1][2] += p[i].dir.x * p[i].dir.y;
q[1][3] += p[i].dir.x * p[i].dir.z;
q[2][1] += p[i].dir.y * p[i].dir.x;
q[2][2] += p[i].dir.y * p[i].dir.y;
q[2][3] += p[i].dir.y * p[i].dir.z;
q[3][1] += p[i].dir.z * p[i].dir.x;
q[3][2] += p[i].dir.z * p[i].dir.y;
q[3][3] += p[i].dir.z * p[i].dir.z;
}
/* normalise: Q = (3 <u u> - I) / 2 */
q[1][1] = (q[1][1] * 3.0 / npart - 1.0) / 2.0;
q[1][2] = (q[1][2] * 3.0 / npart ) / 2.0;
q[1][3] = (q[1][3] * 3.0 / npart ) / 2.0;
q[2][1] = (q[2][1] * 3.0 / npart ) / 2.0;
q[2][2] = (q[2][2] * 3.0 / npart - 1.0) / 2.0;
q[2][3] = (q[2][3] * 3.0 / npart ) / 2.0;
q[3][1] = (q[3][1] * 3.0 / npart ) / 2.0;
q[3][2] = (q[3][2] * 3.0 / npart ) / 2.0;
q[3][3] = (q[3][3] * 3.0 / npart - 1.0) / 2.0;
tred2 (q, d, e);
tqli (d, e);
/* Sort eigenvalues d[1..3] ascending (d[0] is used as swap space) */
if (d[1] > d[2]) { d[0]=d[1]; d[1]=d[2]; d[2]=d[0]; }
if (d[2] > d[3]) { d[0]=d[2]; d[2]=d[3]; d[3]=d[0]; }
if (d[1] > d[2]) { d[0]=d[1]; d[1]=d[2]; d[2]=d[0]; }
return -2.0*d[2];
}
/*..............................................................................*/
/*
Returns the coefficient of the Fourier series term with period boxlength/n
in the z direction. The coefficients of the sine and cosine terms are added
in quadrature and returned, making the result independent of phase shifts in
the z direction. A significantly non-zero value indicates layering of the
particles in the z direction with periodicity boxlength/n.
*/
/* Smectic order parameter: magnitude of the n-th Fourier component of
   the particle density along z.  Sine and cosine coefficients are
   added in quadrature, making the result phase-independent. */
double smectic(long npart, struct particles *p, long n)
{
    double c_sum = 0.0;
    double s_sum = 0.0;
    double omega = 8.0*n*atan(1.0); /* = 2*pi*n */
    long i;

    for (i = 0; i < npart; i++) {
        c_sum += cos(omega * p[i].pos.z);
        s_sum += sin(omega * p[i].pos.z);
    }
    c_sum /= (double)npart;
    s_sum /= (double)npart;
    return sqrt(c_sum*c_sum + s_sum*s_sum);
}
/*..............................................................................*/
/*........................Z ORDER PARAMETER.....................................*/
/* Wang-Landau order parameter: bin index of particle 0's z position
 * relative to the system's centre of mass, scaled to box units. */
long z_order(struct wls *wl, struct conf * conf,int wli)
{
// printf("%f %ld\n",particle[0].pos.z * box.z,lround(particle[0].pos.z * box.z / wl.dorder[wli] - wl.minorder[wli]));
/* Because older C compilers do not know lround we can use ceil as well
return lround(particle[0].pos.z * box.z / wl.dorder[wli] - wl.minorder[wli]);*/
/*printf("pos Z %f ",conf->particle[0].pos.z );
printf("%f ",conf->syscm.z);
printf("%f ",conf->box.z);
printf("%f ", wl->minorder[wli]);
printf("dorder %f \n", wl->dorder[wli] );*/
return (long) ceil( ((conf->particle[0].pos.z - conf->syscm.z) * conf->box.z- wl->minorder[wli]) / wl->dorder[wli] );
}
/*..............................................................................*/
/*........................2 particles distance.....................................*/
/* Wang-Landau order parameter: bin index of the xy-plane distance
 * between particles 0 and 1 under the minimum-image convention.
 * NOTE(review): only x and y enter the returned distance —
 * presumably intentional (cylindrical/radial distance); the z
 * component was computed and then discarded, so that dead code has
 * been removed (no behaviour change). */
long twopartdist(struct wls *wl, struct conf * conf, int wli)
{
    struct vector r_cm;

    r_cm.x = conf->particle[0].pos.x - conf->particle[1].pos.x;
    r_cm.y = conf->particle[0].pos.y - conf->particle[1].pos.y;
    if ( r_cm.x < 0 )
        r_cm.x = conf->box.x * (r_cm.x - (double)( (long)(r_cm.x-0.5) ) );
    else
        r_cm.x = conf->box.x * (r_cm.x - (double)( (long)(r_cm.x+0.5) ) );
    if ( r_cm.y < 0 )
        r_cm.y = conf->box.y * (r_cm.y - (double)( (long)(r_cm.y-0.5) ) );
    else
        r_cm.y = conf->box.y * (r_cm.y - (double)( (long)(r_cm.y+0.5) ) );
    return (long) ceil( (( sqrt(r_cm.x*r_cm.x + r_cm.y*r_cm.y) ) - wl->minorder[wli]) / wl->dorder[wli] );
}
/*..............................................................................*/
/*........................alignment ORDER PARAMETER.....................................*/
double alignment_order(struct conf * conf, struct topo * topo)
{
double sumdot=0;
long i,j;
struct vector r_cm;
struct vector image(struct vector, struct vector, struct vector);
for (i = 0; i < topo->npart - 1; i++) {
for (j = i + 1; j < topo->npart; j++) {
r_cm = image(conf->particle[i].pos, conf->particle[j].pos, conf->box);
if ( DOT(r_cm,r_cm) < 1.5*1.5 ) {
sumdot+= DOT(conf->particle[i].dir,conf->particle[j].dir);
}
}
}
return sumdot;
}
/*..............................................................................*/
/*........................HOLE IN MESH-MEMBRANE ORDER PARAM.....................*/
/* return change in order parameter when one particle moves*/
/* Incremental mesh order parameter for a single-particle move: returns
 * the new hole-count order parameter, or the unchanged current value
 * when the move cannot alter it (wrong particle type, or the particle
 * stayed in the same mesh bin and the add/remove updates balanced). */
long meshorder_moveone(struct vector oldpos, struct vector newpos, struct meshs *mesh,
long npart, long target, struct conf * conf, struct sim * sim, int wli)
{
int change;
int nx,ny,ox,oy; /* position in mesh */
double resid;
void mesh_fill(struct meshs *, long , struct particles *, struct sim * sim);
int mesh_findholes(struct meshs *);
int mesh_addpart(double, double, int **, int [2]);
int mesh_removepart(double, double, int **, int [2]);
/* only particles of the watched Wang-Landau type affect the mesh */
if ( conf->particle[target].type != sim->wl.wlmtype )
return sim->wl.currorder[wli];
nx = (int) (INBOX(newpos.x,resid) * (*mesh).dim[0]);
ny = (int) (INBOX(newpos.y,resid) * (*mesh).dim[1]);
ox = (int) (INBOX(oldpos.x,resid) * (*mesh).dim[0]);
oy = (int) (INBOX(oldpos.y,resid) * (*mesh).dim[1]);
if ( (nx == ox) && (ny == oy) ) return sim->wl.currorder[wli]; /* particle stayed in the same mesh bin*/
/* try the cheap incremental update first */
change = mesh_addpart(newpos.x,newpos.y,&(*mesh).data,(*mesh).dim);
if (change) {
change = mesh_removepart(oldpos.x,oldpos.y,&(*mesh).data,(*mesh).dim);
}
if ( !change ) {
/* incremental update failed: rebuild the mesh and recount holes */
mesh_fill(mesh,npart,conf->particle, sim);
return (long) (mesh_findholes(mesh) - sim->wl.minorder[wli]);
}
return sim->wl.currorder[wli];
}
/* return change in order parameter when chain moves*/
/* Incremental mesh order parameter for a whole-chain move: add every
 * chain member at its new position and remove it at its original one
 * (chorig); when any incremental step fails, rebuild the mesh and
 * recount holes.
 * NOTE(review): assumes chain[] is terminated by a negative entry
 * before MAXN elements — confirm how chains are constructed. */
long meshorder_movechain(long chain[MAXN], struct meshs *mesh,
long npart, struct conf * conf, struct sim * sim,struct particles chorig[MAXCHL], int wli)
{
long i,current;
int change;
void mesh_fill(struct meshs *, long , struct particles *, struct sim * sim);
int mesh_findholes(struct meshs *);
int mesh_addpart(double, double, int **, int [2]);
int mesh_removepart(double, double, int **, int [2]);
change= 1;
/* add chain members at their new positions */
i = 0;
current = chain[0];
while ( (current >=0 ) && (change) ) {
if ( conf->particle[current].type == sim->wl.wlmtype ) {
change = mesh_addpart(conf->particle[current].pos.x, conf->particle[current].pos.y, &(*mesh).data, (*mesh).dim);
}
i++;
current = chain[i];
}
/* remove them at their original (pre-move) positions */
i = 0;
current = chain[0];
while ( (current >=0 ) && (change) ) {
if ( conf->particle[current].type == sim->wl.wlmtype ) {
change = mesh_removepart(chorig[i].pos.x, chorig[i].pos.y, &(*mesh).data, (*mesh).dim);
}
i++;
current = chain[i];
}
if ( !change ) {
/* incremental update failed: rebuild the mesh and recount holes */
mesh_fill(mesh,npart,conf->particle, sim);
return (long) (mesh_findholes(mesh) - sim->wl.minorder[wli]);
}
return sim->wl.currorder[wli];
}
/* Rebuild the mesh occupancy from scratch: clear every cell, then stamp each
   particle of the tracked Wang-Landau type onto the mesh. */
void mesh_fill(struct meshs *mesh, long npart, struct particles *particle, struct sim * sim)
{
    long n;
    int mesh_addpart(double posx, double posy, int **mesh, int dim[2]);
    const long ncells = mesh->dim[0] * mesh->dim[1];

    for (n = 0; n < ncells; n++)
        mesh->data[n] = 0;
    for (n = 0; n < npart; n++) {
        /* only tracked-type particles occupy the mesh */
        if (particle[n].type == sim->wl.wlmtype)
            mesh_addpart(particle[n].pos.x, particle[n].pos.y, &mesh->data, mesh->dim);
    }
}
/* add particle on coordinates posx posy to mesh return 0 if it was placed on empty spot*/
/* Occupancy convention: cells with value >= 0 are free, occupied cells are
   negative (each covering particle decrements the cell). A particle covers
   the 3x3 block of cells around its own cell (periodic via mesh_square).
   Returns 1 only when every covered cell was already occupied. */
int mesh_addpart(double posx, double posy, int **mesh, int dim[2])
{
int i, square[9], onhole;
double resid;
void mesh_square(int , int , int [2], int (*)[9]);
onhole = 1;
/* indices of the 3x3 neighbourhood; INBOX presumably folds the coordinate
   into [0,1) with resid as scratch -- TODO confirm macro */
mesh_square( (int) (INBOX(posx,resid) * dim[0]), (int) (INBOX(posy,resid) * dim[1]) , dim, &square);
for(i=0;i<9;i++) {
/* defensive bounds check kept from debugging; should never trigger */
if ( (square[i] >= dim[0]*dim[1])||(square[i] <0) ) {
printf ("Error: trying to write to %d\n",square[i]);
printf ("%d %d and %d\n", (int) (INBOX(posx,resid) * dim[0]), (int) (INBOX(posy,resid) * dim[1]),i );
fflush(stdout);
}
if ( ((*mesh)[ square[i] ]) >= 0 ) onhole = 0; /* covered a free cell */
(*mesh)[ square[i] ]--;
}
return onhole;
}
/* remove particle on coordinates posx posy from mesh and return 0 if there is a empty spot now*/
/* Increments the 3x3 block of cells the particle covered; returns 0 as soon
   as some cell reaches exactly 0 (a new free spot appeared), otherwise 1. */
int mesh_removepart(double posx, double posy, int **mesh, int dim[2])
{
int i, square[9];
double resid;
void mesh_square(int , int , int [2], int (*)[9]);
/* INBOX presumably folds the coordinate into [0,1) -- TODO confirm macro */
mesh_square((int) (INBOX(posx,resid) * dim[0]), (int) (INBOX(posy,resid) * dim[1]) , dim, &square);
for(i=0;i<9;i++) {
//DEBUG if (square[i] >= dim[0]*dim[1]) printf ("Error: trying to write to %d\n",square[i]);
(*mesh)[ square[i] ]++;
if ( ((*mesh)[ square[i] ]) == 0 ) return 0; /* cell just became free */
}
return 1;
}
/* Fill square[0..8] with the flat indices of the 3x3 neighbourhood of cell
   (x,y) on a dim[0] x dim[1] grid with periodic wrapping.
   Order per row: centre column, left, right; rows in the order y, y-1, y+1
   (so square[0] is the cell itself). */
void mesh_square(int x, int y, int dim[2], int (*square)[9])
{
    /* row offsets in the original fill order: same row, row below, row above */
    const int rowoff[3] = {0, -1, 1};
    int k = 0;
    int r, col, row;

    for (r = 0; r < 3; r++) {
        row = y + rowoff[r];
        if (row < 0) row = dim[1] - 1;          /* wrap bottom edge */
        else if (row == dim[1]) row = 0;        /* wrap top edge */

        (*square)[k++] = x + dim[0]*row;        /* centre column */

        col = x - 1;                            /* left, wrapped */
        if (col < 0) col = dim[0] - 1;
        (*square)[k++] = col + dim[0]*row;

        col = x + 1;                            /* right, wrapped */
        if (col == dim[0]) col = 0;
        (*square)[k++] = col + dim[0]*row;
    }
}
/* Fill neighbors[0..3] with the flat indices of the 4-connected neighbours
   (left, right, down, up) of flat cell pos, with periodic wrapping. */
void mesh_neighbors(int pos, int dim[2], int neighbors[4])
{
    const int x = pos % dim[0];
    const int y = pos / dim[0];
    int w;

    w = (x == 0) ? dim[0] - 1 : x - 1;          /* left neighbour */
    neighbors[0] = w + dim[0]*y;
    w = (x == dim[0] - 1) ? 0 : x + 1;          /* right neighbour */
    neighbors[1] = w + dim[0]*y;
    w = (y == 0) ? dim[1] - 1 : y - 1;          /* row below */
    neighbors[2] = x + dim[0]*w;
    w = (y == dim[1] - 1) ? 0 : y + 1;          /* row above */
    neighbors[3] = x + dim[0]*w;
}
/* returns the number of holes and a list of mesh points belonging to each of them */
/* NOTE(review): despite the old comment above, the function returns the SIZE
   of the largest connected free region ("hole"), not the number of holes. */
/* Flood-fill clustering: free cells (value 0) are grouped into clusters
   labelled 1..n, using mesh->tmp as a breadth-first work list; occupied
   cells are negative. Positive labels left over from a previous call are
   cleared first. */
int mesh_findholes(struct meshs *mesh)
{
int i,j, k, n, size, li, maxsize;
int neighbors[4];
void mesh_neighbors(int, int [2], int [4]);
n=0;
maxsize = 0;
/* reset scratch list and clear labels from a previous run */
for (i=0;i<((*mesh).dim[0] * (*mesh).dim[1]);i++) {
(*mesh).tmp[i] = 0;
if ( (*mesh).data[i] > 0 ) (*mesh).data[i] = 0;
}
i=0;
// go through all mesh points
while ( i < ((*mesh).dim[0] * (*mesh).dim[1]) ) {
// test if mesh point is occupied
if ( (*mesh).data[i] != 0 ) { i++; }
else {
// mesh point is free, create a new cluster
n++;
(*mesh).data[i] = n;
// start new cluster, put mesh point as first element, and set list pointer on first element
//DEBUG if (n >= mesh.dim[0]*mesh.dim[1]) printf ("Error: trying to write to sizes position %d\n",n);
size = 1;
(*mesh).tmp[0] = i;
li = 0;
// go through all elements of the cluster
while ( li < size ) {
//go through all neighbors
j = (*mesh).tmp[li];
mesh_neighbors(j, (*mesh).dim, neighbors);
for ( k=0; k<4; k++ ) {
// test if status is free and append it to the cluster
if ( (*mesh).data[ neighbors[k] ] == 0 ) {
(*mesh).data[ neighbors[k] ] = n;
// append mesh point as element in the list
(*mesh).tmp[size] = neighbors[k];
size++;
}
/* a neighbour carrying an older positive label means clusters merged
   across the periodic boundary -- should not happen */
if ( (*mesh).data[ neighbors[k] ] > 0 && (*mesh).data[ neighbors[k] ]<n ) {
fprintf(stderr,"Error: Mesh cluster out of range, propably going infinite through pbc.");
fflush(stderr);
}
}
li++;
}
if (size > maxsize) maxsize = size;
}
}
return maxsize;
}
/* Size the mesh to the simulation box using cells of roughly meshsize,
   (re)allocate its storage, fill it with the current configuration and
   return the size of the largest hole. */
int mesh_init(struct meshs *mesh, double meshsize, long npart, struct conf * conf, struct sim * sim)
{
    void mesh_fill(struct meshs *, long , struct particles *, struct sim * sim);
    int mesh_findholes(struct meshs *);
    int ncells;

    mesh->dim[0] = (int)(conf->box.x/meshsize);
    mesh->dim[1] = (int)(conf->box.y/meshsize);
    ncells = mesh->dim[0] * mesh->dim[1];

    /* drop any previous allocation before resizing */
    if (mesh->data != NULL) free(mesh->data);
    if (mesh->tmp != NULL) free(mesh->tmp);
    mesh->data = malloc(sizeof(int) * ncells);
    mesh->tmp = malloc(sizeof(int) * (ncells + 1)); /* +1: flood-fill work list */

    /* fill the mesh with particles and run the hole-cluster analysis */
    mesh_fill(mesh, npart, conf->particle, sim);
    return mesh_findholes(mesh);
}
/* Dump the mesh occupancy row by row, followed by the current hole order. */
void mesh_print (struct meshs *mesh)
{
    int n;
    int mesh_findholes(struct meshs *);
    const int ncells = mesh->dim[0] * mesh->dim[1];

    printf("mesh:\n");
    for (n = 0; n < ncells; n++) {
        printf("%d ", mesh->data[n]);
        if ((n + 1) % mesh->dim[0] == 0)
            printf("\n");            /* end of a mesh row */
    }
    printf("hole %d:\n", mesh_findholes(mesh));
    printf("\n");
}
/* Copy mesh geometry and occupancy from source into target, reusing target's
   buffers when the dimensions already match. The tmp scratch buffer is only
   grown, never shrunk. Always returns 0.
   NOTE(review): malloc results are not checked -- OOM would crash in memcpy. */
int mesh_cpy (struct meshs *target, struct meshs *source)
{
if ( (*target).data != NULL) {
/* same dimensions: reuse the existing data buffer */
if ( ((*target).dim[0] == (*source).dim[0]) && ((*target).dim[1] == (*source).dim[1]) ) {
memcpy((*target).data,(*source).data, sizeof(int)* ((*target).dim[0] * (*target).dim[1]) );
return 0;
} else {
free ((*target).data);
/* grow the scratch list only when the source mesh is larger */
if ( (*source).dim[0] * (*source).dim[1] > (*target).dim[0] * (*target).dim[1] ) {
if ((*target).tmp != NULL ) free ((*target).tmp);
(*target).tmp = malloc( sizeof(int)* ((*source).dim[0] * (*source).dim[1] + 1));
}
}
}
(*target).dim[0] = (*source).dim[0];
(*target).dim[1] = (*source).dim[1];
(*target).data = malloc( sizeof(int)* ((*target).dim[0] * (*target).dim[1]));
/* first-time use: tmp has never been allocated yet */
if ((*target).tmp == NULL ) (*target).tmp = malloc( sizeof(int)* ((*source).dim[0] * (*source).dim[1] + 1));
memcpy((*target).data,(*source).data, sizeof(int)* ((*target).dim[0] * (*target).dim[1]) );
return 0;
}
/* Release the mesh storage. The pointers are reset to NULL so that a later
   mesh_init()/mesh_cpy() (which free non-NULL pointers) or a repeated
   mesh_end() does not free them a second time. Always returns 0. */
int mesh_end(struct meshs *mesh)
{
    /* free(NULL) is a no-op, so no guards are needed */
    free(mesh->data);
    free(mesh->tmp);
    mesh->data = NULL;
    mesh->tmp = NULL;
    return 0;
}
/*..............................................................................*/
/*........................RADIUS HOLE IN CENTER MEMBRANE ORDER PARAM............*/
/*return current bin of free radius*/
/* Scan outward over the radius histogram and return (index - 1) of the first
   bin that starts a run of four consecutive occupied bins, i.e. the last bin
   of the central hole. Returns -100 as an error sentinel when no such run
   exists. NOTE(review): returns -1 when the run starts at bin 0 -- confirm
   callers treat that as a valid order value. */
long radiushole_order(struct sim * sim)
{
long i;
for (i=0;i<sim->wl.radiusholemax-3;i++){
if ((sim->wl.radiushole[i] >0 ) && (sim->wl.radiushole[i+1] >0 ) && (sim->wl.radiushole[i+2] >0 ) && (sim->wl.radiushole[i+3] >0 ))
return i-1;
}
return -100;
}
/* Map a cylindrical radius onto its Wang-Landau histogram bin for window
   wli, using the window's minimum (minorder) and bin width (dorder). */
long radiushole_position(double radius, struct sim * sim, int wli)
{
    double shifted = radius - sim->wl.minorder[wli];
    return (long) ceil(shifted / sim->wl.dorder[wli]);
}
/* return change in order parameter when one particle moves*/
/* Incrementally updates the radius histogram for a single-particle move and
   returns the new hole order. Only particles of the tracked type, and only
   positions in the upper half (above *position in z, nearest image),
   contribute. Recomputes the order only when a bin changed between 0 and
   nonzero; otherwise returns the cached value. Returns -100 on a bad bin. */
long radiusholeorder_moveone(struct vector *oldpos, struct conf *conf, struct sim * sim, long target,int wli, struct vector *position)
{
long nr,or; /* position in radiushole */
double rx,ry,z;
BOOL oz,nz; /* old/new position above the reference plane? */
long radiushole_position(double radius, struct sim * sim,int);
long radiushole_order(struct sim *sim);
double anint(double);
void radiushole_print (long *radiushole, long length);
if ( conf->particle[target].type != sim->wl.wlmtype )
return sim->wl.currorder[wli];
z=conf->particle[target].pos.z - position->z; /*if above position*/
if (z-anint(z) < 0) nz = FALSE;
else nz=TRUE;
z=oldpos->z - position->z; /*if above position*/
if (z-anint(z) < 0) oz = FALSE;
else oz=TRUE;
/* neither position in the tracked half -> histogram unchanged */
if ( !(nz) && !(oz) )
return sim->wl.currorder[wli];
/* radial distance of the new position from the z axis (nearest image) */
rx = conf->box.x * (conf->particle[target].pos.x - anint(conf->particle[target].pos.x));
ry = conf->box.y * (conf->particle[target].pos.y - anint(conf->particle[target].pos.y));
nr = radiushole_position(sqrt(rx*rx+ry*ry),sim,wli);
if (nr < 0)
return -100;
/*particle move over radius bins*/
if (nz) {
sim->wl.radiushole[nr]++;
}
if (oz) {
rx = conf->box.x * (oldpos->x - anint(oldpos->x));
ry = conf->box.y * (oldpos->y - anint(oldpos->y));
or = radiushole_position(sqrt(rx*rx+ry*ry),sim,wli);
sim->wl.radiushole[or]--;
if ( sim->wl.radiushole[or] < 0 ) {
printf ("Error(single particle move): trying to make number of beads in radiuspore smaller than 0 at position %ld\n",or);
radiushole_print(sim->wl.radiushole,sim->wl.radiusholemax);
fflush(stdout);
}
/* bin emptied -> hole shape may have changed */
if (sim->wl.radiushole[or] ==0)
return radiushole_order(sim);
}
/* bin newly occupied -> hole shape may have changed */
if ( (nz) && (sim->wl.radiushole[nr] ==1) ) {
return radiushole_order(sim);
}
return sim->wl.currorder[wli];
}
/* return change in order parameter when chain moves*/
/* Chain version of radiusholeorder_moveone: add the new positions of all
   tracked-type chain particles to the radius histogram, then subtract their
   original positions (chorig is index-aligned with chain; chain terminates
   with a negative index). The hole order is recomputed only if some bin
   crossed the 0/nonzero boundary. Returns -100 on a bad bin. */
long radiusholeorder_movechain(long chain[MAXN], struct conf * conf, struct sim * sim,struct particles chorig[MAXCHL],int wli, struct vector *position)
{
long i,current,nr;
double rx,ry,z;
BOOL change=FALSE;
long radiushole_position(double radius, struct sim * sim,int);
long radiushole_order(struct sim *sim);
double anint(double);
void radiushole_print (long *radiushole, long length);
i = 0;
rx=0;
current = chain[0];
/* add contributions at the new positions */
while (current >=0 ) {
if ( conf->particle[current].type == sim->wl.wlmtype ) {
z=conf->particle[current].pos.z - position->z; /*if above system CM*/
if (z-anint(z) > 0) {
rx = conf->box.x * (conf->particle[current].pos.x - anint(conf->particle[current].pos.x));
ry = conf->box.y * (conf->particle[current].pos.y - anint(conf->particle[current].pos.y));
nr = radiushole_position(sqrt(rx*rx+ry*ry),sim,wli);
if (nr < 0)
return -100;
sim->wl.radiushole[nr]++;
if ( sim->wl.radiushole[nr] == 1 ) change = TRUE; /* bin newly occupied */
}
}
i++;
current = chain[i];
}
i = 0;
current = chain[0];
/* remove contributions at the original positions */
while (current >=0 ) {
if ( conf->particle[current].type == sim->wl.wlmtype ) {
z=chorig[i].pos.z - position->z; /*if above system CM*/
if (z-anint(z) > 0) {
rx = conf->box.x * (chorig[i].pos.x - anint(chorig[i].pos.x));
ry = conf->box.y * (chorig[i].pos.y - anint(chorig[i].pos.y));
nr = radiushole_position(sqrt(rx*rx+ry*ry),sim,wli);
sim->wl.radiushole[nr]--;
if ( sim->wl.radiushole[nr] < 0 ) {
printf ("Error (chainmove): trying to make number of beads in radiuspore smaller than 0 at position %ld\n",nr);
radiushole_print(sim->wl.radiushole,sim->wl.radiusholemax);
fflush(stdout);
}
if ( sim->wl.radiushole[nr] == 0 ) change = TRUE; /* bin emptied */
}
}
i++;
current = chain[i];
}
if ( change ) {
return radiushole_order(sim);
}
return sim->wl.currorder[wli];
}
/* filling the radiushole above vec*/
/* Rebuild the radius histogram from scratch for all particles of the tracked
   type that lie above *position (nearest image in z), then return the hole
   order. Grows the histogram if the box diagonal now needs more bins.
   Returns -100 when a particle maps onto a negative bin.
   NOTE(review): malloc result is not checked -- kept as in the rest of the
   file, which aborts implicitly on OOM. */
long radiushole_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli, struct vector *position)
{
    long i,nr,radiusholemax;
    double rx,ry,z;
    long radiushole_position(double radius, struct sim * sim,int);
    long radiushole_order(struct sim *sim);
    double anint(double);

    /* largest bin any particle can occupy: xy box diagonal */
    radiusholemax = radiushole_position(sqrt(conf->box.x*conf->box.x+conf->box.y*conf->box.y),sim,wli);
    if ( radiusholemax > sim->wl.radiusholemax ) {
        if (sim->wl.radiushole != NULL)
            free(sim->wl.radiushole);
        sim->wl.radiushole = malloc( sizeof(long)* (radiusholemax));
        sim->wl.radiusholemax = radiusholemax;
    }
    /* BUGFIX: zero the WHOLE allocated histogram (sim->wl.radiusholemax bins),
       not just the first radiusholemax bins -- when the box shrank, stale
       counts in the tail bins would corrupt radiushole_order() */
    for (i=0;i<sim->wl.radiusholemax;i++) {
        sim->wl.radiushole[i] = 0;
    }
    for (i=0; i< topo->npart; i++) {
        /* bin the radial distance from the z axis of every tracked particle
           that sits above the reference plane */
        if ( conf->particle[i].type == sim->wl.wlmtype ) {
            z=conf->particle[i].pos.z - (*position).z; /*if above position*/
            if (z-anint(z) > 0) {
                rx = conf->box.x * (conf->particle[i].pos.x - anint(conf->particle[i].pos.x));
                ry = conf->box.y * (conf->particle[i].pos.y - anint(conf->particle[i].pos.y));
                nr = radiushole_position(sqrt(rx*rx+ry*ry),sim,wli);
                if (nr < 0)
                    return -100;
                sim->wl.radiushole[nr]++;
            }
        }
    }
    return radiushole_order(sim);
}
/* Print the radius-hole histogram (length bins) on a single line. */
void radiushole_print (long *radiushole, long length)
{
    long n;
    printf("radiushole:\n");
    for (n = 0; n < length; n++)
        printf("%ld ", radiushole[n]);
    printf("\n");
}
/* Resize *target to hold sourcelength longs and copy *source into it.
   targetlength is accepted for interface compatibility but not needed:
   realloc(NULL, n) acts as malloc, so one call covers both the fresh and
   the resize case. Returns 0 on success, 1 on allocation failure (in which
   case *target is left unchanged and still valid -- the old code overwrote
   it with realloc's result, leaking the buffer and then crashing in memcpy). */
int longarray_cpy (long **target, long **source, long targetlength, long sourcelength)
{
    long *tmp;

    tmp = (long*) realloc(*target, sizeof(long)*(sourcelength));
    if (tmp == NULL)
        return 1;
    *target = tmp;
    memcpy((*target),(*source), sizeof(long)*(sourcelength));
    return 0;
}
/*..............................................................................*/
/* ............................... particles in contact ..................... */
/* Histogram bin for the current count of particles in contact, in Wang-Landau
   window wli. */
long contparticles_order(struct sim * sim, int wli)
{
    double bins = (sim->wl.partincontact - sim->wl.minorder[wli]) / sim->wl.dorder[wli];
    return (long) ceil(bins);
}
/* TRUE if *vec lies within the contact distance (WL_CONTACTS, squared) of
   particle 0, using the nearest periodic image. */
BOOL particleinncontact (struct vector *vec, struct conf *conf)
{
    double anint(double);
    double dx, dy, dz;

    dx = vec->x - conf->particle[0].pos.x;
    dy = vec->y - conf->particle[0].pos.y;
    dz = vec->z - conf->particle[0].pos.z;
    /* wrap to the nearest image and scale to real units */
    dx = conf->box.x * (dx - anint(dx));
    dy = conf->box.y * (dy - anint(dy));
    dz = conf->box.z * (dz - anint(dz));

    return (dx*dx + dy*dy + dz*dz < WL_CONTACTS) ? TRUE : FALSE;
}
/* Update the particles-in-contact counter for a single-particle move from
   *oldpos to the particle's current position, and return the new order bin.
   Particles that are not of the tracked type leave the counter untouched. */
long contparticles_moveone(struct vector *oldpos, struct conf *conf, struct sim * sim, long target,int wli)
{
    long contparticles_order(struct sim * sim, int wli);
    BOOL particleinncontact (struct vector *vec, struct conf *conf);
    BOOL nowin, wasin;

    if (conf->particle[target].type != sim->wl.wlmtype)
        return sim->wl.currorder[wli];

    nowin = particleinncontact(&(conf->particle[target].pos), conf);
    wasin = particleinncontact(oldpos, conf);
    if (nowin)
        sim->wl.partincontact++;
    if (wasin)
        sim->wl.partincontact--;
    return contparticles_order(sim, wli);
}
/* Update the particles-in-contact counter for a whole-chain move and return
   the new order bin. chain holds particle indices terminated by a negative
   value; chorig holds the original positions, index-aligned with chain. */
long contparticles_movechain(long chain[MAXN], struct conf * conf, struct sim * sim,struct particles chorig[MAXCHL],int wli)
{
    long idx, p;
    long contparticles_order(struct sim * sim, int wli);
    BOOL particleinncontact (struct vector *vec, struct conf *conf);

    /* contacts gained at the new positions */
    for (idx = 0; (p = chain[idx]) >= 0; idx++) {
        if (conf->particle[p].type != sim->wl.wlmtype)
            continue;
        if (particleinncontact(&(conf->particle[p].pos), conf))
            sim->wl.partincontact++;
    }
    /* contacts lost at the original positions */
    for (idx = 0; (p = chain[idx]) >= 0; idx++) {
        if (conf->particle[p].type != sim->wl.wlmtype)
            continue;
        if (particleinncontact(&(chorig[idx].pos), conf))
            sim->wl.partincontact--;
    }
    return contparticles_order(sim, wli);
}
/* Recount from scratch how many tracked-type particles are in contact with
   particle 0 (the reference, hence the loop starts at 1) and return the
   corresponding order bin. */
long contparticles_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli)
{
    long n;
    long contparticles_order(struct sim * sim, int wli);
    BOOL particleinncontact (struct vector *vec, struct conf *conf);

    sim->wl.partincontact = 0;
    for (n = 1; n < topo->npart; n++) {
        if (conf->particle[n].type == sim->wl.wlmtype
            && particleinncontact(&(conf->particle[n].pos), conf))
            sim->wl.partincontact++;
    }
    return contparticles_order(sim, wli);
}
/*..............................................................................*/
/*........................GEOMETRIC STUFF.......................................*/
/*..............................................................................*/
/*..............................................................................*/
/*
Find closest distance between line segments and return its vector
gets orientations and lengths of line segments and the vector connecting
their center os masses (from vec1 to vec2)
*/
// Copyright 2001, softSurfer (www.softsurfer.com)
// This code may be freely used and modified for any purpose
// providing that this copyright notice is included with it.
// SoftSurfer makes no warranty for this code, and cannot be held
// liable for any real or imagined damage resulting from its use.
// Users of this code must verify correctness for their application.
/* Closest-approach vector between two line segments (softSurfer dist3D
   algorithm). Inputs are the unit directions dir1/dir2, the half-lengths
   halfl1/halfl2, and r_cm, the vector between the segment centres (from
   segment 1 to segment 2). The segment parameters sc, tc are solved as
   clamped ratios sN/sD, tN/tD; the statement order below is part of the
   algorithm -- do not reorder. */
struct vector mindist_segments(struct vector dir1, double halfl1,
struct vector dir2, double halfl2, struct vector r_cm)
{
struct vector u,v,w,vec;
double a,b,c,d,e,D,sc,sN,sD,tc,tN,tD;
struct vector vec_scale(struct vector, double);
u = vec_scale(dir1,2.0*halfl1); //S1.P1 - S1.P0;
v = vec_scale(dir2,2.0*halfl2); //S2.P1 - S2.P0;
/* w = vector between the segment start points */
w.x = dir2.x*halfl2 - dir1.x*halfl1 - r_cm.x;
w.y = dir2.y*halfl2 - dir1.y*halfl1 - r_cm.y;
w.z = dir2.z*halfl2 - dir1.z*halfl1 - r_cm.z; //S1.P0 - S2.P0;
a = DOT(u,u); // always >= 0
b = DOT(u,v);
c = DOT(v,v); // always >= 0
d = DOT(u,w);
e = DOT(v,w);
D = a*c - b*b; // always >= 0
sc = D;
sN = D;
sD = D; // sc = sN / sD, default sD = D >= 0
tc = D;
tN = D;
tD = D; // tc = tN / tD, default tD = D >= 0
// compute the line parameters of the two closest points
if (D < 0.00000001) { // the lines are almost parallel
sN = 0.0; // force using point P0 on segment S1
sD = 1.0; // to prevent possible division by 0.0 later
tN = e;
tD = c;
}
else { // get the closest points on the infinite lines
sN = (b*e - c*d);
tN = (a*e - b*d);
if (sN < 0.0) { // sc < 0 => the s=0 edge is visible
sN = 0.0;
tN = e;
tD = c;
}
else if (sN > sD) { // sc > 1 => the s=1 edge is visible
sN = sD;
tN = e + b;
tD = c;
}
}
if (tN < 0.0) { // tc < 0 => the t=0 edge is visible
tN = 0.0;
// recompute sc for this edge
if (-d < 0.0)
sN = 0.0;
else if (-d > a)
sN = sD;
else {
sN = -d;
sD = a;
}
}
else if (tN > tD) { // tc > 1 => the t=1 edge is visible
tN = tD;
// recompute sc for this edge
if ((-d + b) < 0.0)
sN = 0;
else if ((-d + b) > a)
sN = sD;
else {
sN = (-d + b);
sD = a;
}
}
// finally do the division to get sc and tc
if (fabs(sN) < 0.00000001) sc = 0.0 ;
else sc = sN / sD;
if (fabs(tN) < 0.00000001) tc = 0.0 ;
else tc = tN / tD;
// get the difference of the two closest points
//Vector = w + (sc * u) - (tc * v); // = S1(sc) - S2(tc)
vec.x = u.x*sc + w.x - v.x*tc;
vec.y = u.y*sc + w.y - v.y*tc;
vec.z = u.z*sc + w.z - v.z*tc;
return vec;
}
/*..............................................................................*/
/*
Find closest distance between line segment and point and return it as vector
(from point to closest segment point)
Function gets orientation and length of line segments and the vector connecting
their center os masses (from segment to point)
*/
struct vector mindist_segmentpoint(struct vector dir1, double length, struct vector r_cm)
{
struct vector vec;
double c,d,halfl;
halfl=length*0.5;
c = DOT(dir1,r_cm);
if (c >= halfl) d = halfl;
else {
if (c > -halfl) d = c;
else d = -halfl;
}
vec.x = - r_cm.x + dir1.x * d;
vec.y = - r_cm.y + dir1.y * d;
vec.z = - r_cm.z + dir1.z * d;
return vec;
}
/*..............................................................................*/
/*
 Determines whether two particles overlap.
 Returns 1 if there is an overlap, 0 if not.
 Handles sphere/sphere, spherocylinder/spherocylinder and mixed pairs; the
 minimum separation is compared against the pair's contact distance.
*/
int overlap(struct particles part1, struct particles part2,
struct vector box, struct ia_param ia_params[MAXT][MAXT])
{
    double b, c, d, e, f;   /* Coefficients in distance quadratic */
    double boundary;        /* Half length of central boundary zone of quadratic */
    double det;
    double halfl;           /* Half length of cylinder */
    double s0, t0;          /* det times location of min separation of infinite lines */
    double ss, tt;          /* Location of min separation of line segments */
    struct vector r_cm;     /* Vector between centres of mass */
    double dist;            /* Distance between particles*/
    struct vector distvec;  /* Distance vector between particles*/
    double linemin(double, double);
    struct vector image(struct vector, struct vector, struct vector);

    /* nearest-image vector between the centres of mass */
    r_cm = image(part1.pos, part2.pos, box);
    if ((part1.type >= SP) && (part2.type >= SP)) { /*we have two spheres - most common, do nothing*/
        dist=sqrt(DOT(r_cm,r_cm));
    } else {
        if ((ia_params[part1.type][part2.type].geotype[0] < SP) && (ia_params[part1.type][part2.type].geotype[1] < SP)) { /*we have two spherocylinders*/
            /*finding closest contact between the two axis segments*/
            b = -DOT(part1.dir, part2.dir);
            d = DOT(part1.dir, r_cm);
            e = -DOT(part2.dir, r_cm);
            f = DOT(r_cm, r_cm);
            det = 1.0 - b*b;
            /* BUGFIX: take the mean of the two half-lengths. The old code,
               "halfl = half_len[0] = half_len[1]; halfl /= 2;", assigned
               half_len[1] over half_len[0] (corrupting the parameter table)
               and produced half_len[1]/2 instead of the intended mean. */
            halfl = (ia_params[part1.type][part2.type].half_len[0]
                     + ia_params[part1.type][part2.type].half_len[1]) / 2.0;
            boundary = det * halfl;
            /* Location of smallest separation of the infinite lines */
            s0 = b*e - d;
            t0 = b*d - e;
            /* Location of smallest separation of line segments */
            if (s0 >= boundary) {
                if (t0 >= boundary) {
                    /* Region 2 */
                    if ( d + halfl + halfl*b < 0.0 ) {
                        ss = halfl;
                        tt = linemin( -ss*b - e, halfl );
                    } else {
                        tt = halfl;
                        ss = linemin( -tt*b - d, halfl );
                    }
                } else if (t0 >= -boundary) {
                    /* Region 1 */
                    ss = halfl;
                    tt = linemin( -ss*b - e, halfl );
                } else {
                    /* Region 8 */
                    if ( d + halfl - halfl*b < 0.0 ) {
                        ss = halfl;
                        tt = linemin( -ss*b - e, halfl );
                    } else {
                        tt = -halfl;
                        ss = linemin( -tt*b - d, halfl );
                    }
                }
            } else if (s0 >= -boundary) {
                if (t0 >= boundary) {
                    /* Region 3 */
                    tt = halfl;
                    ss = linemin( -tt*b - d, halfl );
                } else if (t0 >= -boundary) {
                    /* Region 0 -- interior minimum */
                    ss = s0/det;
                    tt = t0/det;
                } else {
                    /* Region 7 */
                    tt = -halfl;
                    ss = linemin( -tt*b - d, halfl );
                }
            } else {
                if (t0 >= boundary) {
                    /* Region 4 */
                    if ( d - halfl + halfl*b > 0.0 ) {
                        ss = -halfl;
                        tt = linemin( -ss*b - e, halfl );
                    } else {
                        tt = halfl;
                        ss = linemin( -tt*b - d, halfl );
                    }
                } else if (t0 >= -boundary) {
                    /* Region 5 */
                    ss = -halfl;
                    tt = linemin( -ss*b - e, halfl );
                } else {
                    /* Region 6 */
                    if ( d - halfl - halfl*b > 0.0 ) {
                        ss = -halfl;
                        tt = linemin( -ss*b - e, halfl );
                    } else {
                        tt = -halfl;
                        ss = linemin( -tt*b - d, halfl );
                    }
                }
            }
            /*ss and tt are location of min separation of line segments */
            dist=sqrt(f + ss*ss + tt*tt + 2.0*(ss*d + tt*e + ss*tt*b));
        } else {
            if (ia_params[part1.type][part2.type].geotype[0] < SP) { /*We have one spherocylinder -it is first one*/
                /*closest vector from the spherocylinder axis to the sphere centre*/
                halfl=ia_params[part1.type][part2.type].half_len[0];
                c = DOT(part1.dir,r_cm);
                if (c >= halfl) d = halfl;
                else {
                    if (c > -halfl) d = c;
                    else d = -halfl;
                }
                distvec.x = - r_cm.x + part1.dir.x * d;
                distvec.y = - r_cm.y + part1.dir.y * d;
                distvec.z = - r_cm.z + part1.dir.z * d;
                dist=sqrt(DOT(distvec,distvec));
            } else { /*last option: first one is the sphere, second the spherocylinder*/
                halfl=ia_params[part1.type][part2.type].half_len[1];
                c = DOT(part2.dir,r_cm);
                if (c >= halfl) d = halfl;
                else {
                    if (c > -halfl) d = c;
                    else d = -halfl;
                }
                distvec.x = r_cm.x - part2.dir.x * d;
                distvec.y = r_cm.y - part2.dir.y * d;
                distvec.z = r_cm.z - part2.dir.z * d;
                dist=sqrt(DOT(distvec,distvec));
            }
        }
    }
    /* Overlap exists if the smallest separation is below the contact distance.
       NOTE(review): the original comment said "diameter" but the code compares
       against sigma*0.5 -- confirm the definition of sigma for this table. */
    if (dist < ia_params[part1.type][part2.type].sigma*0.5 ) {
        return 1;
    } else {
        return 0;
    }
}
/*..............................................................................*/
/* Clamp criterion to the closed interval [-halfl, halfl]. */
double linemin(double criterion, double halfl)
{
    if (criterion >= halfl)
        return halfl;
    return (criterion >= -halfl) ? criterion : -halfl;
}
/*..............................................................................*/
/*........................SOME USEFUL MATH......................................*/
/*..............................................................................*/
/*
ran2 from Numerical Recipes.
*/
#define IM1 2147483563
#define IM2 2147483399
#define AM (1.0/IM1)
#define IMM1 (IM1-1)
#define IA1 40014
#define IA2 40692
#define IQ1 53668
#define IQ2 52774
#define IR1 12211
#define IR2 3791
#define NTAB 32
#define NDIV (1+IMM1/NTAB)
#define EPS 1.2e-7
#define RNMX (1.0-EPS)
/* Long-period (> 2e18) uniform random generator of L'Ecuyer with Bays-Durham
   shuffle (Numerical Recipes "ran2"). Returns a uniform deviate in (0,1),
   endpoints excluded. Initialize/reseed by calling with *idum negative; do
   not alter *idum between successive calls. Uses static state and is
   therefore not thread-safe. */
double ran2(long *idum)
{
int j;
long k;
static long idum2=123456789;
static long iy=0;
static long iv[NTAB];
double temp;
if (*idum <= 0) { /* initialize / reseed */
if (-(*idum) < 1) *idum=1; /* guard against *idum == 0 */
else *idum = -(*idum);
idum2=(*idum);
/* warm up and load the shuffle table */
for (j=NTAB+7;j>=0;j--) {
k=(*idum)/IQ1;
*idum=IA1*(*idum-k*IQ1)-k*IR1; /* Schrage's method: no overflow */
if (*idum < 0) *idum += IM1;
if (j < NTAB) iv[j] = *idum;
}
iy=iv[0];
}
/* advance the first generator */
k=(*idum)/IQ1;
*idum=IA1*(*idum-k*IQ1)-k*IR1;
if (*idum < 0) *idum += IM1;
/* advance the second generator */
k=idum2/IQ2;
idum2=IA2*(idum2-k*IQ2)-k*IR2;
if (idum2 < 0) idum2 += IM2;
/* shuffle and combine the two streams */
j=iy/NDIV;
iy=iv[j]-idum2;
iv[j] = *idum;
if (iy < 1) iy += IMM1;
if ((temp=AM*iy) > RNMX) return RNMX; /* never return exactly 1.0 */
else {
return temp;
}
}
#undef IM1
#undef IM2
#undef AM
#undef IMM1
#undef IA1
#undef IA2
#undef IQ1
#undef IQ2
#undef IR1
#undef IR2
#undef NTAB
#undef NDIV
#undef EPS
#undef RNMX
/*..............................................................................*/
/*
From Numerical Recipes. Simplified to deal specifically with 3*3 matrices
(stored as elements [1...3][1...3] or a 4*4 array).
*/
/* Householder reduction of a real symmetric 3x3 matrix to tridiagonal form
   (Numerical Recipes tred2, 1-indexed: elements [1..3][1..3] of a 4x4 array).
   On output d holds the diagonal and e the sub-diagonal (e[1] = 0).
   The eigenvector-accumulation parts of the NR original are commented out:
   only eigenvalues are needed here (see tqli below). */
void tred2(double a[4][4], double d[4], double e[4])
{
int l, k, j, i;
double scale, hh, h, g, f;
for (i=3; i>=2; i--) {
l=i-1;
h=scale=0.0;
if (l > 1) {
/* scale the row to avoid under/overflow in the norm */
for (k=1;k<=l;k++) scale += fabs(a[i][k]);
if (scale == 0.0) e[i]=a[i][l]; /* row already reduced: skip transformation */
else {
for (k=1;k<=l;k++) {
a[i][k] /= scale;
h += a[i][k]*a[i][k];
}
f=a[i][l];
g=(f >= 0.0 ? -sqrt(h) : sqrt(h)); /* sign chosen to avoid cancellation */
e[i]=scale*g;
h -= f*g;
a[i][l]=f-g;
f=0.0;
for (j=1;j<=l;j++) {
/* a[j][i]=a[i][j]/h; */
g=0.0;
for (k=1;k<=j;k++) g += a[j][k]*a[i][k];
for (k=j+1;k<=l;k++) g += a[k][j]*a[i][k];
e[j]=g/h;
f += e[j]*a[i][j];
}
hh=f/(h+h);
for (j=1;j<=l;j++) {
f=a[i][j];
e[j]=g=e[j]-hh*f;
for (k=1;k<=j;k++) a[j][k] -= (f*e[k]+g*a[i][k]);
}
}
} else e[i]=a[i][l];
d[i]=h;
}
/* d[1]=0.0; */
e[1]=0.0;
for (i=1; i<=3; i++) {
/* l=i-1;
if (d[i]) {
for (j=1;j<=l;j++) {
g=0.0;
for (k=1;k<=l;k++) g += a[i][k]*a[k][j];
for (k=1;k<=l;k++) a[k][j] -= g*a[k][i];
}
} */
d[i]=a[i][i];
/* a[i][i]=1.0;
for (j=1;j<=l;j++) a[j][i]=a[i][j]=0.0; */
}
}
/*..............................................................................*/
/*
From Numerical Recipes. Simplified to deal specifically with 3*3 matrices
(stored as elements [1...3][1...3] or a 4*4 array).
*/
#define NRANSI
#define SIGN(a,b) ((b) >= 0.0 ? fabs(a) : -fabs(a))
/* QL algorithm with implicit shifts (Numerical Recipes tqli), specialised to
   a 3x3 tridiagonal matrix (1-indexed). On entry d holds the diagonal and e
   the sub-diagonal produced by tred2; on exit d holds the eigenvalues.
   The eigenvector rotations of the NR original are commented out.
   Exits the program if an eigenvalue fails to converge in 30 iterations. */
void tqli(double d[4], double e[4])
{
double pythag(double a, double b);
int m, l, iter, i;
/* int k; */
double s, r, p, g, f, dd, c, b;
for (i=2; i<=3; i++) e[i-1] = e[i]; /* renumber e for convenience */
e[3] = 0.0;
for (l=1; l<=3; l++) {
iter = 0;
do {
/* look for a single small off-diagonal element to split the matrix */
for (m=l; m<=3-1; m++) {
dd = fabs(d[m]) + fabs(d[m+1]);
if ((double)(fabs(e[m])+dd) == dd) break;
}
if (m != l) {
if (iter++ == 30) {
fprintf(stderr, "Too many iterations in tqli\n");
exit (2);
}
g = (d[l+1] - d[l]) / (2.0*e[l]); /* form implicit shift */
r = pythag(g, 1.0);
g = d[m] - d[l] + e[l] / (g + SIGN(r,g));
s = c = 1.0;
p = 0.0;
/* plane rotations to restore tridiagonal form */
for (i=m-1; i>=l; i--) {
f = s * e[i];
b = c * e[i];
e[i+1] = (r=pythag(f,g));
if (r == 0.0) { /* recover from underflow */
d[i+1] -= p;
e[m] = 0.0;
break;
}
s = f/r;
c = g/r;
g = d[i+1] - p;
r = (d[i] - g)*s + 2.0*c*b;
d[i+1] = g+(p=s*r);
g = c*r - b;
/* for (k=1; k<=3; k++) {
f = z[k][i+1];
z[k][i+1] = s*z[k][i]+c*f;
z[k][i] = c*z[k][i]i - s*f;
} */
}
/* NOTE(review): r and i carry values from the rotation loop here --
   this is the standard NR formulation, kept verbatim */
if (r == 0.0 && i >= l) continue;
d[l] -= p;
e[l] = g;
e[m] = 0.0;
}
} while (m != l);
}
}
#undef NRANSI
/*..............................................................................*/
/*
From Numerical Recipes. Used by tqli.
*/
#define NRANSI
static double sqrarg;
#define SQR(a) ((sqrarg=(a)) == 0.0 ? 0.0 : sqrarg*sqrarg)
double pythag(double a, double b)
{
double absa, absb;
absa = fabs(a);
absb = fabs(b);
if (absa > absb) return absa*sqrt(1.0+SQR(absb/absa));
else return (absb == 0.0 ? 0.0 : absb*sqrt(1.0+SQR(absa/absb)));
}
#undef NRANSI
/*..............................................................................*/
/*
Normalise a vector to have unit length. For speed during heavy use, it is
not checked that the supplied vector has non-zero length.
*/
/* Scale *u to unit length in place. A zero-length vector is left untouched
   (the advertised "no check" applies to speed, not to division by zero). */
void normalise(struct vector *u)
{
    double len = sqrt(DOT(*u, *u));
    if (len != 0.0) {
        double inv = 1/len;
        u->x *= inv;
        u->y *= inv;
        u->z *= inv;
    }
}
/*
Returns the vector pointing from the centre of mass of particle 2 to the
centre of mass of the closest image of particle 1.
*/
struct vector image(struct vector r1, struct vector r2, struct vector box)
{
struct vector r12;
double anint(double);
r12.x = r1.x - r2.x;
r12.y = r1.y - r2.y;
r12.z = r1.z - r2.z;
r12.x = box.x * (r12.x - anint(r12.x));
r12.y = box.y * (r12.y - anint(r12.y));
r12.z = box.z * (r12.z - anint(r12.z));
return r12;
}
/*
Returns the nearest integer to its argument as a double precision number. e.g.
anint(-0.49) = 0.0 and anint(-0.51) = -1.0. Equivalent to the Fortran intrinsic
ANINT.
*/
double anint(double arg)
{
    /* shift half a unit away from zero, then truncate toward zero --
       rounds halves away from zero, like the Fortran ANINT intrinsic */
    const double shifted = (arg < 0) ? arg - 0.5 : arg + 0.5;
    return (double)((long)shifted);
}
/*..............................................................................*/
/*
Returns an evenly distributed random unit vector of unit length. See Allen &
Tildesley p349 or Frenkel & Smit p410.
RANDOM VECTOR ON UNIT SPHERE
*/
/* Uniformly distributed random unit vector (Marsaglia method: rejection
   sampling in the unit disc, then lifting to the sphere). See Allen &
   Tildesley p349 or Frenkel & Smit p410. */
struct vector ranvec(void)
{
    double s, lift, u1, u2;
    struct vector unit;
    double ran2(long *);

    /* draw (u1,u2) uniformly inside the unit disc */
    do {
        u1 = 1.0 - 2.0*ran2(&seed);
        u2 = 1.0 - 2.0*ran2(&seed);
        s = u1*u1 + u2*u2;
    } while (s > 1.0);

    lift = 2.0 * sqrt(1.0 - s);
    unit.x = u1 * lift;
    unit.y = u2 * lift;
    unit.z = 1.0 - 2.0*s;
    return unit;
}
/**
* returns a point randomly and evenly distributed inside of a unit sphere
*/
struct vector ranvecsph(void)
{
struct vector ranvec;
double ran2(long *);
do{
ranvec.x = 2 * ran2(&seed) - 1.0;
ranvec.y = 2 * ran2(&seed) - 1.0;
ranvec.z = 2 * ran2(&seed) - 1.0;
} while(ranvec.x*ranvec.x +
ranvec.y*ranvec.y +
ranvec.z*ranvec.z >= 1);
//printf("%lf\t%lf\t%lf\n", ranvec.x,ranvec.y,ranvec.z);
return ranvec;
}
/**** some useful math *******/
struct vector vec_create(double x, double y, double z)
{
struct vector newvec;
newvec.x=x;
newvec.y=y;
newvec.z=z;
return newvec;
}
struct vector vec_createarr(double a[3])
{
struct vector newvec;
newvec.x=a[0];
newvec.y=a[1];
newvec.z=a[2];
return newvec;
}
/* Scalar (dot) product of A and B. */
double vec_dotproduct(struct vector A,struct vector B)
{
    return A.x*B.x + A.y*B.y + A.z*B.z;
}
/* vector projection of vector A to direction of B*/
struct vector vec_project(struct vector* A,struct vector* B)
{
double dp;
struct vector pr;
dp = A->x*B->x + A->y*B->y + A->z*B->z;
pr.x=B->x*dp;
pr.y=B->y*dp;
pr.z=B->z*dp;
return pr;
}
/* Remove from *A its component along B (one Gram-Schmidt step).
   As with vec_project, B is assumed to be unit length. */
void ortogonalise(struct vector *A, struct vector B)
{
    double vec_dotproduct(struct vector A,struct vector B);
    const double comp = vec_dotproduct(*A, B);

    A->x -= B.x * comp;
    A->y -= B.y * comp;
    A->z -= B.z * comp;
}
/* vector projection of vector A perpendicular to direction of B*/
struct vector vec_perpproject(struct vector *A,struct vector *B)
{
struct vector pp;
double dp;
struct vector vec_project(struct vector *, struct vector*);
dp=DOT((*A),(*B));
pp.x = A->x - B->x*dp;
pp.y = A->y - B->y*dp;
pp.z = A->z - B->z*dp;
// fprintf (stderr, "pp x: %.8f y: %.8f z: %.8f \n",pp.x,pp.y,pp.z);
return pp;
}
/* Returns a vector perpendicular to A.
   Nothing special about the choice beyond being perpendicular to A.
   NOTE(review): the result is the cross product of A with a *unit* vector,
   so its length is |A|*sin(angle) — it is NOT normalized unless A itself
   is a unit vector; the original comment claimed otherwise. Confirm at
   call sites before relying on unit length.
   Cleanup: removed an unused local prototype of vec_normalize(). */
struct vector vec_perp(struct vector A)
{
    double ratio, x, y;
    struct vector somevector;
    struct vector vec_create(double, double, double);
    void normalise(struct vector *);
    struct vector vec_crossproduct(struct vector, struct vector);

    /* construct a helper vector guaranteed not to be parallel to A */
    x = A.x;
    y = A.y;
    if (x == 0) x = 1;
    else {
        if (y == 0) y = 1;
        else {
            ratio = y / x;
            y = x * ratio * 2;   /* i.e. double the y component */
        }
    }
    somevector = vec_create(x, y, A.z);
    normalise(&somevector);

    return vec_crossproduct(A, somevector);
}
/* Perform the multiplication of a matrix A and a vector B where A is the
first argument and B is the second argument. The routine will
return AxB*/
struct vector matrix_vec_multiply(double A[3][3],struct vector B)
{
int i;
double vecarr[3];
struct vector AB,RA;
struct vector vec_createarr(double[3]);
double vec_dotproduct(struct vector,struct vector);
for (i=0;i<3;i++) {
/* index the row vector from A*/
RA=vec_createarr(A[i]);
/* Now find the dot product of this row with B*/
vecarr[i]=vec_dotproduct(RA,B);
}
AB=vec_createarr(vecarr);
return AB;
}
/* Euclidean distance between two points. */
double vec_distance(struct vector vec1, struct vector vec2)
{
    double dx = vec1.x - vec2.x;
    double dy = vec1.y - vec2.y;
    double dz = vec1.z - vec2.z;

    /* sqrt() instead of pow(sum, 0.5): clearer, typically faster, and
       consistent with vec_size() below */
    return sqrt(dx*dx + dy*dy + dz*dz);
}

/* Euclidean length of a vector. */
double vec_size(struct vector vec)
{
    return sqrt(vec.x*vec.x + vec.y*vec.y + vec.z*vec.z);
}
/* Normalize a vector*/
struct vector vec_normalize(struct vector vec)
{
double mag;
struct vector newvec;
double vec_size(struct vector);
mag= vec_size (vec);
mag=1/mag;
newvec.x=vec.x*mag;
newvec.y=vec.y*mag;
newvec.z=vec.z*mag;
return newvec;
}
/* Scale a vector */
struct vector vec_scale(struct vector vec, double scale)
{
vec.x=vec.x*scale;
vec.y=vec.y*scale;
vec.z=vec.z*scale;
return vec;
}
/* cross_product*/
struct vector vec_crossproduct(struct vector A,struct vector B)
{
struct vector cp;
cp.x=( A.y*B.z - A.z*B.y);
cp.y=( -A.x*B.z + A.z*B.x);
cp.z=( A.x*B.y - A.y*B.x);
return cp;
}
/* addition of vectors*/
inline
struct vector vec_sum(struct vector A,struct vector B)
{
struct vector C;
C.x=(A.x + B.x);
C.y=(A.y + B.y);
C.z=(A.z + B.z);
return C;
}
/* subtraction of vectors*/
inline
struct vector vec_sub(struct vector A,struct vector B)
{
struct vector C;
C.x=(A.x - B.x);
C.y=(A.y - B.y);
C.z=(A.z - B.z);
return C;
}
/* asign vlues of vector A by values in vector B*/
inline
void vec_asign(struct vector *A, struct vector B)
{
(*A).x=B.x;
(*A).y=B.y;
(*A).z=B.z;
}
/* generate random unit vector*/
struct vector vec_random(void)
{
struct vector newvec;
struct vector ranvec(void);
newvec=ranvec();
return newvec;
}
/*generate random unit quaternion*/
struct quat quat_random(void)
{
double cosv, sinv;
struct quat newquat;
struct vector newaxis;
struct vector ranvec(void);
/* generate quaternion for rotation*/
newaxis = ranvec(); /*random axes for rotation*/
cosv = cos(PIH * ran2(&seed) );
if (ran2(&seed) <0.5) sinv = sqrt(1.0 - cosv*cosv);
else sinv = -sqrt(1.0 - cosv*cosv);
newquat.w=cosv;
newquat.x=newaxis.x*sinv;
newquat.y=newaxis.y*sinv;
newquat.z=newaxis.z*sinv;
return newquat;
}
/* Create quaternion for rotation around vector "vec" of angle in degrees "angle"
function need cos of half angle and its sin*/
struct quat quat_create(struct vector vec, double vc, double vs)
{
struct quat newquat;
newquat.w=vc;
newquat.x=vec.x*vs;
newquat.y=vec.y*vs;
newquat.z=vec.z*vs;
return newquat;
}
/*rotate vector with quaternion*/
/* Rotates *vec in place by quaternion quat, expanding q*v*conj(q) with
   shared subexpressions t2..t10. Assumes quat is a unit quaternion —
   TODO confirm at call sites. newx/newy/newz are staged so the three
   reads of *vec all see the original value. */
void vec_rotate(struct vector *vec, struct quat quat)
{
double t2,t3,t4,t5,t6,t7,t8,t9,t10,newx,newy,newz;
/* t1 = quat.w * quat.w; */
t2 = quat.w * quat.x;
t3 = quat.w * quat.y;
t4 = quat.w * quat.z;
t5 = -quat.x * quat.x;
t6 = quat.x * quat.y;
t7 = quat.x * quat.z;
t8 = -quat.y * quat.y;
t9 = quat.y * quat.z;
t10 = -quat.z * quat.z;
newx = 2.0 * ( (t8+t10)*(*vec).x + (t6-t4)*(*vec).y + (t3+t7)*(*vec).z ) + (*vec).x;
newy = 2.0 * ( (t4+t6)*(*vec).x + (t5+t10)*(*vec).y + (t9-t2)*(*vec).z ) + (*vec).y;
newz = 2.0 * ( (t7-t3)*(*vec).x + (t2+t9)*(*vec).y + (t5+t8)*(*vec).z ) + (*vec).z;
(*vec).x = newx;
(*vec).y = newy;
(*vec).z = newz;
}
/* rotate spherocylinder by quaternion of random axis and angle smaller than
maxcos(cosine of angle half), we do everything on site for speed */
/* Rotates the orientation vectors of particle *psc (direction, patch
   directions/sides, chiral directions) by a random quaternion whose
   half-angle is at most max_angle. The quaternion rotation matrix
   coefficients d1..d9 are computed once and reused for every vector.
   Which vectors exist depends on geotype (second patch for T* types,
   chiral vectors for *CH* types). */
void psc_rotate(struct particles *psc, double max_angle,int geotype)
{
double vc, vs, t2, t3, t4, t5, t6, t7, t8, t9, t10;
double d1, d2, d3, d4, d5, d6, d7, d8, d9 , newx, newy, newz;
int k,m;
struct quat newquat;
struct vector newaxis;
struct vector ranvec(void);
/* generate quaternion for rotation*/
newaxis = ranvec(); /*random axes for rotation*/
// maxcos = cos(maxorient/2/180*PI);
// vc = maxcos + ran2(&seed)*(1-maxcos); /*cos of angle must be bigger than maxcos and smaller than one*/
vc = cos(max_angle * ran2(&seed) );
if (ran2(&seed) <0.5) vs = sqrt(1.0 - vc*vc);
else vs = -sqrt(1.0 - vc*vc); /*randomly choose orientation of direction of rotation clockwise or counterclockwise*/
newquat.w=vc;
newquat.x=newaxis.x*vs;
newquat.y=newaxis.y*vs;
newquat.z=newaxis.z*vs;
/* do quaternion rotation*/
/* t2..t10: shared products of quaternion components (same scheme as
   vec_rotate); d1..d9 are the rows of the equivalent rotation matrix */
t2 = newquat.w * newquat.x;
t3 = newquat.w * newquat.y;
t4 = newquat.w * newquat.z;
t5 = -newquat.x * newquat.x;
t6 = newquat.x * newquat.y;
t7 = newquat.x * newquat.z;
t8 = -newquat.y * newquat.y;
t9 = newquat.y * newquat.z;
t10 = -newquat.z * newquat.z;
d1 = t8 + t10;
d2 = t6 - t4;
d3 = t3 + t7;
d4 = t4 + t6;
d5 = t5 + t10;
d6 = t9 - t2;
d7 = t7 - t3;
d8 = t2 + t9;
d9 = t5 + t8;
/*rotate spherocylinder direction vector*/
newx = 2.0 * ( d1*psc->dir.x + d2*psc->dir.y + d3*psc->dir.z ) + psc->dir.x;
newy = 2.0 * ( d4*psc->dir.x + d5*psc->dir.y + d6*psc->dir.z ) + psc->dir.y;
newz = 2.0 * ( d7*psc->dir.x + d8*psc->dir.y + d9*psc->dir.z ) + psc->dir.z;
psc->dir.x = newx;
psc->dir.y = newy;
psc->dir.z = newz;
/* patchy geotypes: rotate patch vectors (two patches for T* types) */
m=1;
if ( (geotype != SCN) && (geotype != SCA) ) {
if ( (geotype == TPSC) || (geotype == TCPSC) || (geotype == TCHPSC) || (geotype == TCHCPSC) )
m=2;
for (k=0;k<m;k++) {
/*rotate patch direction vector*/
newx = 2.0 * ( d1*psc->patchdir[k].x + d2*psc->patchdir[k].y + d3*psc->patchdir[k].z ) + psc->patchdir[k].x;
newy = 2.0 * ( d4*psc->patchdir[k].x + d5*psc->patchdir[k].y + d6*psc->patchdir[k].z ) + psc->patchdir[k].y;
newz = 2.0 * ( d7*psc->patchdir[k].x + d8*psc->patchdir[k].y + d9*psc->patchdir[k].z ) + psc->patchdir[k].z;
psc->patchdir[k].x = newx;
psc->patchdir[k].y = newy;
psc->patchdir[k].z = newz;
/*rotate patch sides vectors*/
newx = 2.0 * ( d1*psc->patchsides[0+2*k].x + d2*psc->patchsides[0+2*k].y + d3*psc->patchsides[0+2*k].z ) + psc->patchsides[0+2*k].x;
newy = 2.0 * ( d4*psc->patchsides[0+2*k].x + d5*psc->patchsides[0+2*k].y + d6*psc->patchsides[0+2*k].z ) + psc->patchsides[0+2*k].y;
newz = 2.0 * ( d7*psc->patchsides[0+2*k].x + d8*psc->patchsides[0+2*k].y + d9*psc->patchsides[0+2*k].z ) + psc->patchsides[0+2*k].z;
psc->patchsides[0+2*k].x = newx;
psc->patchsides[0+2*k].y = newy;
psc->patchsides[0+2*k].z = newz;
newx = 2.0 * ( d1*psc->patchsides[1+2*k].x + d2*psc->patchsides[1+2*k].y + d3*psc->patchsides[1+2*k].z ) + psc->patchsides[1+2*k].x;
newy = 2.0 * ( d4*psc->patchsides[1+2*k].x + d5*psc->patchsides[1+2*k].y + d6*psc->patchsides[1+2*k].z ) + psc->patchsides[1+2*k].y;
newz = 2.0 * ( d7*psc->patchsides[1+2*k].x + d8*psc->patchsides[1+2*k].y + d9*psc->patchsides[1+2*k].z ) + psc->patchsides[1+2*k].z;
psc->patchsides[1+2*k].x = newx;
psc->patchsides[1+2*k].y = newy;
psc->patchsides[1+2*k].z = newz;
}
}
/* chiral geotypes: rotate chiral direction vectors */
m=1;
if ( (geotype == CHPSC) || (geotype == CHCPSC) || (geotype == TCHPSC) || (geotype == TCHCPSC) ) {
if ( (geotype == TCHPSC) || (geotype == TCHCPSC) )
m=2;
for (k=0;k<m;k++) {
/*rotate chiral direction vector*/
newx = 2.0 * ( d1*psc->chdir[k].x + d2*psc->chdir[k].y + d3*psc->chdir[k].z ) + psc->chdir[k].x;
newy = 2.0 * ( d4*psc->chdir[k].x + d5*psc->chdir[k].y + d6*psc->chdir[k].z ) + psc->chdir[k].y;
newz = 2.0 * ( d7*psc->chdir[k].x + d8*psc->chdir[k].y + d9*psc->chdir[k].z ) + psc->chdir[k].z;
psc->chdir[k].x = newx;
psc->chdir[k].y = newy;
psc->chdir[k].z = newz;
}
}
}
/*returns a position of center of mass of system*/
/* Computes the volume-weighted center of mass of all npart particles and
   stores it in conf->syscm. Each position is first reduced by periodic
   boundary conditions (anint) and weighted by the particle type's volume;
   the sums are finally divided by the total system volume. */
void masscenter(long npart, struct ia_param ia_params[MAXT][MAXT], struct conf * conf)
{
long i;
double anint(double);
conf->syscm.x = 0;
conf->syscm.y = 0;
conf->syscm.z = 0;
for (i=0; i<npart; i++) {
/*using periodic boundary conditions*/
conf->syscm.x += (conf->particle[i].pos.x - anint(conf->particle[i].pos.x) ) *
ia_params[conf->particle[i].type][conf->particle[i].type].volume;
conf->syscm.y += (conf->particle[i].pos.y - anint(conf->particle[i].pos.y) ) *
ia_params[conf->particle[i].type][conf->particle[i].type].volume;
conf->syscm.z += (conf->particle[i].pos.z - anint(conf->particle[i].pos.z) ) *
ia_params[conf->particle[i].type][conf->particle[i].type].volume;
}
conf->syscm.x /= conf->sysvolume;
conf->syscm.y /= conf->sysvolume;
conf->syscm.z /= conf->sysvolume;
return;
}
/* rotate cluster of particles by quaternion of random axis and angle smaller than
maxcos(cosine of angle half), we do everything on site for speed */
/* Rotates every particle of chain `target` about the point gc by one
   random quaternion (half-angle at most max_angle). Positions are stored
   scaled by the box, so each position is shifted to gc, unscaled by the
   box (to avoid distortion), rotated together with all orientation
   vectors, then rescaled and shifted back. The chain is walked through
   topo->chainlist[target][i] until a negative sentinel. */
void cluster_rotate(long target, struct vector gc, double max_angle, struct topo * topo, struct conf * conf)
{
long current,i;
double vc,vs;
//double quatsize;
struct quat newquat;
struct vector newaxis;
struct vector ranvec(void);
void vec_rotate(struct vector *, struct quat);
// create rotation quaternion
newaxis = ranvec(); /*random axes for rotation*/
// maxcos = cos(maxorient/2/180*PI);
//vc = maxcos + ran2(&seed)*(1-maxcos); /*cos of angle must be bigger than maxcos and smaller than one*/
vc = cos(max_angle * ran2(&seed) );
if (ran2(&seed) <0.5) vs = sqrt(1.0 - vc*vc);
else vs = -sqrt(1.0 - vc*vc); /*randomly choose orientation of direction of rotation clockwise or counterclockwise*/
newquat.w=vc;
newquat.x=newaxis.x*vs;
newquat.y=newaxis.y*vs;
newquat.z=newaxis.z*vs;
//quatsize=sqrt(newquat.w*newquat.w+newquat.x*newquat.x+newquat.y*newquat.y+newquat.z*newquat.z);
//shift position to geometrical center
i=0;
current = topo->chainlist[target][0];
while (current >=0 ) {
//shift position to geometrical center
conf->particle[current].pos.x -= gc.x;
conf->particle[current].pos.y -= gc.y;
conf->particle[current].pos.z -= gc.z;
//scale things by box not to have them distorted
conf->particle[current].pos.x *= conf->box.x;
conf->particle[current].pos.y *= conf->box.y;
conf->particle[current].pos.z *= conf->box.z;
//do rotation
vec_rotate(&conf->particle[current].pos, newquat);
vec_rotate(&conf->particle[current].dir, newquat);
vec_rotate(&conf->particle[current].patchdir[0], newquat);
vec_rotate(&conf->particle[current].patchdir[1], newquat);
vec_rotate(&conf->particle[current].chdir[0], newquat);
vec_rotate(&conf->particle[current].chdir[1], newquat);
vec_rotate(&conf->particle[current].patchsides[0], newquat);
vec_rotate(&conf->particle[current].patchsides[1], newquat);
vec_rotate(&conf->particle[current].patchsides[2], newquat);
vec_rotate(&conf->particle[current].patchsides[3], newquat);
//sclae back
conf->particle[current].pos.x /= conf->box.x;
conf->particle[current].pos.y /= conf->box.y;
conf->particle[current].pos.z /= conf->box.z;
//shift positions back
conf->particle[current].pos.x += gc.x;
conf->particle[current].pos.y += gc.y;
conf->particle[current].pos.z += gc.z;
i++;
current = topo->chainlist[target][i];
}
}
/* Put the particle back into the original box using periodic boundary
conditions. In our system particle positions are scaled by the box size,
so getting them into the original box means reducing them to the range
0..1 and then scaling back by the size of the box. */
/* Wrap *pos into the primary box (periodic boundaries) and convert the
   scaled coordinates back to absolute ones by multiplying with the box. */
void origbox(struct vector *pos, struct vector box)
{
    double anint(double);

    pos->x = box.x * (pos->x - anint(pos->x));
    pos->y = box.y * (pos->y - anint(pos->y));
    pos->z = box.z * (pos->z - anint(pos->z));
}
/* use of periodic boundary conditions*/
/* Wraps each coordinate of *pos into the interval [0, pbc.*].
   Note the do-while pairs always run at least once: an already-in-range
   coordinate is shifted up by one period and back down again, which is
   an identity up to floating-point rounding. Keep the do-while form —
   replacing it with plain while loops would change rounding behavior. */
void usepbc(struct vector *pos,struct vector pbc)
{
do {
(*pos).x += pbc.x;
} while ((*pos).x < 0.0);
do {
(*pos).x -= pbc.x;
} while ((*pos).x > pbc.x);
do {
(*pos).y += pbc.y;
} while ((*pos).y < 0.0);
do {
(*pos).y -= pbc.y;
} while ((*pos).y > pbc.y);
do {
(*pos).z += pbc.z;
} while ((*pos).z < 0.0);
do {
(*pos).z -= pbc.z;
} while ((*pos).z > pbc.z);
}
/*..............................................................................*/
/*.......................TEMPLATE FILES.........................................*/
/*..............................................................................*/
/*
# Template for the "options" file. Options start with an '#'.
# Pressure couplings:
# 0 = anisotropic coupling, 1 = isotropic coupling, 2 = isotropic in xy z=const, 3 = isotropic
# xy and keep Volume constant
# Wang-Landau method: (with constant decrease of bias addition by factor of 2, until less than WL_ALPHATOL)
# 0 = none, 1 = z-direction of 1st particle, 2 = hole in xyplane, 3 = z-orientation of 0th particle
# 4 = distance of first two particles, 5 = pore around z axis and above CM, 6 = pore around z axis and above 0th particle
# 7 = number of particles in contact (within distance sqrt(WL_CONTACTS))
ptype = 1 # Pressure coupling type (0-anisotropic xyz, 1-isotropic xyz, 2 - isotropic in xy z=const, 3 - isotropic in xy and V=const)
press = 1 # Pressure
paralpress = 1 # Parallel pressure for replica exchange
shave = 0 # Average number of volume change attempts per sweep (usually 1)
nequil = 0 # Number of equilibration sweeps
adjust = 0 # Number of equilibration sweeps between step size adjustments
nsweeps = 1000000 # Number of production sweeps
paramfrq = 1000000 # Number of sweeps between order parameter samples
report = 1000000 # Number of sweeps between statistics reports
nrepchange = 1000 # Number of sweeps between replica exchanges
movie = 100000 # Number of sweeps between movie frames (0 = no movie)
chainprob = 0.0 # Probability of chain move attempts per sweep ( 0.25/number of particles in chain)
transmx = 0.212 # Initial maximum displacement
rotmx = 7.5 # Initial maximum orientation change (degrees)
edge_mx = 0.0 # Initial maximum box length change
chainmmx = 0.0 # Initial maximum chain displacement
chainrmx = 0.0 # Initial maximum chain rotation change (degrees)
temper = 1.0 # Temperature in units kT/e
paraltemper = 1.5 # Temperature for parallel tempering in kT/e
wlm = 0 # Wang-Landau method
wlmtype = 0 # For which atomic type (from top.init) should the Wang-Landau method be calculated?
switchprob = 0.0016 # Probability of type switch attempts per sweep
pairlist_update = 8 # Number of sweeps after which the pairlist should be updated
seed = 1 # Random number seed
write_cluster = 10000 # Number of sweeps per writing out cluster info
# End of the file
*/
/*
Example of a 'Config.init' file, but you must delete the comments... there are only numbers in the configuration file
#box
10.0 10.0 10.0
#particles (x,y,z) (direction_x,direction_y, direction_z) (patchdirection_x,patchdirection_y,patchdirection_z) (switched)
*/
/*
Template for the topology file 'top.init'. ( "\\" is the symbol for line continuation,
"#" is the symbol for a comment, "[" is the starting sign for a keyword, "]" is the ending sign
for a keyword ) There are three keywords: types, molecules, and system. They
should be given in this order.
TYPES:
spherocylinders
SC - purely repulsive spherocylinder with WCA potential on closest distance
SCA - isotropic cos^2 potential is acting isotropicaly dependent only on
closest distance between spherocylinders..
PSC - Attractive potential in limited to an angular wedge on spherocylinder. Patch
goes all the way through, making also hemispherical caps on end attractive
CPSC - Attractive potential in limited to an angular wedge on cylindrical part
of spherocylinders. The hemispherical caps on ends are repulsive
spheres
(T)(CH)PSC - T adds second patch, CH - adds chirality
SP - purely repulsive sphere with WCA potential on closest distance
SPA - isotropic cos^2 potential acting isotropically, dependent only on the
closest distance between objects
[Types]
# NAME NUMBER GEOTYPE EPSILON SIGMA ATTRACTION_DIST ATTRACTION_SWITCH PATCH_ANGLE PATCH_SWITCH SC_LENGTH (Optional second patch: PATCH_ROTATION PATCH_ANGLE PATCH_SWITCH )CHIRAL_ANGLE
Prot1 1 PSC 1 1.2 1.346954458 1.0 80.0 5.0 3
Prot2 2 PSC 1 1.2 1.346954458 1.0 170.0 5.0 3
Prot3 3 CHCPSC 1 1.2 1.346954458 1.0 170.0 5.0 3 10
Prot4 4 TCHCPSC 1 1.2 1.346954458 1.0 170.0 5.0 3 90.0 90.0 5.0 10
[Molecules]
# Molecules letter
# bond1 - harmonic bond between nearest neighbours (end points for spherocylinders) (first constant then eq distance)
# bond2 - harmonic bond between second nearest neighbours (their center of mass) (first constant then eq distance)
# bondd - directional harmonic bond between nearest neighbours (end point of the second spherocylinder is attached to the point of bondlength extension of the first spherocylinder) (first constant then eq distance)
# angle1 - angle between two spherocylinders -nearest neighbours (first constant then eq degrees 0-180.0)
# angle2 - angle between two spherocylinder patches -nearest neighbours (first constant then eq degrees 0-180.0)
# particles - types as they go in chain in molecule
A: {
#what: TYPE SWITCHTYPE DELTA_MU
particles: 1 2 0.5
particles: 2
}
B: {
particles: 1
particles: 2 1 0.3
}
[System]
A 2
B 2
[EXTER]
# wall interaction
# THICKNESS EPSILON ATTRACTION_SWITCH
5.0 1.0 1.0
[EXCLUDE]
#set pair types for which attraction will be excluded (reversepair is automaticaly added)
1 2
1 3
*/
|
fft.c | /* Copyright 2013-2014. The Regents of the University of California.
* Copyright 2016-2018. Martin Uecker.
* Copyright 2018. Massachusetts Institute of Technology.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2011-2018 Martin Uecker <martin.uecker@med.uni-goettingen.de>
* 2014 Frank Ong <frankong@berkeley.edu>
* 2018 Siddharth Iyer <ssi@mit.edu>
*
*
* FFT. It uses FFTW or CUFFT internally.
*
*
* Gauss, Carl F. 1805. "Nachlass: Theoria Interpolationis Methodo Nova
* Tractata." Werke 3, pp. 265-327, Königliche Gesellschaft der
* Wissenschaften, Göttingen, 1866
*/
#include <assert.h>
#include <complex.h>
#include <stdbool.h>
#include <math.h>
#include <fftw3.h>
#include "num/multind.h"
#include "num/flpmath.h"
#include "num/ops.h"
#include "misc/misc.h"
#include "misc/debug.h"
#include "fft.h"
#undef fft_plan_s
#ifdef USE_CUDA
#include "num/gpuops.h"
#include "fft-cuda.h"
#define LAZY_CUDA
#endif
/* Scale src by 1/sqrt(M), where M is the product of the transformed
 * dimensions (those selected by flags), writing into dst (strided). */
void fftscale2(unsigned int N, const long dimensions[N], unsigned long flags, const long ostrides[N], complex float* dst, const long istrides[N], const complex float* src)
{
	long tdims[N];
	md_select_dims(N, flags, tdims, dimensions);

	long numel = md_calc_size(N, tdims);
	float scale = 1. / sqrtf((float)numel);

	md_zsmul2(N, dimensions, ostrides, dst, istrides, src, scale);
}

/* Same as fftscale2, with contiguous strides. */
void fftscale(unsigned int N, const long dims[N], unsigned long flags, complex float* dst, const complex float* src)
{
	long str[N];
	md_calc_strides(N, str, dims, CFL_SIZE);

	fftscale2(N, dims, flags, str, dst, str, src);
}
/* Phase increment for index j along a dimension of the given length.
 * Used by fftmod2_r to accumulate the modulation that centers the FFT;
 * the extra "/ 2." term handles the offset correctly for odd lengths.
 * Do not "simplify" this formula. */
static double fftmod_phase(long length, int j)
{
long center1 = length / 2;
double shift = (double)center1 / (double)length;
return ((double)j - (double)center1 / 2.) * shift;
}
/* Recursive worker for fftmod/ifftmod: applies the per-index phase
 * modulation over all dimensions selected in flags, accumulating the
 * phase along the way; inv selects the conjugate (inverse) modulation. */
static void fftmod2_r(unsigned int N, const long dims[N], unsigned long flags, const long ostrs[N], complex float* dst, const long istrs[N], const complex float* src, bool inv, double phase)
{
// base case: no dimensions left - apply the accumulated phase as a scalar
if (0 == flags) {
md_zsmul2(N, dims, ostrs, dst, istrs, src, cexp(M_PI * 2.i * (inv ? -phase : phase)));
return;
}
/* this will also currently be slow on the GPU because we do not
* support strides there on the lowest level */
// find the highest still-selected dimension
unsigned int i = N - 1;
while (!MD_IS_SET(flags, i))
i--;
#if 1
// If there is only one dimensions left and it is the innermost
// which is contiguous optimize using md_zfftmod2
if ((0u == MD_CLEAR(flags, i)) && (1 == md_calc_size(i, dims))
&& (CFL_SIZE == ostrs[i]) && (CFL_SIZE == istrs[i])) {
md_zfftmod2(N - i, dims + i, ostrs + i, dst, istrs + i, src, inv, phase);
return;
}
#endif
long tdims[N];
md_select_dims(N, ~MD_BIT(i), tdims, dims);
// recurse over dimension i: one slice per index j, each with its own phase
#pragma omp parallel for
for (int j = 0; j < dims[i]; j++)
fftmod2_r(N, tdims, MD_CLEAR(flags, i),
ostrs, (void*)dst + j * ostrs[i], istrs, (void*)src + j * istrs[i],
inv, phase + fftmod_phase(dims[i], j));
}
/* Drop from flags every selected dimension of size 1 (iterative form of
 * the original tail recursion; identical result). */
static unsigned long clear_singletons(unsigned int N, const long dims[N], unsigned long flags)
{
	for (unsigned int i = 0; i < N; i++)
		if (1 == dims[i])
			flags = MD_CLEAR(flags, i);

	return flags;
}
/* Forward fftmod (strided): modulate src into dst.
 *
 * Correct usage: fftmod before and after fft, and ifftmod before and
 * after ifft (note this differs from how fftshift/ifftshift are used). */
void fftmod2(unsigned int N, const long dims[N], unsigned long flags, const long ostrs[N], complex float* dst, const long istrs[N], const complex float* src)
{
	unsigned long nflags = clear_singletons(N, dims, flags);

	fftmod2_r(N, dims, nflags, ostrs, dst, istrs, src, false, 0.);
}

/* Inverse fftmod (strided). */
void ifftmod2(unsigned int N, const long dims[N], unsigned long flags, const long ostrs[N], complex float* dst, const long istrs[N], const complex float* src)
{
	unsigned long nflags = clear_singletons(N, dims, flags);

	fftmod2_r(N, dims, nflags, ostrs, dst, istrs, src, true, 0.);
}

/* Forward fftmod with contiguous strides. */
void fftmod(unsigned int N, const long dimensions[N], unsigned long flags, complex float* dst, const complex float* src)
{
	long str[N];
	md_calc_strides(N, str, dimensions, CFL_SIZE);

	fftmod2(N, dimensions, flags, str, dst, str, src);
}

/* Inverse fftmod with contiguous strides. */
void ifftmod(unsigned int N, const long dimensions[N], unsigned long flags, complex float* dst, const complex float* src)
{
	long str[N];
	md_calc_strides(N, str, dimensions, CFL_SIZE);

	ifftmod2(N, dimensions, flags, str, dst, str, src);
}
/* ifftshift (strided): circular shift by ceil(n/2) along each selected
 * dimension. */
void ifftshift2(unsigned int N, const long dims[N], unsigned long flags, const long ostrs[N], complex float* dst, const long istrs[N], const complex float* src)
{
	long pos[N];
	md_set_dims(N, pos, 0);

	for (unsigned int i = 0; i < N; i++)
		if (MD_IS_SET(flags, i))
			pos[i] = dims[i] - dims[i] / 2;	// == ceil(dims[i] / 2)

	md_circ_shift2(N, dims, pos, ostrs, dst, istrs, src, CFL_SIZE);
}

/* ifftshift with contiguous strides. */
void ifftshift(unsigned int N, const long dimensions[N], unsigned long flags, complex float* dst, const complex float* src)
{
	long str[N];
	md_calc_strides(N, str, dimensions, CFL_SIZE);

	ifftshift2(N, dimensions, flags, str, dst, str, src);
}

/* fftshift (strided): circular shift by floor(n/2) along each selected
 * dimension. */
void fftshift2(unsigned int N, const long dims[N], unsigned long flags, const long ostrs[N], complex float* dst, const long istrs[N], const complex float* src)
{
	long pos[N];
	md_set_dims(N, pos, 0);

	for (unsigned int i = 0; i < N; i++)
		if (MD_IS_SET(flags, i))
			pos[i] = dims[i] / 2;

	md_circ_shift2(N, dims, pos, ostrs, dst, istrs, src, CFL_SIZE);
}

/* fftshift with contiguous strides. */
void fftshift(unsigned int N, const long dimensions[N], unsigned long flags, complex float* dst, const complex float* src)
{
	long str[N];
	md_calc_strides(N, str, dimensions, CFL_SIZE);

	fftshift2(N, dimensions, flags, str, dst, str, src);
}
// Operator data for one FFT: the FFTW plan plus the metadata (dims,
// strides, flags, direction) needed to apply it or to lazily build the
// CUFFT plan on first GPU use.
struct fft_plan_s {
INTERFACE(operator_data_t);
fftwf_plan fftw; // NULL when flags == 0 (operator degenerates to a copy)
unsigned int D; // number of dimensions
unsigned long flags; // bitmask of transformed dimensions
bool backwards; // true for the inverse transform
const long* dims; // owned copy of dimensions (length D)
const long* istrs; // owned copy of input strides
const long* ostrs; // owned copy of output strides
#ifdef USE_CUDA
struct fft_cuda_plan_s* cuplan; // lazily created CUFFT plan (may be NULL)
#endif
};
static DEF_TYPEID(fft_plan_s);
#ifdef USE_FFTW_WISDOM
/* Build the path of the FFTW wisdom file for a plan with the given shape,
 * under $TOOLBOX_PATH/save/fftw/. Returns a calloc'd string the caller
 * must free, or NULL when TOOLBOX_PATH is unset.
 * Fix: dims[] has type long, so the matching conversion specifier is %ld;
 * the previous %lu was a signed/unsigned format mismatch (dimensions are
 * positive sizes, so the generated file names are unchanged). */
static char* fftw_wisdom_name(int N, bool backwards, unsigned int flags, const long dims[N])
{
	char* tbpath = getenv("TOOLBOX_PATH");

	if (NULL == tbpath)
		return NULL;

	// Space for path and null terminator.
	int space = snprintf(NULL, 0, "%s/save/fftw/N_%d_BACKWARD_%d_FLAGS_%d_DIMS", tbpath, N, backwards, flags);
	// Space for dimensions.
	for (int idx = 0; idx < N; idx++)
		space += snprintf(NULL, 0, "_%ld", dims[idx]);
	// Space for extension.
	space += snprintf(NULL, 0, ".fftw");
	// Space for null terminator.
	space += 1;

	int len = space;
	char* loc = calloc(space, sizeof(char));

	if (NULL == loc)
		error("memory out");

	int ret = snprintf(loc, len, "%s/save/fftw/N_%d_BACKWARD_%d_FLAGS_%d_DIMS", tbpath, N, backwards, flags);
	assert(ret < len);
	len -= ret;

	for (int idx = 0; idx < N; idx++) {

		char tmp[64];
		ret = sprintf(tmp, "_%ld", dims[idx]);
		assert(ret < 64);
		len -= ret;
		strcat(loc, tmp);
	}

	strcat(loc, ".fftw");
	len -= 5;

	// all budgeted space except the terminator must be consumed exactly
	assert(1 == len);
	assert('\0' == loc[space - 1]);

	return loc;
}
#endif //USE_FFTW_WISDOM
/* Build an FFTW guru plan for the selected dimensions: dims[] collects
 * the transformed dimensions, hmdims[] the "howmany" (loop) dimensions.
 * Strides are converted from bytes to elements for FFTW. Optionally
 * imports/exports wisdom keyed by the plan shape (USE_FFTW_WISDOM). */
static fftwf_plan fft_fftwf_plan(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src, bool backwards, bool measure)
{
fftwf_plan fftwf;
unsigned int N = D;
fftwf_iodim64 dims[N];
fftwf_iodim64 hmdims[N];
unsigned int k = 0;
unsigned int l = 0;
#ifdef USE_FFTW_WISDOM
char* wisdom = fftw_wisdom_name(D, backwards, flags, dimensions);
if (NULL != wisdom)
fftwf_import_wisdom_from_filename(wisdom);
#endif //USE_FFTW_WISDOM
//FFTW seems to be fine with this
//assert(0 != flags);
// split dimensions into transformed (flags set) and loop dimensions
for (unsigned int i = 0; i < N; i++) {
if (MD_IS_SET(flags, i)) {
dims[k].n = dimensions[i];
dims[k].is = istrides[i] / CFL_SIZE;
dims[k].os = ostrides[i] / CFL_SIZE;
k++;
} else {
hmdims[l].n = dimensions[i];
hmdims[l].is = istrides[i] / CFL_SIZE;
hmdims[l].os = ostrides[i] / CFL_SIZE;
l++;
}
}
// FFTW planning is not thread-safe
#pragma omp critical
fftwf = fftwf_plan_guru64_dft(k, dims, l, hmdims, (complex float*)src, dst,
backwards ? 1 : (-1), measure ? FFTW_MEASURE : FFTW_ESTIMATE);
#ifdef USE_FFTW_WISDOM
if (NULL != wisdom)
fftwf_export_wisdom_to_filename(wisdom);
// NOTE(review): wisdom was allocated with calloc() in fftw_wisdom_name()
// but is released with md_free() here — confirm md_free accepts
// non-md_alloc pointers, otherwise this should be free()/xfree().
md_free(wisdom);
#endif //USE_FFTW_WISDOM
return fftwf;
}
/* Operator apply callback: run the planned FFT from args[1] (src) to
 * args[0] (dst). With flags == 0 the transform is a plain strided copy.
 * On the GPU the CUFFT plan is created lazily on first use (LAZY_CUDA). */
static void fft_apply(const operator_data_t* _plan, unsigned int N, void* args[N])
{
complex float* dst = args[0];
const complex float* src = args[1];
const auto plan = CAST_DOWN(fft_plan_s, _plan);
assert(2 == N);
if (0u == plan->flags) {
md_copy2(plan->D, plan->dims, plan->ostrs, dst, plan->istrs, src, CFL_SIZE);
return;
}
#ifdef USE_CUDA
if (cuda_ondevice(src)) {
#ifdef LAZY_CUDA
// cast away const to cache the lazily-created plan in the shared struct
if (NULL == plan->cuplan)
((struct fft_plan_s*)plan)->cuplan = fft_cuda_plan(plan->D, plan->dims, plan->flags, plan->ostrs, plan->istrs, plan->backwards);
#endif
assert(NULL != plan->cuplan);
fft_cuda_exec(plan->cuplan, dst, src);
} else
#endif
{
assert(NULL != plan->fftw);
// FFTW's execute_dft takes a non-const input pointer; src is not modified
fftwf_execute_dft(plan->fftw, (complex float*)src, dst);
}
}
/* Operator destructor: release the FFTW plan, the optional CUFFT plan,
 * and the owned dimension/stride arrays. */
static void fft_free_plan(const operator_data_t* _data)
{
	const auto plan = CAST_DOWN(fft_plan_s, _data);

	if (NULL != plan->fftw)
		fftwf_destroy_plan(plan->fftw);

#ifdef USE_CUDA
	if (NULL != plan->cuplan)
		fft_cuda_free_plan(plan->cuplan);
#endif

	xfree(plan->ostrs);
	xfree(plan->istrs);
	xfree(plan->dims);

	xfree(plan);
}
/* Create an FFT operator using FFTW_MEASURE planning. Scratch buffers are
 * allocated for planning and freed again; with inplace set, one buffer
 * serves as both src and dst.
 * Fixes: the #ifndef LAZY_CUDA condition was missing its closing
 * parenthesis, and it read src after md_free(src) — the buffers are now
 * freed only after the CUDA check. */
const struct operator_s* fft_measure_create(unsigned int D, const long dimensions[D], unsigned long flags, bool inplace, bool backwards)
{
	flags &= md_nontriv_dims(D, dimensions);

	PTR_ALLOC(struct fft_plan_s, plan);
	SET_TYPEID(fft_plan_s, plan);

	complex float* src = md_alloc(D, dimensions, CFL_SIZE);
	complex float* dst = inplace ? src : md_alloc(D, dimensions, CFL_SIZE);

	long strides[D];
	md_calc_strides(D, strides, dimensions, CFL_SIZE);

	plan->fftw = NULL;

	if (0u != flags)
		plan->fftw = fft_fftwf_plan(D, dimensions, flags, strides, dst, strides, src, backwards, true);

#ifdef USE_CUDA
	plan->cuplan = NULL;
#ifndef LAZY_CUDA
	if (cuda_ondevice(src) && (0u != flags))
		plan->cuplan = fft_cuda_plan(D, dimensions, flags, strides, strides, backwards);
#endif
#endif

	// free the planning buffers only after the (optional) CUDA check
	// above, which inspects src
	md_free(src);

	if (!inplace)
		md_free(dst);

	plan->D = D;
	plan->flags = flags;
	plan->backwards = backwards;

	PTR_ALLOC(long[D], dims);
	md_copy_dims(D, *dims, dimensions);
	plan->dims = *PTR_PASS(dims);

	PTR_ALLOC(long[D], istrs);
	md_copy_strides(D, *istrs, strides);
	plan->istrs = *PTR_PASS(istrs);

	PTR_ALLOC(long[D], ostrs);
	md_copy_strides(D, *ostrs, strides);
	plan->ostrs = *PTR_PASS(ostrs);

	return operator_create2(D, dimensions, strides, D, dimensions, strides, CAST_UP(PTR_PASS(plan)), fft_apply, fft_free_plan);
}
/* Create an FFT operator for the given dims/strides (FFTW_ESTIMATE
 * planning). The operator transforms the dimensions selected by flags;
 * with flags == 0 it degenerates to a strided copy.
 * Fix: the #ifndef LAZY_CUDA condition was missing its closing
 * parenthesis (a syntax error whenever that path is compiled). */
const struct operator_s* fft_create2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src, bool backwards)
{
	flags &= md_nontriv_dims(D, dimensions);

	PTR_ALLOC(struct fft_plan_s, plan);
	SET_TYPEID(fft_plan_s, plan);

	plan->fftw = NULL;

	if (0u != flags)
		plan->fftw = fft_fftwf_plan(D, dimensions, flags, ostrides, dst, istrides, src, backwards, false);

#ifdef USE_CUDA
	plan->cuplan = NULL;
#ifndef LAZY_CUDA
	if (cuda_ondevice(src) && (0u != flags))
		plan->cuplan = fft_cuda_plan(D, dimensions, flags, ostrides, istrides, backwards);
#endif
#endif

	plan->D = D;
	plan->flags = flags;
	plan->backwards = backwards;

	PTR_ALLOC(long[D], dims);
	md_copy_dims(D, *dims, dimensions);
	plan->dims = *PTR_PASS(dims);

	PTR_ALLOC(long[D], istrs);
	md_copy_strides(D, *istrs, istrides);
	plan->istrs = *PTR_PASS(istrs);

	PTR_ALLOC(long[D], ostrs);
	md_copy_strides(D, *ostrs, ostrides);
	plan->ostrs = *PTR_PASS(ostrs);

	return operator_create2(D, dimensions, ostrides, D, dimensions, istrides, CAST_UP(PTR_PASS(plan)), fft_apply, fft_free_plan);
}
/* Convenience wrapper: FFT operator with contiguous strides. */
const struct operator_s* fft_create(unsigned int D, const long dimensions[D], unsigned long flags, complex float* dst, const complex float* src, bool backwards)
{
	long str[D];
	md_calc_strides(D, str, dimensions, CFL_SIZE);

	return fft_create2(D, dimensions, flags, str, dst, str, src, backwards);
}

/* Execute a previously created FFT operator. */
void fft_exec(const struct operator_s* o, complex float* dst, const complex float* src)
{
	operator_apply_unchecked(o, dst, src);
}

/* Release an FFT operator. */
void fft_free(const struct operator_s* o)
{
	operator_free(o);
}
/* One-shot strided forward FFT (plan, execute, free). */
void fft2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src)
{
	const struct operator_s* op = fft_create2(D, dimensions, flags, ostrides, dst, istrides, src, false);
	fft_exec(op, dst, src);
	fft_free(op);
}

/* One-shot strided inverse FFT. */
void ifft2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src)
{
	const struct operator_s* op = fft_create2(D, dimensions, flags, ostrides, dst, istrides, src, true);
	fft_exec(op, dst, src);
	fft_free(op);
}

/* One-shot forward FFT with contiguous strides. */
void fft(unsigned int D, const long dimensions[D], unsigned long flags, complex float* dst, const complex float* src)
{
	const struct operator_s* op = fft_create(D, dimensions, flags, dst, src, false);
	fft_exec(op, dst, src);
	fft_free(op);
}

/* One-shot inverse FFT with contiguous strides. */
void ifft(unsigned int D, const long dimensions[D], unsigned long flags, complex float* dst, const complex float* src)
{
	const struct operator_s* op = fft_create(D, dimensions, flags, dst, src, true);
	fft_exec(op, dst, src);
	fft_free(op);
}
/* Centered forward FFT: modulate, transform, modulate again.
 * (fftmod both before and after the fft — see the note above ifftmod2.) */
void fftc(unsigned int D, const long dimensions[__VLA(D)], unsigned long flags, complex float* dst, const complex float* src)
{
fftmod(D, dimensions, flags, dst, src);
fft(D, dimensions, flags, dst, dst);
fftmod(D, dimensions, flags, dst, dst);
}
/* Centered inverse FFT. */
void ifftc(unsigned int D, const long dimensions[__VLA(D)], unsigned long flags, complex float* dst, const complex float* src)
{
ifftmod(D, dimensions, flags, dst, src);
ifft(D, dimensions, flags, dst, dst);
ifftmod(D, dimensions, flags, dst, dst);
}
/* Centered forward FFT, strided version. Note: only the first call uses
 * istrides; the subsequent in-place steps operate with ostrides. */
void fftc2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src)
{
fftmod2(D, dimensions, flags, ostrides, dst, istrides, src);
fft2(D, dimensions, flags, ostrides, dst, ostrides, dst);
fftmod2(D, dimensions, flags, ostrides, dst, ostrides, dst);
}
/* Centered inverse FFT, strided version. */
void ifftc2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src)
{
ifftmod2(D, dimensions, flags, ostrides, dst, istrides, src);
ifft2(D, dimensions, flags, ostrides, dst, ostrides, dst);
ifftmod2(D, dimensions, flags, ostrides, dst, ostrides, dst);
}
void fftu(unsigned int D, const long dimensions[__VLA(D)], unsigned long flags, complex float* dst, const complex float* src)
{
fft(D, dimensions, flags, dst, src);
fftscale(D, dimensions, flags, dst, dst);
}
void ifftu(unsigned int D, const long dimensions[__VLA(D)], unsigned long flags, complex float* dst, const complex float* src)
{
ifft(D, dimensions, flags, dst, src);
fftscale(D, dimensions, flags, dst, dst);
}
void fftu2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src)
{
fft2(D, dimensions, flags, ostrides, dst, istrides, src);
fftscale2(D, dimensions, flags, ostrides, dst, ostrides, dst);
}
// Strided unitary inverse FFT (same symmetric fftscale2 normalization as
// fftu2, applied in-place on dst).
void ifftu2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src)
{
ifft2(D, dimensions, flags, ostrides, dst, istrides, src);
fftscale2(D, dimensions, flags, ostrides, dst, ostrides, dst);
}
// Unitary centered forward FFT: centered transform (fftc) followed by
// in-place normalization.
void fftuc(unsigned int D, const long dimensions[__VLA(D)], unsigned long flags, complex float* dst, const complex float* src)
{
fftc(D, dimensions, flags, dst, src);
fftscale(D, dimensions, flags, dst, dst);
}
// Unitary centered inverse FFT: centered inverse transform (ifftc) followed
// by in-place normalization.
void ifftuc(unsigned int D, const long dimensions[__VLA(D)], unsigned long flags, complex float* dst, const complex float* src)
{
ifftc(D, dimensions, flags, dst, src);
fftscale(D, dimensions, flags, dst, dst);
}
// Strided unitary centered forward FFT.
void fftuc2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src)
{
fftc2(D, dimensions, flags, ostrides, dst, istrides, src);
fftscale2(D, dimensions, flags, ostrides, dst, ostrides, dst);
}
// Strided unitary centered inverse FFT.
void ifftuc2(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], complex float* dst, const long istrides[D], const complex float* src)
{
ifftc2(D, dimensions, flags, ostrides, dst, istrides, src);
fftscale2(D, dimensions, flags, ostrides, dst, ostrides, dst);
}
// One-time FFTW threading-layer init flag; read and written only inside the
// critical section below.
bool fft_threads_init = false;
// Set the number of threads FFTW uses when building plans.  With FFTWTHREADS
// defined, the first call initializes FFTW's threading support exactly once;
// both the init check and the nthreads update are serialized by (unnamed)
// OpenMP critical sections to be safe when called from parallel regions.
// Without FFTWTHREADS this is a no-op.
void fft_set_num_threads(unsigned int n)
{
#ifdef FFTWTHREADS
#pragma omp critical
if (!fft_threads_init) {
fft_threads_init = true;
fftwf_init_threads();
}
#pragma omp critical
fftwf_plan_with_nthreads(n);
#else
UNUSED(n);
#endif
}
|
GB_binop__bget_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bget_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__bget_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__bget_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__bget_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bget_int64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bget_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__bget_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bget_int64)
// C=scalar+B GB (_bind1st__bget_int64)
// C=scalar+B' GB (_bind1st_tran__bget_int64)
// C=A+scalar GB (_bind2nd__bget_int64)
// C=A'+scalar GB (_bind2nd_tran__bget_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = GB_BITGET (aij, bij, int64_t, 64)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITGET (x, y, int64_t, 64) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BGET || GxB_NO_INT64 || GxB_NO_BGET_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense, applying the BGET operator
// elementwise via the shared template.  (Auto-generated kernel: logic lives
// in the included template; do not edit.)  Returns GrB_NO_VALUE when this
// operator/type combination was disabled at compile time.
GrB_Info GB (_Cdense_ewise3_noaccum__bget_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense C using the BGET
// operator.  B is pre-sliced into tasks (B_ek_slicing); the work happens in
// the included template.  (Auto-generated; do not edit.)
GrB_Info GB (_Cdense_accumB__bget_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed type-erased via p_bwork) into a
// dense matrix C.  (Auto-generated; do not edit.)
// NOTE(review): the second "return (GrB_SUCCESS)" after the inner block is
// unreachable — harmless artifact of the code generator.
GrB_Info GB (_Cdense_accumb__bget_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B with the BGET operator.  The
// mask/task plumbing (C_to_*, TaskList) was precomputed by the caller; the
// numeric work is in GB_add_template.c.  Workspaces declared here are freed
// by GB_FREE_WORK.  (Auto-generated; do not edit.)
GrB_Info GB (_AaddB__bget_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C=A.*B with optional mask, where C is sparse or
// hypersparse.  Numeric phase is in GB_emult_08_meta.c.  (Auto-generated;
// do not edit.)
GrB_Info GB (_AemultB_08__bget_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  Because GB_BINOP_FLIP is set for BGET (non-commutative, no
// flipped variant), both argument orders are compiled and selected at
// runtime via flipxy.  (Auto-generated; do not edit.)
GrB_Info GB (_AemultB_02__bget_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hyper and A, B are
// bitmap/full.  (Auto-generated; do not edit.)
GrB_Info GB (_AemultB_04__bget_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult producing a bitmap C: C=A.*B, C<M>=A.*B, or C<!M>=A.*B.
// (Auto-generated; do not edit.)
GrB_Info GB (_AemultB_bitmap__bget_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op(x, Bx): bind the scalar x as the first operand and apply
// z = bitget(x, bij) to every stored entry of B.  Entries absent from the
// bitmap Bb are skipped; Cx and Bx may alias.  (Auto-generated; do not edit.)
GrB_Info GB (_bind1st__bget_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int64_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_BITGET (x, bij, int64_t, 64) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op(Ax, y): bind the scalar y as the second operand and apply
// z = bitget(aij, y) to every stored entry of A.  Entries absent from the
// bitmap Ab are skipped; Cx and Ax may alias.  (Auto-generated; do not edit.)
GrB_Info GB (_bind2nd__bget_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int64_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_BITGET (aij, y, int64_t, 64) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITGET (x, aij, int64_t, 64) ; \
}
// C = op(x, A'): transpose A and apply z = bitget(x, aij) to each entry,
// using the GB_CAST_OP macro defined above inside GB_unop_transpose.c.
// GB_ATYPE is temporarily redefined because A is the second operand here.
// (Auto-generated; do not edit.)
GrB_Info GB (_bind1st_tran__bget_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITGET (aij, y, int64_t, 64) ; \
}
// C = op(A', y): transpose A and apply z = bitget(aij, y) to each entry,
// via the GB_CAST_OP macro defined above and GB_unop_transpose.c.
// (Auto-generated; do not edit.)
GrB_Info GB (_bind2nd_tran__bget_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
lis_matvec_dns.c | /* Copyright (C) 2002-2012 The SSI Project. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the project nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE SCALABLE SOFTWARE INFRASTRUCTURE
PROJECT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
#include "lis_config.h"
#else
#ifdef HAVE_CONFIG_WIN32_H
#include "lis_config_win32.h"
#endif
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef USE_MPI
#include <mpi.h>
#endif
#include "lislib.h"
// Dense matrix-vector product y = A*x.
// A->value is stored column-major: entry (i,j) lives at value[j*n+i], with
// n local rows and np columns.  Rows are partitioned across OpenMP threads
// by LIS_GET_ISIE, so each thread writes a disjoint slice of y (no races).
// The column-outer loop order keeps accesses to value[] contiguous.
void lis_matvec_dns(LIS_MATRIX A, LIS_SCALAR x[], LIS_SCALAR y[])
{
LIS_INT i,j,is,ie;
LIS_INT np,n,nprocs,my_rank;
n = A->n;
np = A->np;
#ifdef _OPENMP
nprocs = omp_get_max_threads();
#else
nprocs = 1;
#endif
#ifdef _OPENMP
#pragma omp parallel private(i,j,is,ie,my_rank)
#endif
{
#ifdef _OPENMP
my_rank = omp_get_thread_num();
#else
my_rank = 0;
#endif
LIS_GET_ISIE(my_rank,nprocs,n,is,ie);
// zero this thread's slice of y before accumulating
for(i=is;i<ie;i++)
{
y[i] = 0;
}
for(j=0;j<np;j++)
{
for(i=is;i<ie;i++)
{
y[i] += A->value[j*n+i] * x[j];
}
}
}
}
// Transposed dense matrix-vector product y = A^T * x.
// OpenMP path: each thread computes partial dot products of its row range
// [is,ie) for every column j into a private slice of scratch array w
// (nprocs*np entries), then after a barrier the partials are reduced into y.
// NOTE(review): the lis_malloc result is not checked for NULL — confirm
// project policy (other LIS kernels may rely on lis_malloc aborting on OOM).
void lis_matvect_dns(LIS_MATRIX A, LIS_SCALAR x[], LIS_SCALAR y[])
{
LIS_INT i,j;
LIS_INT np,n;
LIS_SCALAR t;
#ifdef _OPENMP
LIS_INT is,ie,nprocs,my_rank;
LIS_SCALAR *w;
#endif
n = A->n;
np = A->np;
#ifdef _OPENMP
nprocs = omp_get_max_threads();
w = (LIS_SCALAR *)lis_malloc( nprocs*np*sizeof(LIS_SCALAR),"lis_matvect_dns::w" );
#pragma omp parallel private(i,j,t,is,ie,my_rank)
{
my_rank = omp_get_thread_num();
LIS_GET_ISIE(my_rank,nprocs,n,is,ie);
memset( &w[my_rank*np], 0, np*sizeof(LIS_SCALAR) );
// partial dot product of column j with this thread's row range
for(j=0;j<np;j++)
{
t = 0.0;
for(i=is;i<ie;i++)
{
t += A->value[j*n+i] * x[i];
}
w[my_rank*np + j] = t;
}
// all partials must be written before the reduction below
#pragma omp barrier
#pragma omp for
for(i=0;i<np;i++)
{
t = 0.0;
for(j=0;j<nprocs;j++)
{
t += w[j*np+i];
}
y[i] = t;
}
}
lis_free(w);
#else
// serial path: one dot product per column
for(j=0;j<np;j++)
{
t = 0.0;
for(i=0;i<n;i++)
{
t += A->value[j*n+i] * x[i];
}
y[j] = t;
}
#endif
}
|
DRB063-outeronly1-orig-no.c | /*
Copyright (C) 1991-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>.
*/
/*
This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it.
*/
/*
glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default.
*/
/*
wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
synchronized with ISO/IEC 10646:2017, fifth edition, plus
the following additions from Amendment 1 to the fifth edition:
- 56 emoji characters
- 285 hentaigana
- 3 additional Zanabazar Square characters
*/
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Only the outmost loop can be parallelized.
*/
int n = 100, m = 100;
double b[100][100];
/* Initialize the global 100x100 matrix b with b[i][j] = i*j.
 * Fixes vs. original: removed the unused variable k and the pointless
 * _ret_val_0 temporary, and dropped the inner "#pragma omp parallel for" —
 * a nested parallel region inside the outer one only risks thread
 * oversubscription and computes the same result (rows are independent, so
 * parallelizing the outer loop alone is sufficient and race-free).
 * Returns 0 (kept for interface compatibility). */
int init()
{
	int i, j;
	#pragma cetus private(i, j)
	#pragma loop name init#0
	#pragma cetus parallel
	#pragma omp parallel for private(i, j)
	for (i=0; i<100; i ++ )
	{
		#pragma cetus private(j)
		#pragma loop name init#0#0
		for (j=0; j<100; j ++ )
		{
			b[i][j]=(i*j);
		}
	}
	return 0;
}
/* Shift every row of the global matrix b one position to the left:
 * b[i][j] = b[i][j+1] for j in [0, m-2].  Only the OUTER loop is
 * parallelized: rows are independent, but the inner loop carries a
 * dependence (iteration j reads b[i][j+1], the element iteration j+1
 * overwrites), so the inner loop must run sequentially in order. */
void foo()
{
int i, j;
/* Be careful about bounds of j */
#pragma cetus private(i, j)
#pragma loop name foo#0
#pragma cetus parallel
#pragma omp parallel for private(i, j)
for (i=0; i<n; i ++ )
{
#pragma cetus private(j)
#pragma loop name foo#0#0
for (j=0; j<(m-1); j ++ )
{
b[i][j]=b[i][j+1];
}
}
return ;
}
/* Print every element of the global matrix b, one value per line, in
 * row-major order.  Fixes vs. original: removed the unused variable k and
 * the pointless _ret_val_0 temporary.
 * NOTE(review): this file does not visibly #include <stdio.h>; printf is
 * used via implicit declaration (an error in C99+) — confirm the build adds
 * the include elsewhere.
 * Returns 0 (kept for interface compatibility). */
int print()
{
	int i, j;
	#pragma cetus private(i, j)
	#pragma loop name print#0
	for (i=0; i<100; i ++ )
	{
		#pragma cetus private(j)
		#pragma loop name print#0#0
		for (j=0; j<100; j ++ )
		{
			printf("%lf\n", b[i][j]);
		}
	}
	return 0;
}
/* Driver: fill b, shift each row left by one, then print the result. */
int main()
{
	init();
	foo();
	print();
	return 0;
}
|
viterbi_decode_op.h | /* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
#include "paddle/fluid/operators/controlflow/compare_op.h"
#include "paddle/fluid/operators/elementwise/elementwise_functor.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_function.h"
#include "paddle/fluid/operators/gather.h"
#include "paddle/fluid/operators/math/concat_and_split.h"
#include "paddle/fluid/operators/transpose_op.h"
#include "paddle/fluid/operators/unique_op.h"
#ifdef PADDLE_WITH_MKLML
#include <omp.h>
#endif
namespace paddle {
namespace operators {
using LoDTensor = framework::LoDTensor;
// Compute max values (*out) and their indices (*out_idx) along `axis` of
// `input`.  Dimensions before the axis collapse into `pre`, after it into
// `post`; each of the height = pre*post output slots scans n = dims[axis]
// elements at stride `post`.  Ties keep the first (lowest) index because the
// comparison is strict `>`.
// NOTE(review): if dims[axis] == 0 the index stays -1 and the value stays
// lowest() — presumably callers guarantee a non-empty axis; confirm.
template <typename DeviceContext, typename T, typename IndType>
struct Argmax {
void operator()(const framework::ExecutionContext& ctx, const Tensor& input,
Tensor* out_idx, Tensor* out, int axis) {
framework::DDim input_dims = input.dims();
int64_t pre = 1;
int64_t post = 1;
int64_t n = input_dims[axis];
for (int i = 0; i < axis; i++) {
pre *= input_dims[i];
}
for (int i = axis + 1; i < input_dims.size(); i++) {
post *= input_dims[i];
}
int64_t height = pre * post;
int64_t width = n;
const T* in_data = input.data<T>();
IndType* out_idx_data = out_idx->data<IndType>();
T* out_data = out->data<T>();
// Reduce
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
for (int64_t i = 0; i < height; ++i) {
int64_t h = i / post;
int64_t w = i % post;
IndType max_idx = -1;
T max_value = (std::numeric_limits<T>::lowest)(); // for windows compile
for (int64_t j = 0; j < width; ++j) {
if (in_data[h * width * post + j * post + w] > max_value) {
max_value = in_data[h * width * post + j * post + w];
max_idx = j;
}
}
out_data[i] = max_value;
out_idx_data[i] = max_idx;
}
}
};
// Fill data[0..end) with the arithmetic sequence data[i] = i * scale.
// The device context parameter is unused in this CPU implementation; it
// exists so device-specific specializations share one call site.
template <typename DeviceContext>
struct ARange {
  void operator()(const DeviceContext& dev_ctx, int64_t* data, int end,
                  int64_t scale) {
    int64_t value = 0;
    for (int idx = 0; idx != end; ++idx) {
      data[idx] = value;
      value += scale;
    }
  }
};
// Write the maximum element of `input` into *max_value using
// std::max_element over the tensor's flat data.
// NOTE(review): dereferencing max_element on an empty tensor is UB —
// presumably callers guarantee numel() > 0; confirm.
template <typename DeviceContext, typename T>
struct GetMaxValue {
void operator()(const DeviceContext& dev_ctx, const Tensor& input,
T* max_value) {
auto input_ptr = input.data<T>();
auto num = input.numel();
*max_value = *std::max_element(input_ptr, input_ptr + num);
}
};
// Thin CPU wrapper: gather rows of `src` selected by `index` into `output`
// via CPUGather.
template <typename DeviceContext, typename T, typename IndexT = int>
struct Gather {
void operator()(const DeviceContext& ctx, const Tensor& src,
const Tensor& index, Tensor* output) {
CPUGather<T, IndexT>(ctx, src, index, output);
}
};
// Apply `Functor` elementwise to two tensors of identical shape, writing
// OutT results into `out` (which must already be allocated with matching
// numel).  Parallelized with OpenMP when built with MKLML.
template <typename T, typename Functor, typename OutT = T>
void SameDimsBinaryOP(const Tensor& lhs, const Tensor& rhs, Tensor* out) {
const T* lhs_ptr = lhs.data<T>();
const T* rhs_ptr = rhs.data<T>();
OutT* out_ptr = out->data<OutT>();
Functor functor;
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
for (int i = 0; i < out->numel(); ++i) {
out_ptr[i] = functor(lhs_ptr[i], rhs_ptr[i]);
}
}
// Elementwise comparison of two same-shaped int64 tensors, storing the
// boolean result as type T in `mask` (comparison runs over int64_t, output
// is cast to T by SameDimsBinaryOP's OutT parameter).
template <typename DeviceContext, template <typename T> typename CompareFunctor,
typename T>
struct GetMask {
void operator()(const framework::ExecutionContext& ctx, const Tensor& lhs,
const Tensor& rhs, Tensor* mask) {
SameDimsBinaryOP<int64_t, CompareFunctor<int64_t>, T>(lhs, rhs, mask);
}
};
// Map a linear broadcast-output index to linear indices into the two inputs.
// Multi-threaded variant: stateless per call — the coordinate of output_idx
// is recomputed from the output strides each time, so calls for different
// output elements can run concurrently.  Dimensions of size 1 in an input
// are broadcast (contribute no offset).  index_array is unused here; it
// belongs to the sequential specialization.
template <bool is_multi_threads>
struct GetInputIndex {
  void operator()(const std::vector<int>& lhs_dims,
                  const std::vector<int>& rhs_dims,
                  const std::vector<int>& output_dims,
                  const std::vector<int>& lhs_strides,
                  const std::vector<int>& rhs_strides,
                  const std::vector<int>& output_strides, int output_idx,
                  int* index_array, int* lhs_idx, int* rhs_idx) {
    int remaining = output_idx;
    const int ndims = static_cast<int>(output_strides.size());
    for (int dim = 0; dim < ndims; ++dim) {
      const int coord = remaining / output_strides[dim];
      remaining -= coord * output_strides[dim];
      if (lhs_dims[dim] > 1) {
        *lhs_idx += coord * lhs_strides[dim];
      }
      if (rhs_dims[dim] > 1) {
        *rhs_idx += coord * rhs_strides[dim];
      }
    }
  }
};
// Sequential specialization: instead of recomputing the coordinate from
// output_idx, it keeps the current multi-dimensional coordinate in
// index_array and advances it by one element per call — cheaper, but the
// shared mutable state means calls must happen in order (hence single-
// threaded only).
template <>
struct GetInputIndex<false> {
void operator()(const std::vector<int>& lhs_dims,
const std::vector<int>& rhs_dims,
const std::vector<int>& output_dims,
const std::vector<int>& lhs_strides,
const std::vector<int>& rhs_strides,
const std::vector<int>& output_strides, int output_idx,
int* index_array, int* lhs_idx, int* rhs_idx) {
int out_dims_size = output_strides.size();
*lhs_idx = GetElementwiseIndex(lhs_dims.data(), out_dims_size, index_array);
*rhs_idx = GetElementwiseIndex(rhs_dims.data(), out_dims_size, index_array);
UpdateElementwiseIndexArray(output_dims.data(), out_dims_size, index_array);
}
};
// Broadcasting elementwise binary op: out = Functor(lhs, rhs) where lhs and
// rhs have the same rank as out but may have size-1 (broadcast) dimensions.
// Row-major strides are derived from the dims, then each output element's
// input offsets are resolved by GetInputIndex — the stateless variant when
// is_multi_threads (so the OpenMP loop below is safe), the cheaper stateful
// variant otherwise.
template <typename T, typename Functor, bool is_multi_threads = false>
void SimpleBroadcastBinaryOP(const Tensor& lhs, const Tensor& rhs,
Tensor* out) {
const T* lhs_ptr = lhs.data<T>();
const T* rhs_ptr = rhs.data<T>();
T* out_ptr = out->data<T>();
int out_size = static_cast<int>(out->dims().size());
std::vector<int> out_dims(out_size);
std::vector<int> lhs_dims(out_size);
std::vector<int> rhs_dims(out_size);
std::copy(lhs.dims().Get(), lhs.dims().Get() + out_size, lhs_dims.data());
std::copy(rhs.dims().Get(), rhs.dims().Get() + out_size, rhs_dims.data());
std::copy(out->dims().Get(), out->dims().Get() + out_size, out_dims.data());
std::vector<int> output_strides(out_size, 1);
std::vector<int> lhs_strides(out_size, 1);
std::vector<int> rhs_strides(out_size, 1);
std::vector<int> index_array(out_size, 0);
// calculate strides
for (int i = out_size - 2; i >= 0; --i) {
output_strides[i] = output_strides[i + 1] * out_dims[i + 1];
lhs_strides[i] = lhs_strides[i + 1] * lhs_dims[i + 1];
rhs_strides[i] = rhs_strides[i + 1] * rhs_dims[i + 1];
}
Functor functor;
GetInputIndex<is_multi_threads> get_input_index;
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
for (int i = 0; i < out->numel(); ++i) {
int lhs_idx = 0;
int rhs_idx = 0;
get_input_index(lhs_dims, rhs_dims, out_dims, lhs_strides, rhs_strides,
output_strides, i, index_array.data(), &lhs_idx, &rhs_idx);
out_ptr[i] = functor(lhs_ptr[lhs_idx], rhs_ptr[rhs_idx]);
}
}
// Dispatch an elementwise binary op: the fast same-shape path when dims
// match exactly, otherwise the broadcasting path — choosing the
// thread-safe index-mapping variant only when OpenMP actually has more
// than one thread available (the stateful variant is cheaper serially).
template <typename DeviceContext, template <typename T> typename BinaryFunctor,
typename T>
struct BinaryOperation {
void operator()(const DeviceContext& dev_ctx, const Tensor& lhs,
const Tensor& rhs, Tensor* output) {
if (lhs.dims() == rhs.dims()) {
SameDimsBinaryOP<T, BinaryFunctor<T>>(lhs, rhs, output);
} else {
bool is_multi_threads = false;
#ifdef PADDLE_WITH_MKLML
if (omp_get_max_threads() > 1) {
is_multi_threads = true;
}
#endif
if (is_multi_threads) {
SimpleBroadcastBinaryOP<T, BinaryFunctor<T>, true>(lhs, rhs, output);
} else {
SimpleBroadcastBinaryOP<T, BinaryFunctor<T>, false>(lhs, rhs, output);
}
}
}
};
// Simple bump allocator over a pre-allocated, flattened LoDTensor: each
// GetBufferBlock call slices the next `prod(shape)` elements and reshapes
// the slice, so many small tensors share one allocation instead of hitting
// the allocator repeatedly.
// NOTE(review): no bounds check — callers must size the backing tensor to
// cover all requested blocks (see the buffer_size computations in the
// kernel below); confirm.
class TensorBuffer {
public:
explicit TensorBuffer(const LoDTensor& in) : buffer_(in), offset_(0) {
buffer_.Resize({buffer_.numel()});
}
Tensor GetBufferBlock(std::initializer_list<int64_t> shape) {
int64_t size = std::accumulate(shape.begin(), shape.end(), 1,
std::multiplies<int64_t>());
Tensor block = buffer_.Slice(offset_, offset_ + size);
offset_ += size;
block.Resize(shape);
return block;
}
private:
LoDTensor buffer_; // need to resize 1-D Tensor
int offset_; // next unused element in the flattened buffer
};
// Viterbi decoding op kernel: for each sequence in the batch, finds the
// highest-scoring tag path ("Path") and its score ("Scores") given per-step
// emissions ("Input", [batch, seq_len, n_labels]), a transition matrix
// ("Transition") and per-sequence lengths ("Length").
template <typename DeviceContext, typename T>
class ViterbiDecodeKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
bool include_bos_eos_tag = ctx.Attr<bool>("include_bos_eos_tag");
auto& dev_ctx = ctx.template device_context<DeviceContext>();
auto curr_place = ctx.GetPlace();
auto* input = ctx.Input<Tensor>("Input");
// Input layout is [batch, seq_len, n_labels].
auto batch_size = static_cast<int>(input->dims()[0]);
auto seq_len = static_cast<int>(input->dims()[1]);
auto n_labels = static_cast<int>(input->dims()[2]);
math::SetConstant<DeviceContext, T> float_functor;
math::SetConstant<DeviceContext, int64_t> int_functor;
// One argmax tensor per decoded time step, used for backtracking.
std::vector<Tensor> historys;
// We create tensor buffer in order to avoid allocating memory frequently
// 10 means allocate 10*batch_size bytes memory, such as int_mask, zero...
int buffer_size = batch_size * (n_labels + 1) * seq_len + 10 * batch_size;
LoDTensor int_buffer;
int_buffer.Resize(framework::make_ddim({buffer_size}));
int_buffer.mutable_data<int64_t>(ctx.GetPlace());
TensorBuffer int_tensor_buffer(int_buffer);
// create float tensor buffer
// 10 means allocate 10*batch_size*n_labels bytes, such as alpha, alpha_max
buffer_size = batch_size * (seq_len + 10) * n_labels +
(batch_size + 2) * n_labels * n_labels;
LoDTensor float_buffer;
float_buffer.Resize(framework::make_ddim({buffer_size}));
float_buffer.mutable_data<T>(ctx.GetPlace());
TensorBuffer float_tensor_buffer(float_buffer);
auto* length = ctx.Input<Tensor>("Length");
// left_length counts the remaining (undecoded) steps of each sequence.
Tensor left_length = int_tensor_buffer.GetBufferBlock({batch_size, 1});
framework::TensorCopy(*length, curr_place, dev_ctx, &left_length);
int64_t max_seq_len = 0;
GetMaxValue<DeviceContext, int64_t> get_max_value;
get_max_value(dev_ctx, left_length, &max_seq_len);
auto* scores = ctx.Output<Tensor>("Scores");
scores->mutable_data<T>(curr_place);
auto* path = ctx.Output<Tensor>("Path");
path->Resize({batch_size, max_seq_len});
path->mutable_data<int64_t>(curr_place);
// tpath is the transposed path ([max_seq_len, batch]); it is filled
// backwards during backtracking and transposed into `path` at the end.
Tensor tpath = int_tensor_buffer.GetBufferBlock({max_seq_len, batch_size});
auto batch_path = Unbind(tpath);
for (auto it = batch_path.begin(); it != batch_path.end(); ++it) {
it->Resize({batch_size});
}
// create and init required tensor
// Transpose input to [seq_len, batch, n_labels] so time steps are leading.
Tensor input_exp =
float_tensor_buffer.GetBufferBlock({seq_len, batch_size, n_labels});
TransCompute<DeviceContext, T>(3, dev_ctx, *input, &input_exp, {1, 0, 2});
auto* transition = ctx.Input<Tensor>("Transition");
Tensor trans_exp = float_tensor_buffer.GetBufferBlock({n_labels, n_labels});
framework::TensorCopy(*transition, curr_place, dev_ctx, &trans_exp);
trans_exp.Resize({1, n_labels, n_labels});
// alpha holds the running best scores per (batch, label).
Tensor alpha = float_tensor_buffer.GetBufferBlock({batch_size, n_labels});
Tensor zero = int_tensor_buffer.GetBufferBlock({batch_size, 1});
int_functor(dev_ctx, &zero, 0);
Tensor one = int_tensor_buffer.GetBufferBlock({batch_size, 1});
int_functor(dev_ctx, &one, 1);
Tensor float_one = float_tensor_buffer.GetBufferBlock({batch_size, 1});
float_functor(dev_ctx, &float_one, static_cast<T>(1.0));
Tensor alpha_trn_sum =
float_tensor_buffer.GetBufferBlock({batch_size, n_labels, n_labels});
Tensor alpha_max =
float_tensor_buffer.GetBufferBlock({batch_size, n_labels});
Tensor alpha_argmax =
int_tensor_buffer.GetBufferBlock({seq_len, batch_size, n_labels});
auto alpha_argmax_unbind = Unbind(alpha_argmax);
Tensor alpha_nxt =
float_tensor_buffer.GetBufferBlock({batch_size, n_labels});
// Masks used to freeze alpha / path entries for finished sequences.
Tensor int_mask = int_tensor_buffer.GetBufferBlock({batch_size});
Tensor zero_len_mask = int_tensor_buffer.GetBufferBlock({batch_size});
Tensor float_mask = float_tensor_buffer.GetBufferBlock({batch_size, 1});
// Split Transition into [rest | stop | start] rows.
// NOTE(review): assumes the last two rows of Transition are the stop and
// start tags, in that order — confirm against the op's documentation.
Tensor stop_trans = float_tensor_buffer.GetBufferBlock({1, 1, n_labels});
Tensor start_trans = float_tensor_buffer.GetBufferBlock({1, 1, n_labels});
Tensor rest_trans =
float_tensor_buffer.GetBufferBlock({1, n_labels - 2, n_labels});
Tensor last_ids = int_tensor_buffer.GetBufferBlock({batch_size});
Tensor last_ids_tmp = int_tensor_buffer.GetBufferBlock({batch_size});
Tensor batch_offset = int_tensor_buffer.GetBufferBlock({batch_size});
Tensor gather_idx = int_tensor_buffer.GetBufferBlock({batch_size});
std::vector<const Tensor*> shape{&rest_trans, &stop_trans, &start_trans};
std::vector<Tensor*> outputs{&rest_trans, &stop_trans, &start_trans};
math::SplitFunctor<DeviceContext, T> split_functor;
split_functor(dev_ctx, trans_exp, shape, 1, &outputs);
stop_trans.Resize({1, n_labels});
start_trans.Resize({1, n_labels});
// First-step emissions initialize alpha.
auto logit0 = input_exp.Slice(0, 1);
logit0.Resize({batch_size, n_labels});
// Broadcasting elementwise helpers (add/mul/sub) for float and int64.
BinaryOperation<DeviceContext, AddFunctor, T> AddFloat;
BinaryOperation<DeviceContext, AddFunctor, int64_t> AddInt;
BinaryOperation<DeviceContext, MulFunctor, T> MulFloat;
BinaryOperation<DeviceContext, MulFunctor, int64_t> MulInt;
BinaryOperation<DeviceContext, SubFunctor, T> SubFloat;
BinaryOperation<DeviceContext, SubFunctor, int64_t> SubInt;
if (include_bos_eos_tag) {
// alpha = logit0 + start_trans; length-1 sequences also get stop_trans.
AddFloat(dev_ctx, logit0, start_trans, &alpha);
GetMask<DeviceContext, EqualFunctor, T>()(ctx, left_length, one,
&float_mask);
MulFloat(dev_ctx, stop_trans, float_mask, &alpha_nxt);
AddFloat(dev_ctx, alpha, alpha_nxt, &alpha);
} else {
alpha = logit0;
}
SubInt(dev_ctx, left_length, one, &left_length);
Argmax<DeviceContext, T, int64_t> argmax;
// Forward recursion over time steps 1..max_seq_len-1.
for (int64_t i = 1; i < max_seq_len; ++i) {
Tensor logit = input_exp.Slice(i, i + 1);
logit.Resize({batch_size, n_labels});
Tensor& alpha_exp = alpha.Resize({batch_size, n_labels, 1});
// alpha_trn_sum[b, prev, next] = alpha[b, prev] + trans[prev, next]
AddFloat(dev_ctx, alpha_exp, trans_exp, &alpha_trn_sum);
auto alpha_argmax_temp = alpha_argmax_unbind[i - 1];
alpha_argmax_temp.Resize({batch_size, n_labels});
// Best previous label (and its score) for every next label.
argmax(ctx, alpha_trn_sum, &alpha_argmax_temp, &alpha_max, 1);
historys.emplace_back(alpha_argmax_temp);
AddFloat(dev_ctx, alpha_max, logit, &alpha_nxt);
alpha.Resize({batch_size, n_labels});
// mask = paddle.cast((left_length > 0), dtype='float32')
// alpha = mask * alpha_nxt + (1 - mask) * alpha
GetMask<DeviceContext, GreaterThanFunctor, T>()(ctx, left_length, zero,
&float_mask);
// alpha_nxt = mask * alpha_nxt
MulFloat(dev_ctx, alpha_nxt, float_mask, &alpha_nxt);
// inv_mask = 1 - mask
SubFloat(dev_ctx, float_one, float_mask, &float_mask);
// alpha = (1 - mask) * alpha
MulFloat(dev_ctx, alpha, float_mask, &alpha);
// alpha += alpha_nxt
AddFloat(dev_ctx, alpha, alpha_nxt, &alpha);
if (include_bos_eos_tag) {
GetMask<DeviceContext, EqualFunctor, T>()(ctx, left_length, one,
&float_mask);
// alpha += mask * trans_exp[:, self.stop_idx]
MulFloat(dev_ctx, stop_trans, float_mask, &alpha_nxt);
AddFloat(dev_ctx, alpha, alpha_nxt, &alpha);
}
SubInt(dev_ctx, left_length, one, &left_length);
}
// Best final label per batch row, and the corresponding path score.
argmax(ctx, alpha, &last_ids, scores, 1);
left_length.Resize({batch_size});
GetMask<DeviceContext, GreaterEqualFunctor, int64_t>()(ctx, left_length,
zero, &int_mask);
// last_ids_update = last_ids * tag_mask
int last_ids_index = 1;
int actual_len = (std::min)(seq_len, static_cast<int>(max_seq_len));
MulInt(dev_ctx, last_ids, int_mask,
&batch_path[actual_len - last_ids_index]);
// The algorithm below can refer to
// https://github.com/PaddlePaddle/PaddleNLP/blob/develop/paddlenlp/layers/crf.py#L438
ARange<DeviceContext> arange;
// batch_offset[b] = b * n_labels, used to index the flattened argmax maps.
arange(dev_ctx, batch_offset.data<int64_t>(), batch_size, n_labels);
Gather<DeviceContext, int64_t, int64_t> gather;
// Backtrack through the stored argmax tables, newest step first.
for (auto hist = historys.rbegin(); hist != historys.rend(); ++hist) {
++last_ids_index;
AddInt(dev_ctx, left_length, one, &left_length);
AddInt(dev_ctx, batch_offset, last_ids, &gather_idx);
Tensor& last_ids_update = batch_path[actual_len - last_ids_index];
hist->Resize({batch_size * n_labels});
gather(dev_ctx, *hist, gather_idx, &last_ids_update);
// Zero out entries of sequences that have not started yet at this step.
GetMask<DeviceContext, GreaterThanFunctor, int64_t>()(ctx, left_length,
zero, &int_mask);
MulInt(dev_ctx, last_ids_update, int_mask, &last_ids_update);
// For sequences ending exactly here, take last_ids instead of the gather.
GetMask<DeviceContext, EqualFunctor, int64_t>()(ctx, left_length, zero,
&zero_len_mask);
MulInt(dev_ctx, last_ids, zero_len_mask, &last_ids_tmp);
SubInt(dev_ctx, one, zero_len_mask, &zero_len_mask);
MulInt(dev_ctx, last_ids_update, zero_len_mask, &last_ids_update);
AddInt(dev_ctx, last_ids_update, last_ids_tmp, &last_ids_update);
// Keep previous last_ids for sequences still before their start.
GetMask<DeviceContext, LessThanFunctor, int64_t>()(ctx, left_length, zero,
&int_mask);
MulInt(dev_ctx, last_ids, int_mask, &last_ids);
AddInt(dev_ctx, last_ids_update, last_ids, &last_ids);
}
// Transpose [max_seq_len, batch] -> [batch, max_seq_len] into the output.
TransCompute<DeviceContext, int64_t>(2, dev_ctx, tpath, path, {1, 0});
}
};
} // namespace operators
} // namespace paddle
|
tsp_hh06.c | /*
Description:
This program executes my implementation of the "Heinritz Hsiao" algorithm to solve the "Travelling Salesman Problem"
Next city in path is either the closest or second closest one, depending on the value of <PICK_CLOSEST_CITY_POSSIBILITY>
Abides by Lab 3 Exercise 5 requirements
Author:
Georgios Evangelou (1046900)
Year: 5
Parallel Programming in Machine Learning Problems
Electrical and Computer Engineering Department, University of Patras
System Specifications:
CPU: AMD Ryzen 2600 (6 cores/12 threads, @3.8 GHz, 6786.23 bogomips)
GPU: Nvidia GTX 1050 (dual-fan, overclocked)
RAM: 8GB (dual-channel, @2666 MHz)
Version Notes:
Compiles/Runs/Debugs with: gcc tsp_hh06.c -o tsp_hh06 -lm -O3 -pg -fopenmp && time ./tsp_hh06 && gprof ./tsp_hh06
Executes the algorithm for 10.000 cities, spanning in an area of 1.000x1.000 km and produces correct results
Inherits all settings of versions tsp_hh04 and tsp_hh05, unless stated otherwise
Function IsInPath() is substituted by boolean array
Needs a little more time than tsp_hh05, because of the parallelism overhead, but produces slightly better results
Results when: PICK_CLOSEST_CITY_POSSIBILITY = 1.00 ===> Minimum total path distance: 89515.94
PICK_CLOSEST_CITY_POSSIBILITY = 0.95 ===> Minimum total path distance: 90720.28
PICK_CLOSEST_CITY_POSSIBILITY = 0.90 ===> Minimum total path distance: 94532.01
PICK_CLOSEST_CITY_POSSIBILITY = 0.85 ===> Minimum total path distance: 97698.78
PICK_CLOSEST_CITY_POSSIBILITY = 0.80 ===> Minimum total path distance: 101386.71
PICK_CLOSEST_CITY_POSSIBILITY = 0.75 ===> Minimum total path distance: 103783.51
Needs: ~ 0.9 seconds to calculate an optimal path using 12 threads and all optimizations listed below
*/
// ****************************************************************************************************************
#pragma GCC optimize("O3","unroll-loops","omit-frame-pointer","inline") //Apply O3 and extra optimizations
#pragma GCC option("arch=native","tune=native","no-zero-upper") //Adapt to the current system
#pragma GCC target("avx") //Enable AVX
// ****************************************************************************************************************
#include "stdio.h"
#include "stdlib.h"
#include "math.h"
#include "omp.h"
#include "stdbool.h"
// ****************************************************************************************************************
#define N 10000
#define Nx 1000
#define Ny 1000
#define nonExist -999999
#define PICK_CLOSEST_CITY_POSSIBILITY 0.90
#define THREADS 12
// ****************************************************************************************************************
float CitiesX[N];
float CitiesY[N];
int ThreadsPath[THREADS][N+1];
double CalculatedDistances[N][N];
// ****************************************************************************************************************
// Initializes the cities' positions
// ****************************************************************************************************************
/* Places each of the N cities at a uniformly random point in [0,Nx]x[0,Ny]. */
void SetCities() {
    printf("Now initializing the positions of the cities...\n");
    for (int city = 0; city < N; city++) {
        CitiesX[city] = Nx * (float) rand() / RAND_MAX;
        CitiesY[city] = Ny * (float) rand() / RAND_MAX;
    }
}
// ****************************************************************************************************************
// Prints the cities' positions
// ****************************************************************************************************************
/* Dumps the coordinates of every city to stdout. */
void PrintCities() {
    printf("> The cities are:\n");
    for (int city = 0; city < N; city++)
        printf(">> City: %6d X:%5.2f Y:%5.2f\n", city, CitiesX[city], CitiesY[city] );
    printf("\n");
}
// ****************************************************************************************************************
// Prints the travelling path
// ****************************************************************************************************************
/* Prints the N+1 city indices of a closed tour (last entry repeats the first). */
void PrintPath_2(int Path[]) {
    printf("> The path is:\n");
    for (int idx = 0; idx <= N; idx++)
        printf(">> %d ", Path[idx]);
    printf("\n");
}
// ****************************************************************************************************************
// Visually maps the cities' positions
// ****************************************************************************************************************
/*
 * Prints an ASCII map of the city grid: each 1x1 cell shows the id of the
 * city inside it, -1 when several cities collide in a cell, nonExist if empty.
 */
void MapCities() {
    /* BUG FIX: static — (Ny+1)*(Nx+1) ints is ~4 MB, which risks overflowing
       the thread stack when declared as an automatic variable. The function
       is debug output and not re-entrant, so static storage is safe. */
    static int Map[Ny+1][Nx+1];
    printf("Now creating a visual map of the cities...\n");
    for (int i = 0; i < Nx+1; i++)
        for (int j = 0; j < Ny+1; j++)
            Map[j][i] = nonExist; /* was "(float) nonExist": pointless int->float->int round-trip */
    for (int c = 0; c < N; c++) {
        int x = (int) CitiesX[c];
        int y = (int) CitiesY[c];
        if (Map[y][x] == nonExist) Map[y][x] = c;  /* first city in this cell */
        else Map[y][x] = -1;                       /* collision marker */
    }
    printf("This is the cities' map:\n");
    printf("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n");
    for (int y = 0; y < Ny+1; y++) {
        for (int x = 0; x < Nx+1; x++)
            printf("%8d ", Map[y][x]);
        printf("\n");
    }
    printf("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n");
    printf("\n");
}
// ****************************************************************************************************************
// Finds Euclidean distance between two cities
// ****************************************************************************************************************
/* Euclidean distance between cities A and B (computed in float like the
 * original, then widened to double by sqrt). */
double Distance(int A, int B) {
    float dx = CitiesX[A] - CitiesX[B];
    float dy = CitiesY[A] - CitiesY[B];
    return (double) sqrt( dx*dx + dy*dy );
}
// ****************************************************************************************************************
// Finds Eucleidian distance in a given path
// ****************************************************************************************************************
/* Total length of a closed tour stored in Path[0..N]. The final
 * Distance(Path[N], Path[0]) term is kept for parity with the original
 * (it is zero when Path[N] == Path[0], as the tours here are built). */
double PathDistance_2(int Path[]) {
    double total = 0.0;
    for (int step = 0; step < N; step++)
        total += Distance(Path[step], Path[step+1]);
    total += Distance(Path[N], Path[0]);
    return total;
}
// ****************************************************************************************************************
// Finds all Eucleidian distances between all pairs of cities
// ****************************************************************************************************************
/*
 * Fills the symmetric CalculatedDistances matrix with the Euclidean distance
 * of every pair of cities.
 */
void CalculateAllDistances() {
    printf("Now calculating distances between all pairs of cities...\n");
    for (int i = 0; i < N; i++) {
        printf("\r> Progress: %.2f%%", 100*(i+1)/((float)N));
        fflush(stdout); /* BUG FIX: '\r' lines have no newline; without a flush the progress may never appear */
        /* Robustness: set the diagonal explicitly instead of relying on the
           zero-initialization of the global array. */
        CalculatedDistances[i][i] = 0.0;
        for (int j = i+1; j < N; j++) {
            double temp = Distance(i, j);
            CalculatedDistances[i][j] = temp;
            CalculatedDistances[j][i] = temp;
        }
    }
    printf(" ===> Completed.\n");
}
// ****************************************************************************************************************
// Finds the travelling path by visiting the closest or second closest non-visited city each time
// ****************************************************************************************************************
/*
 * Heinritz-Hsiao greedy tour: from the current city, move to the closest
 * unvisited city with probability PICK_CLOSEST_CITY_POSSIBILITY, otherwise
 * to the second closest. Each OpenMP thread builds its own tour in
 * ThreadsPath[thread]; returns the tour's total length.
 */
double FindShortestStepPath_2() {
    #pragma omp master
    {
        printf("Now finding the shortest / second shortest step path...\n");
        printf("> Threads running independently in parallel: %d\n", omp_get_num_threads());
    }
    double totDist = 0.0;
    int visited_cities = 1, current_city = 0, thread = omp_get_thread_num();
    bool CityIsVisited[N];
    for (int i = 0; i < N; i++) CityIsVisited[i] = false;
    ThreadsPath[thread][0] = current_city;
    ThreadsPath[thread][N] = current_city;
    /* BUG FIX: the start city must be marked visited (the original set it to
       false), otherwise step 1 re-picks city 0 at distance 0, duplicating it
       in the path and leaving one real city unvisited. */
    CityIsVisited[current_city] = true;
    do {
        #pragma omp master
        printf("\r> Progress: %.2f%%", 100*(visited_cities)/((float)N));
        double dist = 0, min_dist_1 = INFINITY, min_dist_2 = INFINITY;
        int closest_city_1 = -1, closest_city_2 = -1;
        for (int i = 0; i < N; i++) {
            if (CityIsVisited[i]) continue; /* skip already-visited cities */
            dist = CalculatedDistances[current_city][i];
            if (min_dist_1 > dist) {
                min_dist_2 = min_dist_1; closest_city_2 = closest_city_1;
                min_dist_1 = dist; closest_city_1 = i;
            } else if (min_dist_2 > dist) {
                min_dist_2 = dist; closest_city_2 = i;
            }
        }
        /* Per-step thread-local seed so threads explore different tours. */
        unsigned seed = 11*visited_cities + 83*thread + 11*omp_get_wtime() + current_city;
        float random_number = ((float) rand_r(&seed)) / ((float) RAND_MAX);
        int city_pick = (random_number < PICK_CLOSEST_CITY_POSSIBILITY) ? 1 : 2;
        /* BUG FIX: when only one unvisited city remains there is no second
           closest; fall back to the closest instead of indexing with -1. */
        if (closest_city_2 == -1) city_pick = 1;
        int next_city = (city_pick == 1) ? closest_city_1 : closest_city_2;
        ThreadsPath[thread][visited_cities++] = next_city;
        CityIsVisited[next_city] = true;
        current_city = next_city;
        totDist += (city_pick == 1) ? min_dist_1 : min_dist_2;
    } while (visited_cities < N);
    /* Close the tour back to the start city. */
    totDist += CalculatedDistances[ThreadsPath[thread][N-1]][0];
    #pragma omp barrier
    #pragma omp single
    printf("\r> Progress: 100.00%% ===> Completed.\n");
    #pragma omp barrier
    return totDist;
}
// ****************************************************************************************************************
// The main program
// ****************************************************************************************************************
/*
 * Entry point: builds the random city set, precomputes all pairwise
 * distances, then lets THREADS workers race independent greedy tours and
 * keeps the shortest via an OpenMP min-reduction.
 */
int main(int argc, const char* argv[]) {
    printf("------------------------------------------------------------------------------\n");
    printf("This program searches for the optimal traveling distance between %d cities,\n", N);
    printf("spanning in an area of X=(0,%d) and Y=(0,%d)\n", Nx, Ny);
    printf("------------------------------------------------------------------------------\n");
    srand(1046900);                  /* fixed seed for reproducible city layout */
    SetCities();
    CalculateAllDistances();
    double totDistEstimation = INFINITY;
    #pragma omp parallel reduction(min:totDistEstimation) num_threads(THREADS)
    {
        totDistEstimation = FindShortestStepPath_2();
    }
    printf("\n");
    printf("Minimum total path distance found is: %.2lf\n", totDistEstimation);
    return 0;
}
|
loss.h | #pragma once
#include <dmlc/data.h>
#include <dmlc/io.h>
#include <math.h>
#include "config.pb.h"
#include "progress.h"
#include "base/spmv.h"
#include "base/binary_class_evaluation.h"
namespace dmlc {
namespace linear {
/**
* \brief Scalar loss
*
* a loss which takes as input a real value prediction and a
* real valued label and outputs a non-negative loss value. Examples include the
* hinge hinge loss, binary classification loss, and univariate regression loss.
*/
template <typename V>
class ScalarLoss {
 public:
  // nt_ also initialized so the object is safe even if Init() is skipped.
  ScalarLoss() : init_(false), nt_(1) { }
  virtual ~ScalarLoss() { }
  /**
   * \brief Bind the loss to a data block and current weights.
   *
   * Precomputes Xw_ = X * w so Evaluate/CalcGrad/Predict can reuse it.
   *
   * @param data X and Y
   * @param w weight
   * @param nt num of threads
   */
  void Init(const RowBlock<unsigned>& data,
            const std::vector<V>& w, int nt) {
    data_ = data;
    // BUG FIX: nt_ must be assigned *before* it is passed to SpMV::Times;
    // the original passed the still-uninitialized member as thread count.
    nt_ = nt;
    Xw_.resize(data_.size);
    SpMV::Times(data_, w, &Xw_, nt_);
    init_ = true;
  }
  /*! \brief evaluate the loss value (base class: bookkeeping only) */
  virtual void Evaluate(Progress* prog) {
    CHECK(init_);
    prog->new_ex() = data_.size;
    prog->count() = 1;
  }
  /*! \brief compute the gradients */
  virtual void CalcGrad(std::vector<V>* grad) = 0;
  /**
   * \brief save prediction
   * \param fo output stream
   * \param prob_out if true write sigmoid(Xw) probabilities, else raw scores
   */
  virtual void Predict(Stream* fo, bool prob_out) {
    CHECK(init_); CHECK_NOTNULL(fo);
    ostream os(fo);
    if (prob_out) {
      for (auto p : Xw_) os << 1.0 / (1.0 + exp( - p )) << "\n";
    } else {
      for (auto p : Xw_) os << p << "\n";
    }
  }

 protected:
  bool init_;                // true after Init()
  RowBlock<unsigned> data_;  // bound data block
  std::vector<V> Xw_;        // X * w
  int nt_;                   // number of worker threads
};
/**
* \brief binary classification with label y = +1 / -1
*/
template <typename V>
class BinClassLoss : public ScalarLoss<V> {
 public:
  using ScalarLoss<V>::data_;
  using ScalarLoss<V>::Xw_;
  using ScalarLoss<V>::nt_;

  /*! \brief add AUC and accuracy on top of the base-class counters */
  virtual void Evaluate(Progress* prog) {
    ScalarLoss<V>::Evaluate(prog);
    BinClassEval<V> metrics(data_.label, Xw_.data(), Xw_.size(), nt_);
    prog->auc() = metrics.AUC();
    prog->acc() = metrics.Accuracy(0);
  }
};
/**
* \brief logistic loss: \f$ log(1+exp(−y \langle x, w \rangle)) \f$
*/
template <typename V>
class LogitLoss : public BinClassLoss<V> {
 public:
  using ScalarLoss<V>::data_;
  using ScalarLoss<V>::Xw_;
  using ScalarLoss<V>::nt_;
  using ScalarLoss<V>::init_;

  /*! \brief objective value: sum of log(1 + exp(-y * <x,w>)) */
  virtual void Evaluate(Progress* prog) {
    BinClassLoss<V>::Evaluate(prog);
    BinClassEval<V> metrics(data_.label, Xw_.data(), Xw_.size(), nt_);
    prog->objv() = metrics.LogitObjv();
  }

  /*! \brief gradient: X' * (-y * sigmoid(-y * Xw)) */
  virtual void CalcGrad(std::vector<V>* grad) {
    CHECK(init_);
    std::vector<V> dual(data_.size);
#pragma omp parallel for num_threads(nt_)
    for (size_t i = 0; i < data_.size; ++i) {
      V label = data_.label[i] > 0 ? 1 : -1;
      dual[i] = -label / (1 + exp(label * Xw_[i]));
    }
    SpMV::TransTimes(data_, dual, grad, nt_);
  }
};
/**
* \brief square hinge loss: \f$ \max\left(0, (1-yp)^2\right) \f$
*/
template <typename V>
class SquareHingeLoss : public BinClassLoss<V> {
 public:
  using ScalarLoss<V>::data_;
  using ScalarLoss<V>::Xw_;
  using ScalarLoss<V>::nt_;
  using ScalarLoss<V>::init_;

  /*! \brief objective: sum over examples of max(0, 1 - y * <x,w>)^2 */
  virtual void Evaluate(Progress* prog) {
    BinClassLoss<V>::Evaluate(prog);
    V objv = 0;
#pragma omp parallel for reduction(+:objv) num_threads(nt_)
    for (size_t i = 0; i < data_.size; ++i) {
      V label = data_.label[i] > 0 ? 1 : -1;
      V gap = std::max(1 - label * Xw_[i], (V)0);
      objv += gap * gap;
    }
    prog->objv() = objv;
  }

  /*! \brief gradient computed via the dual coefficients then scaled by -2 */
  virtual void CalcGrad(std::vector<V>* grad) {
    CHECK(init_);
    std::vector<V> dual(data_.size);
#pragma omp parallel for num_threads(nt_)
    for (size_t i = 0; i < data_.size; ++i) {
      V label = data_.label[i] > 0 ? 1 : -1;
      dual[i] = label * (label * Xw_[i] > 1.0);
    }
    SpMV::TransTimes(data_, dual, grad, nt_);
#pragma omp parallel for num_threads(nt_)
    for (size_t i = 0; i < grad->size(); ++i) {
      (*grad)[i] *= -2.0;
    }
  }
};
/**
* \brief squared loss \f$ \frac12 (p-y)^2 \f$
*/
// NOTE(review): unimplemented stub — CalcGrad is still pure virtual, so this
// class is abstract and CreateLoss() never returns it.
template <typename V>
class SquareLoss : public ScalarLoss<V> {
 public:
 // TODO
};
/**
* \brief loss factory
*/
template <typename V>
static ScalarLoss<V>* CreateLoss(Config::Loss loss) {
  // Dispatch on the configured loss type; unknown values are fatal.
  if (loss == Config::LOGIT) {
    return new LogitLoss<V>();
  }
  if (loss == Config::SQUARE_HINGE) {
    return new SquareHingeLoss<V>();
  }
  LOG(FATAL) << "unknown type: " << loss;
  return NULL;
}
} // namespace linear
} // namespace dmlc
|
ZQ_CNN_MTCNN.h | #ifndef _ZQ_CNN_MTCNN_H_
#define _ZQ_CNN_MTCNN_H_
#pragma once
#include "ZQ_CNN_Net.h"
#include "ZQ_CNN_BBoxUtils.h"
#include <omp.h>
namespace ZQ
{
class ZQ_CNN_MTCNN
{
public:
using string = std::string;
ZQ_CNN_MTCNN()
{
min_size = 60;
thresh[0] = 0.6;
thresh[1] = 0.7;
thresh[2] = 0.7;
nms_thresh[0] = 0.6;
nms_thresh[1] = 0.7;
nms_thresh[2] = 0.7;
width = 0;
height = 0;
factor = 0.709;
pnet_overlap_thresh_count = 4;
pnet_size = 12;
pnet_stride = 2;
special_handle_very_big_face = false;
force_run_pnet_multithread = false;
show_debug_info = false;
limit_r_num = 0;
limit_o_num = 0;
limit_l_num = 0;
}
~ZQ_CNN_MTCNN()
{
}
private:
#if __ARM_NEON
const int BATCH_SIZE = 16;
#else
const int BATCH_SIZE = 64;
#endif
std::vector<ZQ_CNN_Net> pnet, rnet, onet, lnet;
bool has_lnet;
int thread_num;
float thresh[3], nms_thresh[3];
int min_size;
int width, height;
float factor;
int pnet_overlap_thresh_count;
int pnet_size;
int pnet_stride;
int rnet_size;
int onet_size;
int lnet_size;
bool special_handle_very_big_face;
bool do_landmark;
float early_accept_thresh;
float nms_thresh_per_scale;
bool force_run_pnet_multithread;
std::vector<float> scales;
std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> pnet_images;
ZQ_CNN_Tensor4D_NHW_C_Align128bit input, rnet_image, onet_image;
bool show_debug_info;
int limit_r_num;
int limit_o_num;
int limit_l_num;
public:
// Enable verbose timing/debug printing in Find()/Find106().
void TurnOnShowDebugInfo() { show_debug_info = true; }
// Disable verbose timing/debug printing.
void TurnOffShowDebugInfo() { show_debug_info = false; }
// Cap the number of candidate boxes fed into the R-net / O-net / L-net
// stages; 0 means "no limit" for that stage.
void SetLimit(int limit_r = 0, int limit_o = 0, int limit_l = 0)
{
limit_r_num = limit_r;
limit_o_num = limit_o;
limit_l_num = limit_l;
}
// Loads the P/R/O (and optionally L) networks from files, one copy per
// worker thread. Returns false (with all nets cleared) on any load failure.
bool Init(const string& pnet_param, const string& pnet_model, const string& rnet_param, const string& rnet_model,
    const string& onet_param, const string& onet_model, int thread_num = 1,
    bool has_lnet = false, const string& lnet_param = "", const std::string& lnet_model = "")
{
    // thread_num < 1 selects the "single net, OpenMP-parallel pnet" mode.
    if (thread_num < 1)
        force_run_pnet_multithread = true;
    else
        force_run_pnet_multithread = false;
    thread_num = __max(1, thread_num);
    pnet.resize(thread_num);
    rnet.resize(thread_num);
    onet.resize(thread_num);
    this->has_lnet = has_lnet;
    if (has_lnet)
    {
        lnet.resize(thread_num);
    }
    bool ret = true;
    for (int i = 0; i < thread_num; i++)
    {
        ret = pnet[i].LoadFrom(pnet_param, pnet_model, true, 1e-9, true)
            && rnet[i].LoadFrom(rnet_param, rnet_model, true, 1e-9, true)
            && onet[i].LoadFrom(onet_param, onet_model, true, 1e-9, true);
        if (has_lnet && ret)
            ret = lnet[i].LoadFrom(lnet_param, lnet_model, true, 1e-9, true);
        if (!ret)
            break;
    }
    if (!ret)
    {
        // BUG FIX: return immediately on failure. The original fell through
        // and dereferenced rnet[0]/onet[0] on the just-cleared vectors below.
        pnet.clear();
        rnet.clear();
        onet.clear();
        if (has_lnet)
            lnet.clear();
        this->thread_num = 0;
        return false;
    }
    this->thread_num = thread_num;
    if (show_debug_info)
    {
        printf("rnet = %.1f M, onet = %.1f M\n", rnet[0].GetNumOfMulAdd() / (1024.0*1024.0),
            onet[0].GetNumOfMulAdd() / (1024.0*1024.0));
        if (has_lnet)
            printf("lnet = %.1f M\n", lnet[0].GetNumOfMulAdd() / (1024.0*1024.0));
    }
    // Record each refinement net's expected (square) input resolution.
    int C, H, W;
    rnet[0].GetInputDim(C, H, W);
    rnet_size = H;
    onet[0].GetInputDim(C, H, W);
    onet_size = H;
    if (has_lnet)
    {
        lnet[0].GetInputDim(C, H, W);
        lnet_size = H;
    }
    return true;
}
bool InitFromBuffer(
const char* pnet_param, __int64 pnet_param_len, const char* pnet_model, __int64 pnet_model_len,
const char* rnet_param, __int64 rnet_param_len, const char* rnet_model, __int64 rnet_model_len,
const char* onet_param, __int64 onet_param_len, const char* onet_model, __int64 onet_model_len,
int thread_num = 1, bool has_lnet = false,
const char* lnet_param = 0, __int64 lnet_param_len = 0, const char* lnet_model = 0, __int64 lnet_model_len = 0)
{
if (thread_num < 1)
force_run_pnet_multithread = true;
else
force_run_pnet_multithread = false;
thread_num = __max(1, thread_num);
pnet.resize(thread_num);
rnet.resize(thread_num);
onet.resize(thread_num);
this->has_lnet = has_lnet;
if(has_lnet)
lnet.resize(thread_num);
bool ret = true;
for (int i = 0; i < thread_num; i++)
{
ret = pnet[i].LoadFromBuffer(pnet_param, pnet_param_len,pnet_model,pnet_model_len, true, 1e-9, true)
&& rnet[i].LoadFromBuffer(rnet_param, rnet_param_len, rnet_model, rnet_model_len, true, 1e-9, true)
&& onet[i].LoadFromBuffer(onet_param, onet_param_len, onet_model, onet_model_len, true, 1e-9, true);
if (has_lnet && ret)
ret = lnet[i].LoadFromBuffer(lnet_param, lnet_param_len, lnet_model, lnet_model_len, true, 1e-9, true);
if (!ret)
break;
}
if (!ret)
{
pnet.clear();
rnet.clear();
onet.clear();
if (has_lnet)
lnet.clear();
this->thread_num = 0;
}
else
this->thread_num = thread_num;
if (show_debug_info)
{
printf("rnet = %.1f M, onet = %.1f M\n", rnet[0].GetNumOfMulAdd() / (1024.0*1024.0),
onet[0].GetNumOfMulAdd() / (1024.0*1024.0));
if (has_lnet)
printf("lnet = %.1f M\n", lnet[0].GetNumOfMulAdd() / (1024.0*1024.0));
}
int C, H, W;
rnet[0].GetInputDim(C, H, W);
rnet_size = H;
onet[0].GetInputDim(C, H, W);
onet_size = H;
return ret;
}
// Configures detection parameters and (re)builds the P-net scale pyramid
// when the image geometry or the scale factor changed.
void SetPara(int w, int h, int min_face_size = 60, float pthresh = 0.6, float rthresh = 0.7, float othresh = 0.7,
    float nms_pthresh = 0.6, float nms_rthresh = 0.7, float nms_othresh = 0.7, float scale_factor = 0.709,
    int pnet_overlap_thresh_count = 4, int pnet_size = 12, int pnet_stride = 2, bool special_handle_very_big_face = false,
    bool do_landmark = true, float early_accept_thresh = 1.00)
{
    min_size = __max(pnet_size, min_face_size);
    thresh[0] = __max(0.1, pthresh); thresh[1] = __max(0.1, rthresh); thresh[2] = __max(0.1, othresh);
    nms_thresh[0] = __max(0.1, nms_pthresh); nms_thresh[1] = __max(0.1, nms_rthresh); nms_thresh[2] = __max(0.1, nms_othresh);
    // Clamp to a sane pyramid decay range.
    scale_factor = __max(0.5, __min(0.97, scale_factor));
    this->pnet_overlap_thresh_count = __max(0, pnet_overlap_thresh_count);
    this->pnet_size = pnet_size;
    this->pnet_stride = pnet_stride;
    this->special_handle_very_big_face = special_handle_very_big_face;
    this->do_landmark = do_landmark;
    this->early_accept_thresh = early_accept_thresh;
    if (pnet_size == 20 && pnet_stride == 4)
        nms_thresh_per_scale = 0.45;
    else
        nms_thresh_per_scale = 0.495;
    // Rebuild the pyramid only when geometry or scale factor changed.
    if (width != w || height != h || factor != scale_factor)
    {
        scales.clear();
        pnet_images.clear();
        width = w; height = h;
        // BUG FIX: the member `factor` was never updated from scale_factor,
        // so the pyramid was always built with the stale/default factor and
        // this branch re-triggered on every call with a non-default value.
        factor = scale_factor;
        float minside = __min(width, height);
        int MIN_DET_SIZE = pnet_size;
        float m = (float)MIN_DET_SIZE / min_size;
        minside *= m;
        while (minside > MIN_DET_SIZE)
        {
            scales.push_back(m);
            minside *= factor;
            m *= factor;
        }
        minside = __min(width, height);
        int count = scales.size();
        // Drop scales whose resized short side would not fit one pnet window.
        for (int i = scales.size() - 1; i >= 0; i--)
        {
            if (ceil(scales[i] * minside) <= pnet_size)
            {
                count--;
            }
        }
        if (special_handle_very_big_face)
        {
            // Densify the coarse end of the pyramid to catch very big faces.
            if (count > 2)
                count--;
            scales.resize(count);
            if (count > 0)
            {
                float last_size = ceil(scales[count - 1] * minside);
                for (int tmp_size = last_size - 1; tmp_size >= pnet_size + 1; tmp_size -= 2)
                {
                    scales.push_back((float)tmp_size / minside);
                    count++;
                }
            }
            scales.push_back((float)pnet_size / minside);
            count++;
        }
        else
        {
            scales.push_back((float)pnet_size / minside);
            count++;
        }
        pnet_images.resize(count);
    }
}
// Runs the full MTCNN cascade (P-net -> R-net -> O-net, plus L-net landmark
// refinement when available and enabled) on a BGR image and returns the
// detected face boxes. Returns false if any stage fails.
bool Find(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox>& results)
{
double t1 = omp_get_wtime();
std::vector<ZQ_CNN_BBox> firstBbox, secondBbox, thirdBbox;
if (!_Pnet_stage(bgr_img, _width, _height, _widthStep, firstBbox))
return false;
//results = firstBbox;
//return true;
// Optionally cap the number of proposals entering R-net.
if (limit_r_num > 0)
{
_select(firstBbox, limit_r_num, _width, _height);
}
double t2 = omp_get_wtime();
if (!_Rnet_stage(firstBbox, secondBbox))
return false;
//results = secondBbox;
//return true;
// Optionally cap the number of candidates entering O-net.
if (limit_o_num > 0)
{
_select(secondBbox, limit_o_num, _width, _height);
}
if (!has_lnet || !do_landmark)
{
// No landmark refinement: O-net output is the final result.
double t3 = omp_get_wtime();
if (!_Onet_stage(secondBbox, results))
return false;
double t4 = omp_get_wtime();
if (show_debug_info)
{
printf("final found num: %d\n", (int)results.size());
printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms)\n",
1000 * (t4 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3));
}
}
else
{
// With L-net: refine O-net boxes' landmarks before returning.
double t3 = omp_get_wtime();
if (!_Onet_stage(secondBbox, thirdBbox))
return false;
if (limit_l_num > 0)
{
_select(thirdBbox, limit_l_num, _width, _height);
}
double t4 = omp_get_wtime();
if (!_Lnet_stage(thirdBbox, results))
return false;
double t5 = omp_get_wtime();
if (show_debug_info)
{
printf("final found num: %d\n", (int)results.size());
printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms, L: %.3f ms)\n",
1000 * (t5 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3), 1000 * (t5 - t4));
}
}
return true;
}
// Like Find(), but produces 106-point landmark boxes via _Lnet106_stage.
// Requires the L-net to be loaded and do_landmark enabled; otherwise
// returns false (there is no landmark-free 106-point path).
bool Find106(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox106>& results)
{
double t1 = omp_get_wtime();
std::vector<ZQ_CNN_BBox> firstBbox, secondBbox, thirdBbox;
if (!_Pnet_stage(bgr_img, _width, _height, _widthStep, firstBbox))
return false;
//results = firstBbox;
//return true;
// Optionally cap the number of proposals entering R-net.
if (limit_r_num > 0)
{
_select(firstBbox, limit_r_num, _width, _height);
}
double t2 = omp_get_wtime();
if (!_Rnet_stage(firstBbox, secondBbox))
return false;
//results = secondBbox;
//return true;
// Optionally cap the number of candidates entering O-net.
if (limit_o_num > 0)
{
_select(secondBbox, limit_o_num, _width, _height);
}
// 106-point landmarks are produced by L-net only.
if (!has_lnet || !do_landmark)
{
return false;
}
double t3 = omp_get_wtime();
if (!_Onet_stage(secondBbox, thirdBbox))
return false;
if (limit_l_num > 0)
{
_select(thirdBbox, limit_l_num, _width, _height);
}
double t4 = omp_get_wtime();
if (!_Lnet106_stage(thirdBbox, results))
return false;
double t5 = omp_get_wtime();
if (show_debug_info)
{
printf("final found num: %d\n", (int)results.size());
printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms, L: %.3f ms)\n",
1000 * (t5 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3), 1000 * (t5 - t4));
}
return true;
}
private:
// Runs P-net sequentially over every pyramid scale and collects the face
// probability maps. For scale i, maps[i] is a mapH[i] x mapW[i] grid of
// face scores (one per sliding-window position).
void _compute_Pnet_single_thread(std::vector<std::vector<float> >& maps,
std::vector<int>& mapH, std::vector<int>& mapW)
{
// First pass: count usable scales and record each score-map geometry.
int scale_num = 0;
for (int i = 0; i < scales.size(); i++)
{
int changedH = (int)ceil(height*scales[i]);
int changedW = (int)ceil(width*scales[i]);
// Skip scales smaller than one pnet window.
if (changedH < pnet_size || changedW < pnet_size)
continue;
scale_num++;
mapH.push_back((changedH - pnet_size) / pnet_stride + 1);
mapW.push_back((changedW - pnet_size) / pnet_stride + 1);
}
maps.resize(scale_num);
for (int i = 0; i < scale_num; i++)
{
maps[i].resize(mapH[i] * mapW[i]);
}
// Second pass: resize, forward through pnet[0], and copy out the scores.
for (int i = 0; i < scale_num; i++)
{
int changedH = (int)ceil(height*scales[i]);
int changedW = (int)ceil(width*scales[i]);
float cur_scale_x = (float)width / changedW;
float cur_scale_y = (float)height / changedH;
double t10 = omp_get_wtime();
// scale == 1 means the original input can be fed directly (no resize).
if (scales[i] != 1)
{
input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0);
}
double t11 = omp_get_wtime();
if (scales[i] != 1)
pnet[0].Forward(pnet_images[i]);
else
pnet[0].Forward(input);
double t12 = omp_get_wtime();
if (show_debug_info)
printf("Pnet [%d]: resolution [%dx%d], resize:%.3f ms, cost:%.3f ms\n",
i, changedW, changedH, 1000 * (t11 - t10), 1000 * (t12 - t11));
const ZQ_CNN_Tensor4D* score = pnet[0].GetBlobByName("prob1");
//score p
// "+ 1" selects the face-probability channel of the 2-channel softmax.
int scoreH = score->GetH();
int scoreW = score->GetW();
int scorePixStep = score->GetPixelStep();
const float *p = score->GetFirstPixelPtr() + 1;
for (int row = 0; row < scoreH; row++)
{
for (int col = 0; col < scoreW; col++)
{
// Guard: the net's output can be slightly larger than the computed map.
if(row < mapH[i] && col < mapW[i])
maps[i][row*mapW[i] + col] = *p;
p += scorePixStep;
}
}
}
}
// Multi-threaded Pnet pass: builds the image pyramid, tiles each scaled
// image into overlapping blocks of 64*stride pixels, and runs one Pnet
// forward per block so the work can be spread over OpenMP threads.
// On return, maps[s] holds the face-probability map (channel 1 of "prob1")
// for pyramid level s, with geometry mapH[s] x mapW[s].
void _compute_Pnet_multi_thread(std::vector<std::vector<float> >& maps,
	std::vector<int>& mapH, std::vector<int>& mapW)
{
	// Stage 1: resample the input once per scale into pnet_images[i]
	// (scale 1 reuses 'input' directly, so no resize is needed for it).
	if (thread_num <= 1)
	{
		for (int i = 0; i < scales.size(); i++)
		{
			int changedH = (int)ceil(height*scales[i]);
			int changedW = (int)ceil(width*scales[i]);
			if (changedH < pnet_size || changedW < pnet_size)
				continue;
			if (scales[i] != 1)
			{
				input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0);
			}
		}
	}
	else
	{
		// Each scale is independent, so the resizes parallelize trivially.
#pragma omp parallel for num_threads(thread_num) schedule(dynamic, 1)
		for (int i = 0; i < scales.size(); i++)
		{
			int changedH = (int)ceil(height*scales[i]);
			int changedW = (int)ceil(width*scales[i]);
			if (changedH < pnet_size || changedW < pnet_size)
				continue;
			if (scales[i] != 1)
			{
				input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0);
			}
		}
	}
	// Stage 2: compute per-scale output-map geometry and allocate the maps.
	int scale_num = 0;
	for (int i = 0; i < scales.size(); i++)
	{
		int changedH = (int)ceil(height*scales[i]);
		int changedW = (int)ceil(width*scales[i]);
		if (changedH < pnet_size || changedW < pnet_size)
			continue;
		scale_num++;
		mapH.push_back((changedH - pnet_size) / pnet_stride + 1);
		mapW.push_back((changedW - pnet_size) / pnet_stride + 1);
	}
	maps.resize(scale_num);
	for (int i = 0; i < scale_num; i++)
	{
		maps[i].resize(mapH[i] * mapW[i]);
	}
	// Stage 3: tile every valid scale into blocks. Adjacent blocks overlap by
	// border_size pixels so that no detection window straddling a block
	// boundary is lost; jump_size is the non-overlapping advance per block.
	std::vector<int> task_rect_off_x;
	std::vector<int> task_rect_off_y;
	std::vector<int> task_rect_width;
	std::vector<int> task_rect_height;
	std::vector<float> task_scale;
	std::vector<int> task_scale_id;
	int stride = pnet_stride;
	const int block_size = 64 * stride;
	int cellsize = pnet_size;
	int border_size = cellsize - stride;
	int overlap_border_size = cellsize / stride;
	int jump_size = block_size - border_size;
	for (int i = 0; i < scales.size(); i++)
	{
		int changeH = (int)ceil(height*scales[i]);
		int changeW = (int)ceil(width*scales[i]);
		if (changeH < pnet_size || changeW < pnet_size)
			continue;
		// Count how many block rows/columns cover this scale.
		int block_H_num = 0;
		int block_W_num = 0;
		int start = 0;
		while (start < changeH)
		{
			block_H_num++;
			if (start + block_size >= changeH)
				break;
			start += jump_size;
		}
		start = 0;
		while (start < changeW)
		{
			block_W_num++;
			if (start + block_size >= changeW)
				break;
			start += jump_size;
		}
		// Emit one ROI task per block; blocks smaller than one detection
		// window (cellsize) cannot produce output and are dropped.
		for (int s = 0; s < block_H_num; s++)
		{
			for (int t = 0; t < block_W_num; t++)
			{
				int rect_off_x = t * jump_size;
				int rect_off_y = s * jump_size;
				int rect_width = __min(changeW, rect_off_x + block_size) - rect_off_x;
				int rect_height = __min(changeH, rect_off_y + block_size) - rect_off_y;
				if (rect_width >= cellsize && rect_height >= cellsize)
				{
					task_rect_off_x.push_back(rect_off_x);
					task_rect_off_y.push_back(rect_off_y);
					task_rect_width.push_back(rect_width);
					task_rect_height.push_back(rect_height);
					task_scale.push_back(scales[i]);
					task_scale_id.push_back(i);
				}
			}
		}
	}
	//
	// Stage 4: run Pnet on every block ROI and scatter the scores into the
	// per-scale maps. The row/col offsets (rect_off / stride) translate
	// block-local output coordinates into whole-map coordinates.
	// NOTE(review): neighboring blocks overlap, so two tasks can write the
	// same map cell; presumably both compute identical values from the same
	// pixels, making the concurrent writes benign — verify against the
	// network's padding behavior.
	int task_num = task_scale.size();
	std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_pnet_images(thread_num);
	if (thread_num <= 1)
	{
		for (int i = 0; i < task_num; i++)
		{
			int thread_id = omp_get_thread_num();
			int scale_id = task_scale_id[i];
			float cur_scale = task_scale[i];
			int i_rect_off_x = task_rect_off_x[i];
			int i_rect_off_y = task_rect_off_y[i];
			int i_rect_width = task_rect_width[i];
			int i_rect_height = task_rect_height[i];
			// Scale 1 crops from the original input; others from the pyramid.
			if (scale_id == 0 && scales[0] == 1)
			{
				if (!input.ROI(task_pnet_images[thread_id],
					i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0))
					continue;
			}
			else
			{
				if (!pnet_images[scale_id].ROI(task_pnet_images[thread_id],
					i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0))
					continue;
			}
			if (!pnet[thread_id].Forward(task_pnet_images[thread_id]))
				continue;
			const ZQ_CNN_Tensor4D* score = pnet[thread_id].GetBlobByName("prob1");
			// NOTE(review): task_count, bbox and order below are dead locals
			// (never read) — likely leftovers from an earlier inline-NMS version.
			int task_count = 0;
			//score p
			int scoreH = score->GetH();
			int scoreW = score->GetW();
			int scorePixStep = score->GetPixelStep();
			// "+1" selects channel 1 (face probability) of each output pixel.
			const float *p = score->GetFirstPixelPtr() + 1;
			ZQ_CNN_BBox bbox;
			ZQ_CNN_OrderScore order;
			for (int row = 0; row < scoreH; row++)
			{
				for (int col = 0; col < scoreW; col++)
				{
					int real_row = row + i_rect_off_y / stride;
					int real_col = col + i_rect_off_x / stride;
					if (real_row < mapH[scale_id] && real_col < mapW[scale_id])
						maps[scale_id][real_row*mapW[scale_id] + real_col] = *p;
					p += scorePixStep;
				}
			}
		}
	}
	else
	{
		// Same loop as above, parallelized; each thread owns its own scratch
		// tensor (task_pnet_images[thread_id]) and network clone (pnet[thread_id]).
#pragma omp parallel for num_threads(thread_num)
		for (int i = 0; i < task_num; i++)
		{
			int thread_id = omp_get_thread_num();
			int scale_id = task_scale_id[i];
			float cur_scale = task_scale[i];
			int i_rect_off_x = task_rect_off_x[i];
			int i_rect_off_y = task_rect_off_y[i];
			int i_rect_width = task_rect_width[i];
			int i_rect_height = task_rect_height[i];
			if (scale_id == 0 && scales[0] == 1)
			{
				if (!input.ROI(task_pnet_images[thread_id],
					i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0))
					continue;
			}
			else
			{
				if (!pnet_images[scale_id].ROI(task_pnet_images[thread_id],
					i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0))
					continue;
			}
			if (!pnet[thread_id].Forward(task_pnet_images[thread_id]))
				continue;
			const ZQ_CNN_Tensor4D* score = pnet[thread_id].GetBlobByName("prob1");
			int task_count = 0;
			//score p
			int scoreH = score->GetH();
			int scoreW = score->GetW();
			int scorePixStep = score->GetPixelStep();
			const float *p = score->GetFirstPixelPtr() + 1;
			ZQ_CNN_BBox bbox;
			ZQ_CNN_OrderScore order;
			for (int row = 0; row < scoreH; row++)
			{
				for (int col = 0; col < scoreW; col++)
				{
					int real_row = row + i_rect_off_y / stride;
					int real_col = col + i_rect_off_x / stride;
					if (real_row < mapH[scale_id] && real_col < mapW[scale_id])
						maps[scale_id][real_row*mapW[scale_id] + real_col] = *p;
					p += scorePixStep;
				}
			}
		}
	}
}
// Stage 1 of the MTCNN cascade. Converts the BGR frame into 'input', runs
// Pnet over the image pyramid (single- or multi-threaded), thresholds the
// probability maps into candidate windows, applies per-scale NMS (block-wise
// for large maps so it parallelizes), rescales boxes back to image
// coordinates, then performs the final cross-scale NMS + square refinement.
// Returns false on size mismatch, conversion failure, or zero candidates.
bool _Pnet_stage(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox>& firstBbox)
{
	if (thread_num <= 0)
		return false;
	double t1 = omp_get_wtime();
	firstBbox.clear();
	// The detector is pre-sized at Init time; the frame must match exactly.
	if (width != _width || height != _height)
		return false;
	if (!input.ConvertFromBGR(bgr_img, width, height, _widthStep))
		return false;
	double t2 = omp_get_wtime();
	if (show_debug_info)
		printf("convert cost: %.3f ms\n", 1000 * (t2 - t1));
	std::vector<std::vector<float> > maps;
	std::vector<int> mapH;
	std::vector<int> mapW;
	if (thread_num == 1 && !force_run_pnet_multithread)
	{
		pnet[0].TurnOffShowDebugInfo();
		//pnet[0].TurnOnShowDebugInfo();
		_compute_Pnet_single_thread(maps, mapH, mapW);
	}
	else
	{
		_compute_Pnet_multi_thread(maps, mapH, mapW);
	}
	ZQ_CNN_OrderScore order;
	std::vector<std::vector<ZQ_CNN_BBox> > bounding_boxes(scales.size());
	std::vector<std::vector<ZQ_CNN_OrderScore> > bounding_scores(scales.size());
	const int block_size = 32;
	int stride = pnet_stride;
	int cellsize = pnet_size;
	int border_size = cellsize / stride;
	for (int i = 0; i < maps.size(); i++)
	{
		double t13 = omp_get_wtime();
		int changedH = (int)ceil(height*scales[i]);
		int changedW = (int)ceil(width*scales[i]);
		if (changedH < pnet_size || changedW < pnet_size)
			continue;
		// Factors mapping scaled-image coordinates back to the original frame.
		float cur_scale_x = (float)width / changedW;
		float cur_scale_y = (float)height / changedH;
		int count = 0;
		//score p
		int scoreH = mapH[i];
		int scoreW = mapW[i];
		const float *p = &maps[i][0];
		// Small maps are scanned directly; large maps are split into blocks
		// below so NMS can run per block (and in parallel).
		// FIX: was "scoreH < block_size" — inconsistent with the <= used for
		// the width, pushing exactly-block_size-tall maps onto the block path.
		if (scoreW <= block_size && scoreH <= block_size)
		{
			ZQ_CNN_BBox bbox;
			ZQ_CNN_OrderScore order;
			for (int row = 0; row < scoreH; row++)
			{
				for (int col = 0; col < scoreW; col++)
				{
					if (*p > thresh[0])
					{
						bbox.score = *p;
						order.score = *p;
						order.oriOrder = count;
						// Map the output cell back to a cellsize window in the
						// scaled image; rescaling to the frame happens after NMS.
						bbox.row1 = stride*row;
						bbox.col1 = stride*col;
						bbox.row2 = stride*row + cellsize;
						bbox.col2 = stride*col + cellsize;
						bbox.exist = true;
						bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1);
						bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size)
							&& (col >= border_size && col < scoreW - border_size);
						bounding_boxes[i].push_back(bbox);
						bounding_scores[i].push_back(order);
						count++;
					}
					p++;
				}
			}
			int before_count = bounding_boxes[i].size();
			ZQ_CNN_BBoxUtils::_nms(bounding_boxes[i], bounding_scores[i], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count);
			int after_count = bounding_boxes[i].size();
			for (int j = 0; j < after_count; j++)
			{
				ZQ_CNN_BBox& bbox = bounding_boxes[i][j];
				bbox.row1 = round(bbox.row1 *cur_scale_y);
				bbox.col1 = round(bbox.col1 *cur_scale_x);
				bbox.row2 = round(bbox.row2 *cur_scale_y);
				bbox.col2 = round(bbox.col2 *cur_scale_x);
				bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1);
			}
			double t14 = omp_get_wtime();
			if (show_debug_info)
				printf("nms cost: %.3f ms, (%d-->%d)\n", 1000 * (t14 - t13), before_count, after_count);
		}
		else
		{
			int before_count = 0, after_count = 0;
			int block_H_num = __max(1, scoreH / block_size);
			int block_W_num = __max(1, scoreW / block_size);
			int block_num = block_H_num*block_W_num;
			int width_per_block = scoreW / block_W_num;
			int height_per_block = scoreH / block_H_num;
			std::vector<std::vector<ZQ_CNN_BBox> > tmp_bounding_boxes(block_num);
			std::vector<std::vector<ZQ_CNN_OrderScore> > tmp_bounding_scores(block_num);
			std::vector<int> block_start_w(block_num), block_end_w(block_num);
			std::vector<int> block_start_h(block_num), block_end_h(block_num);
			for (int bh = 0; bh < block_H_num; bh++)
			{
				for (int bw = 0; bw < block_W_num; bw++)
				{
					int bb = bh * block_W_num + bw;
					// Non-first blocks start border_size early so windows that
					// straddle a block boundary are seen by both blocks' NMS.
					block_start_w[bb] = (bw == 0) ? 0 : (bw*width_per_block - border_size);
					// FIX: last COLUMN must be detected with block_W_num - 1
					// (was block_num - 1), otherwise the rightmost columns left
					// over from the integer division were never scanned.
					block_end_w[bb] = (bw == block_W_num - 1) ? scoreW : ((bw + 1)*width_per_block);
					block_start_h[bb] = (bh == 0) ? 0 : (bh*height_per_block - border_size);
					// FIX: same off-by for the last ROW (was block_num - 1).
					block_end_h[bb] = (bh == block_H_num - 1) ? scoreH : ((bh + 1)*height_per_block);
				}
			}
			int chunk_size = 1;// ceil((float)block_num / thread_num);
			if (thread_num <= 1)
			{
				for (int bb = 0; bb < block_num; bb++)
				{
					ZQ_CNN_BBox bbox;
					ZQ_CNN_OrderScore order;
					int count = 0;
					for (int row = block_start_h[bb]; row < block_end_h[bb]; row++)
					{
						p = &maps[i][0] + row*scoreW + block_start_w[bb];
						for (int col = block_start_w[bb]; col < block_end_w[bb]; col++)
						{
							if (*p > thresh[0])
							{
								bbox.score = *p;
								order.score = *p;
								order.oriOrder = count;
								bbox.row1 = stride*row;
								bbox.col1 = stride*col;
								bbox.row2 = stride*row + cellsize;
								bbox.col2 = stride*col + cellsize;
								bbox.exist = true;
								bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size)
									&& (col >= border_size && col < scoreW - border_size);
								bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1);
								tmp_bounding_boxes[bb].push_back(bbox);
								tmp_bounding_scores[bb].push_back(order);
								count++;
							}
							p++;
						}
					}
					int tmp_before_count = tmp_bounding_boxes[bb].size();
					ZQ_CNN_BBoxUtils::_nms(tmp_bounding_boxes[bb], tmp_bounding_scores[bb], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count);
					int tmp_after_count = tmp_bounding_boxes[bb].size();
					before_count += tmp_before_count;
					after_count += tmp_after_count;
				}
			}
			else
			{
				// FIX: before_count/after_count were accumulated unsynchronized
				// from multiple threads (a data race on the debug counters);
				// the reduction clause makes the sums well-defined.
#pragma omp parallel for schedule(dynamic, chunk_size) num_threads(thread_num) reduction(+:before_count,after_count)
				for (int bb = 0; bb < block_num; bb++)
				{
					ZQ_CNN_BBox bbox;
					ZQ_CNN_OrderScore order;
					int count = 0;
					for (int row = block_start_h[bb]; row < block_end_h[bb]; row++)
					{
						const float* p = &maps[i][0] + row*scoreW + block_start_w[bb];
						for (int col = block_start_w[bb]; col < block_end_w[bb]; col++)
						{
							if (*p > thresh[0])
							{
								bbox.score = *p;
								order.score = *p;
								order.oriOrder = count;
								bbox.row1 = stride*row;
								bbox.col1 = stride*col;
								bbox.row2 = stride*row + cellsize;
								bbox.col2 = stride*col + cellsize;
								bbox.exist = true;
								bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size)
									&& (col >= border_size && col < scoreW - border_size);
								bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1);
								tmp_bounding_boxes[bb].push_back(bbox);
								tmp_bounding_scores[bb].push_back(order);
								count++;
							}
							p++;
						}
					}
					int tmp_before_count = tmp_bounding_boxes[bb].size();
					ZQ_CNN_BBoxUtils::_nms(tmp_bounding_boxes[bb], tmp_bounding_scores[bb], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count);
					int tmp_after_count = tmp_bounding_boxes[bb].size();
					before_count += tmp_before_count;
					after_count += tmp_after_count;
				}
			}
			// Merge the per-block survivors into this scale's candidate list.
			count = 0;
			for (int bb = 0; bb < block_num; bb++)
			{
				std::vector<ZQ_CNN_BBox>::iterator it = tmp_bounding_boxes[bb].begin();
				for (; it != tmp_bounding_boxes[bb].end(); it++)
				{
					if ((*it).exist)
					{
						bounding_boxes[i].push_back(*it);
						order.score = (*it).score;
						order.oriOrder = count;
						bounding_scores[i].push_back(order);
						count++;
					}
				}
			}
			//ZQ_CNN_BBoxUtils::_nms(bounding_boxes[i], bounding_scores[i], nms_thresh_per_scale, "Union", 0);
			after_count = bounding_boxes[i].size();
			for (int j = 0; j < after_count; j++)
			{
				ZQ_CNN_BBox& bbox = bounding_boxes[i][j];
				bbox.row1 = round(bbox.row1 *cur_scale_y);
				bbox.col1 = round(bbox.col1 *cur_scale_x);
				bbox.row2 = round(bbox.row2 *cur_scale_y);
				bbox.col2 = round(bbox.col2 *cur_scale_x);
				bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1);
			}
			double t14 = omp_get_wtime();
			if (show_debug_info)
				printf("nms cost: %.3f ms, (%d-->%d)\n", 1000 * (t14 - t13), before_count, after_count);
		}
	}
	// Gather all scales' surviving boxes and run the final cross-scale NMS.
	std::vector<ZQ_CNN_OrderScore> firstOrderScore;
	int count = 0;
	for (int i = 0; i < scales.size(); i++)
	{
		std::vector<ZQ_CNN_BBox>::iterator it = bounding_boxes[i].begin();
		for (; it != bounding_boxes[i].end(); it++)
		{
			if ((*it).exist)
			{
				firstBbox.push_back(*it);
				order.score = (*it).score;
				order.oriOrder = count;
				firstOrderScore.push_back(order);
				count++;
			}
		}
	}
	//the first stage's nms
	if (count < 1) return false;
	double t15 = omp_get_wtime();
	ZQ_CNN_BBoxUtils::_nms(firstBbox, firstOrderScore, nms_thresh[0], "Union", 0, 1);
	ZQ_CNN_BBoxUtils::_refine_and_square_bbox(firstBbox, width, height, true);
	double t16 = omp_get_wtime();
	if (show_debug_info)
		printf("nms cost: %.3f ms\n", 1000 * (t16 - t15));
	if (show_debug_info)
		printf("first stage candidate count: %d\n", count);
	double t3 = omp_get_wtime();
	if (show_debug_info)
		printf("stage 1: cost %.3f ms\n", 1000 * (t3 - t2));
	return true;
}
// Stage 2 of the MTCNN cascade: refines Pnet candidates with Rnet.
// Surviving boxes from firstBbox are cropped+resized to rnet_size, batched
// (up to BATCH_SIZE per task) and dispatched across OpenMP threads; boxes
// whose Rnet score exceeds thresh[1] keep their regression offsets, then a
// "Min"-mode NMS and square refinement produce secondBbox. Always returns true.
bool _Rnet_stage(std::vector<ZQ_CNN_BBox>& firstBbox, std::vector<ZQ_CNN_BBox>& secondBbox)
{
	double t3 = omp_get_wtime();
	secondBbox.clear();
	std::vector<ZQ_CNN_BBox>::iterator it = firstBbox.begin();
	std::vector<ZQ_CNN_OrderScore> secondScore;
	std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h;
	int r_count = 0;
	// Collect the crop rectangles of all live candidates; drop boxes smaller
	// than half the minimum face size (they cannot yield a valid detection).
	for (; it != firstBbox.end(); it++)
	{
		if ((*it).exist)
		{
			int off_x = it->col1;
			int off_y = it->row1;
			int rect_w = it->col2 - off_x;
			int rect_h = it->row2 - off_y;
			if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size)
			{
				(*it).exist = false;
				continue;
			}
			else
			{
				src_off_x.push_back(off_x);
				src_off_y.push_back(off_y);
				src_rect_w.push_back(rect_w);
				src_rect_h.push_back(rect_h);
				r_count++;
				secondBbox.push_back(*it);
			}
		}
	}
	// Partition the r_count crops into tasks: one per thread, capped at
	// BATCH_SIZE crops per task (more tasks than threads if needed).
	int batch_size = BATCH_SIZE;
	int per_num = ceil((float)r_count / thread_num);
	int need_thread_num = thread_num;
	if (per_num > batch_size)
	{
		need_thread_num = ceil((float)r_count / batch_size);
		per_num = batch_size;
	}
	std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_rnet_images(need_thread_num);
	std::vector<std::vector<int> > task_src_off_x(need_thread_num);
	std::vector<std::vector<int> > task_src_off_y(need_thread_num);
	std::vector<std::vector<int> > task_src_rect_w(need_thread_num);
	std::vector<std::vector<int> > task_src_rect_h(need_thread_num);
	std::vector<std::vector<ZQ_CNN_BBox> > task_secondBbox(need_thread_num);
	for (int i = 0; i < need_thread_num; i++)
	{
		int st_id = per_num*i;
		int end_id = __min(r_count, per_num*(i + 1));
		int cur_num = end_id - st_id;
		if (cur_num > 0)
		{
			task_src_off_x[i].resize(cur_num);
			task_src_off_y[i].resize(cur_num);
			task_src_rect_w[i].resize(cur_num);
			task_src_rect_h[i].resize(cur_num);
			task_secondBbox[i].resize(cur_num);
			for (int j = 0; j < cur_num; j++)
			{
				task_src_off_x[i][j] = src_off_x[st_id + j];
				task_src_off_y[i][j] = src_off_y[st_id + j];
				task_src_rect_w[i][j] = src_rect_w[st_id + j];
				task_src_rect_h[i][j] = src_rect_h[st_id + j];
				task_secondBbox[i][j] = secondBbox[st_id + j];
			}
		}
	}
	if (thread_num <= 1)
	{
		for (int pp = 0; pp < need_thread_num; pp++)
		{
			// FIX: was "task_src_off_x.size() == 0" — the OUTER vector is
			// sized need_thread_num and never empty, so empty tasks slipped
			// through to ResizeBilinearRect. Check this task's own crop list.
			if (task_src_off_x[pp].size() == 0)
				continue;
			if (!input.ResizeBilinearRect(task_rnet_images[pp], rnet_size, rnet_size, 0, 0,
				task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
			{
				continue;
			}
			rnet[0].Forward(task_rnet_images[pp]);
			const ZQ_CNN_Tensor4D* score = rnet[0].GetBlobByName("prob1");
			const ZQ_CNN_Tensor4D* location = rnet[0].GetBlobByName("conv5-2");
			const float* score_ptr = score->GetFirstPixelPtr();
			const float* location_ptr = location->GetFirstPixelPtr();
			int score_sliceStep = score->GetSliceStep();
			int location_sliceStep = location->GetSliceStep();
			int task_count = 0;
			// Slice i of the batch corresponds to crop i; channel 1 of
			// "prob1" is the face score, "conv5-2" the 4 regression offsets.
			for (int i = 0; i < task_secondBbox[pp].size(); i++)
			{
				if (score_ptr[i*score_sliceStep + 1] > thresh[1])
				{
					for (int j = 0; j < 4; j++)
						task_secondBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j];
					task_secondBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i];
					task_secondBbox[pp][i].score = score_ptr[i*score_sliceStep + 1];
					task_count++;
				}
				else
				{
					task_secondBbox[pp][i].exist = false;
				}
			}
			if (task_count < 1)
			{
				task_secondBbox[pp].clear();
				continue;
			}
			// Compact: erase rejected boxes (reverse order keeps indices valid).
			for (int i = task_secondBbox[pp].size() - 1; i >= 0; i--)
			{
				if (!task_secondBbox[pp][i].exist)
					task_secondBbox[pp].erase(task_secondBbox[pp].begin() + i);
			}
		}
	}
	else
	{
#pragma omp parallel for num_threads(thread_num) schedule(dynamic,1)
		for (int pp = 0; pp < need_thread_num; pp++)
		{
			int thread_id = omp_get_thread_num();
			// FIX: same per-task empty check as the serial branch (was the
			// outer vector's size, which is never zero).
			if (task_src_off_x[pp].size() == 0)
				continue;
			if (!input.ResizeBilinearRect(task_rnet_images[pp], rnet_size, rnet_size, 0, 0,
				task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
			{
				continue;
			}
			rnet[thread_id].Forward(task_rnet_images[pp]);
			const ZQ_CNN_Tensor4D* score = rnet[thread_id].GetBlobByName("prob1");
			const ZQ_CNN_Tensor4D* location = rnet[thread_id].GetBlobByName("conv5-2");
			const float* score_ptr = score->GetFirstPixelPtr();
			const float* location_ptr = location->GetFirstPixelPtr();
			int score_sliceStep = score->GetSliceStep();
			int location_sliceStep = location->GetSliceStep();
			int task_count = 0;
			for (int i = 0; i < task_secondBbox[pp].size(); i++)
			{
				if (score_ptr[i*score_sliceStep + 1] > thresh[1])
				{
					for (int j = 0; j < 4; j++)
						task_secondBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j];
					task_secondBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i];
					task_secondBbox[pp][i].score = score_ptr[i*score_sliceStep + 1];
					task_count++;
				}
				else
				{
					task_secondBbox[pp][i].exist = false;
				}
			}
			if (task_count < 1)
			{
				task_secondBbox[pp].clear();
				continue;
			}
			for (int i = task_secondBbox[pp].size() - 1; i >= 0; i--)
			{
				if (!task_secondBbox[pp][i].exist)
					task_secondBbox[pp].erase(task_secondBbox[pp].begin() + i);
			}
		}
	}
	// Flatten the per-task survivors back into secondBbox + scores.
	int count = 0;
	for (int i = 0; i < need_thread_num; i++)
	{
		count += task_secondBbox[i].size();
	}
	secondBbox.resize(count);
	secondScore.resize(count);
	int id = 0;
	for (int i = 0; i < need_thread_num; i++)
	{
		for (int j = 0; j < task_secondBbox[i].size(); j++)
		{
			secondBbox[id] = task_secondBbox[i][j];
			secondScore[id].score = secondBbox[id].score;
			secondScore[id].oriOrder = id;
			id++;
		}
	}
	//ZQ_CNN_BBoxUtils::_nms(secondBbox, secondScore, nms_thresh[1], "Union");
	ZQ_CNN_BBoxUtils::_nms(secondBbox, secondScore, nms_thresh[1], "Min");
	ZQ_CNN_BBoxUtils::_refine_and_square_bbox(secondBbox, width, height, true);
	count = secondBbox.size();
	double t4 = omp_get_wtime();
	if (show_debug_info)
		printf("run Rnet [%d] times, candidate after nms: %d \n", r_count, count);
	if (show_debug_info)
		printf("stage 2: cost %.3f ms\n", 1000 * (t4 - t3));
	return true;
}
bool _Onet_stage(std::vector<ZQ_CNN_BBox>& secondBbox, std::vector<ZQ_CNN_BBox>& thirdBbox)
{
double t4 = omp_get_wtime();
thirdBbox.clear();
std::vector<ZQ_CNN_BBox>::iterator it = secondBbox.begin();
std::vector<ZQ_CNN_OrderScore> thirdScore;
std::vector<ZQ_CNN_BBox> early_accept_thirdBbox;
std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h;
int o_count = 0;
for (; it != secondBbox.end(); it++)
{
if ((*it).exist)
{
int off_x = it->col1;
int off_y = it->row1;
int rect_w = it->col2 - off_x;
int rect_h = it->row2 - off_y;
if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size)
{
(*it).exist = false;
continue;
}
else
{
if (!do_landmark && it->score > early_accept_thresh)
{
early_accept_thirdBbox.push_back(*it);
}
else
{
src_off_x.push_back(off_x);
src_off_y.push_back(off_y);
src_rect_w.push_back(rect_w);
src_rect_h.push_back(rect_h);
o_count++;
thirdBbox.push_back(*it);
}
}
}
}
int batch_size = BATCH_SIZE;
int per_num = ceil((float)o_count / thread_num);
int need_thread_num = thread_num;
if (per_num > batch_size)
{
need_thread_num = ceil((float)o_count / batch_size);
per_num = batch_size;
}
std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_onet_images(need_thread_num);
std::vector<std::vector<int> > task_src_off_x(need_thread_num);
std::vector<std::vector<int> > task_src_off_y(need_thread_num);
std::vector<std::vector<int> > task_src_rect_w(need_thread_num);
std::vector<std::vector<int> > task_src_rect_h(need_thread_num);
std::vector<std::vector<ZQ_CNN_BBox> > task_thirdBbox(need_thread_num);
for (int i = 0; i < need_thread_num; i++)
{
int st_id = per_num*i;
int end_id = __min(o_count, per_num*(i + 1));
int cur_num = end_id - st_id;
if (cur_num > 0)
{
task_src_off_x[i].resize(cur_num);
task_src_off_y[i].resize(cur_num);
task_src_rect_w[i].resize(cur_num);
task_src_rect_h[i].resize(cur_num);
task_thirdBbox[i].resize(cur_num);
for (int j = 0; j < cur_num; j++)
{
task_src_off_x[i][j] = src_off_x[st_id + j];
task_src_off_y[i][j] = src_off_y[st_id + j];
task_src_rect_w[i][j] = src_rect_w[st_id + j];
task_src_rect_h[i][j] = src_rect_h[st_id + j];
task_thirdBbox[i][j] = thirdBbox[st_id + j];
}
}
}
if (thread_num <= 1)
{
for (int pp = 0; pp < need_thread_num; pp++)
{
if (task_src_off_x.size() == 0)
continue;
if (!input.ResizeBilinearRect(task_onet_images[pp], onet_size, onet_size, 0, 0,
task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
{
continue;
}
double t31 = omp_get_wtime();
onet[0].Forward(task_onet_images[pp]);
double t32 = omp_get_wtime();
const ZQ_CNN_Tensor4D* score = onet[0].GetBlobByName("prob1");
const ZQ_CNN_Tensor4D* location = onet[0].GetBlobByName("conv6-2");
const ZQ_CNN_Tensor4D* keyPoint = onet[0].GetBlobByName("conv6-3");
const float* score_ptr = score->GetFirstPixelPtr();
const float* location_ptr = location->GetFirstPixelPtr();
const float* keyPoint_ptr = 0;
if (keyPoint != 0)
keyPoint_ptr = keyPoint->GetFirstPixelPtr();
int score_sliceStep = score->GetSliceStep();
int location_sliceStep = location->GetSliceStep();
int keyPoint_sliceStep = 0;
if (keyPoint != 0)
keyPoint_sliceStep = keyPoint->GetSliceStep();
int task_count = 0;
ZQ_CNN_OrderScore order;
for (int i = 0; i < task_thirdBbox[pp].size(); i++)
{
if (score_ptr[i*score_sliceStep + 1] > thresh[2])
{
for (int j = 0; j < 4; j++)
task_thirdBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j];
if (keyPoint != 0)
{
for (int num = 0; num < 5; num++)
{
task_thirdBbox[pp][i].ppoint[num] = task_thirdBbox[pp][i].col1 +
(task_thirdBbox[pp][i].col2 - task_thirdBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num];
task_thirdBbox[pp][i].ppoint[num + 5] = task_thirdBbox[pp][i].row1 +
(task_thirdBbox[pp][i].row2 - task_thirdBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5];
}
}
task_thirdBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i];
task_thirdBbox[pp][i].score = score_ptr[i*score_sliceStep + 1];
task_count++;
}
else
{
task_thirdBbox[pp][i].exist = false;
}
}
if (task_count < 1)
{
task_thirdBbox[pp].clear();
continue;
}
for (int i = task_thirdBbox[pp].size() - 1; i >= 0; i--)
{
if (!task_thirdBbox[pp][i].exist)
task_thirdBbox[pp].erase(task_thirdBbox[pp].begin() + i);
}
}
}
else
{
#pragma omp parallel for num_threads(thread_num) schedule(dynamic,1)
for (int pp = 0; pp < need_thread_num; pp++)
{
int thread_id = omp_get_thread_num();
if (task_src_off_x.size() == 0)
continue;
if (!input.ResizeBilinearRect(task_onet_images[pp], onet_size, onet_size, 0, 0,
task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
{
continue;
}
double t31 = omp_get_wtime();
onet[thread_id].Forward(task_onet_images[pp]);
double t32 = omp_get_wtime();
const ZQ_CNN_Tensor4D* score = onet[thread_id].GetBlobByName("prob1");
const ZQ_CNN_Tensor4D* location = onet[thread_id].GetBlobByName("conv6-2");
const ZQ_CNN_Tensor4D* keyPoint = onet[thread_id].GetBlobByName("conv6-3");
const float* score_ptr = score->GetFirstPixelPtr();
const float* location_ptr = location->GetFirstPixelPtr();
const float* keyPoint_ptr = 0;
if (keyPoint != 0)
keyPoint_ptr = keyPoint->GetFirstPixelPtr();
int score_sliceStep = score->GetSliceStep();
int location_sliceStep = location->GetSliceStep();
int keyPoint_sliceStep = 0;
if (keyPoint != 0)
keyPoint_sliceStep = keyPoint->GetSliceStep();
int task_count = 0;
ZQ_CNN_OrderScore order;
for (int i = 0; i < task_thirdBbox[pp].size(); i++)
{
if (score_ptr[i*score_sliceStep + 1] > thresh[2])
{
for (int j = 0; j < 4; j++)
task_thirdBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j];
if (keyPoint != 0)
{
for (int num = 0; num < 5; num++)
{
task_thirdBbox[pp][i].ppoint[num] = task_thirdBbox[pp][i].col1 +
(task_thirdBbox[pp][i].col2 - task_thirdBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num];
task_thirdBbox[pp][i].ppoint[num + 5] = task_thirdBbox[pp][i].row1 +
(task_thirdBbox[pp][i].row2 - task_thirdBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5];
}
}
task_thirdBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i];
task_thirdBbox[pp][i].score = score_ptr[i*score_sliceStep + 1];
task_count++;
}
else
{
task_thirdBbox[pp][i].exist = false;
}
}
if (task_count < 1)
{
task_thirdBbox[pp].clear();
continue;
}
for (int i = task_thirdBbox[pp].size() - 1; i >= 0; i--)
{
if (!task_thirdBbox[pp][i].exist)
task_thirdBbox[pp].erase(task_thirdBbox[pp].begin() + i);
}
}
}
int count = 0;
for (int i = 0; i < need_thread_num; i++)
{
count += task_thirdBbox[i].size();
}
thirdBbox.resize(count);
thirdScore.resize(count);
int id = 0;
for (int i = 0; i < need_thread_num; i++)
{
for (int j = 0; j < task_thirdBbox[i].size(); j++)
{
thirdBbox[id] = task_thirdBbox[i][j];
thirdScore[id].score = task_thirdBbox[i][j].score;
thirdScore[id].oriOrder = id;
id++;
}
}
ZQ_CNN_OrderScore order;
for (int i = 0; i < early_accept_thirdBbox.size(); i++)
{
order.score = early_accept_thirdBbox[i].score;
order.oriOrder = count++;
thirdScore.push_back(order);
thirdBbox.push_back(early_accept_thirdBbox[i]);
}
ZQ_CNN_BBoxUtils::_refine_and_square_bbox(thirdBbox, width, height, false);
ZQ_CNN_BBoxUtils::_nms(thirdBbox, thirdScore, nms_thresh[2], "Min");
double t5 = omp_get_wtime();
if (show_debug_info)
printf("run Onet [%d] times, candidate before nms: %d \n", o_count, count);
if (show_debug_info)
printf("stage 3: cost %.3f ms\n", 1000 * (t5 - t4));
return true;
}
// Stage 4 (optional landmark refinement): re-runs 5-point landmark
// prediction with Lnet on squared copies of the Onet survivors. Only the
// ppoint landmark fields of fourthBbox are updated; box geometry and scores
// are kept from thirdBbox. Always returns true.
bool _Lnet_stage(std::vector<ZQ_CNN_BBox>& thirdBbox, std::vector<ZQ_CNN_BBox>& fourthBbox)
{
	double t4 = omp_get_wtime();
	fourthBbox.clear();
	std::vector<ZQ_CNN_BBox>::iterator it = thirdBbox.begin();
	std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h;
	int l_count = 0;
	// Keep live, sufficiently large boxes.
	for (; it != thirdBbox.end(); it++)
	{
		if ((*it).exist)
		{
			int off_x = it->col1;
			int off_y = it->row1;
			int rect_w = it->col2 - off_x;
			int rect_h = it->row2 - off_y;
			if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size)
			{
				(*it).exist = false;
				continue;
			}
			else
			{
				l_count++;
				fourthBbox.push_back(*it);
			}
		}
	}
	// Lnet expects square inputs: crop from squared copies, but report the
	// landmarks on the squared geometry while fourthBbox keeps the originals.
	std::vector<ZQ_CNN_BBox> copy_fourthBbox = fourthBbox;
	ZQ_CNN_BBoxUtils::_square_bbox(copy_fourthBbox, width, height);
	for (it = copy_fourthBbox.begin(); it != copy_fourthBbox.end(); ++it)
	{
		int off_x = it->col1;
		int off_y = it->row1;
		int rect_w = it->col2 - off_x;
		int rect_h = it->row2 - off_y;
		src_off_x.push_back(off_x);
		src_off_y.push_back(off_y);
		src_rect_w.push_back(rect_w);
		src_rect_h.push_back(rect_h);
	}
	// Partition the l_count crops into per-thread tasks capped at BATCH_SIZE.
	int batch_size = BATCH_SIZE;
	int per_num = ceil((float)l_count / thread_num);
	int need_thread_num = thread_num;
	if (per_num > batch_size)
	{
		need_thread_num = ceil((float)l_count / batch_size);
		per_num = batch_size;
	}
	std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_lnet_images(need_thread_num);
	std::vector<std::vector<int> > task_src_off_x(need_thread_num);
	std::vector<std::vector<int> > task_src_off_y(need_thread_num);
	std::vector<std::vector<int> > task_src_rect_w(need_thread_num);
	std::vector<std::vector<int> > task_src_rect_h(need_thread_num);
	std::vector<std::vector<ZQ_CNN_BBox> > task_fourthBbox(need_thread_num);
	for (int i = 0; i < need_thread_num; i++)
	{
		int st_id = per_num*i;
		int end_id = __min(l_count, per_num*(i + 1));
		int cur_num = end_id - st_id;
		if (cur_num > 0)
		{
			task_src_off_x[i].resize(cur_num);
			task_src_off_y[i].resize(cur_num);
			task_src_rect_w[i].resize(cur_num);
			task_src_rect_h[i].resize(cur_num);
			task_fourthBbox[i].resize(cur_num);
			for (int j = 0; j < cur_num; j++)
			{
				task_src_off_x[i][j] = src_off_x[st_id + j];
				task_src_off_y[i][j] = src_off_y[st_id + j];
				task_src_rect_w[i][j] = src_rect_w[st_id + j];
				task_src_rect_h[i][j] = src_rect_h[st_id + j];
				task_fourthBbox[i][j] = copy_fourthBbox[st_id + j];
			}
		}
	}
	if (thread_num <= 1)
	{
		for (int pp = 0; pp < need_thread_num; pp++)
		{
			// FIX: was "task_src_off_x.size() == 0" — the OUTER vector is
			// sized need_thread_num and never empty; check this task's crops.
			if (task_src_off_x[pp].size() == 0)
				continue;
			if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0,
				task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
			{
				continue;
			}
			double t31 = omp_get_wtime();
			lnet[0].Forward(task_lnet_images[pp]);
			double t32 = omp_get_wtime();
			const ZQ_CNN_Tensor4D* keyPoint = lnet[0].GetBlobByName("conv6-3");
			const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr();
			int keyPoint_sliceStep = keyPoint->GetSliceStep();
			// First 5 outputs are x fractions, next 5 are y fractions of the box.
			for (int i = 0; i < task_fourthBbox[pp].size(); i++)
			{
				for (int num = 0; num < 5; num++)
				{
					task_fourthBbox[pp][i].ppoint[num] = task_fourthBbox[pp][i].col1 +
						(task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num];
					task_fourthBbox[pp][i].ppoint[num + 5] = task_fourthBbox[pp][i].row1 +
						(task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5];
				}
			}
		}
	}
	else
	{
#pragma omp parallel for num_threads(thread_num) schedule(dynamic,1)
		for (int pp = 0; pp < need_thread_num; pp++)
		{
			int thread_id = omp_get_thread_num();
			// FIX: same per-task empty check as the serial branch.
			if (task_src_off_x[pp].size() == 0)
				continue;
			if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0,
				task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
			{
				continue;
			}
			double t31 = omp_get_wtime();
			lnet[thread_id].Forward(task_lnet_images[pp]);
			double t32 = omp_get_wtime();
			const ZQ_CNN_Tensor4D* keyPoint = lnet[thread_id].GetBlobByName("conv6-3");
			const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr();
			int keyPoint_sliceStep = keyPoint->GetSliceStep();
			for (int i = 0; i < task_fourthBbox[pp].size(); i++)
			{
				for (int num = 0; num < 5; num++)
				{
					task_fourthBbox[pp][i].ppoint[num] = task_fourthBbox[pp][i].col1 +
						(task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num];
					task_fourthBbox[pp][i].ppoint[num + 5] = task_fourthBbox[pp][i].row1 +
						(task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5];
				}
			}
		}
	}
	// Copy ONLY the refreshed landmarks back; fourthBbox retains the original
	// (unsquared) box geometry and scores from the selection loop above.
	int count = 0;
	for (int i = 0; i < need_thread_num; i++)
	{
		count += task_fourthBbox[i].size();
	}
	fourthBbox.resize(count);
	int id = 0;
	for (int i = 0; i < need_thread_num; i++)
	{
		for (int j = 0; j < task_fourthBbox[i].size(); j++)
		{
			memcpy(fourthBbox[id].ppoint, task_fourthBbox[i][j].ppoint, sizeof(float) * 10);
			id++;
		}
	}
	double t5 = omp_get_wtime();
	if (show_debug_info)
		printf("run Lnet [%d] times \n", l_count);
	if (show_debug_info)
		printf("stage 4: cost %.3f ms\n", 1000 * (t5 - t4));
	return true;
}
// Stage 4 with the 106-landmark Lnet: filter the stage-3 boxes, crop a squared
// patch per surviving box, run lnet in per-thread batches and write the 106
// (x,y) landmark pairs into resultBbox. Undersized boxes are marked
// exist=false and dropped. Always returns true.
bool _Lnet106_stage(std::vector<ZQ_CNN_BBox>& thirdBbox, std::vector<ZQ_CNN_BBox106>& resultBbox)
{
	double t4 = omp_get_wtime();
	std::vector<ZQ_CNN_BBox> fourthBbox;
	std::vector<ZQ_CNN_BBox>::iterator it = thirdBbox.begin();
	std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h;
	int l_count = 0;
	// Keep only existing boxes whose width and height exceed half of min_size.
	for (; it != thirdBbox.end(); it++)
	{
		if ((*it).exist)
		{
			int off_x = it->col1;
			int off_y = it->row1;
			int rect_w = it->col2 - off_x;
			int rect_h = it->row2 - off_y;
			if (rect_w <= 0.5*min_size || rect_h <= 0.5*min_size)
			{
				(*it).exist = false;
				continue;
			}
			l_count++;
			fourthBbox.push_back(*it);
		}
	}
	// Lnet expects square inputs: square the crop rectangles, then record them.
	std::vector<ZQ_CNN_BBox> copy_fourthBbox = fourthBbox;
	ZQ_CNN_BBoxUtils::_square_bbox(copy_fourthBbox, width, height);
	for (it = copy_fourthBbox.begin(); it != copy_fourthBbox.end(); ++it)
	{
		int off_x = it->col1;
		int off_y = it->row1;
		int rect_w = it->col2 - off_x;
		int rect_h = it->row2 - off_y;
		src_off_x.push_back(off_x);
		src_off_y.push_back(off_y);
		src_rect_w.push_back(rect_w);
		src_rect_h.push_back(rect_h);
	}
	// Partition the l_count boxes into need_thread_num batches of at most
	// BATCH_SIZE boxes each.
	int batch_size = BATCH_SIZE;
	int per_num = ceil((float)l_count / thread_num);
	int need_thread_num = thread_num;
	if (per_num > batch_size)
	{
		need_thread_num = ceil((float)l_count / batch_size);
		per_num = batch_size;
	}
	std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_lnet_images(need_thread_num);
	std::vector<std::vector<int> > task_src_off_x(need_thread_num);
	std::vector<std::vector<int> > task_src_off_y(need_thread_num);
	std::vector<std::vector<int> > task_src_rect_w(need_thread_num);
	std::vector<std::vector<int> > task_src_rect_h(need_thread_num);
	std::vector<std::vector<ZQ_CNN_BBox106> > task_fourthBbox(need_thread_num);
	for (int i = 0; i < need_thread_num; i++)
	{
		int st_id = per_num*i;
		int end_id = __min(l_count, per_num*(i + 1));
		int cur_num = end_id - st_id;
		if (cur_num <= 0)
			continue;
		task_src_off_x[i].resize(cur_num);
		task_src_off_y[i].resize(cur_num);
		task_src_rect_w[i].resize(cur_num);
		task_src_rect_h[i].resize(cur_num);
		task_fourthBbox[i].resize(cur_num);
		for (int j = 0; j < cur_num; j++)
		{
			task_src_off_x[i][j] = src_off_x[st_id + j];
			task_src_off_y[i][j] = src_off_y[st_id + j];
			task_src_rect_w[i][j] = src_rect_w[st_id + j];
			task_src_rect_h[i][j] = src_rect_h[st_id + j];
			task_fourthBbox[i][j].col1 = copy_fourthBbox[st_id + j].col1;
			task_fourthBbox[i][j].col2 = copy_fourthBbox[st_id + j].col2;
			task_fourthBbox[i][j].row1 = copy_fourthBbox[st_id + j].row1;
			task_fourthBbox[i][j].row2 = copy_fourthBbox[st_id + j].row2;
			task_fourthBbox[i][j].area = copy_fourthBbox[st_id + j].area;
			task_fourthBbox[i][j].score = copy_fourthBbox[st_id + j].score;
			task_fourthBbox[i][j].exist = copy_fourthBbox[st_id + j].exist;
		}
	}
	// The output geometry comes from the original (un-squared) filtered boxes;
	// landmarks are filled in below from the per-task results.
	resultBbox.resize(l_count);
	for (int i = 0; i < l_count; i++)
	{
		resultBbox[i].col1 = fourthBbox[i].col1;
		resultBbox[i].col2 = fourthBbox[i].col2;
		resultBbox[i].row1 = fourthBbox[i].row1;
		resultBbox[i].row2 = fourthBbox[i].row2;
		resultBbox[i].score = fourthBbox[i].score;
		resultBbox[i].exist = fourthBbox[i].exist;
		resultBbox[i].area = fourthBbox[i].area;
	}
	if (thread_num <= 1)
	{
		// Serial path: all batches on lnet[0].
		for (int pp = 0; pp < need_thread_num; pp++)
		{
			if (task_src_off_x[pp].size() == 0)
				continue;
			if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0,
				task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
			{
				continue;
			}
			lnet[0].Forward(task_lnet_images[pp]);
			const ZQ_CNN_Tensor4D* keyPoint = lnet[0].GetBlobByName("conv6-3");
			const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr();
			int keypoint_num = keyPoint->GetC() / 2;
			int keyPoint_sliceStep = keyPoint->GetSliceStep();
			for (int i = 0; i < (int)task_fourthBbox[pp].size(); i++)
			{
				for (int num = 0; num < keypoint_num; num++)
				{
					// Network outputs are normalized to the squared crop:
					// map them back to absolute image coordinates.
					task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 +
						(task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2];
					task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 +
						(task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1];
				}
			}
		}
	}
	else
	{
		// Parallel path: one lnet instance per thread.
		// schedule(dynamic,1) matches the analogous 5-landmark stage loop.
#pragma omp parallel for num_threads(thread_num) schedule(dynamic,1)
		for (int pp = 0; pp < need_thread_num; pp++)
		{
			int thread_id = omp_get_thread_num();
			// BUGFIX: test this task's own rect list (was task_src_off_x.size(),
			// which equals need_thread_num and is never 0).
			if (task_src_off_x[pp].size() == 0)
				continue;
			if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0,
				task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
			{
				continue;
			}
			lnet[thread_id].Forward(task_lnet_images[pp]);
			const ZQ_CNN_Tensor4D* keyPoint = lnet[thread_id].GetBlobByName("conv6-3");
			const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr();
			int keypoint_num = keyPoint->GetC() / 2;
			int keyPoint_sliceStep = keyPoint->GetSliceStep();
			for (int i = 0; i < (int)task_fourthBbox[pp].size(); i++)
			{
				for (int num = 0; num < keypoint_num; num++)
				{
					task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 +
						(task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2];
					task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 +
						(task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1];
				}
			}
		}
	}
	// Copy the per-task landmarks back into the flat result list
	// (106 landmarks x 2 coordinates = 212 floats per box).
	int count = 0;
	for (int i = 0; i < need_thread_num; i++)
	{
		count += task_fourthBbox[i].size();
	}
	resultBbox.resize(count);
	int id = 0;
	for (int i = 0; i < need_thread_num; i++)
	{
		for (int j = 0; j < (int)task_fourthBbox[i].size(); j++)
		{
			memcpy(resultBbox[id].ppoint, task_fourthBbox[i][j].ppoint, sizeof(float) * 212);
			id++;
		}
	}
	double t5 = omp_get_wtime();
	if (show_debug_info)
		printf("run Lnet [%d] times \n", l_count);
	if (show_debug_info)
		printf("stage 4: cost %.3f ms\n", 1000 * (t5 - t4));
	return true;
}
// Truncate bbox to at most limit_num entries, keeping the leading ones.
// width and height are unused; they are kept to preserve the interface.
void _select(std::vector<ZQ_CNN_BBox>& bbox, int limit_num, int width, int height)
{
	const int current_num = (int)bbox.size();
	if (current_num > limit_num)
		bbox.resize(limit_num);
}
};
}
#endif
|
GB_matvec_type.c | //------------------------------------------------------------------------------
// GB_matvec_type: return the type of a matrix
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB.h"
GrB_Info GB_matvec_type         // get the type of a matrix
(
    GrB_Type *type,             // returns the type of the matrix
    const GrB_Matrix A,         // matrix to query
    GB_Context Context
)
{

    // type must be a valid (non-NULL) output pointer; A is checked in debug
    // builds only.
    GB_RETURN_IF_NULL (type) ;
    ASSERT_MATRIX_OK (A, "A for type", GB0) ;

    // report the type of A to the caller
    (*type) = A->type ;
    #pragma omp flush
    return (GrB_SUCCESS) ;
}
|
19_omp_first_priv_nested.c | // clang-format off
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %apply-typeart -typeart-alloca -call-filter -S 2>&1 | %filecheck %s
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %opt -O2 -S | %apply-typeart -typeart-alloca -call-filter -S 2>&1 | %filecheck %s
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %apply-typeart -typeart-alloca -call-filter -S | %filecheck %s --check-prefix=check-inst
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %opt -O2 -S | %apply-typeart -typeart-alloca -call-filter -S | %filecheck %s --check-prefix=check-inst
// REQUIRES: openmp
// clang-format on
#include "omp.h"
extern void MPI_Send(void*, int);
// Helper called from a parallel region in foo(): the pointer 'x' escapes
// through MPI_Send, so the TypeArt call-filter must keep the allocation it
// points to; 'e' is only dereferenced locally.
// NOTE: the lines below starting with "check-inst" are FileCheck directives;
// the code must stay in sync with them.
void func(int* x, int* e) {
// firstprivate > every thread has a private copy of addr(!) x
// check-inst: define {{.*}} @func
// check-inst-NOT: call void @__typeart_alloc_stack
#pragma omp parallel for firstprivate(x), shared(e)
for (int i = 0; i < 10; ++i) {
// Analysis should not filter x, but e...
MPI_Send((void*)x, *e);
}
}
// Driver: stack variables x and y are passed (by address) into func() inside a
// parallel region; the FileCheck line expects exactly one of the two allocas
// to be instrumented with __typeart_alloc_stack.
void foo() {
// check-inst: define {{.*}} @foo
// check-inst: call void @__typeart_alloc_stack(i8* %0, i32 2, i64 1)
int x = 1;
int y = 2;
#pragma omp parallel
{ func(&x, &y); }
}
// Variant of func(): 'x' escapes through MPI_Send both inside the parallel
// loop and once more after it, so the filter must again keep the pointee
// allocation; FileCheck directives below pin the expected instrumentation.
void func_other(int* x, int* e) {
// firstprivate > every thread has a private copy of addr(!) x
// check-inst: define {{.*}} @func_other
// check-inst-NOT: call void @__typeart_alloc_stack
#pragma omp parallel for firstprivate(x), shared(e)
for (int i = 0; i < 10; ++i) {
// Analysis should not filter x, but e...
MPI_Send(x, *e);
}
// second escape of x outside the parallel region
MPI_Send(x, *e);
}
// Driver mirroring foo() but with a runtime-initialized x; again exactly one
// stack allocation is expected to be instrumented (see FileCheck line).
void bar(int x_other) {
// check-inst: define {{.*}} @bar
// check-inst: call void @__typeart_alloc_stack(i8* %0, i32 2, i64 1)
int x = x_other;
int y = 2;
#pragma omp parallel
{ func_other(&x, &y); }
}
// CHECK: TypeArtPass [Heap & Stack]
// CHECK-NEXT: Malloc : 0
// CHECK-NEXT: Free : 0
// CHECK-NEXT: Alloca : 2
// CHECK-NEXT: Global : 0
|
trilinos_residualbased_incrementalupdate_variable_property_static_scheme.h | // KRATOS _____ _ _ _
// |_ _| __(_) (_)_ __ ___ ___
// | || '__| | | | '_ \ / _ \/ __|
// | || | | | | | | | | (_) \__
// |_||_| |_|_|_|_| |_|\___/|___/ APPLICATION
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
//
#if !defined(KRATOS_TRILINOS_STATIC_SCHEME_VARIABLE_PROPERTY )
#define KRATOS_TRILINOS_STATIC_SCHEME_VARIABLE_PROPERTY
/* System includes */
/* External includes */
#include "Epetra_Import.h"
/* Project includes */
#include "includes/c2c_variables.h"
#include "includes/convection_diffusion_settings.h"
#include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h"
namespace Kratos
{
/**@name Kratos Globals */
/*@{ */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
/**@name Enum's */
/*@{ */
/*@} */
/**@name Functions */
/*@{ */
/*@} */
/**@name Kratos Classes */
/*@{ */
/** Short class definition.
This class provides the implementation of the basic tasks that are needed by the solution strategy.
It is intended to be the place for tailoring the solution strategies to problem specific tasks.
Detail class definition.
\URL[Example of use html]{ extended_documentation/no_ex_of_use.html}
\URL[Example of use pdf]{ extended_documentation/no_ex_of_use.pdf}
\URL[Example of use doc]{ extended_documentation/no_ex_of_use.doc}
\URL[Example of use ps]{ extended_documentation/no_ex_of_use.ps}
\URL[Extended documentation html]{ extended_documentation/no_ext_doc.html}
\URL[Extended documentation pdf]{ extended_documentation/no_ext_doc.pdf}
\URL[Extended documentation doc]{ extended_documentation/no_ext_doc.doc}
\URL[Extended documentation ps]{ extended_documentation/no_ext_doc.ps}
*/
/** Static incremental-update scheme whose nodal material properties are driven
 *  by temperature tables stored on the model part.
 *
 *  Table layout (ModelPart table id -> content, as used below):
 *    1: density, 2: specific heat, 3: solid fraction,
 *    4: d(solid fraction)/dT, 5: conductivity, 7: HTC (currently unused).
 *
 *  Nodes with DISTANCE <= 0 are treated as the material domain and take their
 *  properties from the tables; the remaining (air) nodes get hard-coded
 *  properties. ENTHALPY is initialized in Initialize() and refreshed every
 *  step in InitializeSolutionStep() / Update().
 */
template<class TSparseSpace,
         class TDenseSpace //= DenseSpace<double>
        >
class TrilinosResidualBasedIncrementalUpdateStaticVariablePropertyScheme : public ResidualBasedIncrementalUpdateStaticScheme<TSparseSpace,TDenseSpace>
{
public:
    KRATOS_CLASS_POINTER_DEFINITION( TrilinosResidualBasedIncrementalUpdateStaticVariablePropertyScheme);

    typedef Scheme<TSparseSpace,TDenseSpace> BaseType;
    typedef typename BaseType::TDataType TDataType;
    typedef typename BaseType::DofsArrayType DofsArrayType;
    typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
    typedef typename BaseType::TSystemVectorType TSystemVectorType;
    typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
    typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;

    /// Default constructor.
    TrilinosResidualBasedIncrementalUpdateStaticVariablePropertyScheme():
        ResidualBasedIncrementalUpdateStaticScheme<TSparseSpace,TDenseSpace>()
    {}

    /// Destructor.
    virtual ~TrilinosResidualBasedIncrementalUpdateStaticVariablePropertyScheme() {}

    /** Initialize nodal properties from the temperature tables and assign an
     *  initial enthalpy over the whole solution-step buffer.
     *  @param r_model_part model part carrying the tables and the nodes. */
    void Initialize(
        ModelPart& r_model_part
    ) override
    {
        ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
        ConvectionDiffusionSettings::Pointer my_settings = CurrentProcessInfo.GetValue(CONVECTION_DIFFUSION_SETTINGS);
        const Variable<double>& rUnknownVar = my_settings->GetUnknownVariable();
        const Variable<double>& rDensityVar = my_settings->GetDensityVariable();
        const Variable<double>& rDiffusionVar = my_settings->GetDiffusionVariable();
        // NOTE(review): table 1 is fetched but not read below; the call is
        // retained so a missing table still surfaces here — confirm it can be
        // dropped before removing it.
        ModelPart::TableType rDensityVar_table = r_model_part.GetTable(1);
        ModelPart::TableType C_table = r_model_part.GetTable(2);
        ModelPart::TableType F_table = r_model_part.GetTable(3);
        ModelPart::TableType DF_DT_table = r_model_part.GetTable(4);
        ModelPart::TableType rDiffusionVar_table = r_model_part.GetTable(5);
        double density_var = r_model_part.GetProcessInfo()[DENSITY];
        const double latent_heat = r_model_part.GetProcessInfo()[LATENT_HEAT];
        const unsigned int buffer_size = r_model_part.GetBufferSize();
        #pragma omp parallel for
        for (int k = 0; k < static_cast<int>(r_model_part.Nodes().size()); k++)
        {
            ModelPart::NodesContainerType::iterator ind = r_model_part.NodesBegin() + k;
            const double unknown_val = ind->FastGetSolutionStepValue(rUnknownVar);
            const double dist = ind->FastGetSolutionStepValue(DISTANCE);
            double specific_heat_var = C_table.GetValue(unknown_val);
            double conductivity_var = rDiffusionVar_table.GetValue(unknown_val);
            if(dist <= 0)
            {
                // Material domain: table-driven properties, written into the
                // current and previous buffer positions.
                double solid_fraction_var = F_table.GetValue(unknown_val);
                double solid_fraction_rate_var = DF_DT_table.GetValue(unknown_val);
                ind->FastGetSolutionStepValue(rDensityVar) = density_var;
                ind->FastGetSolutionStepValue(rDensityVar,1) = density_var;
                ind->FastGetSolutionStepValue(SPECIFIC_HEAT) = specific_heat_var;
                ind->FastGetSolutionStepValue(SPECIFIC_HEAT,1) = specific_heat_var;
                ind->FastGetSolutionStepValue(SOLIDFRACTION) = solid_fraction_var;
                ind->FastGetSolutionStepValue(SOLIDFRACTION,1) = solid_fraction_var;
                ind->FastGetSolutionStepValue(SOLIDFRACTION_RATE) = solid_fraction_rate_var;
                ind->FastGetSolutionStepValue(SOLIDFRACTION_RATE,1) = solid_fraction_rate_var;
                ind->FastGetSolutionStepValue(rDiffusionVar) = conductivity_var;
                ind->FastGetSolutionStepValue(rDiffusionVar,1) = conductivity_var;
                // Initial enthalpy: sensible heat plus latent heat of the
                // liquid fraction, replicated over the whole buffer.
                const double initial_enthalpy = specific_heat_var*unknown_val + (1.0-ind->FastGetSolutionStepValue(SOLIDFRACTION))*latent_heat;
                for(unsigned int i=0; i<buffer_size; i++)
                    ind->FastGetSolutionStepValue(ENTHALPY,i) = initial_enthalpy;
            }
            else
            {
                // Air domain: fixed properties and boosted conductivity.
                const double specific_heat_air = 1000.0;
                ind->FastGetSolutionStepValue(rDensityVar) = 1.0;
                ind->FastGetSolutionStepValue(SPECIFIC_HEAT) = specific_heat_air;
                ind->FastGetSolutionStepValue(SOLIDFRACTION) = 0.0;
                ind->FastGetSolutionStepValue(SOLIDFRACTION_RATE) = 0.0;
                ind->FastGetSolutionStepValue(rDiffusionVar) = conductivity_var*1000.0; //0.05
                for(unsigned int i=0; i<buffer_size; i++)
                    ind->FastGetSolutionStepValue(ENTHALPY,i) = specific_heat_air*unknown_val;
            }
        }
    }

    /** Refresh table-driven nodal properties at the start of every solution
     *  step, after calling the base-class implementation. */
    void InitializeSolutionStep(
        ModelPart& r_model_part,
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b
    ) override
    {
        KRATOS_TRY

        BaseType::InitializeSolutionStep(r_model_part,A,Dx,b);

        ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
        ConvectionDiffusionSettings::Pointer my_settings = CurrentProcessInfo.GetValue(CONVECTION_DIFFUSION_SETTINGS);
        const Variable<double>& rUnknownVar = my_settings->GetUnknownVariable();
        const Variable<double>& rDensityVar = my_settings->GetDensityVariable();
        const Variable<double>& rDiffusionVar = my_settings->GetDiffusionVariable();
        // NOTE(review): tables 1 and 7 are fetched but not read below; the
        // calls are retained to keep error behavior unchanged.
        ModelPart::TableType rDensityVar_table = r_model_part.GetTable(1);
        ModelPart::TableType C_table = r_model_part.GetTable(2);
        ModelPart::TableType F_table = r_model_part.GetTable(3);
        ModelPart::TableType DF_DT_table = r_model_part.GetTable(4);
        ModelPart::TableType rDiffusionVar_table = r_model_part.GetTable(5);
        ModelPart::TableType HTC_table = r_model_part.GetTable(7);
        double density_var = r_model_part.GetProcessInfo()[DENSITY];
        #pragma omp parallel for
        for (int k = 0; k < static_cast<int>(r_model_part.Nodes().size()); k++)
        {
            ModelPart::NodesContainerType::iterator ind = r_model_part.NodesBegin() + k;
            const double unknown_val = ind->FastGetSolutionStepValue(rUnknownVar);
            const double dist = ind->FastGetSolutionStepValue(DISTANCE);
            double specific_heat_var = C_table.GetValue(unknown_val);
            if(dist <= 0)
            {
                // Material domain: refresh table-driven values; also store
                // solid fraction and enthalpy in the non-historical database.
                double solid_fraction_var = F_table.GetValue(unknown_val);
                double solid_fraction_rate_var = DF_DT_table.GetValue(unknown_val);
                double conductivity_var = rDiffusionVar_table.GetValue(unknown_val);
                ind->FastGetSolutionStepValue(rDensityVar) = density_var;
                ind->FastGetSolutionStepValue(SPECIFIC_HEAT) = specific_heat_var;
                ind->FastGetSolutionStepValue(SOLIDFRACTION) = solid_fraction_var;
                ind->FastGetSolutionStepValue(SOLIDFRACTION_RATE) = solid_fraction_rate_var;
                ind->FastGetSolutionStepValue(rDiffusionVar) = conductivity_var;
                // ENTHALPY increment is currently disabled (kept at 0.0):
                // the incremental form would be
                // Hn+1 = Hn + c*(Tn+1 - Tn) + latent_heat*dFliquid.
                const double delta_enthalpy = 0.0;
                ind->FastGetSolutionStepValue(ENTHALPY) = delta_enthalpy;
                ind->GetValue(ENTHALPY) = ind->FastGetSolutionStepValue(ENTHALPY);
                ind->GetValue(SOLIDFRACTION) = solid_fraction_var;
                ind->GetValue(SOLIDFRACTION_RATE) = solid_fraction_rate_var;
            }
            else
            {
                // Air domain: fixed properties and boosted conductivity.
                const double conductivity_var = rDiffusionVar_table.GetValue(unknown_val);
                const double specific_heat_air = 1000.0;
                ind->FastGetSolutionStepValue(rDensityVar) = 1.0;
                ind->FastGetSolutionStepValue(SPECIFIC_HEAT) = specific_heat_air;
                ind->FastGetSolutionStepValue(SOLIDFRACTION) = 0.0;
                ind->FastGetSolutionStepValue(SOLIDFRACTION_RATE) = 0.0;
                ind->FastGetSolutionStepValue(rDiffusionVar) = conductivity_var*1000.0; //0.05
                ind->FastGetSolutionStepValue(ENTHALPY) = specific_heat_air*unknown_val;
                ind->GetValue(ENTHALPY) = specific_heat_air*unknown_val;
                ind->GetValue(SOLIDFRACTION) = 0.0;
                ind->GetValue(SOLIDFRACTION_RATE) = 0.0;
            }
        }

        KRATOS_CATCH("")
    }

    /** Perform the base-class update of the unknowns, then refresh the
     *  table-driven properties and the enthalpy. */
    void Update(
        ModelPart& r_model_part,
        DofsArrayType& rDofSet,
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b
    ) override
    {
        KRATOS_TRY;

        BaseType::Update(r_model_part,rDofSet,A,Dx,b);
        AdditionalTablePropertyUpdate(r_model_part);

        KRATOS_CATCH("")
    }

private:

    /** Re-evaluate solid fraction and enthalpy after the unknowns have been
     *  updated (called from Update()).
     *  Renamed from the misspelled "AddiotionalTablePropertyUpdate". */
    void AdditionalTablePropertyUpdate(ModelPart& r_model_part)
    {
        KRATOS_TRY;

        ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
        ConvectionDiffusionSettings::Pointer my_settings = CurrentProcessInfo.GetValue(CONVECTION_DIFFUSION_SETTINGS);
        const Variable<double>& rUnknownVar = my_settings->GetUnknownVariable();
        // NOTE(review): tables 1, 2 and 5 are fetched but not read below; the
        // calls are retained to keep error behavior unchanged.
        ModelPart::TableType rDensityVar_table = r_model_part.GetTable(1);
        ModelPart::TableType C_table = r_model_part.GetTable(2);
        ModelPart::TableType F_table = r_model_part.GetTable(3);
        ModelPart::TableType DF_DT_table = r_model_part.GetTable(4);
        ModelPart::TableType rDiffusionVar_table = r_model_part.GetTable(5);
        const double latent_heat = r_model_part.GetProcessInfo()[LATENT_HEAT];
        #pragma omp parallel for
        for (int k = 0; k < static_cast<int>(r_model_part.Nodes().size()); k++)
        {
            ModelPart::NodesContainerType::iterator ind = r_model_part.NodesBegin() + k;
            const double unknown_val = ind->FastGetSolutionStepValue(rUnknownVar);
            const double dist = ind->FastGetSolutionStepValue(DISTANCE);
            if(dist <= 0)
            {
                // Material domain: update the solid fraction and compute the
                // enthalpy increment
                //   dH = c*(Tn+1 - Tn) + latent_heat*dFliquid
                // (the accumulation with the previous H is disabled).
                double solid_fraction_var = F_table.GetValue(unknown_val);
                double solid_fraction_rate_var = DF_DT_table.GetValue(unknown_val);
                ind->FastGetSolutionStepValue(SOLIDFRACTION) = solid_fraction_var;
                ind->FastGetSolutionStepValue(SOLIDFRACTION_RATE) = solid_fraction_rate_var;
                const double Delta_T = unknown_val - ind->GetValue(rUnknownVar);
                const double avg_c = ind->FastGetSolutionStepValue(SPECIFIC_HEAT);
                const double delta_solid_fraction = ind->GetValue(SOLIDFRACTION) - ind->FastGetSolutionStepValue(SOLIDFRACTION); //(1-Sn+1) - (1-Sn)
                const double delta_enthalpy = Delta_T*avg_c + delta_solid_fraction*latent_heat;
                ind->FastGetSolutionStepValue(ENTHALPY) = delta_enthalpy;
            }
            else
            {
                // Air domain: sensible heat only.
                const double specific_heat_air = ind->FastGetSolutionStepValue(SPECIFIC_HEAT); //1000.0;
                ind->FastGetSolutionStepValue(ENTHALPY) = specific_heat_air*unknown_val;
            }
        }

        KRATOS_CATCH("")
    }

}; /* Class TrilinosResidualBasedIncrementalUpdateStaticVariablePropertyScheme */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
} /* namespace Kratos.*/
#endif /* KRATOS_TRILINOS_STATIC_SCHEME_VARIABLE_PROPERTY defined */
|
integrators.h | #pragma once
#include "geometries.h"
namespace integrators{
struct VelocityVerlet{
    /*!
     * Velocity Verlet integrator, first half step: apply the boundary
     * condition, half-kick the velocity with the force stored from the
     * previous step, then drift the position. Aborts the program if an atom
     * has left the simulation box.
     */
    static inline void first_step(Particles &particles, Geometry *geometry) {
        #pragma omp parallel for if(particles.atoms.numOfAtoms > 6000)
        for (int i = 0; i < particles.atoms.numOfAtoms; i++) {
            auto &atom = particles.atoms[i];
            geometry->boundary(atom);
            atom->vel += 0.5 * Base::tStep * atom->oldForce / atom->mass; //[nm/ps]
            atom->pos += Base::tStep * atom->vel;
            // Multiply with dimensionality to suppress inactive axes.
            atom->pos = atom->pos.cwiseProduct(Base::dimensionality);
            // Sanity check: no position may exceed the box diagonal (+ slack).
            if (atom->pos.norm() > sqrt(3) * Base::boxDim + 1) {
                printf("\nAtom outside box\n");
                std::cout << atom->pos << std::endl;
                exit(1);
            }
        }
    }

    /*!
     * Velocity Verlet integrator, second half step: half-kick with the freshly
     * computed force and store it for the next first_step.
     */
    static inline void second_step(Particles &particles) {
        #pragma omp parallel for if(particles.atoms.numOfAtoms > 6000)
        for (int i = 0; i < particles.atoms.numOfAtoms; i++) {
            auto &atom = particles.atoms[i];
            atom->vel += 0.5 * Base::tStep * atom->force / atom->mass;
            atom->oldForce = atom->force;
        }
    }
};
}
|
GB_AxB_rowscale_meta.c | //------------------------------------------------------------------------------
// GB_AxB_rowscale_meta: C=D*B where D is a square diagonal matrix
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// All entries in C=D*B are computed entirely in parallel.
// B and C can be jumbled. D cannot, but it is a diagonal matrix so it is
// never jumbled.
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
// Bx is unused if the operator is FIRST or PAIR
#include "GB_unused.h"
ASSERT (GB_JUMBLED_OK (C)) ;
ASSERT (!GB_JUMBLED (D)) ;
ASSERT (GB_JUMBLED_OK (B)) ;
//--------------------------------------------------------------------------
// get C, D, and B
//--------------------------------------------------------------------------
// Pattern-only inputs carry no numeric values, so their value arrays stay NULL.
const GB_ATYPE *restrict Dx = (GB_ATYPE *) (D_is_pattern ? NULL : D->x) ;
const GB_BTYPE *restrict Bx = (GB_BTYPE *) (B_is_pattern ? NULL : B->x) ;
const int64_t *restrict Bi = B->i ;
const int64_t bnz = GB_IS_FULL (B) ? GB_NNZ_FULL (B) : GB_NNZ (B) ;
const int64_t bvlen = B->vlen ;
//--------------------------------------------------------------------------
// C=D*B
//--------------------------------------------------------------------------
// Flat partition of B's bnz entries across tasks; cap the task count at
// bnz so no task gets an empty range.
int ntasks = nthreads ;
ntasks = GB_IMIN (bnz, ntasks) ;
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < ntasks ; tid++)
{
int64_t pstart, pend ;
GB_PARTITION (pstart, pend, bnz, tid, ntasks) ;
GB_PRAGMA_SIMD_VECTORIZE
for (int64_t p = pstart ; p < pend ; p++)
{
int64_t i = GBI (Bi, p, bvlen) ; // get row index of B(i,j)
GB_GETA (dii, Dx, i) ; // dii = D(i,i)
GB_GETB (bij, Bx, p) ; // bij = B(i,j)
GB_BINOP (GB_CX (p), dii, bij, 0, 0) ; // C(i,j) = dii*bij
}
}
}
|
stfd.c | /*
File: serialprogram.c
Author(s):
Yang Liu - University of the Pacific, ECPE 293, Spring 2017
Cody Balos - University of the Pacific, ECPE 293, Spring 2017
Description:
This program implements Shi Tomasi Feature Detection.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <sys/time.h>
#include <omp.h>
#include "image_io.h"
#include "stfd.h"
#include "timing.h"
#if !BENCHMARKMODE
#undef TIME_BLOCK_EXEC
#define TIME_BLOCK_EXEC(msg, ...) do { \
__VA_ARGS__ \
} while(0);
#endif
/*
 * Entry point: parse CLI options, then run the Shi-Tomasi pipeline
 * (read image -> build kernels -> gradients -> eigenvalues -> feature
 * selection -> draw -> write "corners.pgm"), timing each stage.
 *
 * FIXES:
 *  - windowsize and max_features are integer options; windowsize is now
 *    parsed with atoi() instead of atof() (the old code silently went
 *    through a truncating double conversion).
 *  - "-v"/"-vv" without an image path no longer proceeds with a NULL
 *    filepath; it prints usage and exits like the no-argument case.
 */
int main(int argc, char **argv)
{
    TIME_BLOCK_EXEC("end_to_end",
    { // START "END-TO-END" TIMING BLOCK
    // User provided arguments
    char *filepath = NULL;   // path to the image to process
    int verbose_lvl = 0;     // how much information to print to the console
    float sigma = 1.1;       // sigma of the gaussian distribution
    int windowsize = 4;      // size of a pixel 'neighborhood'
    int max_features = 1024; // # of features
    // argument parsing logic
    if (argc > 1) {
        if (!strcmp(argv[1], "-h")) {
            help(NULL);
        }
        else if (!strcmp(argv[1], "-v") || !strcmp(argv[1], "-vv")) {
            // FIX: require the image path after -v/-vv instead of reading a
            // NULL filepath.
            if (argc < 3)
                help("You must provide the path to the image to process.");
            verbose_lvl = strcmp(argv[1], "-vv") ? 1 : 2;
            filepath = argv[2];
            if (argc >= 4)
                sigma = atof(argv[3]);
            if (argc >= 5)
                windowsize = atoi(argv[4]); // FIX: integer option, use atoi
            if (argc >= 6)
                max_features = atoi(argv[5]);
        }
        else {
            filepath = argv[1];
            if (argc >= 3)
                sigma = atof(argv[2]);
            if (argc >= 4)
                windowsize = atoi(argv[3]); // FIX: integer option, use atoi
            if (argc >= 5)
                max_features = atoi(argv[4]);
        }
    } else {
        help("You must provide the path to the image to process.");
    }
#if BENCHMARKMODE
    verbose_lvl = 0;
#endif
    if (verbose_lvl > 0) {
        printf("detecting features for %s\n", filepath);
        printf("sigma = %0.3f, windowsize = %d, max_features = %d\n", sigma, windowsize, max_features);
        printf("max threads = %d\n", omp_get_max_threads());
    }
#if BENCHMARKMODE
    printf("detecting features for %s\n", filepath);
    printf("sigma = %0.3f, windowsize = %d, max_features = %d\n", sigma, windowsize, max_features);
    printf("max threads = %d\n", omp_get_max_threads());
#endif
    int width;
    int height;
    int kernel_width;
    int a;
    // calculate kernel half-width from sigma; full width 2a+1 is always odd
    a = (int)round(2.5 * sigma - .5);
    kernel_width = 2 * a + 1;
    // malloc and read the image to be processed
    float *original_image;
    TIME_BLOCK_EXEC("disk_IO_read",
    {
        read_imagef(filepath, &original_image, &width, &height);
    })
#if BENCHMARKMODE
    printf("image_size\n%dpx\n", width);
#endif
    // malloc and generate the separable Gaussian and derivative kernels
    float *gkernel = (float *)malloc(sizeof(float) * kernel_width);
    float *dkernel = (float *)malloc(sizeof(float) * kernel_width);
    gen_kernel(gkernel, dkernel, sigma, a, kernel_width);
    // gradient buffers (horizontal/vertical) plus one scratch image
    float *hgrad = (float *)malloc(sizeof(float) * width * height);
    float *vgrad = (float *)malloc(sizeof(float) * width * height);
    float *tmp_image = (float *)malloc(sizeof(float) * width * height);
    // two separable convolution passes per gradient direction
    TIME_BLOCK_EXEC("convolution",
    {
        convolve(gkernel, original_image, tmp_image, width, height, kernel_width, 1, a);
        convolve(dkernel, tmp_image, vgrad, width, height, 1, kernel_width, a);
        convolve(gkernel, original_image, tmp_image, width, height, 1, kernel_width, a);
        convolve(dkernel, tmp_image, hgrad, width, height, kernel_width, 1, a);
    })
    free(tmp_image);
    free(gkernel);
    free(dkernel);
    // Compute the eigenvalues of each pixel's z matrix. After this we can free the gradients.
    data_wrapper_t *eigenvalues = (data_wrapper_t *)malloc(sizeof(data_wrapper_t) * width * height);
    TIME_BLOCK_EXEC("compute_eigenvalues",
    {
        compute_eigenvalues(hgrad, vgrad, height, width, windowsize, eigenvalues);
    })
    free(hgrad);
    free(vgrad);
    // Find the features based on the eigenvalues.
    data_wrapper_t *features;
    unsigned int features_count;
    TIME_BLOCK_EXEC("find_features",
    {
        features_count = find_features(eigenvalues, max_features, width, height, &features);
    })
    free(eigenvalues);
    if (verbose_lvl > 0) {
        printf("%d features detected\n", features_count);
    }
    if (verbose_lvl > 1) {
        printf("\t");
        print_features(features, features_count);
    }
    // Mark the features in the output image.
    TIME_BLOCK_EXEC("draw_features",
    {
        draw_features(features, features_count, original_image, width, height);
    })
    free(features);
    // Now we write the output.
    char corner_image[30];
    sprintf(corner_image, "corners.pgm");
    TIME_BLOCK_EXEC("disk_IO_write",
    {
        write_imagef(corner_image, original_image, width, height);
    })
    // Free stuff leftover.
    free(original_image);
    }) // END "END-TO-END" TIMING BLOCK
    return 0;
}
/* Paint a black, clipped (2*radius+1)^2 square at every detected feature.
 * Features store (x, y) = (row, column); radius scales with image width. */
void draw_features(data_wrapper_t *features, unsigned int count, float *image, int image_width, int image_height)
{
    const int radius = image_width * 0.0025;
    for (unsigned int n = 0; n < count; ++n) {
        const int row = features[n].x;
        const int col = features[n].y;
        for (int dr = -radius; dr <= radius; ++dr) {
            const int r = row + dr;
            if (r < 0 || r >= image_height)
                continue;
            for (int dc = -radius; dc <= radius; ++dc) {
                const int c = col + dc;
                if (c >= 0 && c < image_width)
                    image[r * image_width + c] = 0;
            }
        }
    }
}
/*
 * Select up to max_features strong, well-separated features from the
 * eigenvalue map (sorted strongest-first). Returns the number selected;
 * *features receives a malloc'd array owned by the caller.
 *
 * FIX: the border rejection compared the ROW coordinate (x, range
 * [0, image_height)) against image_width and the COLUMN coordinate (y)
 * against image_height. On non-square images this kept border pixels and
 * discarded valid interior ones. The comparisons now use the matching
 * dimensions (x vs height, y vs width).
 */
unsigned int find_features(data_wrapper_t *eigenvalues, int max_features, int image_width, int image_height, data_wrapper_t **features)
{
    size_t image_size = (size_t)image_height * image_width;
    TIME_BLOCK_EXEC("features_sort",
    {
        // Sort eigenvalues in descending order while keeping their corresponding pixel index in the image.
        qsort(eigenvalues, image_size, sizeof *eigenvalues, sort_data_wrapper_value_desc);
    })
    // Create the features buffer based on the max_features value.
    *features = (data_wrapper_t*)malloc(sizeof(data_wrapper_t)*max_features);
    // Fill the features buffer!
    unsigned int features_count = 0;
    const int ignore_x = 3; // ignore this many pixel rows at top/bottom
    const int ignore_y = 3; // ignore this many pixel columns at left/right
    for (size_t i = 0; i < image_size && features_count < (unsigned int)max_features; ++i) {
        // Reject candidates too close to any image border.
        if (eigenvalues[i].x <= ignore_x || eigenvalues[i].y <= ignore_y ||
            eigenvalues[i].x >= image_height-1-ignore_x || eigenvalues[i].y >= image_width-1-ignore_y) {
            continue;
        }
        // Seed with the strongest acceptable feature (the old code reached
        // this state through a self-distance check; outcome is identical).
        if (features_count == 0) {
            (*features)[0] = eigenvalues[i];
            features_count++;
            continue;
        }
        // Accept only candidates more than 8 Manhattan distance from every
        // feature already kept.
        int is_good = 1;
        for (unsigned int j = 0; j < features_count; ++j) {
            int manhattan = abs((*features)[j].x - eigenvalues[i].x) + abs((*features)[j].y - eigenvalues[i].y);
            if (manhattan <= 8) {
                is_good = 0;
                break;
            }
        }
        if (is_good) {
            (*features)[features_count] = eigenvalues[i];
            features_count++;
        }
    }
    return features_count;
}
/* qsort comparator: order by eigenvalue, largest first. */
int sort_data_wrapper_value_desc(const void *a, const void *b)
{
    const data_wrapper_t *lhs = a;
    const data_wrapper_t *rhs = b;
    if (lhs->data > rhs->data) return -1;
    if (lhs->data < rhs->data) return 1;
    return 0;
}
/* qsort comparator: ascending by row (x), ties broken by column (y). */
int sort_data_wrapper_index_asc(const void *a, const void *b)
{
    const data_wrapper_t *lhs = a;
    const data_wrapper_t *rhs = b;
    if (lhs->x != rhs->x)
        return (lhs->x > rhs->x) - (lhs->x < rhs->x);
    return (lhs->y > rhs->y) - (lhs->y < rhs->y);
}
/* For every pixel, accumulate the structure tensor (sums of Ix*Ix, Iy*Iy,
 * Ix*Iy) over a windowsize x windowsize neighborhood clipped to the image,
 * and store the pixel's (row, col) plus the tensor's smaller eigenvalue. */
void compute_eigenvalues(float *hgrad, float *vgrad, int image_height, int image_width, int windowsize, data_wrapper_t *eigenvalues)
{
    const int half = windowsize / 2; /* same value floor(windowsize/2) gave */
    int row, col;
#pragma omp parallel for private(col)
    for (row = 0; row < image_height; row++) {
        for (col = 0; col < image_width; col++) {
            float sum_xx = 0;
            float sum_yy = 0;
            float sum_xy = 0;
            for (int wy = 0; wy < windowsize; wy++) {
                for (int wx = 0; wx < windowsize; wx++) {
                    const int r = row - half + wy;
                    const int c = col - half + wx;
                    if (r < 0 || r >= image_height || c < 0 || c >= image_width)
                        continue;
                    const float ix = hgrad[r * image_width + c];
                    const float iy = vgrad[r * image_width + c];
                    sum_xx += ix * ix;
                    sum_yy += iy * iy;
                    sum_xy += ix * iy;
                }
            }
            data_wrapper_t *out = &eigenvalues[row * image_width + col];
            out->x = row;
            out->y = col;
            out->data = min_eigenvalue(sum_xx, sum_xy, sum_xy, sum_yy);
        }
    }
}
float min_eigenvalue(float a, float b, float c, float d)
{
float ev_one = (a + d)/2 + pow(((a + d) * (a + d))/4 - (a * d - b * c), 0.5);
float ev_two = (a + d)/2 - pow(((a + d) * (a + d))/4 - (a * d - b * c), 0.5);
if (ev_one >= ev_two){
return ev_two;
}
else{
return ev_one;
}
}
/* 2D convolution of image with a kernel_width x kernel_height kernel,
 * centered on each pixel and clipped at the borders (no padding).
 * The trailing 'half' parameter is unused but retained for interface
 * compatibility with existing callers. */
void convolve(float *kernel, float *image, float *resultimage, int image_width, int image_height, int kernel_width, int kernel_height, int half)
{
    const int cy = kernel_height / 2;
    const int cx = kernel_width / 2;
    int row, col;
#pragma omp parallel for private(col)
    for (row = 0; row < image_height; row++) {
        for (col = 0; col < image_width; col++) {
            float acc = 0.0;  /* fresh accumulator per output pixel */
            for (int ky = 0; ky < kernel_height; ky++) {
                for (int kx = 0; kx < kernel_width; kx++) {
                    const int r = row + ky - cy;
                    const int c = col + kx - cx;
                    /* Skip taps that fall outside the image. */
                    if (r >= 0 && r < image_height && c >= 0 && c < image_width)
                        acc += image[r * image_width + c] * kernel[ky * kernel_width + kx];
                }
            }
            resultimage[row * image_width + col] = acc;
        }
    }
}
void gen_kernel(float *gkernel, float *dkernel, float sigma, int a, int w)
{
int i;
float sum_gkern = 0;
float sum_dkern = 0;
for (i = 0; i < w; i++) {
gkernel[i] = (float)exp( (float)(-1.0 * (i-a) * (i-a)) / (2 * sigma * sigma));
dkernel[i] = (float)(-1 * (i - a)) * (float)exp( (float)(-1.0 * (i-a) * (i-a)) / (2 * sigma * sigma));
sum_gkern = sum_gkern + gkernel[i];
sum_dkern = sum_dkern - (float)i * dkernel[i];
}
//reverse the kernel by creating a new kernel, yes not ideal
float *newkernel = (float *)malloc(sizeof(float) * w);
for (i = 0; i < w; i++) {
dkernel[i] = dkernel[i] / sum_dkern;
gkernel[i] = gkernel[i] / sum_gkern;
newkernel[w-i] = dkernel[i];
}
//copy new kernel back in
for (i = 0; i < w; i++)
dkernel[i] = newkernel[i+1];
free(newkernel);
}
/* Print an optional error message followed by the usage text, then exit. */
void help(const char *err)
{
    static const char *const usage_lines[] = {
        "usage: ./stfd [-v,-vv] <full path to the image> [sigma] [windowsize] [num_features] \n",
        "flags:\n",
        "\t-h: show this help menu\n",
        "\t-v: output basic execution information\n",
        "\t-vv: output all information... good for debugging\n",
        "arguments:\n",
        "\tsigma: the sigma value for the Gaussian distribution used to form the convolution mask.\n",
        "\twindowsize: the size of a pixel 'neighborhood' in an image\n",
        "\tnum_features: how many features to extract\n",
    };
    if (err != NULL)
        printf("%s\n", err);
    for (size_t i = 0; i < sizeof usage_lines / sizeof usage_lines[0]; i++)
        fputs(usage_lines[i], stdout);
    exit(0);
}
/* Print the features as (row,col) pairs, 15 per line, after sorting them
 * into raster (row-major) order. */
void print_features(data_wrapper_t *features, unsigned int count)
{
    qsort(features, count, sizeof *features, sort_data_wrapper_index_asc);
    for (unsigned int i = 0; i < count; ++i) {
        const int line_break = (i % 15 == 0) && (i != 0);
        if (line_break)
            printf("(%d,%d)\n\t", features[i].x, features[i].y);
        else
            printf("(%d,%d) ", features[i].x, features[i].y);
    }
    printf("\n");
}
|
bodysystemcpu_impl.h | /*
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include "bodysystemcpu.h"
#include <assert.h>
#include <memory.h>
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <helper_cuda.h>
#include <algorithm>
#include "tipsy.h"
#ifdef OPENMP
#include <omp.h>
#endif
// Construct a CPU N-body system with numBodies bodies; allocates and
// zero-fills the position/velocity/force arrays via _initialize.
template <typename T>
BodySystemCPU<T>::BodySystemCPU(int numBodies)
: m_numBodies(numBodies),
m_bInitialized(false),
m_force(0),
m_softeningSquared(.00125f),
m_damping(0.995f)
{
m_pos = 0;
m_vel = 0;
_initialize(numBodies);
}
// Release all per-body arrays and mark the system empty.
template <typename T>
BodySystemCPU<T>::~BodySystemCPU()
{
_finalize();
m_numBodies = 0;
}
// Allocate zero-initialized storage: 4 components per body for position
// and velocity, 3 for force. Must not be called on an initialized system.
template <typename T>
void BodySystemCPU<T>::_initialize(int numBodies)
{
assert(!m_bInitialized);
m_numBodies = numBodies;
m_pos = new T[m_numBodies*4];
m_vel = new T[m_numBodies*4];
m_force = new T[m_numBodies*3];
memset(m_pos, 0, m_numBodies*4*sizeof(T));
memset(m_vel, 0, m_numBodies*4*sizeof(T));
memset(m_force, 0, m_numBodies*3*sizeof(T));
m_bInitialized = true;
}
// Free the per-body arrays allocated by _initialize.
template <typename T>
void BodySystemCPU<T>::_finalize()
{
assert(m_bInitialized);
delete [] m_pos;
delete [] m_vel;
delete [] m_force;
m_bInitialized = false;
}
// Load body positions/velocities from a Tipsy snapshot, replacing any
// previously initialized state.
template <typename T>
void BodySystemCPU<T>::loadTipsyFile(const std::string &filename)
{
if (m_bInitialized)
_finalize();
vector< typename vec4<T>::Type > positions;
vector< typename vec4<T>::Type > velocities;
vector< int> ids;
int nBodies = 0;
// NOTE(review): nFirst/nSecond/nThird are filled by the reader but unused
// here — presumably per-species counts; confirm against read_tipsy_file.
int nFirst=0, nSecond=0, nThird=0;
read_tipsy_file(positions,
velocities,
ids,
filename,
nBodies,
nFirst,
nSecond,
nThird);
_initialize(nBodies);
// NOTE(review): the copy size uses sizeof(vec4<T>) (the traits struct),
// not sizeof(typename vec4<T>::Type) — confirm the two sizes match.
memcpy(m_pos, &positions[0], sizeof(vec4<T>)*nBodies);
memcpy(m_vel, &velocities[0], sizeof(vec4<T>)*nBodies);
}
// Advance the whole system by one time step of length deltaTime.
template <typename T>
void BodySystemCPU<T>::update(T deltaTime)
{
assert(m_bInitialized);
_integrateNBodySystem(deltaTime);
//std::swap(m_currentRead, m_currentWrite);
}
// Return the raw host pointer for the requested per-body array.
// Any unrecognized value falls back to the position array, matching the
// original switch's default case.
template <typename T>
T *BodySystemCPU<T>::getArray(BodyArray array)
{
    assert(m_bInitialized);
    switch (array)
    {
        case BODYSYSTEM_VELOCITY:
            return m_vel;
        case BODYSYSTEM_POSITION:
        default:
            return m_pos;
    }
}
// Copy numBodies*4 values from data into the selected per-body array.
// Any unrecognized selector targets the position array, matching the
// original switch's default case.
template <typename T>
void BodySystemCPU<T>::setArray(BodyArray array, const T *data)
{
    assert(m_bInitialized);
    T *target = (array == BODYSYSTEM_VELOCITY) ? m_vel : m_pos;
    memcpy(target, data, m_numBodies*4*sizeof(T));
}
// Type-dispatched square root: generic path goes through double sqrt...
template<typename T>
T sqrt_T(T x)
{
return sqrt(x);
}
// ...while the float specialization stays in single precision via sqrtf.
template<>
float sqrt_T<float>(float x)
{
return sqrtf(x);
}
// Accumulate into accel[] the gravitational acceleration on body 0 due to
// body 1. posMass*[0..2] hold coordinates, posMass1[3] the source mass;
// softeningSquared regularizes the 1/r^3 singularity at zero separation.
template <typename T>
void bodyBodyInteraction(T accel[3], T posMass0[4], T posMass1[4], T softeningSquared)
{
T r[3];
// r_01 [3 FLOPS]
r[0] = posMass1[0] - posMass0[0];
r[1] = posMass1[1] - posMass0[1];
r[2] = posMass1[2] - posMass0[2];
// d^2 + e^2 [6 FLOPS]
T distSqr = r[0] * r[0] + r[1] * r[1] + r[2] * r[2];
distSqr += softeningSquared;
// invDistCube =1/distSqr^(3/2) [4 FLOPS (2 mul, 1 sqrt, 1 inv)]
T invDist = (T)1.0 / (T)sqrt((double)distSqr);
T invDistCube = invDist * invDist * invDist;
// s = m_j * invDistCube [1 FLOP]
T s = posMass1[3] * invDistCube;
// (m_1 * r_01) / (d^2 + e^2)^(3/2) [6 FLOPS]
accel[0] += r[0] * s;
accel[1] += r[1] * s;
accel[2] += r[2] * s;
}
// Accumulate the gravitational acceleration on every body from all bodies
// (O(N^2) all-pairs), storing the result in m_force.
//
// FIX: the previous 4x manually unrolled inner loop only checked
// j < m_numBodies once per group of four interactions, so it read past the
// end of m_pos whenever m_numBodies was not a multiple of 4. The unroll is
// kept for the bulk of the iterations, with a bounds-checked remainder loop
// for the final (m_numBodies % 4) bodies.
template <typename T>
void BodySystemCPU<T>::_computeNBodyGravitation()
{
#ifdef OPENMP
    #pragma omp parallel for
#endif
    for (int i = 0; i < m_numBodies; i++)
    {
        int indexForce = 3*i;
        T acc[3] = {0, 0, 0};
        // We unroll this loop 4X for a small performance boost.
        int j = 0;
        const int unrolledEnd = m_numBodies & ~3; // largest multiple of 4
        while (j < unrolledEnd)
        {
            bodyBodyInteraction<T>(acc, &m_pos[4*i], &m_pos[4*j], m_softeningSquared);
            j++;
            bodyBodyInteraction<T>(acc, &m_pos[4*i], &m_pos[4*j], m_softeningSquared);
            j++;
            bodyBodyInteraction<T>(acc, &m_pos[4*i], &m_pos[4*j], m_softeningSquared);
            j++;
            bodyBodyInteraction<T>(acc, &m_pos[4*i], &m_pos[4*j], m_softeningSquared);
            j++;
        }
        while (j < m_numBodies) // remainder (0..3 bodies)
        {
            bodyBodyInteraction<T>(acc, &m_pos[4*i], &m_pos[4*j], m_softeningSquared);
            j++;
        }
        m_force[indexForce ] = acc[0];
        m_force[indexForce+1] = acc[1];
        m_force[indexForce+2] = acc[2];
    }
}
// One Euler step: recompute all-pairs forces, then update each body's
// velocity (with damping) and position in place.
template <typename T>
void BodySystemCPU<T>::_integrateNBodySystem(T deltaTime)
{
_computeNBodyGravitation();
#ifdef OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < m_numBodies; ++i)
{
int index = 4*i;
int indexForce = 3*i;
T pos[3], vel[3], force[3];
pos[0] = m_pos[index+0];
pos[1] = m_pos[index+1];
pos[2] = m_pos[index+2];
// The 4th position component is used as the inverse mass below.
T invMass = m_pos[index+3];
vel[0] = m_vel[index+0];
vel[1] = m_vel[index+1];
vel[2] = m_vel[index+2];
force[0] = m_force[indexForce+0];
force[1] = m_force[indexForce+1];
force[2] = m_force[indexForce+2];
// acceleration = force / mass;
// new velocity = old velocity + acceleration * deltaTime
vel[0] += (force[0] * invMass) * deltaTime;
vel[1] += (force[1] * invMass) * deltaTime;
vel[2] += (force[2] * invMass) * deltaTime;
// Velocity damping keeps the system from gaining energy.
vel[0] *= m_damping;
vel[1] *= m_damping;
vel[2] *= m_damping;
// new position = old position + velocity * deltaTime
pos[0] += vel[0] * deltaTime;
pos[1] += vel[1] * deltaTime;
pos[2] += vel[2] * deltaTime;
m_pos[index+0] = pos[0];
m_pos[index+1] = pos[1];
m_pos[index+2] = pos[2];
m_vel[index+0] = vel[0];
m_vel[index+1] = vel[1];
m_vel[index+2] = vel[2];
}
}
|
SE1P_direct_real.c | #include "mex.h"
#include "SE_direct.h"
#define IDX prhs[0]
#define X prhs[1] // Source locations
#define Q prhs[2] // Source strengths
#define OPT prhs[3] // Parameters
#define PHI plhs[0] // Output
#ifndef VERBOSE
#define VERBOSE 0
#endif
/* common option-unpacking */
/* Unpack the mandatory fields (xi, box) and the optional truncation depth
 * (layers, -1 when absent) from the MATLAB options struct. */
void unpack_opt(ewald_opts* opt, const mxArray* mx_opt)
{
// mandatory options -- will trigger core dump if missing
opt->xi = mxGetScalar(mxGetField(mx_opt,0,"xi"));
if(opt->xi==0)
mexErrMsgTxt("xi cannot be zero");
double* box = mxGetPr(mxGetField(mx_opt,0,"box"));
// Only the first box length is read (the periodic direction for 1P sums).
opt->box[0] = box[0];
// layers: mandatory for ewald sums that are truncated
const mxArray* mx_layers = mxGetField(mx_opt,0,"layers");
if(mx_layers)
opt->layers = (int)mxGetScalar(mx_layers);
else
opt->layers = -1;
}
// MATLAB (one-based, doubles) to C (zero-based, integers) index translation
/* Convert MATLAB's 1-based double-valued indices to 0-based C ints. */
void index_translation(int* idx, const double* idx_d, int N)
{
    int i = 0;
    while (i < N) {
        idx[i] = (int)(idx_d[i]) - 1;
        ++i;
    }
}
#ifdef FORCE
/* Real-space part of the 1-periodic Ewald sum (FORCE variant): force on
 * each evaluation point idx[m] from all N sources, summed over +-layers
 * periodic images along the x direction. The self term (n == idx[m] at
 * p0 == 0) is skipped. */
void SE1P_direct_real(double* restrict force,
const int* restrict idx, int nidx,
const double* restrict x,
const double* restrict q, int N,
const ewald_opts opt)
{
double xi = opt.xi;
double xi2 = xi*xi;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(int m=0; m<nidx; m++)
{
double f[] = {0,0,0};
// Coordinates are stored column-major: x[n], x[n+N], x[n+2*N].
double xm[] = {x[idx[m]],x[idx[m]+N],x[idx[m]+2*N]};
for(int n=0; n<N; n++)
{
double rvec[] = {xm[0]-x[n], xm[1]-x[n+N], xm[2]-x[n+2*N]};
double qn = q[n];
for(int p0 = -opt.layers; p0<=opt.layers; p0++)
{
if(idx[m] == n && p0 == 0)
continue;
// Only the periodic (x) component is shifted by p0 box lengths.
double rvp[] = {rvec[0]+p0*opt.box[0],rvec[1],rvec[2]};
double r = sqrt(rvp[0]*rvp[0]+rvp[1]*rvp[1]+rvp[2]*rvp[2]);
double r2 = r*r;
double c = qn*(2*xi/sqrt(PI)*exp(-xi2*r2)+ erfc(xi*r)/r)/r2;
f[0] += c*rvp[0];
f[1] += c*rvp[1];
f[2] += c*rvp[2];
}
}
// Output laid out as three nidx-long columns.
// NOTE(review): the negation presumably gives force = -grad(phi) — confirm.
force[m ] = -f[0];
force[m+ nidx] = -f[1];
force[m+2*nidx] = -f[2];
}
}
#else
/* Real-space part of the 1-periodic Ewald sum (potential variant):
 * accumulates sum q_n erfc(xi r)/r over +-layers periodic images along x
 * into phi[m] for each evaluation point idx[m]; the self term is skipped. */
void SE1P_direct_real(double* restrict phi,
const int* restrict idx, int nidx,
const double* restrict x,
const double* restrict q, int N,
const ewald_opts opt)
{
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(int m=0; m<nidx; m++)
{
double p = 0;
for(int n=0; n<N; n++)
{
// Coordinates are stored column-major: x[n], x[n+N], x[n+2*N].
double rvec[] = {x[idx[m] ]-x[n ],
x[idx[m]+N ]-x[n+N ],
x[idx[m]+2*N]-x[n+2*N]};
double qn = q[n];
for(int p0 = -opt.layers; p0<=opt.layers; p0++)
{
if(idx[m] == n && p0 == 0)
continue;
// Only the x component is shifted by p0 box lengths (1-periodic).
double r = sqrt((rvec[0]+p0*opt.box[0])*
(rvec[0]+p0*opt.box[0])+
rvec[1]*rvec[1]+
rvec[2]*rvec[2]
);
p += qn*erfc(opt.xi*r)/r;
}
}
// Accumulate (PHI is allocated zeroed by the caller).
phi[m] += p;
}
}
#endif
/* no input checking is done */
/* MEX gateway: translate MATLAB inputs (indices, sources, options), allocate
 * the output (nidx x 1 potential, or nidx x 3 force when built with -DFORCE),
 * and dispatch to the kernel. No input checking is done. */
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, const mxArray *prhs[] )
{
// input dims
const int N = mxGetM(X);
const int num_eval = mxGetN(IDX); // FIXME: indices assumed to be row vec
const double* idx_d = mxGetPr(IDX);
int* idx = mxMalloc(num_eval*sizeof(int));
// Convert MATLAB 1-based indices to 0-based.
index_translation(idx, idx_d, num_eval);
const double* x = mxGetPr(X);
const double* q = mxGetPr(Q);
#ifndef FORCE
PHI = mxCreateDoubleMatrix(num_eval, 1, mxREAL);
double* restrict phi = mxGetPr(PHI);
#else
/* This is to allocate 3 vectors for the force.
* (FIXME) Note that the variable is still called PHI.*/
PHI = mxCreateDoubleMatrix(num_eval, 3, mxREAL);
double* restrict phi = mxGetPr(PHI);
#endif
ewald_opts opt;
unpack_opt(&opt, OPT);
if(VERBOSE)
{
mexPrintf("[EWALD (%s)] MEX N=(%d,%d) ","RS1P",N,num_eval);
mexPrintf("xi = %.2f layers=%d\n",
opt.xi,opt.layers);
}
// call kernel
SE1P_direct_real(phi, idx, num_eval, x, q, N, opt);
mxFree(idx);
}
|
ast-dump-openmp-section.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test() {
#pragma omp sections
{
#pragma omp section
;
}
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-section.c:3:1, line:9:1> line:3:6 test 'void ()'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:13, line:9:1>
// CHECK-NEXT: `-OMPSectionsDirective {{.*}} <line:4:9, col:21>
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3, line:8:3>
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: |-CompoundStmt {{.*}} <line:5:3, line:8:3> openmp_structured_block
// CHECK-NEXT: | `-OMPSectionDirective {{.*}} <line:6:9, col:20>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:7:5>
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | |-NullStmt {{.*}} <col:5> openmp_structured_block
// CHECK-NEXT: | `-ImplicitParamDecl {{.*}} <line:6:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-section.c:6:9) *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-section.c:4:9) *const restrict'
// CHECK-NEXT: |-RecordDecl {{.*}} <line:6:9> col:9 implicit struct definition
// CHECK-NEXT: | `-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: |-NullStmt {{.*}} <line:7:5> openmp_structured_block
// CHECK-NEXT: `-ImplicitParamDecl {{.*}} <line:6:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-section.c:6:9) *const restrict'
|
tool_available_search.c | // RUN: %clang %flags -shared -fPIC %s -o %T/first_tool.so
// RUN: %clang %flags -DTOOL -DSECOND_TOOL -shared -fPIC %s -o %T/second_tool.so
// RUN: %clang %flags -DTOOL -DTHIRD_TOOL -shared -fPIC %s -o %T/third_tool.so
// RUN: %libomp-compile -DCODE && env OMP_TOOL_LIBRARIES=%T/non_existing_file.so:%T/first_tool.so:%T/second_tool.so:%T/third_tool.so %libomp-run | FileCheck %s
// REQUIRES: ompt
/*
* This file contains code for three OMPT shared library tool to be
* loaded and the code for the OpenMP executable.
* No option enables code for the first shared library
* (without an implementation of ompt_start_tool) during compilation
* -DTOOL -DSECOND_TOOL enables the code for the second tool during compilation
* -DTOOL -DTHIRD_TOOL enables the code for the third tool during compilation
* -DCODE enables the code for the executable during compilation
*/
#ifdef CODE
#include "stdio.h"
#include "omp.h"
#include "ompt.h"
// Spawn two threads; the master thread pokes whichever tool the runtime
// loaded via omp_control_tool (-1 means no tool handled the command).
// The CHECK lines below pin the expected tool-search output.
int main()
{
#pragma omp parallel num_threads(2)
{
#pragma omp master
{
int result = omp_control_tool(omp_control_tool_start, 0, NULL);
printf("0: control_tool()=%d\n", result);
}
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback
// CHECK: {{^}}0: Do not initialize tool
// CHECK: {{^}}0: Do initialize tool
// CHECK: {{^}}0: Tool initialized
// CHECK: {{^}}0: ompt_event_thread_begin
// CHECK-DAG: {{^}}0: ompt_event_thread_begin
// CHECK-DAG: {{^}}0: control_tool()=-1
// CHECK: {{^}}0: Tool finalized
return 0;
}
#endif /* CODE */
#ifdef TOOL
#include <ompt.h>
#include "stdio.h"
#ifdef SECOND_TOOL
// The second tool has an implementation of ompt_start_tool that returns NULL,
// i.e. it declines to be initialized so the runtime continues its search.
ompt_start_tool_result_t* ompt_start_tool(
unsigned int omp_version,
const char *runtime_version)
{
printf("0: Do not initialize tool\n");
return NULL;
}
#elif defined(THIRD_TOOL)
// The third tool has an implementation of ompt_start_tool that returns a
// pointer to a valid instance of ompt_start_tool_result_t
// Thread-begin callback registered by the third tool; only logs the event.
static void
on_ompt_callback_thread_begin(
ompt_thread_type_t thread_type,
ompt_data_t *thread_data)
{
printf("0: ompt_event_thread_begin\n");
}
// Tool initializer: register the thread-begin callback.
// Returning nonzero keeps the tool active.
int ompt_initialize(
ompt_function_lookup_t lookup,
ompt_data_t *tool_data)
{
ompt_set_callback_t ompt_set_callback = (ompt_set_callback_t) lookup("ompt_set_callback");
ompt_set_callback(ompt_callback_thread_begin, (ompt_callback_t)on_ompt_callback_thread_begin);
printf("0: Tool initialized\n");
return 1;
}
// Tool finalizer: runs at runtime shutdown (CHECK'd as the last line).
void ompt_finalize(ompt_data_t *tool_data)
{
printf("0: Tool finalized\n");
}
// The third tool accepts: it hands the runtime a static result struct with
// its initialize/finalize entry points (tool_data value 0).
ompt_start_tool_result_t* ompt_start_tool(
unsigned int omp_version,
const char *runtime_version)
{
printf("0: Do initialize tool\n");
static ompt_start_tool_result_t ompt_start_tool_result = {&ompt_initialize,&ompt_finalize, 0};
return &ompt_start_tool_result;
}
#endif
#endif /* TOOL */
|
flush.c | //Transpose without locks or critical
#include<stdio.h>
#include<time.h>
#include<omp.h>
// Demo: 5x5 matrix transpose and matrix addition parallelized with OpenMP,
// synchronized with flushes instead of locks/critical sections. Each
// transposed pair (i,j)/(j,i) is handled by exactly one thread (the one
// owning the larger row index), so the element accesses themselves are
// race-free.
//
// FIXES:
//  * `void main()` -> standard `int main(void)` returning 0.
//  * The inner loop index `j` and the swap temporary `temp` were shared
//    between threads inside both `omp for` regions, which is a data race
//    that corrupts results; `j` is now private and the temporary is local.
//  * The reported time is computed in seconds but labelled "ms"; it is now
//    converted to milliseconds to match the label. (Note: clock() sums CPU
//    time over all threads.)
int main(void)
{
    int a[5][5], b[5][5], c[5][5], ch, i, j;
    printf("Menu\n1.Express Mode\n2.Custom Mode\n");
    printf("Enter your choice:");
    scanf("%d", &ch);
    if (ch == 1)
    {
        // Express mode: first matrix counts 1..25, second matrix is all ones.
        int l = 1;
        for (i = 0; i < 5; i++)
        {
            for (j = 0; j < 5; j++)
            {
                a[i][j] = l;
                b[i][j] = 1;
                l++;
            }
        }
    } else {
        // Custom mode: read both matrices element by element from stdin.
        int k = 1;
        for (i = 0; i < 5; i++)
        {
            for (j = 0; j < 5; j++)
            {
                printf("Enter element %d of first matrix:", k);
                scanf("%d", &a[i][j]);
                k++;
            }
        }
        k = 1;
        for (i = 0; i < 5; i++)
        {
            for (j = 0; j < 5; j++)
            {
                printf("Enter element %d of second matrix:", k);
                scanf("%d", &b[i][j]);
                k++;
            }
        }
    }
    printf("\nThe First Matrix is:\n");
    for (i = 0; i < 5; i++)
    {
        for (j = 0; j < 5; j++)
        {
            printf("%d\t", a[i][j]);
        }
        printf("\n");
    }
    printf("\nThe Second Matrix is:\n");
    for (i = 0; i < 5; i++)
    {
        for (j = 0; j < 5; j++)
        {
            printf("%d\t", b[i][j]);
        }
        printf("\n");
    }
    clock_t begin = clock();
    // In-place transpose: thread handling row i swaps the strictly-lower
    // elements of that row with their mirrored column elements.
#pragma omp parallel num_threads(5)
    {
#pragma omp for private(j)
        for (i = 0; i < 5; i++)
        {
            int id = omp_get_thread_num();
            for (j = 0; j < i; j++)
            {
#pragma omp flush
                int temp = a[i][j]; // local temporary: no cross-thread race
                a[i][j] = a[j][i];
                a[j][i] = temp;
            }
            printf("Thread %d\n", id);
        }
    }
    printf("\nTranspose of First Matrix:\n");
    for (i = 0; i < 5; i++)
    {
        for (j = 0; j < 5; j++)
        {
            printf("%d\t", a[i][j]);
        }
        printf("\n");
    }
    // Element-wise sum of the transposed first matrix and the second matrix.
#pragma omp parallel num_threads(5)
    {
#pragma omp for private(j)
        for (i = 0; i < 5; i++)
        {
            int id = omp_get_thread_num();
            for (j = 0; j < 5; j++)
            {
#pragma omp flush
                c[i][j] = a[i][j] + b[i][j];
            }
            printf("Thread %d\n", id);
        }
    }
    clock_t end = clock();
    double time_spent = 1000.0 * (double)(end - begin) / CLOCKS_PER_SEC; // ms
    printf("CPU Time used = %lfms", time_spent);
    printf("\nSum Matrix Is:\n");
    for (i = 0; i < 5; i++)
    {
        for (j = 0; j < 5; j++)
        {
            printf("%d\t", c[i][j]);
        }
        printf("\n");
    }
    return 0;
}
|
calculate_signed_distance_to_3d_condition_skin_process.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Pooyan Dadvand
// Daniel Baumgaertner
// Johannes Wolf
//
#if !defined(KRATOS_CALCULATE_DISTANCE_CONDITION_PROCESS_H_INCLUDED )
#define KRATOS_CALCULATE_DISTANCE_CONDITION_PROCESS_H_INCLUDED
// System includes
#include <string>
#include <iostream>
// External includes
// Project includes
#include "includes/define.h"
#include "processes/process.h"
#include "includes/model_part.h"
#include "includes/deprecated_variables.h"
#include "spatial_containers/octree_binary.h"
#include "utilities/spatial_containers_configure.h"
#include "utilities/timer.h"
#include "utilities/math_utils.h"
#include "utilities/geometry_utilities.h"
#include "geometries/triangle_3d_3.h"
#include "utilities/body_normal_calculation_utils.h"
#include "utilities/parallel_utilities.h"
namespace Kratos
{
class DistanceSpatialContainersConditionConfigure
{
public:
// Per-octree-node payload: signed distance value, 3-D coordinates and the
// node id, exposed through mutable-reference accessors.
class CellNodeData
{
double mDistance;
double mCoordinates[3];
std::size_t mId;
public:
double& Distance(){return mDistance;}
double& X() {return mCoordinates[0];}
double& Y() {return mCoordinates[1];}
double& Z() {return mCoordinates[2];}
double& operator[](int i) {return mCoordinates[i];} // no bounds check
std::size_t& Id(){return mId;}
};
///@name Type Definitions
///@{
enum { Dimension = 3,
DIMENSION = 3,
MAX_LEVEL = 12,
MIN_LEVEL = 2 // this cannot be less than 2!!!
};
typedef Point PointType; /// always the point 3D
typedef std::vector<double>::iterator DistanceIteratorType;
typedef PointerVectorSet<
GeometricalObject::Pointer,
IndexedObject,
std::less<typename IndexedObject::result_type>,
std::equal_to<typename IndexedObject::result_type>,
Kratos::shared_ptr<typename GeometricalObject::Pointer>,
std::vector< Kratos::shared_ptr<typename GeometricalObject::Pointer> >
> ContainerType;
typedef ContainerType::value_type PointerType;
typedef ContainerType::iterator IteratorType;
typedef PointerVectorSet<
GeometricalObject::Pointer,
IndexedObject,
std::less<typename IndexedObject::result_type>,
std::equal_to<typename IndexedObject::result_type>,
Kratos::shared_ptr<typename GeometricalObject::Pointer>,
std::vector< Kratos::shared_ptr<typename GeometricalObject::Pointer> >
> ResultContainerType;
typedef ResultContainerType::value_type ResultPointerType;
typedef ResultContainerType::iterator ResultIteratorType;
typedef GeometricalObject::Pointer pointer_type;
typedef CellNodeData cell_node_data_type;
typedef std::vector<CellNodeData*> data_type;
typedef std::vector<PointerType>::iterator PointerTypeIterator;
/// Pointer definition of DistanceSpatialContainersConditionConfigure
KRATOS_CLASS_POINTER_DEFINITION(DistanceSpatialContainersConditionConfigure);
///@}
///@name Life Cycle
///@{
/// Default constructor.
DistanceSpatialContainersConditionConfigure() {}
/// Destructor.
virtual ~DistanceSpatialContainersConditionConfigure() {}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
static data_type* AllocateData() {
return new data_type(27, (CellNodeData*)NULL);
}
static void CopyData(data_type* source, data_type* destination) {
*destination = *source;
}
static void DeleteData(data_type* data) {
delete data;
}
///******************************************************************************************************************
///******************************************************************************************************************
static inline void CalculateBoundingBox(const PointerType& rObject, PointType& rLowPoint, PointType& rHighPoint)
{
rHighPoint = rObject->GetGeometry().GetPoint(0);
rLowPoint = rObject->GetGeometry().GetPoint(0);
for (unsigned int point = 0; point<rObject->GetGeometry().PointsNumber(); point++)
{
for(std::size_t i = 0; i<3; i++)
{
rLowPoint[i] = (rLowPoint[i] > rObject->GetGeometry().GetPoint(point)[i] ) ? rObject->GetGeometry().GetPoint(point)[i] : rLowPoint[i];
rHighPoint[i] = (rHighPoint[i] < rObject->GetGeometry().GetPoint(point)[i] ) ? rObject->GetGeometry().GetPoint(point)[i] : rHighPoint[i];
}
}
}
///******************************************************************************************************************
///******************************************************************************************************************
static inline void GetBoundingBox(const PointerType rObject, double* rLowPoint, double* rHighPoint)
{
for(std::size_t i = 0; i<3; i++)
{
rLowPoint[i] = rObject->GetGeometry().GetPoint(0)[i];
rHighPoint[i] = rObject->GetGeometry().GetPoint(0)[i];
}
for (unsigned int point = 0; point<rObject->GetGeometry().PointsNumber(); point++)
{
for(std::size_t i = 0; i<3; i++)
{
rLowPoint[i] = (rLowPoint[i] > rObject->GetGeometry().GetPoint(point)[i] ) ? rObject->GetGeometry().GetPoint(point)[i] : rLowPoint[i];
rHighPoint[i] = (rHighPoint[i] < rObject->GetGeometry().GetPoint(point)[i] ) ? rObject->GetGeometry().GetPoint(point)[i] : rHighPoint[i];
}
}
}
///******************************************************************************************************************
///******************************************************************************************************************
static inline bool Intersection(const PointerType& rObj_1, const PointerType& rObj_2)
{
Element::GeometryType& geom_1 = rObj_1->GetGeometry();
Element::GeometryType& geom_2 = rObj_2->GetGeometry();
return geom_1.HasIntersection(geom_2);
}
///******************************************************************************************************************
///******************************************************************************************************************
static inline bool IntersectionBox(const PointerType& rObject, const PointType& rLowPoint, const PointType& rHighPoint)
{
return rObject->GetGeometry().HasIntersection(rLowPoint, rHighPoint);
}
///******************************************************************************************************************
///******************************************************************************************************************
static inline bool IsIntersected(const Element::Pointer rObject, double Tolerance, const double* rLowPoint, const double* rHighPoint)
{
Point low_point(rLowPoint[0] - Tolerance, rLowPoint[1] - Tolerance, rLowPoint[2] - Tolerance);
Point high_point(rHighPoint[0] + Tolerance, rHighPoint[1] + Tolerance, rHighPoint[2] + Tolerance);
KRATOS_THROW_ERROR(std::logic_error, "Not Implemented method", "")
//return HasIntersection(rObject->GetGeometry(), low_point, high_point);
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
virtual std::string Info() const
{
return " Spatial Containers Configure";
}
/// Print information about this object.
virtual void PrintInfo(std::ostream& rOStream) const {}
/// Print object's data.
virtual void PrintData(std::ostream& rOStream) const {}
///@}
protected:
private:
/// Assignment operator.
DistanceSpatialContainersConditionConfigure& operator=(DistanceSpatialContainersConditionConfigure const& rOther);
/// Copy constructor.
DistanceSpatialContainersConditionConfigure(DistanceSpatialContainersConditionConfigure const& rOther);
}; // Class DistanceSpatialContainersConditionConfigure
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/// Short class definition.
/** Detail class definition.
*/
class CalculateSignedDistanceTo3DConditionSkinProcess
: public Process
{
public:
///@name Type Definitions
///@{
/// Pointer definition of CalculateSignedDistanceTo3DConditionSkinProcess
KRATOS_CLASS_POINTER_DEFINITION(CalculateSignedDistanceTo3DConditionSkinProcess);
typedef DistanceSpatialContainersConditionConfigure ConfigurationType;
typedef OctreeBinaryCell<ConfigurationType> CellType;
typedef OctreeBinary<CellType> OctreeType;
typedef ConfigurationType::cell_node_data_type CellNodeDataType;
typedef Point PointType; /// always the point 3D
typedef OctreeType::cell_type::object_container_type object_container_type;
typedef struct{
array_1d<double,3> Coordinates;
array_1d<double,3> StructElemNormal;
}IntersectionNodeStruct;
typedef struct{
std::vector<IntersectionNodeStruct> IntNodes;
}TetEdgeStruct;
///@}
///@name Life Cycle
///@{
/// Constructor.
/// Constructor: binds the structural (skin) model part and the fluid model part
/// the distances are computed on.
/// NOTE(review): mrSkinModelPart and mrBodyModelPart are both bound to the same
/// structural model part argument here — confirm this aliasing is intentional.
CalculateSignedDistanceTo3DConditionSkinProcess(ModelPart& rThisModelPartStruc, ModelPart& rThisModelPartFluid)
: mrSkinModelPart(rThisModelPartStruc), mrBodyModelPart(rThisModelPartStruc), mrFluidModelPart(rThisModelPartFluid)
{
}
/// Destructor.
/// Destructor. Nothing to release: the process only holds model-part references.
~CalculateSignedDistanceTo3DConditionSkinProcess() override
{
}
///@}
///@name Operators
///@{
/// Function-call operator: forwards to Execute() so the process can be used
/// as a functor (standard Kratos Process convention).
void operator()()
{
Execute();
}
///@}
///@name Operations
///@{
///******************************************************************************************************************
///******************************************************************************************************************
/**
 * Runs the signed-distance pipeline:
 *   1. build the octree that spatially indexes the skin conditions,
 *   2. compute elemental/nodal distances on fluid elements cut by the skin,
 *   3. extend the signed distance field to the rest of the fluid domain.
 */
void Execute() override
{
    KRATOS_TRY

    // Spatial index of the embedded skin conditions.
    GenerateOctree();

    // Distances on the elements intersected by the structure.
    DistanceFluidStructure();

    // Propagate the distance field over the whole fluid mesh.
    CalculateDistance2(); // I have to change this. Pooyan.

    KRATOS_CATCH("");
}
///******************************************************************************************************************
///******************************************************************************************************************
/**
 * Computes, for every fluid element, the signed distances of its nodes to the
 * embedded structural skin.
 *
 * Steps: reset nodal/elemental distances, build the tetrahedron edge index
 * table, zero the accumulated embedded velocities, then process each fluid
 * element against the octree leaves it intersects.
 *
 * Fix: removed the leftover debug print KRATOS_WATCH("ENDOF LOOP") that was
 * emitted unconditionally on every call.
 */
void DistanceFluidStructure()
{
    // Initialize nodal distances of each node in the domain to 1.0
    InitializeDistances();

    // Index table mapping each of the 6 tetrahedron edges to its two local node indices
    BoundedMatrix<unsigned int,6,2> TetEdgeIndexTable;
    SetIndexTable(TetEdgeIndexTable);

    // Reset the embedded velocity of all fluid elements before it is
    // accumulated edge-by-edge in CalcNodalDistancesOfTetNodes().
    for( ModelPart::ElementIterator i_fluidElement = mrFluidModelPart.ElementsBegin();
         i_fluidElement != mrFluidModelPart.ElementsEnd();
         i_fluidElement++)
    {
        i_fluidElement->GetValue(EMBEDDED_VELOCITY)=ZeroVector(3);
    }

    // Loop over all fluid elements and compute their nodal distances to the skin
    for( ModelPart::ElementIterator i_fluidElement = mrFluidModelPart.ElementsBegin();
         i_fluidElement != mrFluidModelPart.ElementsEnd();
         i_fluidElement++)
    {
        CalcNodalDistancesOfTetNodes( i_fluidElement , TetEdgeIndexTable );
    }
}
///******************************************************************************************************************
///******************************************************************************************************************
/**
 * Resets the distance field before a new computation: every nodal DISTANCE and
 * every component of the elemental ELEMENTAL_DISTANCES is set to 1.0 (the
 * maximum distance in the normalized space) and every SPLIT_ELEMENT flag is
 * cleared.
 */
void InitializeDistances()
{
    // Nodal distances back to the normalized-space maximum.
    ModelPart::NodesContainerType::ContainerType& nodes = mrFluidModelPart.NodesArray();
    for (auto& p_node : nodes)
        p_node->GetSolutionStepValue(DISTANCE) = 1.0;

    // Elemental distances: all four entries at the initial value.
    const double initial_distance = 1.0;
    array_1d<double,4> ElementalDistances;
    for (unsigned int k = 0; k < 4; k++)
        ElementalDistances[k] = initial_distance;

    // Reset per-element storage and clear the split flag.
    ModelPart::ElementsContainerType::ContainerType& fluid_elements = mrFluidModelPart.ElementsArray();
    for (auto& p_element : fluid_elements)
    {
        p_element->SetValue(ELEMENTAL_DISTANCES,ElementalDistances);
        p_element->GetValue(SPLIT_ELEMENT) = false;
    }
}
///******************************************************************************************************************
///******************************************************************************************************************
/**
 * Fills the edge index table of a tetrahedron: row e holds the two local node
 * indices spanning edge e, in the fixed order
 * (0,1), (0,2), (0,3), (1,2), (1,3), (2,3).
 */
void SetIndexTable( BoundedMatrix<unsigned int,6,2>& TetEdgeIndexTable )
{
    // Local node index pairs of the 6 tetrahedron edges.
    const unsigned int edge_nodes[6][2] = { {0,1}, {0,2}, {0,3}, {1,2}, {1,3}, {2,3} };

    for (unsigned int edge = 0; edge < 6; edge++)
    {
        TetEdgeIndexTable(edge,0) = edge_nodes[edge][0];
        TetEdgeIndexTable(edge,1) = edge_nodes[edge][1];
    }
}
///******************************************************************************************************************
///******************************************************************************************************************
/**
 * Processes one fluid (tetrahedral) element: finds all intersections of its
 * six edges with the structural skin, accumulates the embedded velocity from
 * the intersecting conditions, and — if any edge is cut — computes the signed
 * nodal distances for the element.
 */
void CalcNodalDistancesOfTetNodes( ModelPart::ElementsContainerType::iterator& i_fluidElement,
BoundedMatrix<unsigned int,6,2> TetEdgeIndexTable)
{
std::vector<OctreeType::cell_type*> leaves;
std::vector<TetEdgeStruct> IntersectedTetEdges;
unsigned int NumberIntersectionsOnTetCorner = 0;
// Get leaves of octree intersecting with fluid element
mOctree.GetIntersectedLeaves(*(i_fluidElement).base(),leaves);
// Counts edge intersections that are NOT on a tetrahedron corner; used to
// average the accumulated embedded velocity below.
int intersection_counter=0;
//i_fluidElement->GetValue(EMBEDDED_VELOCITY)=ZeroVector(3);
// Loop over all 6 line Edges of the tetrahedra
for(unsigned int i_tetEdge = 0; i_tetEdge < 6; i_tetEdge++)
{
IdentifyIntersectionNodes( i_fluidElement , i_tetEdge , leaves , IntersectedTetEdges ,
NumberIntersectionsOnTetCorner , TetEdgeIndexTable, intersection_counter );
}
// Each intersection added the sum of 3 nodal velocities of the cutting
// triangle, hence the division by 3 * intersection_counter to average.
if (intersection_counter!=0)
i_fluidElement->GetValue(EMBEDDED_VELOCITY)/=3.0*intersection_counter;
//else
// i_fluidElement->GetValue(EMBEDDED_VELOCITY)=ZeroVector(3);
//KRATOS_WATCH("============================================================")
// KRATOS_WATCH(i_fluidElement->GetValue(EMBEDDED_VELOCITY))
//KRATOS_WATCH("???????????????????????????????????????????????????????????????")
//if (intersection_counter!=0)
// KRATOS_WATCH(intersection_counter)
// Only elements with at least one intersected edge get signed distances.
if(IntersectedTetEdges.size() > 0)
CalcNodalDistanceTo3DSkin( IntersectedTetEdges , i_fluidElement , NumberIntersectionsOnTetCorner );
}
///******************************************************************************************************************
///******************************************************************************************************************
/**
 * Finds all intersections between one tetrahedron edge (i_tetEdge) and the
 * structural conditions stored in the octree leaves that overlap the element.
 *
 * For each new, on-edge intersection point the structural normal is attached;
 * if the intersection is not on a tetrahedron corner, the condition's nodal
 * velocities are accumulated into the element's EMBEDDED_VELOCITY and
 * intersection_counter is incremented. All intersection nodes found on this
 * edge are appended to IntersectedTetEdges as one TetEdgeStruct.
 */
void IdentifyIntersectionNodes( ModelPart::ElementsContainerType::iterator& i_fluidElement,
unsigned int i_tetEdge,
std::vector<OctreeType::cell_type*>& leaves,
std::vector<TetEdgeStruct>& IntersectedTetEdges,
unsigned int& NumberIntersectionsOnTetCorner,
BoundedMatrix<unsigned int,6,2> TetEdgeIndexTable,
int& intersection_counter)
{
// Conditions already tested for this edge (a condition may appear in
// several octree leaves and must be processed only once).
std::vector<unsigned int> IntersectingStructCondID;
TetEdgeStruct NewTetEdge;
// Get nodes of line Edge
unsigned int EdgeStartIndex = TetEdgeIndexTable(i_tetEdge,0);
unsigned int EdgeEndIndex = TetEdgeIndexTable(i_tetEdge,1);
PointType& P1 = i_fluidElement->GetGeometry()[EdgeStartIndex];
PointType& P2 = i_fluidElement->GetGeometry()[EdgeEndIndex];
double EdgeNode1[3] = {P1.X() , P1.Y() , P1.Z()};
double EdgeNode2[3] = {P2.X() , P2.Y() , P2.Z()};
//int count=0;
// loop over all octree cells which are intersected by the fluid element
for(unsigned int i_cell = 0 ; i_cell < leaves.size() ; i_cell++)
{
// Structural element contained in one cell of the octree
object_container_type* struct_cond = (leaves[i_cell]->pGetObjects());
// loop over all structural elements within each octree cell
for(object_container_type::iterator i_StructCondition = struct_cond->begin(); i_StructCondition != struct_cond->end(); i_StructCondition++)
{
//KRATOS_WATCH(struct_cond->size())
if( StructuralElementNotYetConsidered( (*i_StructCondition)->Id() , IntersectingStructCondID ) )
{
// Calculate and associate intersection point to the current fluid element
double IntersectionPoint[3] = {0.0 , 0.0 , 0.0};
int TetEdgeHasIntersections = IntersectionTriangleSegment( (*i_StructCondition)->GetGeometry() , EdgeNode1 , EdgeNode2 , IntersectionPoint );
if( TetEdgeHasIntersections == 1 )
{
IntersectionNodeStruct NewIntersectionNode;
// Assign information to the intersection node
NewIntersectionNode.Coordinates[0] = IntersectionPoint[0];
NewIntersectionNode.Coordinates[1] = IntersectionPoint[1];
NewIntersectionNode.Coordinates[2] = IntersectionPoint[2];
// Skip points that coincide (within epsilon) with an already found one.
if ( IsNewIntersectionNode( NewIntersectionNode , IntersectedTetEdges ) )
{
// Only accept points actually lying within the edge segment.
if( IsIntersectionNodeOnTetEdge( IntersectionPoint , EdgeNode1 , EdgeNode2 ) )
{
// Calculate normal of the structural element at the position of the intersection point
CalculateNormal3D((*i_StructCondition)->GetGeometry(),NewIntersectionNode.StructElemNormal);
// add the new intersection point to the list of intersection points of the fluid element
NewTetEdge.IntNodes.push_back(NewIntersectionNode);
//(i_fluidElement->GetValue(NEIGHBOUR_EMBEDDED_FACES)).push_back( GeometricalObject::WeakPointer( *(i_StructCondition.base()) ) );
/*
array_1d<double,3> emb_vel=(*i_StructCondition)->GetGeometry()[0].FastGetSolutionStepValue(VELOCITY);
emb_vel+=(*i_StructCondition)->GetGeometry()[1].FastGetSolutionStepValue(VELOCITY);
emb_vel+=(*i_StructCondition)->GetGeometry()[2].FastGetSolutionStepValue(VELOCITY);
//KRATOS_WATCH(emb_vel)
i_fluidElement->GetValue(EMBEDDED_VELOCITY)+=emb_vel;
intersection_counter++;
*/
// check, how many intersection nodes are located on corner points of the tetrahedra
if ( IsIntersectionOnCorner( NewIntersectionNode , EdgeNode1 , EdgeNode2) )
NumberIntersectionsOnTetCorner++;
//BY NOW I WANT TO CONSIDER ONLY THE EDGES THAT ARE CUT "NOT AT THE VERTEX"
else
{
// double dummy=0.0;
//(i_fluidElement->GetValue(NEIGHBOUR_EMBEDDED_FACES)).push_back( GeometricalObject::WeakPointer( *(i_StructCondition.base()) ) );
// Accumulate the sum of the 3 nodal velocities of the cutting
// condition; averaged later in CalcNodalDistancesOfTetNodes().
array_1d<double,3> emb_vel=(*i_StructCondition)->GetGeometry()[0].FastGetSolutionStepValue(VELOCITY);
emb_vel+=(*i_StructCondition)->GetGeometry()[1].FastGetSolutionStepValue(VELOCITY);
emb_vel+=(*i_StructCondition)->GetGeometry()[2].FastGetSolutionStepValue(VELOCITY);
//KRATOS_WATCH(emb_vel)
i_fluidElement->GetValue(EMBEDDED_VELOCITY)+=emb_vel;
intersection_counter++;
}
//(pGeom[i].GetValue(NEIGHBOUR_ELEMENTS)).push_back( Element::WeakPointer( *(ie.base()) ) );
}
}
}
}
}
}
// check, if intersection nodes have been found on the tet edge --> if yes, then add these information to the TetEdgeVector
if( NewTetEdge.IntNodes.size() > 0 )
IntersectedTetEdges.push_back(NewTetEdge);
}
///******************************************************************************************************************
///******************************************************************************************************************
/**
 * Returns true the first time a structural condition id is seen (and records
 * it in IntersectingStructCondID); returns false if the id was already
 * registered — i.e. the condition was handled via another octree cell.
 */
bool StructuralElementNotYetConsidered( unsigned int IDCurrentStructCond,
                                        std::vector<unsigned int>& IntersectingStructCondID )
{
    // Already registered? Then skip it this time.
    for (const unsigned int known_id : IntersectingStructCondID)
    {
        if (known_id == IDCurrentStructCond)
            return false;
    }

    // First encounter: remember the id so later octree cells skip it.
    IntersectingStructCondID.push_back(IDCurrentStructCond);
    return true;
}
///******************************************************************************************************************
///******************************************************************************************************************
bool IsIntersectionNodeOnTetEdge( double* IntersectionPoint , double* EdgeNode1 , double* EdgeNode2 )
{
// check, if intersection point is located on any edge of the fluid element
array_1d<double,3> ConnectVectTetNodeIntNode1;
array_1d<double,3> ConnectVectTetNodeIntNode2;
array_1d<double,3> EdgeVector;
ConnectVectTetNodeIntNode1[0] = IntersectionPoint[0] - EdgeNode1[0];
ConnectVectTetNodeIntNode1[1] = IntersectionPoint[1] - EdgeNode1[1];
ConnectVectTetNodeIntNode1[2] = IntersectionPoint[2] - EdgeNode1[2];
ConnectVectTetNodeIntNode2[0] = IntersectionPoint[0] - EdgeNode2[0];
ConnectVectTetNodeIntNode2[1] = IntersectionPoint[1] - EdgeNode2[1];
ConnectVectTetNodeIntNode2[2] = IntersectionPoint[2] - EdgeNode2[2];
double LengthConnectVect1 = norm_2( ConnectVectTetNodeIntNode1 );
double LengthConnectVect2 = norm_2( ConnectVectTetNodeIntNode2 );
EdgeVector[0] = EdgeNode2[0] - EdgeNode1[0];
EdgeVector[1] = EdgeNode2[1] - EdgeNode1[1];
EdgeVector[2] = EdgeNode2[2] - EdgeNode1[2];
double MaxEdgeLength = norm_2( EdgeVector );
// if both connection vectors (corner point --> intersection point)
// are smaller or equal to the edge length of tetrahedra,
// then intersection point is located on the edge
if( (LengthConnectVect1 <= (MaxEdgeLength)) && (LengthConnectVect2 <= (MaxEdgeLength)) )
return true;
else
return false;
}
///******************************************************************************************************************
///******************************************************************************************************************
/**
 * Returns true if the candidate intersection node does not coincide (within
 * epsilon) with any intersection node already stored on the intersected
 * tetrahedron edges; false if a matching node already exists.
 */
bool IsNewIntersectionNode(IntersectionNodeStruct& NewIntersectionNode,
                           std::vector<TetEdgeStruct> IntersectedTetEdges )
{
    array_1d<double,3> Diff;

    for (const TetEdgeStruct& r_edge : IntersectedTetEdges)
    {
        for (const IntersectionNodeStruct& r_known : r_edge.IntNodes)
        {
            Diff[0] = NewIntersectionNode.Coordinates[0] - r_known.Coordinates[0];
            Diff[1] = NewIntersectionNode.Coordinates[1] - r_known.Coordinates[1];
            Diff[2] = NewIntersectionNode.Coordinates[2] - r_known.Coordinates[2];

            // Coincides with an already stored node -> not new.
            if (norm_2(Diff) < epsilon)
                return false;
        }
    }

    // No stored node is within epsilon of the candidate.
    return true;
}
///******************************************************************************************************************
///******************************************************************************************************************
/**
 * Returns true if the intersection node coincides (within epsilon) with either
 * end node of the tetrahedron edge, i.e. the cut goes exactly through a corner.
 */
bool IsIntersectionOnCorner(IntersectionNodeStruct& NewIntersectionNode,
                            double* EdgeNode1,
                            double* EdgeNode2 )
{
    array_1d<double,3> Diff;

    // Distance to the first edge corner.
    for (unsigned int k = 0; k < 3; k++)
        Diff[k] = EdgeNode1[k] - NewIntersectionNode.Coordinates[k];
    if (norm_2(Diff) < epsilon)
        return true;

    // Distance to the second edge corner.
    for (unsigned int k = 0; k < 3; k++)
        Diff[k] = EdgeNode2[k] - NewIntersectionNode.Coordinates[k];
    return norm_2(Diff) < epsilon;
}
///******************************************************************************************************************
///******************************************************************************************************************
/**
 * Computes the area-weighted normal of a triangular geometry:
 * rResultNormal = 0.5 * (P1 - P0) x (P2 - P0).
 */
void CalculateNormal3D(Element::GeometryType& rGeometry,
                       array_1d<double,3>& rResultNormal)
{
    array_1d<double,3> Edge01;
    array_1d<double,3> Edge02;

    // Two triangle edges sharing node 0.
    for (unsigned int k = 0; k < 3; k++)
    {
        Edge01[k] = rGeometry[1][k] - rGeometry[0][k];
        Edge02[k] = rGeometry[2][k] - rGeometry[0][k];
    }

    MathUtils<double>::CrossProduct(rResultNormal,Edge01,Edge02);
    rResultNormal *= 0.5;
}
///******************************************************************************************************************
///******************************************************************************************************************
/**
 * Dispatches the signed-distance computation for one cut element depending on
 * how the structure intersects it: one corner, one edge (two corners), three
 * edges (one approximation triangle) or four edges (two triangles). Marks the
 * element as split and stores the elemental distances when a case applies.
 */
void CalcNodalDistanceTo3DSkin(std::vector<TetEdgeStruct>& IntersectedTetEdges,
ModelPart::ElementsContainerType::iterator& i_fluid_element,
unsigned int NumberIntersectionsOnTetCorner)
{
std::vector<IntersectionNodeStruct> NodesOfApproximatedStructure;
array_1d<double,4> ElementalDistances;
// Reduce all found intersection nodes located on each tetdrahedra edge to just one intersection node by averaging
ComputeApproximationNodes(IntersectedTetEdges,NodesOfApproximatedStructure);
// Intersection with one corner point
if( NodesOfApproximatedStructure.size() == 1 && NumberIntersectionsOnTetCorner == 1 )
{
CalcSignedDistancesToOneIntNode(i_fluid_element,NodesOfApproximatedStructure,ElementalDistances);
i_fluid_element->GetValue(SPLIT_ELEMENT) = true;
}
// Intersection with two corner points / one tetrahedra edge
if( NodesOfApproximatedStructure.size() == 2 && NumberIntersectionsOnTetCorner == 2 )
{
CalcSignedDistancesToTwoIntNodes(i_fluid_element,NodesOfApproximatedStructure,ElementalDistances);
i_fluid_element->GetValue(SPLIT_ELEMENT) = true;
}
// Intersection with three tetrahedra edges
if( NodesOfApproximatedStructure.size() == 3 )
{
CalcSignedDistancesToThreeIntNodes(i_fluid_element,NodesOfApproximatedStructure,IntersectedTetEdges,ElementalDistances);
i_fluid_element->GetValue(SPLIT_ELEMENT) = true;
}
// Intersection with four tetrahedra edges
if( NodesOfApproximatedStructure.size() == 4 )
{
CalcSignedDistancesToFourIntNodes(i_fluid_element,NodesOfApproximatedStructure,IntersectedTetEdges,ElementalDistances);
i_fluid_element->GetValue(SPLIT_ELEMENT) = true;
}
// Store the distances only if one of the cases above marked the element as split
if( i_fluid_element->GetValue(SPLIT_ELEMENT) == true )
AssignDistancesToElements(i_fluid_element,ElementalDistances);
}
///******************************************************************************************************************
///******************************************************************************************************************
/**
 * Collapses all intersection nodes found on each tetrahedron edge into a
 * single approximation node per edge by averaging their coordinates. With at
 * most two intersected edges the structural normal of the edge's first
 * intersection node is carried over to the approximation node.
 */
void ComputeApproximationNodes(std::vector<TetEdgeStruct> IntersectedTetEdges,
                               std::vector<IntersectionNodeStruct>& NodesOfApproximatedStructure)
{
    for (const TetEdgeStruct& r_edge : IntersectedTetEdges)
    {
        const unsigned int n_int_nodes = r_edge.IntNodes.size();

        // Accumulate the coordinates of every intersection node on this edge.
        array_1d<double,3> coord_sum;
        coord_sum[0] = 0;
        coord_sum[1] = 0;
        coord_sum[2] = 0;
        for (const IntersectionNodeStruct& r_int_node : r_edge.IntNodes)
        {
            coord_sum[0] += r_int_node.Coordinates[0];
            coord_sum[1] += r_int_node.Coordinates[1];
            coord_sum[2] += r_int_node.Coordinates[2];
        }

        // One averaged node per intersected edge.
        IntersectionNodeStruct NewApproximationNode;
        NewApproximationNode.Coordinates[0] = coord_sum[0] / n_int_nodes;
        NewApproximationNode.Coordinates[1] = coord_sum[1] / n_int_nodes;
        NewApproximationNode.Coordinates[2] = coord_sum[2] / n_int_nodes;

        // For the 1- and 2-node cases the first intersection node's normal is
        // representative for the whole edge.
        if (IntersectedTetEdges.size() <= 2)
            NewApproximationNode.StructElemNormal = r_edge.IntNodes[0].StructElemNormal;

        NodesOfApproximatedStructure.push_back(NewApproximationNode);
    }
}
///******************************************************************************************************************
///******************************************************************************************************************
/**
 * Computes signed elemental distances when the structure touches the
 * tetrahedron in exactly one corner point. The magnitude is the Euclidean
 * distance from each tet node to that point; the sign follows the structural
 * normal (positive on the normal side, zero within epsilon of the surface).
 */
void CalcSignedDistancesToOneIntNode( ModelPart::ElementsContainerType::iterator& i_fluid_element,
                                      std::vector<IntersectionNodeStruct> NodesOfApproximatedStructure,
                                      array_1d<double,4>& ElementalDistances)
{
    const array_1d<double,3>& rIntNodeCoord = NodesOfApproximatedStructure[0].Coordinates;
    const array_1d<double,3>& rStructNormal = NodesOfApproximatedStructure[0].StructElemNormal;

    Geometry< Node<3> >& rFluidGeom = i_fluid_element->GetGeometry();

    array_1d<double,3> TetNode;
    array_1d<double,3> DistVec;

    for (unsigned int i_TetNode = 0; i_TetNode < 4; i_TetNode++)
    {
        // Coordinates of the current fluid element node.
        TetNode = rFluidGeom[i_TetNode].Coordinates();

        // Vector from the intersection node to the tet node and its length.
        DistVec[0] = TetNode[0] - rIntNodeCoord[0];
        DistVec[1] = TetNode[1] - rIntNodeCoord[1];
        DistVec[2] = TetNode[2] - rIntNodeCoord[2];
        const double unsigned_distance = norm_2( DistVec );

        // The projection on the structural normal decides the sign.
        const double projection = inner_prod(DistVec,rStructNormal);
        if (projection > epsilon)
            ElementalDistances[i_TetNode] = unsigned_distance;
        else if (projection > -epsilon)
            ElementalDistances[i_TetNode] = 0;
        else
            ElementalDistances[i_TetNode] = -unsigned_distance;
    }
}
///******************************************************************************************************************
///******************************************************************************************************************
/**
 * Computes signed elemental distances when the structure cuts the tetrahedron
 * along one edge (two intersection nodes). The magnitude is the distance from
 * each tet node to the line segment between the two intersection nodes; the
 * sign comes from projecting onto the averaged structural normal.
 */
void CalcSignedDistancesToTwoIntNodes( ModelPart::ElementsContainerType::iterator& i_fluid_element,
std::vector<IntersectionNodeStruct> NodesOfApproximatedStructure,
array_1d<double,4>& ElementalDistances)
{
const array_1d<double,3>& IntersectionNode1Coord = NodesOfApproximatedStructure[0].Coordinates;
const array_1d<double,3>& IntersectionNode2Coord = NodesOfApproximatedStructure[1].Coordinates;
array_1d<double,3> TetNode;
array_1d<double,3> DistVecTetNode;
array_1d<double,3> NormalAtIntersectionNode1;
array_1d<double,3> NormalAtIntersectionNode2;
array_1d<double,3> ResNormal;
double InnerProduct;
double NormDistTetNode;
// Segment spanned by the two intersection nodes.
const Point LinePoint1 = Point(IntersectionNode1Coord[0] , IntersectionNode1Coord[1] , IntersectionNode1Coord[2]);
const Point LinePoint2 = Point(IntersectionNode2Coord[0] , IntersectionNode2Coord[1] , IntersectionNode2Coord[2]);
Geometry< Node<3> >& rFluidGeom = i_fluid_element->GetGeometry();
for(unsigned int i_TetNode = 0 ; i_TetNode < 4 ; i_TetNode++)
{
// Get coordinates of the fluid element nodes
TetNode = rFluidGeom(i_TetNode)->Coordinates();
// Unsigned distance: point-to-segment distance to the intersection line.
NormDistTetNode = GeometryUtils::PointDistanceToLineSegment3D(LinePoint1, LinePoint2 , Point(TetNode[0],TetNode[1],TetNode[2]));
// Vector from the FIRST intersection node to the tet node; only its
// projection on the averaged normal is used, to decide the sign.
DistVecTetNode[0] = TetNode[0] - IntersectionNode1Coord[0];
DistVecTetNode[1] = TetNode[1] - IntersectionNode1Coord[1];
DistVecTetNode[2] = TetNode[2] - IntersectionNode1Coord[2];
// Get normal at intersections, average them and check direction of distances
NormalAtIntersectionNode1 = NodesOfApproximatedStructure[0].StructElemNormal;
NormalAtIntersectionNode2 = NodesOfApproximatedStructure[1].StructElemNormal;
// Average of the two structural normals.
ResNormal[0] = 0.5*(NormalAtIntersectionNode1[0] + NormalAtIntersectionNode2[0]);
ResNormal[1] = 0.5*(NormalAtIntersectionNode1[1] + NormalAtIntersectionNode2[1]);
ResNormal[2] = 0.5*(NormalAtIntersectionNode1[2] + NormalAtIntersectionNode2[2]);
InnerProduct = inner_prod(DistVecTetNode,ResNormal);
// Assign distances as nodal solution values: positive on the normal side,
// zero within the epsilon band, negative otherwise.
if(InnerProduct>epsilon)
ElementalDistances[i_TetNode] = NormDistTetNode;
else if(InnerProduct>-epsilon)
ElementalDistances[i_TetNode] = 0;
else
ElementalDistances[i_TetNode] = -NormDistTetNode;
}
}
///******************************************************************************************************************
///******************************************************************************************************************
/**
 * Computes signed elemental distances when three tetrahedron edges are cut:
 * the three approximation nodes directly span the triangle approximating the
 * structure inside the element.
 */
void CalcSignedDistancesToThreeIntNodes( ModelPart::ElementsContainerType::iterator& i_fluid_element,
                                         std::vector<IntersectionNodeStruct> NodesOfApproximatedStructure,
                                         std::vector<TetEdgeStruct> IntersectedTetEdges,
                                         array_1d<double,4>& ElementalDistances)
{
    // The approximation triangle is simply made of the three intersection nodes.
    array_1d<unsigned int,3> IndexNodes;
    for (unsigned int k = 0; k < 3; k++)
        IndexNodes[k] = k;

    // Compute distance for each tetrahedra node to the triangle to approximate the structure
    CalcSignedDistancesToApproxTriangle( i_fluid_element , NodesOfApproximatedStructure , IntersectedTetEdges , ElementalDistances , IndexNodes );
}
///******************************************************************************************************************
///******************************************************************************************************************
/**
 * Computes signed elemental distances when four tetrahedron edges are cut:
 * the four approximation nodes form a quadrilateral, which is split into two
 * triangles. The distance of each tet node is the one with smaller magnitude
 * of the two per-triangle distances.
 */
void CalcSignedDistancesToFourIntNodes( ModelPart::ElementsContainerType::iterator& i_fluid_element,
                                        std::vector<IntersectionNodeStruct> NodesOfApproximatedStructure,
                                        std::vector<TetEdgeStruct> IntersectedTetEdges,
                                        array_1d<double,4>& ElementalDistances)
{
    // First triangle is fixed to nodes {0,1,2}; the second one is the fourth
    // node together with its two nearest nodes of the first triangle.
    array_1d<unsigned int,3> IndexNodes_T1;
    IndexNodes_T1[0] = 0;
    IndexNodes_T1[1] = 1;
    IndexNodes_T1[2] = 2;

    array_1d<unsigned int,3> IndexNodes_T2;
    FindIndexNodesOfTriangle2(NodesOfApproximatedStructure,IndexNodes_T2);

    // Signed distances to each of the two approximation triangles.
    array_1d<double,4> ElementalDistances_T1;
    array_1d<double,4> ElementalDistances_T2;
    CalcSignedDistancesToApproxTriangle( i_fluid_element , NodesOfApproximatedStructure , IntersectedTetEdges,
                                         ElementalDistances_T1 , IndexNodes_T1 );
    CalcSignedDistancesToApproxTriangle( i_fluid_element , NodesOfApproximatedStructure , IntersectedTetEdges,
                                         ElementalDistances_T2 , IndexNodes_T2 );

    // Per tet node, keep the distance of smaller magnitude.
    for (unsigned int i_TetNode = 0; i_TetNode < 4; i_TetNode++)
    {
        const double dist_T1 = ElementalDistances_T1[i_TetNode];
        const double dist_T2 = ElementalDistances_T2[i_TetNode];
        ElementalDistances[i_TetNode] = (fabs(dist_T1) < fabs(dist_T2)) ? dist_T1 : dist_T2;
    }
}
///******************************************************************************************************************
///******************************************************************************************************************
/// Selects the node indices of the second triangle of the quadrilateral:
/// the fourth intersection node plus the two nodes of the first triangle
/// (indices 0..2) lying closest to it, i.e. all of 0..2 except the one
/// farthest from node 3.
void FindIndexNodesOfTriangle2(std::vector<IntersectionNodeStruct> NodesOfApproximatedStructure,
                               array_1d<unsigned int,3>& IndexNodes_T2)
{
    array_1d<double,3> FourthNode = NodesOfApproximatedStructure[3].Coordinates;
    array_1d<double,3> CurrentNode;
    array_1d<double,3> Diff;
    double largestDist = 0;
    unsigned int excludedIndex = 1000000; // sentinel: no node excluded yet

    // Find the triangle node farthest from the fourth intersection node
    for(unsigned int i = 0 ; i < 3 ; i++)
    {
        CurrentNode = NodesOfApproximatedStructure[i].Coordinates;
        Diff[0] = FourthNode[0] - CurrentNode[0];
        Diff[1] = FourthNode[1] - CurrentNode[1];
        Diff[2] = FourthNode[2] - CurrentNode[2];
        const double dist = norm_2(Diff);
        if(dist > largestDist)
        {
            largestDist = dist;
            excludedIndex = i;
        }
    }

    // Copy the two non-excluded indices, then append the fourth node (index 3)
    unsigned int cursor = 0;
    for(unsigned int k = 0 ; k < 3 ; k++)
    {
        if(k != excludedIndex)
        {
            IndexNodes_T2[cursor] = k;
            cursor += 1;
        }
    }
    IndexNodes_T2[2] = 3;
}
///******************************************************************************************************************
///******************************************************************************************************************
/// Computes the signed distance of each of the 4 nodes of the fluid tetrahedron
/// to the triangle spanned by the intersection nodes selected via IndexNodes.
/// The unsigned distance comes from GeometryUtils::PointDistanceToTriangle3D;
/// the sign is decided from the structure normals stored at the intersection
/// points of the cut tet edges:
///   inner product > epsilon for some point  -> node is outside the structure (+)
///   all inner products within +/- epsilon   -> node lies on the structure (0)
///   otherwise                               -> node is inside the structure (-)
void CalcSignedDistancesToApproxTriangle( ModelPart::ElementsContainerType::iterator& i_fluid_element,
std::vector<IntersectionNodeStruct> NodesOfApproximatedStructure,
std::vector<TetEdgeStruct> IntersectedTetEdges,
array_1d<double,4>& ElementalDistances,
array_1d<unsigned int,3> IndexNodes)
{
Geometry< Node<3> >& rFluidGeom = i_fluid_element->GetGeometry();
for(unsigned int i_TetNode = 0 ; i_TetNode < 4 ; i_TetNode++)
{
array_1d<double,3> TetNode;
array_1d<double,3> IntersectionNode1Coord;
array_1d<double,3> IntersectionNode2Coord;
array_1d<double,3> IntersectionNode3Coord;
Point ApproxTrianglePoint1;
Point ApproxTrianglePoint2;
Point ApproxTrianglePoint3;
double UnsignedDistance;
double InnerProduct;
unsigned int IndexNode1;
unsigned int IndexNode2;
unsigned int IndexNode3;
// Get coordinates of the fluid element nodes
TetNode = rFluidGeom(i_TetNode)->Coordinates();
// Build the approximating triangle from the three selected intersection nodes
IndexNode1 = IndexNodes[0];
IndexNode2 = IndexNodes[1];
IndexNode3 = IndexNodes[2];
IntersectionNode1Coord = NodesOfApproximatedStructure[IndexNode1].Coordinates;
IntersectionNode2Coord = NodesOfApproximatedStructure[IndexNode2].Coordinates;
IntersectionNode3Coord = NodesOfApproximatedStructure[IndexNode3].Coordinates;
ApproxTrianglePoint1 = Point(IntersectionNode1Coord[0] , IntersectionNode1Coord[1] , IntersectionNode1Coord[2]);
ApproxTrianglePoint2 = Point(IntersectionNode2Coord[0] , IntersectionNode2Coord[1] , IntersectionNode2Coord[2]);
ApproxTrianglePoint3 = Point(IntersectionNode3Coord[0] , IntersectionNode3Coord[1] , IntersectionNode3Coord[2]);
// Compute distance from tet node to current triangle
UnsignedDistance = GeometryUtils::PointDistanceToTriangle3D(ApproxTrianglePoint1, ApproxTrianglePoint2 , ApproxTrianglePoint3 , Point(TetNode[0],TetNode[1],TetNode[2]));
// Determine the sign by testing the tet node against the structure normal
// stored at every intersection point of every cut tet edge
bool TetNodeIsInsideStructure = true;
bool TetNodeIsOnStructure = true;
array_1d <double,3> DistVec;
array_1d <double,3> NormalAtIntersectionNode;
for( unsigned int i_TetEdge = 0 ; i_TetEdge < IntersectedTetEdges.size() ; i_TetEdge++ )
{
for( unsigned int i_IntNode = 0 ; i_IntNode < IntersectedTetEdges[i_TetEdge].IntNodes.size() ; i_IntNode++ )
{
// Vector from the intersection point towards the tet node
DistVec[0] = TetNode[0] - IntersectedTetEdges[i_TetEdge].IntNodes[i_IntNode].Coordinates[0];
DistVec[1] = TetNode[1] - IntersectedTetEdges[i_TetEdge].IntNodes[i_IntNode].Coordinates[1];
DistVec[2] = TetNode[2] - IntersectedTetEdges[i_TetEdge].IntNodes[i_IntNode].Coordinates[2];
NormalAtIntersectionNode = IntersectedTetEdges[i_TetEdge].IntNodes[i_IntNode].StructElemNormal;
// Positive projection onto the structure normal means "outside"
// (epsilon is the class-wide tolerance, 1e-12)
InnerProduct = inner_prod(DistVec,NormalAtIntersectionNode);
if(InnerProduct > epsilon)
{
TetNodeIsInsideStructure = false;
TetNodeIsOnStructure = false;
}
else if (InnerProduct < -epsilon)
TetNodeIsOnStructure = false;
}
}
// Assign distances as nodal solution values ( + = outside of structure, - = inside structure)
if( TetNodeIsInsideStructure == true )
ElementalDistances[i_TetNode] = -UnsignedDistance;
else if( TetNodeIsOnStructure == true )
ElementalDistances[i_TetNode] = 0;
else
ElementalDistances[i_TetNode] = +UnsignedDistance;
}
}
///******************************************************************************************************************
///******************************************************************************************************************
/// Stores the freshly computed distances on the element (ELEMENTAL_DISTANCES)
/// and on its nodes (DISTANCE, used for visualization). In both cases the
/// value of smaller magnitude between the stored and the new distance wins.
void AssignDistancesToElements(ModelPart::ElementsContainerType::iterator& i_fluid_element,
array_1d<double,4> ElementalDistances)
{
Geometry< Node<3> >& rFluidGeom = i_fluid_element->GetGeometry();
// Fetch the currently stored elemental distances once, instead of calling
// GetValue twice per node inside the loop
const array_1d<double,4>& rStoredDistances = i_fluid_element->GetValue(ELEMENTAL_DISTANCES);
array_1d<double,4> MinElementalDistances;
for(unsigned int i_TetNode = 0 ; i_TetNode < 4 ; i_TetNode++)
{
// Assign distances to the element, if a smaller value could be found
if( fabs(ElementalDistances[i_TetNode]) < fabs(rStoredDistances[i_TetNode]) )
MinElementalDistances[i_TetNode] = ElementalDistances[i_TetNode];
else
MinElementalDistances[i_TetNode] = rStoredDistances[i_TetNode];
// Assign distances to the single nodes (for visualization), if a smaller value could be found
double& rNodeDistance = rFluidGeom[i_TetNode].GetSolutionStepValue(DISTANCE);
if( fabs(ElementalDistances[i_TetNode]) < fabs(rNodeDistance) )
rNodeDistance = ElementalDistances[i_TetNode];
}
i_fluid_element->SetValue(ELEMENTAL_DISTANCES,MinElementalDistances);
}
///******************************************************************************************************************
///******************************************************************************************************************
/*
void GenerateOctree()
{
Timer::Start("Generating Octree");
// Setting the boundingbox for non-normalized coordinates
const int dimension = 3;
double boundingBox_low[3],boundingBox_high[3];
for(int i = 0; i < dimension; i++)
{
boundingBox_low[i] = mrSkinModelPart.NodesBegin()->Coordinates()[i];
boundingBox_high[i] = mrSkinModelPart.NodesBegin()->Coordinates()[i];
}
for(ModelPart::NodeIterator i_node = mrSkinModelPart.NodesBegin();
i_node != mrSkinModelPart.NodesEnd();
i_node++)
{
for(int i = 0; i < dimension; i++)
{
if(i_node->Coordinates()[i] < boundingBox_low[i]) boundingBox_low[i] = i_node->Coordinates()[i];
if(i_node->Coordinates()[i] > boundingBox_high[i]) boundingBox_high[i] = i_node->Coordinates()[i];
}
}
mOctree.SetBoundingBox(boundingBox_low,boundingBox_high);
//mOctree.RefineWithUniformSize(0.0625);
for(ModelPart::NodeIterator i_node = mrSkinModelPart.NodesBegin() ; i_node != mrSkinModelPart.NodesEnd() ; i_node++)
{
double temp_point[3];
const array_1d<double,3>& r_coordinates = i_node->Coordinates();
temp_point[0] = r_coordinates[0];
temp_point[1] = r_coordinates[1];
temp_point[2] = r_coordinates[2];
mOctree.Insert(temp_point);
}
//mOctree.Constrain2To1(); // To be removed. Pooyan.
for(ModelPart::ElementIterator i_element = mrSkinModelPart.ElementsBegin() ; i_element != mrSkinModelPart.ElementsEnd() ; i_element++)
{
mOctree.Insert(*(i_element).base());
}
Timer::Stop("Generating Octree");
// octree.Insert(*(mrSkinModelPart.ElementsBegin().base()));
KRATOS_WATCH(mOctree);
// std::cout << "######## WRITING OCTREE MESH #########" << std::endl;
// std::ofstream myfile;
// myfile.open ("unserbaum.post.msh");
// mOctree.PrintGiDMesh(myfile);
// myfile.close();
}
*/
/// Builds the octree used for the distance computation:
///   1. the bounding box is taken from the FLUID model part nodes,
///   2. the SKIN (structure) nodes are inserted to drive the refinement,
///   3. the skin conditions are inserted as the intersected objects.
void GenerateOctree()
{
Timer::Start("Generating Octree");
//std::cout << "Generating the Octree..." << std::endl;
double low[3];
double high[3];
// Seed the bounding box with the first fluid node
for (int i = 0 ; i < 3; i++)
{
low[i] = high[i] = mrFluidModelPart.NodesBegin()->Coordinates()[i];
}
// loop over all fluid nodes to find the bounding box of the fluid domain
for(ModelPart::NodeIterator i_node = mrFluidModelPart.NodesBegin();
i_node != mrFluidModelPart.NodesEnd();
i_node++)
{
const array_1d<double,3>& r_coordinates = i_node->Coordinates();
for (int i = 0 ; i < 3; i++)
{
low[i] = r_coordinates[i] < low[i] ? r_coordinates[i] : low[i];
high[i] = r_coordinates[i] > high[i] ? r_coordinates[i] : high[i];
}
}
// KRATOS_WATCH( low[0] )
// KRATOS_WATCH( low[1] )
// KRATOS_WATCH( low[2] )
// KRATOS_WATCH( "" )
// KRATOS_WATCH( high[0] )
// KRATOS_WATCH( high[1] )
// KRATOS_WATCH( high[2] )
mOctree.SetBoundingBox(low,high);
//mOctree.RefineWithUniformSize(0.0625);
// loop over all structure nodes
for(ModelPart::NodeIterator i_node = mrSkinModelPart.NodesBegin();
i_node != mrSkinModelPart.NodesEnd();
i_node++)
{
double temp_point[3];
temp_point[0] = i_node->X();
temp_point[1] = i_node->Y();
temp_point[2] = i_node->Z();
mOctree.Insert(temp_point);
}
//mOctree.Constrain2To1(); // To be removed. Pooyan.
// loop over all structure conditions and insert them as octree objects
//for(ModelPart::ElementIterator i_element = mrSkinModelPart.ElementsBegin();
// i_element != mrSkinModelPart.ElementsEnd();
// i_element++)
for(ModelPart::ConditionIterator i_cond = mrSkinModelPart.ConditionsBegin();
i_cond != mrSkinModelPart.ConditionsEnd();
i_cond++)
{
mOctree.Insert(*(i_cond).base());
}
Timer::Stop("Generating Octree");
// KRATOS_WATCH(mOctree);
// std::cout << "######## WRITING OCTREE MESH #########" << std::endl;
// std::ofstream myfile;
// myfile.open ("octree.post.msh");
// mOctree.PrintGiDMesh(myfile);
// myfile.close();
//std::cout << "Generating the Octree finished" << std::endl;
}
///******************************************************************************************************************
///******************************************************************************************************************
///******************************************************************************************************************
///******************************************************************************************************************
/// Allocates the per-cell data of every octree leaf (in parallel) and then
/// creates the corner nodes of each leaf, numbering them consecutively
/// starting after the body model part's node count.
void GenerateNodes()
{
Timer::Start("Generating Nodes");
std::vector<OctreeType::cell_type*> all_leaves;
mOctree.GetAllLeavesVector(all_leaves);
// Data allocation is independent per leaf, so it can run in parallel
IndexPartition<std::size_t>(all_leaves.size()).for_each([&](std::size_t Index){
*(all_leaves[Index]->pGetDataPointer()) = ConfigurationType::AllocateData();
});
// Node creation is sequential: ids are assigned from a shared counter and
// node pointers are shared with neighbouring cells
std::size_t last_id = mrBodyModelPart.NumberOfNodes() + 1;
//KRATOS_WATCH(all_leaves.size());
for (std::size_t i = 0; i < all_leaves.size(); i++)
{
//KRATOS_WATCH(i)
CellType* cell = all_leaves[i];
GenerateCellNode(cell, last_id);
}
Timer::Stop("Generating Nodes");
}
/// Creates the node data for each of the 8 corner positions of the given cell
/// if it does not exist yet, assigns it the next consecutive id, registers it
/// in mOctreeNodes, and shares the pointer with all neighbouring cells.
void GenerateCellNode(CellType* pCell, std::size_t& LastId)
{
for (int i_pos=0; i_pos < 8; i_pos++) // position 8 is for center
{
// Take a reference to the slot once instead of re-dereferencing the
// cell data container for every access
ConfigurationType::cell_node_data_type*& p_node = (*(pCell->pGetData()))[i_pos];
if(p_node == 0)
{
p_node = new ConfigurationType::cell_node_data_type;
p_node->Id() = LastId++;
//KRATOS_WATCH(LastId)
mOctreeNodes.push_back(p_node);
// Neighbouring cells share this corner: propagate the pointer
SetNodeInNeighbours(pCell,i_pos,p_node);
}
}
}
/// Propagates a newly created corner node to all neighbouring cells that share
/// the same geometric position, so every cell sees a single shared node object.
// NOTE(review): the loop bound 8 is the number of neighbour directions queried
// via GetNeighbourKey — confirm against the octree binary cell API.
void SetNodeInNeighbours(CellType* pCell, int Position, CellNodeDataType* pNode)
{
CellType::key_type point_key[3];
pCell->GetKey(Position, point_key);
for (std::size_t i_direction = 0; i_direction < 8; i_direction++) {
CellType::key_type neighbour_key[3];
if (pCell->GetNeighbourKey(Position, i_direction, neighbour_key)) {
CellType* neighbour_cell = mOctree.pGetCell(neighbour_key);
// Skip missing neighbours and the cell itself
if (!neighbour_cell || (neighbour_cell == pCell))
continue;
// Local slot of the shared corner inside the neighbour cell
std::size_t position = neighbour_cell->GetLocalPosition(point_key);
// The slot should still be empty; a filled slot indicates an
// inconsistent position computation
if((*neighbour_cell->pGetData())[position])
{
std::cout << "ERROR!! Bad Position calculated!!!!!!!!!!! position :" << position << std::endl;
continue;
}
(*neighbour_cell->pGetData())[position] = pNode;
}
}
}
/// Computes the signed DISTANCE of every fluid node by ray casting through the
/// octree (delegating to CalculateNodeDistance), in parallel over the nodes.
void CalculateDistance2()
{
Timer::Start("Calculate Distances2");
ModelPart::NodesContainerType::ContainerType& nodes = mrFluidModelPart.NodesArray();
int nodes_size = nodes.size();
// // first of all we reset the node distance to 1.00 which is the maximum distnace in our normalized space.
//#pragma omp parallel for firstprivate(nodes_size)
// for(int i = 0 ; i < nodes_size ; i++)
// nodes[i]->GetSolutionStepValue(DISTANCE) = 1.00;
// NOTE: the previous version also collected all octree leaves here
// (GetAllLeavesVector) for CalculateNotEmptyLeavesDistance, but that call
// was commented out, leaving the leaves vector unused — removed.
IndexPartition<std::size_t>(nodes_size).for_each([&](std::size_t Index){
CalculateNodeDistance(*(nodes[Index]));
});
Timer::Stop("Calculate Distances2");
}
// void CalculateDistance3()
// {
// Timer::Start("Calculate Distances2");
// ModelPart::NodesContainerType::ContainerType& nodes = mrFluidModelPart.NodesArray();
// int nodes_size = nodes.size();
//// // first of all we reset the node distance to 1.00 which is the maximum distnace in our normalized space.
//#pragma omp parallel for firstprivate(nodes_size)
// for(int i = 0 ; i < nodes_size ; i++)
// nodes[i]->GetSolutionStepValue(DISTANCE) = 1.00;
// std::vector<CellType*> leaves;
// mOctree.GetAllLeavesVector(leaves);
// int leaves_size = leaves.size();
// for(int i = 0 ; i < leaves_size ; i++)
// CalculateNotEmptyLeavesDistance(leaves[i]);
//#pragma omp parallel for firstprivate(nodes_size)
// for(int i = 0 ; i < nodes_size ; i++)
// {
// CalculateNodeDistance(*(nodes[i]));
// }
// Timer::Stop("Calculate Distances2");
// }
// void CalculateDistance4()
// {
// Timer::Start("Calculate Distances3");
// ModelPart::NodesContainerType::ContainerType& nodes = mrFluidModelPart.NodesArray();
// int nodes_size = nodes.size();
// std::vector<CellType*> leaves;
// mOctree.GetAllLeavesVector(leaves);
// int leaves_size = leaves.size();
//#pragma omp parallel for firstprivate(nodes_size)
// for(int i = 0 ; i < nodes_size ; i++)
// {
// CalculateNodeDistanceFromCell(*(nodes[i]));
// }
// Timer::Stop("Calculate Distances3");
// }
/// Computes the distances stored on the OCTREE nodes: first resets them to the
/// maximum (1.00 in normalized space), then seeds the nodes of non-empty
/// leaves with the distance to the contained objects, and finally sweeps rays
/// to sign/propagate the distances.
void CalculateDistance()
{
Timer::Start("Calculate Distances");
ConfigurationType::data_type& nodes = mOctreeNodes;
int nodes_size = nodes.size();
// first of all we reset the node distance to 1.00 which is the maximum distance in our normalized space.
IndexPartition<std::size_t>(nodes_size).for_each([&](std::size_t Index){
nodes[Index]->Distance() = 1.00;
});
std::vector<CellType*> leaves;
mOctree.GetAllLeavesVector(leaves);
int leaves_size = leaves.size();
for(int i = 0 ; i < leaves_size ; i++)
CalculateNotEmptyLeavesDistance(leaves[i]);
// NOTE(review): only direction 0 (x) is swept — the loop bound is 1, not 3.
// Presumably intentional (one sweep suffices after seeding); confirm.
for(int i_direction = 0 ; i_direction < 1 ; i_direction++)
{
//#pragma omp parallel for firstprivate(nodes_size)
for(int i = 0 ; i < nodes_size ; i++)
{
// Skip nodes on the upper boundary of the normalized (0,1)^3 box
if(nodes[i]->X() < 1.00 && nodes[i]->Y() < 1.00 && nodes[i]->Z() < 1.00)
// if((*nodes[i])[i_direction] == 0.00)
CalculateDistance(*(nodes[i]), i_direction);
}
}
Timer::Stop("Calculate Distances");
}
/// Casts a ray from the lower domain boundary through rNode's position along
/// i_direction, collects all intersections with the skin, and updates the
/// distance of every octree node the ray passes through. The sign is obtained
/// by parity counting: each crossing flips ray_color (+1 outside, -1 inside).
void CalculateDistance(CellNodeDataType& rNode, int i_direction)
{
double coords[3] = {rNode.X(), rNode.Y(), rNode.Z()};
// KRATOS_WATCH_3(coords);
//This function must color the positions in space defined by 'coords'.
//coords is of dimension (3) normalized in (0,1)^3 space
typedef Element::GeometryType triangle_type;
typedef std::vector<std::pair<double, triangle_type*> > intersections_container_type;
intersections_container_type intersections;
ConfigurationType::data_type nodes_array;
const double epsilon = 1e-12;
double distance = 1.0;
// Creating the ray
double ray[3] = {coords[0], coords[1], coords[2]};
ray[i_direction] = 0; // starting from the lower extreme
// KRATOS_WATCH_3(ray)
// Collect the sorted, deduplicated hit points and the octree nodes on the ray
GetIntersectionsAndNodes(ray, i_direction, intersections, nodes_array);
// KRATOS_WATCH(nodes_array.size())
for (std::size_t i_node = 0; i_node < nodes_array.size() ; i_node++)
{
double coord = (*nodes_array[i_node])[i_direction];
// KRATOS_WATCH(intersections.size());
int ray_color= 1;
// Walk the sorted intersections: every crossing strictly below the node
// flips the sign and updates the distance to the last crossing passed;
// a crossing within epsilon means the node lies on the interface
std::vector<std::pair<double, Element::GeometryType*> >::iterator i_intersection = intersections.begin();
while (i_intersection != intersections.end()) {
double d = coord - i_intersection->first;
if (d > epsilon) {
ray_color = -ray_color;
distance = d;
} else if (d > -epsilon) {//interface
distance = 0.00;
break;
} else {
// first crossing beyond the node: take it if it is closer
if(distance > -d)
distance = -d;
break;
}
i_intersection++;
}
distance *= ray_color;
// Keep the value of smaller magnitude; otherwise only correct the sign
double& node_distance = nodes_array[i_node]->Distance();
if(fabs(distance) < fabs(node_distance))
node_distance = distance;
else if (distance*node_distance < 0.00) // assigning the correct sign
node_distance = -node_distance;
}
}
/// For every corner node of a non-empty leaf cell, stores the unsigned
/// distance to the closest object intersecting the cell, if smaller than the
/// distance currently stored on that node.
void CalculateNotEmptyLeavesDistance(CellType* pCell)
{
//typedef Element::GeometryType triangle_type;
typedef OctreeType::cell_type::object_container_type object_container_type;
object_container_type* objects = (pCell->pGetObjects());
// There are no intersection in empty cells
if (objects->empty())
return;
for (int i_pos=0; i_pos < 8; i_pos++) // position 8 is for center
{
// The corner coordinates depend only on the position, so compute them
// once per corner instead of once per object (hoisted out of the loop)
CellType::key_type keys[3];
pCell->GetKey(i_pos,keys);
double cell_point[3];
mOctree.CalculateCoordinates(keys,cell_point);
double distance = 1.00; // maximum distance is 1.00
for(object_container_type::iterator i_object = objects->begin(); i_object != objects->end(); i_object++)
{
double d = GeometryUtils::PointDistanceToTriangle3D((*i_object)->GetGeometry()[0], (*i_object)->GetGeometry()[1], (*i_object)->GetGeometry()[2], Point(cell_point[0], cell_point[1], cell_point[2]));
if(d < distance)
distance = d;
}
// Keep the smaller of the stored and the freshly computed distance
double& node_distance = (*(pCell->pGetData()))[i_pos]->Distance();
if(distance < node_distance)
node_distance = distance;
}
}
/// Updates the nodal DISTANCE value with the ray-cast distance of the node's
/// position: the value of smaller magnitude wins; if only the signs differ,
/// the stored magnitude is kept but its sign is corrected.
void CalculateNodeDistance(Node<3>& rNode)
{
double position[3] = {rNode.X(), rNode.Y(), rNode.Z()};
const double candidate = DistancePositionInSpace(position);
double& rStoredDistance = rNode.GetSolutionStepValue(DISTANCE);
//const double epsilon = 1.00e-12;
if(fabs(rStoredDistance) > fabs(candidate))
{
rStoredDistance = candidate; // smaller magnitude wins
}
else if (candidate*rStoredDistance < 0.00)
{
rStoredDistance = -rStoredDistance; // keep magnitude, fix the sign
}
}
// void CalculateNodeDistanceFromCell(Node<3>& rNode)
// {
// OctreeType::key_type node_key[3] = {octree->CalcKeyNormalized(rNode.X()), octree->CalcKeyNormalized(rNode.Y()), octree->CalcKeyNormalized(rNode.Z())};
// OctreeType::cell_type* pcell = octree->pGetCell(node_key);
// object_container_type* objects = (pCell->pGetObjects());
// // We interpolate the cell distances for the node in empty cells
// if (objects->empty())
// {
// }
// double distance = DistancePositionInSpace(coord);
// double& node_distance = rNode.GetSolutionStepValue(DISTANCE);
// //const double epsilon = 1.00e-12;
// if(fabs(node_distance) > fabs(distance))
// node_distance = distance;
// else if (distance*node_distance < 0.00) // assigning the correct sign
// node_distance = -node_distance;
// }
/// Computes the signed distance of an arbitrary point (original, unnormalized
/// coordinates) to the skin by casting one axis-aligned ray per direction and
/// parity-counting the crossings (+ outside, - inside, 0 on the interface).
/// Returns the directional distance of smallest magnitude.
double DistancePositionInSpace(double* coords)
{
//This function must color the positions in space defined by 'coords'.
//coords is of dimension (3) normalized in (0,1)^3 space
typedef Element::GeometryType triangle_type;
typedef std::vector<std::pair<double, triangle_type*> > intersections_container_type;
intersections_container_type intersections;
const int dimension = 3;
const double epsilon = 1e-12;
// One candidate distance per coordinate direction, initialized to the maximum
double distances[3] = {1.0, 1.0, 1.0};
for (int i_direction = 0; i_direction < dimension; i_direction++)
{
// Creating the ray
double ray[3] = {coords[0], coords[1], coords[2]};
mOctree.NormalizeCoordinates(ray);
ray[i_direction] = 0; // starting from the lower extreme
// Sorted, deduplicated hit points of the ray with the skin
GetIntersections(ray, i_direction, intersections);
// if(intersections.size() == 1)
// KRATOS_WATCH_3(ray)
// KRATOS_WATCH(intersections.size());
int ray_color= 1;
// Walk the crossings below the point: each one flips the sign and
// records the distance to the last crossing passed; a crossing within
// epsilon marks the interface; the first crossing beyond the point is
// taken if it is closer
std::vector<std::pair<double, Element::GeometryType*> >::iterator i_intersection = intersections.begin();
while (i_intersection != intersections.end()) {
double d = coords[i_direction] - i_intersection->first;
if (d > epsilon) {
ray_color = -ray_color;
distances[i_direction] = d;
// if(distances[i_direction] > d) // I think this is redundunt. Pooyan.
// {
// if(ray_color > 0.00)
// distances[i_direction] = d;
// else
// distances[i_direction] = -d;
// }
} else if (d > -epsilon) {//interface
distances[i_direction] = 0.00;
break;
} else {
if(distances[i_direction] > -d)
distances[i_direction] = -d;
break;
}
i_intersection++;
}
distances[i_direction] *= ray_color;
}
// if(distances[0]*distances[1] < 0.00 || distances[2]*distances[1] < 0.00)
// KRATOS_WATCH_3(distances);
// Pick the directional distance of smallest magnitude (sign preserved)
double distance = (fabs(distances[0]) > fabs(distances[1])) ? distances[1] : distances[0];
distance = (fabs(distance) > fabs(distances[2])) ? distances[2] : distance;
return distance;
}
/// Marches an axis-aligned ray cell by cell through the octree, collecting
/// (a) all intersection points of the ray with the objects in the traversed
/// cells, and (b) the octree nodes lying exactly on the ray line. The
/// intersections are sorted and deduplicated (hits closer than epsilon are
/// considered the same point) before returning.
void GetIntersectionsAndNodes(double* ray, int direction, std::vector<std::pair<double,Element::GeometryType*> >& intersections, ConfigurationType::data_type& rNodesArray)
{
//This function passes the ray through the model and gives the hit point to all objects in its way
//ray is of dimension (3) normalized in (0,1)^3 space
// direction can be 0,1,2 which are x,y and z respectively
const double epsilon = 1.00e-12;
// first clearing the intersections points vector
intersections.clear();
OctreeType* octree = &mOctree;
OctreeType::key_type ray_key[3] = {octree->CalcKeyNormalized(ray[0]), octree->CalcKeyNormalized(ray[1]), octree->CalcKeyNormalized(ray[2])};
OctreeType::key_type cell_key[3];
// getting the entrance cell from lower extreme
ray_key[direction] = 0;
OctreeType::cell_type* cell = octree->pGetCell(ray_key);
while (cell) {
// If the cell has a corner node exactly at the ray key, record that node
std::size_t position = cell->GetLocalPosition(ray_key); // Is this the local position!?!?!?!
OctreeType::key_type node_key[3];
cell->GetKey(position, node_key);
if((node_key[0] == ray_key[0]) && (node_key[1] == ray_key[1]) && (node_key[2] == ray_key[2]))
{
if(cell->pGetData())
{
if(cell->pGetData()->size() > position)
{
CellNodeDataType* p_node = (*cell->pGetData())[position];
if(p_node)
{
//KRATOS_WATCH(p_node->Id())
rNodesArray.push_back(p_node);
}
}
else
KRATOS_WATCH(cell->pGetData()->size())
}
}
// std::cout << ".";
// Collect the ray-object intersections inside this cell
GetCellIntersections(cell, ray, ray_key, direction, intersections);
// Add the cell's middle node if existed
// cell->GetKey(8, cell_key); // 8 is the central position
// ray_key[direction]=cell_key[direction]; // positioning the ray in the middle of cell in its direction
// position = cell->GetLocalPosition(ray_key);
// if(position < 27) // principal nodes
// {
// if(cell->pGetData())
// {
// if(cell->pGetData()->size() > position)
// {
// Node<3>* p_node = (*cell->pGetData())[position];
// if(p_node)
// {
// //KRATOS_WATCH(p_node->Id())
// rNodesArray.push_back(p_node);
// }
// }
// else
// KRATOS_WATCH(cell->pGetData()->size())
// }
// }
// else
// {
// KRATOS_WATCH(position);
// KRATOS_WATCH(*cell);
// }
// go to the next cell
if (cell->GetNeighbourKey(1 + direction * 2, cell_key)) {
ray_key[direction] = cell_key[direction];
cell = octree->pGetCell(ray_key);
ray_key[direction] -= 1 ;//the key returned by GetNeighbourKey is inside the cell (minkey +1), to ensure that the corresponding
//cell get in pGetCell is the right one.
} else
cell = NULL;
}
// KRATOS_WATCH(rNodesArray.size());
// now eliminating the repeated objects
if (!intersections.empty()) {
//sort
std::sort(intersections.begin(), intersections.end());
// unique
std::vector<std::pair<double, Element::GeometryType*> >::iterator i_begin = intersections.begin();
std::vector<std::pair<double, Element::GeometryType*> >::iterator i_intersection = intersections.begin();
while (++i_begin != intersections.end()) {
// considering the very near points as the same points
if (fabs(i_begin->first - i_intersection->first) > epsilon) // if the hit points are far enough they are not the same
*(++i_intersection) = *i_begin;
}
intersections.resize((++i_intersection) - intersections.begin());
}
}
/// Marches an axis-aligned ray cell by cell through the octree and collects
/// the intersection points of the ray with the objects in each traversed
/// cell. The result is sorted along the ray and deduplicated (hits closer
/// than epsilon count as one). Same traversal as GetIntersectionsAndNodes,
/// but without gathering octree nodes.
void GetIntersections(double* ray, int direction, std::vector<std::pair<double,Element::GeometryType*> >& intersections)
{
//This function passes the ray through the model and gives the hit point to all objects in its way
//ray is of dimension (3) normalized in (0,1)^3 space
// direction can be 0,1,2 which are x,y and z respectively
const double epsilon = 1.00e-12;
// first clearing the intersections points vector
intersections.clear();
OctreeType* octree = &mOctree;
OctreeType::key_type ray_key[3] = {octree->CalcKeyNormalized(ray[0]), octree->CalcKeyNormalized(ray[1]), octree->CalcKeyNormalized(ray[2])};
OctreeType::key_type cell_key[3];
// getting the entrance cell from lower extreme
OctreeType::cell_type* cell = octree->pGetCell(ray_key);
while (cell) {
// std::cout << ".";
GetCellIntersections(cell, ray, ray_key, direction, intersections);
// go to the next cell
if (cell->GetNeighbourKey(1 + direction * 2, cell_key)) {
ray_key[direction] = cell_key[direction];
cell = octree->pGetCell(ray_key);
ray_key[direction] -= 1 ;//the key returned by GetNeighbourKey is inside the cell (minkey +1), to ensure that the corresponding
//cell get in pGetCell is the right one.
} else
cell = NULL;
}
// now eliminating the repeated objects
if (!intersections.empty()) {
//sort
std::sort(intersections.begin(), intersections.end());
// unique
std::vector<std::pair<double, Element::GeometryType*> >::iterator i_begin = intersections.begin();
std::vector<std::pair<double, Element::GeometryType*> >::iterator i_intersection = intersections.begin();
while (++i_begin != intersections.end()) {
// considering the very near points as the same points
if (fabs(i_begin->first - i_intersection->first) > epsilon) // if the hit points are far enough they are not the same
*(++i_intersection) = *i_begin;
}
intersections.resize((++i_intersection) - intersections.begin());
}
}
/// Intersects the segment of the ray contained in one octree cell with every
/// object stored in that cell, appending each (coordinate-along-ray, geometry)
/// hit to the intersections container. Returns 0 in all cases.
int GetCellIntersections(OctreeType::cell_type* cell, double* ray,
OctreeType::key_type* ray_key, int direction,
std::vector<std::pair<double, Element::GeometryType*> >& intersections) {
//This function passes the ray through the cell and gives the hit point to all objects in its way
//ray is of dimension (3) normalized in (0,1)^3 space
// direction can be 0,1,2 which are x,y and z respectively
//typedef Element::GeometryType triangle_type;
typedef OctreeType::cell_type::object_container_type object_container_type;
object_container_type* objects = (cell->pGetObjects());
// There are no intersection in empty cells
if (objects->empty())
return 0;
// std::cout << "X";
// calculating the two extreme of the ray segment inside the cell
double ray_point1[3] = {ray[0], ray[1], ray[2]};
double ray_point2[3] = {ray[0], ray[1], ray[2]};
double normalized_coordinate;
mOctree.CalculateCoordinateNormalized(ray_key[direction], normalized_coordinate);
ray_point1[direction] = normalized_coordinate;
ray_point2[direction] = ray_point1[direction] + mOctree.CalcSizeNormalized(cell);
// Intersection is done in original (unnormalized) coordinates
mOctree.ScaleBackToOriginalCoordinate(ray_point1);
mOctree.ScaleBackToOriginalCoordinate(ray_point2);
for (object_container_type::iterator i_object = objects->begin(); i_object != objects->end(); i_object++) {
double intersection[3]={0.00,0.00,0.00};
int is_intersected = IntersectionTriangleSegment((*i_object)->GetGeometry(), ray_point1, ray_point2, intersection); // This intersection has to be optimized for axis aligned rays
if (is_intersected == 1) // There is an intersection but not coplanar
intersections.push_back(std::pair<double, Element::GeometryType*>(intersection[direction], &((*i_object)->GetGeometry())));
//else if(is_intersected == 2) // coplanar case
}
return 0;
}
/// Ray/triangle intersection after Sunday's softSurfer algorithm.
/// Returns:  1 = proper intersection (point written to IntersectionPoint),
///           2 = ray lies in the triangle plane (coplanar),
///           0 = no intersection, -1 = degenerate triangle.
/// NOTE: despite the segment endpoints, the parametric test only rejects
/// r < 0 — the documented (r > 1.0) segment check is intentionally omitted
/// (see inline comment), so hits beyond RayPoint2 are also reported.
int IntersectionTriangleSegment(Element::GeometryType& rGeometry, double* RayPoint1, double* RayPoint2, double* IntersectionPoint)
{
// This is the adaption of the implemnetation provided in:
// http://www.softsurfer.com/Archive/algorithm_0105/algorithm_0105.htm#intersect_RayTriangle()
const double epsilon = 1.00e-12;
array_1d<double,3> u, v, n; // triangle vectors
array_1d<double,3> dir, w0, w; // ray vectors
double r, a, b; // params to calc ray-plane intersect
// get triangle edge vectors and plane normal
u = rGeometry[1] - rGeometry[0];
v = rGeometry[2] - rGeometry[0];
MathUtils<double>::CrossProduct(n, u, v); // cross product
if (norm_2(n) == 0) // triangle is degenerate
return -1; // do not deal with this case
for(int i = 0 ; i < 3 ; i++)
{
dir[i] = RayPoint2[i] - RayPoint1[i]; // ray direction vector
w0[i] = RayPoint1[i] - rGeometry[0][i];
}
a = -inner_prod(n,w0);
b = inner_prod(n,dir);
if (fabs(b) < epsilon) { // ray is parallel to triangle plane
if (a == 0) // ray lies in triangle plane
return 2;
else return 0; // ray disjoint from plane
}
// get intersect point of ray with triangle plane
r = a / b;
if (r < 0.0) // ray goes away from triangle
return 0; // => no intersect
// for a segment, also test if (r > 1.0) => no intersect
for(int i = 0 ; i < 3 ; i++)
IntersectionPoint[i] = RayPoint1[i] + r * dir[i]; // intersect point of ray and plane
// is I inside T?
// Barycentric-coordinate test in the triangle plane
double uu, uv, vv, wu, wv, D;
uu = inner_prod(u,u);
uv = inner_prod(u,v);
vv = inner_prod(v,v);
for(int i = 0 ; i < 3 ; i++)
w[i] = IntersectionPoint[i] - rGeometry[0][i];
wu = inner_prod(w,u);
wv = inner_prod(w,v);
D = uv * uv - uu * vv;
// get and test parametric coords
double s, t;
s = (uv * wv - vv * wu) / D;
if (s < 0.0 - epsilon || s > 1.0 + epsilon) // I is outside T
return 0;
t = (uv * wu - uu * wv) / D;
if (t < 0.0 - epsilon || (s + t) > 1.0 + epsilon) // I is outside T
return 0;
return 1; // I is in T
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
/// Returns the short identifier used by Kratos printing utilities.
std::string Info() const override
{
    return std::string("CalculateSignedDistanceTo3DConditionSkinProcess");
}
/// Print information about this object.
/// Prints the same identifier that Info() returns.
void PrintInfo(std::ostream& rOStream) const override
{
    rOStream << Info();
}
/// Print object's data.
/// Print object's data — intentionally empty: this process has no
/// printable state beyond what PrintInfo already shows.
void PrintData(std::ostream& rOStream) const override
{
}
/// Writes the octree leaves as a GiD hexahedral mesh: one coordinate line per
/// generated octree node, one 8-node hexahedron per leaf that carries data.
void PrintGiDMesh(std::ostream & rOStream) const {
std::vector<CellType*> leaves;
mOctree.GetAllLeavesVector(leaves);
std::cout << "writing " << leaves.size() << " leaves" << std::endl;
rOStream << "MESH \"leaves\" dimension 3 ElemType Hexahedra Nnode 8" << std::endl;
rOStream << "# color 96 96 96" << std::endl;
rOStream << "Coordinates" << std::endl;
rOStream << "# node number coordinate_x coordinate_y coordinate_z " << std::endl;
for(ConfigurationType::data_type::const_iterator i_node = mOctreeNodes.begin() ; i_node != mOctreeNodes.end() ; i_node++)
{
rOStream << (*i_node)->Id() << " " << (*i_node)->X() << " " << (*i_node)->Y() << " " << (*i_node)->Z() << std::endl;
//mOctree.Insert(temp_point);
}
std::cout << "Nodes written..." << std::endl;
rOStream << "end coordinates" << std::endl;
rOStream << "Elements" << std::endl;
// header fixed: hexahedra have 8 connectivity entries, not 3 (GiD treats
// '#' lines as comments, so this only affects readability of the file)
rOStream << "# element node_1 node_2 node_3 node_4 node_5 node_6 node_7 node_8 material_number" << std::endl;
for (std::size_t i = 0; i < leaves.size(); i++) {
if ((leaves[i]->pGetData()))
{
ConfigurationType::data_type& nodes = (*(leaves[i]->pGetData()));
rOStream << i + 1;
for(int j = 0 ; j < 8 ; j++)
rOStream << " " << nodes[j]->Id();
rOStream << std::endl;
}
}
rOStream << "end elements" << std::endl;
}
/// Writes the distance stored on every octree node as a GiD scalar result
/// ("Distance", OnNodes).
void PrintGiDResults(std::ostream & rOStream) const {
    std::vector<CellType*> leaves;
    mOctree.GetAllLeavesVector(leaves);

    rOStream << "GiD Post Results File 1.0" << std::endl << std::endl;
    rOStream << "Result \"Distance\" \"Kratos\" 1 Scalar OnNodes" << std::endl;
    rOStream << "Values" << std::endl;
    for(ConfigurationType::data_type::const_iterator it = mOctreeNodes.begin() ; it != mOctreeNodes.end() ; ++it)
    {
        rOStream << (*it)->Id() << " " << (*it)->Distance() << std::endl;
    }
    rOStream << "End Values" << std::endl;
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
ModelPart& mrSkinModelPart;
ModelPart& mrBodyModelPart;
ModelPart& mrFluidModelPart;
ConfigurationType::data_type mOctreeNodes;
OctreeType mOctree;
static const double epsilon;
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
CalculateSignedDistanceTo3DConditionSkinProcess& operator=(CalculateSignedDistanceTo3DConditionSkinProcess const& rOther);
/// Copy constructor.
//CalculateSignedDistanceTo3DConditionSkinProcess(CalculateSignedDistanceTo3DConditionSkinProcess const& rOther);
///@}
}; // Class CalculateSignedDistanceTo3DConditionSkinProcess
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function
inline std::istream& operator >> (std::istream& rIStream,
CalculateSignedDistanceTo3DConditionSkinProcess& rThis);
/// output stream function: prints the process info line, a newline and
/// then the process data (empty for this class).
inline std::ostream& operator << (std::ostream& rOStream,
const CalculateSignedDistanceTo3DConditionSkinProcess& rThis)
{
rThis.PrintInfo(rOStream);
rOStream << std::endl;
rThis.PrintData(rOStream);
return rOStream;
}
///@}
const double CalculateSignedDistanceTo3DConditionSkinProcess::epsilon = 1e-12;
} // namespace Kratos.
#endif // KRATOS_CALCULATE_DISTANCE_CONDITION_PROCESS_H_INCLUDED defined
|
parallel_omp.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
/* Fill `array` with the sequence 1, 2, ..., array_size. */
void init_array(uint64_t * array, uint64_t array_size){
    uint64_t idx = 0;
    while (idx < array_size) {
        array[idx] = idx + 1;
        ++idx;
    }
}
/* Sum array[start_index .. end_index-1], repeated `times` times.
 * The repetitions are distributed over OpenMP threads with a `+`
 * reduction, so the result equals times * sum(range). */
uint64_t sum_array(uint64_t * array, uint64_t start_index, uint64_t end_index, uint64_t times){
    uint64_t total = 0;
    uint64_t rep, idx;
    #pragma omp parallel for reduction(+ : total) private(rep,idx)
    for (rep = 0; rep < times; ++rep) {
        for (idx = start_index; idx < end_index; ++idx) {
            total += array[idx];
        }
    }
    return total;
}
/* Verify that `sum` equals times * (1 + 2 + ... + array_size) using the
 * closed form n*(n+1)/2, and print the verdict.
 *
 * Fix: the original printed uint64_t values with "%ld", a format/argument
 * type mismatch (undefined behavior and wrong output on LLP64 platforms).
 * Print via "%llu" with explicit casts, which is correct everywhere
 * without needing <inttypes.h>. */
void check_sum(uint64_t array_size, uint64_t times, uint64_t sum){
    uint64_t real_sum = times*((array_size*(array_size +1))/2);
    if(real_sum == sum){
        printf("Array sum is correct (%llu)\n",
               (unsigned long long)sum);
    }
    else{
        printf("Array sum is NOT correct (%llu), should have been: %llu\n",
               (unsigned long long)sum, (unsigned long long)real_sum);
    }
}
/* Driver: parse [array size] [times] [nbOfThreads], fill an array with
 * 1..N, sum it `times` times in parallel and verify the result.
 *
 * Fixes vs. original:
 *  - strtoull instead of atoi (atoi truncates 64-bit inputs and reports
 *    no errors);
 *  - "%llu" + casts instead of "%lu"/"%ld" for uint64_t values;
 *  - allocation failure now exits with a nonzero status;
 *  - the array is freed and main returns an explicit 0 on success. */
int main (int argc, char *argv[]){
    uint64_t array_size = 0;
    uint64_t times = 0;
    uint64_t nbOfThreads = 0;
    uint64_t * array;
    if(argc != 4){
        printf("usage: %s [array size] [times] [nbOfThreads]\n",argv[0]);
        return 0;
    }
    /* strtoull covers the full uint64_t range; base 10. */
    array_size = strtoull(argv[1], NULL, 10);
    times = strtoull(argv[2], NULL, 10);
    nbOfThreads = strtoull(argv[3], NULL, 10);
    omp_set_num_threads((int)nbOfThreads);
    printf("Array size: %llu\n",(unsigned long long)array_size);
    printf("Sum times: %llu\n",(unsigned long long)times);
    array = malloc(array_size* sizeof(uint64_t ));
    if(array == NULL){
        printf("Could not allocate Array... bye bye!\n");
        return 1; /* allocation failure is an error, not success */
    }
    init_array(array, array_size);
    uint64_t sum = sum_array(array, 0, array_size, times);
    printf("sum is %llu\n",(unsigned long long)sum);
    check_sum(array_size, times, sum);
    free(array);
    return 0;
}
|
GB_unop__isinf_bool_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__isinf_bool_fp32)
// op(A') function: GB (_unop_tran__isinf_bool_fp32)
// C type: bool
// A type: float
// cast: float cij = (aij)
// unaryop: cij = isinf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = isinf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = (aij) ; \
Cx [pC] = isinf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISINF || GxB_NO_BOOL || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary operator entrywise: Cx [p] = isinf (Ax [p]) for all anz
// entries. Two cases: full/sparse (Ab == NULL, every slot valid) and
// bitmap (Ab marks which slots hold entries).
GrB_Info GB (_unop_apply__isinf_bool_fp32)
(
bool *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// operator/type combination disabled at compile time; caller falls back
// to the generic implementation
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// full/sparse case: every position 0..anz-1 holds an entry
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = (aij) ;
Cx [p] = isinf (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip positions with no entry in the bitmap
if (!Ab [p]) continue ;
float aij = Ax [p] ;
float z = (aij) ;
Cx [p] = isinf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = isinf (A'): transpose A while applying the operator. The actual
// work is the shared template GB_unop_transpose.c, specialized here via
// the GB_* macros defined above.
GrB_Info GB (_unop_tran__isinf_bool_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces, // per-thread workspaces for the transpose
const int64_t *restrict A_slice, // how A's entries are split across tasks
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
test.c |
#include <stdlib.h>
#include <stdio.h>
#include "omp.h"
#include "../utilities/check.h"
#include "../utilities/utilities.h"
#define N 10
/*
 * Exercises "#pragma omp taskloop" inside a target region across its
 * clauses: shared, if, private, firstprivate, lastprivate, default,
 * grainsize, num_tasks, collapse, final, priority, untied, mergeable and
 * nogroup. Each section computes a[] on the device, replays the same
 * computation in a_h[] on the host, compares them and prints
 * "Succeeded"/"Failed".
 *
 * Fix: the lastprivate test used lastprivate(myId) while verifying lp.
 * With lp merely shared, its final value was decided by whichever task
 * happened to finish last (a race); lastprivate(lp) guarantees lp holds
 * the value from the last logical iteration.
 */
int main()
{
    double a[N], a_h[N];
    double b[N], c[N];
    int fail = 0;
    check_offloading();
    long cpuExec = 0;
    /* Record whether the target region executed on the initial device. */
    #pragma omp target map(tofrom: cpuExec)
    {
        cpuExec = omp_is_initial_device();
    }
    // Test: basic with shared
    for (int i = 0; i < N; i++) {
        a[i] = a_h[i] = 0;
        b[i] = i;
        c[i] = i - 7;
    }
    #pragma omp target map(tofrom:a) map(to:b,c)
    {
        #pragma omp parallel
        #pragma omp single
        #pragma omp taskloop shared(a)
        for (int i = 0; i < N; i++) {
            a[i] += b[i] + c[i];
        }
    }
    for (int i = 0; i < N; i++)
        a_h[i] += b[i] + c[i];
    for (int i = 0; i < N; i++)
        if (a[i] != a_h[i]) {
            printf("Error %d: device = %lf, host = %lf\n", i, a[i], a_h[i]);
            fail = 1;
        }
    if (fail)
        printf("Failed\n");
    else
        printf("Succeeded\n");
    // Test: if clause
    fail = 0;
    for (int i = 0; i < N; i++) {
        a[i] = a_h[i] = 0;
        b[i] = i;
        c[i] = i - 7;
    }
    #pragma omp target map(tofrom:a) map(to:b,c)
    {
        #pragma omp parallel
        #pragma omp single
        #pragma omp taskloop shared(a) if(0) //undeferred execution of task
        for (int i = 0; i < N; i++) {
            a[i] += b[i] + c[i];
        }
    }
    for (int i = 0; i < N; i++)
        a_h[i] += b[i] + c[i];
    for (int i = 0; i < N; i++)
        if (a[i] != a_h[i]) {
            printf("Error %d: device = %lf, host = %lf\n", i, a[i], a_h[i]);
            fail = 1;
        }
    if (fail)
        printf("Failed\n");
    else
        printf("Succeeded\n");
    // Test: private clause
    fail = 0;
    for (int i = 0; i < N; i++) {
        a[i] = a_h[i] = 0;
        b[i] = i;
        c[i] = i - 7;
    }
    int myId = -1;
    #pragma omp target map(tofrom:a) map(to:b,c)
    {
        #pragma omp parallel
        #pragma omp master
        #pragma omp taskloop shared(a) private(myId)
        for (int i = 0; i < N; i++) {
            myId = 0; /* private copy is written before use in every iteration */
            a[i] += b[i] + c[i] + myId;
        }
    }
    // myId == 0 for all iterations because each iteration assigns it first
    for (int i = 0; i < N; i++)
        a_h[i] += b[i] + c[i] + 0;
    for (int i = 0; i < N; i++)
        if (a[i] != a_h[i]) {
            printf("Error %d: device = %lf, host = %lf\n", i, a[i], a_h[i]);
            fail = 1;
        }
    if (fail)
        printf("Failed\n");
    else
        printf("Succeeded\n");
    // Test: firstprivate clause
    fail = 0;
    for (int i = 0; i < N; i++) {
        a[i] = a_h[i] = 0;
        b[i] = i;
        c[i] = i - 7;
    }
    myId = -1;
    #pragma omp target map(tofrom:a) map(to:b,c)
    {
        #pragma omp parallel
        #pragma omp master
        #pragma omp taskloop shared(a) firstprivate(myId)
        for (int i = 0; i < N; i++) {
            myId += 0; /* firstprivate copy keeps its initial value of -1 */
            a[i] += b[i] + c[i] + myId;
        }
    }
    // myId == -1 in every iteration: firstprivate initializes each task's copy
    for (int i = 0; i < N; i++)
        a_h[i] += b[i] + c[i] + (-1);
    for (int i = 0; i < N; i++)
        if (a[i] != a_h[i]) {
            printf("Error %d: device = %lf, host = %lf\n", i, a[i], a_h[i]);
            fail = 1;
        }
    if (fail)
        printf("Failed\n");
    else
        printf("Succeeded\n");
    // Test: lastprivate clause
    fail = 0;
    for (int i = 0; i < N; i++) {
        a[i] = a_h[i] = 0;
        b[i] = i;
        c[i] = i - 7;
    }
    double lp = -1;
    #pragma omp target map(tofrom:a) map(to:b,c) map(tofrom:lp)
    {
        #pragma omp parallel
        #pragma omp single
        /* lastprivate(lp): lp must receive the value from the last logical
           iteration (was lastprivate(myId), leaving lp's final value to a
           race between tasks). */
        #pragma omp taskloop shared(a) lastprivate(lp)
        for (int i = 0; i < N; i++) {
            a[i] += b[i] + c[i];
            lp = a[i];
        }
    }
    for (int i = 0; i < N; i++)
        a_h[i] += b[i] + c[i];
    for (int i = 0; i < N; i++)
        if (a[i] != a_h[i]) {
            printf("Error %d: device = %lf, host = %lf\n", i, a[i], a_h[i]);
            fail = 1;
        }
    if (lp != a[N-1]) {
        printf("Lastprivate Error device = %lf, host = %lf\n", lp, a_h[N-1]);
        fail = 1;
    }
    if (fail)
        printf("Failed\n");
    else
        printf("Succeeded\n");
    // Test: default clause
    fail = 0;
    for (int i = 0; i < N; i++) {
        a[i] = a_h[i] = 0;
        b[i] = i;
        c[i] = i - 7;
    }
    #pragma omp target map(tofrom:a) map(to:b,c) map(tofrom:lp)
    {
        #pragma omp parallel
        #pragma omp single
        #pragma omp taskloop default(shared)
        for (int i = 0; i < N; i++) {
            a[i] += b[i] + c[i];
        }
    }
    for (int i = 0; i < N; i++)
        a_h[i] += b[i] + c[i];
    for (int i = 0; i < N; i++)
        if (a[i] != a_h[i]) {
            printf("Error %d: device = %lf, host = %lf\n", i, a[i], a_h[i]);
            fail = 1;
        }
    if (fail)
        printf("Failed\n");
    else
        printf("Succeeded\n");
    // Test: grainsize
    fail = 0;
    for (int i = 0; i < N; i++) {
        a[i] = a_h[i] = 0;
        b[i] = i;
        c[i] = i - 7;
    }
    #pragma omp target map(tofrom:a) map(to:b,c) map(tofrom:lp)
    {
        #pragma omp parallel
        #pragma omp single
        #pragma omp taskloop grainsize(3)
        for (int i = 0; i < N; i++) {
            a[i] += b[i] + c[i];
        }
    }
    for (int i = 0; i < N; i++)
        a_h[i] += b[i] + c[i];
    for (int i = 0; i < N; i++)
        if (a[i] != a_h[i]) {
            printf("Error %d: device = %lf, host = %lf\n", i, a[i], a_h[i]);
            fail = 1;
        }
    if (fail)
        printf("Failed\n");
    else
        printf("Succeeded\n");
    // Test: num_tasks clause
    fail = 0;
    for (int i = 0; i < N; i++) {
        a[i] = a_h[i] = 0;
        b[i] = i;
        c[i] = i - 7;
    }
    #pragma omp target map(tofrom:a) map(to:b,c) map(tofrom:lp)
    {
        #pragma omp parallel
        #pragma omp single
        #pragma omp taskloop num_tasks(5)
        for (int i = 0; i < N; i++) {
            a[i] += b[i] + c[i];
        }
    }
    for (int i = 0; i < N; i++)
        a_h[i] += b[i] + c[i];
    for (int i = 0; i < N; i++)
        if (a[i] != a_h[i]) {
            printf("Error %d: device = %lf, host = %lf\n", i, a[i], a_h[i]);
            fail = 1;
        }
    if (fail)
        printf("Failed\n");
    else
        printf("Succeeded\n");
    // Test: collapse clause
    fail = 0;
    int ma[N][N], mb[N][N], mc[N][N];
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++) {
            ma[i][j] = -1;
            mb[i][j] = i;
            mc[i][j] = 2*i;
        }
    #pragma omp target map(tofrom: ma) map(to: mb,mc)
    #pragma omp parallel
    #pragma omp single
    #pragma omp taskloop collapse(2)
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++)
            ma[i][j] += mb[i][j] + mc[i][j];
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++)
            if (ma[i][j] != (-1 + i + 2*i)) {
                printf("Error at %d: device = %d, host = %d\n", i, ma[i][j], (-1 + i + 2*i));
                fail = 1;
            }
    if (fail)
        printf ("Failed\n");
    else
        printf("Succeeded\n");
    // Test: final clause
    fail = 0;
    for (int i = 0; i < N; i++) {
        a[i] = a_h[i] = 0;
        b[i] = i;
        c[i] = i - 7;
    }
    #pragma omp target map(tofrom:a) map(to:b,c) map(tofrom:lp)
    {
        #pragma omp parallel
        #pragma omp single
        #pragma omp taskloop final(1)
        for (int i = 0; i < N; i++) {
            a[i] += b[i] + c[i];
        }
    }
    for (int i = 0; i < N; i++)
        a_h[i] += b[i] + c[i];
    for (int i = 0; i < N; i++)
        if (a[i] != a_h[i]) {
            printf("Error %d: device = %lf, host = %lf\n", i, a[i], a_h[i]);
            fail = 1;
        }
    if (fail)
        printf("Failed\n");
    else
        printf("Succeeded\n");
    // Test: priority clause
    fail = 0;
    for (int i = 0; i < N; i++) {
        a[i] = a_h[i] = 0;
        b[i] = i;
        c[i] = i - 7;
    }
    #pragma omp target map(tofrom:a) map(to:b,c) map(tofrom:lp)
    {
        #pragma omp parallel
        #pragma omp single
        #pragma omp taskloop priority(10)
        for (int i = 0; i < N; i++) {
            a[i] += b[i] + c[i];
        }
    }
    for (int i = 0; i < N; i++)
        a_h[i] += b[i] + c[i];
    for (int i = 0; i < N; i++)
        if (a[i] != a_h[i]) {
            printf("Error %d: device = %lf, host = %lf\n", i, a[i], a_h[i]);
            fail = 1;
        }
    if (fail)
        printf("Failed\n");
    else
        printf("Succeeded\n");
    // Test: untied clause
    fail = 0;
    for (int i = 0; i < N; i++) {
        a[i] = a_h[i] = 0;
        b[i] = i;
        c[i] = i - 7;
    }
    #pragma omp target map(tofrom:a) map(to:b,c) map(tofrom:lp)
    {
        #pragma omp parallel
        #pragma omp single
        #pragma omp taskloop untied
        for (int i = 0; i < N; i++) {
            a[i] += b[i] + c[i];
        }
    }
    for (int i = 0; i < N; i++)
        a_h[i] += b[i] + c[i];
    for (int i = 0; i < N; i++)
        if (a[i] != a_h[i]) {
            printf("Error %d: device = %lf, host = %lf\n", i, a[i], a_h[i]);
            fail = 1;
        }
    if (fail)
        printf("Failed\n");
    else
        printf("Succeeded\n");
    // Test: mergeable clause
    fail = 0;
    for (int i = 0; i < N; i++) {
        a[i] = a_h[i] = 0;
        b[i] = i;
        c[i] = i - 7;
    }
    #pragma omp target map(tofrom:a) map(to:b,c) map(tofrom:lp)
    {
        #pragma omp parallel
        #pragma omp single
        #pragma omp taskloop mergeable
        for (int i = 0; i < N; i++) {
            a[i] += b[i] + c[i];
        }
    }
    for (int i = 0; i < N; i++)
        a_h[i] += b[i] + c[i];
    for (int i = 0; i < N; i++)
        if (a[i] != a_h[i]) {
            printf("Error %d: device = %lf, host = %lf\n", i, a[i], a_h[i]);
            fail = 1;
        }
    if (fail)
        printf("Failed\n");
    else
        printf("Succeeded\n");
    // Test: nogroup clause
    fail = 0;
    for (int i = 0; i < N; i++) {
        a[i] = a_h[i] = 0;
        b[i] = i;
        c[i] = i - 7;
    }
    #pragma omp target map(tofrom:a) map(to:b,c) map(tofrom:lp)
    {
        #pragma omp parallel
        #pragma omp single
        #pragma omp taskloop nogroup
        for (int i = 0; i < N; i++) {
            a[i] += b[i] + c[i];
        }
    }
    for (int i = 0; i < N; i++)
        a_h[i] += b[i] + c[i];
    for (int i = 0; i < N; i++)
        if (a[i] != a_h[i]) {
            printf("Error %d: device = %lf, host = %lf\n", i, a[i], a_h[i]);
            fail = 1;
        }
    if (fail)
        printf("Failed\n");
    else
        printf("Succeeded\n");
    return 0;
}
|
convolution_winograd_transform_bf16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Winograd F(6x6, 3x3) output transform, bf16 storage variant: converts each
// channel's 8x8 transformed tiles back to 6x6 spatial outputs, adds the bias,
// and stores the result as bfloat16. The transform is applied separably:
// first over the 8 rows of the tile (into tmp), then over its columns.
static void conv3x3s1_winograd63_transform_output_bf16s_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
const int outw = top_blob.w;
const int outh = top_blob.h;
const int outch = top_blob.c;
const int w_tiles = outw / 6;
const int h_tiles = outh / 6;
const int tiles = w_tiles * h_tiles;
const float* biasptr = bias;
// const float otm[6][8] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
// 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
// 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
// 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob.channel(p);
const float bias0 = biasptr ? biasptr[p] : 0.f;
// tmp holds the row-transformed tile: 6 output rows x 8 columns
float tmp[6][8];
// tile
for (int i = 0; i < h_tiles; i++)
{
for (int j = 0; j < w_tiles; j++)
{
// the 8 rows of this tile are stored tiles apart in the transformed blob
const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j);
const float* output0_tm_1 = output0_tm_0 + tiles * 1;
const float* output0_tm_2 = output0_tm_0 + tiles * 2;
const float* output0_tm_3 = output0_tm_0 + tiles * 3;
const float* output0_tm_4 = output0_tm_0 + tiles * 4;
const float* output0_tm_5 = output0_tm_0 + tiles * 5;
const float* output0_tm_6 = output0_tm_0 + tiles * 6;
const float* output0_tm_7 = output0_tm_0 + tiles * 7;
// TODO neon optimize
// vertical pass: reduce 8 rows to 6 per the otm table above
for (int m = 0; m < 8; m++)
{
float tmp024a = output0_tm_1[0] + output0_tm_2[0];
float tmp135a = output0_tm_1[0] - output0_tm_2[0];
float tmp024b = output0_tm_3[0] + output0_tm_4[0];
float tmp135b = output0_tm_3[0] - output0_tm_4[0];
float tmp024c = output0_tm_5[0] + output0_tm_6[0];
float tmp135c = output0_tm_5[0] - output0_tm_6[0];
tmp[0][m] = output0_tm_0[0] + tmp024a + tmp024b + tmp024c * 32;
tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
// "x + x" is used instead of "x * 2" in the two lines below
tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c;
tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16;
tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
tmp[5][m] = output0_tm_7[0] + tmp135a + tmp135b * 32 + tmp135c;
output0_tm_0 += tiles * 8;
output0_tm_1 += tiles * 8;
output0_tm_2 += tiles * 8;
output0_tm_3 += tiles * 8;
output0_tm_4 += tiles * 8;
output0_tm_5 += tiles * 8;
output0_tm_6 += tiles * 8;
output0_tm_7 += tiles * 8;
}
// horizontal pass: same reduction over the 8 columns of tmp, plus
// bias; results are narrowed to bf16 on store
unsigned short* output0 = out0.row<unsigned short>(i * 6) + j * 6;
for (int m = 0; m < 6; m++)
{
const float* tmp0 = tmp[m];
float tmp024a = tmp0[1] + tmp0[2];
float tmp135a = tmp0[1] - tmp0[2];
float tmp024b = tmp0[3] + tmp0[4];
float tmp135b = tmp0[3] - tmp0[4];
float tmp024c = tmp0[5] + tmp0[6];
float tmp135c = tmp0[5] - tmp0[6];
output0[0] = float32_to_bfloat16(bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32);
output0[2] = float32_to_bfloat16(bias0 + tmp024a + tmp024b * 4 + tmp024c * 8);
output0[4] = float32_to_bfloat16(bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c);
output0[1] = float32_to_bfloat16(bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16);
output0[3] = float32_to_bfloat16(bias0 + tmp135a + tmp135b * 8 + tmp135c * 4);
output0[5] = float32_to_bfloat16(bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c);
output0 += outw;
}
}
}
}
}
|
trsm_x_dia_u_hi_row.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
/* Triangular solve with multiple right-hand sides for a DIA (diagonal
 * storage) matrix, unit upper-triangular variant (no division by the
 * diagonal anywhere below, consistent with the _u_ in the kernel name):
 * solve op(A) * y = alpha * x column by column via backward substitution.
 *
 * Improvement: the original launched an OpenMP parallel-for just to find
 * the index of the main diagonal. At most one entry of A->distance can be
 * zero, so that team start-up was pure overhead (and, with a shared
 * variable written inside the parallel loop, needlessly fragile). A
 * sequential scan with an early break produces the identical result. */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
    ALPHA_INT m = A->rows;
    ALPHA_INT main_diag_pos = 0;
    int num_thread = alpha_get_thread_num();
    /* Locate the main diagonal (distance == 0). */
    for (ALPHA_INT i = 0; i < A->ndiag; i++)
    {
        if (A->distance[i] == 0)
        {
            main_diag_pos = i;
            break;
        }
    }
    /* Right-hand-side columns are independent; solve them in parallel. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for (ALPHA_INT out_y_col = 0; out_y_col < columns; out_y_col++)
    {
        /* Backward substitution over rows, bottom to top. */
        for (ALPHA_INT r = m - 1; r >= 0; r--)
        {
            ALPHA_Number temp;
            alpha_setzero(temp);
            /* Accumulate contributions from the strictly-upper diagonals
               (those stored after the main diagonal). */
            for (ALPHA_INT ndiag = main_diag_pos + 1; ndiag < A->ndiag; ndiag++)
            {
                if (m - A->distance[ndiag] > r)
                {
                    ALPHA_INT ac = r + A->distance[ndiag];
                    alpha_madde(temp, A->values[ndiag * A->lval + r], y[ac * ldy + out_y_col]);
                }
            }
            /* y[r] = alpha * x[r] - temp (unit diagonal: no division). */
            ALPHA_Number t;
            alpha_setzero(t);
            alpha_mul(t, alpha, x[r * ldx + out_y_col]);
            alpha_sub(y[r * ldy + out_y_col], t, temp);
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
psd.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP SSSSS DDDD %
% P P SS D D %
% PPPP SSS D D %
% P SS D D %
% P SSSSS DDDD %
% %
% %
% Read/Write Adobe Photoshop Image Format %
% %
% Software Design %
% Cristy %
% Leonard Rosenthol %
% July 1992 %
% Dirk Lemstra %
% December 2013 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/channel.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/registry.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "coders/coders-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"
/*
Define declarations.
*/
#define MaxPSDChannels 56
/* Round up to the next even number of bytes (PSD pads odd lengths). */
#define PSDQuantum(x) (((ssize_t) (x)+1) & -2)
/*
Enumerated declarations.
*/
/* Per-channel compression methods defined by the PSD file format. */
typedef enum
{
Raw = 0,
RLE = 1,
ZipWithoutPrediction = 2,
ZipWithPrediction = 3
} PSDCompressionType;
/* Color modes from the PSD header (values match the on-disk encoding;
note 5 and 6 are unassigned here). */
typedef enum
{
BitmapMode = 0,
GrayscaleMode = 1,
IndexedMode = 2,
RGBMode = 3,
CMYKMode = 4,
MultichannelMode = 7,
DuotoneMode = 8,
LabMode = 9
} PSDImageType;
/*
Typedef declarations.
*/
/* One channel record from a layer: its type tag and byte size. */
typedef struct _ChannelInfo
{
short
type;
size_t
size;
} ChannelInfo;
/* A layer's opacity mask: its image data, placement, background value
and flag bits as read from the layer record. */
typedef struct _MaskInfo
{
Image
*image;
RectangleInfo
page;
unsigned char
background,
flags;
} MaskInfo;
/* Everything parsed from one PSD layer record. */
typedef struct _LayerInfo
{
ChannelInfo
channel_info[MaxPSDChannels];
char
blendkey[4]; /* 4-byte PSD blend-mode key */
Image
*image;
MaskInfo
mask;
Quantum
opacity;
RectangleInfo
page;
size_t
offset_x,
offset_y;
unsigned char
clipping,
flags,
name[257], /* Pascal-style layer name buffer */
visible;
unsigned short
channels;
StringInfo
*info;
} LayerInfo;
/*
Forward declarations.
*/
static MagickBooleanType
WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P S D %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPSD() returns MagickTrue if the image format type, identified by the
% magick string, is PSD.
%
% The format of the IsPSD method is:
%
% MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
/* Return MagickTrue when the first bytes carry the PSD signature "8BPS". */
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
  MagickBooleanType
    is_psd;

  is_psd=((length >= 4) &&
    (LocaleNCompare((const char *) magick,"8BPS",4) == 0)) ? MagickTrue :
    MagickFalse;
  return(is_psd);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPSDImage() reads an Adobe Photoshop image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadPSDImage method is:
%
% Image *ReadPSDImage(image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
Map the image's composite operator to the 4-byte PSD blend-mode key.
PSD stores these keys in file byte order, so each case returns the
byte-reversed spelling on little-endian machines and the canonical one
otherwise; unknown operators fall through to "norm" (normal).
*/
static const char *CompositeOperatorToPSDBlendMode(Image *image)
{
switch (image->compose)
{
case ColorBurnCompositeOp:
return(image->endian == LSBEndian ? "vidi" : "idiv");
case ColorDodgeCompositeOp:
return(image->endian == LSBEndian ? " vid" : "div ");
case ColorizeCompositeOp:
return(image->endian == LSBEndian ? "rloc" : "colr");
case DarkenCompositeOp:
return(image->endian == LSBEndian ? "krad" : "dark");
case DifferenceCompositeOp:
return(image->endian == LSBEndian ? "ffid" : "diff");
case DissolveCompositeOp:
return(image->endian == LSBEndian ? "ssid" : "diss");
case ExclusionCompositeOp:
return(image->endian == LSBEndian ? "dums" : "smud");
case HardLightCompositeOp:
return(image->endian == LSBEndian ? "tiLh" : "hLit");
case HardMixCompositeOp:
return(image->endian == LSBEndian ? "xiMh" : "hMix");
case HueCompositeOp:
return(image->endian == LSBEndian ? " euh" : "hue ");
case LightenCompositeOp:
return(image->endian == LSBEndian ? "etil" : "lite");
case LinearBurnCompositeOp:
return(image->endian == LSBEndian ? "nrbl" : "lbrn");
case LinearDodgeCompositeOp:
return(image->endian == LSBEndian ? "gddl" : "lddg");
case LinearLightCompositeOp:
return(image->endian == LSBEndian ? "tiLl" : "lLit");
case LuminizeCompositeOp:
return(image->endian == LSBEndian ? " mul" : "lum ");
case MultiplyCompositeOp:
return(image->endian == LSBEndian ? " lum" : "mul ");
case OverlayCompositeOp:
return(image->endian == LSBEndian ? "revo" : "over");
case PinLightCompositeOp:
return(image->endian == LSBEndian ? "tiLp" : "pLit");
case SaturateCompositeOp:
return(image->endian == LSBEndian ? " tas" : "sat ");
case ScreenCompositeOp:
return(image->endian == LSBEndian ? "nrcs" : "scrn");
case SoftLightCompositeOp:
return(image->endian == LSBEndian ? "tiLs" : "sLit");
case VividLightCompositeOp:
return(image->endian == LSBEndian ? "tiLv" : "vLit");
case OverCompositeOp:
default:
return(image->endian == LSBEndian ? "mron" : "norm");
}
}
/*
For some reason Photoshop seems to blend semi-transparent pixels with white.
This method reverts the blending. This can be disabled by setting the
option 'psd:alpha-unblend' to off.
*/
/*
Revert Photoshop's blending of semi-transparent pixels with white (see the
note above): for each pixel with partial alpha, un-mix the white component
from every non-alpha channel. Applies only to sRGB images with a blended
alpha channel, and can be disabled via the "psd:alpha-unblend" option.
Rows are processed in parallel; returns MagickFalse if any row fails.
*/
static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info,
Image *image,ExceptionInfo* exception)
{
const char
*option;
MagickBooleanType
status;
ssize_t
y;
if ((image->alpha_trait != BlendPixelTrait) ||
(image->colorspace != sRGBColorspace))
return(MagickTrue);
option=GetImageOption(image_info,"psd:alpha-unblend");
if (IsStringFalse(option) != MagickFalse)
return(MagickTrue);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
gamma;
ssize_t
i;
gamma=QuantumScale*GetPixelAlpha(image, q);
/* fully transparent and fully opaque pixels need no correction */
if (gamma != 0.0 && gamma != 1.0)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
/* invert "c' = gamma*c + (1-gamma)*white" for each color channel */
if (channel != AlphaPixelChannel)
q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma);
}
}
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
return(status);
}
/* Translate a PSD compression tag to the closest ImageMagick
   CompressionType (both Zip variants collapse to ZipCompression). */
static inline CompressionType ConvertPSDCompression(
  PSDCompressionType compression)
{
  if (compression == RLE)
    return RLECompression;
  if ((compression == ZipWithPrediction) ||
      (compression == ZipWithoutPrediction))
    return ZipCompression;
  return NoCompression;
}
/*
Fold a layer's global opacity into its alpha channel (revert==MagickFalse),
or undo that scaling (revert==MagickTrue) before writing the layer back.
An image without a blended alpha channel first gets an opaque one. Rows
are processed in parallel; returns MagickFalse if any row fails.
*/
static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity,
MagickBooleanType revert,ExceptionInfo *exception)
{
MagickBooleanType
status;
ssize_t
y;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" applying layer opacity %.20g", (double) opacity);
/* fully opaque layer: nothing to scale */
if (opacity == OpaqueAlpha)
return(MagickTrue);
if (image->alpha_trait != BlendPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (revert == MagickFalse)
SetPixelAlpha(image,(Quantum) (QuantumScale*(GetPixelAlpha(image,q))*
opacity),q);
else if (opacity > 0)
SetPixelAlpha(image,(Quantum) (QuantumRange*(GetPixelAlpha(image,q)/
(MagickRealType) opacity)),q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
return(status);
}
/*
Multiply the image's alpha channel by a layer's opacity mask
(revert==MagickFalse), or divide it back out (revert==MagickTrue).
The mask is first composited over a full-size canvas filled with the
mask's background value so that areas outside the mask rectangle get
that background. No-op when the image has no alpha channel. Rows are
processed in parallel; returns MagickFalse if any step fails.
*/
static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask,
Quantum background,MagickBooleanType revert,ExceptionInfo *exception)
{
Image
*complete_mask;
MagickBooleanType
status;
PixelInfo
color;
ssize_t
y;
if (image->alpha_trait == UndefinedPixelTrait)
return(MagickTrue);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" applying opacity mask");
complete_mask=CloneImage(image,0,0,MagickTrue,exception);
if (complete_mask == (Image *) NULL)
return(MagickFalse);
complete_mask->alpha_trait=BlendPixelTrait;
GetPixelInfo(complete_mask,&color);
color.red=(MagickRealType) background;
(void) SetImageColor(complete_mask,&color,exception);
/* place the mask at its page offset relative to the layer image */
status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue,
mask->page.x-image->page.x,mask->page.y-image->page.y,exception);
if (status == MagickFalse)
{
complete_mask=DestroyImage(complete_mask);
return(status);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
Quantum
*p;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception);
if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
alpha,
intensity;
alpha=(MagickRealType) GetPixelAlpha(image,q);
intensity=GetPixelIntensity(complete_mask,p);
/* alpha *= mask (apply) or alpha /= mask (revert) */
if (revert == MagickFalse)
SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q);
else if (intensity > 0)
SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q);
q+=GetPixelChannels(image);
p+=GetPixelChannels(complete_mask);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
status=MagickFalse;
}
complete_mask=DestroyImage(complete_mask);
return(status);
}
/*
  PreservePSDOpacityMask() stashes the layer's opacity-mask image in the
  image registry under a random key and records that key in the layer
  image's "psd:opacity-mask" artifact, so a later write can retrieve and
  re-emit the original mask.
*/
static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info,
  ExceptionInfo *exception)
{
  char
    *key;

  RandomInfo
    *random_info;

  StringInfo
    *key_info;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " preserving opacity mask");
  random_info=AcquireRandomInfo();
  key_info=GetRandomKey(random_info,2+1);
  key=(char *) GetStringInfoDatum(key_info);
  /*
    NOTE(review): the key is requested as 2+1 bytes, yet indices 8 and 9
    are written below; this appears to rely on internal StringInfo
    allocation padding.  Looks like a length mismatch -- confirm the
    intended key size against the PSD writer's registry lookup.
  */
  key[8]=(char) layer_info->mask.background;
  key[9]='\0';
  /* Translate the mask's page offset into canvas coordinates. */
  layer_info->mask.image->page.x+=layer_info->page.x;
  layer_info->mask.image->page.y+=layer_info->page.y;
  (void) SetImageRegistry(ImageRegistryType,(const char *) key,
    layer_info->mask.image,exception);
  (void) SetImageArtifact(layer_info->image,"psd:opacity-mask",
    (const char *) key);
  key_info=DestroyStringInfo(key_info);
  random_info=DestroyRandomInfo(random_info);
}
/*
  DecodePSDPixels() expands PackBits (RLE) compressed scanline data.
  Control bytes below 128 announce a literal run of (byte+1) source bytes;
  bytes above 128 announce that the next source byte repeats (257-byte)
  times; 128 is a no-op.  For depths below 8 each decoded byte is further
  expanded into multiple output pixels (1-bit data is also inverted so a
  set bit becomes 0 and a clear bit 255).  Decoding stops early when either
  buffer is exhausted; the number of output bytes written is returned.
*/
static ssize_t DecodePSDPixels(const size_t number_compact_pixels,
  const unsigned char *compact_pixels,const ssize_t depth,
  const size_t number_pixels,unsigned char *pixels)
{
  ssize_t
    available,  /* compressed bytes not yet consumed */
    run,
    written;    /* output bytes emitted so far */

  written=0;
  available=(ssize_t) number_compact_pixels;
  /* A packet needs at least a control byte plus one data byte. */
  while ((available > 1) && (written < (ssize_t) number_pixels))
  {
    size_t
      count;

    available--;
    count=(size_t) (*compact_pixels++);
    if (count == 128)
      continue;  /* no-op control code */
    if (count > 128)
      {
        int
          value;

        /* Replicate run: the next byte repeats 257-count times. */
        count=256-count+1;
        if (available == 0)
          return(written);
        available--;
        value=(*compact_pixels++);
        for (run=0; run < (ssize_t) count; run++)
        {
          switch (depth)
          {
            case 1:
            {
              if ((written+8) > (ssize_t) number_pixels)
                return(written);
              written+=8;
              *pixels++=(value >> 7) & 0x01 ? 0U : 255U;
              *pixels++=(value >> 6) & 0x01 ? 0U : 255U;
              *pixels++=(value >> 5) & 0x01 ? 0U : 255U;
              *pixels++=(value >> 4) & 0x01 ? 0U : 255U;
              *pixels++=(value >> 3) & 0x01 ? 0U : 255U;
              *pixels++=(value >> 2) & 0x01 ? 0U : 255U;
              *pixels++=(value >> 1) & 0x01 ? 0U : 255U;
              *pixels++=(value >> 0) & 0x01 ? 0U : 255U;
              break;
            }
            case 2:
            {
              if ((written+4) > (ssize_t) number_pixels)
                return(written);
              written+=4;
              *pixels++=(unsigned char) ((value >> 6) & 0x03);
              *pixels++=(unsigned char) ((value >> 4) & 0x03);
              *pixels++=(unsigned char) ((value >> 2) & 0x03);
              *pixels++=(unsigned char) ((value & 0x03) & 0x03);
              break;
            }
            case 4:
            {
              if ((written+2) > (ssize_t) number_pixels)
                return(written);
              written+=2;
              *pixels++=(unsigned char) ((value >> 4) & 0xff);
              *pixels++=(unsigned char) ((value & 0x0f) & 0xff);
              break;
            }
            default:
            {
              if ((written+1) > (ssize_t) number_pixels)
                return(written);
              written+=1;
              *pixels++=(unsigned char) value;
              break;
            }
          }
        }
        continue;
      }
    /* Literal run of count+1 raw bytes. */
    count++;
    for (run=0; run < (ssize_t) count; run++)
    {
      if (available == 0)
        return(written);
      available--;
      switch (depth)
      {
        case 1:
        {
          if ((written+8) > (ssize_t) number_pixels)
            return(written);
          written+=8;
          *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U;
          break;
        }
        case 2:
        {
          if ((written+4) > (ssize_t) number_pixels)
            return(written);
          written+=4;
          *pixels++=(*compact_pixels >> 6) & 0x03;
          *pixels++=(*compact_pixels >> 4) & 0x03;
          *pixels++=(*compact_pixels >> 2) & 0x03;
          *pixels++=(*compact_pixels & 0x03) & 0x03;
          break;
        }
        case 4:
        {
          if ((written+2) > (ssize_t) number_pixels)
            return(written);
          written+=2;
          *pixels++=(*compact_pixels >> 4) & 0xff;
          *pixels++=(*compact_pixels & 0x0f) & 0xff;
          break;
        }
        default:
        {
          if ((written+1) > (ssize_t) number_pixels)
            return(written);
          written+=1;
          *pixels++=(*compact_pixels);
          break;
        }
      }
      compact_pixels++;
    }
  }
  return(written);
}
/*
  DestroyLayerInfo() releases every per-layer resource (layer image, mask
  image, additional-info blob) and then frees the layer array itself.
  Always returns NULL for convenient assignment.
*/
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
  const ssize_t number_layers)
{
  ssize_t
    j;

  for (j=0; j < number_layers; j++)
  {
    LayerInfo
      *layer;

    layer=layer_info+j;
    if (layer->image != (Image *) NULL)
      layer->image=DestroyImage(layer->image);
    if (layer->mask.image != (Image *) NULL)
      layer->mask.image=DestroyImage(layer->mask.image);
    if (layer->info != (StringInfo *) NULL)
      layer->info=DestroyStringInfo(layer->info);
  }
  return (LayerInfo *) RelinquishMagickMemory(layer_info);
}
/*
  GetPSDPacketSize() returns the size in bytes of one stored sample:
  colormapped images with more than 256 entries need 16-bit indexes,
  otherwise the size follows the image depth (4 bytes above 16 bits,
  2 bytes above 8 bits, 1 byte otherwise).
*/
static inline size_t GetPSDPacketSize(const Image *image)
{
  if ((image->storage_class == PseudoClass) && (image->colors > 256))
    return(2);
  if (image->depth > 16)
    return(4);
  if (image->depth > 8)
    return(2);
  return(1);
}
/*
  GetPSDSize() reads a length field from the blob: 32-bit for PSD
  (version 1) files, 64-bit for PSB (version 2) files.
*/
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
  return(psd_info->version == 1 ? (MagickSizeType) ReadBlobLong(image) :
    (MagickSizeType) ReadBlobLongLong(image));
}
/*
  GetPSDRowSize() returns the number of bytes one uncompressed scanline
  occupies; 1-bit data packs eight pixels per byte (rounded up).
*/
static inline size_t GetPSDRowSize(Image *image)
{
  size_t
    samples;

  samples=image->depth == 1 ? (image->columns+7)/8 : image->columns;
  return(samples*GetPSDPacketSize(image));
}
/*
  ModeToString() maps a PSD color mode to a human-readable name, used
  for logging.
*/
static const char *ModeToString(PSDImageType type)
{
  if (type == BitmapMode)
    return "Bitmap";
  if (type == GrayscaleMode)
    return "Grayscale";
  if (type == IndexedMode)
    return "Indexed";
  if (type == RGBMode)
    return "RGB";
  if (type == CMYKMode)
    return "CMYK";
  if (type == MultichannelMode)
    return "Multichannel";
  if (type == DuotoneMode)
    return "Duotone";
  if (type == LabMode)
    return "L*A*B";
  return "unknown";
}
/*
  NegateCMYK() inverts every channel except alpha; the PSD coder calls it
  on CMYK images whose samples are stored inverted relative to
  ImageMagick's convention.  The caller's channel mask is restored before
  returning.
*/
static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception)
{
  ChannelType
    saved_channels;

  MagickBooleanType
    status;

  saved_channels=SetImageChannelMask(image,(ChannelType)(AllChannels &~
    AlphaChannel));
  status=NegateImage(image,MagickFalse,exception);
  (void) SetImageChannelMask(image,saved_channels);
  return(status);
}
/*
  ParseImageResourceBlocks() walks the "8BIM" image-resource blocks and
  extracts the resources the coder understands: 0x03ed (resolution info,
  copied into image->resolution and tiff:*Resolution properties) and
  0x0421 (version info, used to detect the absence of a merged image).
  The raw block data is returned wrapped in a StringInfo profile named
  "8bim"; NULL is returned when the data is too short or the profile
  cannot be allocated.
*/
static StringInfo *ParseImageResourceBlocks(PSDInfo *psd_info,Image *image,
  const unsigned char *blocks,size_t length)
{
  const unsigned char
    *p;

  ssize_t
    offset;

  StringInfo
    *profile;

  unsigned char
    name_length;

  unsigned int
    count;

  unsigned short
    id,
    short_sans;

  if (length < 16)
    return((StringInfo *) NULL);
  profile=BlobToStringInfo((const unsigned char *) NULL,length);
  if (profile == (StringInfo *) NULL)
    return((StringInfo *) NULL);  /* allocation failed */
  SetStringInfoDatum(profile,blocks);
  SetStringInfoName(profile,"8bim");
  for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); )
  {
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=PushShortPixel(MSBEndian,p,&id);
    /* Pascal-style name, padded so length byte + name total an even count. */
    p=PushCharPixel(p,&name_length);
    if ((name_length % 2) == 0)
      name_length++;
    p+=name_length;
    if (p > (blocks+length-4))
      break;
    p=PushLongPixel(MSBEndian,p,&count);
    offset=(ssize_t) count;
    if (((p+offset) < blocks) || ((p+offset) > (blocks+length)))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        unsigned short
          resolution;

        /*
          Resolution info.
        */
        if (offset < 16)
          break;
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.x=(double) resolution;
        (void) FormatImageProperty(image,"tiff:XResolution","%*g",
          GetMagickPrecision(),image->resolution.x);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.y=(double) resolution;
        (void) FormatImageProperty(image,"tiff:YResolution","%*g",
          GetMagickPrecision(),image->resolution.y);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        image->units=PixelsPerInchResolution;
        break;
      }
      case 0x0421:
      {
        /* Version info: byte 4 clear means there is no merged image. */
        if ((offset > 4) && (*(p+4) == 0))
          psd_info->has_merged_image=MagickFalse;
        p+=offset;
        break;
      }
      default:
      {
        p+=offset;
        break;
      }
    }
    /* Resource data is padded to an even byte count. */
    if ((offset & 0x01) != 0)
      p++;
  }
  return(profile);
}
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
if (mode == (const char *) NULL)
return(OverCompositeOp);
if (LocaleNCompare(mode,"norm",4) == 0)
return(OverCompositeOp);
if (LocaleNCompare(mode,"mul ",4) == 0)
return(MultiplyCompositeOp);
if (LocaleNCompare(mode,"diss",4) == 0)
return(DissolveCompositeOp);
if (LocaleNCompare(mode,"diff",4) == 0)
return(DifferenceCompositeOp);
if (LocaleNCompare(mode,"dark",4) == 0)
return(DarkenCompositeOp);
if (LocaleNCompare(mode,"lite",4) == 0)
return(LightenCompositeOp);
if (LocaleNCompare(mode,"hue ",4) == 0)
return(HueCompositeOp);
if (LocaleNCompare(mode,"sat ",4) == 0)
return(SaturateCompositeOp);
if (LocaleNCompare(mode,"colr",4) == 0)
return(ColorizeCompositeOp);
if (LocaleNCompare(mode,"lum ",4) == 0)
return(LuminizeCompositeOp);
if (LocaleNCompare(mode,"scrn",4) == 0)
return(ScreenCompositeOp);
if (LocaleNCompare(mode,"over",4) == 0)
return(OverlayCompositeOp);
if (LocaleNCompare(mode,"hLit",4) == 0)
return(HardLightCompositeOp);
if (LocaleNCompare(mode,"sLit",4) == 0)
return(SoftLightCompositeOp);
if (LocaleNCompare(mode,"smud",4) == 0)
return(ExclusionCompositeOp);
if (LocaleNCompare(mode,"div ",4) == 0)
return(ColorDodgeCompositeOp);
if (LocaleNCompare(mode,"idiv",4) == 0)
return(ColorBurnCompositeOp);
if (LocaleNCompare(mode,"lbrn",4) == 0)
return(LinearBurnCompositeOp);
if (LocaleNCompare(mode,"lddg",4) == 0)
return(LinearDodgeCompositeOp);
if (LocaleNCompare(mode,"lLit",4) == 0)
return(LinearLightCompositeOp);
if (LocaleNCompare(mode,"vLit",4) == 0)
return(VividLightCompositeOp);
if (LocaleNCompare(mode,"pLit",4) == 0)
return(PinLightCompositeOp);
if (LocaleNCompare(mode,"hMix",4) == 0)
return(HardMixCompositeOp);
return(OverCompositeOp);
}
/*
  ReadPSDString() reads 'length' bytes into 'p'; when the blob is not
  big-endian the buffer is reversed in place so multi-character codes
  compare consistently.  Returns the number of bytes actually read.
*/
static inline ssize_t ReadPSDString(Image *image,char *p,const size_t length)
{
  ssize_t
    count;

  count=ReadBlob(image,length,(unsigned char *) p);
  if ((count == (ssize_t) length) && (image->endian != MSBEndian))
    {
      char
        *head,
        *tail;

      head=p;
      tail=p+length-1;
      while (head < tail)
      {
        char
          swap;

        swap=*head;
        *head++=*tail;
        *tail--=swap;
      }
    }
  return(count);
}
/*
  SetPSDPixel() stores one decoded sample into pixel 'q' of 'image'.
  'type' is the PSD channel type: 0..3 select red/green/blue and
  black-or-alpha, -1 is the transparency channel, and -2/-3/-4 (layer-mask
  channels, decoded into a separate grayscale mask image) map onto
  red/green/blue.  For colormapped images the sample is either a palette
  index (type 0) or a per-entry alpha value.
*/
static inline void SetPSDPixel(Image *image,const size_t channels,
  const ssize_t type,const size_t packet_size,const Quantum pixel,Quantum *q,
  ExceptionInfo *exception)
{
  if (image->storage_class == PseudoClass)
    {
      PixelInfo
        *color;

      Quantum
        index;

      index=pixel;
      /* 8-bit samples arrive scaled to Quantum; scale back to an index. */
      if (packet_size == 1)
        index=(Quantum) ScaleQuantumToChar(index);
      index=(Quantum) ConstrainColormapIndex(image,(ssize_t) index,
        exception);
      if (type == 0)
        SetPixelIndex(image,index,q);
      /* With more than one channel the color is resolved on a later pass. */
      if ((type == 0) && (channels > 1))
        return;
      color=image->colormap+(ssize_t) GetPixelIndex(image,q);
      /* Non-index channels carry the alpha for the colormap entry. */
      if (type != 0)
        color->alpha=(MagickRealType) pixel;
      SetPixelViaPixelInfo(image,color,q);
      return;
    }
  switch (type)
  {
    case -1:
    {
      SetPixelAlpha(image,pixel,q);
      break;
    }
    case -2:
    case 0:
    {
      SetPixelRed(image,pixel,q);
      break;
    }
    case -3:
    case 1:
    {
      SetPixelGreen(image,pixel,q);
      break;
    }
    case -4:
    case 2:
    {
      SetPixelBlue(image,pixel,q);
      break;
    }
    case 3:
    {
      /* Channel 3 is black for CMYK, otherwise alpha (when present). */
      if (image->colorspace == CMYKColorspace)
        SetPixelBlack(image,pixel,q);
      else
        if (image->alpha_trait != UndefinedPixelTrait)
          SetPixelAlpha(image,pixel,q);
      break;
    }
    case 4:
    {
      /* For RGB-compatible data with >3 channels, channel 4 is ignored. */
      if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
          (channels > 3))
        break;
      if (image->alpha_trait != UndefinedPixelTrait)
        SetPixelAlpha(image,pixel,q);
      break;
    }
  }
}
/*
  ReadPSDChannelPixels() converts one decoded scanline ('pixels') of a
  single PSD channel into row 'row' of 'image'.  Samples are 1, 2 or 4
  bytes wide (see GetPSDPacketSize); 1-bit images expand each byte into up
  to eight pixels, inverting set bits to 0 and clear bits to QuantumRange.
  Returns the result of syncing the pixel cache.
*/
static MagickBooleanType ReadPSDChannelPixels(Image *image,
  const size_t channels,const ssize_t row,const ssize_t type,
  const unsigned char *pixels,ExceptionInfo *exception)
{
  Quantum
    pixel;

  const unsigned char
    *p;

  Quantum
    *q;

  ssize_t
    x;

  size_t
    packet_size;

  p=pixels;
  q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
  if (q == (Quantum *) NULL)
    return MagickFalse;
  packet_size=GetPSDPacketSize(image);
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (packet_size == 1)
      pixel=ScaleCharToQuantum(*p++);
    else
      if (packet_size == 2)
        {
          unsigned short
            nibble;

          p=PushShortPixel(MSBEndian,p,&nibble);
          pixel=ScaleShortToQuantum(nibble);
        }
      else
        {
          /* 32-bit data is stored as big-endian floats in [0,1]. */
          MagickFloatType
            nibble;

          p=PushFloatPixel(MSBEndian,p,&nibble);
          pixel=ClampToQuantum((MagickRealType) (QuantumRange*nibble));
        }
    if (image->depth > 1)
      {
        SetPSDPixel(image,channels,type,packet_size,pixel,q,exception);
        q+=GetPixelChannels(image);
      }
    else
      {
        /*
          1-bit data: expand up to eight pixels from this byte, clamped to
          the pixels remaining in the row.
        */
        ssize_t
          bit,
          number_bits;

        number_bits=(ssize_t) image->columns-x;
        if (number_bits > 8)
          number_bits=8;
        for (bit = 0; bit < (ssize_t) number_bits; bit++)
        {
          SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
            & (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q,exception);
          q+=GetPixelChannels(image);
          x++;
        }
        /* Compensate for the outer loop's x++ unless the row is complete. */
        if (x != (ssize_t) image->columns)
          x--;
        continue;
      }
  }
  return(SyncAuthenticPixels(image,exception));
}
/*
  ReadPSDChannelRaw() reads one uncompressed channel from the blob, one
  scanline at a time, and stores it into 'image'.  Returns MagickFalse
  when the blob is exhausted early or a row fails to convert.
*/
static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels,
  const ssize_t type,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    row_size;

  ssize_t
    y;

  unsigned char
    *row;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RAW");
  row_size=GetPSDRowSize(image);
  row=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*row));
  if (row == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) memset(row,0,row_size*sizeof(*row));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    ssize_t
      count;

    status=MagickFalse;  /* stays false if this row short-reads */
    count=ReadBlob(image,row_size,row);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,channels,y,type,row,exception);
    if (status == MagickFalse)
      break;
  }
  row=(unsigned char *) RelinquishMagickMemory(row);
  return(status);
}
/*
  ReadPSDRLESizes() reads the per-row compressed byte counts that precede
  RLE channel data: 16-bit values in PSD (version 1) files, 32-bit in PSB
  (version 2) files.  Returns NULL when the array cannot be allocated.
*/
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
  const PSDInfo *psd_info,const size_t size)
{
  MagickOffsetType
    *sizes;

  ssize_t
    i;

  sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
  if (sizes == (MagickOffsetType *) NULL)
    return((MagickOffsetType *) NULL);
  for (i=0; i < (ssize_t) size; i++)
    if (psd_info->version == 1)
      sizes[i]=(MagickOffsetType) ReadBlobShort(image);
    else
      sizes[i]=(MagickOffsetType) ReadBlobLong(image);
  return sizes;
}
/*
  ReadPSDChannelRLE() reads one PackBits-compressed channel.  'sizes'
  holds the compressed byte count of each row (see ReadPSDRLESizes); the
  compact buffer is sized to the largest row, decompressed via
  DecodePSDPixels(), and converted row by row.  Returns MagickFalse on a
  short read, a row that fails to decompress to exactly row_size bytes,
  or a pixel-conversion failure.
*/
static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info,
  const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    length,
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RLE compressed");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* Size the compact buffer to the largest compressed row. */
  length=0;
  for (y=0; y < (ssize_t) image->rows; y++)
    if ((MagickOffsetType) length < sizes[y])
      length=(size_t) sizes[y];
  if (length > (row_size+2048)) /* arbitrary number */
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
    }
  /* Fix: allocate with sizeof(*compact_pixels), not sizeof(*pixels). */
  compact_pixels=(unsigned char *) AcquireQuantumMemory(length,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(compact_pixels,0,length*sizeof(*compact_pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;  /* stays false if this row short-reads */
    count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
    if (count != (ssize_t) sizes[y])
      break;
    /* depth 123456 forces DecodePSDPixels' byte path for 1-bit rows. */
    count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
      (ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
      exception);
    if (status == MagickFalse)
      break;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
/*
  Unpredict8Bit() reverses horizontal delta prediction on 8-bit ZIP data:
  each byte stores the difference from its left neighbor, so a running sum
  is accumulated across every row.  'count' is the total byte count and
  'row_size' the bytes per row (equal to image->columns for 8-bit data,
  see ReadPSDChannelZip).
*/
static void Unpredict8Bit(const Image *image,unsigned char *pixels,
  const size_t count,const size_t row_size)
{
  unsigned char
    *p;

  size_t
    length,
    remaining;

  p=pixels;
  remaining=count;
  while (remaining > 0)
  {
    /* columns-1 additions per row: each byte absorbs its left neighbor */
    length=image->columns;
    while (--length)
    {
      *(p+1)+=*p;
      p++;
    }
    p++;  /* step past the last byte of the row */
    remaining-=row_size;
  }
}
/*
  Unpredict16Bit() reverses horizontal delta prediction on 16-bit ZIP
  data.  Samples are big-endian byte pairs: the low bytes (p[1], p[3]) are
  summed directly while the high byte of each sample also absorbs the
  carry out of the low-byte addition.  'count' is the total byte count and
  'row_size' the bytes per row.
*/
static void Unpredict16Bit(const Image *image,unsigned char *pixels,
  const size_t count,const size_t row_size)
{
  unsigned char
    *p;

  size_t
    length,
    remaining;

  p=pixels;
  remaining=count;
  while (remaining > 0)
  {
    length=image->columns;
    while (--length)
    {
      /* high byte: previous high byte plus carry from the low bytes */
      p[2]+=p[0]+((p[1]+p[3]) >> 8);
      /* low byte: running sum */
      p[3]+=p[1];
      p+=2;
    }
    p+=2;
    remaining-=row_size;
  }
}
/*
  Unpredict32Bit() reverses prediction on 32-bit ZIP data.  Each row is
  first byte-delta decoded (running sum across the whole row); the row is
  then stored as four byte planes of image->columns bytes each, which are
  gathered into interleaved big-endian 32-bit samples in 'output_pixels'.
*/
static void Unpredict32Bit(const Image *image,unsigned char *pixels,
  unsigned char *output_pixels,const size_t row_size)
{
  unsigned char
    *p,
    *q;

  ssize_t
    y;

  size_t
    offset1,
    offset2,
    offset3,
    remaining;

  unsigned char
    *start;

  /* Offsets of the 2nd, 3rd and 4th byte planes within a row. */
  offset1=image->columns;
  offset2=2*offset1;
  offset3=3*offset1;
  p=pixels;
  q=output_pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    start=p;
    /* Pass 1: undo the byte-level delta across the entire row. */
    remaining=row_size;
    while (--remaining)
    {
      *(p+1)+=*p;
      p++;
    }
    /* Pass 2: de-interleave the four byte planes into 32-bit samples. */
    p=start;
    remaining=image->columns;
    while (remaining--)
    {
      *(q++)=*p;
      *(q++)=*(p+offset1);
      *(q++)=*(p+offset2);
      *(q++)=*(p+offset3);
      p++;
    }
    p=start+row_size;
  }
}
/*
  ReadPSDChannelZip() reads one ZIP-compressed channel: the compressed
  blob is inflated into a whole-channel buffer, optionally de-predicted
  (ZipWithPrediction) according to the sample size, and converted one
  scanline at a time.  Returns MagickFalse on inflation failure or when a
  row fails to convert.
*/
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
  const ssize_t type,const PSDCompressionType compression,
  const size_t compact_size,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  unsigned char
    *p;

  size_t
    count,
    packet_size,
    row_size;

  ssize_t
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  z_stream
    stream;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is ZIP compressed");
  /* A compressed size beyond the blob is corrupt by definition. */
  if ((MagickSizeType) compact_size > GetBlobSize(image))
    ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
      image->filename);
  compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  packet_size=GetPSDPacketSize(image);
  row_size=image->columns*packet_size;
  count=image->rows*row_size;
  pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
    }
  memset(&stream,0,sizeof(stream));
  stream.data_type=Z_BINARY;
  stream.next_in=(Bytef *)compact_pixels;
  stream.avail_in=(uInt) compact_size;
  stream.next_out=(Bytef *)pixels;
  stream.avail_out=(uInt) count;
  if (inflateInit(&stream) == Z_OK)
    {
      int
        ret;

      while (stream.avail_out > 0)
      {
        ret=inflate(&stream,Z_SYNC_FLUSH);
        if ((ret != Z_OK) && (ret != Z_STREAM_END))
          {
            (void) inflateEnd(&stream);
            compact_pixels=(unsigned char *) RelinquishMagickMemory(
              compact_pixels);
            pixels=(unsigned char *) RelinquishMagickMemory(pixels);
            return(MagickFalse);
          }
        if (ret == Z_STREAM_END)
          break;
      }
      (void) inflateEnd(&stream);
    }
  if (compression == ZipWithPrediction)
    {
      if (packet_size == 1)
        Unpredict8Bit(image,pixels,count,row_size);
      else if (packet_size == 2)
        Unpredict16Bit(image,pixels,count,row_size);
      else if (packet_size == 4)
        {
          unsigned char
            *output_pixels;

          output_pixels=(unsigned char *) AcquireQuantumMemory(count,
            sizeof(*output_pixels));
          /* Fix: check the buffer just allocated, not 'pixels'. */
          if (output_pixels == (unsigned char *) NULL)
            {
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
              pixels=(unsigned char *) RelinquishMagickMemory(pixels);
              ThrowBinaryException(ResourceLimitError,
                "MemoryAllocationFailed",image->filename);
            }
          Unpredict32Bit(image,pixels,output_pixels,row_size);
          pixels=(unsigned char *) RelinquishMagickMemory(pixels);
          pixels=output_pixels;
        }
    }
  status=MagickTrue;
  p=pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
    if (status == MagickFalse)
      break;
    p+=row_size;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#endif
/*
  ReadPSDChannel() reads channel number 'channel' of a layer and
  dispatches on the compression scheme.  Layer-mask channels (type < -1)
  are decoded into a separate grayscale mask image which is attached to
  layer_info->mask; unsupported or disabled masks are skipped by seeking
  past their data.  On any failure the blob is still repositioned at the
  end of the channel data so subsequent channels stay in sync.
*/
static MagickBooleanType ReadPSDChannel(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
  const size_t channel,const PSDCompressionType compression,
  ExceptionInfo *exception)
{
  Image
    *channel_image,
    *mask;

  MagickOffsetType
    offset;

  MagickBooleanType
    status;

  channel_image=image;
  mask=(Image *) NULL;
  if ((layer_info->channel_info[channel].type < -1) &&
      (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0))
    {
      const char
        *option;

      /*
        Ignore mask that is not a user supplied layer mask, if the mask is
        disabled or if the flags have unsupported values.
      */
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if ((layer_info->channel_info[channel].type != -2) ||
          (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
           (IsStringTrue(option) == MagickFalse)))
        {
          /* Skip the channel data (size includes the 2-byte compression). */
          (void) SeekBlob(image,(MagickOffsetType)
            layer_info->channel_info[channel].size-2,SEEK_CUR);
          return(MagickTrue);
        }
      /* Decode the mask into its own grayscale image. */
      mask=CloneImage(image,layer_info->mask.page.width,
        layer_info->mask.page.height,MagickFalse,exception);
      if (mask != (Image *) NULL)
        {
          (void) ResetImagePixels(mask,exception);
          (void) SetImageType(mask,GrayscaleType,exception);
          channel_image=mask;
        }
    }
  offset=TellBlob(image);
  status=MagickFalse;
  switch(compression)
  {
    case Raw:
      status=ReadPSDChannelRaw(channel_image,psd_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,exception);
      break;
    case RLE:
    {
      MagickOffsetType
        *sizes;

      sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      status=ReadPSDChannelRLE(channel_image,psd_info,
        (ssize_t) layer_info->channel_info[channel].type,sizes,exception);
      sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
    }
      break;
    case ZipWithPrediction:
    case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
      status=ReadPSDChannelZip(channel_image,layer_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,compression,
        layer_info->channel_info[channel].size-2,exception);
#else
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (ZLIB)",image->filename);
#endif
      break;
    default:
      (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
        "CompressionNotSupported","'%.20g'",(double) compression);
      break;
  }
  /* Always reposition at the end of this channel's data. */
  (void) SeekBlob(image,offset+layer_info->channel_info[channel].size-2,
    SEEK_SET);
  if (status == MagickFalse)
    {
      if (mask != (Image *) NULL)
        (void) DestroyImage(mask);
      ThrowBinaryException(CoderError,"UnableToDecompressImage",
        image->filename);
    }
  if (mask != (Image *) NULL)
    {
      if (layer_info->mask.image != (Image *) NULL)
        layer_info->mask.image=DestroyImage(layer_info->mask.image);
      layer_info->mask.image=mask;
    }
  return(status);
}
/*
  ReadPSDLayer() populates layer_info->image: it sets the compose operator
  from the layer's blend key, records hidden layer attributes as artifacts
  (psd:layer.x/y/opacity and the "label" property), reads every channel,
  then applies the layer opacity and, if present, the opacity mask.
  CMYK layer data is negated after reading (see NegateCMYK).
*/
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
  const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
  char
    message[MagickPathExtent];

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  ssize_t
    j;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " setting up new layer image");
  if (psd_info->mode != IndexedMode)
    (void) SetImageBackgroundColor(layer_info->image,exception);
  layer_info->image->compose=PSDBlendModeToCompositeOperator(
    layer_info->blendkey);
  /* Hidden layers must not contribute when the list is flattened. */
  if (layer_info->visible == MagickFalse)
    layer_info->image->compose=NoCompositeOp;
  /*
    Set up some hidden attributes for folks that need them.
  */
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.x);
  (void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.y);
  (void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double)
    layer_info->opacity);
  (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
  (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name,
    exception);
  status=MagickTrue;
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for channel %.20g",(double) j);
    /* Each channel carries its own 2-byte compression code. */
    compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
    layer_info->image->compression=ConvertPSDCompression(compression);
    if (layer_info->channel_info[j].type == -1)
      layer_info->image->alpha_trait=BlendPixelTrait;
    status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,
      (size_t) j,compression,exception);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
      MagickFalse,exception);
  if ((status != MagickFalse) &&
      (layer_info->image->colorspace == CMYKColorspace))
    status=NegateCMYK(layer_info->image,exception);
  if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
    {
      const char
        *option;

      layer_info->mask.image->page.x=layer_info->mask.page.x;
      layer_info->mask.image->page.y=layer_info->mask.page.y;
      /* Do not composite the mask when it is disabled */
      if ((layer_info->mask.flags & 0x02) == 0x02)
        layer_info->mask.image->compose=NoCompositeOp;
      else
        status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
          layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
          exception);
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if (IsStringTrue(option) != MagickFalse)
        PreservePSDOpacityMask(image,layer_info,exception);
      layer_info->mask.image=DestroyImage(layer_info->mask.image);
    }
  return(status);
}
/*
  CheckPSDChannels() verifies a layer defines every channel its color mode
  requires.  A bitmask starts with the channels the mode demands and each
  matching channel record clears its bit; the layer is valid when nothing
  is left, or when only an alpha channel was found in addition to (at
  least) the required channel count plus one.
*/
static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info,
  LayerInfo *layer_info)
{
  int
    required;

  ssize_t
    i;

  if (layer_info->channels < psd_info->min_channels)
    return(MagickFalse);
  required=RedChannel;
  if (psd_info->min_channels >= 3)
    required|=(GreenChannel | BlueChannel);
  if (psd_info->min_channels >= 4)
    required|=BlackChannel;
  for (i=0; i < (ssize_t) layer_info->channels; i++)
  {
    short
      type;

    type=layer_info->channel_info[i].type;
    /* Indexed images must list the index channel first. */
    if ((i == 0) && (psd_info->mode == IndexedMode) && (type != 0))
      return(MagickFalse);
    if (type == -1)
      {
        required|=AlphaChannel;
        continue;
      }
    if (type < -1)
      continue;  /* layer-mask channels are not counted */
    switch (type)
    {
      case 0:
        required&=~RedChannel;
        break;
      case 1:
        required&=~GreenChannel;
        break;
      case 2:
        required&=~BlueChannel;
        break;
      case 3:
        required&=~BlackChannel;
        break;
      default:
        break;
    }
  }
  if (required == 0)
    return(MagickTrue);
  if ((required == AlphaChannel) &&
      (layer_info->channels >= psd_info->min_channels + 1))
    return(MagickTrue);
  return(MagickFalse);
}
/*
  AttachPSDLayers() compacts out layers whose image could not be read,
  chains the remaining layer images into the image list after 'image'
  (setting their page geometry), and frees the layer array.
*/
static void AttachPSDLayers(Image *image,LayerInfo *layer_info,
  ssize_t number_layers)
{
  ssize_t
    i,
    j;

  /* Remove empty slots, shifting later layers down. */
  for (i=0; i < number_layers; i++)
    if (layer_info[i].image == (Image *) NULL)
      {
        for (j=i; j < (number_layers-1); j++)
          layer_info[j]=layer_info[j+1];
        number_layers--;
        i--;
      }
  if (number_layers == 0)
    {
      layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
      return;
    }
  /* Doubly link the surviving layer images. */
  for (i=0; i < number_layers; i++)
  {
    Image
      *layer_image;

    layer_image=layer_info[i].image;
    if (i > 0)
      layer_image->previous=layer_info[i-1].image;
    if (i < (number_layers-1))
      layer_image->next=layer_info[i+1].image;
    layer_image->page=layer_info[i].page;
  }
  image->next=layer_info[0].image;
  layer_info[0].image->previous=image;
  layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
}
/*
  PSDSkipImage() returns MagickTrue when layer 'index' falls outside the
  scene range the caller requested.  Skipping only happens when a merged
  image exists and a scene range was actually given.
*/
static inline MagickBooleanType PSDSkipImage(const PSDInfo *psd_info,
  const ImageInfo *image_info,const size_t index)
{
  if ((psd_info->has_merged_image == MagickFalse) ||
      (image_info->number_scenes == 0))
    return(MagickFalse);
  if ((index < image_info->scene) ||
      (index > image_info->scene+image_info->number_scenes-1))
    return(MagickTrue);
  return(MagickFalse);
}
/*
  The number of layers cannot be used to determine if the merged image
  contains an alpha channel, so infer it from surplus channels for the
  color mode: more than 1 for grayscale, 3 for RGB, or 4 for CMYK.
*/
static void CheckMergedImageAlpha(const PSDInfo *psd_info,Image *image)
{
  size_t
    base_channels;

  switch (psd_info->mode)
  {
    case GrayscaleMode:
      base_channels=1;
      break;
    case RGBMode:
      base_channels=3;
      break;
    case CMYKMode:
      base_channels=4;
      break;
    default:
      return;
  }
  if (psd_info->channels > base_channels)
    image->alpha_trait=BlendPixelTrait;
}
/*
  ParseAdditionalInfo() scans the layer's additional-info blob
  (layer_info->info) for tagged blocks: 4-byte signature, 4-byte key and a
  4-byte big-endian size precede each payload.  Only the "luni" block
  (Unicode layer name) is interpreted; ASCII-only names are copied into
  layer_info->name.
*/
static void ParseAdditionalInfo(LayerInfo *layer_info)
{
  char
    key[5];

  size_t
    remaining_length;

  unsigned char
    *p;

  unsigned int
    size;

  p=GetStringInfoDatum(layer_info->info);
  remaining_length=GetStringInfoLength(layer_info->info);
  /* Each block header is 12 bytes: signature + key + size. */
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(char) (*p++);
    key[1]=(char) (*p++);
    key[2]=(char) (*p++);
    key[3]=(char) (*p++);
    key[4]='\0';
    /* 32-bit big-endian payload size. */
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;
    remaining_length-=12;
    if ((size_t) size > remaining_length)
      break;
    if (LocaleNCompare(key,"luni",sizeof(key)) == 0)
      {
        unsigned char
          *name;

        unsigned int
          length;

        /* Character count of the UTF-16BE name. */
        length=(unsigned int) (*p++) << 24;
        length|=(unsigned int) (*p++) << 16;
        length|=(unsigned int) (*p++) << 8;
        length|=(unsigned int) (*p++);
        /* Two bytes per character must fit within the payload. */
        if (length * 2 > size - 4)
          break;
        if (sizeof(layer_info->name) <= length)
          break;
        name=layer_info->name;
        while (length > 0)
        {
          /* Only ASCII strings are supported */
          if (*p++ != '\0')
            break;
          *name++=*p++;
          length--;
        }
        /* Terminate only when the whole name was ASCII. */
        if (length == 0)
          *name='\0';
        break;
      }
    else
      p+=size;
    remaining_length-=(size_t) size;
  }
}
/*
  GetLayerInfoSize() returns the size of the layer-info section.  A
  non-zero size is returned directly; otherwise the additional "8BIM"
  tagged blocks are probed: Mt16/Mt32/Mtrn (merged-image transparency,
  which implies an alpha channel) may precede an Lr16/Lr32 block that
  holds the 16/32-bit layer data whose size is then read.  Returns 0 when
  no layer info can be located.
*/
static MagickSizeType GetLayerInfoSize(const PSDInfo *psd_info,Image *image)
{
  char
    type[4];

  MagickSizeType
    size;

  ssize_t
    count;

  size=GetPSDSize(psd_info,image);
  if (size != 0)
    return(size);
  (void) ReadBlobLong(image);
  count=ReadPSDString(image,type,4);
  if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
    return(0);
  count=ReadPSDString(image,type,4);
  if ((count == 4) && ((LocaleNCompare(type,"Mt16",4) == 0) ||
      (LocaleNCompare(type,"Mt32",4) == 0) ||
      (LocaleNCompare(type,"Mtrn",4) == 0)))
    {
      /* Transparency block: must be empty for layer data to follow. */
      size=GetPSDSize(psd_info,image);
      if (size != 0)
        return(0);
      image->alpha_trait=BlendPixelTrait;
      count=ReadPSDString(image,type,4);
      if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
        return(0);
      count=ReadPSDString(image,type,4);
    }
  if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) ||
      (LocaleNCompare(type,"Lr32",4) == 0)))
    size=GetPSDSize(psd_info,image);
  return(size);
}
/*
  ReadPSDLayersInternal() parses the PSD layer-info section: the signed
  layer count, then per-layer records (geometry, channel table, blend key,
  opacity/clipping/flags, optional mask and blending ranges, Pascal name and
  additional info), and finally -- unless skip_layers is set -- each layer's
  channel pixel data.  Decoded layers are attached to 'image' on success
  (MagickTrue); on any failure the LayerInfo array is destroyed and a binary
  exception is thrown (MagickFalse).
*/
static MagickBooleanType ReadPSDLayersInternal(Image *image,
const ImageInfo *image_info,const PSDInfo *psd_info,
const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
char
type[4];
LayerInfo
*layer_info;
MagickSizeType
size;
MagickBooleanType
status;
ssize_t
i;
ssize_t
count,
index,
j,
number_layers;
/* An empty layer-info section: only the merged-image alpha matters. */
size=GetLayerInfoSize(psd_info,image);
if (size == 0)
{
CheckMergedImageAlpha(psd_info,image);
return(MagickTrue);
}
layer_info=(LayerInfo *) NULL;
number_layers=(ssize_t) ReadBlobSignedShort(image);
if (number_layers < 0)
{
/*
The first alpha channel in the merged result contains the
transparency data for the merged result.
*/
number_layers=MagickAbsoluteValue(number_layers);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" negative layer count corrected for");
image->alpha_trait=BlendPixelTrait;
}
/*
We only need to know if the image has an alpha channel
*/
if (skip_layers != MagickFalse)
return(MagickTrue);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image contains %.20g layers",(double) number_layers);
if (number_layers == 0)
ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
image->filename);
layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
sizeof(*layer_info));
if (layer_info == (LayerInfo *) NULL)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" allocation of LayerInfo failed");
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
/* Zero-fill so later cleanup paths can rely on NULL/zero fields. */
(void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info));
/* Pass 1: parse every layer's header record in stream order. */
for (i=0; i < number_layers; i++)
{
ssize_t
top,
left,
bottom,
right;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading layer #%.20g",(double) i+1);
/* Layer bounds are stored as top/left/bottom/right signed longs. */
top=(ssize_t) ReadBlobSignedLong(image);
left=(ssize_t) ReadBlobSignedLong(image);
bottom=(ssize_t) ReadBlobSignedLong(image);
right=(ssize_t) ReadBlobSignedLong(image);
if ((right < left) || (bottom < top))
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
image->filename);
}
layer_info[i].page.y=top;
layer_info[i].page.x=left;
layer_info[i].page.width=(size_t) (right-left);
layer_info[i].page.height=(size_t) (bottom-top);
layer_info[i].channels=ReadBlobShort(image);
if (layer_info[i].channels > MaxPSDChannels)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
image->filename);
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
(double) layer_info[i].page.x,(double) layer_info[i].page.y,
(double) layer_info[i].page.height,(double)
layer_info[i].page.width,(double) layer_info[i].channels);
/* Per-channel type (-4..4) and the compressed byte count of its data. */
for (j=0; j < (ssize_t) layer_info[i].channels; j++)
{
layer_info[i].channel_info[j].type=(short) ReadBlobShort(image);
if ((layer_info[i].channel_info[j].type < -4) ||
(layer_info[i].channel_info[j].type > 4))
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"NoSuchImageChannel",
image->filename);
}
layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
image);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" channel[%.20g]: type=%.20g, size=%.20g",(double) j,
(double) layer_info[i].channel_info[j].type,
(double) layer_info[i].channel_info[j].size);
}
if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
image->filename);
}
/* Blend-mode record: "8BIM" signature then a 4-byte blend key. */
count=ReadPSDString(image,type,4);
if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer type was %.4s instead of 8BIM", type);
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
image->filename);
}
count=ReadPSDString(image,layer_info[i].blendkey,4);
if (count != 4)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
image->filename);
}
layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
ReadBlobByte(image));
layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
layer_info[i].flags=(unsigned char) ReadBlobByte(image);
/* Flag bit 0x02 marks a hidden layer. */
layer_info[i].visible=!(layer_info[i].flags & 0x02);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
layer_info[i].blendkey,(double) layer_info[i].opacity,
layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
layer_info[i].visible ? "true" : "false");
(void) ReadBlobByte(image); /* filler */
/* 'size' is the extra-data length; combined_length tracks consumption. */
size=ReadBlobLong(image);
if (size != 0)
{
MagickSizeType
combined_length,
length;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer contains additional info");
length=ReadBlobLong(image);
combined_length=length+4;
if (length != 0)
{
/*
Layer mask info.
*/
layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image);
layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image);
layer_info[i].mask.page.height=(size_t)
(ReadBlobSignedLong(image)-layer_info[i].mask.page.y);
layer_info[i].mask.page.width=(size_t) (
ReadBlobSignedLong(image)-layer_info[i].mask.page.x);
layer_info[i].mask.background=(unsigned char) ReadBlobByte(
image);
layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
/* Mask flag 0x01: position is relative to the layer, not the canvas. */
if (!(layer_info[i].mask.flags & 0x01))
{
layer_info[i].mask.page.y=layer_info[i].mask.page.y-
layer_info[i].page.y;
layer_info[i].mask.page.x=layer_info[i].mask.page.x-
layer_info[i].page.x;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
(double) layer_info[i].mask.page.x,(double)
layer_info[i].mask.page.y,(double)
layer_info[i].mask.page.width,(double)
layer_info[i].mask.page.height,(double) ((MagickOffsetType)
length)-18);
/*
Skip over the rest of the layer mask information.
*/
/* NOTE(review): 18 bytes of mask fields were consumed above; if
length < 18 the unsigned subtraction wraps to a huge skip count
and DiscardBlobBytes() fails at end-of-file -- confirm a length
sanity check is not needed earlier. */
if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
length=ReadBlobLong(image);
combined_length+=length+4;
if (length != 0)
{
/*
Layer blending ranges info.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer blending ranges: length=%.20g",(double)
((MagickOffsetType) length));
if (DiscardBlobBytes(image,length) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
/*
Layer name.
*/
/* Pascal string: one length byte, the name bytes, then padding so the
length byte plus name occupy a multiple of 4 bytes. */
length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
combined_length+=length+1;
if (length > 0)
(void) ReadBlob(image,(size_t) length++,layer_info[i].name);
layer_info[i].name[length]='\0';
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer name: %s",layer_info[i].name);
if ((length % 4) != 0)
{
length=4-(length % 4);
combined_length+=length;
/* Skip over the padding of the layer name */
if (DiscardBlobBytes(image,length) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
/* Whatever remains of 'size' is the layer's additional-info blob. */
length=(MagickSizeType) size-combined_length;
if (length > 0)
{
unsigned char
*info;
if (length > GetBlobSize(image))
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"InsufficientImageDataInFile",image->filename);
}
layer_info[i].info=AcquireStringInfo((const size_t) length);
info=GetStringInfoDatum(layer_info[i].info);
(void) ReadBlob(image,(const size_t) length,info);
ParseAdditionalInfo(&layer_info[i]);
}
}
}
/* Pass 2: allocate a per-layer image for each non-empty layer. */
for (i=0; i < number_layers; i++)
{
if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" layer data is empty");
if (layer_info[i].info != (StringInfo *) NULL)
layer_info[i].info=DestroyStringInfo(layer_info[i].info);
continue;
}
/*
Allocate layered image.
*/
layer_info[i].image=CloneImage(image,layer_info[i].page.width,
layer_info[i].page.height,MagickFalse,exception);
if (layer_info[i].image == (Image *) NULL)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" allocation of image for layer %.20g failed",(double) i);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
/* Hand the additional-info blob to the layer image as a profile. */
if (layer_info[i].info != (StringInfo *) NULL)
{
(void) SetImageProfile(layer_info[i].image,"psd:additional-info",
layer_info[i].info,exception);
layer_info[i].info=DestroyStringInfo(layer_info[i].info);
}
}
/* When pinging, attach the (pixel-less) layers and stop. */
if (image_info->ping != MagickFalse)
{
AttachPSDLayers(image,layer_info,number_layers);
return(MagickTrue);
}
/* Pass 3: read -- or skip -- each layer's channel pixel data. */
status=MagickTrue;
index=0;
for (i=0; i < number_layers; i++)
{
if ((layer_info[i].image == (Image *) NULL) ||
(PSDSkipImage(psd_info, image_info,++index) != MagickFalse))
{
/* Skipped layers: still consume their channel bytes to stay in sync. */
for (j=0; j < (ssize_t) layer_info[i].channels; j++)
{
if (DiscardBlobBytes(image,(MagickSizeType)
layer_info[i].channel_info[j].size) == MagickFalse)
{
layer_info=DestroyLayerInfo(layer_info,number_layers);
ThrowBinaryException(CorruptImageError,
"UnexpectedEndOfFile",image->filename);
}
}
continue;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading data for layer %.20g",(double) i);
status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
exception);
if (status == MagickFalse)
break;
status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
(MagickSizeType) number_layers);
if (status == MagickFalse)
break;
}
if (status != MagickFalse)
AttachPSDLayers(image,layer_info,number_layers);
else
layer_info=DestroyLayerInfo(layer_info,number_layers);
return(status);
}
/*
  ReadPSDLayers() is the public entry point for decoding PSD layers; it
  enforces the coder read policy first.  When the policy denies access the
  call quietly reports success without reading any layer data.
*/
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  if (IsRightsAuthorized(CoderPolicyDomain,ReadPolicyRights,"PSD") == MagickFalse)
    return(MagickTrue);
  return(ReadPSDLayersInternal(image,image_info,psd_info,MagickFalse,
    exception));
}
/*
  ReadPSDMergedImage() decodes the flattened (precombined) composite stored
  at the end of a PSD stream into 'image', channel by channel.  Only Raw
  and RLE compression are supported; for RLE the per-row compressed sizes
  are read up front.  Returns MagickTrue on success.
*/
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
Image *image,const PSDInfo *psd_info,ExceptionInfo *exception)
{
MagickOffsetType
*sizes;
MagickBooleanType
status;
PSDCompressionType
compression;
ssize_t
i;
/* Scene selection never needs the merged composite beyond scene 0. */
if ((image_info->number_scenes != 0) && (image_info->scene != 0))
return(MagickTrue);
compression=(PSDCompressionType) ReadBlobMSBShort(image);
image->compression=ConvertPSDCompression(compression);
/* Zip variants are valid per-layer but not for the merged image here. */
if (compression != Raw && compression != RLE)
{
(void) ThrowMagickException(exception,GetMagickModule(),
TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
return(MagickFalse);
}
sizes=(MagickOffsetType *) NULL;
if (compression == RLE)
{
/* One compressed-row size per row per channel, in channel order. */
sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
if (sizes == (MagickOffsetType *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
status=MagickTrue;
for (i=0; i < (ssize_t) psd_info->channels; i++)
{
ssize_t
type;
type=i;
/* In a two-channel image the second channel is mapped to type -1,
presumably the alpha channel -- matches the per-layer channel codes. */
if ((type == 1) && (psd_info->channels == 2))
type=-1;
if (compression == RLE)
status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows),
exception);
else
status=ReadPSDChannelRaw(image,psd_info->channels,type,exception);
if (status != MagickFalse)
status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
psd_info->channels);
if (status == MagickFalse)
break;
}
/* CMYK samples are stored inverted in PSD; undo that after decoding. */
if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
status=NegateCMYK(image,exception);
if (status != MagickFalse)
status=CorrectPSDAlphaBlend(image_info,image,exception);
sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
return(status);
}
/*
  ReadPSDImage() reads an Adobe Photoshop (PSD/PSB) stream and returns the
  decoded image list, or NULL with 'exception' set on failure.  The stream
  is consumed strictly in order: file header, colormap section, image
  resource blocks, layer/mask section, then the precombined merged image.
*/
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image
*image;
MagickBooleanType
skip_layers;
MagickOffsetType
offset;
MagickSizeType
length;
MagickBooleanType
status;
PSDInfo
psd_info;
ssize_t
i;
size_t
image_list_length;
ssize_t
count;
StringInfo
*profile;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read image header.
*/
/* "8BPS" signature; version 1 = PSD, version 2 = PSB (large document). */
image->endian=MSBEndian;
count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
psd_info.version=ReadBlobMSBShort(image);
if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
((psd_info.version != 1) && (psd_info.version != 2)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
(void) ReadBlob(image,6,psd_info.reserved);
psd_info.channels=ReadBlobMSBShort(image);
if (psd_info.channels < 1)
ThrowReaderException(CorruptImageError,"MissingImageChannel");
if (psd_info.channels > MaxPSDChannels)
ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
psd_info.rows=ReadBlobMSBLong(image);
psd_info.columns=ReadBlobMSBLong(image);
/* The PSD (version 1) format caps dimensions at 30000 pixels. */
if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
(psd_info.columns > 30000)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
psd_info.depth=ReadBlobMSBShort(image);
if ((psd_info.depth != 1) && (psd_info.depth != 8) &&
(psd_info.depth != 16) && (psd_info.depth != 32))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
psd_info.mode=ReadBlobMSBShort(image);
if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
(double) psd_info.columns,(double) psd_info.rows,(double)
psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
psd_info.mode));
if (EOFBlob(image) != MagickFalse)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Initialize image.
*/
image->depth=psd_info.depth;
image->columns=psd_info.columns;
image->rows=psd_info.rows;
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
status=ResetImagePixels(image,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
/* Derive colorspace and the minimum channel count the mode requires. */
psd_info.min_channels=3;
switch (psd_info.mode)
{
case LabMode:
{
(void) SetImageColorspace(image,LabColorspace,exception);
break;
}
case CMYKMode:
{
psd_info.min_channels=4;
(void) SetImageColorspace(image,CMYKColorspace,exception);
break;
}
case BitmapMode:
case GrayscaleMode:
case DuotoneMode:
{
if (psd_info.depth != 32)
{
status=AcquireImageColormap(image,MagickMin((size_t)
(psd_info.depth < 16 ? 256 : 65536), MaxColormapSize),exception);
if (status == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Image colormap allocated");
}
psd_info.min_channels=1;
(void) SetImageColorspace(image,GRAYColorspace,exception);
break;
}
case IndexedMode:
{
psd_info.min_channels=1;
break;
}
case MultichannelMode:
{
if ((psd_info.channels > 0) && (psd_info.channels < 3))
{
psd_info.min_channels=psd_info.channels;
(void) SetImageColorspace(image,GRAYColorspace,exception);
}
break;
}
}
if (psd_info.channels < psd_info.min_channels)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Read PSD raster colormap only present for indexed and duotone images.
*/
length=ReadBlobMSBLong(image);
if ((psd_info.mode == IndexedMode) && (length < 3))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (length != 0)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading colormap");
if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32))
{
/*
Duotone image data; the format of this data is undocumented.
32 bits per pixel; the colormap is ignored.
*/
(void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR);
}
else
{
size_t
number_colors;
/*
Read PSD raster colormap: all reds, then greens, then blues.
*/
number_colors=(size_t) length/3;
if (number_colors > 65536)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (AcquireImageColormap(image,number_colors,exception) == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].red=(MagickRealType) ScaleCharToQuantum(
(unsigned char) ReadBlobByte(image));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].green=(MagickRealType) ScaleCharToQuantum(
(unsigned char) ReadBlobByte(image));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum(
(unsigned char) ReadBlobByte(image));
image->alpha_trait=UndefinedPixelTrait;
}
}
if ((image->depth == 1) && (image->storage_class != PseudoClass))
ThrowReaderException(CorruptImageError, "ImproperImageHeader");
psd_info.has_merged_image=MagickTrue;
profile=(StringInfo *) NULL;
length=ReadBlobMSBLong(image);
if (length != 0)
{
unsigned char
*blocks;
/*
Image resources block.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading image resource blocks - %.20g bytes",(double)
((MagickOffsetType) length));
if (length > GetBlobSize(image))
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
sizeof(*blocks));
if (blocks == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
count=ReadBlob(image,(size_t) length,blocks);
if ((count != (ssize_t) length) || (length < 4) ||
(LocaleNCompare((char *) blocks,"8BIM",4) != 0))
{
blocks=(unsigned char *) RelinquishMagickMemory(blocks);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
profile=ParseImageResourceBlocks(&psd_info,image,blocks,(size_t) length);
blocks=(unsigned char *) RelinquishMagickMemory(blocks);
}
/*
Layer and mask block.
*/
length=GetPSDSize(&psd_info,image);
/* NOTE(review): a 'length' of 8 apparently wraps a nested section; the
first long is discarded and the second is taken as the real length --
confirm against the PSD specification. */
if (length == 8)
{
length=ReadBlobMSBLong(image);
length=ReadBlobMSBLong(image);
}
/* Remember where the layer section starts so it can be revisited. */
offset=TellBlob(image);
skip_layers=MagickFalse;
if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
(psd_info.has_merged_image != MagickFalse))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" read composite only");
skip_layers=MagickTrue;
}
if (length == 0)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image has no layers");
}
else
{
if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers,
exception) != MagickTrue)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Skip the rest of the layer and mask information.
*/
(void) SeekBlob(image,offset+length,SEEK_SET);
}
/*
If we are only "pinging" the image, then we're done - so return.
*/
if (EOFBlob(image) != MagickFalse)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
}
if (image_info->ping != MagickFalse)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
Read the precombined layer, present for PSD < 4 compatibility.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading the precombined layer");
image_list_length=GetImageListLength(image);
if ((psd_info.has_merged_image != MagickFalse) || (image_list_length == 1))
psd_info.has_merged_image=(MagickBooleanType) ReadPSDMergedImage(
image_info,image,&psd_info,exception);
/* No merged image and no decoded layers: rewind and force-read layers. */
if ((psd_info.has_merged_image == MagickFalse) && (image_list_length == 1) &&
(length != 0))
{
(void) SeekBlob(image,offset,SEEK_SET);
status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse,
exception);
if (status != MagickTrue)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
image_list_length=GetImageListLength(image);
}
if (psd_info.has_merged_image == MagickFalse)
{
Image
*merged;
if (image_list_length == 1)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
}
/* Synthesize the missing composite by flattening the layers. */
image->background_color.alpha=(MagickRealType) TransparentAlpha;
image->background_color.alpha_trait=BlendPixelTrait;
(void) SetImageBackgroundColor(image,exception);
merged=MergeImageLayers(image,FlattenLayer,exception);
if (merged == (Image *) NULL)
{
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
ReplaceImageInList(&image,merged);
}
/* Attach the resource-block profile to every image kept in the list. */
if (profile != (StringInfo *) NULL)
{
Image
*next;
i=0;
next=image;
while (next != (Image *) NULL)
{
if (PSDSkipImage(&psd_info,image_info,i++) == MagickFalse)
(void) SetImageProfile(next,GetStringInfoName(profile),profile,
exception);
next=next->next;
}
profile=DestroyStringInfo(profile);
}
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPSDImage() adds properties for the PSD image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPSDImage method is:
%
% size_t RegisterPSDImage(void)
%
*/
/*
  RegisterPSDImage() registers the PSB and PSD formats (which share the
  same reader, writer, and magick detector) and returns the coder
  signature.
*/
ModuleExport size_t RegisterPSDImage(void)
{
  static const struct
  {
    const char
      *tag,
      *description;
  } formats[] =
  {
    { "PSB", "Adobe Large Document Format" },
    { "PSD", "Adobe Photoshop bitmap" }
  };

  ssize_t
    n;

  for (n=0; n < (ssize_t) (sizeof(formats)/sizeof(formats[0])); n++)
  {
    MagickInfo
      *entry;

    entry=AcquireMagickInfo("PSD",formats[n].tag,formats[n].description);
    entry->decoder=(DecodeImageHandler *) ReadPSDImage;
    entry->encoder=(EncodeImageHandler *) WritePSDImage;
    entry->magick=(IsImageFormatHandler *) IsPSD;
    /* Both directions require a seekable stream (offsets are patched). */
    entry->flags|=CoderDecoderSeekableStreamFlag;
    entry->flags|=CoderEncoderSeekableStreamFlag;
    (void) RegisterMagickInfo(entry);
  }
  return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPSDImage() removes format registrations made by the
% PSD module from the list of supported formats.
%
% The format of the UnregisterPSDImage method is:
%
% UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
(void) UnregisterMagickInfo("PSB");
(void) UnregisterMagickInfo("PSD");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
% The format of the WritePSDImage method is:
%
% MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SetPSDOffset() writes a row-offset value whose width depends on the PSD
  version: 16 bits for version 1 (PSD), 32 bits for version 2 (PSB).
  Returns the number of bytes written.
*/
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  if (psd_info->version != 1)
    return(WriteBlobMSBLong(image,(unsigned int) offset));
  return(WriteBlobMSBShort(image,(unsigned short) offset));
}
/*
  WritePSDOffset() seeks back to 'offset', overwrites the row-offset
  placeholder there with 'size' (16-bit for version 1, 32-bit otherwise),
  and restores the original stream position.  Returns the number of bytes
  written.
*/
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    restore_offset;

  ssize_t
    count;

  restore_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  count=(psd_info->version == 1) ?
    WriteBlobMSBShort(image,(unsigned short) size) :
    WriteBlobMSBLong(image,(unsigned int) size);
  (void) SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
/*
  SetPSDSize() writes a section length: 32 bits for version 1 (PSD)
  streams, 64 bits for version 2 (PSB) streams.  Returns the number of
  bytes written.
*/
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  if (psd_info->version != 1)
    return(WriteBlobLongLong(image,size));
  return(WriteBlobLong(image,(unsigned int) size));
}
/*
  WritePSDSize() seeks back to 'offset', rewrites the section length stored
  there via SetPSDSize(), and restores the current stream position.
  Returns the number of bytes written.
*/
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    restore_offset;

  ssize_t
    count;

  restore_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  count=SetPSDSize(psd_info,image,size);
  (void) SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
/*
  PSDPackbitsEncodeImage() compresses 'length' bytes of 'pixels' into
  'compact_pixels' using the Packbits run-length scheme (repeat packets for
  runs of >= 3 equal bytes, literal packets otherwise) and appends the EOD
  marker.  Returns the number of compressed bytes produced.
*/
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
const unsigned char *pixels,unsigned char *compact_pixels,
ExceptionInfo *exception)
{
int
count;
ssize_t
i,
j;
unsigned char
*q;
unsigned char
*packbits;
/*
Compress pixels with Packbits encoding.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(pixels != (unsigned char *) NULL);
assert(compact_pixels != (unsigned char *) NULL);
/* Scratch buffer for one literal packet (header + up to 127 bytes). */
packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
if (packbits == (unsigned char *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
q=compact_pixels;
for (i=(ssize_t) length; i != 0; )
{
switch (i)
{
/* Tails of 1-3 bytes are emitted directly (a 3-byte run still packs). */
case 1:
{
i--;
*q++=(unsigned char) 0;
*q++=(*pixels);
break;
}
case 2:
{
i-=2;
*q++=(unsigned char) 1;
*q++=(*pixels);
*q++=pixels[1];
break;
}
case 3:
{
i-=3;
if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
{
*q++=(unsigned char) ((256-3)+1);
*q++=(*pixels);
break;
}
*q++=(unsigned char) 2;
*q++=(*pixels);
*q++=pixels[1];
*q++=pixels[2];
break;
}
default:
{
if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
{
/*
Packed run: header (257-count), then the repeated byte.
*/
count=3;
while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
{
count++;
/* Packbits packets cannot encode runs longer than 127. */
if (count >= 127)
break;
}
i-=count;
*q++=(unsigned char) ((256-count)+1);
*q++=(*pixels);
pixels+=count;
break;
}
/*
Literal run: header (count-1), then the verbatim bytes; the run
ends when three equal bytes are seen (they start a packed run).
*/
count=0;
while ((*(pixels+count) != *(pixels+count+1)) ||
(*(pixels+count+1) != *(pixels+count+2)))
{
packbits[count+1]=pixels[count];
count++;
if (((ssize_t) count >= (i-3)) || (count >= 127))
break;
}
i-=count;
*packbits=(unsigned char) (count-1);
for (j=0; j <= (ssize_t) count; j++)
*q++=packbits[j];
pixels+=count;
break;
}
}
}
*q++=(unsigned char) 128; /* EOD marker */
packbits=(unsigned char *) RelinquishMagickMemory(packbits);
return((size_t) (q-compact_pixels));
}
/*
  WriteCompressionStart() emits the two-byte compression marker for a
  channel-data section; for RLE it additionally reserves one zeroed
  row-offset slot per row per channel, to be patched later via
  WritePSDOffset().  Returns the number of bytes written.
*/
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const CompressionType compression,
  const ssize_t channels)
{
  size_t
    count;

  ssize_t
    channel,
    y;

  if (compression == RLECompression)
    {
      count=(size_t) WriteBlobShort(image,RLE);
      for (channel=0; channel < channels; channel++)
        for (y=0; y < (ssize_t) next_image->rows; y++)
          count+=SetPSDOffset(psd_info,image,0);
      return(count);
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    return((size_t) WriteBlobShort(image,ZipWithoutPrediction));
#endif
  return((size_t) WriteBlobShort(image,Raw));
}
/*
  WritePSDChannel() exports one quantum channel of 'next_image' to the PSD
  stream, compressed as requested (raw, Packbits RLE, or -- when zlib is
  available -- zip).  For RLE the per-row compressed sizes are patched back
  at 'size_offset'.  When 'separate' is set the channel writes its own
  compression marker first (layer channel data), otherwise the caller wrote
  a shared one.  Returns the number of bytes written (0 on allocation
  failure).
*/
static size_t WritePSDChannel(const PSDInfo *psd_info,
const ImageInfo *image_info,Image *image,Image *next_image,
const QuantumType quantum_type, unsigned char *compact_pixels,
MagickOffsetType size_offset,const MagickBooleanType separate,
const CompressionType compression,ExceptionInfo *exception)
{
MagickBooleanType
monochrome;
QuantumInfo
*quantum_info;
const Quantum
*p;
ssize_t
i;
size_t
count,
length;
ssize_t
y;
unsigned char
*pixels;
#ifdef MAGICKCORE_ZLIB_DELEGATE
int
flush,
level;
unsigned char
*compressed_pixels;
z_stream
stream;
compressed_pixels=(unsigned char *) NULL;
flush=Z_NO_FLUSH;
#endif
count=0;
if (separate != MagickFalse)
{
/* Row offsets start just past the 2-byte compression marker. */
size_offset=TellBlob(image)+2;
count+=WriteCompressionStart(psd_info,image,next_image,compression,1);
}
/* PSD has no depths between 8 and 16; promote to 16. */
if (next_image->depth > 8)
next_image->depth=16;
monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
MagickTrue : MagickFalse;
quantum_info=AcquireQuantumInfo(image_info,next_image);
if (quantum_info == (QuantumInfo *) NULL)
return(0);
pixels=(unsigned char *) GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
if (compression == ZipCompression)
{
compressed_pixels=(unsigned char *) AcquireQuantumMemory(
MagickMinBufferExtent,sizeof(*compressed_pixels));
if (compressed_pixels == (unsigned char *) NULL)
{
quantum_info=DestroyQuantumInfo(quantum_info);
return(0);
}
memset(&stream,0,sizeof(stream));
stream.data_type=Z_BINARY;
/* quality 1-9 maps directly onto the zlib compression level. */
level=Z_DEFAULT_COMPRESSION;
if ((image_info->quality > 0 && image_info->quality < 10))
level=(int) image_info->quality;
if (deflateInit(&stream,level) != Z_OK)
{
quantum_info=DestroyQuantumInfo(quantum_info);
compressed_pixels=(unsigned char *) RelinquishMagickMemory(
compressed_pixels);
return(0);
}
}
#endif
for (y=0; y < (ssize_t) next_image->rows; y++)
{
p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,exception);
/* 1-bit samples are complemented -- PSD bitmap data is stored inverted. */
if (monochrome != MagickFalse)
for (i=0; i < (ssize_t) length; i++)
pixels[i]=(~pixels[i]);
if (compression == RLECompression)
{
length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels,
exception);
count+=WriteBlob(image,length,compact_pixels);
/* Patch this row's compressed size into the offset table. */
size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
else if (compression == ZipCompression)
{
stream.avail_in=(uInt) length;
stream.next_in=(Bytef *) pixels;
/* Finish the deflate stream on the final row. */
if (y == (ssize_t) next_image->rows-1)
flush=Z_FINISH;
do {
stream.avail_out=(uInt) MagickMinBufferExtent;
stream.next_out=(Bytef *) compressed_pixels;
if (deflate(&stream,flush) == Z_STREAM_ERROR)
break;
length=(size_t) MagickMinBufferExtent-stream.avail_out;
if (length > 0)
count+=WriteBlob(image,length,compressed_pixels);
} while (stream.avail_out == 0);
}
#endif
else
count+=WriteBlob(image,length,pixels);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
if (compression == ZipCompression)
{
(void) deflateEnd(&stream);
compressed_pixels=(unsigned char *) RelinquishMagickMemory(
compressed_pixels);
}
#endif
quantum_info=DestroyQuantumInfo(quantum_info);
return(count);
}
/*
  AcquireCompactPixels() allocates a scratch buffer large enough for the
  worst-case Packbits output of one row: (9*columns)+1 packets of
  'packet_size' bytes each (two bytes per sample when depth > 8).  Returns
  NULL and raises a resource-limit exception on failure.
*/
static unsigned char *AcquireCompactPixels(const Image *image,
  ExceptionInfo *exception)
{
  size_t
    packet_size;

  unsigned char
    *compact_pixels;

  /* Samples wider than 8 bits are packed as two bytes. */
  packet_size=image->depth > 8UL ? 2UL : 1UL;
  compact_pixels=(unsigned char *) NULL;
  /*
    Guard the (9*columns)+1 expression against size_t overflow before it
    reaches the allocator: AcquireQuantumMemory() can only overflow-check
    its two arguments, not a multiplication performed by the caller
    (CERT INT30-C).  Overflow degrades to the existing failure path.
  */
  if (image->columns < ((~(size_t) 0-1)/9))
    compact_pixels=(unsigned char *) AcquireQuantumMemory((9*
      image->columns)+1,packet_size*sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
    }
  return(compact_pixels);
}
/*
  Write every channel of `next_image` to the PSD blob held by `image`.
  `separate` selects layer-channel mode (each channel preceded by its own
  size record) versus composite mode (a single compression marker followed
  by per-row offsets).  Returns the total number of bytes written, or 0 on
  failure.
*/
static size_t WritePSDChannels(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  ExceptionInfo *exception)
{
  CompressionType
    compression;

  Image
    *mask;

  MagickOffsetType
    rows_offset;

  size_t
    channels,
    count,
    length,
    offset_length;

  unsigned char
    *compact_pixels;

  count=0;
  offset_length=0;
  rows_offset=0;
  compact_pixels=(unsigned char *) NULL;
  /* The image-info compression option overrides the per-image setting. */
  compression=next_image->compression;
  if (image_info->compression != UndefinedCompression)
    compression=image_info->compression;
  if (compression == RLECompression)
    {
      /* Scratch buffer for PackBits output; released below. */
      compact_pixels=AcquireCompactPixels(next_image,exception);
      if (compact_pixels == (unsigned char *) NULL)
        return(0);
    }
  channels=1;
  if (separate == MagickFalse)
    {
      /* Composite mode: count the channels (RGB/CMYK plus optional alpha)
         and emit the shared compression marker plus row-offset table. */
      if ((next_image->storage_class != PseudoClass) ||
          (IsImageGray(next_image) != MagickFalse))
        {
          if (IsImageGray(next_image) == MagickFalse)
            channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 :
              3);
          if (next_image->alpha_trait != UndefinedPixelTrait)
            channels++;
        }
      rows_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,
        (ssize_t) channels);
      /* Per-channel size of the RLE row-length table (2 bytes per row in
         PSD, 4 in PSB). */
      offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4));
    }
  size_offset+=2;
  if ((next_image->storage_class == PseudoClass) &&
      (IsImageGray(next_image) == MagickFalse))
    {
      /* Palette image: a single index channel. */
      length=WritePSDChannel(psd_info,image_info,image,next_image,
        IndexQuantum,compact_pixels,rows_offset,separate,compression,
        exception);
      if (separate != MagickFalse)
        size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
      else
        rows_offset+=offset_length;
      count+=length;
    }
  else
    {
      if (IsImageGray(next_image) != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GrayQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
      else
        {
          /* PSD stores CMYK inverted; NegateCMYK is undone after writing. */
          if (next_image->colorspace == CMYKColorspace)
            (void) NegateCMYK(next_image,exception);
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            RedQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GreenQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            BlueQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          if (next_image->colorspace == CMYKColorspace)
            {
              length=WritePSDChannel(psd_info,image_info,image,next_image,
                BlackQuantum,compact_pixels,rows_offset,separate,compression,
                exception);
              if (separate != MagickFalse)
                size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
              else
                rows_offset+=offset_length;
              count+=length;
            }
        }
      if (next_image->alpha_trait != UndefinedPixelTrait)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            AlphaQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
    }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  /* Undo the CMYK negation applied above. */
  if (next_image->colorspace == CMYKColorspace)
    (void) NegateCMYK(next_image,exception);
  if (separate != MagickFalse)
    {
      const char
        *property;

      /* An opacity mask registered under "psd:opacity-mask" is written as
         one extra grayscale (RedQuantum) channel. */
      property=GetImageArtifact(next_image,"psd:opacity-mask");
      if (property != (const char *) NULL)
        {
          mask=(Image *) GetImageRegistry(ImageRegistryType,property,
            exception);
          if (mask != (Image *) NULL)
            {
              if (compression == RLECompression)
                {
                  compact_pixels=AcquireCompactPixels(mask,exception);
                  if (compact_pixels == (unsigned char *) NULL)
                    return(0);
                }
              length=WritePSDChannel(psd_info,image_info,image,mask,
                RedQuantum,compact_pixels,rows_offset,MagickTrue,compression,
                exception);
              (void) WritePSDSize(psd_info,image,length,size_offset);
              count+=length;
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
            }
        }
    }
  return(count);
}
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
  /*
    Write a Pascal string (length byte followed by characters) and pad the
    total size, length byte included, up to a multiple of `padding` with NUL
    bytes.  The string is clipped to 255 characters, the maximum a single
    length byte can describe.  Returns the number of bytes written.
  */
  size_t
    count,
    length;

  ssize_t
    i;

  count=0;
  length=strlen(value);
  if (length > 255UL)
    length=255UL;
  if (length == 0)
    count+=WriteBlobByte(image,0);
  else
    {
      count+=WriteBlobByte(image,(unsigned char) length);
      count+=WriteBlob(image,length,(const unsigned char *) value);
    }
  length++;
  if ((length % padding) != 0)
    for (i=(ssize_t) (padding-(length % padding)); i > 0; i--)
      count+=WriteBlobByte(image,0);
  return(count);
}
/*
  Emit the 8BIM resource 0x03ED holding the image resolution as 16.16
  fixed-point pixels-per-inch values plus the display units
  (1 = inch, 2 = cm).
*/
static void WriteResolutionResourceBlock(Image *image)
{
  double
    x_resolution,
    y_resolution;

  unsigned short
    units;

  if (image->units == PixelsPerCentimeterResolution)
    {
      /* Convert pixels/cm to pixels/inch (2.54 cm per inch), scaled to
         16.16 fixed point with rounding. */
      x_resolution=2.54*65536.0*image->resolution.x+0.5;
      y_resolution=2.54*65536.0*image->resolution.y+0.5;
      units=2;
    }
  else
    {
      x_resolution=65536.0*image->resolution.x+0.5;
      y_resolution=65536.0*image->resolution.y+0.5;
      units=1;
    }
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);
  (void) WriteBlobMSBShort(image,0);
  (void) WriteBlobMSBLong(image,16); /* resource size */
  /* NOTE(review): 0.5 was already added above, so this rounds a second
     time (off by at most one 1/65536 unit) -- presumably harmless; confirm
     against the intended rounding behavior. */
  (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,units); /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,units); /* height unit */
}
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  /*
    Emit a channel record header: the channel id followed by a zero
    placeholder for the channel data length (patched in later once the
    channel has been written).  Returns the number of bytes written.
  */
  size_t
    bytes_written;

  bytes_written=(size_t) WriteBlobShort(image,(const unsigned short) channel);
  bytes_written+=SetPSDSize(psd_info,image,0);
  return(bytes_written);
}
/*
  Scan an 8BIM resource block and excise the ICC profile resource
  (id 0x040F) in place, shrinking `bim_profile` accordingly.  The ICC
  profile is written separately by WritePSDImage, so keeping a copy here
  would duplicate it.
*/
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
  const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  /* Walk the resource entries; 16 bytes is the minimum entry size
     (signature + id + name + length). */
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    unsigned char
      *q;

    q=(unsigned char *) p;
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    /* Parse the entry header: signature (skipped), resource id, padded
       Pascal name (assumed empty here), and the data byte count. */
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    if (id == 0x0000040f)
      {
        ssize_t
          quantum;

        /* Full entry size: 12-byte header plus data rounded up to even. */
        quantum=PSDQuantum(count)+12;
        if ((quantum >= 12) && (quantum < (ssize_t) length))
          {
            /* Shift the remaining entries down over the ICC entry. */
            if ((q+quantum < (datum+length-16)))
              (void) memmove(q,q+quantum,length-quantum-(q-datum));
            SetStringInfoLength(bim_profile,length-quantum);
          }
        break;
      }
    /* Skip this entry's data; resource data is padded to an even size. */
    p+=count;
    if ((count & 0x01) != 0)
      p++;
  }
}
/*
  Scan an 8BIM resource block and excise the resolution resource
  (id 0x03ED) in place; WriteResolutionResourceBlock emits a fresh one, so
  a stale copy in the profile would conflict with it.
*/
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
  const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  /* Walk the resource entries; 16 bytes is the minimum entry size. */
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    unsigned char
      *q;

    ssize_t
      cnt;

    q=(unsigned char *) p;
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      return;
    /* Parse entry header: signature (skipped), id, padded name, count. */
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    /* Data size rounded up to an even byte count. */
    cnt=PSDQuantum(count);
    if (cnt < 0)
      return;
    if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) &&
        ((ssize_t) length-(cnt+12)-(q-datum)) > 0)
      {
        /* Shift the remaining entries down over the resolution entry
           (12-byte header plus padded data). */
        (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum));
        SetStringInfoLength(bim_profile,length-(cnt+12));
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)
      p++;
  }
}
/*
  Filter the "psd:additional-info" profile according to the image option of
  the same name: "all" keeps everything, anything other than "selective"
  drops the profile entirely, and "selective" (the default) keeps only the
  whitelisted layer-resource keys, compacting the profile in place.
  Returns the profile to write, or NULL if nothing should be written.
*/
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
#define PSDKeySize 5
#define PSDAllowedLength 36

  char
    key[PSDKeySize];

  /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */
  const char
    allowed[PSDAllowedLength][PSDKeySize] = {
      "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
      "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
      "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
      "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
    },
    *option;

  const StringInfo
    *info;

  MagickBooleanType
    found;

  size_t
    i;

  size_t
    remaining_length,
    length;

  StringInfo
    *profile;

  unsigned char
    *p;

  unsigned int
    size;

  info=GetImageProfile(image,"psd:additional-info");
  if (info == (const StringInfo *) NULL)
    return((const StringInfo *) NULL);
  option=GetImageOption(image_info,"psd:additional-info");
  if (LocaleCompare(option,"all") == 0)
    return(info);
  if (LocaleCompare(option,"selective") != 0)
    {
      /* Any other value: drop the profile entirely. */
      profile=RemoveImageProfile(image,"psd:additional-info");
      return(DestroyStringInfo(profile));
    }
  length=GetStringInfoLength(info);
  p=GetStringInfoDatum(info);
  remaining_length=length;
  /* `length` now accumulates the size of the entries we keep. */
  length=0;
  /* Each entry: 4-byte signature, 4-byte key, 4-byte big-endian size,
     then `size` bytes of data (12-byte minimum). */
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(char) (*p++);
    key[1]=(char) (*p++);
    key[2]=(char) (*p++);
    key[3]=(char) (*p++);
    key[4]='\0';
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;
    remaining_length-=12;
    if ((size_t) size > remaining_length)
      return((const StringInfo *) NULL);
    found=MagickFalse;
    for (i=0; i < PSDAllowedLength; i++)
    {
      if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
        continue;

      found=MagickTrue;
      break;
    }
    remaining_length-=(size_t) size;
    if (found == MagickFalse)
      {
        /* Not whitelisted: compact the buffer by moving the tail over this
           entry (header included), leaving `p` at the moved data. */
        if (remaining_length > 0)
          p=(unsigned char *) memmove(p-12,p+size,remaining_length);
        continue;
      }
    length+=(size_t) size+12;
    p+=size;
  }
  profile=RemoveImageProfile(image,"psd:additional-info");
  if (length == 0)
    return(DestroyStringInfo(profile));
  /* Truncate the (now compacted) profile to the kept entries and
     re-attach it to the image. */
  SetStringInfoLength(profile,(const size_t) length);
  (void) SetImageProfile(image,"psd:additional-info",info,exception);
  return(profile);
}
/*
  Write the PSD layer-info section: a layer count, one record per layer
  (bounds, channel table, blend mode, opacity, optional mask, name,
  additional info), followed by the per-layer channel pixel data.  If
  `layers_size` is non-NULL the total section size is returned through it.
  Returns MagickTrue on success.
*/
static MagickBooleanType WritePSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,size_t *layers_size,
  ExceptionInfo *exception)
{
  char
    layer_name[MagickPathExtent];

  const char
    *property;

  const StringInfo
    *info;

  Image
    *base_image,
    *next_image;

  MagickBooleanType
    status;

  MagickOffsetType
    *layer_size_offsets,
    size_offset;

  ssize_t
    i;

  size_t
    layer_count,
    layer_index,
    length,
    name_length,
    rounded_size,
    size;

  status=MagickTrue;
  /* Layers start at the second image in the list; a single image is its
     own (only) layer. */
  base_image=GetNextImageInList(image);
  if (base_image == (Image *) NULL)
    base_image=image;
  size=0;
  /* Remember where the section size goes; patched at the end. */
  size_offset=TellBlob(image);
  (void) SetPSDSize(psd_info,image,0);
  layer_count=0;
  for (next_image=base_image; next_image != NULL; )
  {
    layer_count++;
    next_image=GetNextImageInList(next_image);
  }
  /* A negative layer count signals that the first alpha channel holds the
     merged-image transparency. */
  if (image->alpha_trait != UndefinedPixelTrait)
    size+=WriteBlobShort(image,-(unsigned short) layer_count);
  else
    size+=WriteBlobShort(image,(unsigned short) layer_count);
  layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory(
    (size_t) layer_count,sizeof(MagickOffsetType));
  if (layer_size_offsets == (MagickOffsetType *) NULL)
    ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
  layer_index=0;
  /* First pass: write each layer record (headers only). */
  for (next_image=base_image; next_image != NULL; )
  {
    Image
      *mask;

    unsigned char
      default_color;

    unsigned short
      channels,
      total_channels;

    mask=(Image *) NULL;
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    default_color=0;
    if (property != (const char *) NULL)
      {
        /* An opacity mask registered for this layer adds a mask channel.
           NOTE(review): the strlen(property)==9 test presumably detects a
           registry key naming convention -- confirm against the reader. */
        mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception);
        default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0);
      }
    /* Layer bounds: top, left, bottom, right. */
    size+=WriteBlobSignedLong(image,(signed int) next_image->page.y);
    size+=WriteBlobSignedLong(image,(signed int) next_image->page.x);
    size+=WriteBlobSignedLong(image,(signed int) (next_image->page.y+
      next_image->rows));
    size+=WriteBlobSignedLong(image,(signed int) (next_image->page.x+
      next_image->columns));
    channels=1;
    if ((next_image->storage_class != PseudoClass) &&
        (IsImageGray(next_image) == MagickFalse))
      channels=(unsigned short) (next_image->colorspace == CMYKColorspace ? 4 :
        3);
    total_channels=channels;
    if (next_image->alpha_trait != UndefinedPixelTrait)
      total_channels++;
    if (mask != (Image *) NULL)
      total_channels++;
    size+=WriteBlobShort(image,total_channels);
    /* Record where this layer's channel-size placeholders start so the
       second pass can patch the real sizes in. */
    layer_size_offsets[layer_index++]=TellBlob(image);
    for (i=0; i < (ssize_t) channels; i++)
      size+=WriteChannelSize(psd_info,image,(signed short) i);
    /* Channel id -1 is the alpha channel, -2 the user-supplied mask. */
    if (next_image->alpha_trait != UndefinedPixelTrait)
      size+=WriteChannelSize(psd_info,image,-1);
    if (mask != (Image *) NULL)
      size+=WriteChannelSize(psd_info,image,-2);
    /* Blend-mode signature, byte-swapped on little-endian blobs. */
    size+=WriteBlobString(image,image->endian == LSBEndian ? "MIB8" :"8BIM");
    size+=WriteBlobString(image,CompositeOperatorToPSDBlendMode(next_image));
    property=GetImageArtifact(next_image,"psd:layer.opacity");
    if (property != (const char *) NULL)
      {
        Quantum
          opacity;

        /* The layer pixels were premultiplied by this opacity when read;
           undo that so the stored pixels are at full opacity. */
        opacity=(Quantum) StringToInteger(property);
        size+=WriteBlobByte(image,ScaleQuantumToChar(opacity));
        (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception);
      }
    else
      size+=WriteBlobByte(image,255);
    size+=WriteBlobByte(image,0);
    size+=WriteBlobByte(image,(const unsigned char)
      (next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. */
    size+=WriteBlobByte(image,0);
    info=GetAdditionalInformation(image_info,next_image,exception);
    property=(const char *) GetImageProperty(next_image,"label",exception);
    if (property == (const char *) NULL)
      {
        /* Unnamed layers get a synthetic "L<n>" name. */
        (void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g",
          (double) layer_index);
        property=layer_name;
      }
    /* Extra-data length: padded Pascal name + mask record + additional
       info, preceded by the two 4-byte sub-section lengths (the +8). */
    name_length=strlen(property)+1;
    if ((name_length % 4) != 0)
      name_length+=(4-(name_length % 4));
    if (info != (const StringInfo *) NULL)
      name_length+=GetStringInfoLength(info);
    name_length+=8;
    if (mask != (Image *) NULL)
      name_length+=20;
    size+=WriteBlobLong(image,(unsigned int) name_length);
    if (mask == (Image *) NULL)
      size+=WriteBlobLong(image,0);
    else
      {
        /* 20-byte layer-mask record: bounds, default color, flags, pad. */
        if (mask->compose != NoCompositeOp)
          (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum(
            default_color),MagickTrue,exception);
        mask->page.y+=image->page.y;
        mask->page.x+=image->page.x;
        size+=WriteBlobLong(image,20);
        size+=WriteBlobSignedLong(image,(const signed int) mask->page.y);
        size+=WriteBlobSignedLong(image,(const signed int) mask->page.x);
        size+=WriteBlobSignedLong(image,(const signed int) (mask->rows+
          mask->page.y));
        size+=WriteBlobSignedLong(image,(const signed int) (mask->columns+
          mask->page.x));
        size+=WriteBlobByte(image,default_color);
        size+=WriteBlobByte(image,(const unsigned char)
          (mask->compose == NoCompositeOp ? 2 : 0));
        size+=WriteBlobMSBShort(image,0);
      }
    size+=WriteBlobLong(image,0);
    size+=WritePascalString(image,property,4);
    if (info != (const StringInfo *) NULL)
      size+=WriteBlob(image,GetStringInfoLength(info),
        GetStringInfoDatum(info));
    next_image=GetNextImageInList(next_image);
  }
  /*
    Now the image data!
  */
  next_image=base_image;
  layer_index=0;
  while (next_image != NULL)
  {
    /* Second pass: write each layer's channel data and patch the size
       placeholders recorded above. */
    length=WritePSDChannels(psd_info,image_info,image,next_image,
      layer_size_offsets[layer_index++],MagickTrue,exception);
    if (length == 0)
      {
        status=MagickFalse;
        break;
      }
    size+=length;
    next_image=GetNextImageInList(next_image);
  }
  /*
    Write the total size
  */
  if (layers_size != (size_t*) NULL)
    *layers_size=size;
  /* The section size is rounded up to an even number of bytes. */
  if ((size/2) != ((size+1)/2))
    rounded_size=size+1;
  else
    rounded_size=size;
  (void) WritePSDSize(psd_info,image,rounded_size,size_offset);
  layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory(
    layer_size_offsets);
  /*
    Remove the opacity mask from the registry
  */
  next_image=base_image;
  while (next_image != (Image *) NULL)
  {
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    if (property != (const char *) NULL)
      (void) DeleteImageRegistry(property);
    next_image=GetNextImageInList(next_image);
  }
  return(status);
}
ModuleExport MagickBooleanType WritePSDLayers(Image * image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  /*
    Public entry point for writing the PSD layer records.  When coder
    policy denies PSD write rights, the layers are silently skipped and
    MagickTrue is returned so callers can proceed without them.
  */
  if (IsRightsAuthorized(CoderPolicyDomain,WritePolicyRights,"PSD") ==
      MagickFalse)
    return(MagickTrue);
  return(WritePSDLayersInternal(image,image_info,psd_info,(size_t *) NULL,
    exception));
}
/*
  Write `image` (and any additional images in its list as layers) to a PSD
  or PSB blob: file header, mode data/colormap, image resources (8BIM),
  layer section, then the composite image data.  Returns MagickTrue on
  success.
*/
static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  const StringInfo
    *icc_profile;

  MagickBooleanType
    status;

  PSDInfo
    psd_info;

  ssize_t
    i;

  size_t
    length,
    num_channels;

  StringInfo
    *bim_profile;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  /* Version 2 is the large-document PSB format; required beyond 30000
     pixels per side. */
  psd_info.version=1;
  if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
      (image->columns > 30000) || (image->rows > 30000))
    psd_info.version=2;
  (void) WriteBlob(image,4,(const unsigned char *) "8BPS");
  (void) WriteBlobMSBShort(image,psd_info.version); /* version */
  for (i=1; i <= 6; i++)
    (void) WriteBlobByte(image, 0);  /* 6 bytes of reserved */
  /* Choose the channel count: gray (1-2), indexed (1-2), RGB (3-4) or
     CMYK (4-5); the extra channel is alpha. */
  if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) &&
      (SetImageGray(image,exception) != MagickFalse))
    num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
  else
    if ((image_info->type != TrueColorType) &&
        (image_info->type != TrueColorAlphaType) &&
        (image->storage_class == PseudoClass))
      num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
    else
      {
        if (image->storage_class == PseudoClass)
          (void) SetImageStorageClass(image,DirectClass,exception);
        if (image->colorspace != CMYKColorspace)
          num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL);
        else
          num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL);
      }
  (void) WriteBlobMSBShort(image,(unsigned short) num_channels);
  (void) WriteBlobMSBLong(image,(unsigned int) image->rows);
  (void) WriteBlobMSBLong(image,(unsigned int) image->columns);
  if (IsImageGray(image) != MagickFalse)
    {
      MagickBooleanType
        monochrome;

      /*
        Write depth & mode.
      */
      monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
        MagickTrue : MagickFalse;
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8));
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? BitmapMode : GrayscaleMode));
    }
  else
    {
      (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class ==
        PseudoClass ? 8 : image->depth > 8 ? 16 : 8));
      /* Anything that is not explicitly CMYK is written as sRGB. */
      if (((image_info->colorspace != UndefinedColorspace) ||
           (image->colorspace != CMYKColorspace)) &&
          (image_info->colorspace != CMYKColorspace))
        {
          (void) TransformImageColorspace(image,sRGBColorspace,exception);
          (void) WriteBlobMSBShort(image,(unsigned short)
            (image->storage_class == PseudoClass ? IndexedMode : RGBMode));
        }
      else
        {
          if (image->colorspace != CMYKColorspace)
            (void) TransformImageColorspace(image,CMYKColorspace,exception);
          (void) WriteBlobMSBShort(image,CMYKMode);
        }
    }
  if ((IsImageGray(image) != MagickFalse) ||
      (image->storage_class == DirectClass) || (image->colors > 256))
    (void) WriteBlobMSBLong(image,0);
  else
    {
      /*
        Write PSD raster colormap.
      */
      /* Fixed 768-byte palette: 256 reds, 256 greens, 256 blues, each
         zero-padded past image->colors. */
      (void) WriteBlobMSBLong(image,768);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].red)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].green)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].blue)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
    }
  /*
    Image resource block.
  */
  /* 28 bytes is the fixed size of the resolution resource (0x03ED). */
  length=28; /* 0x03EB */
  bim_profile=(StringInfo *) GetImageProfile(image,"8bim");
  icc_profile=GetImageProfile(image,"icc");
  if (bim_profile != (StringInfo *) NULL)
    {
      /* Work on a clone; strip resources that are re-emitted below so
         they are not duplicated. */
      bim_profile=CloneStringInfo(bim_profile);
      if (icc_profile != (StringInfo *) NULL)
        RemoveICCProfileFromResourceBlock(bim_profile);
      RemoveResolutionFromResourceBlock(bim_profile);
      length+=PSDQuantum(GetStringInfoLength(bim_profile));
    }
  if (icc_profile != (const StringInfo *) NULL)
    length+=PSDQuantum(GetStringInfoLength(icc_profile))+12;
  (void) WriteBlobMSBLong(image,(unsigned int) length);
  WriteResolutionResourceBlock(image);
  if (bim_profile != (StringInfo *) NULL)
    {
      (void) WriteBlob(image,GetStringInfoLength(bim_profile),
        GetStringInfoDatum(bim_profile));
      bim_profile=DestroyStringInfo(bim_profile);
    }
  if (icc_profile != (StringInfo *) NULL)
    {
      /* 8BIM resource 0x040F holds the ICC profile, padded to even. */
      (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
      (void) WriteBlobMSBShort(image,0x0000040F);
      (void) WriteBlobMSBShort(image,0);
      (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength(
        icc_profile));
      (void) WriteBlob(image,GetStringInfoLength(icc_profile),
        GetStringInfoDatum(icc_profile));
      if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile)))
        (void) WriteBlobByte(image,0);
    }
  if (status != MagickFalse)
    {
      const char
        *option;

      CompressionType
        compression;

      MagickOffsetType
        size_offset;

      size_t
        size;

      size_offset=TellBlob(image);
      (void) SetPSDSize(&psd_info,image,0);
      /* Layers can be suppressed with -define psd:write-layers=false. */
      option=GetImageOption(image_info,"psd:write-layers");
      if (IsStringFalse(option) != MagickTrue)
        {
          status=WritePSDLayersInternal(image,image_info,&psd_info,&size,
            exception);
          (void) WritePSDSize(&psd_info,image,size+
            (psd_info.version == 1 ? 8 : 12),size_offset);
          (void) WriteBlobMSBLong(image,0);  /* user mask data */
        }
      /*
        Write composite image.
      */
      compression=image->compression;
      if (image_info->compression != UndefinedCompression)
        image->compression=image_info->compression;
      /* The composite section does not support zip; fall back to RLE. */
      if (image->compression == ZipCompression)
        image->compression=RLECompression;
      if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse,
          exception) == 0)
        status=MagickFalse;
      image->compression=compression;
    }
  (void) CloseBlob(image);
  return(status);
}
|
cast_hcl_arm.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: renzun@openailab.com
*/
#include "sys_port.h"
#include "module.h"
#include "tengine_errno.h"
#include "tengine_log.h"
#include "tengine_ir.h"
#include "../../cpu_node_ops.h"
#include "tengine_op.h"
#include <math.h>
#include "compiler_fp16.h"
#include "cast_param.h"
/* Cast has no per-node state to allocate; nothing to do. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
/* Counterpart of init_node; no resources were acquired, so nothing to free. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
/* No pre-run preparation (no weight repacking or buffer setup) is needed. */
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
/*
  Execute the cast op: element-wise conversion between fp32 and fp16 over
  the whole input tensor, parallelized across batch*channel slices.
  Returns 0; type combinations other than fp32<->fp16 are silently left
  unhandled (the output is not written).
*/
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_node* ir_node = exec_node->ir_node;
    struct ir_graph* ir_graph = ir_node->graph;
    struct ir_tensor* input_tensor;
    struct ir_tensor* output_tensor;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    struct cast_param* cast_param = ( struct cast_param* )ir_node->op.param_mem;

    /* NOTE(review): type codes 1 and 2 are taken to mean fp32 and fp16
       respectively per cast_param convention -- confirm against the
       cast_param header. */
    int type_from = cast_param->type_from;
    int type_to = cast_param->type_to;

    /* Assumes a 4-D NCHW tensor (dims[0..3]); score() only accepts NCHW
       layouts, but rank < 4 is not checked here. */
    int channel_num = input_tensor->dims[1];
    int batch_number = input_tensor->dims[0];
    int channel_size = (input_tensor->dims[2]) * (input_tensor->dims[3]);
    int num_thread = exec_graph->num_thread;

    /* fp32 -> fp16: one OpenMP task per (batch, channel) slice. */
    if (type_from == 1 && type_to == 2)
    {
        float* idata = ( float* )input_tensor->data;
        __fp16* odata = ( __fp16* )output_tensor->data;
#pragma omp parallel for num_threads(num_thread)
        for (int i = 0; i < (channel_num * batch_number); i++)
        {
            int offset = i * channel_size;
            for (int j = 0; j < channel_size; j++)
            {
                odata[j + offset] = fp32_to_fp16(idata[j + offset]);
            }
        }
    }
    /* fp16 -> fp32, same parallelization scheme. */
    if (type_from == 2 && type_to == 1)
    {
        __fp16* idata = ( __fp16* )input_tensor->data;
        float* odata = ( float* )output_tensor->data;
#pragma omp parallel for num_threads(num_thread)
        for (int i = 0; i < (channel_num * batch_number); i++)
        {
            int offset = i * channel_size;
            for (int j = 0; j < channel_size; j++)
            {
                odata[j + offset] = fp16_to_fp32(idata[j + offset]);
            }
        }
    }

    return 0;
}
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node)
{
    /* Offer this implementation only for NCHW input layouts; a score of 0
       lets the runtime pick a different provider. */
    struct ir_graph* graph = exec_node->graph;
    struct ir_tensor* input = get_ir_graph_tensor(graph, exec_node->input_tensors[0]);

    return (input->layout == TENGINE_LAYOUT_NCHW) ? OPS_SCORE_BEST : 0;
}
/* Dispatch table for the cast op; reshape/postrun are not needed. */
static struct node_ops hcl_node_ops = {.prerun = prerun,
                                       .run = run,
                                       .reshape = NULL,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};
/* Register this implementation for OP_CAST at library load time. */
static int reg_cast_hcl_ops(void* arg)
{
    return register_builtin_node_ops(OP_CAST, &hcl_node_ops);
}
/* Unregister the OP_CAST implementation at library unload time. */
static int unreg_cast_hcl_ops(void* arg)
{
    return unregister_builtin_node_ops(OP_CAST, &hcl_node_ops);
}

/* Hook registration/unregistration into the module init machinery. */
AUTO_REGISTER_OPS(reg_cast_hcl_ops);
AUTO_UNREGISTER_OPS(unreg_cast_hcl_ops);
|
task_dep3.c | /*
* @@name: task_dep.5c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: success
*/
// Assume BS divides N perfectly
/* Blocked matrix multiply C += A*B using OpenMP tasks with depend
   clauses to order tasks that touch the same C block.  BS must divide N
   exactly. */
void matmul_depend(int N, int BS, float A[N][N], float B[N][N], float C[N][N] )
{
   int i, j, k;

   for (i = 0; i < N; i += BS)
      for (j = 0; j < N; j += BS)
         for (k = 0; k < N; k += BS) {
// Note 1: i, j, k, A, B, C are firstprivate by default
// Note 2: A, B and C are just pointers
// Note 3: loop counters declared inside the task body are private, so no
//         private clause is needed
#pragma omp task depend ( in: A[i:BS][k:BS], B[k:BS][j:BS] ) \
                 depend ( inout: C[i:BS][j:BS] )
            {
               for (int bi = i; bi < i+BS; bi++ )
                  for (int bj = j; bj < j+BS; bj++ )
                     for (int bk = k; bk < k+BS; bk++ )
                        C[bi][bj] += A[bi][bk] * B[bk][bj];
            }
         }
}
|
convolution_3x3.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Plain (non-vectorized) 3x3 stride-1 convolution: for each output channel,
// accumulate the 3x3 correlation of every input channel, with an optional
// per-channel bias.  Processes two output rows per iteration so the four
// input rows r0..r3 are each read once per pass.
static void conv3x3s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        // Seed the output with the bias (or zero) before accumulating.
        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);

        for (int q=0; q<inch; q++)
        {
            float* outptr = out;
            float* outptr2 = outptr + outw;

            const float* img0 = bottom_blob.channel(q);

            // 9 kernel weights for (output p, input q), as three 3-tap rows.
            const float* kernel0 = kernel + p*inch*9 + q*9;

            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w*2;
            const float* r3 = img0 + w*3;

            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            int i = 0;

            // Main loop: two output rows at a time; rows r1 and r2 feed
            // both output rows.
            for (; i+1 < outh; i+=2)
            {
                int remain = outw;

                for (; remain>0; remain--)
                {
                    float sum = 0;
                    float sum2 = 0;

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];

                    sum2 += r1[0] * k0[0];
                    sum2 += r1[1] * k0[1];
                    sum2 += r1[2] * k0[2];
                    sum2 += r2[0] * k1[0];
                    sum2 += r2[1] * k1[1];
                    sum2 += r2[2] * k1[2];
                    sum2 += r3[0] * k2[0];
                    sum2 += r3[1] * k2[1];
                    sum2 += r3[2] * k2[2];

                    *outptr += sum;
                    *outptr2 += sum2;

                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    outptr++;
                    outptr2++;
                }

                // Skip the 2-column right border (w = outw + 2 for a 3x3
                // valid convolution) and advance one extra input/output row.
                r0 += 2 + w;
                r1 += 2 + w;
                r2 += 2 + w;
                r3 += 2 + w;

                outptr += outw;
                outptr2 += outw;
            }

            // Tail: a single remaining output row when outh is odd.
            for (; i < outh; i++)
            {
                int remain = outw;

                for (; remain>0; remain--)
                {
                    float sum = 0;

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];

                    *outptr += sum;

                    r0++;
                    r1++;
                    r2++;
                    outptr++;
                }

                r0 += 2;
                r1 += 2;
                r2 += 2;
            }
        }
    }
}
|
psd.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP SSSSS DDDD %
% P P SS D D %
% PPPP SSS D D %
% P SS D D %
% P SSSSS DDDD %
% %
% %
% Read/Write Adobe Photoshop Image Format %
% %
% Software Design %
% Cristy %
% Leonard Rosenthol %
% July 1992 %
% Dirk Lemstra %
% December 2013 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/channel.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/registry.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"
/*
Define declaractions.
*/
#define MaxPSDChannels 56
#define PSDQuantum(x) (((ssize_t) (x)+1) & -2)
/*
  Enumerated declarations.
*/
/*
  Channel-data compression methods defined by the PSD specification.
*/
typedef enum
{
  Raw = 0,                   /* uncompressed samples */
  RLE = 1,                   /* PackBits run-length encoding */
  ZipWithoutPrediction = 2,  /* zlib deflate */
  ZipWithPrediction = 3      /* zlib deflate of per-row byte deltas */
} PSDCompressionType;
/*
  PSD color modes; values 5 and 6 are not handled by this coder.
*/
typedef enum
{
  BitmapMode = 0,
  GrayscaleMode = 1,
  IndexedMode = 2,
  RGBMode = 3,
  CMYKMode = 4,
  MultichannelMode = 7,
  DuotoneMode = 8,
  LabMode = 9
} PSDImageType;
/*
  Typedef declarations.
*/
/*
  One channel descriptor from a layer record: the channel type (negative
  values denote transparency/mask channels) and the byte count of its
  compressed data in the stream.
*/
typedef struct _ChannelInfo
{
  short
    type;

  size_t
    size;
} ChannelInfo;
/*
  Layer-mask data: the decoded mask image, its placement on the canvas,
  the background value outside the mask, and the raw mask flags byte.
*/
typedef struct _MaskInfo
{
  Image
    *image;

  RectangleInfo
    page;

  unsigned char
    background,
    flags;
} MaskInfo;
/*
  Everything parsed from one PSD layer record: per-channel descriptors,
  the 4-byte blend-mode key, the decoded layer image and its mask,
  placement, opacity, flags, the layer name (Pascal-style, hence 257
  bytes), and the raw "additional information" blob.
*/
typedef struct _LayerInfo
{
  ChannelInfo
    channel_info[MaxPSDChannels];

  char
    blendkey[4];

  Image
    *image;

  MaskInfo
    mask;

  Quantum
    opacity;

  RectangleInfo
    page;

  size_t
    offset_x,
    offset_y;

  unsigned char
    clipping,
    flags,
    name[257],
    visible;

  unsigned short
    channels;

  StringInfo
    *info;
} LayerInfo;
/*
Forward declarations.
*/
static MagickBooleanType
WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P S D %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPSD() returns MagickTrue if the image format type, identified by the
% magick string, is PSD.
%
% The format of the IsPSD method is:
%
% MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
  /*
    A PSD stream begins with the 4-byte signature "8BPS".
  */
  if (length >= 4)
    {
      if (LocaleNCompare((const char *) magick,"8BPS",4) == 0)
        return(MagickTrue);
    }
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPSDImage() reads an Adobe Photoshop image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadPSDImage method is:
%
% Image *ReadPSDImage(image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const char *CompositeOperatorToPSDBlendMode(Image *image)
{
  /*
    Map a MagickCore composite operator onto the 4-byte PSD blend-mode key.
    Little-endian images store the key byte-reversed, so each entry carries
    both spellings; unknown operators (and OverCompositeOp) map to "norm".
  */
  static const struct
  {
    CompositeOperator
      compose;

    const char
      *reversed,  /* LSB-endian spelling */
      *key;       /* MSB-endian (natural) spelling */
  } blend_modes[] =
  {
    { ColorBurnCompositeOp, "vidi", "idiv" },
    { ColorDodgeCompositeOp, " vid", "div " },
    { ColorizeCompositeOp, "rloc", "colr" },
    { DarkenCompositeOp, "krad", "dark" },
    { DifferenceCompositeOp, "ffid", "diff" },
    { DissolveCompositeOp, "ssid", "diss" },
    { ExclusionCompositeOp, "dums", "smud" },
    { HardLightCompositeOp, "tiLh", "hLit" },
    { HardMixCompositeOp, "xiMh", "hMix" },
    { HueCompositeOp, " euh", "hue " },
    { LightenCompositeOp, "etil", "lite" },
    { LinearBurnCompositeOp, "nrbl", "lbrn" },
    { LinearDodgeCompositeOp, "gddl", "lddg" },
    { LinearLightCompositeOp, "tiLl", "lLit" },
    { LuminizeCompositeOp, " mul", "lum " },
    { MultiplyCompositeOp, " lum", "mul " },
    { OverlayCompositeOp, "revo", "over" },
    { PinLightCompositeOp, "tiLp", "pLit" },
    { SaturateCompositeOp, " tas", "sat " },
    { ScreenCompositeOp, "nrcs", "scrn" },
    { SoftLightCompositeOp, "tiLs", "sLit" },
    { VividLightCompositeOp, "tiLv", "vLit" }
  };

  register size_t
    i;

  for (i=0; i < (sizeof(blend_modes)/sizeof(blend_modes[0])); i++)
    if (blend_modes[i].compose == image->compose)
      return(image->endian == LSBEndian ? blend_modes[i].reversed :
        blend_modes[i].key);
  return(image->endian == LSBEndian ? "mron" : "norm");
}
/*
For some reason Photoshop seems to blend semi-transparent pixels with white.
This method reverts the blending. This can be disabled by setting the
option 'psd:alpha-unblend' to off.
*/
static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info,
  Image *image,ExceptionInfo* exception)
{
  const char
    *option;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Only sRGB images with a blended alpha channel need the fix-up; the
    'psd:alpha-unblend' option can disable it entirely.
  */
  if ((image->alpha_trait != BlendPixelTrait) ||
      (image->colorspace != sRGBColorspace))
    return(MagickTrue);
  option=GetImageOption(image_info,"psd:alpha-unblend");
  if (IsStringFalse(option) != MagickFalse)
    return(MagickTrue);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        gamma;

      register ssize_t
        i;

      /*
        gamma is the normalized alpha; fully transparent and fully opaque
        pixels need no correction.
      */
      gamma=QuantumScale*GetPixelAlpha(image, q);
      if (gamma != 0.0 && gamma != 1.0)
        {
          /*
            Invert "blend with white": stored = gamma*q + (1-gamma)*white.
          */
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            if (channel != AlphaPixelChannel)
              q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma);
          }
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}
static inline CompressionType ConvertPSDCompression(
  PSDCompressionType compression)
{
  /*
    Translate a PSD compression tag into the equivalent MagickCore
    CompressionType; unrecognized tags map to NoCompression.
  */
  if (compression == RLE)
    return RLECompression;
  if ((compression == ZipWithPrediction) ||
      (compression == ZipWithoutPrediction))
    return ZipCompression;
  return NoCompression;
}
static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity,
  MagickBooleanType revert,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Fold the layer opacity into every pixel's alpha (revert == MagickFalse),
    or divide it back out (revert != MagickFalse, used by the writer).
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying layer opacity %.20g", (double) opacity);
  if (opacity == OpaqueAlpha)
    return(MagickTrue);
  if (image->alpha_trait != BlendPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (revert == MagickFalse)
        SetPixelAlpha(image,(Quantum) (QuantumScale*(GetPixelAlpha(image,q))*
          opacity),q);
      else if (opacity > 0)
        SetPixelAlpha(image,(Quantum) (QuantumRange*(GetPixelAlpha(image,q)/
          (MagickRealType) opacity)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}
static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask,
  Quantum background,MagickBooleanType revert,ExceptionInfo *exception)
{
  Image
    *complete_mask;

  MagickBooleanType
    status;

  PixelInfo
    color;

  ssize_t
    y;

  /*
    Multiply the layer's alpha by its opacity mask (or divide it back out
    when revert != MagickFalse).  The mask may be smaller than the layer,
    so it is first composited onto a canvas filled with the mask background
    value at the mask's page offset relative to the layer.
  */
  if (image->alpha_trait == UndefinedPixelTrait)
    return(MagickTrue);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying opacity mask");
  complete_mask=CloneImage(image,0,0,MagickTrue,exception);
  if (complete_mask == (Image *) NULL)
    return(MagickFalse);
  complete_mask->alpha_trait=BlendPixelTrait;
  GetPixelInfo(complete_mask,&color);
  color.red=(MagickRealType) background;
  (void) SetImageColor(complete_mask,&color,exception);
  status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue,
    mask->page.x-image->page.x,mask->page.y-image->page.y,exception);
  if (status == MagickFalse)
    {
      complete_mask=DestroyImage(complete_mask);
      return(status);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register Quantum
      *p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception);
    if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        alpha,
        intensity;

      /*
        The mask contributes through its intensity, not its own alpha.
      */
      alpha=(MagickRealType) GetPixelAlpha(image,q);
      intensity=GetPixelIntensity(complete_mask,p);
      if (revert == MagickFalse)
        SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q);
      else if (intensity > 0)
        SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q);
      q+=GetPixelChannels(image);
      p+=GetPixelChannels(complete_mask);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  complete_mask=DestroyImage(complete_mask);
  return(status);
}
static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info,
  ExceptionInfo *exception)
{
  char
    *key;

  RandomInfo
    *random_info;

  StringInfo
    *key_info;

  /*
    Stash the layer's opacity mask image in the image registry under a
    short random key so the writer can later restore it; the key is also
    recorded in the layer image's "psd:opacity-mask" artifact.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " preserving opacity mask");
  random_info=AcquireRandomInfo();
  key_info=GetRandomKey(random_info,2+1);
  key=(char *) GetStringInfoDatum(key_info);
  /*
    The random key is only 2+1 bytes long; the previous code wrote the
    background byte and terminator at offsets 8 and 9, overrunning the
    buffer.  Store them within bounds instead.
  */
  key[2]=(char) layer_info->mask.background;
  key[3]='\0';
  layer_info->mask.image->page.x+=layer_info->page.x;
  layer_info->mask.image->page.y+=layer_info->page.y;
  (void) SetImageRegistry(ImageRegistryType,(const char *) key,
    layer_info->mask.image,exception);
  (void) SetImageArtifact(layer_info->image,"psd:opacity-mask",
    (const char *) key);
  key_info=DestroyStringInfo(key_info);
  random_info=DestroyRandomInfo(random_info);
}
static ssize_t DecodePSDPixels(const size_t number_compact_pixels,
  const unsigned char *compact_pixels,const ssize_t depth,
  const size_t number_pixels,unsigned char *pixels)
{
#define CheckNumberCompactPixels \
  if (packets == 0) \
    return(i); \
  packets--

#define CheckNumberPixels(count) \
  if (((ssize_t) i+count) > (ssize_t) number_pixels) \
    return(i); \
  i+=count

  int
    pixel;

  register ssize_t
    bit,
    i,
    j;

  size_t
    length;

  ssize_t
    packets;

  /*
    Decode a PackBits (RLE) compressed scanline.  A header byte of 128 is
    a no-op; a value > 128 repeats the next byte 257-header times; a value
    < 128 copies the next header+1 bytes verbatim.  Samples narrower than
    a byte are expanded to one output byte per sample (1-bit samples are
    inverted: a set bit becomes 0, a clear bit 255).  Returns the number
    of output pixels written; both buffers are bounds-checked.
  */
  packets=(ssize_t) number_compact_pixels;
  i=0;
  while ((packets > 1) && (i < (ssize_t) number_pixels))
  {
    packets--;
    length=(size_t) (*compact_pixels++);
    if (length == 128)
      continue;  /* no-op packet */
    if (length > 128)
      {
        /*
          Run packet: replicate the next byte 257-length times.
        */
        length=256-length+1;
        CheckNumberCompactPixels;
        pixel=(*compact_pixels++);
        for (j=0; j < (ssize_t) length; j++)
        {
          switch (depth)
          {
            case 1:
            {
              CheckNumberPixels(8);
              for (bit=7; bit >= 0; bit--)
                *pixels++=((pixel >> bit) & 0x01) ? 0U : 255U;
              break;
            }
            case 2:
            {
              CheckNumberPixels(4);
              for (bit=6; bit >= 0; bit-=2)
                *pixels++=(unsigned char) ((pixel >> bit) & 0x03);
              break;
            }
            case 4:
            {
              CheckNumberPixels(2);
              *pixels++=(unsigned char) ((pixel >> 4) & 0xff);
              *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff);
              break;
            }
            default:
            {
              CheckNumberPixels(1);
              *pixels++=(unsigned char) pixel;
              break;
            }
          }
        }
        continue;
      }
    /*
      Literal packet: copy the next length+1 source bytes.
    */
    length++;
    for (j=0; j < (ssize_t) length; j++)
    {
      CheckNumberCompactPixels;
      switch (depth)
      {
        case 1:
        {
          CheckNumberPixels(8);
          for (bit=7; bit >= 0; bit--)
            *pixels++=((*compact_pixels >> bit) & 0x01) ? 0U : 255U;
          break;
        }
        case 2:
        {
          CheckNumberPixels(4);
          for (bit=6; bit >= 0; bit-=2)
            *pixels++=(*compact_pixels >> bit) & 0x03;
          break;
        }
        case 4:
        {
          CheckNumberPixels(2);
          *pixels++=(*compact_pixels >> 4) & 0xff;
          *pixels++=(*compact_pixels & 0x0f) & 0xff;
          break;
        }
        default:
        {
          CheckNumberPixels(1);
          *pixels++=(*compact_pixels);
          break;
        }
      }
      compact_pixels++;
    }
  }
  return(i);
}
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
  const ssize_t number_layers)
{
  ssize_t
    j;

  /*
    Release every per-layer image, mask image, and additional-information
    blob, then free the layer array itself.
  */
  for (j=0; j < number_layers; j++)
  {
    LayerInfo
      *layer;

    layer=layer_info+j;
    if (layer->image != (Image *) NULL)
      layer->image=DestroyImage(layer->image);
    if (layer->mask.image != (Image *) NULL)
      layer->mask.image=DestroyImage(layer->mask.image);
    if (layer->info != (StringInfo *) NULL)
      layer->info=DestroyStringInfo(layer->info);
  }
  return (LayerInfo *) RelinquishMagickMemory(layer_info);
}
static inline size_t GetPSDPacketSize(const Image *image)
{
  /*
    Bytes per channel sample: colormapped images with more than 256
    entries use 16-bit indexes; otherwise the size follows the depth
    (>16 bits -> 4 bytes, >8 bits -> 2 bytes, else 1).
  */
  if ((image->storage_class == PseudoClass) && (image->colors > 256))
    return(2);
  if (image->depth > 16)
    return(4);
  return(image->depth > 8 ? 2 : 1);
}
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
  /*
    Version 1 (PSD) stores section sizes as 32-bit values; version 2
    (PSB) as 64-bit values.
  */
  return(psd_info->version == 1 ? (MagickSizeType) ReadBlobLong(image) :
    (MagickSizeType) ReadBlobLongLong(image));
}
static inline size_t GetPSDRowSize(Image *image)
{
  size_t
    packet_size;

  /*
    Bytes per stored scanline; 1-bit images pack eight pixels per byte.
  */
  packet_size=GetPSDPacketSize(image);
  if (image->depth == 1)
    return(((image->columns+7)/8)*packet_size);
  return(image->columns*packet_size);
}
static const char *ModeToString(PSDImageType type)
{
  /*
    Human-readable name for a PSD color mode (used for logging).
  */
  static const struct
  {
    PSDImageType
      mode;

    const char
      *name;
  } modes[] =
  {
    { BitmapMode, "Bitmap" },
    { GrayscaleMode, "Grayscale" },
    { IndexedMode, "Indexed" },
    { RGBMode, "RGB" },
    { CMYKMode, "CMYK" },
    { MultichannelMode, "Multichannel" },
    { DuotoneMode, "Duotone" },
    { LabMode, "L*A*B" }
  };

  register size_t
    i;

  for (i=0; i < (sizeof(modes)/sizeof(modes[0])); i++)
    if (modes[i].mode == type)
      return(modes[i].name);
  return "unknown";
}
static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception)
{
  ChannelType
    channel_mask;

  MagickBooleanType
    status;

  /*
    PSD stores CMYK samples inverted; negate every channel except alpha
    to convert between the PSD and MagickCore representations.
  */
  channel_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~
    AlphaChannel));
  status=NegateImage(image,MagickFalse,exception);
  (void) SetImageChannelMask(image,channel_mask);
  return(status);
}
static StringInfo *ParseImageResourceBlocks(PSDInfo *psd_info,Image *image,
  const unsigned char *blocks,size_t length)
{
  const unsigned char
    *p;

  ssize_t
    offset;

  StringInfo
    *profile;

  unsigned char
    name_length;

  unsigned int
    count;

  unsigned short
    id,
    short_sans;

  /*
    Walk the "8BIM" image-resource blocks: capture the resolution (id
    0x03ed) and the version-info merged-image flag (id 0x0421), and return
    the whole section as an "8bim" profile.  Returns NULL when the section
    is too short to hold a single block.
  */
  if (length < 16)
    return((StringInfo *) NULL);
  /* NOTE(review): BlobToStringInfo() can return NULL on allocation
     failure; that result is not checked before SetStringInfoDatum() --
     confirm against the callers' expectations. */
  profile=BlobToStringInfo((const unsigned char *) NULL,length);
  SetStringInfoDatum(profile,blocks);
  SetStringInfoName(profile,"8bim");
  for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); )
  {
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=PushShortPixel(MSBEndian,p,&id);
    /*
      The resource name is a Pascal string padded to an even total length
      (length byte included), so an even stored length means one pad byte.
    */
    p=PushCharPixel(p,&name_length);
    if ((name_length % 2) == 0)
      name_length++;
    p+=name_length;
    if (p > (blocks+length-4))
      break;
    p=PushLongPixel(MSBEndian,p,&count);
    offset=(ssize_t) count;
    if (((p+offset) < blocks) || ((p+offset) > (blocks+length)))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        unsigned short
          resolution;

        /*
          Resolution info.
        */
        if (offset < 16)
          break;
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.x=(double) resolution;
        (void) FormatImageProperty(image,"tiff:XResolution","%*g",
          GetMagickPrecision(),image->resolution.x);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.y=(double) resolution;
        (void) FormatImageProperty(image,"tiff:YResolution","%*g",
          GetMagickPrecision(),image->resolution.y);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        image->units=PixelsPerInchResolution;
        break;
      }
      case 0x0421:
      {
        /*
          Version info: byte 4 of the data tells whether the file contains
          a valid merged (composite) image.
        */
        if ((offset > 4) && (*(p+4) == 0))
          psd_info->has_merged_image=MagickFalse;
        p+=offset;
        break;
      }
      default:
      {
        p+=offset;
        break;
      }
    }
    /*
      Resource data is padded to an even byte count.
    */
    if ((offset & 0x01) != 0)
      p++;
  }
  return(profile);
}
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
if (mode == (const char *) NULL)
return(OverCompositeOp);
if (LocaleNCompare(mode,"norm",4) == 0)
return(OverCompositeOp);
if (LocaleNCompare(mode,"mul ",4) == 0)
return(MultiplyCompositeOp);
if (LocaleNCompare(mode,"diss",4) == 0)
return(DissolveCompositeOp);
if (LocaleNCompare(mode,"diff",4) == 0)
return(DifferenceCompositeOp);
if (LocaleNCompare(mode,"dark",4) == 0)
return(DarkenCompositeOp);
if (LocaleNCompare(mode,"lite",4) == 0)
return(LightenCompositeOp);
if (LocaleNCompare(mode,"hue ",4) == 0)
return(HueCompositeOp);
if (LocaleNCompare(mode,"sat ",4) == 0)
return(SaturateCompositeOp);
if (LocaleNCompare(mode,"colr",4) == 0)
return(ColorizeCompositeOp);
if (LocaleNCompare(mode,"lum ",4) == 0)
return(LuminizeCompositeOp);
if (LocaleNCompare(mode,"scrn",4) == 0)
return(ScreenCompositeOp);
if (LocaleNCompare(mode,"over",4) == 0)
return(OverlayCompositeOp);
if (LocaleNCompare(mode,"hLit",4) == 0)
return(HardLightCompositeOp);
if (LocaleNCompare(mode,"sLit",4) == 0)
return(SoftLightCompositeOp);
if (LocaleNCompare(mode,"smud",4) == 0)
return(ExclusionCompositeOp);
if (LocaleNCompare(mode,"div ",4) == 0)
return(ColorDodgeCompositeOp);
if (LocaleNCompare(mode,"idiv",4) == 0)
return(ColorBurnCompositeOp);
if (LocaleNCompare(mode,"lbrn",4) == 0)
return(LinearBurnCompositeOp);
if (LocaleNCompare(mode,"lddg",4) == 0)
return(LinearDodgeCompositeOp);
if (LocaleNCompare(mode,"lLit",4) == 0)
return(LinearLightCompositeOp);
if (LocaleNCompare(mode,"vLit",4) == 0)
return(VividLightCompositeOp);
if (LocaleNCompare(mode,"pLit",4) == 0)
return(PinLightCompositeOp);
if (LocaleNCompare(mode,"hMix",4) == 0)
return(HardMixCompositeOp);
return(OverCompositeOp);
}
static inline ssize_t ReadPSDString(Image *image,char *p,const size_t length)
{
  ssize_t
    count;

  /*
    Read a fixed-length key string; on little-endian images the bytes are
    reversed in place so the key reads in natural (big-endian) order.
    Returns the number of bytes actually read.
  */
  count=ReadBlob(image,length,(unsigned char *) p);
  if ((count == (ssize_t) length) && (image->endian != MSBEndian))
    {
      char
        *head,
        *tail;

      head=p;
      tail=p+length-1;
      while (head < tail)
      {
        char
          swap;

        swap=*head;
        *head++=*tail;
        *tail--=swap;
      }
    }
  return(count);
}
static inline void SetPSDPixel(Image *image,const size_t channels,
const ssize_t type,const size_t packet_size,const Quantum pixel,Quantum *q,
ExceptionInfo *exception)
{
if (image->storage_class == PseudoClass)
{
PixelInfo
*color;
Quantum
index;
index=pixel;
if (packet_size == 1)
index=(Quantum) ScaleQuantumToChar(index);
index=(Quantum) ConstrainColormapIndex(image,(ssize_t) index,
exception);
if (type == 0)
SetPixelIndex(image,index,q);
if ((type == 0) && (channels > 1))
return;
color=image->colormap+(ssize_t) GetPixelIndex(image,q);
if (type != 0)
color->alpha=(MagickRealType) pixel;
SetPixelViaPixelInfo(image,color,q);
return;
}
switch (type)
{
case -1:
{
SetPixelAlpha(image,pixel,q);
break;
}
case -2:
case 0:
{
SetPixelRed(image,pixel,q);
break;
}
case -3:
case 1:
{
SetPixelGreen(image,pixel,q);
break;
}
case -4:
case 2:
{
SetPixelBlue(image,pixel,q);
break;
}
case 3:
{
if (image->colorspace == CMYKColorspace)
SetPixelBlack(image,pixel,q);
else
if (image->alpha_trait != UndefinedPixelTrait)
SetPixelAlpha(image,pixel,q);
break;
}
case 4:
{
if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
(channels > 3))
break;
if (image->alpha_trait != UndefinedPixelTrait)
SetPixelAlpha(image,pixel,q);
break;
}
}
}
static MagickBooleanType ReadPSDChannelPixels(Image *image,
  const size_t channels,const ssize_t row,const ssize_t type,
  const unsigned char *pixels,ExceptionInfo *exception)
{
  Quantum
    pixel;

  register const unsigned char
    *p;

  register Quantum
    *q;

  register ssize_t
    x;

  size_t
    packet_size;

  /*
    Push one decoded scanline into image row `row`.  The packet size
    selects the sample decode: 1 byte, big-endian 16-bit, or big-endian
    32-bit float.  For 1-bit images each source byte is expanded to up to
    eight pixels (set bit -> 0, clear bit -> QuantumRange).
  */
  p=pixels;
  q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
  if (q == (Quantum *) NULL)
    return MagickFalse;
  packet_size=GetPSDPacketSize(image);
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (packet_size == 1)
      pixel=ScaleCharToQuantum(*p++);
    else
      if (packet_size == 2)
        {
          unsigned short
            nibble;

          p=PushShortPixel(MSBEndian,p,&nibble);
          pixel=ScaleShortToQuantum(nibble);
        }
      else
        {
          MagickFloatType
            nibble;

          p=PushFloatPixel(MSBEndian,p,&nibble);
          pixel=ClampToQuantum((MagickRealType) (QuantumRange*nibble));
        }
    if (image->depth > 1)
      {
        SetPSDPixel(image,channels,type,packet_size,pixel,q,exception);
        q+=GetPixelChannels(image);
      }
    else
      {
        ssize_t
          bit,
          number_bits;

        /*
          1-bit image: expand the byte just read into up to eight pixels,
          clamped at the row end.  x is advanced inside the loop, then
          backed off by one to compensate for the outer loop's increment.
        */
        number_bits=(ssize_t) image->columns-x;
        if (number_bits > 8)
          number_bits=8;
        for (bit = 0; bit < (ssize_t) number_bits; bit++)
        {
          SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
            & (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q,exception);
          q+=GetPixelChannels(image);
          x++;
        }
        if (x != (ssize_t) image->columns)
          x--;
        continue;
      }
  }
  return(SyncAuthenticPixels(image,exception));
}
static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels,
  const ssize_t type,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    row_size;

  ssize_t
    y;

  unsigned char
    *row_pixels;

  /*
    Read an uncompressed channel: one raw scanline per image row, pushed
    into the image via ReadPSDChannelPixels().
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RAW");
  row_size=GetPSDRowSize(image);
  row_pixels=(unsigned char *) AcquireQuantumMemory(row_size,
    sizeof(*row_pixels));
  if (row_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) memset(row_pixels,0,row_size*sizeof(*row_pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    if (ReadBlob(image,row_size,row_pixels) != (ssize_t) row_size)
      {
        status=MagickFalse;
        break;
      }
    status=ReadPSDChannelPixels(image,channels,y,type,row_pixels,exception);
    if (status == MagickFalse)
      break;
  }
  row_pixels=(unsigned char *) RelinquishMagickMemory(row_pixels);
  return(status);
}
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
  const PSDInfo *psd_info,const size_t size)
{
  MagickOffsetType
    *sizes;

  register ssize_t
    i;

  /*
    Read the per-scanline byte counts that precede RLE channel data:
    16-bit each for PSD (version 1), 32-bit each for PSB.  Returns NULL
    on allocation failure.
  */
  sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
  if (sizes == (MagickOffsetType *) NULL)
    return((MagickOffsetType *) NULL);
  for (i=0; i < (ssize_t) size; i++)
    if (psd_info->version == 1)
      sizes[i]=(MagickOffsetType) ReadBlobShort(image);
    else
      sizes[i]=(MagickOffsetType) ReadBlobLong(image);
  return sizes;
}
static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info,
  const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    length,
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  /*
    Decode one RLE (PackBits) compressed channel; sizes[y] is the
    compressed byte count of scanline y.  Each row is decompressed into
    pixels and pushed into the image via ReadPSDChannelPixels().
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RLE compressed");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Size the compact buffer for the largest scanline; reject absurd
    values (a sane RLE row cannot exceed the raw row size by much).
  */
  length=0;
  for (y=0; y < (ssize_t) image->rows; y++)
    if ((MagickOffsetType) length < sizes[y])
      length=(size_t) sizes[y];
  if (length > (row_size+2048)) /* arbitrary number */
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
    }
  /*
    Allocation was previously sized with sizeof(*pixels); tie it to the
    buffer it actually allocates (same type, clearer intent).
  */
  compact_pixels=(unsigned char *) AcquireQuantumMemory(length,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(compact_pixels,0,length*sizeof(*compact_pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;
    count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
    if (count != (ssize_t) sizes[y])
      break;
    /*
      For 1-bit data a bogus depth is passed so DecodePSDPixels() copies
      raw bytes; bit expansion happens later in ReadPSDChannelPixels().
    */
    count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
      (ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
      exception);
    if (status == MagickFalse)
      break;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
static void Unpredict8Bit(const Image *image,unsigned char *pixels,
  const size_t count,const size_t row_size)
{
  register unsigned char
    *q;

  size_t
    remaining,
    run;

  /*
    Undo horizontal delta prediction on 8-bit samples in place: every byte
    after the first in a row is stored as the difference from its left
    neighbor.
  */
  /* NOTE(review): assumes count is an exact multiple of row_size;
     otherwise the unsigned subtraction below wraps -- confirm callers. */
  q=pixels;
  remaining=count;
  while (remaining > 0)
  {
    run=image->columns;
    while (--run)
    {
      q[1]+=q[0];
      q++;
    }
    q++;
    remaining-=row_size;
  }
}
static void Unpredict16Bit(const Image *image,unsigned char *pixels,
  const size_t count,const size_t row_size)
{
  register unsigned char
    *p;

  size_t
    length,
    remaining;

  /*
    Undo horizontal delta prediction on big-endian 16-bit samples in
    place: each pixel is stored as the difference from its left neighbor.
    The high byte of the next pixel (p[2]) must be updated before its low
    byte (p[3]) so the carry term (p[1]+p[3]) still sees the stored delta.
  */
  p=pixels;
  remaining=count;
  while (remaining > 0)
  {
    length=image->columns;
    while (--length)
    {
      /* carry from the low-byte addition feeds the high byte */
      p[2]+=p[0]+((p[1]+p[3]) >> 8);
      p[3]+=p[1];
      p+=2;
    }
    p+=2;
    remaining-=row_size;
  }
}
static void Unpredict32Bit(const Image *image,unsigned char *pixels,
  unsigned char *output_pixels,const size_t row_size)
{
  register unsigned char
    *p,
    *q;

  register ssize_t
    y;

  size_t
    offset1,
    offset2,
    offset3,
    remaining;

  unsigned char
    *start;

  /*
    Undo prediction on 32-bit rows: each row first gets a byte-wise delta
    undo across its full width, then the row's four byte planes (all
    first bytes, all second bytes, ...) are interleaved into big-endian
    32-bit samples written to output_pixels.
  */
  offset1=image->columns;
  offset2=2*offset1;
  offset3=3*offset1;
  p=pixels;
  q=output_pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    /* pass 1: cumulative sum of the stored byte deltas over the row */
    start=p;
    remaining=row_size;
    while (--remaining)
    {
      *(p+1)+=*p;
      p++;
    }
    /* pass 2: gather one byte from each plane per output sample */
    p=start;
    remaining=image->columns;
    while (remaining--)
    {
      *(q++)=*p;
      *(q++)=*(p+offset1);
      *(q++)=*(p+offset2);
      *(q++)=*(p+offset3);
      p++;
    }
    p=start+row_size;
  }
}
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
  const ssize_t type,const PSDCompressionType compression,
  const size_t compact_size,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  register unsigned char
    *p;

  size_t
    count,
    packet_size,
    row_size;

  register ssize_t
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  z_stream
    stream;

  /*
    Inflate a ZIP compressed channel (with or without delta prediction)
    and push the decompressed scanlines into the image.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is ZIP compressed");
  if ((MagickSizeType) compact_size > GetBlobSize(image))
    ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
      image->filename);
  compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  packet_size=GetPSDPacketSize(image);
  row_size=image->columns*packet_size;
  count=image->rows*row_size;
  pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
    }
  memset(&stream,0,sizeof(stream));
  stream.data_type=Z_BINARY;
  stream.next_in=(Bytef *)compact_pixels;
  stream.avail_in=(uInt) compact_size;
  stream.next_out=(Bytef *)pixels;
  stream.avail_out=(uInt) count;
  if (inflateInit(&stream) == Z_OK)
    {
      int
        ret;

      while (stream.avail_out > 0)
      {
        ret=inflate(&stream,Z_SYNC_FLUSH);
        if ((ret != Z_OK) && (ret != Z_STREAM_END))
          {
            (void) inflateEnd(&stream);
            compact_pixels=(unsigned char *) RelinquishMagickMemory(
              compact_pixels);
            pixels=(unsigned char *) RelinquishMagickMemory(pixels);
            return(MagickFalse);
          }
        if (ret == Z_STREAM_END)
          break;
      }
      (void) inflateEnd(&stream);
    }
  if (compression == ZipWithPrediction)
    {
      if (packet_size == 1)
        Unpredict8Bit(image,pixels,count,row_size);
      else if (packet_size == 2)
        Unpredict16Bit(image,pixels,count,row_size);
      else if (packet_size == 4)
        {
          unsigned char
            *output_pixels;

          output_pixels=(unsigned char *) AcquireQuantumMemory(count,
            sizeof(*output_pixels));
          /*
            Check the buffer just allocated: the previous code tested the
            (known good) pixels pointer here, so on allocation failure a
            NULL output buffer reached Unpredict32Bit().
          */
          if (output_pixels == (unsigned char *) NULL)
            {
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
              pixels=(unsigned char *) RelinquishMagickMemory(pixels);
              ThrowBinaryException(ResourceLimitError,
                "MemoryAllocationFailed",image->filename);
            }
          Unpredict32Bit(image,pixels,output_pixels,row_size);
          pixels=(unsigned char *) RelinquishMagickMemory(pixels);
          pixels=output_pixels;
        }
    }
  status=MagickTrue;
  p=pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
    if (status == MagickFalse)
      break;
    p+=row_size;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#endif
static MagickBooleanType ReadPSDChannel(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
  const size_t channel,const PSDCompressionType compression,
  ExceptionInfo *exception)
{
  Image
    *channel_image,
    *mask;

  MagickOffsetType
    offset;

  MagickBooleanType
    status;

  /*
    Read one channel of a layer.  Mask channels (type < -1) are decoded
    into a separate grayscale image attached to layer_info->mask; all
    other channels are decoded directly into the layer image.  The blob is
    always re-positioned past the channel data (its recorded size minus
    the 2-byte compression word already consumed) before returning.
  */
  channel_image=image;
  mask=(Image *) NULL;
  if ((layer_info->channel_info[channel].type < -1) &&
      (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0))
    {
      const char
        *option;

      /*
        Ignore mask that is not a user supplied layer mask, if the mask is
        disabled or if the flags have unsupported values.
      */
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if ((layer_info->channel_info[channel].type != -2) ||
          (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
           (IsStringTrue(option) == MagickFalse)))
        {
          /* skip the channel data entirely */
          (void) SeekBlob(image,(MagickOffsetType)
            layer_info->channel_info[channel].size-2,SEEK_CUR);
          return(MagickTrue);
        }
      mask=CloneImage(image,layer_info->mask.page.width,
        layer_info->mask.page.height,MagickFalse,exception);
      if (mask != (Image *) NULL)
        {
          (void) ResetImagePixels(mask,exception);
          (void) SetImageType(mask,GrayscaleType,exception);
          channel_image=mask;
        }
    }
  offset=TellBlob(image);
  status=MagickFalse;
  switch(compression)
  {
    case Raw:
      status=ReadPSDChannelRaw(channel_image,psd_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,exception);
      break;
    case RLE:
    {
      MagickOffsetType
        *sizes;

      sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      status=ReadPSDChannelRLE(channel_image,psd_info,
        (ssize_t) layer_info->channel_info[channel].type,sizes,exception);
      sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
    }
      break;
    case ZipWithPrediction:
    case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
      status=ReadPSDChannelZip(channel_image,layer_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,compression,
        layer_info->channel_info[channel].size-2,exception);
#else
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (ZLIB)",image->filename);
#endif
      break;
    default:
      (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
        "CompressionNotSupported","'%.20g'",(double) compression);
      break;
  }
  /* always land exactly past this channel's data, even on partial reads */
  (void) SeekBlob(image,offset+layer_info->channel_info[channel].size-2,
    SEEK_SET);
  if (status == MagickFalse)
    {
      if (mask != (Image *) NULL)
        (void) DestroyImage(mask);
      ThrowBinaryException(CoderError,"UnableToDecompressImage",
        image->filename);
    }
  if (mask != (Image *) NULL)
    {
      if (layer_info->mask.image != (Image *) NULL)
        layer_info->mask.image=DestroyImage(layer_info->mask.image);
      layer_info->mask.image=mask;
    }
  return(status);
}
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
  const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
  char
    message[MagickPathExtent];

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  ssize_t
    j;

  /*
    Decode one layer: prepare the layer image (background, compose mode,
    psd:* artifacts), read every channel's pixel data, then apply layer
    opacity, CMYK negation and the optional opacity mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " setting up new layer image");
  if (psd_info->mode != IndexedMode)
    (void) SetImageBackgroundColor(layer_info->image,exception);
  layer_info->image->compose=PSDBlendModeToCompositeOperator(
    layer_info->blendkey);
  /* hidden layers must not contribute when composited */
  if (layer_info->visible == MagickFalse)
    layer_info->image->compose=NoCompositeOp;
  /*
    Set up some hidden attributes for folks that need them.
  */
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.x);
  (void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.y);
  (void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double)
    layer_info->opacity);
  (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
  (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name,
    exception);
  status=MagickTrue;
  /* each channel's data is preceded by its own 2-byte compression code */
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for channel %.20g",(double) j);
    compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
    layer_info->image->compression=ConvertPSDCompression(compression);
    /* channel type -1 is the layer's alpha channel */
    if (layer_info->channel_info[j].type == -1)
      layer_info->image->alpha_trait=BlendPixelTrait;
    status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,
      (size_t) j,compression,exception);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
      MagickFalse,exception);
  if ((status != MagickFalse) &&
      (layer_info->image->colorspace == CMYKColorspace))
    status=NegateCMYK(layer_info->image,exception);
  if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
    {
      const char
        *option;

      layer_info->mask.image->page.x=layer_info->mask.page.x;
      layer_info->mask.image->page.y=layer_info->mask.page.y;
      /* Do not composite the mask when it is disabled */
      if ((layer_info->mask.flags & 0x02) == 0x02)
        layer_info->mask.image->compose=NoCompositeOp;
      else
        status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
          layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
          exception);
      /* optionally keep the mask as an extra image for the caller */
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if (IsStringTrue(option) != MagickFalse)
        PreservePSDOpacityMask(image,layer_info,exception);
      layer_info->mask.image=DestroyImage(layer_info->mask.image);
    }
  return(status);
}
static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info,
  LayerInfo *layer_info)
{
  int
    required;

  register ssize_t
    i;

  /*
    Verify that the layer supplies every channel the color mode requires.
    Build the required-channel mask, then clear each bit as the matching
    channel type is seen in the layer's channel list.
  */
  if (layer_info->channels < psd_info->min_channels)
    return(MagickFalse);
  required=RedChannel;
  if (psd_info->min_channels >= 3)
    required|=(GreenChannel | BlueChannel);
  if (psd_info->min_channels >= 4)
    required|=BlackChannel;
  for (i=0; i < (ssize_t) layer_info->channels; i++)
  {
    short
      type;

    type=layer_info->channel_info[i].type;
    /* indexed images must store the index data in channel 0 */
    if ((i == 0) && (psd_info->mode == IndexedMode) && (type != 0))
      return(MagickFalse);
    if (type == -1)
      {
        required|=AlphaChannel;
        continue;
      }
    if (type < -1)
      continue;
    switch (type)
    {
      case 0: required&=~RedChannel; break;
      case 1: required&=~GreenChannel; break;
      case 2: required&=~BlueChannel; break;
      case 3: required&=~BlackChannel; break;
      default: break;
    }
  }
  if (required == 0)
    return(MagickTrue);
  /* an alpha channel is acceptable when an extra channel is present */
  if ((required == AlphaChannel) &&
      (layer_info->channels >= psd_info->min_channels + 1))
    return(MagickTrue);
  return(MagickFalse);
}
static void AttachPSDLayers(Image *image,LayerInfo *layer_info,
  ssize_t number_layers)
{
  ssize_t
    j,
    k;

  /*
    Remove entries whose layer image could not be created, preserving the
    order of the remaining layers.
  */
  j=0;
  while (j < number_layers)
  {
    if (layer_info[j].image != (Image *) NULL)
      {
        j++;
        continue;
      }
    for (k=j; k < (number_layers-1); k++)
      layer_info[k]=layer_info[k+1];
    number_layers--;
  }
  if (number_layers == 0)
    {
      layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
      return;
    }
  /*
    Chain the surviving layer images together and hang the chain off the
    base image; the LayerInfo array itself is no longer needed.
  */
  for (j=0; j < number_layers; j++)
  {
    if (j > 0)
      layer_info[j].image->previous=layer_info[j-1].image;
    if (j < (number_layers-1))
      layer_info[j].image->next=layer_info[j+1].image;
    layer_info[j].image->page=layer_info[j].page;
  }
  image->next=layer_info[0].image;
  layer_info[0].image->previous=image;
  layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
}
static inline MagickBooleanType PSDSkipImage(const PSDInfo *psd_info,
  const ImageInfo *image_info,const size_t index)
{
  /*
    Report whether layer `index' falls outside the requested scene range.
    Skipping only applies when a merged image exists to fall back on and
    the caller actually restricted the scenes.
  */
  if ((psd_info->has_merged_image == MagickFalse) ||
      (image_info->number_scenes == 0))
    return(MagickFalse);
  if ((index >= image_info->scene) &&
      (index <= image_info->scene+image_info->number_scenes-1))
    return(MagickFalse);
  return(MagickTrue);
}
static void CheckMergedImageAlpha(const PSDInfo *psd_info,Image *image)
{
  size_t
    opaque_channels;

  /*
    The number of layers cannot be used to determine if the merged image
    contains an alpha channel, so infer it from the channel count: any
    channel beyond what the color mode requires is treated as alpha.
  */
  switch (psd_info->mode)
  {
    case GrayscaleMode: opaque_channels=1; break;
    case RGBMode: opaque_channels=3; break;
    case CMYKMode: opaque_channels=4; break;
    default: return;
  }
  if (psd_info->channels > opaque_channels)
    image->alpha_trait=BlendPixelTrait;
}
static void ParseAdditionalInfo(LayerInfo *layer_info)
{
  char
    key[5];

  size_t
    remaining_length;

  unsigned char
    *p;

  unsigned int
    size;

  /*
    Scan the layer's additional-info blob for a "luni" (Unicode layer name)
    record and copy its ASCII-representable characters into
    layer_info->name.  Each record is: 4-byte signature, 4-byte key,
    4-byte big-endian payload size, then the payload.
  */
  p=GetStringInfoDatum(layer_info->info);
  remaining_length=GetStringInfoLength(layer_info->info);
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(char) (*p++);
    key[1]=(char) (*p++);
    key[2]=(char) (*p++);
    key[3]=(char) (*p++);
    key[4]='\0';
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    remaining_length-=12;
    if ((size_t) size > remaining_length)
      break;
    if (LocaleNCompare(key,"luni",sizeof(key)) == 0)
      {
        unsigned char
          *name;

        unsigned int
          length;

        /*
          A "luni" payload begins with a 4-byte character count.  Guard
          against truncated payloads: with size < 4 the expression size-4
          below would wrap (unsigned), defeating the bounds check and
          allowing reads past the record.
        */
        if (size < 4)
          break;
        length=(unsigned int) (*p++) << 24;
        length|=(unsigned int) (*p++) << 16;
        length|=(unsigned int) (*p++) << 8;
        length|=(unsigned int) (*p++);
        /*
          Each UTF-16 character occupies two payload bytes; the division
          form avoids overflow of length*2 for large counts.
        */
        if (length > ((size-4)/2))
          break;
        if (sizeof(layer_info->name) <= length)
          break;
        name=layer_info->name;
        while (length > 0)
        {
          /* Only ASCII strings are supported */
          if (*p++ != '\0')
            break;
          *name++=*p++;
          length--;
        }
        if (length == 0)
          *name='\0';
        break;
      }
    else
      p+=size;
    remaining_length-=(size_t) size;
  }
}
static MagickSizeType GetLayerInfoSize(const PSDInfo *psd_info,Image *image)
{
  char
    type[4];

  MagickSizeType
    size;

  ssize_t
    count;

  /*
    Return the byte length of the layer-info section.  When the stored
    length is zero, the layers may instead live inside additional-info
    blocks: skip the 8BIM signature and any Mt16/Mt32/Mtrn (transparency)
    block until a Lr16/Lr32 (16/32-bit layers) key supplies the real
    length; return 0 when no such key is found.
  */
  size=GetPSDSize(psd_info,image);
  if (size != 0)
    return(size);
  (void) ReadBlobLong(image);
  count=ReadPSDString(image,type,4);
  if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
    return(0);
  count=ReadPSDString(image,type,4);
  if ((count == 4) && ((LocaleNCompare(type,"Mt16",4) == 0) ||
      (LocaleNCompare(type,"Mt32",4) == 0) ||
      (LocaleNCompare(type,"Mtrn",4) == 0)))
    {
      /* a transparency block implies the merged image carries alpha */
      size=GetPSDSize(psd_info,image);
      if (size != 0)
        return(0);
      image->alpha_trait=BlendPixelTrait;
      count=ReadPSDString(image,type,4);
      if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
        return(0);
      count=ReadPSDString(image,type,4);
    }
  if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) ||
      (LocaleNCompare(type,"Lr32",4) == 0)))
    size=GetPSDSize(psd_info,image);
  return(size);
}
static MagickBooleanType ReadPSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  char
    type[4];

  LayerInfo
    *layer_info;

  MagickSizeType
    size;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    count,
    index,
    j,
    number_layers;

  /*
    Parse the PSD layer-info section in three passes: (1) read every
    per-layer record (geometry, channels, blending, mask, name, additional
    info), (2) allocate an image for each non-empty layer, (3) read or
    discard each layer's channel pixel data.
  */
  size=GetLayerInfoSize(psd_info,image);
  if (size == 0)
    {
      CheckMergedImageAlpha(psd_info,image);
      return(MagickTrue);
    }
  layer_info=(LayerInfo *) NULL;
  number_layers=(ssize_t) ReadBlobSignedShort(image);
  if (number_layers < 0)
    {
      /*
        The first alpha channel in the merged result contains the
        transparency data for the merged result.
      */
      number_layers=MagickAbsoluteValue(number_layers);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " negative layer count corrected for");
      image->alpha_trait=BlendPixelTrait;
    }
  /*
    We only need to know if the image has an alpha channel
  */
  if (skip_layers != MagickFalse)
    return(MagickTrue);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " image contains %.20g layers",(double) number_layers);
  if (number_layers == 0)
    ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
      image->filename);
  layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
    sizeof(*layer_info));
  if (layer_info == (LayerInfo *) NULL)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " allocation of LayerInfo failed");
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info));
  /*
    Pass 1: read the per-layer records.
  */
  for (i=0; i < number_layers; i++)
  {
    ssize_t
      top,
      left,
      bottom,
      right;

    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading layer #%.20g",(double) i+1);
    /* layer bounding box, stored as top/left/bottom/right */
    top=(ssize_t) ReadBlobSignedLong(image);
    left=(ssize_t) ReadBlobSignedLong(image);
    bottom=(ssize_t) ReadBlobSignedLong(image);
    right=(ssize_t) ReadBlobSignedLong(image);
    if ((right < left) || (bottom < top))
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    layer_info[i].page.y=top;
    layer_info[i].page.x=left;
    layer_info[i].page.width=(size_t) (right-left);
    layer_info[i].page.height=(size_t) (bottom-top);
    layer_info[i].channels=ReadBlobShort(image);
    if (layer_info[i].channels > MaxPSDChannels)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
          image->filename);
      }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
        (double) layer_info[i].page.x,(double) layer_info[i].page.y,
        (double) layer_info[i].page.height,(double)
        layer_info[i].page.width,(double) layer_info[i].channels);
    /* per-channel type code and compressed-data length */
    for (j=0; j < (ssize_t) layer_info[i].channels; j++)
    {
      layer_info[i].channel_info[j].type=(short) ReadBlobShort(image);
      if ((layer_info[i].channel_info[j].type < -4) ||
          (layer_info[i].channel_info[j].type > 4))
        {
          layer_info=DestroyLayerInfo(layer_info,number_layers);
          ThrowBinaryException(CorruptImageError,"NoSuchImageChannel",
            image->filename);
        }
      layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
        image);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " channel[%.20g]: type=%.20g, size=%.20g",(double) j,
          (double) layer_info[i].channel_info[j].type,
          (double) layer_info[i].channel_info[j].size);
    }
    if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    count=ReadPSDString(image,type,4);
    if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
      {
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer type was %.4s instead of 8BIM", type);
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    count=ReadPSDString(image,layer_info[i].blendkey,4);
    if (count != 4)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
      ReadBlobByte(image));
    layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
    layer_info[i].flags=(unsigned char) ReadBlobByte(image);
    /* flag bit 0x02 marks the layer as hidden */
    layer_info[i].visible=!(layer_info[i].flags & 0x02);
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
        layer_info[i].blendkey,(double) layer_info[i].opacity,
        layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
        layer_info[i].visible ? "true" : "false");
    (void) ReadBlobByte(image); /* filler */
    size=ReadBlobLong(image);
    if (size != 0)
      {
        MagickSizeType
          combined_length,
          length;

        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer contains additional info");
        length=ReadBlobLong(image);
        combined_length=length+4;
        if (length != 0)
          {
            /*
              Layer mask info.
            */
            layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image);
            layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image);
            layer_info[i].mask.page.height=(size_t)
              (ReadBlobSignedLong(image)-layer_info[i].mask.page.y);
            layer_info[i].mask.page.width=(size_t) (
              ReadBlobSignedLong(image)-layer_info[i].mask.page.x);
            layer_info[i].mask.background=(unsigned char) ReadBlobByte(
              image);
            layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
            /* flag 0x01: mask position is relative to the layer */
            if (!(layer_info[i].mask.flags & 0x01))
              {
                layer_info[i].mask.page.y=layer_info[i].mask.page.y-
                  layer_info[i].page.y;
                layer_info[i].mask.page.x=layer_info[i].mask.page.x-
                  layer_info[i].page.x;
              }
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
                (double) layer_info[i].mask.page.x,(double)
                layer_info[i].mask.page.y,(double)
                layer_info[i].mask.page.width,(double)
                layer_info[i].mask.page.height,(double) ((MagickOffsetType)
                length)-18);
            /*
              Skip over the rest of the layer mask information.
              NOTE(review): assumes length >= 18; a smaller value wraps
              the unsigned subtraction below -- confirm the blob layer
              rejects the resulting huge discard.
            */
            if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        length=ReadBlobLong(image);
        combined_length+=length+4;
        if (length != 0)
          {
            /*
              Layer blending ranges info.
            */
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer blending ranges: length=%.20g",(double)
                ((MagickOffsetType) length));
            if (DiscardBlobBytes(image,length) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        /*
          Layer name.
        */
        length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
        combined_length+=length+1;
        if (length > 0)
          (void) ReadBlob(image,(size_t) length++,layer_info[i].name);
        layer_info[i].name[length]='\0';
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer name: %s",layer_info[i].name);
        /* the Pascal string (length byte + text) is padded to 4 bytes */
        if ((length % 4) != 0)
          {
            length=4-(length % 4);
            combined_length+=length;
            /* Skip over the padding of the layer name */
            if (DiscardBlobBytes(image,length) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        /* whatever remains of the extra data is the additional info */
        length=(MagickSizeType) size-combined_length;
        if (length > 0)
          {
            unsigned char
              *info;

            if (length > GetBlobSize(image))
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "InsufficientImageDataInFile",image->filename);
              }
            layer_info[i].info=AcquireStringInfo((const size_t) length);
            info=GetStringInfoDatum(layer_info[i].info);
            (void) ReadBlob(image,(const size_t) length,info);
            ParseAdditionalInfo(&layer_info[i]);
          }
      }
  }
  /*
    Pass 2: allocate an image for every non-empty layer.
  */
  for (i=0; i < number_layers; i++)
  {
    if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0))
      {
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer data is empty");
        if (layer_info[i].info != (StringInfo *) NULL)
          layer_info[i].info=DestroyStringInfo(layer_info[i].info);
        continue;
      }
    /*
      Allocate layered image.
    */
    layer_info[i].image=CloneImage(image,layer_info[i].page.width,
      layer_info[i].page.height,MagickFalse,exception);
    if (layer_info[i].image == (Image *) NULL)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " allocation of image for layer %.20g failed",(double) i);
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      }
    if (layer_info[i].info != (StringInfo *) NULL)
      {
        (void) SetImageProfile(layer_info[i].image,"psd:additional-info",
          layer_info[i].info,exception);
        layer_info[i].info=DestroyStringInfo(layer_info[i].info);
      }
  }
  /* pinging needs no pixel data; attach the (empty) layers and return */
  if (image_info->ping != MagickFalse)
    {
      AttachPSDLayers(image,layer_info,number_layers);
      return(MagickTrue);
    }
  status=MagickTrue;
  index=0;
  /*
    Pass 3: read (or discard) each layer's channel pixel data.
  */
  for (i=0; i < number_layers; i++)
  {
    if ((layer_info[i].image == (Image *) NULL) ||
        (PSDSkipImage(psd_info, image_info,++index) != MagickFalse))
      {
        /* skip the channel data of layers we do not keep */
        for (j=0; j < (ssize_t) layer_info[i].channels; j++)
        {
          if (DiscardBlobBytes(image,(MagickSizeType)
              layer_info[i].channel_info[j].size) == MagickFalse)
            {
              layer_info=DestroyLayerInfo(layer_info,number_layers);
              ThrowBinaryException(CorruptImageError,
                "UnexpectedEndOfFile",image->filename);
            }
        }
        continue;
      }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for layer %.20g",(double) i);
    status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
      exception);
    if (status == MagickFalse)
      break;
    status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
      (MagickSizeType) number_layers);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    AttachPSDLayers(image,layer_info,number_layers);
  else
    layer_info=DestroyLayerInfo(layer_info,number_layers);
  return(status);
}
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  /*
    Public entry point for reading the layer section: honor the coder
    security policy, then delegate to the internal reader without
    skipping any layers.
  */
  if (IsRightsAuthorized(CoderPolicyDomain,ReadPolicyRights,"PSD") ==
      MagickFalse)
    return(MagickTrue);
  return(ReadPSDLayersInternal(image,image_info,psd_info,MagickFalse,
    exception));
}
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
  Image *image,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  MagickOffsetType
    *sizes;

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  register ssize_t
    i;

  /*
    Read the flattened composite image that follows the layer data.  The
    data is stored planar (one full plane per channel); only Raw and RLE
    compression are supported here.
  */
  if ((image_info->number_scenes != 0) && (image_info->scene != 0))
    return(MagickTrue);
  compression=(PSDCompressionType) ReadBlobMSBShort(image);
  image->compression=ConvertPSDCompression(compression);
  if (compression != Raw && compression != RLE)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
      return(MagickFalse);
    }
  sizes=(MagickOffsetType *) NULL;
  if (compression == RLE)
    {
      /* RLE data is preceded by one scanline-length entry per row/channel */
      sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  status=MagickTrue;
  for (i=0; i < (ssize_t) psd_info->channels; i++)
  {
    ssize_t
      type;

    type=i;
    /* a two-channel grayscale image stores alpha as the second plane */
    if ((type == 1) && (psd_info->channels == 2))
      type=-1;
    if (compression == RLE)
      status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows),
        exception);
    else
      status=ReadPSDChannelRaw(image,psd_info->channels,type,exception);
    if (status != MagickFalse)
      status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
        psd_info->channels);
    if (status == MagickFalse)
      break;
  }
  if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
    status=NegateCMYK(image,exception);
  if (status != MagickFalse)
    status=CorrectPSDAlphaBlend(image_info,image,exception);
  /* RelinquishMagickMemory accepts NULL, so no guard is needed */
  sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
  return(status);
}
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    skip_layers;

  MagickOffsetType
    offset;

  MagickSizeType
    length;

  MagickBooleanType
    status;

  PSDInfo
    psd_info;

  register ssize_t
    i;

  size_t
    imageListLength;

  ssize_t
    count;

  StringInfo
    *profile;

  /*
    Top-level PSD/PSB reader: header, colormap, image resources, layer
    section, then the precombined (merged) image.
  */
  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Read image header.
  */
  image->endian=MSBEndian;
  count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
  psd_info.version=ReadBlobMSBShort(image);
  /* signature "8BPS"; version 1 is PSD, version 2 is PSB */
  if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
      ((psd_info.version != 1) && (psd_info.version != 2)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  (void) ReadBlob(image,6,psd_info.reserved);
  psd_info.channels=ReadBlobMSBShort(image);
  if (psd_info.channels < 1)
    ThrowReaderException(CorruptImageError,"MissingImageChannel");
  if (psd_info.channels > MaxPSDChannels)
    ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
  psd_info.rows=ReadBlobMSBLong(image);
  psd_info.columns=ReadBlobMSBLong(image);
  /* version-1 (PSD) files are limited to 30000x30000 pixels */
  if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
      (psd_info.columns > 30000)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.depth=ReadBlobMSBShort(image);
  if ((psd_info.depth != 1) && (psd_info.depth != 8) &&
      (psd_info.depth != 16) && (psd_info.depth != 32))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.mode=ReadBlobMSBShort(image);
  if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
      (double) psd_info.columns,(double) psd_info.rows,(double)
      psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
      psd_info.mode));
  if (EOFBlob(image) != MagickFalse)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /*
    Initialize image.
  */
  image->depth=psd_info.depth;
  image->columns=psd_info.columns;
  image->rows=psd_info.rows;
  status=SetImageExtent(image,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  status=ResetImagePixels(image,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  /* derive the colorspace and required channel count from the color mode */
  psd_info.min_channels=3;
  if (psd_info.mode == LabMode)
    (void) SetImageColorspace(image,LabColorspace,exception);
  if (psd_info.mode == CMYKMode)
    {
      psd_info.min_channels=4;
      (void) SetImageColorspace(image,CMYKColorspace,exception);
    }
  else
    if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) ||
        (psd_info.mode == DuotoneMode))
      {
        if (psd_info.depth != 32)
          {
            status=AcquireImageColormap(image,MagickMin((size_t)
              (psd_info.depth < 16 ? 256 : 65536), MaxColormapSize),exception);
            if (status == MagickFalse)
              ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " Image colormap allocated");
          }
        psd_info.min_channels=1;
        (void) SetImageColorspace(image,GRAYColorspace,exception);
      }
    else
      if (psd_info.mode == IndexedMode)
        psd_info.min_channels=1;
  if (psd_info.channels < psd_info.min_channels)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /*
    Read PSD raster colormap only present for indexed and duotone images.
  */
  length=ReadBlobMSBLong(image);
  if ((psd_info.mode == IndexedMode) && (length < 3))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (length != 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading colormap");
      if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32))
        {
          /*
            Duotone image data; the format of this data is undocumented.
            32 bits per pixel; the colormap is ignored.
          */
          (void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR);
        }
      else
        {
          size_t
            number_colors;

          /*
            Read PSD raster colormap.
          */
          number_colors=(size_t) length/3;
          if (number_colors > 65536)
            ThrowReaderException(CorruptImageError,"ImproperImageHeader");
          if (AcquireImageColormap(image,number_colors,exception) == MagickFalse)
            ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
          /* the colormap is planar: all reds, then greens, then blues */
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].red=(MagickRealType) ScaleCharToQuantum(
              (unsigned char) ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].green=(MagickRealType) ScaleCharToQuantum(
              (unsigned char) ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum(
              (unsigned char) ReadBlobByte(image));
          image->alpha_trait=UndefinedPixelTrait;
        }
    }
  if ((image->depth == 1) && (image->storage_class != PseudoClass))
    ThrowReaderException(CorruptImageError, "ImproperImageHeader");
  psd_info.has_merged_image=MagickTrue;
  profile=(StringInfo *) NULL;
  length=ReadBlobMSBLong(image);
  if (length != 0)
    {
      unsigned char
        *blocks;

      /*
        Image resources block.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading image resource blocks - %.20g bytes",(double)
          ((MagickOffsetType) length));
      if (length > GetBlobSize(image))
        ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
      blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
        sizeof(*blocks));
      if (blocks == (unsigned char *) NULL)
        ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
      count=ReadBlob(image,(size_t) length,blocks);
      if ((count != (ssize_t) length) || (length < 4) ||
          (LocaleNCompare((char *) blocks,"8BIM",4) != 0))
        {
          blocks=(unsigned char *) RelinquishMagickMemory(blocks);
          ThrowReaderException(CorruptImageError,"ImproperImageHeader");
        }
      profile=ParseImageResourceBlocks(&psd_info,image,blocks,(size_t) length);
      blocks=(unsigned char *) RelinquishMagickMemory(blocks);
    }
  /*
    Layer and mask block.
  */
  length=GetPSDSize(&psd_info,image);
  if (length == 8)
    {
      /*
        NOTE(review): an 8-byte section appears to wrap the real length in
        two MSB longs; only the second read is kept -- confirm intent.
      */
      length=ReadBlobMSBLong(image);
      length=ReadBlobMSBLong(image);
    }
  offset=TellBlob(image);
  skip_layers=MagickFalse;
  /* with a merged image and only scene 0 requested, layers can be skipped */
  if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
      (psd_info.has_merged_image != MagickFalse))
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " read composite only");
      skip_layers=MagickTrue;
    }
  if (length == 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " image has no layers");
    }
  else
    {
      if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers,
          exception) != MagickTrue)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
      /*
        Skip the rest of the layer and mask information.
      */
      (void) SeekBlob(image,offset+length,SEEK_SET);
    }
  /*
    If we are only "pinging" the image, then we're done - so return.
  */
  if (EOFBlob(image) != MagickFalse)
    {
      if (profile != (StringInfo *) NULL)
        profile=DestroyStringInfo(profile);
      ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
    }
  if (image_info->ping != MagickFalse)
    {
      if (profile != (StringInfo *) NULL)
        profile=DestroyStringInfo(profile);
      (void) CloseBlob(image);
      return(GetFirstImageInList(image));
    }
  /*
    Read the precombined layer, present for PSD < 4 compatibility.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " reading the precombined layer");
  imageListLength=GetImageListLength(image);
  if ((psd_info.has_merged_image != MagickFalse) || (imageListLength == 1))
    psd_info.has_merged_image=(MagickBooleanType) ReadPSDMergedImage(
      image_info,image,&psd_info,exception);
  /* no usable merged image: go back and read the layers after all */
  if ((psd_info.has_merged_image == MagickFalse) && (imageListLength == 1) &&
      (length != 0))
    {
      (void) SeekBlob(image,offset,SEEK_SET);
      status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse,
        exception);
      if (status != MagickTrue)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
    }
  if (psd_info.has_merged_image == MagickFalse)
    {
      Image
        *merged;

      if (imageListLength == 1)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
        }
      /* synthesize the composite by flattening the layers we read */
      image->background_color.alpha=(MagickRealType) TransparentAlpha;
      image->background_color.alpha_trait=BlendPixelTrait;
      (void) SetImageBackgroundColor(image,exception);
      merged=MergeImageLayers(image,FlattenLayer,exception);
      ReplaceImageInList(&image,merged);
    }
  if (profile != (StringInfo *) NULL)
    {
      Image
        *next;

      /* attach the resource-block profile to every frame we kept */
      i=0;
      next=image;
      while (next != (Image *) NULL)
      {
        if (PSDSkipImage(&psd_info,image_info,i++) == MagickFalse)
          (void) SetImageProfile(next,GetStringInfoName(profile),profile,
            exception);
        next=next->next;
      }
      profile=DestroyStringInfo(profile);
    }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPSDImage() adds properties for the PSD image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPSDImage method is:
%
% size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
  static const struct
  {
    const char
      *name,
      *description;
  } formats[] =
  {
    { "PSB", "Adobe Large Document Format" },
    { "PSD", "Adobe Photoshop bitmap" }
  };

  MagickInfo
    *entry;

  size_t
    i;

  /*
    Register the large-document (PSB) and classic (PSD) variants; both
    share the same handlers and require seekable streams.
  */
  for (i=0; i < sizeof(formats)/sizeof(formats[0]); i++)
  {
    entry=AcquireMagickInfo("PSD",formats[i].name,formats[i].description);
    entry->decoder=(DecodeImageHandler *) ReadPSDImage;
    entry->encoder=(EncodeImageHandler *) WritePSDImage;
    entry->magick=(IsImageFormatHandler *) IsPSD;
    entry->flags|=CoderDecoderSeekableStreamFlag;
    entry->flags|=CoderEncoderSeekableStreamFlag;
    (void) RegisterMagickInfo(entry);
  }
  return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPSDImage() removes format registrations made by the
% PSD module from the list of supported formats.
%
% The format of the UnregisterPSDImage method is:
%
% UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
(void) UnregisterMagickInfo("PSB");
(void) UnregisterMagickInfo("PSD");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
% The format of the WritePSDImage method is:
%
% MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  /* PSD (version 1) stores offsets in 16 bits, PSB (version 2) in 32 */
  return(psd_info->version == 1 ?
    WriteBlobMSBShort(image,(unsigned short) offset) :
    WriteBlobMSBLong(image,(unsigned int) offset));
}
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    restore_offset;

  ssize_t
    count;

  /*
    Back-patch a previously reserved offset field: seek to it, write the
    value in the version-appropriate width, then restore the position.
  */
  restore_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  count=(psd_info->version == 1) ?
    WriteBlobMSBShort(image,(unsigned short) size) :
    WriteBlobMSBLong(image,(unsigned int) size);
  (void) SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  /*
    Emit a section-length field: 32-bit for classic PSD (version 1),
    64-bit for PSB.
  */
  if (psd_info->version != 1)
    return(WriteBlobLongLong(image,size));
  return(WriteBlobLong(image,(unsigned int) size));
}
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    restore_offset;

  ssize_t
    count;

  /*
    Back-patch a length field reserved earlier at 'offset', then restore
    the write head to where it was.
  */
  restore_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  count=SetPSDSize(psd_info,image,size);
  (void) SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
/*
  PSDPackbitsEncodeImage() compresses 'length' raw bytes into compact_pixels
  using the Packbits (RLE) scheme PSD requires: a replicate packet
  <(256-count)+1, byte> for runs, a literal packet <count-1, bytes...>
  otherwise, then a 0x80 end-of-data marker.  Returns the number of bytes
  stored in compact_pixels.  (The 'exception' parameter is unused here;
  allocation failure is reported via ThrowBinaryException.)
*/
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
  const unsigned char *pixels,unsigned char *compact_pixels,
  ExceptionInfo *exception)
{
  int
    count;

  register ssize_t
    i,
    j;

  register unsigned char
    *q;

  unsigned char
    *packbits;  /* scratch buffer for one literal packet (128 bytes max) */

  /*
    Compress pixels with Packbits encoding.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pixels != (unsigned char *) NULL);
  assert(compact_pixels != (unsigned char *) NULL);
  packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
  if (packbits == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  q=compact_pixels;
  /* i counts the bytes still to encode; the tail cases (1-3 bytes) are
     handled explicitly so the default case may safely peek 3 bytes ahead */
  for (i=(ssize_t) length; i != 0; )
  {
    switch (i)
    {
      case 1:
      {
        /* one trailing byte: emit as a one-byte literal packet */
        i--;
        *q++=(unsigned char) 0;
        *q++=(*pixels);
        break;
      }
      case 2:
      {
        /* two trailing bytes: a run would be no shorter, emit literal */
        i-=2;
        *q++=(unsigned char) 1;
        *q++=(*pixels);
        *q++=pixels[1];
        break;
      }
      case 3:
      {
        i-=3;
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /* three identical trailing bytes: replicate packet, length 3 */
            *q++=(unsigned char) ((256-3)+1);
            *q++=(*pixels);
            break;
          }
        /* otherwise a three-byte literal packet */
        *q++=(unsigned char) 2;
        *q++=(*pixels);
        *q++=pixels[1];
        *q++=pixels[2];
        break;
      }
      default:
      {
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /*
              Packed run.
            */
            count=3;
            while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
            {
              count++;
              if (count >= 127)  /* a replicate packet encodes at most 128 */
                break;
            }
            i-=count;
            *q++=(unsigned char) ((256-count)+1);
            *q++=(*pixels);
            pixels+=count;
            break;
          }
        /*
          Literal run: copy bytes into packbits[1..] until a 3-byte run
          begins, we near the tail (i-3), or the 127-byte packet limit.
        */
        count=0;
        while ((*(pixels+count) != *(pixels+count+1)) ||
               (*(pixels+count+1) != *(pixels+count+2)))
        {
          packbits[count+1]=pixels[count];
          count++;
          if (((ssize_t) count >= (i-3)) || (count >= 127))
            break;
        }
        i-=count;
        *packbits=(unsigned char) (count-1);  /* packet header */
        for (j=0; j <= (ssize_t) count; j++)
          *q++=packbits[j];
        pixels+=count;
        break;
      }
    }
  }
  *q++=(unsigned char) 128; /* EOD marker */
  packbits=(unsigned char *) RelinquishMagickMemory(packbits);
  return((size_t) (q-compact_pixels));
}
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const CompressionType compression,
  const ssize_t channels)
{
  size_t
    count;

  ssize_t
    channel,
    row;

  /*
    Write the 2-byte compression tag.  For RLE, additionally reserve one
    zeroed scanline-size slot per row and channel; these are back-patched
    once each row has been compressed.
  */
  if (compression == RLECompression)
    {
      count=(size_t) WriteBlobShort(image,RLE);
      for (channel=0; channel < channels; channel++)
        for (row=0; row < (ssize_t) next_image->rows; row++)
          count+=SetPSDOffset(psd_info,image,0);
      return(count);
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    return((size_t) WriteBlobShort(image,ZipWithoutPrediction));
#endif
  return((size_t) WriteBlobShort(image,Raw));
}
/*
  WritePSDChannel() writes the pixel data of one channel of next_image to the
  blob of 'image', encoded per 'compression' (raw, RLE/Packbits, or ZIP when
  zlib is available).  Returns the number of bytes written, 0 on failure.

  size_offset locates the channel's RLE scanline-size table in the blob and
  is advanced as each row's entry is patched.  When 'separate' is set the
  channel is written with its own compression header (layer-channel data);
  otherwise the caller has already written a shared header.
*/
static size_t WritePSDChannel(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  const QuantumType quantum_type, unsigned char *compact_pixels,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  const CompressionType compression,ExceptionInfo *exception)
{
  MagickBooleanType
    monochrome;

  QuantumInfo
    *quantum_info;

  register const Quantum
    *p;

  register ssize_t
    i;

  size_t
    count,
    length;

  ssize_t
    y;

  unsigned char
    *pixels;

#ifdef MAGICKCORE_ZLIB_DELEGATE

  int
    flush,
    level;

  unsigned char
    *compressed_pixels;

  z_stream
    stream;

  compressed_pixels=(unsigned char *) NULL;
  flush=Z_NO_FLUSH;
#endif
  count=0;
  if (separate != MagickFalse)
    {
      /* this channel carries its own header; its scanline table starts
         2 bytes past the header position */
      size_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,1);
    }
  if (next_image->depth > 8)
    next_image->depth=16;  /* PSD supports only 8- or 16-bit here */
  /* NOTE(review): the monochrome test reads 'image', not 'next_image' --
     presumably intentional for the composite pass; confirm */
  monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
    MagickTrue : MagickFalse;
  quantum_info=AcquireQuantumInfo(image_info,next_image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  pixels=(unsigned char *) GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    {
      compressed_pixels=(unsigned char *) AcquireQuantumMemory(
        MagickMinBufferExtent,sizeof(*compressed_pixels));
      if (compressed_pixels == (unsigned char *) NULL)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
      memset(&stream,0,sizeof(stream));
      stream.data_type=Z_BINARY;
      level=Z_DEFAULT_COMPRESSION;
      if ((image_info->quality > 0 && image_info->quality < 10))
        level=(int) image_info->quality;  /* quality 1-9 => zlib level */
      if (deflateInit(&stream,level) != Z_OK)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          compressed_pixels=(unsigned char *) RelinquishMagickMemory(
            compressed_pixels);
          return(0);
        }
    }
#endif
  for (y=0; y < (ssize_t) next_image->rows; y++)
  {
    p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
      quantum_type,pixels,exception);
    if (monochrome != MagickFalse)
      for (i=0; i < (ssize_t) length; i++)
        pixels[i]=(~pixels[i]);  /* invert for 1-bit (bitmap-mode) output */
    if (compression == RLECompression)
      {
        length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels,
          exception);
        count+=WriteBlob(image,length,compact_pixels);
        /* patch this row's entry in the scanline-size table */
        size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
      }
#ifdef MAGICKCORE_ZLIB_DELEGATE
    else if (compression == ZipCompression)
      {
        stream.avail_in=(uInt) length;
        stream.next_in=(Bytef *) pixels;
        if (y == (ssize_t) next_image->rows-1)
          flush=Z_FINISH;  /* last row: drain the deflate stream */
        do {
            stream.avail_out=(uInt) MagickMinBufferExtent;
            stream.next_out=(Bytef *) compressed_pixels;
            if (deflate(&stream,flush) == Z_STREAM_ERROR)
              break;
            length=(size_t) MagickMinBufferExtent-stream.avail_out;
            if (length > 0)
              count+=WriteBlob(image,length,compressed_pixels);
        } while (stream.avail_out == 0);
      }
#endif
    else
      count+=WriteBlob(image,length,pixels);
  }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    {
      (void) deflateEnd(&stream);
      compressed_pixels=(unsigned char *) RelinquishMagickMemory(
        compressed_pixels);
    }
#endif
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(count);
}
static unsigned char *AcquireCompactPixels(const Image *image,
  ExceptionInfo *exception)
{
  size_t
    bytes_per_packet;

  unsigned char
    *compact_pixels;

  /*
    Allocate a worst-case Packbits output buffer for one scanline: RLE can
    expand incompressible data, so reserve (9*columns)+1 packets.  Returns
    NULL (with an exception raised) on allocation failure.
  */
  bytes_per_packet=(image->depth > 8UL) ? 2UL : 1UL;
  compact_pixels=(unsigned char *) AcquireQuantumMemory((9*image->columns)+1,
    bytes_per_packet*sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    (void) ThrowMagickException(exception,GetMagickModule(),
      ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
  return(compact_pixels);
}
/*
  WritePSDChannels() writes every channel of next_image (index, gray, or
  RGB/CMYK, plus alpha and an optional opacity mask) to the blob of 'image'.
  Returns the total number of bytes written, or 0 on failure.

  When 'separate' is MagickFalse (composite data) a single shared
  compression header is written up front and rows_offset walks the shared
  RLE scanline table; when MagickTrue (layer data) each channel gets its own
  header and its length field at size_offset is back-patched.
*/
static size_t WritePSDChannels(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  ExceptionInfo *exception)
{
  CompressionType
    compression;

  Image
    *mask;

  MagickOffsetType
    rows_offset;

  size_t
    channels,
    count,
    length,
    offset_length;

  unsigned char
    *compact_pixels;

  count=0;
  offset_length=0;
  rows_offset=0;
  compact_pixels=(unsigned char *) NULL;
  compression=next_image->compression;
  if (image_info->compression != UndefinedCompression)
    compression=image_info->compression;  /* explicit user option wins */
  if (compression == RLECompression)
    {
      compact_pixels=AcquireCompactPixels(next_image,exception);
      if (compact_pixels == (unsigned char *) NULL)
        return(0);
    }
  channels=1;
  if (separate == MagickFalse)
    {
      /* composite pass: count channels and write one shared header */
      if ((next_image->storage_class != PseudoClass) ||
          (IsImageGray(next_image) != MagickFalse))
        {
          if (IsImageGray(next_image) == MagickFalse)
            channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 :
              3);
          if (next_image->alpha_trait != UndefinedPixelTrait)
            channels++;
        }
      rows_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,
        (ssize_t) channels);
      /* bytes spanned by one channel's slice of the RLE scanline table */
      offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4));
    }
  size_offset+=2;
  if ((next_image->storage_class == PseudoClass) &&
      (IsImageGray(next_image) == MagickFalse))
    {
      /* colormapped image: a single index channel */
      length=WritePSDChannel(psd_info,image_info,image,next_image,
        IndexQuantum,compact_pixels,rows_offset,separate,compression,
        exception);
      if (separate != MagickFalse)
        size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
      else
        rows_offset+=offset_length;
      count+=length;
    }
  else
    {
      if (IsImageGray(next_image) != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GrayQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
      else
        {
          /* negate CMYK before writing; undone after all channels below */
          if (next_image->colorspace == CMYKColorspace)
            (void) NegateCMYK(next_image,exception);
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            RedQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GreenQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            BlueQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          if (next_image->colorspace == CMYKColorspace)
            {
              length=WritePSDChannel(psd_info,image_info,image,next_image,
                BlackQuantum,compact_pixels,rows_offset,separate,compression,
                exception);
              if (separate != MagickFalse)
                size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
              else
                rows_offset+=offset_length;
              count+=length;
            }
        }
      if (next_image->alpha_trait != UndefinedPixelTrait)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            AlphaQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
    }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  if (next_image->colorspace == CMYKColorspace)
    (void) NegateCMYK(next_image,exception);  /* restore original values */
  if (separate != MagickFalse)
    {
      const char
        *property;

      /* optional layer opacity mask, stashed in the image registry */
      property=GetImageArtifact(next_image,"psd:opacity-mask");
      if (property != (const char *) NULL)
        {
          mask=(Image *) GetImageRegistry(ImageRegistryType,property,
            exception);
          if (mask != (Image *) NULL)
            {
              if (compression == RLECompression)
                {
                  compact_pixels=AcquireCompactPixels(mask,exception);
                  if (compact_pixels == (unsigned char *) NULL)
                    return(0);
                }
              length=WritePSDChannel(psd_info,image_info,image,mask,
                RedQuantum,compact_pixels,rows_offset,MagickTrue,compression,
                exception);
              (void) WritePSDSize(psd_info,image,length,size_offset);
              count+=length;
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
            }
        }
    }
  return(count);
}
/*
  WritePascalString() writes 'value' as a Pascal string: a length byte
  followed by at most 255 bytes of text, then zero-padded so the whole field
  is a multiple of 'padding' bytes (PSD uses 2 or 4; must be non-zero).
  Returns the number of bytes written.
*/
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
  register ssize_t
    i;

  size_t
    count,
    length;

  /*
    Max length is 255.  Hoist strlen() so it is computed once rather than
    twice as in the old ternary.
  */
  count=0;
  length=strlen(value);
  if (length > 255UL)
    length=255UL;
  if (length == 0)
    count+=WriteBlobByte(image,0);
  else
    {
      count+=WriteBlobByte(image,(unsigned char) length);
      count+=WriteBlob(image,length,(const unsigned char *) value);
    }
  length++;  /* include the length byte itself when padding */
  if ((length % padding) == 0)
    return(count);
  for (i=0; i < (ssize_t) (padding-(length % padding)); i++)
    count+=WriteBlobByte(image,0);
  return(count);
}
/*
  WriteResolutionResourceBlock() writes the 16-byte 8BIM resource 0x03ED,
  which records the horizontal and vertical resolution as 16.16 fixed-point
  values together with unit tags (this code writes 1 for inch-based and 2
  for centimeter-based resolution).
*/
static void WriteResolutionResourceBlock(Image *image)
{
  double
    x_resolution,
    y_resolution;

  unsigned short
    units;

  if (image->units == PixelsPerCentimeterResolution)
    {
      /* convert pixels/cm to the inch-based fixed-point value */
      x_resolution=2.54*65536.0*image->resolution.x+0.5;
      y_resolution=2.54*65536.0*image->resolution.y+0.5;
      units=2;
    }
  else
    {
      x_resolution=65536.0*image->resolution.x+0.5;
      y_resolution=65536.0*image->resolution.y+0.5;
      units=1;
    }
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);  /* resolution resource id */
  (void) WriteBlobMSBShort(image,0);  /* empty, padded resource name */
  (void) WriteBlobMSBLong(image,16); /* resource size */
  /* NOTE(review): +0.5 is added both above and in the casts below, so the
     value is rounded twice (a one-ulp bias in the 16.16 fraction); confirm
     against the reader before changing */
  (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,units); /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,units); /* height unit */
}
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  size_t
    bytes_written;

  /*
    Write one channel-information record: the channel id followed by a
    zeroed length field that is back-patched once the channel data has
    been written.
  */
  bytes_written=(size_t) WriteBlobShort(image,(unsigned short) channel);
  bytes_written+=SetPSDSize(psd_info,image,0);
  return(bytes_written);
}
/*
  RemoveICCProfileFromResourceBlock() walks the 8BIM resource records in
  bim_profile and deletes the ICC profile resource (id 0x040F) in place,
  shrinking the profile accordingly.  The walk stops at the first record
  that lacks the "8BIM" signature.
*/
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;  /* discarded signature/name fields */

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;  /* too small to hold even one record header */
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    q=(unsigned char *) p;  /* start of this record, for the memmove below */
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    /* record header: signature, id, (empty) name, data byte count */
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    if (id == 0x0000040f)
      {
        ssize_t
          quantum;

        /* total record size: padded data plus the 12-byte header */
        quantum=PSDQuantum(count)+12;
        if ((quantum >= 12) && (quantum < (ssize_t) length))
          {
            if ((q+quantum < (datum+length-16)))
              (void) memmove(q,q+quantum,length-quantum-(q-datum));
            SetStringInfoLength(bim_profile,length-quantum);
          }
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)
      p++;  /* resource data is padded to an even byte count */
  }
}
/*
  RemoveResolutionFromResourceBlock() deletes the resolution resource
  (id 0x03ED) from the 8BIM resource records in bim_profile, in place; the
  writer emits its own resolution block, so a stale one must not survive.
*/
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;  /* discarded signature/name fields */

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;  /* too small to hold even one record header */
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    ssize_t
      cnt;

    q=(unsigned char *) p;  /* start of this record */
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      return;
    /* record header: signature, id, (empty) name, data byte count */
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    cnt=PSDQuantum(count);  /* data size padded to an even byte count */
    if (cnt < 0)
      return;
    if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) &&
        ((ssize_t) length-(cnt+12)-(q-datum)) > 0)
      {
        /* close the gap over the record and shrink the profile */
        (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum));
        SetStringInfoLength(bim_profile,length-(cnt+12));
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)
      p++;  /* skip the pad byte after odd-sized data */
  }
}
/*
  GetAdditionalInformation() returns the "psd:additional-info" profile to be
  written for a layer, honoring the psd:additional-info option: "all" keeps
  the profile unchanged, anything other than "selective" drops it entirely,
  and "selective" filters it in place so only records whose 4-byte keys are
  on the whitelist below survive.  Returns NULL when nothing should be
  written.
*/
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
#define PSDKeySize 5
#define PSDAllowedLength 36

  char
    key[PSDKeySize];

  /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */
  const char
    allowed[PSDAllowedLength][PSDKeySize] = {
      "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
      "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
      "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
      "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
    },
    *option;

  const StringInfo
    *info;

  MagickBooleanType
    found;

  register size_t
    i;

  size_t
    remaining_length,
    length;

  StringInfo
    *profile;

  unsigned char
    *p;

  unsigned int
    size;

  info=GetImageProfile(image,"psd:additional-info");
  if (info == (const StringInfo *) NULL)
    return((const StringInfo *) NULL);
  option=GetImageOption(image_info,"psd:additional-info");
  if (LocaleCompare(option,"all") == 0)
    return(info);
  if (LocaleCompare(option,"selective") != 0)
    {
      /* option absent or unrecognized: discard the profile entirely */
      profile=RemoveImageProfile(image,"psd:additional-info");
      return(DestroyStringInfo(profile));
    }
  /* selective mode: compact the records in place, keeping only whitelisted
     keys; 'length' accumulates the bytes retained */
  length=GetStringInfoLength(info);
  p=GetStringInfoDatum(info);
  remaining_length=length;
  length=0;
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(char) (*p++);
    key[1]=(char) (*p++);
    key[2]=(char) (*p++);
    key[3]=(char) (*p++);
    key[4]='\0';
    /* big-endian 32-bit data length for this record */
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;
    remaining_length-=12;
    if ((size_t) size > remaining_length)
      return((const StringInfo *) NULL);  /* truncated/corrupt record */
    found=MagickFalse;
    for (i=0; i < PSDAllowedLength; i++)
    {
      if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
        continue;
      found=MagickTrue;
      break;
    }
    remaining_length-=(size_t) size;
    if (found == MagickFalse)
      {
        /* not whitelisted: slide the tail over this record */
        if (remaining_length > 0)
          p=(unsigned char *) memmove(p-12,p+size,remaining_length);
        continue;
      }
    length+=(size_t) size+12;
    p+=size;
  }
  /* NOTE(review): 'profile' and 'info' alias the same StringInfo; the
     filtered data is truncated to 'length' and re-registered -- confirm
     SetImageProfile clones before DestroyStringInfo-style cleanup elsewhere */
  profile=RemoveImageProfile(image,"psd:additional-info");
  if (length == 0)
    return(DestroyStringInfo(profile));
  SetStringInfoLength(profile,(const size_t) length);
  (void) SetImageProfile(image,"psd:additional-info",info,exception);
  return(profile);
}
/*
  WritePSDLayersInternal() writes the layer-info section: a layer count,
  one layer record per image in the list (bounds, channel table, blend mode,
  opacity, flags, optional mask record, name, extra data), then the channel
  pixel data for every layer.  The section length reserved at the start is
  back-patched at the end.  If layers_size is non-NULL it receives the
  unpadded section size.  Returns MagickTrue on success.
*/
static MagickBooleanType WritePSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,size_t *layers_size,
  ExceptionInfo *exception)
{
  char
    layer_name[MagickPathExtent];

  const char
    *property;

  const StringInfo
    *info;

  Image
    *base_image,
    *next_image;

  MagickBooleanType
    status;

  MagickOffsetType
    *layer_size_offsets,  /* where each layer's channel-size table starts */
    size_offset;          /* where the section length is back-patched */

  register ssize_t
    i;

  size_t
    layer_count,
    layer_index,
    length,
    name_length,
    rounded_size,
    size;

  status=MagickTrue;
  /* layers start at the second image; a single image is its own layer */
  base_image=GetNextImageInList(image);
  if (base_image == (Image *) NULL)
    base_image=image;
  size=0;
  size_offset=TellBlob(image);
  (void) SetPSDSize(psd_info,image,0);  /* reserve the section length */
  layer_count=0;
  for (next_image=base_image; next_image != NULL; )
  {
    layer_count++;
    next_image=GetNextImageInList(next_image);
  }
  /* a negative layer count flags that the first alpha channel holds the
     merged-composite transparency */
  if (image->alpha_trait != UndefinedPixelTrait)
    size+=WriteBlobShort(image,-(unsigned short) layer_count);
  else
    size+=WriteBlobShort(image,(unsigned short) layer_count);
  layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory(
    (size_t) layer_count,sizeof(MagickOffsetType));
  if (layer_size_offsets == (MagickOffsetType *) NULL)
    ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
  layer_index=0;
  for (next_image=base_image; next_image != NULL; )
  {
    Image
      *mask;

    unsigned char
      default_color;

    unsigned short
      channels,
      total_channels;

    mask=(Image *) NULL;
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    default_color=0;
    if (property != (const char *) NULL)
      {
        /* layer has an opacity mask stashed in the image registry */
        mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception);
        default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0);
      }
    /* layer bounds: top, left, bottom, right */
    size+=WriteBlobSignedLong(image,(signed int) next_image->page.y);
    size+=WriteBlobSignedLong(image,(signed int) next_image->page.x);
    size+=WriteBlobSignedLong(image,(signed int) (next_image->page.y+
      next_image->rows));
    size+=WriteBlobSignedLong(image,(signed int) (next_image->page.x+
      next_image->columns));
    channels=1;
    if ((next_image->storage_class != PseudoClass) &&
        (IsImageGray(next_image) == MagickFalse))
      channels=(unsigned short) (next_image->colorspace == CMYKColorspace ? 4 :
        3);
    total_channels=channels;
    if (next_image->alpha_trait != UndefinedPixelTrait)
      total_channels++;
    if (mask != (Image *) NULL)
      total_channels++;
    size+=WriteBlobShort(image,total_channels);
    /* remember where this layer's channel-size records begin so the sizes
       can be patched once the channel data has been written */
    layer_size_offsets[layer_index++]=TellBlob(image);
    for (i=0; i < (ssize_t) channels; i++)
      size+=WriteChannelSize(psd_info,image,(signed short) i);
    if (next_image->alpha_trait != UndefinedPixelTrait)
      size+=WriteChannelSize(psd_info,image,-1);  /* alpha channel id */
    if (mask != (Image *) NULL)
      size+=WriteChannelSize(psd_info,image,-2);  /* user-mask channel id */
    /* blend-mode signature; NOTE(review): byte-reversed to "MIB8" when the
       blob endian is LSB -- confirm this matches the blob writer's swapping */
    size+=WriteBlobString(image,image->endian == LSBEndian ? "MIB8" :"8BIM");
    size+=WriteBlobString(image,CompositeOperatorToPSDBlendMode(next_image));
    property=GetImageArtifact(next_image,"psd:layer.opacity");
    if (property != (const char *) NULL)
      {
        Quantum
          opacity;

        opacity=(Quantum) StringToInteger(property);
        size+=WriteBlobByte(image,ScaleQuantumToChar(opacity));
        (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception);
      }
    else
      size+=WriteBlobByte(image,255);  /* fully opaque by default */
    size+=WriteBlobByte(image,0);  /* clipping: base */
    size+=WriteBlobByte(image,(const unsigned char)
      (next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. */
    size+=WriteBlobByte(image,0);  /* filler */
    info=GetAdditionalInformation(image_info,next_image,exception);
    property=(const char *) GetImageProperty(next_image,"label",exception);
    if (property == (const char *) NULL)
      {
        /* unnamed layer: synthesize "L<index>" */
        (void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g",
          (double) layer_index);
        property=layer_name;
      }
    /* extra-data length: padded Pascal name + additional info + the mask
       record (20 bytes) and the two fixed长 fields (8 bytes) */
    name_length=strlen(property)+1;
    if ((name_length % 4) != 0)
      name_length+=(4-(name_length % 4));
    if (info != (const StringInfo *) NULL)
      name_length+=GetStringInfoLength(info);
    name_length+=8;
    if (mask != (Image *) NULL)
      name_length+=20;
    size+=WriteBlobLong(image,(unsigned int) name_length);
    if (mask == (Image *) NULL)
      size+=WriteBlobLong(image,0);  /* no mask record */
    else
      {
        if (mask->compose != NoCompositeOp)
          (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum(
            default_color),MagickTrue,exception);
        /* mask bounds are stored in canvas coordinates */
        mask->page.y+=image->page.y;
        mask->page.x+=image->page.x;
        size+=WriteBlobLong(image,20);  /* mask record length */
        size+=WriteBlobSignedLong(image,(const signed int) mask->page.y);
        size+=WriteBlobSignedLong(image,(const signed int) mask->page.x);
        size+=WriteBlobSignedLong(image,(const signed int) (mask->rows+
          mask->page.y));
        size+=WriteBlobSignedLong(image,(const signed int) (mask->columns+
          mask->page.x));
        size+=WriteBlobByte(image,default_color);
        size+=WriteBlobByte(image,(const unsigned char)
          (mask->compose == NoCompositeOp ? 2 : 0));  /* mask flags */
        size+=WriteBlobMSBShort(image,0);  /* padding */
      }
    size+=WriteBlobLong(image,0);  /* blending-ranges length: none */
    size+=WritePascalString(image,property,4);
    if (info != (const StringInfo *) NULL)
      size+=WriteBlob(image,GetStringInfoLength(info),
        GetStringInfoDatum(info));
    next_image=GetNextImageInList(next_image);
  }
  /*
    Now the image data!
  */
  next_image=base_image;
  layer_index=0;
  while (next_image != NULL)
  {
    length=WritePSDChannels(psd_info,image_info,image,next_image,
      layer_size_offsets[layer_index++],MagickTrue,exception);
    if (length == 0)
      {
        status=MagickFalse;
        break;
      }
    size+=length;
    next_image=GetNextImageInList(next_image);
  }
  /*
    Write the total size
  */
  if (layers_size != (size_t*) NULL)
    *layers_size=size;
  /* the recorded section length is rounded up to an even byte count */
  if ((size/2) != ((size+1)/2))
    rounded_size=size+1;
  else
    rounded_size=size;
  (void) WritePSDSize(psd_info,image,rounded_size,size_offset);
  layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory(
    layer_size_offsets);
  /*
    Remove the opacity mask from the registry
  */
  next_image=base_image;
  while (next_image != (Image *) NULL)
  {
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    if (property != (const char *) NULL)
      (void) DeleteImageRegistry(property);
    next_image=GetNextImageInList(next_image);
  }
  return(status);
}
ModuleExport MagickBooleanType WritePSDLayers(Image * image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  /*
    Public entry point for writing the layer-info section: honor the coder
    security policy, then delegate to the internal writer without asking for
    the total section size.
  */
  if (IsRightsAuthorized(CoderPolicyDomain,WritePolicyRights,"PSD") ==
      MagickFalse)
    return(MagickTrue);
  return(WritePSDLayersInternal(image,image_info,psd_info,(size_t *) NULL,
    exception));
}
/*
  WritePSDImage() writes the image list as an Adobe Photoshop file: header,
  optional colormap, image-resource section (resolution, 8BIM, ICC), the
  layer-info section, and finally the merged composite channel data.
*/
static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  const StringInfo
    *icc_profile;

  MagickBooleanType
    status;

  PSDInfo
    psd_info;

  register ssize_t
    i;

  size_t
    length,
    num_channels,
    packet_size;

  StringInfo
    *bim_profile;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  /* NOTE(review): packet_size is computed but never read below; confirm
     whether it can be dropped */
  packet_size=(size_t) (image->depth > 8 ? 6 : 3);
  if (image->alpha_trait != UndefinedPixelTrait)
    packet_size+=image->depth > 8 ? 2 : 1;
  /* version 2 (PSB) is required for PSB magick or dimensions over 30000 */
  psd_info.version=1;
  if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
      (image->columns > 30000) || (image->rows > 30000))
    psd_info.version=2;
  (void) WriteBlob(image,4,(const unsigned char *) "8BPS");
  (void) WriteBlobMSBShort(image,psd_info.version); /* version */
  for (i=1; i <= 6; i++)
    (void) WriteBlobByte(image, 0);  /* 6 bytes of reserved */
  /* When the image has a color profile it won't be converted to gray scale */
  if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) &&
      (SetImageGray(image,exception) != MagickFalse))
    num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
  else
    if ((image_info->type != TrueColorType) && (image_info->type !=
         TrueColorAlphaType) && (image->storage_class == PseudoClass))
      num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
    else
      {
        if (image->storage_class == PseudoClass)
          (void) SetImageStorageClass(image,DirectClass,exception);
        if (image->colorspace != CMYKColorspace)
          num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL);
        else
          num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL);
      }
  (void) WriteBlobMSBShort(image,(unsigned short) num_channels);
  (void) WriteBlobMSBLong(image,(unsigned int) image->rows);
  (void) WriteBlobMSBLong(image,(unsigned int) image->columns);
  if (IsImageGray(image) != MagickFalse)
    {
      MagickBooleanType
        monochrome;

      /*
        Write depth & mode.
      */
      monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
        MagickTrue : MagickFalse;
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8));
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? BitmapMode : GrayscaleMode));
    }
  else
    {
      /* colormapped images are written 8-bit in indexed mode */
      (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class ==
        PseudoClass ? 8 : image->depth > 8 ? 16 : 8));
      if (((image_info->colorspace != UndefinedColorspace) ||
           (image->colorspace != CMYKColorspace)) &&
          (image_info->colorspace != CMYKColorspace))
        {
          (void) TransformImageColorspace(image,sRGBColorspace,exception);
          (void) WriteBlobMSBShort(image,(unsigned short)
            (image->storage_class == PseudoClass ? IndexedMode : RGBMode));
        }
      else
        {
          if (image->colorspace != CMYKColorspace)
            (void) TransformImageColorspace(image,CMYKColorspace,exception);
          (void) WriteBlobMSBShort(image,CMYKMode);
        }
    }
  if ((IsImageGray(image) != MagickFalse) ||
      (image->storage_class == DirectClass) || (image->colors > 256))
    (void) WriteBlobMSBLong(image,0);  /* no color-mode data */
  else
    {
      /*
        Write PSD raster colormap: 256 red, 256 green, 256 blue bytes,
        zero-padded beyond the actual palette size.
      */
      (void) WriteBlobMSBLong(image,768);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].red)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].green)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].blue)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
    }
  /*
    Image resource block.
  */
  length=28; /* 0x03EB */
  bim_profile=(StringInfo *) GetImageProfile(image,"8bim");
  icc_profile=GetImageProfile(image,"icc");
  if (bim_profile != (StringInfo *) NULL)
    {
      /* strip ICC and resolution records from the 8BIM profile since both
         are written explicitly below */
      bim_profile=CloneStringInfo(bim_profile);
      if (icc_profile != (StringInfo *) NULL)
        RemoveICCProfileFromResourceBlock(bim_profile);
      RemoveResolutionFromResourceBlock(bim_profile);
      length+=PSDQuantum(GetStringInfoLength(bim_profile));
    }
  if (icc_profile != (const StringInfo *) NULL)
    length+=PSDQuantum(GetStringInfoLength(icc_profile))+12;
  (void) WriteBlobMSBLong(image,(unsigned int) length);
  WriteResolutionResourceBlock(image);
  if (bim_profile != (StringInfo *) NULL)
    {
      (void) WriteBlob(image,GetStringInfoLength(bim_profile),
        GetStringInfoDatum(bim_profile));
      bim_profile=DestroyStringInfo(bim_profile);
    }
  if (icc_profile != (StringInfo *) NULL)
    {
      /* ICC profile as 8BIM resource 0x040F, padded to an even length */
      (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
      (void) WriteBlobMSBShort(image,0x0000040F);
      (void) WriteBlobMSBShort(image,0);
      (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength(
        icc_profile));
      (void) WriteBlob(image,GetStringInfoLength(icc_profile),
        GetStringInfoDatum(icc_profile));
      if ((ssize_t) GetStringInfoLength(icc_profile) !=
          PSDQuantum(GetStringInfoLength(icc_profile)))
        (void) WriteBlobByte(image,0);
    }
  if (status != MagickFalse)
    {
      MagickOffsetType
        size_offset;

      size_t
        size;

      /* reserve the layer+mask section length, write the layers, then
         patch the length (plus the global-mask-length field) back in */
      size_offset=TellBlob(image);
      (void) SetPSDSize(&psd_info,image,0);
      status=WritePSDLayersInternal(image,image_info,&psd_info,&size,
        exception);
      size_offset+=WritePSDSize(&psd_info,image,size+
        (psd_info.version == 1 ? 8 : 12),size_offset);
    }
  (void) WriteBlobMSBLong(image,0);  /* user mask data */
  /*
    Write composite image.
  */
  if (status != MagickFalse)
    {
      CompressionType
        compression;

      compression=image->compression;
      if (image_info->compression != UndefinedCompression)
        image->compression=image_info->compression;
      /* the composite section does not support ZIP; fall back to RLE */
      if (image->compression == ZipCompression)
        image->compression=RLECompression;
      if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse,
           exception) == 0)
        status=MagickFalse;
      image->compression=compression;  /* restore caller's setting */
    }
  (void) CloseBlob(image);
  return(status);
}
// GB_binop__le_int16.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__le_int16)
// A.*B function (eWiseMult): GB (_AemultB__le_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__le_int16)
// A.*B function (eWiseMult): GB (_AemultB_03__le_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__le_int16)
// A*D function (colscale): GB (_AxD__le_int16)
// D*A function (rowscale): GB (_DxB__le_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__le_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__le_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_int16)
// C=scalar+B GB (_bind1st__le_int16)
// C=scalar+B' GB (_bind1st_tran__le_int16)
// C=A+scalar GB (_bind2nd__le_int16)
// C=A'+scalar GB (_bind2nd_tran__le_int16)
// C type: bool
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij <= bij)
// type of the entries of the A matrix
#define GB_ATYPE \
int16_t

// type of the entries of the B matrix
#define GB_BTYPE \
int16_t

// type of the entries of the C matrix (LE is a comparator, so C is boolean)
#define GB_CTYPE \
bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0

// aij = Ax [pA]: load entry A(i,j) from the value array of A
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]

// bij = Bx [pB]: load entry B(i,j) from the value array of B
#define GB_GETB(bij,Bx,pB) \
int16_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]

// access entry p of the C value array
#define GB_CX(p) Cx [p]

// binary operator: z = (x <= y); row index i and column index j are unused
#define GB_BINOP(z, x, y, i, j) \
z = (x <= y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
0

// op is second
#define GB_OP_IS_SECOND \
0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LE || GxB_NO_INT16 || GxB_NO_LE_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled stub: C += A+B for all-dense matrices is only generated for
// accumulating ops (MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, RDIV);
// the LE comparator does not qualify, so this kernel is compiled out.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; cij = (aij <= bij).
// Returns GrB_NO_VALUE when this op/type pairing is disabled at compile time.
GrB_Info GB (_Cdense_ewise3_noaccum__le_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // loop body is supplied by the shared template
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.
// The template is compiled out (#if 0) for this operator, so the function
// is a successful no-op unless the whole kernel is disabled.
GrB_Info GB (_Cdense_accumB__le_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
// The template is compiled out (#if 0) for this operator, so the function
// is a successful no-op unless the whole kernel is disabled.
GrB_Info GB (_Cdense_accumb__le_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale by a diagonal matrix D; cij = (aij <= djj).
GrB_Info GB (_AxD__le_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C has bool entries (the comparator's output type)
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale by a diagonal matrix D; cij = (dii <= bij).
GrB_Info GB (_DxB__le_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C has bool entries (the comparator's output type)
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, over the union of the patterns of A and B,
// applying cij = (aij <= bij) where both entries are present.
GrB_Info GB (_AaddB__le_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // per-matrix slicing workspaces, freed by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, over the intersection of the patterns
// of A and B, applying cij = (aij <= bij).
GrB_Info GB (_AemultB_01__le_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 0 for LE (a flipped variant, GE, handles the swap
// upstream), so only the non-flipped template instantiation is compiled.
GrB_Info GB (_AemultB_02__le_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and A and B are bitmap/full.
GrB_Info GB (_AemultB_03__le_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is held as bitmap.
GrB_Info GB (_AemultB_bitmap__le_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the operator with the scalar bound as the first
// argument, i.e. Cx [p] = (x <= Bx [p]) for every entry present in B.
GrB_Info GB (_bind1st__le_int16)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from B's bitmap (GBB is true for full matrices)
        if (!GBB (Bb, p)) continue ;
        int16_t bij = Bx [p] ;
        Cx [p] = (x <= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the operator with the scalar bound as the second
// argument, i.e. Cx [p] = (Ax [p] <= y) for every entry present in A.
GrB_Info GB (_bind2nd__le_int16)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from A's bitmap (GBB is true for full matrices)
        if (!GBB (Ab, p)) continue ;
        int16_t aij = Ax [p] ;
        Cx [p] = (aij <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP defines the per-entry action used by GB_unop_transpose.c:
// cij = (x <= aij), with the scalar bound first (no typecasting despite
// the macro name).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int16_t aij = Ax [pA] ; \
    Cx [pC] = (x <= aij) ; \
}
// C = op (x, A'): transpose A while applying the operator with the scalar
// bound as the first argument.
GrB_Info GB (_bind1st_tran__le_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code generated after this function
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP defines the per-entry action used by GB_unop_transpose.c:
// cij = (aij <= y), with the scalar bound second (no typecasting despite
// the macro name).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int16_t aij = Ax [pA] ; \
    Cx [pC] = (aij <= y) ; \
}
// C = op (A', y): transpose A while applying the operator with the scalar
// bound as the second argument.
GrB_Info GB (_bind2nd_tran__le_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
8692.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
/* Fill A with the deterministic synthetic pattern A[r][c] = (r+c)/nj. */
static
void init_array (int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  int r, c;
  for (r = 0; r < ni; r++) {
    for (c = 0; c < nj; c++) {
      A[r][c] = ((DATA_TYPE) (r + c) / nj);
    }
  }
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* DCE code: dump every element of B to stderr so the kernel's output is
   live.  A newline is emitted every 20 elements (and at position 0),
   matching the PolyBench reference format; note the flat index uses the
   compile-time NJ, as in the original suite. */
static
void print_array(int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int r, c;
  for (r = 0; r < ni; r++) {
    for (c = 0; c < nj; c++) {
      fprintf(stderr, DATA_PRINTF_MODIFIER, B[r][c]);
      if ((r * NJ + c) % 20 == 0) {
        fprintf(stderr, "\n");
      }
    }
  }
  fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Main computational kernel: 2-D convolution of A with a fixed 3x3 stencil,
 * writing the interior of B (the one-cell border of B is never written).
 * The whole function, including call and return, is timed by the caller.
 *
 * Fix: the original code had a bare "#pragma omp" with no directive name
 * before the outer loop.  That is not a valid OpenMP directive (every
 * "omp" pragma must name a construct) and is at best ignored with a
 * warning; it has been removed.  The inner loop keeps its explicit
 * parallel-for-simd directive, so parallel behavior is unchanged. */
static
void kernel_conv2d(int ni,
                   int nj,
                   DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
                   DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;
#pragma scop
  for (i = 1; i < _PB_NI - 1; ++i)
  {
#pragma omp parallel for simd schedule(static) num_threads(2)
    for (j = 1; j < _PB_NJ - 1; ++j)
    {
      B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
              + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
              + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
    }
  }
#pragma endscop
}
/* Driver: allocate NI x NJ arrays, initialize A, time the convolution
 * kernel, and pass B to polybench_prevent_dce so the result stays live. */
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;
  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);
  /* Initialize array(s). */
  init_array (ni, nj, POLYBENCH_ARRAY(A));
  /* Start timer. */
  //polybench_start_instruments;
  polybench_timer_start();
  /* Run kernel. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;
  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));
  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);
  return 0;
}
|
print.c | /*
Copyright (c) 2010-2011, Jun Namikawa <jnamika@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include "utils.h"
#include "print.h"
#include "entropy.h"
#include "rnn_lyapunov.h"
/* Open `size` files whose names derive from template_filename: the first
 * "XXXXXX" in the template is replaced by a zero-padded 6-digit index; if
 * the template has no "XXXXXX", a "."-plus-6-digits suffix is appended.
 * Opened streams are stored into fp_array[0..size-1].  On any fopen
 * failure an error message is printed and the process exits. */
static void fopen_array (
        FILE **fp_array,
        int size,
        const char *template_filename,
        const char *mode)
{
    char str[7], *filename, *p;
    int length = strlen(template_filename);
    MALLOC(filename, length + 1);
    strcpy(filename, template_filename);
    p = strstr(filename, "XXXXXX");
    if (p == NULL) {
        /* no placeholder: grow the buffer and append room for ".dddddd" */
        REALLOC(filename, length + 8);
        filename[length] = '.';
        filename[length + 7] = '\0';
        p = filename + length + 1;
    }
    for (int i = 0; i < size; i++) {
        /* NOTE(review): snprintf truncates "%.6d" to 6 chars here, so
         * indices >= 1000000 would collide -- presumably size stays small;
         * confirm against callers. */
        snprintf(str, sizeof(str), "%.6d", i);
        memmove(p, str, 6);
        fp_array[i] = fopen(filename, mode);
        if (fp_array[i] == NULL) {
            print_error_msg();
            goto error;
        }
    }
    FREE(filename);
    return;
error:
    exit(EXIT_FAILURE);
}
/* Open every output stream requested by the parameter set: for each
 * configured filename of non-zero length, open it with `mode`; otherwise
 * leave the corresponding fp_list member NULL (writers check for NULL).
 * Per-series state files are opened as arrays of rnn->series_num streams.
 * On any open failure an error is printed and the process exits. */
void init_output_files (
        const struct general_parameters *gp,
        const struct recurrent_neural_network *rnn,
        struct output_files *fp_list,
        const char *mode)
{
    fp_list->array_size = rnn->series_num;
    /* one open-loop state file per training series */
    if (strlen(gp->iop.state_filename) > 0) {
        MALLOC(fp_list->fp_wstate_array, fp_list->array_size);
        fopen_array(fp_list->fp_wstate_array, fp_list->array_size,
                gp->iop.state_filename, mode);
    } else {
        fp_list->fp_wstate_array = NULL;
    }
    /* one closed-loop state file per training series */
    if (strlen(gp->iop.closed_state_filename) > 0) {
        MALLOC(fp_list->fp_wclosed_state_array, fp_list->array_size);
        fopen_array(fp_list->fp_wclosed_state_array, fp_list->array_size,
                gp->iop.closed_state_filename, mode);
    } else {
        fp_list->fp_wclosed_state_array = NULL;
    }
    if (strlen(gp->iop.weight_filename) > 0) {
        fp_list->fp_wweight = fopen(gp->iop.weight_filename, mode);
        if (fp_list->fp_wweight == NULL) goto error;
    } else {
        fp_list->fp_wweight = NULL;
    }
    if (strlen(gp->iop.threshold_filename) > 0) {
        fp_list->fp_wthreshold = fopen(gp->iop.threshold_filename, mode);
        if (fp_list->fp_wthreshold == NULL) goto error;
    } else {
        fp_list->fp_wthreshold = NULL;
    }
    if (strlen(gp->iop.tau_filename) > 0) {
        fp_list->fp_wtau = fopen(gp->iop.tau_filename, mode);
        if (fp_list->fp_wtau == NULL) goto error;
    } else {
        fp_list->fp_wtau = NULL;
    }
    if (strlen(gp->iop.sigma_filename) > 0) {
        fp_list->fp_wsigma = fopen(gp->iop.sigma_filename, mode);
        if (fp_list->fp_wsigma == NULL) goto error;
    } else {
        fp_list->fp_wsigma = NULL;
    }
    if (strlen(gp->iop.init_filename) > 0) {
        fp_list->fp_winit = fopen(gp->iop.init_filename, mode);
        if (fp_list->fp_winit == NULL) goto error;
    } else {
        fp_list->fp_winit = NULL;
    }
    /* adaptive-learning-rate log only makes sense when the feature is on */
    if (strlen(gp->iop.adapt_lr_filename) > 0 && gp->mp.use_adaptive_lr) {
        fp_list->fp_wadapt_lr = fopen(gp->iop.adapt_lr_filename, mode);
        if (fp_list->fp_wadapt_lr == NULL) goto error;
    } else {
        fp_list->fp_wadapt_lr = NULL;
    }
    if (strlen(gp->iop.error_filename) > 0) {
        fp_list->fp_werror = fopen(gp->iop.error_filename, mode);
        if (fp_list->fp_werror == NULL) goto error;
    } else {
        fp_list->fp_werror = NULL;
    }
    if (strlen(gp->iop.closed_error_filename) > 0) {
        fp_list->fp_wclosed_error = fopen(gp->iop.closed_error_filename, mode);
        if (fp_list->fp_wclosed_error == NULL) goto error;
    } else {
        fp_list->fp_wclosed_error = NULL;
    }
    if (strlen(gp->iop.lyapunov_filename) > 0) {
        fp_list->fp_wlyapunov = fopen(gp->iop.lyapunov_filename, mode);
        if (fp_list->fp_wlyapunov == NULL) goto error;
    } else {
        fp_list->fp_wlyapunov = NULL;
    }
    if (strlen(gp->iop.entropy_filename) > 0) {
        fp_list->fp_wentropy = fopen(gp->iop.entropy_filename, mode);
        if (fp_list->fp_wentropy == NULL) goto error;
    } else {
        fp_list->fp_wentropy = NULL;
    }
    if (strlen(gp->iop.period_filename) > 0) {
        fp_list->fp_wperiod = fopen(gp->iop.period_filename, mode);
        if (fp_list->fp_wperiod == NULL) goto error;
    } else {
        fp_list->fp_wperiod = NULL;
    }
    return;
error:
    print_error_msg();
    exit(EXIT_FAILURE);
}
/* Close every stream opened by init_output_files and free the per-series
 * stream arrays.  NULL members (features that were disabled) are skipped. */
void free_output_files (struct output_files *fp_list)
{
    if (fp_list->fp_wstate_array) {
        for (int i = 0; i < fp_list->array_size; i++) {
            fclose(fp_list->fp_wstate_array[i]);
        }
        FREE(fp_list->fp_wstate_array);
    }
    if (fp_list->fp_wclosed_state_array) {
        for (int i = 0; i < fp_list->array_size; i++) {
            fclose(fp_list->fp_wclosed_state_array[i]);
        }
        FREE(fp_list->fp_wclosed_state_array);
    }
    if (fp_list->fp_wweight) {
        fclose(fp_list->fp_wweight);
    }
    if (fp_list->fp_wthreshold) {
        fclose(fp_list->fp_wthreshold);
    }
    if (fp_list->fp_wtau) {
        fclose(fp_list->fp_wtau);
    }
    if (fp_list->fp_wsigma) {
        fclose(fp_list->fp_wsigma);
    }
    if (fp_list->fp_winit) {
        fclose(fp_list->fp_winit);
    }
    if (fp_list->fp_wadapt_lr) {
        fclose(fp_list->fp_wadapt_lr);
    }
    if (fp_list->fp_werror) {
        fclose(fp_list->fp_werror);
    }
    if (fp_list->fp_wclosed_error) {
        fclose(fp_list->fp_wclosed_error);
    }
    if (fp_list->fp_wlyapunov) {
        fclose(fp_list->fp_wlyapunov);
    }
    if (fp_list->fp_wentropy) {
        fclose(fp_list->fp_wentropy);
    }
    if (fp_list->fp_wperiod) {
        fclose(fp_list->fp_wperiod);
    }
}
/* Write the run's general (model + analysis) parameters to fp as
 * "#"-prefixed header comment lines. */
static void print_general_parameters (
        FILE *fp,
        const struct general_parameters *gp)
{
    fprintf(fp, "# seed = %lu\n", gp->mp.seed);
    if (gp->mp.use_adaptive_lr) {
        fprintf(fp, "# use_adaptive_lr\n");
    }
    fprintf(fp, "# rho = %f\n", gp->mp.rho);
    fprintf(fp, "# momentum = %f\n", gp->mp.momentum);
    fprintf(fp, "# delay_length = %d\n", gp->mp.delay_length);
    fprintf(fp, "# lambda = %f\n", gp->mp.lambda);
    fprintf(fp, "# alpha = %f\n", gp->mp.alpha);
    fprintf(fp, "# truncate_length = %d\n", gp->ap.truncate_length);
    fprintf(fp, "# block_length = %d\n", gp->ap.block_length);
    fprintf(fp, "# divide_num = %d\n", gp->ap.divide_num);
    fprintf(fp, "# lyapunov_spectrum_size = %d\n",
            gp->ap.lyapunov_spectrum_size);
    fprintf(fp, "# threshold_period = %g\n", gp->ap.threshold_period);
}
/* Write the network's structural parameters to fp as "#"-prefixed header
 * lines: layer sizes, output type (with softmax group membership), fixed
 * flags, per-target lengths, and the connectivity ranges of each
 * input-to-context, context-to-context, and context-to-output weight row. */
static void print_rnn_parameters (
        FILE *fp,
        const struct recurrent_neural_network *rnn)
{
    fprintf(fp, "# in_state_size = %d\n", rnn->rnn_p.in_state_size);
    fprintf(fp, "# c_state_size = %d\n", rnn->rnn_p.c_state_size);
    fprintf(fp, "# out_state_size = %d\n", rnn->rnn_p.out_state_size);
    if (rnn->rnn_p.output_type == STANDARD_TYPE) {
        fprintf(fp, "# output_type = STANDARD_TYPE\n");
    } else if (rnn->rnn_p.output_type == SOFTMAX_TYPE) {
        fprintf(fp, "# output_type = SOFTMAX_TYPE\n");
        /* list which output units belong to each softmax group */
        for (int c = 0; c < rnn->rnn_p.softmax_group_num; c++) {
            fprintf(fp, "# group%d = ", c);
            for (int i = 0; i < rnn->rnn_p.out_state_size; i++) {
                if (rnn->rnn_p.softmax_group_id[i] == c) {
                    fprintf(fp, "%d,", i);
                }
            }
            fprintf(fp, "\n");
        }
    }
    if (rnn->rnn_p.fixed_weight) {
        fprintf(fp, "# fixed_weight\n");
    }
    if (rnn->rnn_p.fixed_threshold) {
        fprintf(fp, "# fixed_threshold\n");
    }
    if (rnn->rnn_p.fixed_tau) {
        fprintf(fp, "# fixed_tau\n");
    }
    if (rnn->rnn_p.fixed_init_c_state) {
        fprintf(fp, "# fixed_init_c_state\n");
    }
    if (rnn->rnn_p.fixed_sigma) {
        fprintf(fp, "# fixed_sigma\n");
    }
    fprintf(fp, "# target_num = %d\n", rnn->series_num);
    for (int i = 0; i < rnn->series_num; i++) {
        fprintf(fp, "# target %d\tlength = %d\n", i, rnn->rnn_s[i].length);
    }
    fprintf(fp, "# prior_strength = %f\n", rnn->rnn_p.prior_strength);
    const struct rnn_parameters *rnn_p = &rnn->rnn_p;
    for (int i = 0; i < rnn_p->c_state_size; i++) {
        fprintf(fp, "# const_init_c[%d] = %d\n", i, rnn_p->const_init_c[i]);
    }
    /* connection lists are (begin,end) ranges terminated by begin == -1 */
    for (int i = 0; i < rnn_p->c_state_size; i++) {
        fprintf(fp, "# connection_weight_ci[%d] = ", i);
        int I = 0;
        while (rnn_p->connection_ci[i][I].begin != -1) {
            int begin = rnn_p->connection_ci[i][I].begin;
            int end = rnn_p->connection_ci[i][I].end;
            fprintf(fp, "(%d,%d)", begin, end);
            I++;
        }
        fprintf(fp, "\n");
    }
    for (int i = 0; i < rnn_p->c_state_size; i++) {
        fprintf(fp, "# connection_weight_cc[%d] = ", i);
        int I = 0;
        while (rnn_p->connection_cc[i][I].begin != -1) {
            int begin = rnn_p->connection_cc[i][I].begin;
            int end = rnn_p->connection_cc[i][I].end;
            fprintf(fp, "(%d,%d)", begin, end);
            I++;
        }
        fprintf(fp, "\n");
    }
    for (int i = 0; i < rnn_p->out_state_size; i++) {
        fprintf(fp, "# connection_weight_oc[%d] = ", i);
        int I = 0;
        while (rnn_p->connection_oc[i][I].begin != -1) {
            int begin = rnn_p->connection_oc[i][I].begin;
            int end = rnn_p->connection_oc[i][I].end;
            fprintf(fp, "(%d,%d)", begin, end);
            I++;
        }
        fprintf(fp, "\n");
    }
}
/* Append one tab-separated record to fp: epoch followed by all weight_ci
 * and weight_cc rows, then all weight_oc rows. */
static void print_rnn_weight (
        FILE *fp,
        long epoch,
        const struct rnn_parameters *rnn_p)
{
    fprintf(fp, "%ld", epoch);
    for (int i = 0; i < rnn_p->c_state_size; i++) {
        for (int j = 0; j < rnn_p->in_state_size; j++) {
            fprintf(fp, "\t%f", rnn_p->weight_ci[i][j]);
        }
        for (int j = 0; j < rnn_p->c_state_size; j++) {
            fprintf(fp, "\t%f", rnn_p->weight_cc[i][j]);
        }
    }
    for (int i = 0; i < rnn_p->out_state_size; i++) {
        for (int j = 0; j < rnn_p->c_state_size; j++) {
            fprintf(fp, "\t%f", rnn_p->weight_oc[i][j]);
        }
    }
    fprintf(fp, "\n");
}
/* Append one tab-separated record to fp: epoch followed by the context
 * thresholds and then the output thresholds. */
static void print_rnn_threshold (
        FILE *fp,
        long epoch,
        const struct rnn_parameters *rnn_p)
{
    fprintf(fp, "%ld", epoch);
    for (int i = 0; i < rnn_p->c_state_size; i++) {
        fprintf(fp, "\t%f", rnn_p->threshold_c[i]);
    }
    for (int i = 0; i < rnn_p->out_state_size; i++) {
        fprintf(fp, "\t%f", rnn_p->threshold_o[i]);
    }
    fprintf(fp, "\n");
}
/* Append one tab-separated record to fp: epoch followed by the time
 * constant tau of each context unit. */
static void print_rnn_tau (
        FILE *fp,
        long epoch,
        const struct rnn_parameters *rnn_p)
{
    fprintf(fp, "%ld", epoch);
    for (int i = 0; i < rnn_p->c_state_size; i++) {
        fprintf(fp, "\t%g", rnn_p->tau[i]);
    }
    fprintf(fp, "\n");
}
/* Append one record to fp: epoch, sigma, and variance. */
static void print_rnn_sigma (
        FILE *fp,
        long epoch,
        const struct rnn_parameters *rnn_p)
{
    fprintf(fp, "%ld\t%f\t%f\n", epoch, rnn_p->sigma, rnn_p->variance);
}
/* Write the initial context internal state of every training series to fp,
 * one "# epoch" header followed by one tab-separated row per series. */
static void print_rnn_init (
        FILE *fp,
        long epoch,
        const struct recurrent_neural_network *rnn)
{
    fprintf(fp, "# epoch = %ld\n", epoch);
    for (int i = 0; i < rnn->series_num; i++) {
        fprintf(fp, "%d", i);
        for (int j = 0; j < rnn->rnn_p.c_state_size; j++) {
            fprintf(fp, "\t%f", rnn->rnn_s[i].init_c_inter_state[j]);
        }
        fprintf(fp, "\n");
    }
}
/* Append one "<epoch>\t<adaptive learning rate>" line to fp. */
static void print_adapt_lr (
        FILE *fp,
        long epoch,
        double adapt_lr)
{
    fprintf(fp, "%ld", epoch);
    fprintf(fp, "\t%f\n", adapt_lr);
}
/* Append one record to fp: epoch followed by the per-series training error,
 * each normalized by (series length * output dimension).  Error evaluation
 * is parallelized over series when OpenMP is available. */
static void print_rnn_error (
        FILE *fp,
        long epoch,
        const struct recurrent_neural_network *rnn)
{
    double error[rnn->series_num];
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int i = 0; i < rnn->series_num; i++) {
        error[i] = rnn_get_error(rnn->rnn_s + i);
        error[i] /= rnn->rnn_s[i].length * rnn->rnn_p.out_state_size;
    }
    fprintf(fp, "%ld", epoch);
    for (int i = 0; i < rnn->series_num; i++) {
        fprintf(fp, "\t%g", error[i]);
    }
    fprintf(fp, "\n");
}
/* Write one time series to fp: one row per time step containing the step
 * index, interleaved teacher/output values per output unit, and the
 * internal state of each context unit. */
static void print_rnn_state (
        FILE *fp,
        const struct rnn_state *rnn_s)
{
    for (int n = 0; n < rnn_s->length; n++) {
        fprintf(fp, "%d", n);
        for (int i = 0; i < rnn_s->rnn_p->out_state_size; i++) {
            fprintf(fp, "\t%f", rnn_s->teach_state[n][i]);
            fprintf(fp, "\t%f", rnn_s->out_state[n][i]);
        }
        for (int i = 0; i < rnn_s->rnn_p->c_state_size; i++) {
            //fprintf(fp, "\t%f", rnn_s->c_state[n][i]);
            fprintf(fp, "\t%f", rnn_s->c_inter_state[n][i]);
        }
        fprintf(fp, "\n");
    }
}
/* Write every training series' state to its own stream in fp_array,
 * each preceded by an epoch/target header.  Safe to parallelize because
 * each iteration writes only to its own file. */
static void print_rnn_state_forall (
        FILE **fp_array,
        long epoch,
        const struct recurrent_neural_network *rnn)
{
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int i = 0; i < rnn->series_num; i++) {
        fprintf(fp_array[i], "# epoch = %ld\n", epoch);
        fprintf(fp_array[i], "# target:%d\n", i);
        print_rnn_state(fp_array[i], rnn->rnn_s + i);
    }
}
/* Fill spectrum[0..spectrum_size-1] with the Lyapunov spectrum of one
 * series.  Series no longer than truncate_length get an all-zero spectrum
 * instead of an estimate.  `spectrum` is a caller-owned output buffer; the
 * library call returns it (or NULL on failure, which aborts the process). */
static void compute_lyapunov_spectrum_of_rnn_state (
        const struct rnn_state *rnn_s,
        int spectrum_size,
        int delay_length,
        int truncate_length,
        double *spectrum)
{
    if (rnn_s->length > truncate_length) {
        struct rnn_lyapunov_info rl_info;
        init_rnn_lyapunov_info(&rl_info, rnn_s, delay_length, truncate_length);
        spectrum = rnn_lyapunov_spectrum(&rl_info, spectrum, spectrum_size);
        if (spectrum == NULL) {
            print_error_msg();
            exit(EXIT_FAILURE);
        }
        free_rnn_lyapunov_info(&rl_info);
    } else {
        /* too short to estimate: report zeros */
        for (int i = 0; i < spectrum_size; i++) {
            spectrum[i] = 0;
        }
    }
}
/* Append one record to fp: epoch followed by the Lyapunov spectrum of each
 * series.  A negative or oversized spectrum_size is clamped to the maximum
 * number of exponents (input size * delay + context size). */
static void print_lyapunov_spectrum_of_rnn (
        FILE *fp,
        long epoch,
        const struct recurrent_neural_network *rnn,
        int spectrum_size,
        int delay_length,
        int truncate_length)
{
    int max_num;
    // decides spectrum_size which is the number to evaluate Lyapunov exponents
    max_num = (rnn->rnn_p.in_state_size * delay_length) +
        rnn->rnn_p.c_state_size;
    if (max_num < spectrum_size || spectrum_size < 0) {
        spectrum_size = max_num;
    }
    if (spectrum_size <= 0) return;
    double **spectrum = NULL;
    MALLOC2(spectrum, rnn->series_num, spectrum_size);
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int i = 0; i < rnn->series_num; i++) {
        compute_lyapunov_spectrum_of_rnn_state(rnn->rnn_s + i, spectrum_size,
                delay_length, truncate_length, spectrum[i]);
    }
    fprintf(fp, "%ld", epoch);
    for (int i = 0; i < rnn->series_num; i++) {
        for (int j = 0; j < spectrum_size; j++) {
            fprintf(fp, "\t%f", spectrum[i][j]);
        }
    }
    fprintf(fp, "\n");
    FREE2(spectrum);
}
/* assigns an index to the vector with respect to indexed hypercubes in the
* R^dimension space */
/* Quantize x into one of divide_num equal-width cells of [min, max],
 * returning the cell index.  Values at or below the first cell's upper
 * edge map to 0; anything beyond max falls into the last cell. */
static inline int f2symbol (
        const double x,
        double min,
        double max,
        int divide_num)
{
    const double cell = (max - min) / divide_num;
    double upper = min;
    for (int k = 0; k < divide_num; k++) {
        upper += cell;
        if (x <= upper || k == divide_num - 1) {
            return k;
        }
    }
    return 0; /* reached only when divide_num < 1 */
}
/* Symbolize the teacher and output sequences of one series (after dropping
 * the first truncate_length steps) on a divide_num-cell grid, then compute
 * the KL divergence between their block distributions, the per-symbol block
 * entropies, and the generation rate.  Series no longer than
 * truncate_length yield all-zero statistics. */
static void compute_kl_divergence_of_rnn_state (
        const struct rnn_state *rnn_s,
        int truncate_length,
        int block_length,
        int divide_num,
        double *kl_div,
        double *entropy_t,
        double *entropy_o,
        double *gen_rate)
{
    if (rnn_s->length > truncate_length) {
        double min, max;
        int **sequence_t, **sequence_o;
        struct block_frequency bf_t, bf_o;
        const int length = rnn_s->length - truncate_length;
        /* symbolization range follows the output activation range */
        if (rnn_s->rnn_p->output_type == STANDARD_TYPE) {
            min = -1.0; max = 1.0;
        } else {
            min = 0.0; max = 1.0;
        }
        MALLOC2(sequence_t, length, rnn_s->rnn_p->out_state_size);
        MALLOC2(sequence_o, length, rnn_s->rnn_p->out_state_size);
        for (int n = 0; n < length; n++) {
            int N = n + truncate_length;
            for (int i = 0; i < rnn_s->rnn_p->out_state_size; i++) {
                sequence_t[n][i] = f2symbol(rnn_s->teach_state[N][i],
                        min, max, divide_num);
                sequence_o[n][i] = f2symbol(rnn_s->out_state[N][i], min,
                        max, divide_num);
            }
        }
        init_block_frequency(&bf_t, (const int* const*)sequence_t,
                rnn_s->rnn_p->out_state_size, length, block_length);
        init_block_frequency(&bf_o, (const int* const*)sequence_o,
                rnn_s->rnn_p->out_state_size, length, block_length);
        *kl_div = kullback_leibler_divergence(&bf_t, &bf_o);
        *entropy_t = block_entropy(&bf_t) / block_length;
        *entropy_o = block_entropy(&bf_o) / block_length;
        *gen_rate = generation_rate(&bf_t, &bf_o);
        free_block_frequency(&bf_t);
        free_block_frequency(&bf_o);
        FREE2(sequence_t);
        FREE2(sequence_o);
    } else {
        *kl_div = 0;
        *entropy_t = 0;
        *entropy_o = 0;
        *gen_rate = 0;
    }
}
/* Append one record to fp: epoch followed, per series, by KL divergence,
 * generation rate, teacher block entropy, and output block entropy. */
static void print_kl_divergence_of_rnn (
        FILE *fp,
        long epoch,
        const struct recurrent_neural_network *rnn,
        int truncate_length,
        int block_length,
        int divide_num)
{
    double kl_div[rnn->series_num];
    double entropy_t[rnn->series_num];
    double entropy_o[rnn->series_num];
    double gen_rate[rnn->series_num];
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int i = 0; i < rnn->series_num; i++) {
        compute_kl_divergence_of_rnn_state(rnn->rnn_s + i,
                truncate_length, block_length, divide_num, kl_div + i,
                entropy_t + i, entropy_o + i, gen_rate + i);
    }
    fprintf(fp, "%ld", epoch);
    for (int i = 0; i < rnn->series_num; i++) {
        fprintf(fp, "\t%g\t%g\t%g\t%g", kl_div[i], gen_rate[i], entropy_t[i],
                entropy_o[i]);
    }
    fprintf(fp, "\n");
}
/* Estimate the orbit period of one series: scan backwards from the final
 * step for the nearest earlier step whose combined context + output state
 * lies within `threshold` squared distance of the final state, and return
 * the step distance.  If no such step exists the full length is returned. */
static int get_period_of_rnn_state (
        const struct rnn_state *rnn_s,
        double threshold)
{
    int period = 1;
    for (int n = rnn_s->length - 2; n >= 0; n--, period++) {
        double d = 0;
        for (int i = 0; i < rnn_s->rnn_p->c_state_size; i++) {
            double x = rnn_s->c_state[rnn_s->length-1][i] -
                rnn_s->c_state[n][i];
            d += x * x;
        }
        for (int i = 0; i < rnn_s->rnn_p->out_state_size; i++) {
            double x = rnn_s->out_state[rnn_s->length-1][i] -
                rnn_s->out_state[n][i];
            d += x * x;
        }
        if (d <= threshold) {
            break;
        }
    }
    return period;
}
/* Append one record to fp: epoch followed by each series' estimated
 * orbit period. */
static void print_period_of_rnn (
        FILE *fp,
        long epoch,
        const struct recurrent_neural_network *rnn,
        double threshold)
{
    int period[rnn->series_num];
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int i = 0; i < rnn->series_num; i++) {
        period[i] = get_period_of_rnn_state(rnn->rnn_s + i, threshold);
    }
    fprintf(fp, "%ld", epoch);
    for (int i = 0; i < rnn->series_num; i++) {
        fprintf(fp, "\t%d", period[i]);
    }
    fprintf(fp, "\n");
}
/* Decide whether output should be written at this epoch.  The effective
 * interval is pi->interval, except in log-scale mode where it is the
 * largest power of ten not exceeding epoch, capped at pi->interval.
 * Output is enabled when epoch is a multiple of that interval and lies
 * in [pi->init, pi->end]. */
static int enable_print (
        long epoch,
        const struct print_interval *pi)
{
    long step;
    if (pi->use_logscale_interval) {
        step = 1;
        while (epoch >= 10 * step) {
            step *= 10;
        }
        if (step > pi->interval) {
            step = pi->interval;
        }
    } else {
        step = pi->interval;
    }
    if (epoch % step != 0) {
        return 0;
    }
    return (epoch >= pi->init && epoch <= pi->end);
}
/* Log the model parameters that are due at this epoch: for each enabled
 * stream whose print interval matches, write one record.  Sigma and
 * adaptive-learning-rate streams are flushed immediately. */
static void print_parameters_with_epoch (
        long epoch,
        const struct general_parameters *gp,
        const struct recurrent_neural_network *rnn,
        struct output_files *fp_list)
{
    if (fp_list->fp_wweight &&
            enable_print(epoch, &gp->iop.interval_for_weight_file)) {
        print_rnn_weight(fp_list->fp_wweight, epoch, &rnn->rnn_p);
    }
    if (fp_list->fp_wthreshold &&
            enable_print(epoch, &gp->iop.interval_for_threshold_file)) {
        print_rnn_threshold(fp_list->fp_wthreshold, epoch, &rnn->rnn_p);
    }
    if (fp_list->fp_wtau &&
            enable_print(epoch, &gp->iop.interval_for_tau_file)) {
        print_rnn_tau(fp_list->fp_wtau, epoch, &rnn->rnn_p);
    }
    if (fp_list->fp_wsigma &&
            enable_print(epoch, &gp->iop.interval_for_sigma_file)) {
        print_rnn_sigma(fp_list->fp_wsigma, epoch, &rnn->rnn_p);
        fflush(fp_list->fp_wsigma);
    }
    if (fp_list->fp_winit &&
            enable_print(epoch, &gp->iop.interval_for_init_file)) {
        print_rnn_init(fp_list->fp_winit, epoch, rnn);
    }
    if (fp_list->fp_wadapt_lr &&
            enable_print(epoch, &gp->iop.interval_for_adapt_lr_file)) {
        print_adapt_lr(fp_list->fp_wadapt_lr, epoch, gp->inp.adapt_lr);
        fflush(fp_list->fp_wadapt_lr);
    }
}
/* Log open-loop (teacher-driven) results due at this epoch.  The forward
 * dynamics are computed lazily, at most once, and only if some enabled
 * stream's interval matches. */
static void print_open_loop_data_with_epoch (
        long epoch,
        const struct general_parameters *gp,
        struct recurrent_neural_network *rnn,
        struct output_files *fp_list)
{
    int compute_forward_dynamics = 0;
    if (fp_list->fp_werror &&
            enable_print(epoch, &gp->iop.interval_for_error_file)) {
        if (!compute_forward_dynamics) {
            rnn_forward_dynamics_forall(rnn);
            compute_forward_dynamics = 1;
        }
        print_rnn_error(fp_list->fp_werror, epoch, rnn);
        fflush(fp_list->fp_werror);
    }
    if (fp_list->fp_wstate_array &&
            enable_print(epoch, &gp->iop.interval_for_state_file)) {
        if (!compute_forward_dynamics) {
            rnn_forward_dynamics_forall(rnn);
            compute_forward_dynamics = 1;
        }
        print_rnn_state_forall(fp_list->fp_wstate_array, epoch, rnn);
        /* blank line separates records from successive epochs */
        for (int i = 0; i < fp_list->array_size; i++) {
            fprintf(fp_list->fp_wstate_array[i], "\n");
        }
    }
}
/* Run the closed-loop forward dynamics at most once per printing pass.
 * *computed is the caller's memo flag: it is set after the first run so
 * that later file sections reuse the already computed trajectories. */
static void ensure_closed_loop_dynamics (
struct recurrent_neural_network *rnn,
const struct general_parameters *gp,
int *computed)
{
    if (!*computed) {
        rnn_forward_dynamics_in_closed_loop_forall(rnn, gp->mp.delay_length);
        *computed = 1;
    }
}

/* Write closed-loop evaluation data (error, state, Lyapunov spectrum,
 * KL divergence, period) for this epoch to each output file that is open
 * and whose print interval is due.  The closed-loop dynamics are computed
 * lazily and at most once per call (the original repeated that check
 * verbatim in all five sections; it is now factored into a helper). */
static void print_closed_loop_data_with_epoch (
long epoch,
const struct general_parameters *gp,
struct recurrent_neural_network *rnn,
struct output_files *fp_list)
{
    int compute_forward_dynamics = 0;
    if (fp_list->fp_wclosed_error &&
            enable_print(epoch, &gp->iop.interval_for_closed_error_file)) {
        ensure_closed_loop_dynamics(rnn, gp, &compute_forward_dynamics);
        print_rnn_error(fp_list->fp_wclosed_error, epoch, rnn);
        fflush(fp_list->fp_wclosed_error);
    }
    if (fp_list->fp_wclosed_state_array &&
            enable_print(epoch, &gp->iop.interval_for_closed_state_file)) {
        ensure_closed_loop_dynamics(rnn, gp, &compute_forward_dynamics);
        print_rnn_state_forall(fp_list->fp_wclosed_state_array, epoch, rnn);
        /* blank line separates successive epochs in each state file */
        for (int i = 0; i < fp_list->array_size; i++) {
            fprintf(fp_list->fp_wclosed_state_array[i], "\n");
        }
    }
    if (fp_list->fp_wlyapunov &&
            enable_print(epoch, &gp->iop.interval_for_lyapunov_file)) {
        ensure_closed_loop_dynamics(rnn, gp, &compute_forward_dynamics);
        print_lyapunov_spectrum_of_rnn(fp_list->fp_wlyapunov, epoch, rnn,
                gp->ap.lyapunov_spectrum_size, gp->mp.delay_length,
                gp->ap.truncate_length);
        fflush(fp_list->fp_wlyapunov);
    }
    if (fp_list->fp_wentropy &&
            enable_print(epoch, &gp->iop.interval_for_entropy_file)) {
        ensure_closed_loop_dynamics(rnn, gp, &compute_forward_dynamics);
        print_kl_divergence_of_rnn(fp_list->fp_wentropy, epoch, rnn,
                gp->ap.truncate_length, gp->ap.block_length,
                gp->ap.divide_num);
        fflush(fp_list->fp_wentropy);
    }
    if (fp_list->fp_wperiod &&
            enable_print(epoch, &gp->iop.interval_for_period_file)) {
        ensure_closed_loop_dynamics(rnn, gp, &compute_forward_dynamics);
        print_period_of_rnn(fp_list->fp_wperiod, epoch, rnn,
                gp->ap.threshold_period);
        fflush(fp_list->fp_wperiod);
    }
}
/* Write the common file header: a title line followed by the general and
 * the network parameters. */
static void print_file_header (
FILE *fp,
const char *title,
const struct general_parameters *gp,
const struct recurrent_neural_network *rnn)
{
    fprintf(fp, "%s", title);
    print_general_parameters(fp, gp);
    print_rnn_parameters(fp, rnn);
}

/* Emit a descriptive header (title + parameter dump) at the top of every
 * open output file before the training loop starts.  The original repeated
 * the three-call header sequence thirteen times; it is now factored into
 * print_file_header(), with identical output.  Note both state-file arrays
 * intentionally use the same "# STATE FILE" title, as before. */
void print_training_main_begin (
const struct general_parameters *gp,
const struct recurrent_neural_network *rnn,
struct output_files *fp_list)
{
    if (fp_list->fp_wstate_array) {
        for (int i = 0; i < fp_list->array_size; i++) {
            print_file_header(fp_list->fp_wstate_array[i],
                    "# STATE FILE\n", gp, rnn);
        }
    }
    if (fp_list->fp_wclosed_state_array) {
        for (int i = 0; i < fp_list->array_size; i++) {
            print_file_header(fp_list->fp_wclosed_state_array[i],
                    "# STATE FILE\n", gp, rnn);
        }
    }
    if (fp_list->fp_wweight) {
        print_file_header(fp_list->fp_wweight, "# WEIGHT FILE\n", gp, rnn);
    }
    if (fp_list->fp_wthreshold) {
        print_file_header(fp_list->fp_wthreshold, "# THRESHOLD FILE\n", gp, rnn);
    }
    if (fp_list->fp_wtau) {
        print_file_header(fp_list->fp_wtau, "# TAU FILE\n", gp, rnn);
    }
    if (fp_list->fp_wsigma) {
        print_file_header(fp_list->fp_wsigma, "# SIGMA FILE\n", gp, rnn);
    }
    if (fp_list->fp_winit) {
        print_file_header(fp_list->fp_winit, "# INIT FILE\n", gp, rnn);
    }
    if (fp_list->fp_wadapt_lr) {
        print_file_header(fp_list->fp_wadapt_lr, "# ADAPT_LR FILE\n", gp, rnn);
    }
    if (fp_list->fp_werror) {
        print_file_header(fp_list->fp_werror, "# ERROR FILE\n", gp, rnn);
    }
    if (fp_list->fp_wclosed_error) {
        print_file_header(fp_list->fp_wclosed_error, "# ERROR FILE\n", gp, rnn);
    }
    if (fp_list->fp_wlyapunov) {
        print_file_header(fp_list->fp_wlyapunov, "# LYAPUNOV FILE\n", gp, rnn);
    }
    if (fp_list->fp_wentropy) {
        print_file_header(fp_list->fp_wentropy, "# ENTROPY FILE\n", gp, rnn);
    }
    if (fp_list->fp_wperiod) {
        print_file_header(fp_list->fp_wperiod, "# PERIOD FILE\n", gp, rnn);
    }
}
/* Called once per training epoch: writes parameters, open-loop data, and
 * closed-loop data to whichever output files are open and due at 'epoch'. */
void print_training_main_loop (
long epoch,
const struct general_parameters *gp,
struct recurrent_neural_network *rnn,
struct output_files *fp_list)
{
print_parameters_with_epoch(epoch, gp, rnn, fp_list);
print_open_loop_data_with_epoch(epoch, gp, rnn, fp_list);
print_closed_loop_data_with_epoch(epoch, gp, rnn, fp_list);
}
|
par_rap_communication.c | /******************************************************************************
* Copyright (c) 1998 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
/*--------------------------------------------------------------------------
 * hypre_GetCommPkgRTFromCommPkgA
 *
 * Builds a communication package for RT by filtering the existing
 * communication package of A: only the processors of A's recv/send lists
 * that are actually referenced by RT's off-diagonal columns are kept.
 * The send-map element list is obtained by exchanging global column
 * indices, then shifting them by first_col_diag.  The new package is
 * attached to RT via hypre_ParCSRMatrixCommPkg(RT).
 *-------------------------------------------------------------------------*/
HYPRE_Int
hypre_GetCommPkgRTFromCommPkgA( hypre_ParCSRMatrix *RT,
hypre_ParCSRMatrix *A,
HYPRE_Int *fine_to_coarse,
HYPRE_Int *tmp_map_offd)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(RT);
hypre_ParCSRCommPkg *comm_pkg_A = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
HYPRE_Int num_recvs_A = hypre_ParCSRCommPkgNumRecvs(comm_pkg_A);
HYPRE_Int *recv_procs_A = hypre_ParCSRCommPkgRecvProcs(comm_pkg_A);
HYPRE_Int *recv_vec_starts_A = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_A);
HYPRE_Int num_sends_A = hypre_ParCSRCommPkgNumSends(comm_pkg_A);
HYPRE_Int *send_procs_A = hypre_ParCSRCommPkgSendProcs(comm_pkg_A);
HYPRE_Int *send_map_starts_A = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A);
hypre_ParCSRCommPkg *comm_pkg;
HYPRE_Int num_recvs_RT;
HYPRE_Int *recv_procs_RT;
HYPRE_Int *recv_vec_starts_RT;
HYPRE_Int num_sends_RT;
HYPRE_Int *send_procs_RT;
HYPRE_Int *send_map_starts_RT;
HYPRE_Int *send_map_elmts_RT;
HYPRE_BigInt *col_map_offd_RT = hypre_ParCSRMatrixColMapOffd(RT);
HYPRE_Int num_cols_offd_RT = hypre_CSRMatrixNumCols( hypre_ParCSRMatrixOffd(RT));
HYPRE_BigInt first_col_diag = hypre_ParCSRMatrixFirstColDiag(RT);
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A));
HYPRE_BigInt *fine_to_coarse_offd = NULL;
HYPRE_BigInt *big_buf_data = NULL;
HYPRE_BigInt *send_big_elmts = NULL;
HYPRE_BigInt my_first_cpt;
HYPRE_Int i, j;
HYPRE_Int vec_len, vec_start;
HYPRE_Int num_procs, my_id;
HYPRE_Int ierr = 0;
HYPRE_Int num_requests;
HYPRE_Int offd_col, proc_num;
HYPRE_Int num_threads = hypre_NumThreads();
HYPRE_Int size, rest, ns, ne, start;
HYPRE_Int index;
HYPRE_Int *proc_mark;
HYPRE_Int *change_array;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
hypre_MPI_Request *requests;
hypre_MPI_Status *status;
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
/*--------------------------------------------------------------------------
 * determine num_recvs, recv_procs and recv_vec_starts for RT
 *--------------------------------------------------------------------------*/
/* proc_mark[i] will count how many off-diagonal columns of RT fall into
 * recv range i of A; a nonzero count means the processor is still needed */
proc_mark = hypre_CTAlloc(HYPRE_Int,  num_recvs_A, HYPRE_MEMORY_HOST);
for (i = 0; i < num_recvs_A; i++)
{
proc_mark[i] = 0;
}
proc_num = 0;
num_recvs_RT = 0;
if (num_cols_offd_RT)
{
/* tmp_map_offd is assumed sorted consistently with A's recv ranges,
 * so a single merged sweep suffices */
for (i = 0; i < num_recvs_A; i++)
{
for (j = recv_vec_starts_A[i]; j < recv_vec_starts_A[i + 1]; j++)
{
offd_col = tmp_map_offd[proc_num];
if (offd_col == j)
{
proc_mark[i]++;
proc_num++;
if (proc_num == num_cols_offd_RT) { break; }
}
}
if (proc_mark[i]) { num_recvs_RT++; }
if (proc_num == num_cols_offd_RT) { break; }
}
}
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_HOST);
big_buf_data = hypre_CTAlloc(HYPRE_BigInt, send_map_starts_A[num_sends_A], HYPRE_MEMORY_HOST);
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
my_first_cpt = hypre_ParCSRMatrixColStarts(RT)[0];
/* NOTE(review): coarse_counter is zero-initialized by hypre_CTAlloc and is
 * never written before being read below, so coarse_shift is always 0 and
 * fine_to_coarse is left unchanged.  This looks like a vestige of an
 * earlier per-thread counting pass -- confirm against upstream hypre
 * before removing. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) { coarse_shift = coarse_counter[j - 1]; }
/* split the n_fine rows into num_threads nearly equal ranges [ns,ne) */
size = n_fine / num_threads;
rest = n_fine - size * num_threads;
if (j < rest)
{
ns = j * size + j;
ne = (j + 1) * size + j + 1;
}
else
{
ns = j * size + rest;
ne = (j + 1) * size + rest;
}
for (i = ns; i < ne; i++)
{
fine_to_coarse[i] += coarse_shift;
}
}
/* pack the global coarse index of every fine point A sends, then exchange
 * so that fine_to_coarse_offd holds coarse indices for A's offd columns */
index = 0;
for (i = 0; i < num_sends_A; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg_A, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg_A, i + 1); j++)
big_buf_data[index++] = my_first_cpt +
(HYPRE_BigInt)fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg_A, j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg_A, big_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
/* translate RT's local offd columns to global coarse column indices */
for (i = 0; i < num_cols_offd_RT; i++)
{
col_map_offd_RT[i] = fine_to_coarse_offd[tmp_map_offd[i]];
}
hypre_TFree(big_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
//hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
/* compress A's recv list down to the processors RT actually needs */
recv_procs_RT = hypre_CTAlloc(HYPRE_Int, num_recvs_RT, HYPRE_MEMORY_HOST);
recv_vec_starts_RT = hypre_CTAlloc(HYPRE_Int, num_recvs_RT + 1, HYPRE_MEMORY_HOST);
j = 0;
recv_vec_starts_RT[0] = 0;
for (i = 0; i < num_recvs_A; i++)
{
if (proc_mark[i])
{
recv_procs_RT[j] = recv_procs_A[i];
recv_vec_starts_RT[j + 1] = recv_vec_starts_RT[j] + proc_mark[i];
j++;
}
}
/*--------------------------------------------------------------------------
 * send num_changes to recv_procs_A and receive change_array from send_procs_A
 *--------------------------------------------------------------------------*/
num_requests = num_recvs_A + num_sends_A;
requests = hypre_CTAlloc(hypre_MPI_Request, num_requests, HYPRE_MEMORY_HOST);
status = hypre_CTAlloc(hypre_MPI_Status, num_requests, HYPRE_MEMORY_HOST);
change_array = hypre_CTAlloc(HYPRE_Int, num_sends_A, HYPRE_MEMORY_HOST);
j = 0;
for (i = 0; i < num_sends_A; i++)
hypre_MPI_Irecv(&change_array[i], 1, HYPRE_MPI_INT, send_procs_A[i], 0, comm,
&requests[j++]);
for (i = 0; i < num_recvs_A; i++)
hypre_MPI_Isend(&proc_mark[i], 1, HYPRE_MPI_INT, recv_procs_A[i], 0, comm,
&requests[j++]);
hypre_MPI_Waitall(num_requests, requests, status);
hypre_TFree(proc_mark, HYPRE_MEMORY_HOST);
/*--------------------------------------------------------------------------
 * if change_array[i] is 0 , omit send_procs_A[i] in send_procs_RT
 *--------------------------------------------------------------------------*/
num_sends_RT = 0;
for (i = 0; i < num_sends_A; i++)
if (change_array[i])
{
num_sends_RT++;
}
send_procs_RT = hypre_CTAlloc(HYPRE_Int, num_sends_RT, HYPRE_MEMORY_HOST);
send_map_starts_RT = hypre_CTAlloc(HYPRE_Int, num_sends_RT + 1, HYPRE_MEMORY_HOST);
j = 0;
send_map_starts_RT[0] = 0;
for (i = 0; i < num_sends_A; i++)
{
if (change_array[i])
{
send_procs_RT[j] = send_procs_A[i];
send_map_starts_RT[j + 1] = send_map_starts_RT[j] + change_array[i];
j++;
}
}
/*--------------------------------------------------------------------------
 * generate send_map_elmts
 *--------------------------------------------------------------------------*/
/* exchange the global column indices each send target needs; 'requests'
 * is reused -- num_sends_RT + num_recvs_RT <= num_requests, so it fits */
send_map_elmts_RT = hypre_CTAlloc(HYPRE_Int, send_map_starts_RT[num_sends_RT], HYPRE_MEMORY_HOST);
send_big_elmts = hypre_CTAlloc(HYPRE_BigInt, send_map_starts_RT[num_sends_RT], HYPRE_MEMORY_HOST);
j = 0;
for (i = 0; i < num_sends_RT; i++)
{
vec_start = send_map_starts_RT[i];
vec_len = send_map_starts_RT[i + 1] - vec_start;
hypre_MPI_Irecv(&send_big_elmts[vec_start], vec_len, HYPRE_MPI_BIG_INT,
send_procs_RT[i], 0, comm, &requests[j++]);
}
for (i = 0; i < num_recvs_RT; i++)
{
vec_start = recv_vec_starts_RT[i];
vec_len = recv_vec_starts_RT[i + 1] - vec_start;
hypre_MPI_Isend(&col_map_offd_RT[vec_start], vec_len, HYPRE_MPI_BIG_INT,
recv_procs_RT[i], 0, comm, &requests[j++]);
}
hypre_MPI_Waitall(j, requests, status);
/* convert received global indices to local diag column indices */
for (i = 0; i < send_map_starts_RT[num_sends_RT]; i++)
{
send_map_elmts_RT[i] = (HYPRE_Int)(send_big_elmts[i] - first_col_diag);
}
/* assemble the new package; the arrays' ownership transfers to comm_pkg */
comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
hypre_ParCSRCommPkgComm(comm_pkg) = comm;
hypre_ParCSRCommPkgNumSends(comm_pkg) = num_sends_RT;
hypre_ParCSRCommPkgNumRecvs(comm_pkg) = num_recvs_RT;
hypre_ParCSRCommPkgSendProcs(comm_pkg) = send_procs_RT;
hypre_ParCSRCommPkgRecvProcs(comm_pkg) = recv_procs_RT;
hypre_ParCSRCommPkgRecvVecStarts(comm_pkg) = recv_vec_starts_RT;
hypre_ParCSRCommPkgSendMapStarts(comm_pkg) = send_map_starts_RT;
hypre_ParCSRCommPkgSendMapElmts(comm_pkg) = send_map_elmts_RT;
hypre_TFree(status, HYPRE_MEMORY_HOST);
hypre_TFree(requests, HYPRE_MEMORY_HOST);
hypre_TFree(send_big_elmts, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixCommPkg(RT) = comm_pkg;
hypre_TFree(change_array, HYPRE_MEMORY_HOST);
return ierr;
}
/*--------------------------------------------------------------------------
 * hypre_GenerateSendMapAndCommPkg
 *
 * Given the recv side of a communication pattern (num_recvs, recv_procs,
 * recv_vec_starts) plus the matching send processor list, determines the
 * send-side maps by exchanging sizes and global column indices, then
 * assembles the full communication package and attaches it to A.
 * The caller-supplied arrays are taken over by the package (not copied).
 *-------------------------------------------------------------------------*/
HYPRE_Int
hypre_GenerateSendMapAndCommPkg(MPI_Comm comm, HYPRE_Int num_sends, HYPRE_Int num_recvs,
HYPRE_Int *recv_procs, HYPRE_Int *send_procs,
HYPRE_Int *recv_vec_starts, hypre_ParCSRMatrix *A)
{
HYPRE_Int *send_map_starts;
HYPRE_Int *send_map_elmts;
HYPRE_Int i, j;
HYPRE_Int num_requests = num_sends + num_recvs;
hypre_MPI_Request *requests;
hypre_MPI_Status *status;
HYPRE_Int vec_len, vec_start;
hypre_ParCSRCommPkg *comm_pkg;
HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
HYPRE_BigInt first_col_diag = hypre_ParCSRMatrixFirstColDiag(A);
HYPRE_BigInt *send_big_elmts = NULL;
/*--------------------------------------------------------------------------
 * generate send_map_starts and send_map_elmts
 *--------------------------------------------------------------------------*/
/* step 1: each recv side tells its partner how many entries it expects;
 * the counts land in send_map_starts[1..num_sends] */
requests = hypre_CTAlloc(hypre_MPI_Request, num_requests, HYPRE_MEMORY_HOST);
status = hypre_CTAlloc(hypre_MPI_Status, num_requests, HYPRE_MEMORY_HOST);
send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST);
j = 0;
for (i = 0; i < num_sends; i++)
hypre_MPI_Irecv(&send_map_starts[i + 1], 1, HYPRE_MPI_INT, send_procs[i], 0, comm,
&requests[j++]);
for (i = 0; i < num_recvs; i++)
{
vec_len = recv_vec_starts[i + 1] - recv_vec_starts[i];
hypre_MPI_Isend(&vec_len, 1, HYPRE_MPI_INT, recv_procs[i], 0, comm, &requests[j++]);
}
hypre_MPI_Waitall(j, requests, status);
/* turn the per-processor counts into a prefix-sum start array */
send_map_starts[0] = 0;
for (i = 0; i < num_sends; i++)
{
send_map_starts[i + 1] += send_map_starts[i];
}
/* step 2: exchange the global column indices themselves */
send_map_elmts = hypre_CTAlloc(HYPRE_Int, send_map_starts[num_sends], HYPRE_MEMORY_HOST);
send_big_elmts = hypre_CTAlloc(HYPRE_BigInt, send_map_starts[num_sends], HYPRE_MEMORY_HOST);
j = 0;
for (i = 0; i < num_sends; i++)
{
vec_start = send_map_starts[i];
vec_len = send_map_starts[i + 1] - vec_start;
hypre_MPI_Irecv(&send_big_elmts[vec_start], vec_len, HYPRE_MPI_BIG_INT,
send_procs[i], 0, comm, &requests[j++]);
}
for (i = 0; i < num_recvs; i++)
{
vec_start = recv_vec_starts[i];
vec_len = recv_vec_starts[i + 1] - vec_start;
hypre_MPI_Isend(&col_map_offd[vec_start], vec_len, HYPRE_MPI_BIG_INT,
recv_procs[i], 0, comm, &requests[j++]);
}
hypre_MPI_Waitall(j, requests, status);
/* convert received global indices to local diag column indices */
for (i = 0; i < send_map_starts[num_sends]; i++)
{
send_map_elmts[i] = (HYPRE_Int)(send_big_elmts[i] - first_col_diag);
}
comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
hypre_ParCSRCommPkgComm(comm_pkg) = comm;
hypre_ParCSRCommPkgNumSends(comm_pkg) = num_sends;
hypre_ParCSRCommPkgNumRecvs(comm_pkg) = num_recvs;
hypre_ParCSRCommPkgSendProcs(comm_pkg) = send_procs;
hypre_ParCSRCommPkgRecvProcs(comm_pkg) = recv_procs;
hypre_ParCSRCommPkgRecvVecStarts(comm_pkg) = recv_vec_starts;
hypre_ParCSRCommPkgSendMapStarts(comm_pkg) = send_map_starts;
hypre_ParCSRCommPkgSendMapElmts(comm_pkg) = send_map_elmts;
hypre_TFree(status, HYPRE_MEMORY_HOST);
hypre_TFree(requests, HYPRE_MEMORY_HOST);
hypre_TFree(send_big_elmts, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixCommPkg(A) = comm_pkg;
return 0;
}
|
GB_binop__islt_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__islt_int8)
// A.*B function (eWiseMult): GB (_AemultB_01__islt_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__islt_int8)
// A.*B function (eWiseMult): GB (_AemultB_03__islt_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_int8)
// A*D function (colscale): GB (_AxD__islt_int8)
// D*A function (rowscale): GB (_DxB__islt_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__islt_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__islt_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_int8)
// C=scalar+B GB (_bind1st__islt_int8)
// C=scalar+B' GB (_bind1st_tran__islt_int8)
// C=A+scalar GB (_bind2nd__islt_int8)
// C=A'+scalar GB (_bind2nd_tran__islt_int8)
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLT || GxB_NO_INT8 || GxB_NO_ISLT_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The kernel loop lives in the included template; this wrapper only binds
// the int8_t ISLT operator through the GB_* macros defined above.
GrB_Info GB (_Cdense_ewise3_noaccum__islt_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// this operator/type was compiled out (see GB_control.h); GrB_NO_VALUE
// tells the caller to fall back to the generic method
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// B has been pre-sliced by the caller into B_ntasks tasks for B_nthreads
// threads (B_ek_slicing); the loop itself is in the included template.
GrB_Info GB (_Cdense_accumB__islt_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;     // compiled out; caller uses the generic case
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// p_bwork points to the scalar b (as GB_void); it is unboxed to int8_t
// before the template loop runs.
GrB_Info GB (_Cdense_accumb__islt_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;     // compiled out; caller uses the generic case
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable: the block above always returns (generated-code artifact)
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Scales each column of A by the matching diagonal entry of D; the result
// values are written directly into C->x (exposed here as Cx).
GrB_Info GB (_AxD__islt_int8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;     // compiled out; caller uses the generic case
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Row-scale counterpart of _AxD: each row of B is scaled by the matching
// diagonal entry of D, with results written into C->x.
GrB_Info GB (_DxB__islt_int8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;     // compiled out; caller uses the generic case
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Set-union ewise add: C(i,j) gets A, B, or islt(A,B) depending on which
// inputs are present.  The WERK workspaces below are declared for the
// template and released by GB_FREE_WORK before returning.
GrB_Info GB (_AaddB__islt_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;     // compiled out; caller uses the generic case
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// Set-intersection ewise multiply (method 01): C(i,j) = islt(A(i,j),B(i,j))
// only where both A and B have entries.
GrB_Info GB (_AemultB_01__islt_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;     // compiled out; caller uses the generic case
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// Ewise multiply, method 02: A is sparse/hypersparse, B is bitmap/full.
// GB_BINOP_FLIP is 0 for ISLT (see the macro above), so only the
// unflipped branch below is compiled in.
GrB_Info GB (_AemultB_02__islt_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;     // compiled out; caller uses the generic case
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant.  For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef  GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef  GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef  GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Ewise multiply, method 03: M is sparse/hypersparse while A and B are
// both bitmap/full; iteration is driven by the mask (M_ek_slicing).
GrB_Info GB (_AemultB_03__islt_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;     // compiled out; caller uses the generic case
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Ewise multiply producing a bitmap C, optionally under a (complemented)
// mask; the template dispatches on ewise_method.
GrB_Info GB (_AemultB_bitmap__islt_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;     // compiled out; caller uses the generic case
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Apply z = islt(x, Bx[p]) for every entry of B, with the scalar x bound
// as the first operand.  Bb is B's bitmap (NULL when B is full).
GrB_Info GB (_bind1st__islt_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int8_t x = (*((int8_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (GBB (Bb, p))
        {
            int8_t bij = GBX (Bx, p, false) ;
            Cx [p] = (x < bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply z = islt(Ax[p], y) for every entry of A, with the scalar y bound
// as the second operand.  Ab is A's bitmap (NULL when A is full).
GrB_Info GB (_bind2nd__islt_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (GBB (Ab, p))
        {
            int8_t aij = GBX (Ax, p, false) ;
            Cx [p] = (aij < y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c: here it computes
// Cx[pC] = islt(x, A'(i,j)) with the scalar bound first.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x < aij) ; \
}
GrB_Info GB (_bind1st_tran__islt_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE to its file-level definition for code that follows
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c: here it computes
// Cx[pC] = islt(A'(i,j), y) with the scalar bound second.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij < y) ; \
}
GrB_Info GB (_bind2nd_tran__islt_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;     // compiled out; caller uses the generic case
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Grid3D.h | //
// Grid3D.h
// Copyright (c) 2016-2018
// author: Douglas Creel
//
#ifndef GRID3D_H
#define GRID3D_H
#include "Vesuvius.h"
enum GRIDBEHAVIOR {
CLAMPED,
REPEAT,
ZERO
};
template <typename T>
class Grid3D
{
public:
Grid3D()
{
}
~Grid3D()
{
}
void init(int nx, int ny, int nz, T val)
{
m_nvoxels = nx*ny*nz;
m_xsize = nx;
m_ysize = ny;
m_zsize = nz;
data.resize(m_nvoxels);
#pragma omp parallel for
for (int i = 0; i < m_nvoxels; i++)
{
data[i] = val;
}
}
int IX(int x, int y, int z)
{
if (!(x < m_xsize)) x = m_xsize - 1;
if (!(y < m_ysize)) y = m_ysize - 1;
if (!(z < m_zsize)) z = m_zsize - 1;
if (!(x > 0)) x = 0;
if (!(y > 0)) y = 0;
if (!(z > 0)) z = 0;
return x * m_ysize * m_zsize + y * m_zsize + z;
}
inline void set(T f, int x, int y, int z)
{
data[IX(x, y, z)] = f;
}
typename T& operator() (int x, int y, int z)
{
if (m_behavior == CLAMPED)
{
if (!(x < m_xsize)) x = m_xsize - 1;
if (!(y < m_ysize)) y = m_ysize - 1;
if (!(z < m_zsize)) z = m_zsize - 1;
if (!(x > 0)) x = 0;
if (!(y > 0)) y = 0;
if (!(z > 0)) z = 0;
}
else if (m_behavior == ZERO)
{
T retval;
if ((x < 0 || x > m_xsize) || (y < 0 || y > m_ysize) || (z < 0 || z > m_zsize)) return retval;
}
assert(x >= 0 && x < m_xsize);
assert(y >= 0 && y < m_ysize);
assert(z >= 0 && z < m_zsize);
return data[IX(x, y, z)];
}
inline typename T& operator[] (int i)
{
assert(i >= 0 && i < m_nvoxels);
return data[i];
}
inline void setIndexValue(int i, T f)
{
data[i] = f;
}
inline int getNumVoxels()
{
return m_nvoxels;
}
inline int getXSize()
{
return m_xsize;
}
inline int getYSize()
{
return m_ysize;
}
inline int getZSize()
{
return m_zsize;
}
inline void setBoundaryBehavior(GRIDBEHAVIOR mode)
{
m_behavior = mode;
}
inline void setZero()
{
#pragma omp parallel for
for (int i = 0; i < m_nvoxels; i++)
{
data[i] = 0.0;
}
}
inline T maxval() const
{
T r = 0;
for (int i = 0; i < m_nvoxels; i++)
if (!(std::fabs(data[i]) <= r)) r = std::fabs(data[i]);
return r;
}
inline void clear()
{
data.clear();
}
typename T trilinearInterpolation(T x, T y, T z)
{
T retval;
T c0, c1, c2, c3, c4, c5, c6, c7;
int i = (int)abs(floor(x));
int j = (int)abs(floor(y));
int k = (int)abs(floor(z));
c0 = (i + 1 - x) * (j + 1 - y) * (k + 1 - z) * data[IX(i, j, k)];
c1 = (x - i) * (j + 1 - y) * (k + 1 - z) * data[IX(i + 1, j, k)];
c2 = (i + 1 - x) * (y - j) * (k + 1 - z) * data[IX(i, j + 1, k)];
c3 = (x - i) * (y - j) * (k + 1 - z) * data[IX(i + 1, j + 1, k)];
c4 = (i + 1 - x) * (j + 1 - y) * (z - k) * data[IX(i, j, k + 1)];
c5 = (x - i) * (j + 1 - y) * (z - k) * data[IX(i + 1, j, k + 1)];
c6 = (i + 1 - x) * (y - j) * (z - k) * data[IX(i, j + 1, k + 1)];
c7 = (x - i) * (y - j) * (z - k) * data[IX(i + 1, j + 1, k + 1)];
retval = c0 + c1 + c2 + c3 + c4 + c5 + c6 + c7;
return retval;
}
//
// code attribution: "Visual Simulation of Smoke", Fedkiw, Stam, Jensen
//
typename T monotonicCubicInterpolation(T t, T f[4])
{
T retval;
T d_k = T(0.5) * (f[2] - f[0]);
T d_k1 = T(0.5) * (f[3] - f[1]);
T delta_k = f[2] - f[1];
if (delta_k == static_cast<T>(0))
{
d_k = static_cast<T>(0);
d_k1 = static_cast<T>(0);
}
T a0 = f[1];
T a1 = d_k;
T a2 = (T(3) * delta_k) - (T(2) * d_k) - d_k1;
T a3 = d_k + d_k1 - (T(2) * delta_k);
T t1 = t;
T t2 = t * t;
T t3 = t2 * t1;
retval = a3 * t3 + a2 * t2 + a1 * t1 + a0;
return retval;
}
typename T cubicInterpolation(T x, T p[4])
{
T retval;
retval = p[1] + 0.5 * x * (p[2] - p[0] + x * (2.0 * p[0] - 5.0 * p[1] + 4.0 * p[2] - p[3] + x * (3.0 * (p[1] - p[2]) + p[3] - p[0])));
return retval;
}
typename T bicubicInterpolation(T x, T y, T p[4][4])
{
T retval;
T interps[4];
interps[0] = cubicInterpolation(y, p[0]);
interps[1] = cubicInterpolation(y, p[1]);
interps[2] = cubicInterpolation(y, p[2]);
interps[3] = cubicInterpolation(y, p[3]);
//retval = monotonicCubicInterpolation(x, interps);
retval = cubicInterpolation(x, interps);
return retval;
}
typename T tricubicInterpolation(T x, T y, T z)
{
T retval;
int i = (int)abs(floor(x));
int j = (int)abs(floor(y));
int k = (int)abs(floor(z));
T p[4][4][4];
for (int nj = j - 1, xj = 0; nj < j + 3; nj++, xj++)
{
for (int nk = k - 1, xk = 0; nk < k + 3; nk++, xk++)
{
p[0][xj][xk] = data[IX(i - 1, nj, nk)];
p[1][xj][xk] = data[IX(i, nj, nk)];
p[2][xj][xk] = data[IX(i + 1, nj, nk)];
p[3][xj][xk] = data[IX(i + 2, nj, nk)];
}
}
T interps[4];
interps[0] = bicubicInterpolation(y - j, z - k, p[0]);
interps[1] = bicubicInterpolation(y - j, z - k, p[1]);
interps[2] = bicubicInterpolation(y - j, z - k, p[2]);
interps[3] = bicubicInterpolation(y - j, z - k, p[3]);
//retval = monotonicCubicInterpolation(x - i, interps);
retval = cubicInterpolation(x - i, interps);
return retval;
}
private:
// Flat voxel storage; IX(x, y, z) maps a 3-D coordinate to an index.
std::vector<T> data;
// Grid extents along each axis.
int m_xsize, m_ysize, m_zsize;
// Cached total element count used for bounds checks and full-grid loops.
int m_nvoxels;
// How out-of-range coordinates are treated by operator() (default: clamp).
GRIDBEHAVIOR m_behavior = CLAMPED;
};
#endif
|
GB_unop__asin_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__asin_fc64_fc64
// op(A') function: GB_unop_tran__asin_fc64_fc64
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = casin (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = casin (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = casin (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ASIN || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = casin (Ax [p]) for all entries. Auto-generated kernel: the GB_*
// macros above select the operator and types; do not hand-edit the logic.
GrB_Info GB_unop_apply__asin_fc64_fc64
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// dense/sparse case: every one of the anz entries is present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = casin (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = casin (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = casin (A'): the transpose loop lives in the shared template
// GB_unop_transpose.c, which expands using this file's GB_* macros.
GrB_Info GB_unop_tran__asin_fc64_fc64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__identity_uint32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint32_fc32)
// op(A') function: GB (_unop_tran__identity_uint32_fc32)
// C type: uint32_t
// A type: GxB_FC32_t
// cast: uint32_t cij = GB_cast_to_uint32_t ((double) crealf (aij))
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint32_t z = GB_cast_to_uint32_t ((double) crealf (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint32_t z = GB_cast_to_uint32_t ((double) crealf (aij)) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (uint32_t) crealf (Ax [p]): typecast each single-complex entry to
// uint32 (real part only). Auto-generated kernel; do not hand-edit the logic.
GrB_Info GB (_unop_apply__identity_uint32_fc32)
(
uint32_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
// dense/sparse case: every one of the anz entries is present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
uint32_t z = GB_cast_to_uint32_t ((double) crealf (aij)) ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
uint32_t z = GB_cast_to_uint32_t ((double) crealf (aij)) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (uint32_t) A': transpose with typecast; the loop lives in the shared
// template GB_unop_transpose.c, expanded with this file's GB_* macros.
GrB_Info GB (_unop_tran__identity_uint32_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__isgt_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isgt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_01__isgt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__isgt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_03__isgt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isgt_uint8)
// A*D function (colscale): GB (_AxD__isgt_uint8)
// D*A function (rowscale): GB (_DxB__isgt_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__isgt_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__isgt_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isgt_uint8)
// C=scalar+B GB (_bind1st__isgt_uint8)
// C=scalar+B' GB (_bind1st_tran__isgt_uint8)
// C=A+scalar GB (_bind2nd__isgt_uint8)
// C=A'+scalar GB (_bind2nd_tran__isgt_uint8)
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGT || GxB_NO_UINT8 || GxB_NO_ISGT_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Disabled stub: the dense C += A+B kernel is only generated for the
// accumulable ops listed below; ISGT is not one of them, so this variant
// is compiled out.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; the loop lives in the shared
// template, expanded with this file's GB_BINOP (isgt) macro.
GrB_Info GB (_Cdense_ewise3_noaccum__isgt_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse, sliced by B_ek_slicing; the loop
// lives in the shared subassign template.
GrB_Info GB (_Cdense_accumB__isgt_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is an untyped scalar (decoded to uint8_t);
// the loop lives in the shared subassign template.
// Fix: the generated code had a second `return (GrB_SUCCESS) ;` after the
// braced block that already returns — unreachable dead code, removed.
GrB_Info GB (_Cdense_accumb__isgt_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, applying the
// isgt operator; the loop lives in the shared colscale template.
GrB_Info GB (_AxD__isgt_uint8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, applying the
// isgt operator; the loop lives in the shared rowscale template.
GrB_Info GB (_DxB__isgt_uint8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd C = A+B (optionally masked by M) with the isgt operator; all of
// the set-union iteration lives in the shared GB_add_template.c.
GrB_Info GB (_AaddB__isgt_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Workspaces are declared here and freed by GB_FREE_WORK below; the
// template allocates/fills them as needed.
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult C = A.*B (optionally masked by M) with the isgt operator; the
// set-intersection iteration lives in the shared GB_emult_01_meta.c.
GrB_Info GB (_AemultB_01__isgt_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<#M> = A.*B where A is sparse/hyper and B is bitmap/full.
// GB_FLIPPED selects fmult(y,x) vs fmult(x,y) inside the template; for isgt
// GB_BINOP_FLIP is 0, so only the unflipped branch below is compiled.
GrB_Info GB (_AemultB_02__isgt_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<M> = A.*B where M is sparse/hyper and both A and B are
// bitmap/full; the loop lives in the shared GB_emult_03_template.c.
GrB_Info GB (_AemultB_03__isgt_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult with a bitmap result: C=A.*B, C<M>=A.*B, or C<!M>=A.*B; the
// loop lives in the shared GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__isgt_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x > Bx [p]) for every entry present in B, with the scalar x
// bound as the first operand. Bb is B's bitmap (NULL-tolerant via GBB).
GrB_Info GB (_bind1st__isgt_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Typed views of the untyped arguments.
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Bx = (uint8_t *) Bx_input ;
const uint8_t x = (*((uint8_t *) x_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries absent from B's bitmap
if (!GBB (Bb, p)) continue ;
const uint8_t bij = Bx [p] ;
Cx [p] = (x > bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] > y) for every entry present in A, with the scalar y
// bound as the second operand. Ab is A's bitmap (NULL-tolerant via GBB).
GrB_Info GB (_bind2nd__isgt_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Typed views of the untyped arguments.
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
const uint8_t y = (*((uint8_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries absent from A's bitmap
if (!GBB (Ab, p)) continue ;
const uint8_t aij = Ax [p] ;
Cx [p] = (aij > y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (x > aij) ; \
}
// C = (x > A'): transpose A and apply isgt with the scalar bound first,
// via the GB_CAST_OP defined just above and the shared transpose template.
GrB_Info GB (_bind1st_tran__isgt_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the rest of the file (here A's and B's types are
// both uint8_t, so the text is identical)
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (aij > y) ; \
}
// C = (A' > y): transpose A and apply isgt with the scalar bound second,
// via the GB_CAST_OP defined just above and the shared transpose template.
GrB_Info GB (_bind2nd_tran__isgt_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
segment.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS EEEEE GGGG M M EEEEE N N TTTTT %
% SS E G MM MM E NN N T %
% SSS EEE G GGG M M M EEE N N N T %
% SS E G G M M E N NN T %
% SSSSS EEEEE GGGG M M EEEEE N N T %
% %
% %
% MagickCore Methods to Segment an Image with Thresholding Fuzzy c-Means %
% %
% Software Design %
% Cristy %
% April 1993 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Segment segments an image by analyzing the histograms of the color
% components and identifying units that are homogeneous with the fuzzy
% c-means technique. The scale-space filter analyzes the histograms of
% the three color components of the image and identifies a set of
% classes. The extents of each class is used to coarsely segment the
% image with thresholding. The color associated with each class is
% determined by the mean color of all pixels within the extents of a
% particular class. Finally, any unclassified pixels are assigned to
% the closest class with the fuzzy c-means technique.
%
% The fuzzy c-Means algorithm can be summarized as follows:
%
% o Build a histogram, one for each color component of the image.
%
% o For each histogram, successively apply the scale-space filter and
% build an interval tree of zero crossings in the second derivative
% at each scale. Analyze this scale-space ``fingerprint'' to
% determine which peaks and valleys in the histogram are most
% predominant.
%
% o The fingerprint defines intervals on the axis of the histogram.
% Each interval contains either a minima or a maxima in the original
% signal. If each color component lies within the maxima interval,
% that pixel is considered ``classified'' and is assigned an unique
% class number.
%
% o Any pixel that fails to be classified in the above thresholding
% pass is classified using the fuzzy c-Means technique. It is
% assigned to one of the classes discovered in the histogram analysis
% phase.
%
% The fuzzy c-Means technique attempts to cluster a pixel by finding
% the local minima of the generalized within group sum of squared error
% objective function. A pixel is assigned to the closest class of
% which the fuzzy membership has a maximum value.
%
% Segment is strongly based on software written by Andy Gallo,
% University of Delaware.
%
% The following reference was used in creating this program:
%
% Young Won Lim, Sang Uk Lee, "On The Color Image Segmentation
% Algorithm Based on the Thresholding and the Fuzzy c-Means
% Techniques", Pattern Recognition, Volume 23, Number 9, pages
% 935-952, 1990.
%
%
*/
#include "magick/studio.h"
#include "magick/cache.h"
#include "magick/color.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
/*
Define declarations.
*/
#define MaxDimension 3
#define DeltaTau 0.5f
#if defined(FastClassify)
#define WeightingExponent 2.0
#define SegmentPower(ratio) (ratio)
#else
#define WeightingExponent 2.5
#define SegmentPower(ratio) pow(ratio,(double) (1.0/(weighting_exponent-1.0)));
#endif
#define Tau 5.2f
/*
Typedef declarations.
*/
typedef struct _ExtentPacket
{
MagickRealType
center;
ssize_t
index,
left,
right;
} ExtentPacket;
typedef struct _Cluster
{
struct _Cluster
*next;
ExtentPacket
red,
green,
blue;
ssize_t
count,
id;
} Cluster;
typedef struct _IntervalTree
{
MagickRealType
tau;
ssize_t
left,
right;
MagickRealType
mean_stability,
stability;
struct _IntervalTree
*sibling,
*child;
} IntervalTree;
typedef struct _ZeroCrossing
{
MagickRealType
tau,
histogram[256];
short
crossings[256];
} ZeroCrossing;
/*
Constant declarations.
*/
static const int
Blue = 2,
Green = 1,
Red = 0,
SafeMargin = 3,
TreeLength = 600;
/*
Method prototypes.
*/
static MagickRealType
OptimalTau(const ssize_t *,const double,const double,const double,
const double,short *);
static ssize_t
DefineRegion(const short *,ExtentPacket *);
static void
FreeNodes(IntervalTree *),
InitializeHistogram(const Image *,ssize_t **,ExceptionInfo *),
ScaleSpace(const ssize_t *,const MagickRealType,MagickRealType *),
ZeroCrossHistogram(MagickRealType *,const MagickRealType,short *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Classify() defines one or more classes. Each pixel is thresholded to
% determine which class it belongs to. If the class is not identified it is
% assigned to the closest class based on the fuzzy c-Means technique.
%
% The format of the Classify method is:
%
% MagickBooleanType Classify(Image *image,short **extrema,
% const MagickRealType cluster_threshold,
% const MagickRealType weighting_exponent,
% const MagickBooleanType verbose)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
% o cluster_threshold: This MagickRealType represents the minimum number of
% pixels contained in a hexahedra before it can be considered valid
% (expressed as a percentage).
%
% o weighting_exponent: Specifies the membership weighting exponent.
%
% o verbose: A value greater than zero prints detailed information about
% the identified classes.
%
*/
static MagickBooleanType Classify(Image *image,short **extrema,
const MagickRealType cluster_threshold,
const MagickRealType weighting_exponent,const MagickBooleanType verbose)
{
#define SegmentImageTag "Segment/Image"
CacheView
*image_view;
Cluster
*cluster,
*head,
*last_cluster,
*next_cluster;
ExceptionInfo
*exception;
ExtentPacket
blue,
green,
red;
MagickOffsetType
progress;
MagickRealType
*free_squares;
MagickStatusType
status;
register ssize_t
i;
register MagickRealType
*squares;
size_t
number_clusters;
ssize_t
count,
y;
/*
Form clusters.
*/
cluster=(Cluster *) NULL;
head=(Cluster *) NULL;
(void) ResetMagickMemory(&red,0,sizeof(red));
(void) ResetMagickMemory(&green,0,sizeof(green));
(void) ResetMagickMemory(&blue,0,sizeof(blue));
while (DefineRegion(extrema[Red],&red) != 0)
{
green.index=0;
while (DefineRegion(extrema[Green],&green) != 0)
{
blue.index=0;
while (DefineRegion(extrema[Blue],&blue) != 0)
{
/*
Allocate a new class.
*/
if (head != (Cluster *) NULL)
{
cluster->next=(Cluster *) AcquireMagickMemory(
sizeof(*cluster->next));
cluster=cluster->next;
}
else
{
cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
head=cluster;
}
if (cluster == (Cluster *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Initialize a new class.
*/
cluster->count=0;
cluster->red=red;
cluster->green=green;
cluster->blue=blue;
cluster->next=(Cluster *) NULL;
}
}
}
if (head == (Cluster *) NULL)
{
/*
No classes were identified-- create one.
*/
cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
if (cluster == (Cluster *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Initialize a new class.
*/
cluster->count=0;
cluster->red=red;
cluster->green=green;
cluster->blue=blue;
cluster->next=(Cluster *) NULL;
head=cluster;
}
/*
Count the pixels for each cluster.
*/
status=MagickTrue;
count=0;
progress=0;
exception=(&image->exception);
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
if (((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) >=
(cluster->red.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) <=
(cluster->red.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) >=
(cluster->green.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) <=
(cluster->green.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) >=
(cluster->blue.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) <=
(cluster->blue.right+SafeMargin)))
{
/*
Count this pixel.
*/
count++;
cluster->red.center+=(MagickRealType) ScaleQuantumToChar(GetPixelRed(p));
cluster->green.center+=(MagickRealType)
ScaleQuantumToChar(GetPixelGreen(p));
cluster->blue.center+=(MagickRealType) ScaleQuantumToChar(GetPixelBlue(p));
cluster->count++;
break;
}
p++;
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_Classify)
#endif
proceed=SetImageProgress(image,SegmentImageTag,progress++,
2*image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
/*
Remove clusters that do not meet minimum cluster threshold.
*/
count=0;
last_cluster=head;
next_cluster=head;
for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
{
next_cluster=cluster->next;
if ((cluster->count > 0) &&
(cluster->count >= (count*cluster_threshold/100.0)))
{
/*
Initialize cluster.
*/
cluster->id=count;
cluster->red.center/=cluster->count;
cluster->green.center/=cluster->count;
cluster->blue.center/=cluster->count;
count++;
last_cluster=cluster;
continue;
}
/*
Delete cluster.
*/
if (cluster == head)
head=next_cluster;
else
last_cluster->next=next_cluster;
cluster=(Cluster *) RelinquishMagickMemory(cluster);
}
number_clusters=(size_t) count;
if (verbose != MagickFalse)
{
/*
Print cluster statistics.
*/
(void) FormatLocaleFile(stdout,"Fuzzy C-means Statistics\n");
(void) FormatLocaleFile(stdout,"===================\n\n");
(void) FormatLocaleFile(stdout,"\tCluster Threshold = %g\n",(double)
cluster_threshold);
(void) FormatLocaleFile(stdout,"\tWeighting Exponent = %g\n",(double)
weighting_exponent);
(void) FormatLocaleFile(stdout,"\tTotal Number of Clusters = %.20g\n\n",
(double) number_clusters);
/*
Print the total number of points per cluster.
*/
(void) FormatLocaleFile(stdout,"\n\nNumber of Vectors Per Cluster\n");
(void) FormatLocaleFile(stdout,"=============================\n\n");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
(void) FormatLocaleFile(stdout,"Cluster #%.20g = %.20g\n",(double)
cluster->id,(double) cluster->count);
/*
Print the cluster extents.
*/
(void) FormatLocaleFile(stdout,
"\n\n\nCluster Extents: (Vector Size: %d)\n",MaxDimension);
(void) FormatLocaleFile(stdout,"================");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
(void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
cluster->id);
(void) FormatLocaleFile(stdout,
"%.20g-%.20g %.20g-%.20g %.20g-%.20g\n",(double)
cluster->red.left,(double) cluster->red.right,(double)
cluster->green.left,(double) cluster->green.right,(double)
cluster->blue.left,(double) cluster->blue.right);
}
/*
Print the cluster center values.
*/
(void) FormatLocaleFile(stdout,
"\n\n\nCluster Center Values: (Vector Size: %d)\n",MaxDimension);
(void) FormatLocaleFile(stdout,"=====================");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
(void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
cluster->id);
(void) FormatLocaleFile(stdout,"%g %g %g\n",(double)
cluster->red.center,(double) cluster->green.center,(double)
cluster->blue.center);
}
(void) FormatLocaleFile(stdout,"\n");
}
if (number_clusters > 256)
ThrowBinaryException(ImageError,"TooManyClusters",image->filename);
/*
Speed up distance calculations.
*/
squares=(MagickRealType *) AcquireQuantumMemory(513UL,sizeof(*squares));
if (squares == (MagickRealType *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
squares+=255;
for (i=(-255); i <= 255; i++)
squares[i]=(MagickRealType) i*(MagickRealType) i;
/*
Allocate image colormap.
*/
if (AcquireImageColormap(image,number_clusters) == MagickFalse)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
i=0;
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
image->colormap[i].red=ScaleCharToQuantum((unsigned char)
(cluster->red.center+0.5));
image->colormap[i].green=ScaleCharToQuantum((unsigned char)
(cluster->green.center+0.5));
image->colormap[i].blue=ScaleCharToQuantum((unsigned char)
(cluster->blue.center+0.5));
i++;
}
/*
Do course grain classes.
*/
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Cluster
*cluster;
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelIndex(indexes+x,0);
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
if (((ssize_t) ScaleQuantumToChar(q->red) >=
(cluster->red.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(q->red) <=
(cluster->red.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(q->green) >=
(cluster->green.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(q->green) <=
(cluster->green.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(q->blue) >=
(cluster->blue.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(q->blue) <=
(cluster->blue.right+SafeMargin)))
{
/*
Classify this pixel.
*/
SetPixelIndex(indexes+x,cluster->id);
break;
}
}
if (cluster == (Cluster *) NULL)
{
MagickRealType
distance_squared,
local_minima,
numerator,
ratio,
sum;
register ssize_t
j,
k;
/*
Compute fuzzy membership.
*/
local_minima=0.0;
for (j=0; j < (ssize_t) image->colors; j++)
{
sum=0.0;
p=image->colormap+j;
distance_squared=squares[(ssize_t) ScaleQuantumToChar(q->red)-
(ssize_t) ScaleQuantumToChar(GetPixelRed(p))]+
squares[(ssize_t) ScaleQuantumToChar(q->green)-
(ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]+
squares[(ssize_t) ScaleQuantumToChar(q->blue)-
(ssize_t) ScaleQuantumToChar(GetPixelBlue(p))];
numerator=distance_squared;
for (k=0; k < (ssize_t) image->colors; k++)
{
p=image->colormap+k;
distance_squared=squares[(ssize_t) ScaleQuantumToChar(q->red)-
(ssize_t) ScaleQuantumToChar(GetPixelRed(p))]+
squares[(ssize_t) ScaleQuantumToChar(q->green)-
(ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]+
squares[(ssize_t) ScaleQuantumToChar(q->blue)-
(ssize_t) ScaleQuantumToChar(GetPixelBlue(p))];
ratio=numerator/distance_squared;
sum+=SegmentPower(ratio);
}
if ((sum != 0.0) && ((1.0/sum) > local_minima))
{
/*
Classify this pixel.
*/
local_minima=1.0/sum;
SetPixelIndex(indexes+x,j);
}
}
}
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_Classify)
#endif
proceed=SetImageProgress(image,SegmentImageTag,progress++,
2*image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
status&=SyncImage(image);
/*
Relinquish resources.
*/
for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
{
next_cluster=cluster->next;
cluster=(Cluster *) RelinquishMagickMemory(cluster);
}
squares-=255;
free_squares=squares;
free_squares=(MagickRealType *) RelinquishMagickMemory(free_squares);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s o l i d a t e C r o s s i n g s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConsolidateCrossings() guarantees that an even number of zero crossings
% always lie between two crossings.
%
% The format of the ConsolidateCrossings method is:
%
% ConsolidateCrossings(ZeroCrossing *zero_crossing,
% const size_t number_crossings)
%
% A description of each parameter follows.
%
% o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
% o number_crossings: This size_t specifies the number of elements
% in the zero_crossing array.
%
*/
static void ConsolidateCrossings(ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  register ssize_t
    i,
    j,
    k,
    l;

  ssize_t
    center,
    correct,
    count,
    left,
    right;

  /*
    Consolidate zero crossings: walk the scale-space levels from the
    coarsest (i == number_crossings-1) down to the finest, relocating each
    crossing at level i onto a crossing position taken from level i+1
    (center, left, or right candidate) that preserves an even number of
    crossings inside the interval.
  */
  for (i=(ssize_t) number_crossings-1; i >= 0; i--)
    for (j=0; j <= 255; j++)
    {
      if (zero_crossing[i].crossings[j] == 0)
        continue;
      /*
        Find the entry that is closest to j and still preserves the
        property that there are an even number of crossings between
        intervals.  left/right are the nearest level-(i+1) crossings on
        either side of j; center is j itself.
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      left=MagickMax(k,0);
      center=j;
      for (k=j+1; k < 255; k++)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      right=MagickMin(k,255);
      /*
        K is the zero crossing just left of j (at this level i); it bounds
        the interval within which parity is checked.
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i].crossings[k] != 0)
          break;
      if (k < 0)
        k=0;
      /*
        Check center for an even number of crossings between k and j.
        correct == -1 means no admissible position has been found yet.
      */
      correct=(-1);
      if (zero_crossing[i+1].crossings[j] != 0)
        {
          count=0;
          for (l=k+1; l < center; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (center != k))
            correct=center;
        }
      /*
        Check left for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < left; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (left != k))
            correct=left;
        }
      /*
        Check right for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < right; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (right != k))
            correct=right;
        }
      /*
        Move the crossing value from j to the admissible position, or
        drop it entirely when no admissible position exists.
      */
      l=(ssize_t) zero_crossing[i].crossings[j];
      zero_crossing[i].crossings[j]=0;
      if (correct != -1)
        zero_crossing[i].crossings[correct]=(short) l;
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e R e g i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineRegion() defines the left and right boundaries of a peak region.
%
% The format of the DefineRegion method is:
%
% ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
%
% A description of each parameter follows.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
% o extents: This pointer to an ExtentPacket represents the extents
% of a particular peak or valley of a color component.
%
*/
static ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
{
  /*
    Default the region to the full histogram until boundaries are found.
  */
  extents->left=0;
  extents->center=0.0;
  extents->right=255;
  /*
    Scan forward from the current index for the first positive entry
    (a maxima): this is the left boundary of the peak region.
  */
  while (extents->index <= 255)
  {
    if (extrema[extents->index] > 0)
      break;
    extents->index++;
  }
  if (extents->index > 255)
    return(MagickFalse);  /* histogram exhausted: no region exists */
  extents->left=extents->index;
  /*
    Continue scanning for the first negative entry (a minima); the bin
    just before it is the right boundary of the region.
  */
  while (extents->index <= 255)
  {
    if (extrema[extents->index] < 0)
      break;
    extents->index++;
  }
  extents->right=extents->index-1;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e r i v a t i v e H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DerivativeHistogram() determines the derivative of the histogram using
% central differencing.
%
% The format of the DerivativeHistogram method is:
%
% DerivativeHistogram(const MagickRealType *histogram,
% MagickRealType *derivative)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of MagickRealTypes representing the number
% of pixels for each intensity of a particular color component.
%
% o derivative: This array of MagickRealTypes is initialized by
% DerivativeHistogram to the derivative of the histogram using central
% differencing.
%
*/
static void DerivativeHistogram(const MagickRealType *histogram,
  MagickRealType *derivative)
{
  register ssize_t
    x;

  /*
    Endpoints: one-sided second-order polynomial estimates.
  */
  derivative[0]=(-1.5*histogram[0]+2.0*histogram[1]-0.5*histogram[2]);
  derivative[255]=(0.5*histogram[253]-2.0*histogram[254]+1.5*histogram[255]);
  /*
    Interior bins: central difference with unit spacing.
  */
  for (x=1; x < 255; x++)
    derivative[x]=(histogram[x+1]-histogram[x-1])/2.0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e D y n a m i c T h r e s h o l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDynamicThreshold() returns the dynamic threshold for an image.
%
% The format of the GetImageDynamicThreshold method is:
%
% MagickBooleanType GetImageDynamicThreshold(const Image *image,
% const double cluster_threshold,const double smooth_threshold,
% MagickPixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cluster_threshold: This MagickRealType represents the minimum number of
% pixels contained in a hexahedra before it can be considered valid
% (expressed as a percentage).
%
% o smooth_threshold: the smoothing threshold eliminates noise in the second
% derivative of the histogram. As the value is increased, you can expect a
% smoother second derivative.
%
% o pixel: return the dynamic threshold here.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageDynamicThreshold(const Image *image,
  const double cluster_threshold,const double smooth_threshold,
  MagickPixelPacket *pixel,ExceptionInfo *exception)
{
  Cluster
    *background,
    *cluster,
    *object,
    *head,
    *last_cluster,
    *next_cluster;

  ExtentPacket
    blue,
    green,
    red;

  MagickBooleanType
    proceed;

  MagickRealType
    threshold;

  register const PixelPacket
    *p;

  register ssize_t
    i,
    x;

  short
    *extrema[MaxDimension];

  ssize_t
    count,
    *histogram[MaxDimension],
    y;

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  GetMagickPixelPacket(image,pixel);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256UL,sizeof(**histogram));
    /*
      Fix: allocate extrema with sizeof(**extrema), not sizeof(**histogram)
      (the latter over-allocated; SegmentImage() uses the correct size).
    */
    extrema[i]=(short *) AcquireQuantumMemory(256UL,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /*
          Fix: include index i in the cleanup -- one member of the pair may
          have been acquired (RelinquishMagickMemory() accepts NULL).
        */
        for ( ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
        return(MagickFalse);
      }
  }
  /*
    Initialize histogram and locate the peaks/valleys of each channel.
  */
  InitializeHistogram(image,histogram,exception);
  (void) OptimalTau(histogram[Red],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Blue]);
  /*
    Form clusters: one class per (red,green,blue) peak-region triple.
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  (void) ResetMagickMemory(&red,0,sizeof(red));
  (void) ResetMagickMemory(&green,0,sizeof(green));
  (void) ResetMagickMemory(&blue,0,sizeof(blue));
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireMagickMemory(
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
            head=cluster;
          }
        if (cluster == (Cluster *) NULL)
          {
            /*
              Fix: release the partially built cluster list and the
              histogram/extrema arrays (the original leaked them all here).
            */
            for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
            {
              next_cluster=cluster->next;
              cluster=(Cluster *) RelinquishMagickMemory(cluster);
            }
            for (i=0; i < MaxDimension; i++)
            {
              extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
              histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
            }
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'",
              image->filename);
            return(MagickFalse);
          }
        /*
          Initialize a new class with the current region extents.
        */
        cluster->count=0;
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
        cluster->next=(Cluster *) NULL;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified-- create one.
      */
      cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        {
          /*
            Fix: release the histogram/extrema arrays before failing.
          */
          for (i=0; i < MaxDimension; i++)
          {
            extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
            histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
          }
          (void) ThrowMagickException(exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'",
            image->filename);
          return(MagickFalse);
        }
      /*
        Initialize a new class.
      */
      cluster->count=0;
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      cluster->next=(Cluster *) NULL;
      head=cluster;
    }
  /*
    Count the pixels for each cluster.
  */
  count=0;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        if (((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) >=
             (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) <=
             (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) >=
             (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) <=
             (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) >=
             (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) <=
             (cluster->blue.right+SafeMargin)))
          {
            /*
              Count this pixel; accumulate channel sums for the centroid.
            */
            count++;
            cluster->red.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelRed(p));
            cluster->green.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelGreen(p));
            cluster->blue.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelBlue(p));
            cluster->count++;
            break;
          }
      p++;
    }
    proceed=SetImageProgress(image,SegmentImageTag,(MagickOffsetType) y,
      2*image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Remove clusters that do not meet minimum cluster threshold.
  */
  count=0;
  last_cluster=head;
  next_cluster=head;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    if ((cluster->count > 0) &&
        (cluster->count >= (count*cluster_threshold/100.0)))
      {
        /*
          Keep cluster: finalize its id and centroid (sums -> means).
        */
        cluster->id=count;
        cluster->red.center/=cluster->count;
        cluster->green.center/=cluster->count;
        cluster->blue.center/=cluster->count;
        count++;
        last_cluster=cluster;
        continue;
      }
    /*
      Delete cluster.
    */
    if (cluster == head)
      head=next_cluster;
    else
      last_cluster->next=next_cluster;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  /*
    Object is the smallest surviving cluster; background the largest.
  */
  object=head;
  background=head;
  if (count > 1)
    {
      object=head->next;
      for (cluster=object; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count < object->count)
          object=cluster;
        cluster=cluster->next;
      }
      background=head->next;
      for (cluster=background; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count > background->count)
          background=cluster;
        cluster=cluster->next;
      }
    }
  if (background != (Cluster *) NULL)
    {
      /*
        The dynamic threshold is the midpoint between the background and
        object cluster centers, computed per channel.
      */
      threshold=(background->red.center+object->red.center)/2.0;
      pixel->red=(MagickRealType) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
      threshold=(background->green.center+object->green.center)/2.0;
      pixel->green=(MagickRealType) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
      threshold=(background->blue.center+object->blue.center)/2.0;
      pixel->blue=(MagickRealType) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
    }
  /*
    Relinquish resources.
  */
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I n i t i a l i z e H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializeHistogram() computes the histogram for an image.
%
% The format of the InitializeHistogram method is:
%
% InitializeHistogram(const Image *image,ssize_t **histogram)
%
% A description of each parameter follows.
%
% o image: Specifies a pointer to an Image structure; returned from
% ReadImage.
%
% o histogram: Specifies an array of integers representing the number
% of pixels for each intensity of a particular color component.
%
*/
static void InitializeHistogram(const Image *image,ssize_t **histogram,
  ExceptionInfo *exception)
{
  register const PixelPacket
    *p;

  register ssize_t
    x;

  ssize_t
    y;

  /*
    Clear the 256-bin histogram of each color component.
  */
  (void) memset(histogram[Red],0,256*sizeof(**histogram));
  (void) memset(histogram[Green],0,256*sizeof(**histogram));
  (void) memset(histogram[Blue],0,256*sizeof(**histogram));
  /*
    Tally one count per pixel per channel; stop early if a row of pixels
    cannot be read.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      histogram[Red][(ssize_t) ScaleQuantumToChar(GetPixelRed(p))]++;
      histogram[Green][(ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]++;
      histogram[Blue][(ssize_t) ScaleQuantumToChar(GetPixelBlue(p))]++;
      p++;
    }
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I n i t i a l i z e I n t e r v a l T r e e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializeIntervalTree() initializes an interval tree from the lists of
% zero crossings.
%
% The format of the InitializeIntervalTree method is:
%
% InitializeIntervalTree(const ZeroCrossing *zero_crossing,
% const size_t number_crossings)
%
% A description of each parameter follows.
%
% o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
% o number_crossings: This size_t specifies the number of elements
% in the zero_crossing array.
%
*/
static void InitializeList(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  /*
    Collect every childless (leaf) node of the subtree into list,
    appending in traversal order (siblings before children).
  */
  if (node == (IntervalTree *) NULL)
    return;
  if (node->child == (IntervalTree *) NULL)
    {
      list[*number_nodes]=node;
      (*number_nodes)++;
    }
  InitializeList(list,number_nodes,node->sibling);
  InitializeList(list,number_nodes,node->child);
}
static void MeanStability(IntervalTree *node)
{
  /*
    A node's mean stability is the average stability of its children;
    a childless node keeps the default of zero.
  */
  if (node == (IntervalTree *) NULL)
    return;
  node->mean_stability=0.0;
  if (node->child != (IntervalTree *) NULL)
    {
      register IntervalTree
        *child;

      register MagickRealType
        sum;

      register ssize_t
        count;

      sum=0.0;
      count=0;
      for (child=node->child; child != (IntervalTree *) NULL;
           child=child->sibling)
      {
        sum+=child->stability;
        count++;
      }
      node->mean_stability=sum/(MagickRealType) count;
    }
  MeanStability(node->sibling);
  MeanStability(node->child);
}
static void Stability(IntervalTree *node)
{
  /*
    Stability is the tau gap between a node and its first child; a leaf
    has no child interval and therefore zero stability.
  */
  if (node == (IntervalTree *) NULL)
    return;
  node->stability=node->child == (IntervalTree *) NULL ? 0.0 :
    node->tau-(node->child)->tau;
  Stability(node->sibling);
  Stability(node->child);
}
static IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  IntervalTree
    *head,
    **list,
    *node,
    *root;

  register ssize_t
    i;

  ssize_t
    j,
    k,
    left,
    number_nodes;

  /*
    Allocate interval tree (scratch list of leaf nodes per level).
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return((IntervalTree *) NULL);
  /*
    The root is the entire histogram.
  */
  root=(IntervalTree *) AcquireCriticalMemory(sizeof(*root));
  root->child=(IntervalTree *) NULL;
  root->sibling=(IntervalTree *) NULL;
  root->tau=0.0;
  root->left=0;
  root->right=255;
  root->mean_stability=0.0;
  root->stability=0.0;
  (void) memset(list,0,TreeLength*sizeof(*list));
  /*
    Build the tree one scale-space level at a time: the crossings of
    level i+1 subdivide every current leaf interval into children.
  */
  for (i=(-1); i < (ssize_t) number_crossings; i++)
  {
    /*
      Initialize list with all nodes with no children.
    */
    number_nodes=0;
    InitializeList(list,&number_nodes,root);
    /*
      Split list: each crossing strictly inside a leaf's interval closes
      one child interval and starts the next.
    */
    for (j=0; j < number_nodes; j++)
    {
      head=list[j];
      left=head->left;
      node=head;
      for (k=head->left+1; k < head->right; k++)
      {
        if (zero_crossing[i+1].crossings[k] != 0)
          {
            /*
              The first child hangs off head; later children chain on as
              siblings of the previous child.
            */
            if (node == head)
              {
                node->child=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->child));
                node=node->child;
              }
            else
              {
                node->sibling=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->sibling));
                node=node->sibling;
              }
            if (node == (IntervalTree *) NULL)
              {
                /* allocation failed: tear down everything built so far */
                list=(IntervalTree **) RelinquishMagickMemory(list);
                FreeNodes(root);
                return((IntervalTree *) NULL);
              }
            node->tau=zero_crossing[i+1].tau;
            node->child=(IntervalTree *) NULL;
            node->sibling=(IntervalTree *) NULL;
            node->left=left;
            node->right=k;
            left=k;
          }
      }
      /*
        If any child was created (left moved), add a final sibling that
        spans from the last crossing to the interval's right edge.
      */
      if (left != head->left)
        {
          node->sibling=(IntervalTree *) AcquireMagickMemory(
            sizeof(*node->sibling));
          node=node->sibling;
          if (node == (IntervalTree *) NULL)
            {
              list=(IntervalTree **) RelinquishMagickMemory(list);
              FreeNodes(root);
              return((IntervalTree *) NULL);
            }
          node->tau=zero_crossing[i+1].tau;
          node->child=(IntervalTree *) NULL;
          node->sibling=(IntervalTree *) NULL;
          node->left=left;
          node->right=head->right;
        }
    }
  }
  /*
    Determine the stability: difference between a nodes tau and its child.
  */
  Stability(root->child);
  MeanStability(root->child);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(root);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p t i m a l T a u %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OptimalTau() finds the optimal tau for each band of the histogram.
%
% The format of the OptimalTau method is:
%
% MagickRealType OptimalTau(const ssize_t *histogram,const double max_tau,
% const double min_tau,const double delta_tau,
% const double smooth_threshold,short *extrema)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of integers representing the number
% of pixels for each intensity of a particular color component.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
*/
static void ActiveNodes(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  /*
    A node is "active" when its stability is at least the mean stability
    of its children.  An active node represents its whole subtree, so
    only its siblings are searched further; otherwise the search descends
    into both siblings and children.
  */
  if (node == (IntervalTree *) NULL)
    return;
  if (node->stability < node->mean_stability)
    {
      ActiveNodes(list,number_nodes,node->sibling);
      ActiveNodes(list,number_nodes,node->child);
      return;
    }
  list[*number_nodes]=node;
  (*number_nodes)++;
  ActiveNodes(list,number_nodes,node->sibling);
}
static void FreeNodes(IntervalTree *node)
{
  /*
    Post-order release of the entire subtree rooted at node.
  */
  if (node == (IntervalTree *) NULL)
    return;
  FreeNodes(node->sibling);
  FreeNodes(node->child);
  (void) RelinquishMagickMemory(node);
}
static MagickRealType OptimalTau(const ssize_t *histogram,const double max_tau,
  const double min_tau,const double delta_tau,const double smooth_threshold,
  short *extrema)
{
  IntervalTree
    **list,
    *node,
    *root;

  MagickBooleanType
    peak;

  MagickRealType
    average_tau,
    *derivative,
    *second_derivative,
    tau,
    value;

  register ssize_t
    i,
    x;

  size_t
    count,
    number_crossings;

  ssize_t
    index,
    j,
    k,
    number_nodes;

  ZeroCrossing
    *zero_crossing;

  /*
    Allocate interval tree (scratch list for active nodes).
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return(0.0);
  /*
    Allocate zero crossing list: one entry per tau step plus one for the
    unsmoothed histogram.
  */
  count=(size_t) ((max_tau-min_tau)/delta_tau)+2;
  zero_crossing=(ZeroCrossing *) AcquireQuantumMemory((size_t) count,
    sizeof(*zero_crossing));
  if (zero_crossing == (ZeroCrossing *) NULL)
    {
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return(0.0);
    }
  for (i=0; i < (ssize_t) count; i++)
    zero_crossing[i].tau=(-1.0);  /* -1.0 marks an unused entry */
  /*
    Initialize zero crossing list: smooth the histogram at each tau and
    record where its second derivative changes sign.
  */
  derivative=(MagickRealType *) AcquireCriticalMemory(256*sizeof(*derivative));
  second_derivative=(MagickRealType *) AcquireCriticalMemory(256*
    sizeof(*second_derivative));
  i=0;
  for (tau=max_tau; tau >= min_tau; tau-=delta_tau)
  {
    zero_crossing[i].tau=tau;
    ScaleSpace(histogram,tau,zero_crossing[i].histogram);
    DerivativeHistogram(zero_crossing[i].histogram,derivative);
    DerivativeHistogram(derivative,second_derivative);
    ZeroCrossHistogram(second_derivative,smooth_threshold,
      zero_crossing[i].crossings);
    i++;
  }
  /*
    Add an entry for the original histogram.
  */
  zero_crossing[i].tau=0.0;
  for (j=0; j <= 255; j++)
    zero_crossing[i].histogram[j]=(MagickRealType) histogram[j];
  DerivativeHistogram(zero_crossing[i].histogram,derivative);
  DerivativeHistogram(derivative,second_derivative);
  ZeroCrossHistogram(second_derivative,smooth_threshold,
    zero_crossing[i].crossings);
  number_crossings=(size_t) i;
  derivative=(MagickRealType *) RelinquishMagickMemory(derivative);
  second_derivative=(MagickRealType *)
    RelinquishMagickMemory(second_derivative);
  /*
    Ensure the scale-space fingerprints form lines in scale-space, not loops.
  */
  ConsolidateCrossings(zero_crossing,number_crossings);
  /*
    Force endpoints to be included in the interval: mirror the first and
    last crossing (with flipped sign) onto bins 0 and 255.
  */
  for (i=0; i <= (ssize_t) number_crossings; i++)
  {
    for (j=0; j < 255; j++)
      if (zero_crossing[i].crossings[j] != 0)
        break;
    zero_crossing[i].crossings[0]=(-zero_crossing[i].crossings[j]);
    for (j=255; j > 0; j--)
      if (zero_crossing[i].crossings[j] != 0)
        break;
    zero_crossing[i].crossings[255]=(-zero_crossing[i].crossings[j]);
  }
  /*
    Initialize interval tree.
  */
  root=InitializeIntervalTree(zero_crossing,number_crossings);
  if (root == (IntervalTree *) NULL)
    {
      zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return(0.0);
    }
  /*
    Find active nodes: stability is greater (or equal) to the mean stability of
    its children.
  */
  number_nodes=0;
  ActiveNodes(list,&number_nodes,root->child);
  /*
    Initialize extrema.
  */
  for (i=0; i <= 255; i++)
    extrema[i]=0;
  for (i=0; i < number_nodes; i++)
  {
    /*
      Find this tau in zero crossings list (k defaults to 0 if no match).
    */
    k=0;
    node=list[i];
    for (j=0; j <= (ssize_t) number_crossings; j++)
      if (zero_crossing[j].tau == node->tau)
        k=j;
    /*
      Find the value of the peak: a -1 crossing at the node's right edge
      marks a peak (search for the maximum); otherwise search for the
      minimum of the smoothed histogram over the node's interval.
    */
    peak=zero_crossing[k].crossings[node->right] == -1 ? MagickTrue :
      MagickFalse;
    index=node->left;
    value=zero_crossing[k].histogram[index];
    for (x=node->left; x <= node->right; x++)
    {
      if (peak != MagickFalse)
        {
          if (zero_crossing[k].histogram[x] > value)
            {
              value=zero_crossing[k].histogram[x];
              index=x;
            }
        }
      else
        if (zero_crossing[k].histogram[x] < value)
          {
            value=zero_crossing[k].histogram[x];
            index=x;
          }
    }
    /*
      Stamp the signed extrema index over the node's interval; 0 cannot be
      stored (it means "no extrema"), so bin 0 is encoded as 256.
    */
    for (x=node->left; x <= node->right; x++)
    {
      if (index == 0)
        index=256;
      if (peak != MagickFalse)
        extrema[x]=(short) index;
      else
        extrema[x]=(short) (-index);
    }
  }
  /*
    Determine the average tau.
  */
  average_tau=0.0;
  for (i=0; i < number_nodes; i++)
    average_tau+=list[i]->tau;
  /* NOTE(review): if number_nodes == 0 this is 0.0/0 -> NaN; confirm
     callers tolerate a NaN return in that (presumably rare) case. */
  average_tau/=(MagickRealType) number_nodes;
  /*
    Relinquish resources.
  */
  FreeNodes(root);
  zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(average_tau);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S c a l e S p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleSpace() performs a scale-space filter on the 1D histogram.
%
% The format of the ScaleSpace method is:
%
% ScaleSpace(const ssize_t *histogram,const MagickRealType tau,
% MagickRealType *scale_histogram)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of MagickRealTypes representing the number
% of pixels for each intensity of a particular color component.
%
*/
static void ScaleSpace(const ssize_t *histogram,const MagickRealType tau,
  MagickRealType *scale_histogram)
{
  double
    exponent,
    *kernel,
    scale,
    sum;

  register ssize_t
    bin,
    v;

  kernel=(double *) AcquireQuantumMemory(256,sizeof(*kernel));
  if (kernel == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAllocateGammaMap");
  /*
    Build a Gaussian kernel of width tau: scale normalizes the result,
    exponent is the coefficient of the squared distance.  Entries beyond
    numerical significance are left at zero.
  */
  scale=1.0/(tau*sqrt(2.0*MagickPI));
  exponent=(-1.0/(2.0*tau*tau));
  for (bin=0; bin <= 255; bin++)
    kernel[bin]=0.0;
  for (bin=0; bin <= 255; bin++)
  {
    kernel[bin]=exp((double) exponent*bin*bin);
    if (kernel[bin] < MagickEpsilon)
      break;
  }
  /*
    Convolve the histogram with the kernel; the kernel is symmetric, so
    the absolute distance between bins indexes it.
  */
  for (bin=0; bin <= 255; bin++)
  {
    sum=0.0;
    for (v=0; v <= 255; v++)
      sum+=(double) histogram[v]*kernel[MagickAbsoluteValue(bin-v)];
    scale_histogram[bin]=(MagickRealType) (scale*sum);
  }
  kernel=(double *) RelinquishMagickMemory(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e g m e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SegmentImage() segment an image by analyzing the histograms of the color
% components and identifying units that are homogeneous with the fuzzy
% C-means technique.
%
% The format of the SegmentImage method is:
%
% MagickBooleanType SegmentImage(Image *image,
% const ColorspaceType colorspace,const MagickBooleanType verbose,
% const double cluster_threshold,const double smooth_threshold)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o colorspace: Indicate the colorspace.
%
% o verbose: Set to MagickTrue to print detailed information about the
% identified classes.
%
% o cluster_threshold: This represents the minimum number of pixels
% contained in a hexahedra before it can be considered valid (expressed
% as a percentage).
%
% o smooth_threshold: the smoothing threshold eliminates noise in the second
% derivative of the histogram. As the value is increased, you can expect a
% smoother second derivative.
%
*/
MagickExport MagickBooleanType SegmentImage(Image *image,
  const ColorspaceType colorspace,const MagickBooleanType verbose,
  const double cluster_threshold,const double smooth_threshold)
{
  ColorspaceType
    previous_colorspace;

  MagickBooleanType
    status;

  register ssize_t
    i;

  short
    *extrema[MaxDimension];

  ssize_t
    *histogram[MaxDimension];

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256,sizeof(**histogram));
    extrema[i]=(short *) AcquireQuantumMemory(256,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /*
          Fix: include index i in the cleanup -- one member of the pair may
          have been acquired (RelinquishMagickMemory() accepts NULL); the
          original started at i-1 and leaked it.
        */
        for ( ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename)
      }
  }
  /*
    Initialize histogram and locate the peaks/valleys of each channel in
    the requested colorspace.
  */
  previous_colorspace=image->colorspace;
  (void) TransformImageColorspace(image,colorspace);
  InitializeHistogram(image,histogram,&image->exception);
  (void) OptimalTau(histogram[Red],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Blue]);
  /*
    Classify using the fuzzy c-Means technique.
  */
  status=Classify(image,extrema,cluster_threshold,WeightingExponent,verbose);
  (void) TransformImageColorspace(image,previous_colorspace);
  /*
    Relinquish resources.
  */
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Z e r o C r o s s H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZeroCrossHistogram() finds the zero crossings in a histogram and marks
% directions as: 1 is negative to positive; 0 is zero crossing; and -1
% is positive to negative.
%
% The format of the ZeroCrossHistogram method is:
%
% ZeroCrossHistogram(MagickRealType *second_derivative,
% const MagickRealType smooth_threshold,short *crossings)
%
% A description of each parameter follows.
%
% o second_derivative: Specifies an array of MagickRealTypes representing the
% second derivative of the histogram of a particular color component.
%
% o crossings: This array of integers is initialized with
% -1, 0, or 1 representing the slope of the first derivative of the
% histogram of a particular color component.
%
*/
/*
  ZeroCrossHistogram() marks the zero crossings of a 256-entry second
  derivative: crossings[k] is 1 for a negative-to-positive crossing, -1 for
  positive-to-negative, 0 otherwise.  Values inside [-smooth_threshold,
  smooth_threshold) are first flushed to zero to suppress noise.
*/
static void ZeroCrossHistogram(MagickRealType *second_derivative,
  const MagickRealType smooth_threshold,short *crossings)
{
  register ssize_t
    k;

  ssize_t
    previous_sign;

  /*
    Merge low-magnitude values to zero so noise cannot fabricate crossings.
    (Note the half-open interval: exactly -smooth_threshold is flushed,
    exactly +smooth_threshold is not.)
  */
  for (k=0; k <= 255; k++)
    if ((second_derivative[k] >= -smooth_threshold) &&
        (second_derivative[k] < smooth_threshold))
      second_derivative[k]=0.0;
  /*
    Scan left to right, remembering the sign of the last nonzero value.
  */
  previous_sign=0;
  for (k=0; k <= 255; k++)
  {
    crossings[k]=0;
    if (second_derivative[k] > 0.0)
      {
        if (previous_sign < 0)
          crossings[k]=1;
        previous_sign=(-1);
      }
    else
      if (second_derivative[k] < 0.0)
        {
          if (previous_sign > 0)
            crossings[k]=(-1);
          previous_sign=1;
        }
  }
}
|
jacobi-sse.c | #include <immintrin.h>
/*
 * One 2-wide SSE2 Jacobi update: writes v2[0..1] = 0.25 * (E + S + N + W),
 * where the four neighbors of v1[k] are v1[k+1], v1[k-1], v1[k+m], v1[k-m].
 * Unaligned loads/stores; the summation order (E, S, N, W) is preserved so
 * rounding matches the original kernel exactly.
 */
void kernel(double* v1, double * v2, int m)
{
    const __m128d quarter = _mm_set1_pd(0.25);
    __m128d east  = _mm_loadu_pd(v1 + 1);
    __m128d west  = _mm_loadu_pd(v1 - 1);
    __m128d north = _mm_loadu_pd(v1 + m);
    __m128d south = _mm_loadu_pd(v1 - m);
    __m128d total = _mm_add_pd(east, south);
    total = _mm_add_pd(total, north);
    total = _mm_add_pd(total, west);
    _mm_storeu_pd(v2, _mm_mul_pd(quarter, total));
}
/*
 * Apply the 2-wide SSE Jacobi stencil over the interior of a dim_m x dim_n
 * grid, two columns per kernel call; boundary rows/columns and a trailing
 * odd column are skipped.  Rows are parallelized with OpenMP.
 *
 * NOTE(review): the flattened index uses dim_m (j*dim_m, and dim_m as the
 * vertical stride passed to kernel) while the column loop runs to dim_n.
 * That is only self-consistent when dim_m == dim_n (square grid) — confirm
 * the intended memory layout before using rectangular grids.
 */
void laplacian(double* v1, double* v2, int dim_m, int dim_n)
{
    int m = dim_m;   /* row stride; assigned but not used locally (kernel gets dim_m directly) */
    //
    #pragma omp parallel for //schedule(static)
    for (int j = 1; j < dim_m - 1; ++j )
    {
        /* stop before the last interior column when (dim_n - 1) is odd,
           so the 2-wide kernel never writes past the interior */
        for (int i = 1; i < dim_n - 1 - (dim_n - 1)%2; i = i + 2)
        {
            kernel(v1 + j*dim_m + i, v2 + j*dim_m + i, dim_m);
        }
    }
}
|
relu_hcl_arm.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: qtang@openailab.com
*/
#ifndef _RELU_KERNEL_ARM_H_
#define _RELU_KERNEL_ARM_H_
#include <arm_neon.h>
#include "../../../../../include/tengine_ir.h"
#include "../../../../op/relu_param.h"
/*
 * ReLU / leaky-ReLU over an NCHW fp32 tensor, NEON-vectorized 4 floats at a
 * time with a scalar tail, channels parallelized via OpenMP.
 * negative_slope == 0 selects plain ReLU (max(x,0)); otherwise negative
 * inputs are scaled by negative_slope.  Always returns 0.
 */
static int perf_relu_fp32(struct ir_tensor* input_tensor, struct ir_tensor* output_tensor, float negative_slope,
                          int num_thread)
{
    /* Zero-sized dims are treated as 1 so the flattened strides stay valid. */
    int batch = input_tensor->dims[0] ? input_tensor->dims[0] : 1;
    int channels = input_tensor->dims[1] ? input_tensor->dims[1] : 1;
    /* NOTE(review): h is read from output_tensor while w comes from
       input_tensor; shapes match for an elementwise op, but confirm the
       asymmetry is intentional. */
    int h = output_tensor->dims[2] ? output_tensor->dims[2] : 1;
    int w = input_tensor->dims[3] ? input_tensor->dims[3] : 1;
    int size = h * w;          /* elements per channel */
    int c_step = h * w;        /* channel stride */
    int b_step = channels * h * w;   /* batch stride */
    float* input_data = (float*)input_tensor->data;
    float* out_data = (float*)output_tensor->data;
    if (negative_slope == 0)
    {
        /* Plain ReLU path. */
        for (int n = 0; n < batch; n++)
        {
            float* input = input_data + n * b_step;
            float* output = out_data + n * b_step;
#pragma omp parallel for num_threads(num_thread)
            for (int q = 0; q < channels; q++)
            {
                float* src = input + c_step * q;
                float* dst = output + c_step * q;
#if __ARM_NEON
                int nn = size >> 2;              /* 4-lane vector iterations */
                int remain = size - (nn << 2);   /* scalar tail elements */
#else
                int remain = size;
#endif // __ARM_NEON
#if __ARM_NEON
                float32x4_t _zero = vdupq_n_f32(0.f);
                for (; nn > 0; nn--)
                {
                    float32x4_t _p = vld1q_f32(src);
                    _p = vmaxq_f32(_p, _zero);   /* max(x, 0) per lane */
                    vst1q_f32(dst, _p);
                    src += 4;
                    dst += 4;
                }
#endif
                for (; remain > 0; remain--)
                {
                    if (src[0] < 0)
                        dst[0] = 0;
                    else
                        dst[0] = src[0];
                    src++;
                    dst++;
                }
            }
        }
    }
    else
    {
        /* Leaky-ReLU path: x < 0 ? x * negative_slope : x. */
        for (int n = 0; n < batch; n++)
        {
            float* input = input_data + n * b_step;
            float* output = out_data + n * b_step;
#pragma omp parallel for num_threads(num_thread)
            for (int q = 0; q < channels; q++)
            {
                float* src = input + c_step * q;
                float* dst = output + c_step * q;
#if __ARM_NEON
                int nn = size >> 2;
                int remain = size - (nn << 2);
#else
                int remain = size;
#endif // __ARM_NEON
#if __ARM_NEON
                float32x4_t _zero = vdupq_n_f32(0.f);
                float32x4_t _slope = vdupq_n_f32(negative_slope);
                for (; nn > 0; nn--)
                {
                    float32x4_t _p = vld1q_f32(src);
                    /* select x*slope where x <= 0, else x */
                    uint32x4_t _lemask = vcleq_f32(_p, _zero);
                    float32x4_t _ps = vmulq_f32(_p, _slope);
                    _p = vbslq_f32(_lemask, _ps, _p);
                    vst1q_f32(dst, _p);
                    src += 4;
                    dst += 4;
                }
#endif
                for (; remain > 0; remain--)
                {
                    if (src[0] < 0)
                        dst[0] = src[0] * negative_slope;
                    else
                        dst[0] = src[0];
                    src++;
                    dst++;
                }
            }
        }
    }
    return 0;
}
#endif
|
GB_binop__ldexp_fp64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ldexp_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__ldexp_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__ldexp_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__ldexp_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ldexp_fp64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__ldexp_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__ldexp_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ldexp_fp64)
// C=scalar+B GB (_bind1st__ldexp_fp64)
// C=scalar+B' GB (_bind1st_tran__ldexp_fp64)
// C=A+scalar GB (_bind2nd__ldexp_fp64)
// C=A'+scalar GB (_bind2nd_tran__ldexp_fp64)
// C type: double
// A type: double
// A pattern? 0
// B type: double
// B pattern? 0
// BinaryOp: cij = ldexp (aij, bij)
// Type and operator macros consumed by the template files #included below.
// (Auto-generated; regeneration will discard edits.)
#define GB_ATYPE \
    double
#define GB_BTYPE \
    double
#define GB_CTYPE \
    double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    double aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
// NOTE(review): the trailing backslash after "0" splices the following
// comment line into the macro; harmless (comments are stripped after line
// splicing) but verify it matches the Generator template.
#define GB_A_IS_PATTERN \
    0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    double bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)
// C(i,j) accessor
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = ldexp (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
    1
// op is second
#define GB_OP_IS_SECOND \
    0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LDEXP || GxB_NO_FP64 || GxB_NO_LDEXP_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (Disabled: ldexp is not one of those ops, so no dense ewise3-accum kernel
// is generated for this operator.)
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Body supplied entirely by the template; the GB_* macros above select the
// ldexp/fp64 instantiation.
void GB (_Cdense_ewise3_noaccum__ldexp_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__ldexp_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__ldexp_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable (the block above returns); kept as generated
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// (Disabled: no colscale kernel is generated for the ldexp operator.)
#if 0
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// (Disabled: no rowscale kernel is generated for the ldexp operator.)
#if 0
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// alpha/beta scalars are only read for eWiseUnion (values used where one of
// A or B has no entry); the template consumes them and frees the workspaces.
GrB_Info GB (_AaddB__ldexp_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    double alpha_scalar ;
    double beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((double *) alpha_scalar_in)) ;
        beta_scalar = (*((double *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__ldexp_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// GB_BINOP_FLIP is 1 for ldexp, so the flipxy branch below is active: the
// template is instantiated twice, once per argument order.
GrB_Info GB (_AemultB_02__ldexp_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__ldexp_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__ldexp_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Note: bij is a double passed as the second argument of ldexp, which takes
// an int exponent; the implicit conversion truncates, as generated.
GrB_Info GB (_bind1st__ldexp_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B, or NULL
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;    // skip entries absent from the bitmap
        double bij = GBX (Bx, p, false) ;
        Cx [p] = ldexp (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__ldexp_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A, or NULL
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;    // skip entries absent from the bitmap
        double aij = GBX (Ax, p, false) ;
        Cx [p] = ldexp (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    double aij = GBX (Ax, pA, false) ; \
    Cx [pC] = ldexp (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__ldexp_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent template use
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    double aij = GBX (Ax, pA, false) ; \
    Cx [pC] = ldexp (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__ldexp_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
pcp.h | /**
* Author: Kartik Lakhotia
Sourav Pati
* Email id: klakhoti@usc.edu
spati@usc.edu
* Date: 27-Feb-2018
*
* This code implements work optimized propagation blocking with
* transposed bin graph to reduce cache misses in scatter
*/
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <time.h>
#include <cmath>
#include <vector>
#include <algorithm>
#include <omp.h>
#include <assert.h>
#include <string.h>
#include <immintrin.h>
#include "../include/gas.h"
// Partition (bin) geometry: one bin of floats is sized to fit in 512kB of
// cache.  These are provisional values; initialize() recomputes them from
// the loaded graph.
intV binWidth = (256*1024)/sizeof(float); //512kB
unsigned int binOffsetBits = (unsigned int)std::log2((double)binWidth);
intV NUM_BINS = 10000000/binWidth;
#define DEBUG
#undef DEBUG
//////////////////////////////////////////
// performance monitoring via PCM
//////////////////////////////////////////
#define PERF_MON
#undef PERF_MON
//////////////////////////////////////////
// level 2 debugging - asserts enabled
//////////////////////////////////////////
#define DEBUGL2
#undef DEBUGL2
#define ITERTIME
#undef ITERTIME
// Fix: std::getenv returns nullptr when OMP_NUM_THREADS is unset, and
// atoi(nullptr) is undefined behavior — guard before converting.
static int pcpDefaultThreads()
{
    const char* env = std::getenv("OMP_NUM_THREADS");
    return (env != nullptr) ? std::max(atoi(env), 1) : 1;
}
int NUM_THREADS = pcpDefaultThreads();
unsigned int MAX_ITER = 1000000;
// Parse command-line options, load the CSR graph, and size the partitions.
//   argv: <csr file> [-s start] [-t threads] [-iter n] [-rounds r]
// Fixes: usage is now validated BEFORE the option loop runs, and the thread
// count is clamped to >= 1 so "-t 0" (or garbage) is never handed to
// omp_set_num_threads.
template<class graph>
void initialize(graph* G, int argc, char** argv)
{
    if (argc < 2)
    {
        printf("Usage : %s <filename> -s <start node> -t <numThreads(optional)> -iter <#iterations(optional) -rounds <#rounds(default 3)> \n", argv[0]);
        exit(1);
    }
    G->start = 1;
    G->rounds= 3;
    for (int i = 1; i < argc; i++)
    {
        if (i + 1 != argc)     // every flag below consumes a value
        {
            if (strcmp(argv[i], "-s") == 0)          // start vertex
            {
                G->start = (intV)atoi(argv[i + 1]);
                i++;
            }
            else if (strcmp(argv[i], "-t") == 0)     // thread count
            {
                NUM_THREADS = atoi(argv[i + 1]);
                i++;
            }
            else if (strcmp(argv[i], "-iter") == 0)  // iteration cap
            {
                MAX_ITER = (unsigned int)atoi(argv[i + 1]);
                i++;
            }
            else if (strcmp(argv[i], "-rounds") == 0) // benchmark rounds
            {
                G->rounds = (unsigned int)atoi(argv[i + 1]);
                i++;
            }
        }
    }
    if (NUM_THREADS < 1)
        NUM_THREADS = 1;       // robustness: never request 0 or negative threads
    omp_set_num_threads(NUM_THREADS);
    //////////////////////////////////////////
    // read csr file
    //////////////////////////////////////////
    if (read_csr(argv[1], G)==-1)
    {
        printf("couldn't read %s\n", argv[1]);
        exit(1);
    }
    // Choose a power-of-two partition width no larger than both the
    // cache-derived binWidth and numVertex/(4*threads).
    intV numVerticesPerBin= (G->numVertex/(NUM_THREADS*4));
    numVerticesPerBin = (numVerticesPerBin < binWidth) ? numVerticesPerBin : binWidth;
    intV pow2=1;
    while(pow2<=numVerticesPerBin)
        pow2*=2;
    pow2/=2;                   // largest power of two <= numVerticesPerBin
    if(pow2==0) binWidth=4;    // floor for tiny graphs
    else binWidth = pow2;
    NUM_BINS = (G->numVertex-1)/binWidth + 1;
    G->numBins = NUM_BINS;
    printf("number of partitions %d, size of partitions %d\n", NUM_BINS, binWidth);
    binOffsetBits = (unsigned int)std::log2((double)binWidth);
    //////////////////////////////////////////
    //initialize graph frontier, degree etc.//
    //////////////////////////////////////////
    initGraph (G);
}
// Build the propagation-blocking bin structure: partition the graph, build
// the transposed "PNG" per partition, then allocate the NUM_BINS x NUM_BINS
// matrix of per-pair update/destination bins.  bin[i][j] stores what
// partition i sends to partition j.
template<class type, class graph>
void initBin(graph* G)
{
    //////////////////////////////////////////
    //static work allocation to threads
    //equal no. of edges to all bins
    //////////////////////////////////////////
    G->TD = (partitionData*) malloc (sizeof(partitionData)*NUM_BINS);
    partition(G->TD, G);
    printf("partitioning successful\n");
    //////////////////////////////////////////////////
    //compute storage space required for each bin and
    //offsets for storage in bins for a partition
    //1 column -> 1 gather bin; 1 row -> 1 scatter bin
    //bin[i][j] -> stores what i sends to j
    //////////////////////////////////////////////////
    G->updateBinAddrSize = allocateBinMat<intE>(NUM_BINS, NUM_BINS);
    G->destIdBinAddrSize = allocateBinMat<intE>(NUM_BINS, NUM_BINS);
    G->binFlag = allocateBinMat<bool>(NUM_BINS, NUM_BINS);
    G->activeBins = allocateBinMat<intV>(NUM_BINS, NUM_BINS);
    // struct timespec preStart, preEnd;
    // float preTime;
    // if( clock_gettime(CLOCK_REALTIME, &preStart) == -1) { perror("clock gettime");}
    //////////////////////////////////////////
    //// transpose and compute offsets ///////
    //////////////////////////////////////////
    #pragma omp parallel for schedule (dynamic, 1)
    for (intV i=0; i<NUM_BINS; i++)
        transposePartition(G, &(G->TD[i]), G->updateBinAddrSize[i], G->destIdBinAddrSize[i]);
    printf("PNG construction successful\n");
    // if( clock_gettime( CLOCK_REALTIME, &preEnd) == -1 ) { perror("clock gettime");}
    // preTime = (preEnd.tv_sec - preStart.tv_sec)+ (int)(preEnd.tv_nsec - preStart.tv_nsec)/1e9;
    // printf("%s, preprocessing time - %lf\n", argv[1], preTime);
    //////////////////////////////////////////
    //////////////// BINNING ////////////////
    //////////////////////////////////////////
    //////////////////////////////////////////
    ////individual bins to->fro each partition //////
    //////////////////////////////////////////
    G->indUpdateBins = allocateBinMatPtr<type>(NUM_BINS, NUM_BINS);
    G->indDestIdBins = allocateBinMatPtr<intV>(NUM_BINS, NUM_BINS);
    G->sparseDestIdBins = allocateBinMatPtr<intV>(NUM_BINS, NUM_BINS);
#ifdef WEIGHTED
    G->indWeightBins = allocateBinMatPtr<unsigned int>(NUM_BINS, NUM_BINS);
#endif
    // Allocate each (i,j) bin to the exact size computed by
    // transposePartition above.
    #pragma omp parallel for num_threads(NUM_THREADS) schedule(dynamic, 1)
    for (intV i=0; i<NUM_BINS; i++)
    {
        for (intV j=0; j<NUM_BINS; j++)
        {
            G->indUpdateBins[i][j] = new type [G->destIdBinAddrSize[i][j]];
            G->indDestIdBins[i][j] = new intV [G->destIdBinAddrSize[i][j]];
            G->sparseDestIdBins[i][j] = new intV [G->destIdBinAddrSize[i][j]];
#ifdef WEIGHTED
            G->indWeightBins[i][j] = new unsigned int [G->destIdBinAddrSize[i][j]];
#endif
        }
    }
    //pointers for each (i,j) bin for later use //
    G->updateBinPointers = allocateBinMat<intE>(NUM_BINS, NUM_BINS);
    G->destIdBinPointers = allocateBinMat<intE>(NUM_BINS, NUM_BINS);
    // Pre-fill destination-vertex ids (and weights) once; only update values
    // are rewritten per iteration.
    #pragma omp parallel for num_threads(NUM_THREADS) schedule (dynamic, 1)
    for (intV i=0; i<NUM_BINS; i++)
    {
#ifdef WEIGHTED
        writeDestIds(G, &G->TD[i], G->indDestIdBins[i], G->indWeightBins[i], G->destIdBinPointers[i]);
#else
        writeDestIds(G, &G->TD[i], G->indDestIdBins[i], G->destIdBinPointers[i]);
#endif
    }
#ifdef DEBUG
    printf("binning complete\n");
#endif
    //////////////////////////////////////////
    //////////// BINNING COMPLETE ////////////
    //////////////////////////////////////////
}
// One scatter/gather super-step over all active partitions: scatter pushes
// updates from active partitions into the per-pair bins, then gather applies
// them; activeGather entries are recycled into activeScatter for the next
// iteration.
// Fixes: under ITERTIME, iterStart was never initialized before being read
// (uninitialized-memory read in the timing report); it is now sampled at
// entry.  Unused locals (time, start, end) removed, and the nanosecond delta
// now uses a float cast instead of (int).
template<class type, class graph, class userArg>
void scatter_and_gather(graph* G, userArg UA)
{
#ifdef ITERTIME
    struct timespec scatterStart, scatterEnd, gatherStart, gatherEnd;
    float scatterTime = 0.0, gatherTime = 0.0;
    struct timespec iterStart, iterEnd;
    float iterTime;
    if( clock_gettime(CLOCK_REALTIME, &iterStart) == -1) { perror("clock gettime");}
#endif
    intV numActiveBins;
    ///////////////////////////////////////
    ////Set FLAG For Scatter and Gather////
    ///////////////////////////////////////
#ifndef DENSE
    G->frontierSize = 0;
#endif
#ifdef ITERTIME
    if( clock_gettime(CLOCK_REALTIME, &scatterStart) == -1) { perror("clock gettime");}
#endif
    numActiveBins = G->partListPtr;
#ifndef DENSE
    G->partListPtr = 0;
#ifndef ASYNCH
    // Decide dense vs sparse handling per active partition before scattering.
    #pragma omp parallel for num_threads(NUM_THREADS) schedule (dynamic, 1)
    for (intV i=0; i<numActiveBins; i++)
        densityCheck(&G->TD[G->activeScatter[i]]);
#endif
#endif
    #pragma omp parallel for schedule(dynamic,1) num_threads(NUM_THREADS)
    for (intV ptr=0; ptr<numActiveBins; ptr++)
    {
        intV i = G->activeScatter[ptr];
#ifdef ASYNCH
        sgMix(G, &G->TD[i], G->indUpdateBins, G->indDestIdBins, G->sparseDestIdBins, G->TD, G->destIdBinAddrSize, G->destIdBinPointers, G->updateBinPointers, G->scatterDone, UA);
#else
        scatter<type>(G, &G->TD[i], G->indUpdateBins[i], G->sparseDestIdBins[i], G->updateBinPointers[i], G->destIdBinPointers[i], UA);
#endif
    }
    // Reset scatter-done status of the partitions processed above.
    #pragma omp parallel for
    for (intV ptr=0; ptr<numActiveBins; ptr++)
        G->scatterDone[G->activeScatter[ptr]] = false;
#ifdef ITERTIME
    if( clock_gettime( CLOCK_REALTIME, &scatterEnd) == -1 ) { perror("clock gettime");}
    scatterTime += (scatterEnd.tv_sec - scatterStart.tv_sec) + (float)(scatterEnd.tv_nsec - scatterStart.tv_nsec)/1e9;
    if( clock_gettime(CLOCK_REALTIME, &gatherStart) == -1) { perror("clock gettime");}
#endif
    #pragma omp parallel for schedule(dynamic,1) num_threads(NUM_THREADS)
    for (intV ptr=0; ptr<G->partListPtr; ptr++)
    {
        intV i=G->activeGather[ptr];
        gather<type>(G, &G->TD[i], G->indUpdateBins, G->indDestIdBins, G->sparseDestIdBins, G->TD, G->destIdBinAddrSize, G->destIdBinPointers, G->updateBinPointers, UA);
        G->activeScatter[ptr] = i;     // partitions gathered now scatter next round
    }
#ifdef ITERTIME
    if( clock_gettime( CLOCK_REALTIME, &gatherEnd) == -1 ) { perror("clock gettime");}
    gatherTime += (gatherEnd.tv_sec - gatherStart.tv_sec) + (float)(gatherEnd.tv_nsec - gatherStart.tv_nsec)/1e9;
    if( clock_gettime(CLOCK_REALTIME, &iterEnd) == -1) { perror("clock gettime");}
    iterTime = (iterEnd.tv_sec - iterStart.tv_sec)+ (float)(iterEnd.tv_nsec - iterStart.tv_nsec)/1e9;
    printf("scatter time= %lf gather time = %lf, total time = %lf\n", scatterTime,gatherTime,scatterTime+gatherTime);
#endif
}
|
loadcpu.c | /*
| CPU benchmark program
| loads all available CPU until Ctrl+C
|
| compile using:
| cc ./loadcpu.c -o loadcpu -fopenmp
|
| _______________________________________
|
| (c) cjayho, 2020+
|
| This program is distributed under the terms of
| 3-clause BSD license
|
*/
#include <stdio.h>
#include <omp.h>
/*
 * Entry point: print the banner, then keep every available core busy with an
 * empty infinite loop (one OpenMP thread per core).  The process never
 * returns; it terminates only via an external signal (Ctrl-C).
 */
int main( void )
{
    const char* greeting = "\n\
\
|\n\
| loadCPU simple benchmark\n\
| loads all available cpu cores in parallel\n\
| \n\
| Stop using Ctrl-C\n\
| ______________________________________________\n\
|\n\
| (c) cjayho, 2020+\n\
| This program is distributed under the terms\n\
| of 3-clause BSD license.\n\
|\n";
    puts( greeting );
    #pragma omp parallel
    for (;;)
    {
        /* spin */
    }
}
|
info.c | #include "math.h"
#include <stdio.h>
#include <stdlib.h>
#define _DISP
//#define _LABEL
/* A candidate 4-digit code for the digit-guessing (bulls-and-cows style)
   solver below. */
struct number{
    int num[4];   /* the four decimal digits, most significant first */
    int flag;     /* nonzero while the candidate is still consistent with feedback */
};
/* Per-(a,b)-feedback expected partition sizes over the 5040 candidates;
   PreInitArray (with _LABEL) converts these in place into surprisal values.
   NOTE(review): ordering must match the hist index ta*(11-ta)/2+tb used in
   Division() — verify before reordering. */
double LABEL[15]={360,1440,1260,264,9,480,720,216,8,180,72,6,24,5040,1};
/* Master table of all 5040 valid candidates, filled once by PreInitArray(). */
struct number initarray[5040];
/*
 * Decompose `num` (0..9999) into its four decimal digits, most significant
 * first, left-padded with zeros: num2p(123,p) yields {0,1,2,3}.
 * Fixes: declared `static inline` — a bare C99/C11 `inline` definition with
 * no external definition can fail to link when the compiler chooses not to
 * inline a call.  Also removed the dead store `i=3`.
 */
static inline void num2p(int num,int *p){
    int i;
    for(i=0;i<4;i++)
        *(p++)=0;        /* zero-fill; p now points one past the array */
    while(num){
        *(--p)=num%10;   /* write digits from the least significant end */
        num=num/10;
    }
}
/*
 * Return 1 when the four digits in p are pairwise distinct, 0 otherwise.
 * Fix: declared `static inline` (same C99/C11 inline-linkage hazard as
 * num2p — a bare `inline` without an external definition may not link).
 */
static inline int check1(int * p){
    int i,j;
    for(i=0;i<4;i++){
        for(j=i+1;j<4;j++){
            if(p[i]==p[j])
                return 0;   /* duplicate digit found */
        }
    }
    return 1;
}
/*
 * Build the master candidate table: scan 123..9876 and keep every value
 * whose four (zero-padded) digits are pairwise distinct — 5040 candidates
 * in total.  With _LABEL defined, also converts LABEL in place from
 * expected partition counts to surprisal values log(5040/count).
 * Fix: corrected the typo in the completion message ("Pre Iint Over!").
 */
void PreInitArray(){
    int i,j;
    int cnt=0;
    int numt[4];
    for(i=123;i<=9876;i++){
        num2p(i,numt);
        if(check1(numt)){
            initarray[cnt].flag=1;
            for(j=0;j<4;j++)
            {
                initarray[cnt].num[j]=numt[j];
            }
            cnt++;
        }
    }
#ifdef _LABEL
    /* normalize counts to probabilities, then take surprisal log(1/p) */
    for(i=0;i<15;i++)
    {
        LABEL[i]=LABEL[i]/5040;
        LABEL[i]=log(1/LABEL[i]);
        printf("%9f",LABEL[i]);
    }
#endif
    printf("\nPre Init Over!\n");
}
/*
 * Reset a working candidate array: copy the precomputed digits from the
 * master table and mark every one of the 5040 candidates as still viable.
 */
void InitArray(struct number * nump){
    int idx,d;
    for(idx=0;idx<5040;idx++){
        struct number *dst=&nump[idx];
        for(d=0;d<4;d++)
            dst->num[d]=initarray[idx].num[d];
        dst->flag=1;
    }
}
/*
 * Score guess numg against secret num0 (both 4 digits, assumed
 * pairwise distinct).  On return *a holds the number of exact
 * position matches ("bulls") and *b the number of digits present
 * but misplaced ("cows").
 *
 * Made "static inline": a bare C99 "inline" definition provides no
 * external symbol, so any non-inlined call fails to link.
 */
static inline void check2(int *num0, int *numg, int *a, int *b)
{
    int i, j;
    *a = 0;
    *b = 0;
    for (i = 0; i < 4; i++) {
        if (num0[i] == numg[i])
            (*a)++;
        /* count every shared digit; exact matches included for now */
        for (j = 0; j < 4; j++) {
            if (num0[i] == numg[j])
                (*b)++;
        }
    }
    (*b) -= (*a);   /* remove exact matches, leaving misplaced digits */
}
/*
 * Shannon entropy of the reply distribution that the guess nump would
 * induce over the candidates still flagged alive in array.  cnt is the
 * number of live candidates, passed as a double.  Higher entropy means
 * the guess splits the remaining candidates more evenly.
 */
double Division(struct number *array, double cnt, int *nump)
{
    int bucket[15] = {0};   /* one slot per possible (bulls, cows) reply */
    int k, bulls, cows;

    /* tally which reply each live candidate would give to this guess */
    for (k = 0; k < 5040; k++) {
        if (!array[k].flag)
            continue;
        check2(array[k].num, nump, &bulls, &cows);
        bucket[bulls * (11 - bulls) / 2 + cows]++;
    }

    double entropy = 0;
    for (k = 0; k < 15; k++) {
        if (bucket[k] != 0)
        {
            double p = bucket[k] / cnt;
#ifdef _LABEL
            p = LABEL[k] * p;
#endif
            entropy += -p * log(p);
        }
    }
    return entropy;
}
/*
 * Choose the next guess: evaluate every one of the 5040 codes as a
 * guess via Division() and return the index of the one with the
 * highest entropy.  Codes that are still possible answers (flag set)
 * are preferred; a ruled-out code is returned only if it strictly
 * beats the best still-possible one.  If a single candidate remains,
 * return it directly.
 *
 * array - candidate set with liveness flags
 * count - guesses made so far; unused by the active strategy, kept
 *         for the disabled alternative below
 */
int BestDivision(struct number * array,int count){
    double best=0.0;     /* best entropy among still-possible guesses */
    int bestindex=-1;
    double best2=0.0;    /* best entropy among ruled-out guesses */
    int bestindex2=-1;
    double new;
    int i;
    double cnt=0.0;
    /* count the candidates that are still alive */
    for(i=0;i<5040;i++)
        cnt+=array[i].flag;
    // printf("shengyu cnt:%f\n",cnt);
    /* only one candidate left: guess it outright */
    if(cnt<1.1){
        for(i=0;i<5040;i++){
            if(array[i].flag)
                return i;
        }
    }
    // cnt=cnt/13.0;
    /* disabled alternative: minimize entropy for the first guesses
    if(count<=1){
        for(i=0;i<5040;i++){
            if(array[i].flag){
                new=Division(array,cnt,array[i].num);
                if(best>new){
                    best=new;
                    bestindex=i;
                }
            }
        }
        return bestindex;
    }else
    */
    {
        for(i=0;i<5040;i++){
            if( array[i].flag)
            {
                new=Division(array,cnt,array[i].num);
                // if(new<0.001)
                //     break;
                if(best<new){
                    best=new;
                    bestindex=i;
                }
            }
            else{
                new=Division(array,cnt,array[i].num);
                // if(new<0.001)
                //     break;
                if(best2<new){
                    best2=new;
                    bestindex2=i;
                }
            }
        }
        /* a ruled-out code may still split the candidates better */
        if(best2>best)
            return bestindex2;
        // printf("best min:%f\n",best);
        return bestindex;
    }
}
/*
 * Play one complete bulls-and-cows game against the secret code num
 * (four pairwise-distinct digits).  Returns the number of guesses
 * needed to find it, or 0 if the game is not solved within 9 guesses.
 */
int CCguess(int * num){
    int numg[4];               /* current guess */
    int cnt=0;                 /* guesses made so far */
    int i;
    int a,b,ta,tb;             /* bulls/cows vs. secret and vs. candidate */
    int ans;                   /* index of the next guess in array */
    struct number array[5040]; /* working candidate set */
    InitArray(array);
    /* fixed opening guess: 0123 */
    for(i=0;i<4;i++)
        numg[i]=i;
    while(1){
        /* score the current guess against the secret */
        check2(num,numg,&a,&b);
        cnt++;
        if(a==4&&b==0)
            return cnt;        /* solved */
        if(cnt>9)
            return 0;          /* give up */
        /* prune: keep only candidates that would give the same reply */
        for(i=0;i<5040;i++){
            if(array[i].flag){
                check2(array[i].num,numg,&ta,&tb);
                array[i].flag=(ta==a && tb==b);
            }
        }
        /* pick the next guess by maximum entropy */
        ans=BestDivision(array,cnt);
        for(i=0;i<4;i++)
            numg[i]=array[ans].num[i];
    }
}
/*
 * Solve every one of the 5040 possible secrets, print the guess count
 * for each, then print a histogram of guess counts and the average.
 *
 * Fixes:
 *  - the OpenMP loop previously shared `ans` and `j` across threads
 *    and updated hist[] without synchronization — a data race that
 *    corrupted both the per-game results and the histogram; they are
 *    now private and the histogram update is atomic
 *  - return 0 on success (was 1, which signals failure to the shell)
 */
int main()
{
    PreInitArray();
    int i, j, cnt = 0;
    int ans;
    int hist[11];   /* hist[k] = number of secrets solved in k guesses */
    for (i = 0; i < 11; i++)
        hist[i] = 0;
#pragma omp parallel for private(ans, j)
    for (i = 0; i < 5040; i++) {
        ans = CCguess(initarray[i].num);
#pragma omp atomic
        hist[ans]++;
        for (j = 0; j < 4; j++)
            printf("%d", initarray[i].num[j]);
        printf(",%d ", ans);
        if (ans == 0) {
            /* a secret the solver failed to crack within 9 guesses */
            printf("\nError!\n");
            exit(1);
        }
        if (i % 10 == 9)
            printf("\n");
    }
    printf("time:");
    for (j = 1; j < 11; j++)
        printf("%5d", j);
    printf("\n ");
    for (j = 1; j < 11; j++) {
        cnt += hist[j] * j;
        printf("%5d", hist[j]);
    }
    printf("\naverage cnt:%12f\n", cnt / (5040 + 0.0));
    return 0;
}
|
parallel_priority_queue.h | /***************************************************************************
* include/stxxl/bits/containers/parallel_priority_queue.h
*
* Part of the STXXL. See http://stxxl.sourceforge.net
*
* Copyright (C) 2014-2015 Thomas Keh <thomas.keh@student.kit.edu>
* Copyright (C) 2014-2015 Timo Bingmann <tb@panthema.net>
*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
**************************************************************************/
#ifndef STXXL_CONTAINERS_PARALLEL_PRIORITY_QUEUE_HEADER
#define STXXL_CONTAINERS_PARALLEL_PRIORITY_QUEUE_HEADER
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <ctime>
#include <list>
#include <utility>
#include <numeric>
#include <vector>
#if STXXL_PARALLEL
#include <omp.h>
#include <parallel/algorithm>
#include <parallel/numeric>
#endif
#include <stxxl/bits/common/winner_tree.h>
#include <stxxl/bits/common/custom_stats.h>
#include <stxxl/bits/common/mutex.h>
#include <stxxl/bits/common/timer.h>
#include <stxxl/bits/common/is_heap.h>
#include <stxxl/bits/common/swap_vector.h>
#include <stxxl/bits/common/rand.h>
#include <stxxl/bits/config.h>
#include <stxxl/bits/io/request_operations.h>
#include <stxxl/bits/mng/block_alloc.h>
#include <stxxl/bits/mng/buf_ostream.h>
#include <stxxl/bits/mng/prefetch_pool.h>
#include <stxxl/bits/mng/block_manager.h>
#include <stxxl/bits/mng/read_write_pool.h>
#include <stxxl/bits/mng/typed_block.h>
#include <stxxl/bits/namespace.h>
#include <stxxl/bits/noncopyable.h>
#include <stxxl/bits/parallel.h>
#include <stxxl/bits/verbose.h>
#include <stxxl/types>
STXXL_BEGIN_NAMESPACE
namespace ppq_local {
/*!
* A random-access iterator class for block oriented data. The iterator is
* intended to be provided by the internal_array and external_array classes
* and to be used by the multiway_merge algorithm as input iterators.
*
* \tparam ValueType the value type
*/
template <class ValueType>
class ppq_iterator
{
public:
    typedef ValueType value_type;
    typedef value_type& reference;
    typedef value_type* pointer;
    typedef ptrdiff_t difference_type;
    typedef std::random_access_iterator_tag iterator_category;
    typedef std::vector<std::pair<pointer, pointer> > block_pointers_type;

protected:
    typedef ppq_iterator self_type;

    //! pointer to a vector of begin/end pointer pairs
    //! They allow access to the data blocks.
    const block_pointers_type* m_block_pointers;

    //! pointer to the current element (NULL at the global end)
    pointer m_current;

    //! global index of the current element
    size_t m_index;

    //! index of the current element's block
    size_t m_block_index;

    //! size of each data block in items
    size_t m_block_items;

public:
    //! default constructor (should not be used directly)
    ppq_iterator()
        : m_block_pointers(NULL)
    { }

    //! constructor
    //!
    //! \param block_pointers A reference to the properly initialized vector of begin and end pointers.
    //!     One pair for each block. The pointers should be valid for all blocks that
    //!     are expected to be accessed with this iterator.
    //! \param block_items The size of a single block. If there is only one block (e.g. if the iterator
    //!     belongs to an internal_array), use the total size here.
    //! \param index The index of the current element (global - index 0 belongs to the first element
    //!     in the first block, no matter if the values are still valid)
    ppq_iterator(const block_pointers_type* block_pointers, size_t block_items,
                 size_t index)
        : m_block_pointers(block_pointers),
          m_index(index),
          m_block_items(block_items)
    {
        // derives m_block_index and m_current from m_index
        update();
    }

    //! returns the value's index in the internal or external array
    size_t get_index() const
    {
        return m_index;
    }

    reference operator * () const
    {
        assert(m_current);
        return *m_current;
    }

    pointer operator -> () const
    {
        return &(operator * ());
    }

    //! random access relative to the current position; recomputes the
    //! block from the global index on every call
    reference operator [] (difference_type relative_index) const
    {
        const difference_type index = m_index + relative_index;
        const size_t block_index = index / m_block_items;
        const size_t local_index = index % m_block_items;
        assert(block_index < m_block_pointers->size());
        assert((*m_block_pointers)[block_index].first + local_index
               < (*m_block_pointers)[block_index].second);
        return *((*m_block_pointers)[block_index].first + local_index);
    }

    //! prefix-increment operator
    self_type& operator ++ ()
    {
        ++m_index;
        ++m_current;

        // crossed the end of the current block?
        if (UNLIKELY(m_current == (*m_block_pointers)[m_block_index].second)) {
            if (m_block_index + 1 < m_block_pointers->size()) {
                // advance into the next block
                m_current = (*m_block_pointers)[++m_block_index].first;
            }
            else {
                // global end: leave m_current at the last block's end,
                // m_block_index one past the last block
                assert(m_block_index + 1 == m_block_pointers->size());
                m_current = (*m_block_pointers)[m_block_index++].second;
            }
        }
        return *this;
    }

    //! prefix-decrement operator
    self_type& operator -- ()
    {
        assert(m_index > 0);
        --m_index;

        if (m_block_index >= m_block_pointers->size()
            || m_current == (*m_block_pointers)[m_block_index].first) {
            // begin of current block or global end: step back into the
            // previous block's last element
            assert(m_block_index > 0);
            assert(m_block_index <= m_block_pointers->size());
            m_current = (*m_block_pointers)[--m_block_index].second - 1;
        }
        else {
            --m_current;
        }
        return *this;
    }

    self_type operator + (difference_type addend) const
    {
        return self_type(m_block_pointers, m_block_items, m_index + addend);
    }

    self_type& operator += (difference_type addend)
    {
        m_index += addend;
        update();
        return *this;
    }

    self_type operator - (difference_type subtrahend) const
    {
        return self_type(m_block_pointers, m_block_items, m_index - subtrahend);
    }

    //! distance between two iterators (in items)
    difference_type operator - (const self_type& o) const
    {
        return (m_index - o.m_index);
    }

    self_type& operator -= (difference_type subtrahend)
    {
        m_index -= subtrahend;
        update();
        return *this;
    }

    // comparisons are by global index only; both iterators are assumed
    // to refer to the same block_pointers vector

    bool operator == (const self_type& o) const
    {
        return m_index == o.m_index;
    }

    bool operator != (const self_type& o) const
    {
        return m_index != o.m_index;
    }

    bool operator < (const self_type& o) const
    {
        return m_index < o.m_index;
    }

    bool operator <= (const self_type& o) const
    {
        return m_index <= o.m_index;
    }

    bool operator > (const self_type& o) const
    {
        return m_index > o.m_index;
    }

    bool operator >= (const self_type& o) const
    {
        return m_index >= o.m_index;
    }

    friend std::ostream& operator << (std::ostream& os, const ppq_iterator& i)
    {
        return os << "[" << i.m_index << "]";
    }

private:
    //! updates m_block_index and m_current based on m_index
    inline void update()
    {
        m_block_index = m_index / m_block_items;
        const size_t local_index = m_index % m_block_items;

        if (m_block_index < m_block_pointers->size()) {
            m_current = (*m_block_pointers)[m_block_index].first + local_index;
            assert(m_current <= (*m_block_pointers)[m_block_index].second);
        }
        else {
            // global end if end is beyond the last real block
            assert(m_block_index == m_block_pointers->size());
            assert(local_index == 0);
            //-tb old: m_current = (*m_block_pointers)[m_block_index - 1].second;
            m_current = NULL;
        }
    }
};
/*!
* Internal arrays store a sorted sequence of values in RAM, which will be
* merged together into the deletion buffer when it needs to be
* refilled. Internal arrays are constructed from the insertions heaps when
* they overflow.
*/
template <class ValueType>
class internal_array : private noncopyable
{
public:
    typedef ValueType value_type;
    typedef ppq_iterator<value_type> iterator;

protected:
    typedef typename iterator::block_pointers_type block_pointers_type;

    //! Contains the items of the sorted sequence.
    std::vector<value_type> m_values;

    //! Index of the current head
    unsigned_type m_min_index;

    //! Level of internal array (Sander's PQ: group number)
    unsigned_type m_level;

    //! Begin and end pointers of the array
    //! This is used by the iterator
    block_pointers_type m_block_pointers;

public:
    //! Default constructor. Don't use this directly. Needed for regrowing in
    //! surrounding vector.
    internal_array() : m_min_index(0) { }

    //! Constructor which takes a value vector. The value vector is empty
    //! afterwards.
    internal_array(std::vector<value_type>& values,
                   unsigned_type min_index = 0,
                   unsigned_type level = 0)
        : m_values(), m_min_index(min_index), m_level(level),
          m_block_pointers(1)
    {
        std::swap(m_values, values);
        // Build the single begin/end pointer pair used by the iterator.
        // FIX: the previous code used &(*m_values.begin()) and
        // &(*m_values.end()); dereferencing end() (and begin() of an
        // empty vector) is undefined behavior and aborts under checked
        // iterator implementations. Derive both pointers from the first
        // element instead.
        value_type* begin_ptr = m_values.empty() ? NULL : &m_values[0];
        m_block_pointers[0] =
            std::make_pair(begin_ptr, begin_ptr + m_values.size());
    }

    //! Swap internal_array with another one.
    void swap(internal_array& o)
    {
        using std::swap;

        swap(m_values, o.m_values);
        swap(m_min_index, o.m_min_index);
        swap(m_level, o.m_level);
        swap(m_block_pointers, o.m_block_pointers);
    }

    //! Swap internal_array with another one.
    friend void swap(internal_array& a, internal_array& b)
    {
        a.swap(b);
    }

    //! Random access operator
    inline value_type& operator [] (size_t i)
    {
        return m_values[i];
    }

    //! Use inc_min(diff) if multiple values have been extracted.
    inline void inc_min(size_t diff = 1)
    {
        m_min_index += diff;
    }

    //! The currently smallest element in the array.
    inline const value_type & get_min() const
    {
        return m_values[m_min_index];
    }

    //! The index of the currently smallest element in the array.
    inline size_t get_min_index() const
    {
        return m_min_index;
    }

    //! The index of the largest element in the array.
    inline size_t get_max_index() const
    {
        return (m_values.size() - 1);
    }

    //! Returns if the array has run empty.
    inline bool empty() const
    {
        return (m_min_index >= m_values.size());
    }

    //! Make this array empty.
    inline void make_empty()
    {
        m_min_index = m_values.size();
    }

    //! Returns the current size of the array.
    inline size_t size() const
    {
        return (m_values.size() - m_min_index);
    }

    //! Returns the initial size of the array.
    inline size_t capacity() const
    {
        return m_values.size();
    }

    //! Returns the level (group number) of the array.
    inline unsigned_type level() const
    {
        return m_level;
    }

    //! Return the amount of internal memory used by an array with the capacity
    //! in number of items.
    static size_t int_memory(size_t capacity)
    {
        return sizeof(internal_array) + capacity * sizeof(value_type);
    }

    //! Return the amount of internal memory used by the array
    inline size_t int_memory() const
    {
        return int_memory(m_values.capacity());
    }

    //! Begin iterator
    inline iterator begin() const
    {
        // not const, unfortunately.
        return iterator(&m_block_pointers, capacity(), m_min_index);
    }

    //! End iterator
    inline iterator end() const
    {
        // not const, unfortunately.
        return iterator(&m_block_pointers, capacity(), capacity());
    }
};
template <class ExternalArrayType>
class external_array_writer;
/*!
* External array stores a sorted sequence of values on the hard disk and
* allows access to the first block (containing the smallest values). The
* class uses buffering and prefetching in order to improve the performance.
*
* \tparam ValueType Type of the contained objects (POD with no references to
* internal memory).
*
* \tparam BlockSize External block size. Default =
* STXXL_DEFAULT_BLOCK_SIZE(ValueType).
*
* \tparam AllocStrategy Allocation strategy for the external memory. Default =
* STXXL_DEFAULT_ALLOC_STRATEGY.
*/
template <
class ValueType,
unsigned_type BlockSize = STXXL_DEFAULT_BLOCK_SIZE(ValueType),
class AllocStrategy = STXXL_DEFAULT_ALLOC_STRATEGY
>
class external_array : private noncopyable
{
public:
typedef ValueType value_type;
typedef ppq_iterator<value_type> iterator;
typedef external_array<value_type, BlockSize, AllocStrategy> self_type;
typedef typed_block<BlockSize, value_type> block_type;
typedef read_write_pool<block_type> pool_type;
typedef std::vector<BID<BlockSize> > bid_vector;
typedef typename bid_vector::iterator bid_iterator;
typedef std::vector<block_type*> block_vector;
typedef std::vector<request_ptr> request_vector;
typedef std::vector<value_type> minima_vector;
typedef typename iterator::block_pointers_type block_pointers_type;
typedef external_array_writer<self_type> writer_type;
//! The number of elements fitting into one block
enum {
block_size = BlockSize,
block_items = BlockSize / sizeof(value_type)
};
static const bool debug = false;
protected:
//! The total size of the external array in items. Cannot be changed
//! after construction.
external_size_type m_capacity;
//! Number of blocks, again: calculated at construction time.
unsigned_type m_num_blocks;
//! Level of external array (Sander's PQ: group number)
unsigned_type m_level;
//! Common prefetch and write buffer pool
pool_type* m_pool;
//! The IDs of each block in external memory.
bid_vector m_bids;
//! A vector of size m_num_blocks with block_type pointers, some of them
//! will be filled while writing, but most are NULL.
block_vector m_blocks;
//! Begin and end pointers for each block, used for merging with
//! ppq_iterator.
block_pointers_type m_block_pointers;
//! The read request pointers are used to wait until the block has been
//! completely fetched.
request_vector m_requests;
//! stores the minimum value of each block
minima_vector m_minima;
//! Is array in write phase? True = write phase, false = read phase.
bool m_write_phase;
//! The total number of elements minus the number of extracted values
external_size_type m_size;
//! The read position in the array.
external_size_type m_index;
//! The index behind the last element that is located in RAM (or is at
//! least requested to be so)
external_size_type m_end_index;
//! The first unhinted block index.
unsigned_type m_unhinted_block;
//! The first unhinted block index as it was before the
//! prepare_rebuilding_hints() call. Used for removal of hints which aren't
//! needed anymore.
unsigned_type m_old_unhinted_block;
//! allow writer to access to all variables
friend class external_array_writer<self_type>;
public:
/*!
* Constructs an external array
*
* \param size The total number of elements. Cannot be changed after
* construction.
*
* \param num_prefetch_blocks Number of blocks to prefetch from hard disk
*
* \param num_write_buffer_blocks Size of the write buffer in number of
* blocks
*/
external_array(external_size_type size, pool_type* pool, unsigned_type level = 0)
: // constants
m_capacity(size),
m_num_blocks((size_t)div_ceil(m_capacity, block_items)),
m_level(level),
m_pool(pool),
// vectors
m_bids(m_num_blocks),
m_blocks(m_num_blocks, reinterpret_cast<block_type*>(1)),
m_block_pointers(m_num_blocks),
m_requests(m_num_blocks, NULL),
m_minima(m_num_blocks),
// state
m_write_phase(true),
// indices
m_size(0),
m_index(0),
m_end_index(0),
m_unhinted_block(0),
m_old_unhinted_block(0)
{
assert(m_capacity > 0);
// allocate blocks in EM.
block_manager* bm = block_manager::get_instance();
bm->new_blocks(AllocStrategy(), m_bids.begin(), m_bids.end());
}
//! Default constructor. Don't use this directy. Needed for regrowing in
//! surrounding vector.
external_array()
: // constants
m_capacity(0),
m_num_blocks(0),
m_level(0),
m_pool(NULL),
// vectors
m_bids(0),
m_blocks(0),
m_block_pointers(0),
m_requests(0),
m_minima(0),
// state
m_write_phase(false),
// indices
m_size(0),
m_index(0),
m_end_index(0),
m_unhinted_block(0),
m_old_unhinted_block(0)
{ }
//! Swap external_array with another one.
void swap(external_array& o)
{
using std::swap;
// constants
swap(m_capacity, o.m_capacity);
swap(m_num_blocks, o.m_num_blocks);
swap(m_level, o.m_level);
swap(m_pool, o.m_pool);
// vectors
swap(m_bids, o.m_bids);
swap(m_requests, o.m_requests);
swap(m_blocks, o.m_blocks);
swap(m_block_pointers, o.m_block_pointers);
swap(m_minima, o.m_minima);
// state
swap(m_write_phase, o.m_write_phase);
// indices
swap(m_size, o.m_size);
swap(m_index, o.m_index);
swap(m_end_index, o.m_end_index);
swap(m_unhinted_block, o.m_unhinted_block);
swap(m_old_unhinted_block, o.m_old_unhinted_block);
}
//! Swap external_array with another one.
friend void swap(external_array& a, external_array& b)
{
a.swap(b);
}
//! Destructor
~external_array()
{
if (m_size == 0) return;
// not all data has been read! this only happen when the PPQ is
// destroyed while containing data.
const unsigned_type block_index = m_index / block_items;
const unsigned_type end_block_index = get_end_block_index();
// released blocks currently held in RAM
for (size_t i = block_index; i < end_block_index; ++i) {
m_pool->add_prefetch(m_blocks[i]);
// cannot report the number of freed blocks to PPQ.
}
// cancel currently hinted blocks
for (size_t i = end_block_index; i < m_unhinted_block; ++i) {
STXXL_DEBUG("ea[" << this << "]: discarding prefetch hint on"
" block " << i);
m_requests[i]->cancel();
m_requests[i]->wait();
// put block back into pool
m_pool->add_prefetch(m_blocks[i]);
// invalidate block entry
m_blocks[i] = NULL;
m_requests[i] = request_ptr();
}
// figure out first block that is still allocated in EM.
bid_iterator i_begin = m_bids.begin() + block_index;
block_manager::get_instance()->delete_blocks(i_begin, m_bids.end());
// check that all is empty
for (size_t i = block_index; i < end_block_index; ++i)
assert(m_blocks[i] == NULL);
}
//! Returns the capacity in items.
size_t capacity() const
{
return m_capacity;
}
//! Returns the current size in items.
size_t size() const
{
return m_size;
}
//! Returns true if the array is empty.
bool empty() const
{
return (m_size == 0);
}
//! Returns the level (group number) of the array.
inline unsigned_type level() const
{
return m_level;
}
//! Return the number of blocks.
size_t num_blocks() const
{
return m_num_blocks;
}
//! Returns memory usage of EA with given capacity, excluding blocks loaded
//! in RAM. Blocks belong to prefetch pool.
static size_t int_memory(size_t capacity)
{
size_t num_blocks = div_ceil(capacity, block_items);
return sizeof(external_array)
+ num_blocks * sizeof(typename bid_vector::value_type)
+ num_blocks * sizeof(typename block_vector::value_type)
+ num_blocks * sizeof(typename block_pointers_type::value_type)
+ num_blocks * sizeof(typename request_vector::value_type)
+ num_blocks * sizeof(typename minima_vector::value_type);
}
//! Return the amount of internal memory used by the EA.
inline size_t int_memory() const
{
return int_memory(m_capacity);
}
//! Returns the number elements available in internal memory
size_t buffer_size() const
{
return (m_end_index - m_index);
}
//! Returns the block beyond the block in which *(m_end_index-1) is located.
unsigned_type get_end_block_index() const
{
unsigned_type end_block_index = m_end_index / block_items;
// increase block index if inside the block
if (m_end_index % block_items != 0) ++end_block_index;
assert(end_block_index <= m_num_blocks);
return end_block_index;
}
//! Returns the block in which m_index is located.
inline unsigned_type get_current_block_index() const
{
return (m_index / block_items);
}
//! Returns a random-access iterator to the begin of the data
//! in internal memory.
iterator begin() const
{
//-TODO?: assert(block_valid(m_index / block_items) || m_index == m_capacity);
return iterator(&m_block_pointers, block_items, m_index);
}
//! Returns a random-access iterator 1 behind the end of the data
//! in internal memory.
iterator end() const
{
//-TODO? assert(!block_valid(m_end_index / block_items) || m_end_index == m_capacity);
return iterator(&m_block_pointers, block_items, m_end_index);
}
//! Returns the smallest element in the array
const value_type & get_min()
{
return *begin();
}
//! Returns if there is data in EM, that's not randomly accessible.
bool has_em_data() const
{
return (get_end_block_index() < m_num_blocks);
}
//! Returns the smallest element of the first block NOT in internal memory
//! (or at least requested to be in internal memory)
const value_type & get_next_block_min() const
{
assert(get_end_block_index() < m_num_blocks);
return m_minima[get_end_block_index()];
}
//! Returns if the data requested to be in internal memory is
//! completely fetched. True if wait() has been called before.
bool valid() const
{
bool result = true;
const unsigned_type block_index = m_index / block_items;
const unsigned_type end_block_index = get_end_block_index();
for (unsigned_type i = block_index; i < end_block_index; ++i) {
result = result && block_valid(i);
}
return result;
}
//! Random access operator for data in internal memory
//! You should call wait() once after fetching data from EM.
value_type& operator [] (size_t i) const
{
assert(i < m_capacity);
const size_t block_index = i / block_items;
const size_t local_index = i % block_items;
assert(i < m_capacity);
assert(block_valid(block_index));
return m_blocks[block_index]->elem[local_index];
}
public:
//! prepare the pool for writing external arrays with given number of
//! threads
static void prepare_write_pool(pool_type& pool, unsigned_type num_threads)
{
unsigned_type write_blocks = num_threads;
// need at least one
if (write_blocks == 0) write_blocks = 1;
// for holding boundary blocks
write_blocks *= 2;
// more disks than threads?
if (write_blocks < config::get_instance()->disks_number())
write_blocks = config::get_instance()->disks_number();
#if STXXL_DEBUG_ASSERTIONS
// required for re-reading the external array
write_blocks = 2 * write_blocks;
#endif
if (pool.size_write() < write_blocks) {
STXXL_ERRMSG("WARNING: enlarging PPQ write pool to " <<
write_blocks << " blocks = " <<
write_blocks * block_size / 1024 / 1024 << " MiB");
pool.resize_write(write_blocks);
}
}
protected:
//! prepare the external_array for writing using multiway_merge() with
//! num_threads. this method is called by the external_array_writer's
//! constructor.
void prepare_write(unsigned_type num_threads)
{
prepare_write_pool(*m_pool, num_threads);
}
//! finish the writing phase after multiway_merge() filled the vector. this
//! method is called by the external_array_writer's destructor..
void finish_write()
{
// check that all blocks where written
for (unsigned_type i = 0; i < m_num_blocks; ++i)
assert(m_blocks[i] == NULL);
// compatibility to the block write interface
m_size = m_capacity;
m_index = 0;
m_end_index = 0;
m_unhinted_block = 0;
m_write_phase = false;
}
//! Called by the external_array_writer to read a block from disk into
//! m_blocks[]. If the block is marked as uninitialized, then no read is
//! performed. This is the usual case, and in theory, no block ever has be
//! re-read from disk, since all can be written fully. However, we do
//! support re-reading blocks for debugging purposes inside
//! multiway_merge(), in a full performance build re-reading never occurs.
void read_block(size_t block_index)
{
assert(block_index < m_num_blocks);
assert(m_blocks[block_index] == NULL ||
m_blocks[block_index] == reinterpret_cast<block_type*>(1));
if (m_blocks[block_index] == reinterpret_cast<block_type*>(1))
{
// special marker: this block is uninitialized -> no need to read
// from disk.
m_blocks[block_index] = m_pool->steal();
}
else
{
// block was already written, have to read from EM.
STXXL_DEBUG("ea[" << this << "]: "
"read_block needs to re-read block index=" << block_index);
static bool s_warned = false;
if (!s_warned)
{
s_warned = true;
STXXL_ERRMSG("ppq::external_array[" << this << "] "
"writer requested to re-read block from EM.");
STXXL_ERRMSG("This should never occur in full-performance mode, "
"verify that you run in debug mode.");
}
// this re-reading is not necessary for full performance builds, so
// we immediately wait for the I/O to be completed.
m_blocks[block_index] = m_pool->steal();
request_ptr req = m_pool->read(m_blocks[block_index], m_bids[block_index]);
req->wait();
assert(req->poll());
assert(m_blocks[block_index]);
}
}
//! Called by the external_array_writer to write a block from m_blocks[] to
//! disk. Prior to writing and releasing the memory, extra information is
//! preserved.
void write_block(size_t block_index)
{
assert(block_index < m_num_blocks);
assert(m_blocks[block_index] != NULL &&
m_blocks[block_index] != reinterpret_cast<block_type*>(1));
// calculate minimum and maximum values
const internal_size_type this_block_items =
std::min<internal_size_type>(block_items, m_capacity - block_index * (external_size_type)block_items);
STXXL_DEBUG("ea[" << this << "]: write_block index=" << block_index <<
" this_block_items=" << this_block_items);
assert(this_block_items > 0);
block_type& this_block = *m_blocks[block_index];
m_minima[block_index] = this_block[0];
// write out block (in background)
m_pool->write(m_blocks[block_index], m_bids[block_index]);
m_blocks[block_index] = NULL;
}
public:
//! \name Prefetching Hints
//! \{
//! Prefetch the next unhinted block, requires one free read block from the
//! global pool.
void hint_next_block()
{
assert(m_unhinted_block < m_num_blocks);
// will read (prefetch) block i
size_t i = m_unhinted_block++;
STXXL_DEBUG("ea[" << this << "]: prefetching block_index=" << i);
assert(m_pool->size_write() > 0);
assert(m_blocks[i] == NULL);
// steal block from pool, but also perform read via pool, since this
// checks the associated write_pool.
m_blocks[i] = m_pool->steal_prefetch();
m_requests[i] = m_pool->read(m_blocks[i], m_bids[i]);
}
//! Returns if there is data in EM, that's not already hinted
//! to the prefetcher.
bool has_unhinted_em_data() const
{
return (m_unhinted_block < m_num_blocks);
}
//! Returns the smallest element of the next hint candidate (the block
//! after the last hinted one).
const value_type & get_next_hintable_min() const
{
assert(m_unhinted_block < m_num_blocks);
return m_minima[m_unhinted_block];
}
//! Returns the number of hinted blocks.
size_t num_hinted_blocks() const
{
assert(get_end_block_index() <= m_unhinted_block);
return m_unhinted_block - get_end_block_index();
}
//! This method prepares rebuilding the hints (this is done after creating
//! a new EA in order to always have globally the n blocks hinted which
//! will be fetched first). Resets m_unhinted_block to the first block not
//! in RAM. Thereafter prehint_next_block() is used to advance this index.
//! finish_rebuilding_hints() should be called after placing all hints in
//! order to clean up the prefetch pool.
void rebuild_hints_prepare()
{
m_old_unhinted_block = m_unhinted_block;
m_unhinted_block = get_end_block_index();
assert(get_end_block_index() <= m_old_unhinted_block);
}
//! Advance m_unhinted_block index without actually prefetching.
void rebuild_hints_prehint_next_block()
{
assert(m_unhinted_block < m_num_blocks);
// will read (prefetch) block after cancellations.
STXXL_DEBUG("ea[" << this << "]: pre-hint of" <<
" block_index=" << m_unhinted_block);
++m_unhinted_block;
}
//! Cancel hints which aren't needed anymore from the prefetcher and fixes
//! it's size. prepare_rebuilding_hints() must be called before!
void rebuild_hints_cancel()
{
for (size_t i = m_unhinted_block; i < m_old_unhinted_block; ++i) {
STXXL_DEBUG("ea[" << this << "]: discarding prefetch hint on"
" block " << i);
m_requests[i]->cancel();
m_requests[i]->wait();
// put block back into pool
m_pool->add_prefetch(m_blocks[i]);
// invalidate block entry
m_blocks[i] = NULL;
m_requests[i] = request_ptr();
}
}
//! Perform real-hinting of pre-hinted blocks, since now canceled blocks
//! are available.
void rebuild_hints_finish()
{
for (size_t i = m_old_unhinted_block; i < m_unhinted_block; ++i)
{
STXXL_DEBUG("ea[" << this << "]: perform real-hinting of"
" block " << i);
assert(m_pool->size_write() > 0);
assert(m_blocks[i] == NULL);
m_blocks[i] = m_pool->steal_prefetch();
m_requests[i] = m_pool->read(m_blocks[i], m_bids[i]);
}
}
//! \}
public:
//! \name Waiting and Removal
//! \{
//! Waits until the next prefetched block is read into RAM, then polls for
//! any further blocks that are done as well. Returns how many blocks were
//! successfully read.
unsigned_type wait_next_blocks()
{
size_t begin = get_end_block_index(), i = begin;
STXXL_DEBUG("ea[" << this << "]: waiting for" <<
" block index=" << i <<
" end_index=" << m_end_index);
assert(has_em_data());
assert(i < m_unhinted_block);
assert(m_bids[i].valid());
assert(m_requests[i].valid());
// wait for prefetched request to finish.
m_requests[i]->wait();
assert(m_requests[i]->poll());
assert(m_blocks[i]);
update_block_pointers(i);
++i;
// poll further hinted blocks if already done
while (i < m_unhinted_block && m_requests[i]->poll())
{
STXXL_DEBUG("ea[" << this << "]: poll-ok for" <<
" block index=" << i <<
" end_index=" << m_end_index);
m_requests[i]->wait();
assert(m_requests[i]->poll());
assert(m_blocks[i]);
update_block_pointers(i);
++i;
}
m_end_index = std::min(m_capacity, i * (external_size_type)block_items);
return i - begin;
}
//! Waits until all hinted blocks are read into RAM. Returns how many
//! blocks were successfully read.
unsigned_type wait_all_hinted_blocks()
{
size_t begin = get_end_block_index(), i = begin;
while (i < m_unhinted_block)
{
STXXL_DEBUG("wait_all_hinted_blocks(): ea[" << this << "]: waiting for" <<
" block index=" << i <<
" end_index=" << m_end_index);
m_requests[i]->wait();
assert(m_requests[i]->poll());
assert(m_blocks[i]);
update_block_pointers(i);
++i;
}
m_end_index = std::min(m_capacity, i * (external_size_type)block_items);
return i - begin;
}
//! Returns the number of blocks loaded in RAM.
size_t num_used_blocks() const
{
return get_end_block_index() - (m_index / block_items);
}
//! Removes the first n elements from the array. Returns the number of
//! blocks released into the block pool.
unsigned_type remove_items(size_t n)
{
    // removal must stay inside the capacity and the currently loaded range
    assert(m_index + n <= m_capacity);
    assert(m_index + n <= m_end_index);
    assert(m_size >= n);
    STXXL_DEBUG("ea[" << this << "]: remove " << n << " items");
    if (n == 0)
        return 0;
    const size_t block_index = m_index / block_items;
    const size_t index_after = m_index + n;
    size_t block_index_after = index_after / block_items;
    size_t local_index_after = index_after % block_items;
    // if the EA runs empty, also release the partially consumed last block
    if (m_size == n && local_index_after != 0) // end of EA
        ++block_index_after;
    assert(block_index_after <= m_num_blocks);
    bid_iterator i_begin = m_bids.begin() + block_index;
    bid_iterator i_end = m_bids.begin() + block_index_after;
    assert(i_begin <= i_end);
    // free the external-memory space of all fully consumed blocks
    block_manager::get_instance()->delete_blocks(i_begin, i_end);
    for (size_t i = block_index; i < block_index_after; ++i) {
        assert(block_valid(i));
        // return block to pool
        m_pool->add_prefetch(m_blocks[i]);
    }
    m_index = index_after;
    m_size -= n;
    unsigned_type blocks_freed = block_index_after - block_index;
    STXXL_DEBUG("ea[" << this << "]: after remove:" <<
                " index_after=" << index_after <<
                " block_index_after=" << block_index_after <<
                " local_index_after=" << local_index_after <<
                " blocks_freed=" << blocks_freed <<
                " num_blocks=" << m_num_blocks <<
                " capacity=" << m_capacity);
    assert(block_index_after <= m_num_blocks);
    // at most one block outside of the currently loaded range
    assert(block_index_after <= get_end_block_index());
    return blocks_freed;
}
//! \}
protected:
//! Returns if the block with the given index is completely fetched.
bool block_valid(size_t block_index) const
{
    // during the write phase a block is valid as soon as it is allocated
    if (m_write_phase)
        return (bool)m_blocks[block_index];
    // otherwise the block must exist and its read request must be finished
    if (block_index >= m_num_blocks)
        return false;
    return m_requests[block_index] && m_requests[block_index]->poll();
}
//! Updates the m_block_pointers vector.
//! Should be called after any steal() or read() operation.
//! This is necessary for the iterators to work properly.
inline void update_block_pointers(size_t block_index)
{
    STXXL_DEBUG("ea[" << this << "]: updating block pointers for " << block_index);
    // begin pointer always points at the block's first item
    m_block_pointers[block_index].first = m_blocks[block_index]->begin();
    if (block_index + 1 != m_num_blocks)
        // interior blocks are completely filled
        m_block_pointers[block_index].second = m_blocks[block_index]->end();
    else
        // the last block may be partially filled: end is begin plus the
        // number of items actually stored in it
        m_block_pointers[block_index].second =
            m_block_pointers[block_index].first
            + (m_capacity - block_index * block_items);
    assert(m_block_pointers[block_index].first != NULL);
    assert(m_block_pointers[block_index].second != NULL);
}
inline size_t last_block_items()
{
    // number of items in the final (possibly partial) block; a remainder
    // of zero means the last block is completely full
    const size_t remainder = m_capacity % block_items;
    return (remainder == 0) ? (size_t)block_items : remainder;
}
};
/**
* An external_array can only be written using an external_array_writer
* object. The writer objects provides iterators which are designed to be used
* by stxxl::parallel::multiway_merge() to write the external memory blocks in
* parallel. Thus in the writer we coordinate thread-safe access to the blocks
* using reference counting.
*
* An external_array_writer::iterator has two states: normal and "live". In
* normal mode, the iterator only has a valid index into the external array's
* items. In normal mode, only index calculations are possible. Once
* operator*() is called, the iterators goes into "live" mode by requesting
* access to the corresponding block. Using reference counting the blocks is
* written once all iterators are finished with the corresponding block. Since
* with operator*() we cannot know if the value is going to be written or read,
* when going to live mode, the block must be read from EM. This read overhead,
* however, is optimized by marking blocks as uninitialized in external_array,
 * and skipping reads for them. In a full performance build, no block needs to
* be read from disk. Reads only occur in debug mode, when the results are
 * verified.
*
* The iterator's normal/live mode only stays active for the individual
 * iterator object. When an iterator is copied, assigned, or used in index
 * calculations, the live mode is NOT inherited! The exception is prefix
 * operator ++, which is used by
* multiway_merge() to fill an array. Thus the implementation of the iterator
* heavily depends on the behavior of multiway_merge() and is optimized for it.
*/
template <class ExternalArrayType>
class external_array_writer : public noncopyable
{
public:
    typedef ExternalArrayType ea_type;
    typedef external_array_writer self_type;
    typedef typename ea_type::value_type value_type;
    typedef typename ea_type::block_type block_type;
    //! prototype declaration of nested class.
    class iterator;
    //! scope based debug variable
    static const bool debug = false;

protected:
    //! reference to the external array to be written
    ea_type& m_ea;

#ifndef NDEBUG
    //! total number of iterators referencing this writer
    unsigned int m_ref_total;
#endif

    //! reference counters for the number of live iterators on the
    //! corresponding block in external_array.
    std::vector<unsigned int> m_ref_count;

    //! mutex for reference counting array (this is actually nicer than
    //! openmp's critical)
    mutex m_mutex;

    //! optimization: hold live iterators for the expected boundary blocks of
    //! multiway_merge().
    std::vector<iterator> m_live_boundary;

protected:
    //! read block into memory and increase reference count (called when an
    //! iterator goes live on the block).
    block_type * get_block_ref(size_t block_index)
    {
        scoped_mutex_lock lock(m_mutex);
        assert(block_index < m_ea.num_blocks());
        // post-increment: ref holds the count BEFORE this new reference
        unsigned int ref = m_ref_count[block_index]++;
#ifndef NDEBUG
        ++m_ref_total;
#endif
        if (ref == 0) {
            // first live iterator on this block: fetch it (a no-op read for
            // uninitialized blocks in a performance build)
            STXXL_DEBUG("get_block_ref block_index=" << block_index <<
                        " ref=" << ref << " reading.");
            m_ea.read_block(block_index);
        }
        else {
            STXXL_DEBUG("get_block_ref block_index=" << block_index <<
                        " ref=" << ref);
        }
        return m_ea.m_blocks[block_index];
    }

    //! decrease reference count on the block, and possibly write it to disk
    //! (called when an iterator releases live mode).
    void free_block_ref(size_t block_index)
    {
        scoped_mutex_lock lock(m_mutex);
        assert(block_index < m_ea.num_blocks());
#ifndef NDEBUG
        assert(m_ref_total > 0);
        --m_ref_total;
#endif
        unsigned int ref = --m_ref_count[block_index];
        if (ref == 0) {
            // last live iterator released the block: flush it to disk
            STXXL_DEBUG("free_block_ref block_index=" << block_index <<
                        " ref=" << ref << " written.");
            m_ea.write_block(block_index);
        }
        else {
            STXXL_DEBUG("free_block_ref block_index=" << block_index <<
                        " ref=" << ref);
        }
    }

    //! allow access to the block_ref functions
    friend class iterator;

public:
    /**
     * An iterator which can be used to write (and read) an external_array via
     * an external_array_writer. See the documentation of external_array_writer.
     */
    class iterator
    {
    public:
        typedef external_array_writer writer_type;
        typedef ExternalArrayType ea_type;
        typedef typename ea_type::value_type value_type;
        typedef value_type& reference;
        typedef value_type* pointer;
        typedef ptrdiff_t difference_type;
        typedef std::random_access_iterator_tag iterator_category;
        typedef iterator self_type;
        static const size_t block_items = ea_type::block_items;
        //! scope based debug variable
        static const bool debug = false;

    protected:
        //! pointer to the external array containing the elements
        writer_type* m_writer;
        //! when operator* or operator-> are called, then the iterator goes
        //! live and allocates a reference to the block's data (possibly
        //! reading it from EM).
        bool m_live;
        //! index of the current element, absolute in the external array
        external_size_type m_index;
        //! index of the current element's block in the external array's block
        //! list. undefined while m_live is false.
        internal_size_type m_block_index;
        //! pointer to the referenced block. undefined while m_live is false.
        block_type* m_block;
        //! pointer to the current element inside the referenced block.
        //! undefined while m_live is false.
        internal_size_type m_current;

    public:
        //! default constructor (should not be used directly)
        iterator()
            : m_writer(NULL), m_live(false), m_index(0)
        { }

        //! construct a new iterator
        iterator(writer_type* writer, external_size_type index)
            : m_writer(writer),
              m_live(false),
              m_index(index)
        {
            STXXL_DEBUG("Construct iterator for index " << m_index);
        }

        //! copy an iterator, the new iterator is _not_ automatically live!
        iterator(const iterator& other)
            : m_writer(other.m_writer),
              m_live(false),
              m_index(other.m_index)
        {
            STXXL_DEBUG("Copy-Construct iterator for index " << m_index);
        }

        //! assign an iterator, the assigned iterator is not automatically live!
        iterator& operator = (const iterator& other)
        {
            if (&other != this)
            {
                STXXL_DEBUG("Assign iterator to index " << other.m_index);
                // release our current block reference before taking over
                if (m_live)
                    m_writer->free_block_ref(m_block_index);
                m_writer = other.m_writer;
                m_live = false;
                m_index = other.m_index;
            }
            return *this;
        }

        ~iterator()
        {
            if (!m_live) return; // no need for cleanup
            m_writer->free_block_ref(m_block_index);
            STXXL_DEBUG("Destruction of iterator for index " << m_index <<
                        " in block " << m_index / block_items);
        }

        //! return the current absolute index inside the external array.
        external_size_type get_index() const
        {
            return m_index;
        }

        //! allocates a reference to the block's data (possibly reading it from
        //! EM).
        void make_live()
        {
            assert(!m_live);
            // calculate block and index inside
            m_block_index = m_index / block_items;
            m_current = m_index % block_items;
            STXXL_DEBUG("operator*() live request for index=" << m_index <<
                        " block_index=" << m_block_index <<
                        " m_current=" << m_current);
            // get block reference
            m_block = m_writer->get_block_ref(m_block_index);
            m_live = true;
        }

        //! access the current item
        reference operator * ()
        {
            if (UNLIKELY(!m_live))
                make_live();
            return (*m_block)[m_current];
        }

        //! access the current item
        pointer operator -> ()
        {
            return &(operator * ());
        }

        //! prefix-increment operator
        self_type& operator ++ ()
        {
            ++m_index;
            if (UNLIKELY(!m_live)) return *this;
            // if index stays in the same block, everything is fine
            ++m_current;
            if (LIKELY(m_current != block_items)) return *this;
            // release current block
            m_writer->free_block_ref(m_block_index);
            m_live = false;
            return *this;
        }

        // note: arithmetic results are never live (see class comment)
        self_type operator + (difference_type addend) const
        {
            return self_type(m_writer, m_index + addend);
        }
        self_type operator - (difference_type subtrahend) const
        {
            return self_type(m_writer, m_index - subtrahend);
        }
        difference_type operator - (const self_type& o) const
        {
            return (m_index - o.m_index);
        }
        // comparisons are defined solely on the absolute index
        bool operator == (const self_type& o) const
        {
            return m_index == o.m_index;
        }
        bool operator != (const self_type& o) const
        {
            return m_index != o.m_index;
        }
        bool operator < (const self_type& o) const
        {
            return m_index < o.m_index;
        }
        bool operator <= (const self_type& o) const
        {
            return m_index <= o.m_index;
        }
        bool operator > (const self_type& o) const
        {
            return m_index > o.m_index;
        }
        bool operator >= (const self_type& o) const
        {
            return m_index >= o.m_index;
        }
    };

public:
    //! Construct a writer for \p ea; \p num_threads defaults to the OpenMP
    //! thread count (or 1 without parallelism).
    external_array_writer(ea_type& ea, unsigned int num_threads = 0)
        : m_ea(ea),
          m_ref_count(ea.num_blocks(), 0)
    {
#ifndef NDEBUG
        m_ref_total = 0;
#endif
#if STXXL_PARALLEL
        if (num_threads == 0)
            num_threads = omp_get_max_threads();
#else
        if (num_threads == 0)
            num_threads = 1;
#endif
        m_ea.prepare_write(num_threads);
        // optimization: hold live iterators for the boundary blocks which two
        // threads write to. this prohibits the blocks to be written to disk
        // and read again.
        double step = (double)m_ea.capacity() / (double)num_threads;
        m_live_boundary.resize(num_threads - 1);
        for (unsigned int i = 0; i < num_threads - 1; ++i)
        {
            external_size_type index = (external_size_type)((i + 1) * step);
            STXXL_DEBUG("hold index " << index <<
                        " in block " << index / ea_type::block_items);
            m_live_boundary[i] = iterator(this, index);
            m_live_boundary[i].make_live();
        }
    }

    ~external_array_writer()
    {
        m_live_boundary.clear(); // release block boundaries
#ifndef NDEBUG
        // all iterators must be destroyed before the writer
        STXXL_ASSERT(m_ref_total == 0);
#endif
        m_ea.finish_write();
    }

    //! iterator to the first item of the external array
    iterator begin()
    {
        return iterator(this, 0);
    }

    //! iterator one past the last item of the external array
    iterator end()
    {
        return iterator(this, m_ea.capacity());
    }
};
/*!
* The minima_tree contains minima from all sources inside the PPQ. It contains
* four substructures: winner trees for insertion heaps, internal and external
* arrays, each containing the minima from all currently allocated
* structures. These three sources, plus the deletion buffer are combined using
 * a "head" inner tree containing only up to four items.
*/
template <class ParentType>
class minima_tree
{
public:
    typedef ParentType parent_type;
    typedef minima_tree<ParentType> self_type;
    typedef typename parent_type::inv_compare_type compare_type;
    typedef typename parent_type::value_type value_type;
    typedef typename parent_type::proc_vector_type proc_vector_type;
    typedef typename parent_type::internal_arrays_type ias_type;
    typedef typename parent_type::external_arrays_type eas_type;
    static const unsigned initial_ia_size = 2;
    static const unsigned initial_ea_size = 2;

protected:
    //! WinnerTree-Comparator for the head winner tree. It accesses all
    //! relevant data structures from the priority queue.
    struct head_comp
    {
        self_type& m_parent;
        proc_vector_type& m_proc;
        ias_type& m_ias;
        const compare_type& m_compare;

        head_comp(self_type& parent, proc_vector_type& proc,
                  ias_type& ias, const compare_type& compare)
            : m_parent(parent),
              m_proc(proc),
              m_ias(ias),
              m_compare(compare)
        { }

        //! Return the minimum item of the given source (one of enum Types).
        const value_type & get_value(int input) const
        {
            switch (input) {
            case HEAP:
                // minimum of the winning insertion heap (heap root is [0])
                return m_proc[m_parent.m_heaps.top()]->insertion_heap[0];
            case IA:
                // minimum of the winning internal array
                return m_ias[m_parent.m_ia.top()].get_min();
            case EB:
                // smallest not-yet-extracted element of the extract buffer
                return m_parent.m_parent.m_extract_buffer[
                    m_parent.m_parent.m_extract_buffer_index
                ];
            default:
                abort();
            }
        }

        bool operator () (const int a, const int b) const
        {
            return m_compare(get_value(a), get_value(b));
        }
    };

    //! Comparator for the insertion heaps winner tree.
    struct heaps_comp
    {
        proc_vector_type& m_proc;
        const compare_type& m_compare;

        heaps_comp(proc_vector_type& proc, const compare_type& compare)
            : m_proc(proc), m_compare(compare)
        { }

        //! Return the minimum item of the heap with the given index.
        const value_type & get_value(int index) const
        {
            return m_proc[index]->insertion_heap[0];
        }

        bool operator () (const int a, const int b) const
        {
            return m_compare(get_value(a), get_value(b));
        }
    };

    //! Comparator for the internal arrays winner tree.
    struct ia_comp
    {
        ias_type& m_ias;
        const compare_type& m_compare;

        ia_comp(ias_type& ias, const compare_type& compare)
            : m_ias(ias), m_compare(compare)
        { }

        bool operator () (const int a, const int b) const
        {
            return m_compare(m_ias[a].get_min(), m_ias[b].get_min());
        }
    };

protected:
    //! The priority queue
    parent_type& m_parent;

    //! value_type comparator
    const compare_type& m_compare;

    //! Comperator instances
    head_comp m_head_comp;
    heaps_comp m_heaps_comp;
    ia_comp m_ia_comp;

    //! The winner trees
    winner_tree<head_comp> m_head;
    winner_tree<heaps_comp> m_heaps;
    winner_tree<ia_comp> m_ia;

public:
    //! Entries in the head winner tree.
    enum Types {
        HEAP = 0,
        IA = 1,
        EB = 2,
        ERROR = 3
    };

    //! Construct the tree of minima sources.
    minima_tree(parent_type& parent)
        : m_parent(parent),
          m_compare(parent.m_inv_compare),
          // construct comparators
          m_head_comp(*this, parent.m_proc,
                      parent.m_internal_arrays, m_compare),
          m_heaps_comp(parent.m_proc, m_compare),
          m_ia_comp(parent.m_internal_arrays, m_compare),
          // construct header winner tree
          m_head(3, m_head_comp),
          m_heaps(m_parent.m_num_insertion_heaps, m_heaps_comp),
          m_ia(initial_ia_size, m_ia_comp)
    { }

    //! Return smallest items of head winner tree.
    //! Returns a pair (source type, index inside that source's tree).
    std::pair<unsigned, unsigned> top()
    {
        unsigned type = m_head.top();
        switch (type)
        {
        case HEAP:
            return std::make_pair(HEAP, m_heaps.top());
        case IA:
            return std::make_pair(IA, m_ia.top());
        case EB:
            return std::make_pair(EB, 0);
        default:
            return std::make_pair(ERROR, 0);
        }
    }

    //! Update minima tree after an item from the heap index was removed.
    void update_heap(unsigned index)
    {
        m_heaps.notify_change(index);
        m_head.notify_change(HEAP);
    }

    //! Update minima tree after an item of the extract buffer was removed.
    void update_extract_buffer()
    {
        m_head.notify_change(EB);
    }

    //! Update minima tree after an item from an internal array was removed.
    void update_internal_array(unsigned index)
    {
        m_ia.notify_change(index);
        m_head.notify_change(IA);
    }

    //! Add a newly created internal array to the minima tree.
    void add_internal_array(unsigned index)
    {
        m_ia.activate_player(index);
        m_head.notify_change(IA);
    }

    //! Remove an insertion heap from the minima tree.
    void deactivate_heap(unsigned index)
    {
        m_heaps.deactivate_player(index);
        // deactivate the whole HEAP source when the last heap is removed
        if (!m_heaps.empty())
            m_head.notify_change(HEAP);
        else
            m_head.deactivate_player(HEAP);
    }

    //! Remove the extract buffer from the minima tree.
    void deactivate_extract_buffer()
    {
        m_head.deactivate_player(EB);
    }

    //! Remove an internal array from the minima tree.
    void deactivate_internal_array(unsigned index)
    {
        m_ia.deactivate_player(index);
        // deactivate the whole IA source when the last array is removed
        if (!m_ia.empty())
            m_head.notify_change(IA);
        else
            m_head.deactivate_player(IA);
    }

    //! Remove all insertion heaps from the minima tree.
    void clear_heaps()
    {
        m_heaps.clear();
        m_head.deactivate_player(HEAP);
    }

    //! Remove all internal arrays from the minima tree.
    void clear_internal_arrays()
    {
        m_ia.resize_and_clear(initial_ia_size);
        m_head.deactivate_player(IA);
    }

    //! Rebuild the IA tree from the parent's current internal array vector
    //! (used after arrays have been removed or reordered).
    void rebuild_internal_arrays()
    {
        m_ia.resize_and_rebuild(m_parent.m_internal_arrays.size());
        if (!m_parent.m_internal_arrays.empty())
            m_head.notify_change(IA);
        else
            m_head.deactivate_player(IA);
    }

    //! Return size of internal arrays minima tree
    size_t ia_slots() const
    {
        return m_ia.num_slots();
    }

    //! Returns a readable representation of the winner tree as string.
    std::string to_string() const
    {
        std::ostringstream ss;
        ss << "Head:" << std::endl << m_head.to_string() << std::endl;
        ss << "Heaps:" << std::endl << m_heaps.to_string() << std::endl;
        ss << "IA:" << std::endl << m_ia.to_string() << std::endl;
        return ss.str();
    }

    //! Prints statistical data.
    void print_stats() const
    {
        STXXL_MSG("Head winner tree stats:");
        m_head.print_stats();
        STXXL_MSG("Heaps winner tree stats:");
        m_heaps.print_stats();
        STXXL_MSG("IA winner tree stats:");
        m_ia.print_stats();
    }
};
} // namespace ppq_local
/*!
* Parallelized External Memory Priority Queue.
*
* \tparam ValueType Type of the contained objects (POD with no references to
* internal memory).
*
* \tparam CompareType The comparator type used to determine whether one
* element is smaller than another element.
*
* \tparam DefaultMemSize Maximum memory consumption by the queue. Can be
* overwritten by the constructor. Default = 1 GiB.
*
* \tparam MaxItems Maximum number of elements the queue contains at one
* time. Default = 0 = unlimited. This is no hard limit and only used for
* optimization. Can be overwritten by the constructor.
*
* \tparam BlockSize External block size. Default =
* STXXL_DEFAULT_BLOCK_SIZE(ValueType).
*
* \tparam AllocStrategy Allocation strategy for the external memory. Default =
* STXXL_DEFAULT_ALLOC_STRATEGY.
*/
template <
class ValueType,
class CompareType = std::less<ValueType>,
class AllocStrategy = STXXL_DEFAULT_ALLOC_STRATEGY,
uint64 BlockSize = STXXL_DEFAULT_BLOCK_SIZE(ValueType),
uint64 DefaultMemSize = 1* 1024L* 1024L* 1024L,
uint64 MaxItems = 0
>
class parallel_priority_queue : private noncopyable
{
//! \name Types
//! \{
public:
typedef ValueType value_type;
typedef CompareType compare_type;
typedef AllocStrategy alloc_strategy;
static const uint64 block_size = BlockSize;
typedef uint64 size_type;
typedef typed_block<block_size, value_type> block_type;
typedef std::vector<BID<block_size> > bid_vector;
typedef bid_vector bids_container_type;
typedef read_write_pool<block_type> pool_type;
typedef ppq_local::internal_array<value_type> internal_array_type;
typedef ppq_local::external_array<value_type, block_size, AllocStrategy> external_array_type;
typedef typename external_array_type::writer_type external_array_writer_type;
typedef typename std::vector<value_type>::iterator value_iterator;
typedef typename internal_array_type::iterator iterator;
typedef std::pair<iterator, iterator> iterator_pair_type;
static const bool debug = false;
//! currently global public tuning parameter:
unsigned_type c_max_internal_level_size;
//! currently global public tuning parameter:
unsigned_type c_max_external_level_size;
protected:
//! type of insertion heap itself
typedef std::vector<value_type> heap_type;
//! type of internal arrays vector
typedef typename stxxl::swap_vector<internal_array_type> internal_arrays_type;
//! type of external arrays vector
typedef typename stxxl::swap_vector<external_array_type> external_arrays_type;
//! type of minima tree combining the structures
typedef ppq_local::minima_tree<
parallel_priority_queue<value_type, compare_type, alloc_strategy,
block_size, DefaultMemSize, MaxItems> > minima_type;
//! allow minima tree access to internal data structures
friend class ppq_local::minima_tree<
parallel_priority_queue<value_type, compare_type, alloc_strategy,
block_size, DefaultMemSize, MaxItems> >;
//! Inverse comparison functor
struct inv_compare_type
{
    //! the underlying <-comparator, inverted by swapping the arguments
    const compare_type& compare;

    inv_compare_type(const compare_type& c)
        : compare(c)
    { }

    //! Return true iff x > y under the original comparator.
    bool operator () (const value_type& x, const value_type& y) const
    {
        return compare(y, x);
    }
};
//! <-Comparator for value_type
compare_type m_compare;
//! >-Comparator for value_type
inv_compare_type m_inv_compare;
//! Defines if statistics are gathered: dummy_custom_stats_counter or
//! custom_stats_counter
typedef dummy_custom_stats_counter<uint64> stats_counter;
//! Defines if statistics are gathered: fake_timer or timer
typedef fake_timer stats_timer;
//! \}
//! \name Compile-Time Parameters
//! \{
//! Merge sorted heaps when flushing into an internal array.
//! Pro: Reduces the risk of a large winner tree
//! Con: Flush insertion heaps becomes slower.
static const bool c_merge_sorted_heaps = true;
//! Default number of write buffer block for a new external array being
//! filled.
static const unsigned c_num_write_buffer_blocks = 14;
//! Defines for how much external arrays memory should be reserved in the
//! constructor.
static const unsigned c_num_reserved_external_arrays = 10;
//! Size of a single insertion heap in Byte, if not defined otherwise in
//! the constructor. Default: 1 MiB
static const size_type c_default_single_heap_ram = 1L * 1024L * 1024L;
//! Default limit of the extract buffer ram consumption as share of total
//! ram
// C++11: constexpr static double c_default_extract_buffer_ram_part = 0.05;
// C++98 does not allow static const double initialization here.
// It's located in global scope instead.
static const double c_default_extract_buffer_ram_part;
/*!
* Limit the size of the extract buffer to an absolute value.
*
* The actual size can be set using the extract_buffer_ram parameter of the
* constructor. If this parameter is not set, the value is calculated by
* (total_ram*c_default_extract_buffer_ram_part)
*
* If c_limit_extract_buffer==false, the memory consumption of the extract
* buffer is only limited by the number of external and internal
* arrays. This is considered in memory management using the
* ram_per_external_array and ram_per_internal_array values. Attention:
* Each internal array reserves space for the extract buffer in the size of
* all heaps together.
*/
static const bool c_limit_extract_buffer = true;
//! For bulks of size up to c_single_insert_limit sequential single insert
//! is faster than bulk_push.
static const unsigned c_single_insert_limit = 100;
//! \}
//! \name Parameters and Sizes for Memory Allocation Policy
//! Number of insertion heaps. Usually equal to the number of CPUs.
const unsigned m_num_insertion_heaps;
//! Capacity of one inserion heap
const unsigned m_insertion_heap_capacity;
//! Return size of insertion heap reservation in bytes
//! Bytes reserved for one insertion heap (capacity in items times item size).
size_type insertion_heap_int_memory() const
{
    return m_insertion_heap_capacity * sizeof(value_type);
}
//! Total amount of internal memory
const size_type m_mem_total;
//! Maximum size of extract buffer in number of elements
//! Only relevant if c_limit_extract_buffer==true
size_type m_extract_buffer_limit;
//! Size of all insertion heaps together in bytes
const size_type m_mem_for_heaps;
//! Number of read/prefetch blocks per external array.
const float m_num_read_blocks_per_ea;
//! Total number of read/prefetch buffer blocks
unsigned_type m_num_read_blocks;
//! number of currently hinted prefetch blocks
unsigned_type m_num_hinted_blocks;
//! number of currently loaded blocks
unsigned_type m_num_used_read_blocks;
//! Free memory in bytes
size_type m_mem_left;
//! \}
//! Flag if inside a bulk_push sequence.
bool m_in_bulk_push;
//! If the bulk currently being inserted is very large, this boolean is set
//! and bulk_push just accumulate the elements for eventual sorting.
bool m_is_very_large_bulk;
//! First index in m_external_arrays that was not re-hinted during a
//! bulk_push sequence.
unsigned_type m_bulk_first_delayed_external_array;
//! Index of the currently smallest element in the extract buffer
size_type m_extract_buffer_index;
//! \name Number of elements currently in the data structures
//! \{
//! Number of elements int the insertion heaps
size_type m_heaps_size;
//! Number of elements in the extract buffer
size_type m_extract_buffer_size;
//! Number of elements in the internal arrays
size_type m_internal_size;
//! Number of elements in the external arrays
size_type m_external_size;
//! \}
//! \name Data Holding Structures
//! \{
//! A struct containing the local insertion heap and other information
//! _local_ to a processor.
struct ProcessorData
{
    //! The heaps where new elements are usually inserted into
    heap_type insertion_heap;

    //! The number of items inserted into the insheap during bulk parallel
    //! access.
    size_type heap_add_size;
};
typedef std::vector<ProcessorData*> proc_vector_type;
//! Array of processor local data structures, including the insertion heaps.
proc_vector_type m_proc;
//! Prefetch and write buffer pool for external arrays (has to be in front
//! of m_external_arrays)
pool_type m_pool;
//! The extract buffer where external (and internal) arrays are merged into
//! for extracting
std::vector<value_type> m_extract_buffer;
//! The sorted arrays in internal memory
internal_arrays_type m_internal_arrays;
//! The sorted arrays in external memory
external_arrays_type m_external_arrays;
//! The aggregated pushes. They cannot be extracted yet.
std::vector<value_type> m_aggregated_pushes;
//! The maximum number of internal array levels.
static const unsigned_type c_max_internal_levels = 8;
//! The number of internal arrays on each level, we use plain array.
unsigned_type m_internal_levels[c_max_internal_levels];
//! The maximum number of external array levels.
static const unsigned_type c_max_external_levels = 8;
//! The number of external arrays on each level, we use plain array.
unsigned_type m_external_levels[c_max_external_levels];
//! The winner tree containing the smallest values of all sources
//! where the globally smallest element could come from.
minima_type m_minima;
//! Compares the largest accessible value of two external arrays.
struct external_min_comparator {
    //! the external arrays whose next-block minima are compared
    const external_arrays_type& m_eas;
    //! >-comparator on value_type
    const inv_compare_type& m_compare;

    external_min_comparator(const external_arrays_type& eas,
                            const inv_compare_type& compare)
        : m_eas(eas), m_compare(compare) { }

    //! Compare two external arrays (by index) on their next block minimum.
    bool operator () (const size_t& a, const size_t& b) const
    {
        return m_compare(m_eas[a].get_next_block_min(),
                         m_eas[b].get_next_block_min());
    }
} m_external_min_comparator;
//! Tracks the largest accessible values of the external arrays if there
//! is unaccessible data in EM. The winning array is the first one that
//! needs to fetch further data from EM. Used in calculate_merge_sequences.
winner_tree<external_min_comparator> m_external_min_tree;
//! Compares the largest value of the block hinted the latest of two
//! external arrays.
struct hint_comparator {
    //! the external arrays whose next-hintable minima are compared
    const external_arrays_type& m_eas;
    //! >-comparator on value_type
    const inv_compare_type& m_compare;

    hint_comparator(const external_arrays_type& eas,
                    const inv_compare_type& compare)
        : m_eas(eas), m_compare(compare) { }

    //! Compare two external arrays (by index) on the minimum of their next
    //! hintable (not yet prefetched) block.
    bool operator () (const size_t& a, const size_t& b) const
    {
        return m_compare(m_eas[a].get_next_hintable_min(),
                         m_eas[b].get_next_hintable_min());
    }
} m_hint_comparator;
//! Tracks the largest values of the block hinted the latest of the
//! external arrays if there is unaccessible data in EM. The winning
//! array is the first one that needs to fetch further data from EM.
//! Used for prefetch hints.
winner_tree<hint_comparator> m_hint_tree;
//! Random number generator for randomly selecting a heap in sequential
//! push()
random_number32_r m_rng;
//! \}
/*
* Helper function to remove empty internal/external arrays.
*/
//! Unary operator which returns true if the external array has run empty.
//! Unary predicate: true if the external array has run empty.
struct empty_external_array_eraser {
    bool operator () (external_array_type& a) const
    { return a.empty(); }
};
//! Unary operator which returns true if the internal array has run empty.
//! Unary predicate: true if the internal array has run empty.
struct empty_internal_array_eraser {
    bool operator () (internal_array_type& a) const
    { return a.empty(); }
};
//! Clean up empty internal arrays, free their memory and capacity
void cleanup_internal_arrays()
{
    // move all empty arrays to the end of the vector
    typename internal_arrays_type::iterator swap_end =
        stxxl::swap_remove_if(m_internal_arrays.begin(),
                              m_internal_arrays.end(),
                              empty_internal_array_eraser());

    // reclaim memory and level slots of the arrays about to be erased
    for (typename internal_arrays_type::iterator ia = swap_end;
         ia != m_internal_arrays.end(); ++ia)
    {
        m_mem_left += ia->int_memory();
        --m_internal_levels[ia->level()];
    }

    if (swap_end != m_internal_arrays.end())
        STXXL_DEBUG0("cleanup_internal_arrays" <<
                     " cleaned=" << m_internal_arrays.end() - swap_end);

    m_internal_arrays.erase(swap_end, m_internal_arrays.end());
    // indices shifted, so the IA winner tree must be rebuilt
    m_minima.rebuild_internal_arrays();
}
//! Clean up empty external arrays, free their memory and capacity
void cleanup_external_arrays()
{
    typedef typename external_arrays_type::iterator ea_iterator;
    empty_external_array_eraser pred;

    // The following is a modified implementation of swap_remove_if().
    // Updates m_external_min_tree accordingly.
    ea_iterator first = m_external_arrays.begin();
    ea_iterator last = m_external_arrays.end();
    ea_iterator swap_end = first;
    size_t size = m_external_arrays.end() - m_external_arrays.begin();
    // index of the first empty (removed) array; 'size' means "none found"
    size_t first_removed = size;
    while (first != last)
    {
        if (!pred(*first))
        {
            // keep this array: compact it towards the front
            using std::swap;
            swap(*first, *swap_end);
            ++swap_end;
        }
        else if (first_removed >= size)
        {
            first_removed = first - m_external_arrays.begin();
        }
        ++first;
    }

    // subtract memory of EAs, which will be freed
    for (ea_iterator ea = swap_end; ea != last; ++ea) {
        m_mem_left += ea->int_memory();
        --m_external_levels[ea->level()];
    }

    size_t swap_end_index = swap_end - m_external_arrays.begin();

    // Deactivating all affected players first.
    // Otherwise there might be outdated comparisons.
    for (size_t i = size; i != first_removed; ) {
        --i;
        m_external_min_tree.deactivate_player_step(i);
        // TODO delay if (m_in_bulk_push)?
        m_hint_tree.deactivate_player_step(i);
    }

    // Replay moved arrays.
    for (size_t i = first_removed; i < swap_end_index; ++i) {
        update_external_min_tree(i);
        // TODO delay if (m_in_bulk_push)?
        update_hint_tree(i);
    }

    STXXL_DEBUG("Removed " << m_external_arrays.end() - swap_end <<
                " empty external arrays.");

    m_external_arrays.erase(swap_end, m_external_arrays.end());
    resize_read_pool(); // shrinks read/prefetch pool
}
/*!
* SiftUp a new element from the last position in the heap, reestablishing
* the heap invariant. This is identical to std::push_heap, except that it
* returns the last element modified by siftUp. Thus we can identify if the
* minimum may have changed.
*/
//! SiftUp the element at (last - 1) into the heap [first, last - 1),
//! like std::push_heap, but returns the final position of the element so
//! the caller can tell whether the minimum may have changed.
template <typename RandomAccessIterator, typename HeapCompareType>
static inline unsigned_type
push_heap(RandomAccessIterator first, RandomAccessIterator last,
          HeapCompareType comp)
{
    typedef typename std::iterator_traits<RandomAccessIterator>::value_type
        value_type;

    // lift out the new element and bubble the hole towards the root
    value_type item = _GLIBCXX_MOVE(*(last - 1));
    unsigned_type hole = (last - first) - 1;

    while (hole > 0)
    {
        unsigned_type up = (hole - 1) / 2;
        if (!comp(*(first + up), item))
            break;
        // parent loses against the new item: pull it down into the hole
        *(first + hole) = _GLIBCXX_MOVE(*(first + up));
        hole = up;
    }

    *(first + hole) = _GLIBCXX_MOVE(item);
    return hole;
}
public:
//! \name Initialization
//! \{
/*!
 * Constructor.
 *
 * \param compare Comparator for priority queue, which is a Max-PQ.
 *
 * \param total_ram Maximum RAM usage. 0 = Default = Use the template
 * value DefaultMemSize.
 *
 * \param num_read_blocks_per_ea Number of read blocks per external
 * array. Default = 1.5f
 *
 * \param num_write_buffer_blocks Number of write buffer blocks for a new
 * external array being filled. 0 = Default = c_num_write_buffer_blocks
 *
 * \param num_insertion_heaps Number of insertion heaps. 0 = Default =
 * Determine by omp_get_max_threads().
 *
 * \param single_heap_ram Memory usage for a single insertion heap.
 * Default = c_single_heap_ram.
 *
 * \param extract_buffer_ram Memory usage for the extract buffer. Only
 * relevant if c_limit_extract_buffer==true. 0 = Default = total_ram *
 * c_default_extract_buffer_ram_part.
 */
parallel_priority_queue(
    const compare_type& compare = compare_type(),
    size_type total_ram = DefaultMemSize,
    float num_read_blocks_per_ea = 1.5f,
    unsigned_type num_write_buffer_blocks = c_num_write_buffer_blocks,
    unsigned_type num_insertion_heaps = 0,
    size_type single_heap_ram = c_default_single_heap_ram,
    size_type extract_buffer_ram = 0)
    : c_max_internal_level_size(64),
      c_max_external_level_size(64),
      m_compare(compare),
      m_inv_compare(m_compare),
      // Parameters and Sizes for Memory Allocation Policy
#if STXXL_PARALLEL
      // default: one insertion heap per OpenMP thread
      m_num_insertion_heaps(num_insertion_heaps > 0 ? num_insertion_heaps : omp_get_max_threads()),
#else
      m_num_insertion_heaps(num_insertion_heaps > 0 ? num_insertion_heaps : 1),
#endif
      m_insertion_heap_capacity(single_heap_ram / sizeof(value_type)),
      m_mem_total(total_ram),
      m_mem_for_heaps(m_num_insertion_heaps * single_heap_ram),
      m_num_read_blocks_per_ea(num_read_blocks_per_ea),
      m_num_read_blocks(0),
      m_num_hinted_blocks(0),
      m_num_used_read_blocks(0),
      // bulk-push session state
      m_in_bulk_push(false),
      m_is_very_large_bulk(false),
      m_extract_buffer_index(0),
      // Number of elements currently in the data structures
      m_heaps_size(0),
      m_extract_buffer_size(0),
      m_internal_size(0),
      m_external_size(0),
      // Data Holding Structures
      m_proc(m_num_insertion_heaps),
      m_pool(0, num_write_buffer_blocks),
      m_external_arrays(),
      m_minima(*this),
      m_external_min_comparator(m_external_arrays, m_inv_compare),
      m_external_min_tree(4, m_external_min_comparator),
      m_hint_comparator(m_external_arrays, m_inv_compare),
      m_hint_tree(4, m_hint_comparator),
      // flags
      m_limit_extract(false)
{
#if STXXL_PARALLEL
    // the PPQ relies on nested OpenMP parallel regions: enable them or abort
    if (!omp_get_nested()) {
        omp_set_nested(1);
        if (!omp_get_nested()) {
            STXXL_ERRMSG("Could not enable OpenMP's nested parallelism, "
                         "however, the PPQ requires this OpenMP feature.");
            abort();
        }
    }
#else
    STXXL_ERRMSG("You are using stxxl::parallel_priority_queue without "
                 "support for OpenMP parallelism.");
    STXXL_ERRMSG("This is probably not what you want, so check the "
                 "compilation settings.");
#endif

    if (c_limit_extract_buffer) {
        // extract buffer limit in items: explicit parameter wins over the
        // default fraction of total RAM
        m_extract_buffer_limit = (extract_buffer_ram > 0)
                                 ? extract_buffer_ram / sizeof(value_type)
                                 : static_cast<size_type>(((double)(m_mem_total) * c_default_extract_buffer_ram_part / sizeof(value_type)));
    }

    // zero the per-level internal/external array counters
    for (unsigned_type i = 0; i < c_max_internal_levels; ++i)
        m_internal_levels[i] = 0;
    for (unsigned_type i = 0; i < c_max_external_levels; ++i)
        m_external_levels[i] = 0;

    // TODO: Do we still need this line? Insertion heap memory is
    // registered below. And merge buffer is equal to the new IA...
    // total_ram - ram for the heaps - ram for the heap merger
    m_mem_left = m_mem_total - 2 * m_mem_for_heaps;

    // reserve insertion heap memory on processor-local memory
#if STXXL_PARALLEL
#pragma omp parallel for
#endif
    for (size_t p = 0; p < m_num_insertion_heaps; ++p)
    {
        m_proc[p] = new ProcessorData;
        m_proc[p]->insertion_heap.reserve(m_insertion_heap_capacity);
        assert(m_proc[p]->insertion_heap.capacity() * sizeof(value_type)
               == insertion_heap_int_memory());
    }
    m_mem_left -= m_num_insertion_heaps * insertion_heap_int_memory();

    // prepare prefetch buffer pool (already done in initializer),
    // initially zero.

    // prepare write buffer pool: calculate size and subtract from mem_left
    external_array_type::prepare_write_pool(m_pool, m_num_insertion_heaps);
    m_mem_left -= m_pool.size_write() * block_size;

    // prepare internal arrays
    if (c_merge_sorted_heaps) {
        m_internal_arrays.reserve(m_mem_total / m_mem_for_heaps);
    }
    else {
        m_internal_arrays.reserve(m_mem_total * m_num_insertion_heaps / m_mem_for_heaps);
    }

    // prepare external arrays
    m_external_arrays.reserve(c_num_reserved_external_arrays);

    if (m_mem_total < m_mem_left) // checks if unsigned type wrapped.
    {
        STXXL_ERRMSG("Minimum memory requirement insufficient, "
                     "increase PPQ's memory limit or decrease buffers.");
        abort();
    }

    check_invariants();
}
//! Destructor: frees the processor-local data allocated in the constructor.
~parallel_priority_queue()
{
    for (size_t i = 0; i != m_num_insertion_heaps; ++i) {
        delete m_proc[i];
    }
}
protected:
//! Assert many invariants of the data structures (debug builds only).
//! Recomputes element counts, per-level array counts, prefetch-block
//! bookkeeping, and total memory use from scratch and checks them
//! against the incrementally maintained counters.
void check_invariants() const
{
#ifdef NDEBUG
    // disable in Release builds
    return;
#endif
    // recompute memory consumption: fixed overheads first
    size_type mem_used = 0;
    mem_used += 2 * m_mem_for_heaps
                + m_pool.size_write() * block_size
                + m_pool.free_size_prefetch() * block_size
                + m_num_hinted_blocks * block_size
                + m_num_used_read_blocks * block_size;

    // count number of blocks hinted in prefetcher
    size_t num_hinted = 0, num_used_read = 0;
    for (size_t i = 0; i < m_external_arrays.size(); ++i) {
        num_hinted += m_external_arrays[i].num_hinted_blocks();
        num_used_read += m_external_arrays[i].num_used_blocks();
    }
    STXXL_CHECK(num_hinted == m_num_hinted_blocks);
    STXXL_CHECK(num_used_read == m_num_used_read_blocks);
    // read blocks are partitioned into free, hinted, and in-use
    STXXL_CHECK_EQUAL(m_num_used_read_blocks,
                      m_num_read_blocks
                      - m_pool.free_size_prefetch()
                      - m_num_hinted_blocks);

    // test the processor local data structures
    size_type heaps_size = 0;
    for (unsigned p = 0; p < m_num_insertion_heaps; ++p)
    {
        // check that each insertion heap is a heap
        // TODO: remove soon, because this is very expensive
        // (the "1 ||" currently short-circuits the is_heap check)
        STXXL_CHECK(1 || stxxl::is_heap(m_proc[p]->insertion_heap.begin(),
                                        m_proc[p]->insertion_heap.end(),
                                        m_compare));

        STXXL_CHECK(m_proc[p]->insertion_heap.capacity() <= m_insertion_heap_capacity);
        heaps_size += m_proc[p]->insertion_heap.size();
        mem_used += m_proc[p]->insertion_heap.capacity() * sizeof(value_type);
    }
    // during a bulk push the global counter lags behind on purpose
    if (!m_in_bulk_push)
        STXXL_CHECK_EQUAL(m_heaps_size, heaps_size);

    // count number of items and memory size of internal arrays
    size_type ia_size = 0;
    size_type ia_memory = 0;
    std::vector<unsigned_type> ia_levels(c_max_internal_levels, 0);

    for (typename internal_arrays_type::const_iterator ia =
             m_internal_arrays.begin(); ia != m_internal_arrays.end(); ++ia)
    {
        ia_size += ia->size();
        ia_memory += ia->int_memory();
        ++ia_levels[ia->level()];
    }
    STXXL_CHECK_EQUAL(m_internal_size, ia_size);
    mem_used += ia_memory;

    for (unsigned_type i = 0; i < c_max_internal_levels; ++i)
        STXXL_CHECK_EQUAL(m_internal_levels[i], ia_levels[i]);

    // count number of items in external arrays
    size_type ea_size = 0;
    size_type ea_memory = 0;
    std::vector<unsigned_type> ea_levels(c_max_external_levels, 0);

    for (typename external_arrays_type::const_iterator ea =
             m_external_arrays.begin(); ea != m_external_arrays.end(); ++ea)
    {
        ea_size += ea->size();
        ea_memory += ea->int_memory();
        ++ea_levels[ea->level()];
    }
    STXXL_CHECK_EQUAL(m_external_size, ea_size);
    mem_used += ea_memory;

    for (unsigned_type i = 0; i < c_max_external_levels; ++i)
        STXXL_CHECK_EQUAL(m_external_levels[i], ea_levels[i]);

    // calculate mem_used so that == mem_total - mem_left
    STXXL_CHECK_EQUAL(memory_consumption(), mem_used);
}
//! \}
//! \name Properties
//! \{
public:
//! The number of elements in the queue: sum over insertion heaps,
//! internal arrays, external arrays, and the extract buffer.
inline size_type size() const
{
    size_type total = m_heaps_size;
    total += m_internal_size;
    total += m_external_size;
    total += m_extract_buffer_size;
    return total;
}
//! Returns true if the queue holds no elements at all.
inline bool empty() const
{
    return size() == 0;
}
//! The memory consumption in Bytes, i.e. the part of the budget
//! currently in use.
inline size_type memory_consumption() const
{
    assert(m_mem_total >= m_mem_left);
    return m_mem_total - m_mem_left;
}
protected:
//! Returns true if the extract buffer currently holds no items.
inline bool extract_buffer_empty() const
{
    return m_extract_buffer_size == 0;
}
//! \}
public:
//! \name Bulk Operations
//! \{
/*!
 * Start a sequence of push operations.
 * \param bulk_size Exact number of elements to push before the next pop.
 *
 * Records where delayed EA re-hinting must start and selects the bulk
 * insertion strategy; per-heap add counters are reset to zero.
 */
void bulk_push_begin(size_type bulk_size)
{
    assert(!m_in_bulk_push);
    m_in_bulk_push = true;
    // EAs created from here on get their hints rebuilt in bulk_push_end()
    m_bulk_first_delayed_external_array = m_external_arrays.size();

    size_type heap_capacity = m_num_insertion_heaps * m_insertion_heap_capacity;

    // if bulk_size is large: use simple aggregation instead of keeping the
    // heap property and sort everything afterwards.
    // NOTE(review): the "&& 0" deliberately disables the very-large-bulk
    // path, so m_is_very_large_bulk is always set to false here.
    if (bulk_size > heap_capacity && 0) {
        m_is_very_large_bulk = true;
    }
    else {
        m_is_very_large_bulk = false;

        if (bulk_size + m_heaps_size > heap_capacity) {
            if (m_heaps_size > 0) {
                // NOTE(review): eager flush is disabled (commented out)
                //flush_insertion_heaps();
            }
        }
    }

    // zero bulk insertion counters
    for (unsigned p = 0; p < m_num_insertion_heaps; ++p)
        m_proc[p]->heap_add_size = 0;
}
/*!
 * Push an element inside a sequence of pushes.
 * Run bulk_push_begin() before using this method.
 *
 * \param element The element to push.
 * \param p The id of the insertion heap to use (usually the thread id).
 *
 * Three code paths exist; the "&& 0" / "&& 1" constants are compile-time
 * switches selecting the active variant (currently: append WITHOUT
 * keeping the heap invariant; bulk_push_end() restores it).
 */
void bulk_push(const value_type& element, const unsigned_type p)
{
    assert(m_in_bulk_push);

    heap_type& insheap = m_proc[p]->insertion_heap;

    if (!m_is_very_large_bulk && 0)   // disabled variant
    {
        // if small bulk: if heap is full -> sort locally and put into
        // internal array list. insert items and keep heap invariant.
        if (UNLIKELY(insheap.size() >= m_insertion_heap_capacity)) {
            // flush requires the global size counter to be up to date
#if STXXL_PARALLEL
#pragma omp atomic
#endif
            m_heaps_size += m_proc[p]->heap_add_size;
            m_proc[p]->heap_add_size = 0;
            flush_insertion_heap(p);
        }
        assert(insheap.size() < insheap.capacity());

        // put item onto heap and siftUp
        insheap.push_back(element);
        std::push_heap(insheap.begin(), insheap.end(), m_compare);
    }
    else if (!m_is_very_large_bulk && 1)   // active variant
    {
        // if small bulk: if heap is full -> sort locally and put into
        // internal array list. insert items but DO NOT keep heap
        // invariant.
        if (UNLIKELY(insheap.size() >= m_insertion_heap_capacity)) {
#if STXXL_PARALLEL
#pragma omp atomic
#endif
            m_heaps_size += m_proc[p]->heap_add_size;
            m_proc[p]->heap_add_size = 0;
            flush_insertion_heap(p);
        }
        assert(insheap.size() < insheap.capacity());

        // put item onto heap and DO NOT siftUp
        insheap.push_back(element);
    }
    else // m_is_very_large_bulk
    {
        // aggregate into an oversized buffer, flush at a fixed threshold
        if (UNLIKELY(insheap.size() >= 2 * 1024 * 1024)) {
#if STXXL_PARALLEL
#pragma omp atomic
#endif
            m_heaps_size += m_proc[p]->heap_add_size;
            m_proc[p]->heap_add_size = 0;
            flush_insertion_heap(p);
        }
        assert(insheap.size() < insheap.capacity());

        // put onto insertion heap but do not keep heap property
        insheap.push_back(element);
    }

    // per-heap counter; folded into m_heaps_size at flush/bulk_push_end
    m_proc[p]->heap_add_size++;
}
/*!
* Push an element inside a bulk sequence of pushes. Run bulk_push_begin()
* before using this method. This function uses the insertion heap id =
* omp_get_thread_num().
*
* \param element The element to push.
*/
void bulk_push(const value_type& element)
{
#if STXXL_PARALLEL
return bulk_push(element, (unsigned_type)omp_get_thread_num());
#else
unsigned_type id = m_rng() % m_num_insertion_heaps;
return bulk_push(element, id);
#endif
}
/*!
 * Ends a sequence of push operations. Run bulk_push_begin() and some
 * bulk_push() before this. Restores the heap invariant for the items
 * appended without sift-up, folds the per-heap counters into
 * m_heaps_size, refreshes the minima structure, and performs any EA
 * re-hinting delayed during the bulk session.
 */
void bulk_push_end()
{
    assert(m_in_bulk_push);
    m_in_bulk_push = false;

    if (!m_is_very_large_bulk && 0)   // disabled variant (heaps kept valid)
    {
        for (unsigned p = 0; p < m_num_insertion_heaps; ++p)
        {
            m_heaps_size += m_proc[p]->heap_add_size;

            if (!m_proc[p]->insertion_heap.empty())
                m_minima.update_heap(p);
        }
    }
    else if (!m_is_very_large_bulk && 1)   // active variant
    {
#if STXXL_PARALLEL
#pragma omp parallel for
#endif
        for (unsigned p = 0; p < m_num_insertion_heaps; ++p)
        {
            // reestablish heap property: siftUp only those items pushed
            // (end() - index walks backward over the appended suffix)
            for (unsigned_type index = m_proc[p]->heap_add_size; index != 0; ) {
                std::push_heap(m_proc[p]->insertion_heap.begin(),
                               m_proc[p]->insertion_heap.end() - (--index),
                               m_compare);
            }

#if STXXL_PARALLEL
#pragma omp atomic
#endif
            m_heaps_size += m_proc[p]->heap_add_size;
        }

        // minima updates are not thread-safe: do them serially afterwards
        for (unsigned p = 0; p < m_num_insertion_heaps; ++p)
        {
            if (!m_proc[p]->insertion_heap.empty())
                m_minima.update_heap(p);
        }
    }
    else // m_is_very_large_bulk
    {
#if STXXL_PARALLEL
#pragma omp parallel for
#endif
        for (unsigned p = 0; p < m_num_insertion_heaps; ++p)
        {
            if (m_proc[p]->insertion_heap.size() >= m_insertion_heap_capacity) {
                // flush out overfull insertion heap arrays
#if STXXL_PARALLEL
#pragma omp atomic
#endif
                m_heaps_size += m_proc[p]->heap_add_size;
                m_proc[p]->heap_add_size = 0;
                flush_insertion_heap(p);
            }
            else {
                // reestablish heap property: siftUp only those items pushed
                for (unsigned_type index = m_proc[p]->heap_add_size; index != 0; ) {
                    std::push_heap(m_proc[p]->insertion_heap.begin(),
                                   m_proc[p]->insertion_heap.end() - (--index),
                                   m_compare);
                }
#if STXXL_PARALLEL
#pragma omp atomic
#endif
                m_heaps_size += m_proc[p]->heap_add_size;
                m_proc[p]->heap_add_size = 0;
            }
        }

        for (unsigned p = 0; p < m_num_insertion_heaps; ++p)
        {
            if (!m_proc[p]->insertion_heap.empty())
                m_minima.update_heap(p);
        }
    }

    // run the EA re-hinting that was delayed while the bulk was active
    if (m_bulk_first_delayed_external_array != m_external_arrays.size()) {
        STXXL_DEBUG("bulk_push_end: run delayed re-hinting of EAs");
        rebuild_hint_tree();
    }

    check_invariants();
}
//! Extract up to max_size values at once.
//! \param out vector receiving the extracted items (previous content lost)
//! \param max_size maximum number of items to pop
void bulk_pop(std::vector<value_type>& out, size_t max_size)
{
    STXXL_DEBUG("bulk_pop_size with max_size=" << max_size);
    const size_t n_elements = std::min<size_t>(max_size, size());
    // the refilled batch must fit into the extract buffer
    assert(n_elements < m_extract_buffer_limit);

    // move all heap/extract-buffer items into mergeable arrays first
    if (m_heaps_size > 0)
        flush_insertion_heaps();
    convert_eb_into_ia();
    refill_extract_buffer(n_elements, n_elements);

    // hand the freshly filled extract buffer over to the caller
    out.resize(0);
    using std::swap;
    swap(m_extract_buffer, out);

    m_extract_buffer_index = 0;
    m_extract_buffer_size = 0;
    m_minima.deactivate_extract_buffer();

    check_invariants();
}
//! Extracts all elements which are greater or equal to a given limit.
//! \param out result vector (overwritten)
//! \param limit limit value
//! \param max_size maximum number of items to extract
//! \return true if the buffer contains all items < limit, false it was too
//! small.
bool bulk_pop_limit(std::vector<value_type>& out, const value_type& limit,
                    size_t max_size = std::numeric_limits<size_t>::max())
{
    STXXL_DEBUG("bulk_pop_limit with limit=" << limit);
    convert_eb_into_ia();

    // push down any heap items within the limit range into internal arrays
    if (m_heaps_size > 0) {
        if (0)
            flush_insertion_heaps();
        else if (1)
            flush_insertion_heaps_with_limit(limit);
    }

    size_type ias = m_internal_arrays.size();
    size_type eas = m_external_arrays.size();

    std::vector<size_type> sizes(eas + ias);
    std::vector<iterator_pair_type> sequences(eas + ias);
    size_type output_size = 0;

    int limiting_ea_index = m_external_min_tree.top();

    // pop limit may have to change due to memory limit
    value_type this_limit = limit;
    bool has_full_range = true;

    // get all relevant blocks: wait for EA blocks until all EM data below
    // the limit is in memory, or we run out of read/prefetch blocks
    while (limiting_ea_index > -1)
    {
        const value_type& ea_limit =
            m_external_arrays[limiting_ea_index].get_next_block_min();

        if (m_compare(ea_limit, this_limit)) {
            // No more EM data smaller or equal to limit
            break;
        }

        if (m_external_arrays[limiting_ea_index].num_hinted_blocks() == 0) {
            // No more read/prefetch blocks available for EA:
            // shrink the extraction range to what is in memory
            this_limit = ea_limit;
            has_full_range = false;
            break;
        }

        wait_next_ea_blocks(limiting_ea_index);

        // consider next limiting EA
        limiting_ea_index = m_external_min_tree.top();
        STXXL_ASSERT(limiting_ea_index < (int)eas);
    }

    // build sequences: clip each array at the (possibly reduced) limit
    for (size_type i = 0; i < eas + ias; ++i) {
        iterator begin, end;

        if (i < eas) {
            assert(!m_external_arrays[i].empty());
            assert(m_external_arrays[i].valid());
            begin = m_external_arrays[i].begin();
            end = m_external_arrays[i].end();
        }
        else {
            size_type j = i - eas;
            assert(!(m_internal_arrays[j].empty()));
            begin = m_internal_arrays[j].begin();
            end = m_internal_arrays[j].end();
        }

        end = std::lower_bound(begin, end, this_limit, m_inv_compare);

        sizes[i] = std::distance(begin, end);
        sequences[i] = std::make_pair(begin, end);
    }

    // BUGFIX: accumulate with a size_type initial value. The previous
    // literal 0 made std::accumulate sum in (signed) int, truncating or
    // overflowing large totals before the assignment to output_size.
    output_size = std::accumulate(sizes.begin(), sizes.end(), size_type(0));

    if (output_size > max_size) {
        output_size = max_size;
        has_full_range = false;
    }

    out.resize(output_size);

    STXXL_DEBUG("bulk_pop_limit with" <<
                " sequences=" << sequences.size() <<
                " output_size=" << output_size <<
                " has_full_range=" << has_full_range);

    potentially_parallel::multiway_merge(
        sequences.begin(), sequences.end(),
        out.begin(), output_size, m_inv_compare);

    advance_arrays(sequences, sizes, eas, ias);

    check_invariants();

    return has_full_range;
}
#if TODO_MAYBE_FIXUP_LATER
/*!
 * Insert a vector of elements at one time.
 * \param elements Vector containing the elements to push.
 * Attention: elements vector may be owned by the PQ afterwards.
 */
void bulk_push_vector(std::vector<value_type>& elements)
{
    size_type heap_capacity = m_num_insertion_heaps * m_insertion_heap_capacity;
    // very large inputs bypass the insertion heaps entirely
    if (elements.size() > heap_capacity / 2) {
        flush_array(elements);
        return;
    }

    bulk_push_begin(elements.size());
#if STXXL_PARALLEL
#pragma omp parallel
    {
        const unsigned thread_num = omp_get_thread_num();
        // BUGFIX: use the worksharing "omp for" here. The previous
        // "#pragma omp parallel for" opened a nested parallel region
        // inside the enclosing "omp parallel", so every outer thread
        // executed the WHOLE loop and each element was pushed once per
        // thread.
#pragma omp for
        for (size_type i = 0; i < elements.size(); ++i) {
            bulk_push(elements[i], thread_num);
        }
    }
#else
    const unsigned thread_num = m_rng() % m_num_insertion_heaps;
    for (size_type i = 0; i < elements.size(); ++i) {
        bulk_push(elements[i], thread_num);
    }
#endif
    bulk_push_end();
}
#endif
//! \}
//! \name Aggregation Operations
//! \{
/*!
 * Aggregate pushes. Use flush_aggregated_pushes() to finally push them.
 * extract_min is allowed in between the aggregation of pushes if you can
 * assure that the extracted value is smaller than all of the aggregated
 * values.
 * \param element The element to push.
 */
void aggregate_push(const value_type& element)
{
    // simply collect; nothing enters the queue until the flush
    m_aggregated_pushes.push_back(element);
}
#if TODO_MAYBE_FIXUP_LATER
/*!
 * Insert the aggregated values into the queue using push(), bulk insert,
 * or sorting, depending on the number of aggregated values.
 * Strategy: huge batches are sorted into an array directly, medium
 * batches use the bulk-push machinery, small ones use plain push().
 */
void flush_aggregated_pushes()
{
    size_type size = m_aggregated_pushes.size();
    size_type ram_internal = 2 * size * sizeof(value_type); // ram for the sorted array + part of the ram for the merge buffer

    size_type heap_capacity = m_num_insertion_heaps * m_insertion_heap_capacity;

    if (ram_internal > m_mem_for_heaps / 2) {
        // batch too large for the heaps: sort directly into an array
        flush_array(m_aggregated_pushes);
    }
    else if ((m_aggregated_pushes.size() > c_single_insert_limit) && (m_aggregated_pushes.size() < heap_capacity)) {
        bulk_push_vector(m_aggregated_pushes);
    }
    else {
        // small batch: individual pushes are cheapest
        for (value_iterator i = m_aggregated_pushes.begin(); i != m_aggregated_pushes.end(); ++i) {
            push(*i);
        }
    }

    m_aggregated_pushes.clear();
}
#endif
//! \}
//! \name std::priority_queue compliant operations
//! \{
/*!
 * Insert new element
 * \param element the element to insert.
 * \param p number of insertion heap to insert item into
 */
void push(const value_type& element, unsigned_type p = 0)
{
    assert(!m_in_bulk_push && !m_limit_extract);

    heap_type& insheap = m_proc[p]->insertion_heap;

    if (insheap.size() >= m_insertion_heap_capacity) {
        flush_insertion_heap(p);
    }

    // push item to end of heap and siftUp. Note: this unqualified call
    // resolves to the class's own static push_heap(), not std::push_heap;
    // it returns the last index modified so we can tell whether the
    // heap's minimum may have changed.
    insheap.push_back(element);
    unsigned_type index = push_heap(insheap.begin(), insheap.end(),
                                    m_compare);

    ++m_heaps_size;

    // only refresh the minima structure when the root could have changed
    if (insheap.size() == 1 || index == 0)
        m_minima.update_heap(p);
}
//! Access the minimum element. Refills the extract buffer first if it
//! has run empty. The minimum may live in an insertion heap, an internal
//! array, or the extract buffer.
const value_type & top()
{
    assert(!m_in_bulk_push && !m_limit_extract);
    assert(!empty());

    if (extract_buffer_empty()) {
        refill_extract_buffer(std::min(m_extract_buffer_limit,
                                       m_internal_size + m_external_size));
    }

    // local switch for the STXXL_DEBUG statements below
    static const bool debug = false;

    std::pair<unsigned, unsigned> type_and_index = m_minima.top();
    const unsigned& type = type_and_index.first;
    const unsigned& index = type_and_index.second;

    assert(type < 4);

    switch (type) {
    case minima_type::HEAP:
        STXXL_DEBUG("heap " << index <<
                    ": " << m_proc[index]->insertion_heap[0]);
        return m_proc[index]->insertion_heap[0];
    case minima_type::IA:
        STXXL_DEBUG("ia " << index <<
                    ": " << m_internal_arrays[index].get_min());
        return m_internal_arrays[index].get_min();
    case minima_type::EB:
        STXXL_DEBUG("eb " << m_extract_buffer_index <<
                    ": " << m_extract_buffer[m_extract_buffer_index]);
        return m_extract_buffer[m_extract_buffer_index];
    default:
        STXXL_ERRMSG("Unknown extract type: " << type);
        abort();
    }
}
//! Remove the minimum element. Mirrors top(): refills the extract buffer
//! if needed, then removes the minimum from whichever structure holds it
//! and updates/deactivates that structure in the minima tree.
void pop()
{
    assert(!m_in_bulk_push && !m_limit_extract);
    m_stats.num_extracts++;

    if (extract_buffer_empty()) {
        refill_extract_buffer(std::min(m_extract_buffer_limit,
                                       m_internal_size + m_external_size));
    }

    m_stats.extract_min_time.start();

    std::pair<unsigned, unsigned> type_and_index = m_minima.top();
    unsigned type = type_and_index.first;
    unsigned index = type_and_index.second;

    assert(type < 4);

    switch (type) {
    case minima_type::HEAP:
    {
        // minimum sits at the root of insertion heap `index`
        heap_type& insheap = m_proc[index]->insertion_heap;

        m_stats.pop_heap_time.start();
        std::pop_heap(insheap.begin(), insheap.end(), m_compare);
        insheap.pop_back();
        m_stats.pop_heap_time.stop();

        m_heaps_size--;

        if (!insheap.empty())
            m_minima.update_heap(index);
        else
            m_minima.deactivate_heap(index);
        break;
    }
    case minima_type::IA:
    {
        // advance the internal array's min pointer
        m_internal_arrays[index].inc_min();
        m_internal_size--;

        if (!(m_internal_arrays[index].empty()))
            m_minima.update_internal_array(index);
        else
            // internal array has run empty
            m_minima.deactivate_internal_array(index);
        break;
    }
    case minima_type::EB:
    {
        // consume one item from the extract buffer
        ++m_extract_buffer_index;
        assert(m_extract_buffer_size > 0);
        --m_extract_buffer_size;

        if (!extract_buffer_empty())
            m_minima.update_extract_buffer();
        else
            m_minima.deactivate_extract_buffer();
        break;
    }
    default:
        STXXL_ERRMSG("Unknown extract type: " << type);
        abort();
    }

    m_stats.extract_min_time.stop();
    check_invariants();
}
//! \}
//! \name Bulk-Limit Operations
//! \{
protected:
    //! limit element of the current bulk-limit extraction session
    value_type m_limit_element;
    //! flag: true while inside a bulk-limit extract session
    //! (set by limit_begin(), cleared by limit_end())
    bool m_limit_extract;
    //! flag: true if the extract buffer contains the full limit range,
    //! i.e. no further refill via bulk_pop_limit() is necessary
    bool m_limit_has_full_range;
public:
//! Begin bulk-limit extraction session with limit element.
void limit_begin(const value_type& limit, size_type bulk_size)
{
m_limit_extract = true;
m_limit_element = limit;
std::vector<value_type> new_extract_buffer;
m_limit_has_full_range =
bulk_pop_limit(new_extract_buffer, limit, m_extract_buffer_limit);
std::swap(new_extract_buffer, m_extract_buffer);
m_extract_buffer_index = 0;
m_extract_buffer_size = m_extract_buffer.size();
if (m_extract_buffer_size)
m_minima.update_extract_buffer();
else
m_minima.deactivate_extract_buffer();
bulk_push_begin(bulk_size);
}
//! Push a new item (which must not compare above the bulk-limit element)
//! into insertion heap p.
void limit_push(const value_type& element, const unsigned_type p = 0)
{
    assert(m_limit_extract);
    // the pushed element must be >= the session's limit in PQ order
    assert(!m_compare(m_limit_element, element));
    bulk_push(element, p);
}
//! Access the minimum element, which can only be in the extract buffer.
const value_type & limit_top()
{
    assert(m_limit_extract);

    // buffer drained AND the previous refill covered the whole limit
    // range: hand out the limit element itself as a sentinel
    if (m_extract_buffer_size == 0 && m_limit_has_full_range)
        return m_limit_element;

    if (extract_buffer_empty())
    {
        // refill the extract buffer from the limit range
        std::vector<value_type> refill;
        m_limit_has_full_range =
            bulk_pop_limit(refill, m_limit_element, m_extract_buffer_limit);
        m_extract_buffer.swap(refill);

        m_extract_buffer_index = 0;
        m_extract_buffer_size = m_extract_buffer.size();

        if (m_extract_buffer_size != 0)
            m_minima.update_extract_buffer();
        else
            m_minima.deactivate_extract_buffer();
    }

    return m_extract_buffer[m_extract_buffer_index];
}
//! Remove the minimum element, only works correctly while elements < L.
void limit_pop()
{
assert(m_limit_extract);
++m_extract_buffer_index;
assert(m_extract_buffer_size > 0);
--m_extract_buffer_size;
if (extract_buffer_empty() && !m_limit_has_full_range)
{
// extract more items
std::vector<value_type> new_extract_buffer;
m_limit_has_full_range =
bulk_pop_limit(new_extract_buffer, m_limit_element,
m_extract_buffer_limit);
std::swap(new_extract_buffer, m_extract_buffer);
m_extract_buffer_index = 0;
m_extract_buffer_size = m_extract_buffer.size();
if (m_extract_buffer_size)
m_minima.update_extract_buffer();
else
m_minima.deactivate_extract_buffer();
}
}
//! Finish the bulk-limit extraction session: closes the nested bulk-push
//! session and clears the session flag.
void limit_end()
{
    assert(m_limit_extract);
    bulk_push_end();
    m_limit_extract = false;
}
//! \}
protected:
//! Flushes all elements of the insertion heaps which are greater
//! or equal to a given limit (in PQ order) into one sorted internal
//! array; items outside the limit range remain in the heaps.
//! \param limit limit value
void flush_insertion_heaps_with_limit(const value_type& limit)
{
    // perform extract for all items < L into back of insertion_heap
    std::vector<unsigned_type> back_size(m_num_insertion_heaps);

    //#if STXXL_PARALLEL
    //#pragma omp parallel for
    //#endif
    for (size_t p = 0; p < m_num_insertion_heaps; ++p)
    {
        heap_type& insheap = m_proc[p]->insertion_heap;

        // repeatedly pop the heap top into the back of the vector until
        // the remaining top is outside the limit range; `back` marks the
        // boundary between heap part and popped-out part
        typename heap_type::iterator back = insheap.end();
        while (back != insheap.begin() &&
               m_compare(limit, insheap[0]))
        {
            // while top < L, perform pop_heap: put top to back and
            // siftDown new items (shortens heap by one)
            std::pop_heap(insheap.begin(), back, m_compare);
            --back;
        }

        // range insheap.begin() + back to insheap.end() is < L, rest >= L.
        // (debug-only partition sanity check)
        for (typename heap_type::const_iterator it = insheap.begin();
             it != insheap.end(); ++it)
        {
            if (it < back)
                assert(!m_compare(limit, *it));
            else
                assert(m_compare(limit, *it));
        }

        back_size[p] = insheap.end() - back;
    }

    // put items from insertion heaps into an internal array
    unsigned_type back_sum = std::accumulate(
        back_size.begin(), back_size.end(), unsigned_type(0));
    STXXL_DEBUG("flush_insertion_heaps_with_limit(): back_sum = " << back_sum);

    if (back_sum)
    {
        // test that enough RAM is available for remaining items
        flush_ia_ea_until_memory_free(back_sum * sizeof(value_type));

        std::vector<value_type> values(back_sum);

        // copy items into values vector, shrinking each heap accordingly
        typename std::vector<value_type>::iterator vi = values.begin();
        for (size_t p = 0; p < m_num_insertion_heaps; ++p)
        {
            heap_type& insheap = m_proc[p]->insertion_heap;

            std::copy(insheap.end() - back_size[p], insheap.end(), vi);
            vi += back_size[p];
            insheap.resize(insheap.size() - back_size[p]);

            if (insheap.empty())
                m_minima.deactivate_heap(p);
            else
                m_minima.update_heap(p);
        }
        potentially_parallel::sort(values.begin(), values.end(), m_inv_compare);

        add_as_internal_array(values);

        m_heaps_size -= back_sum;
    }
}
public:
/*!
 * Merges all external arrays and all internal arrays into one external array.
 * Public for benchmark purposes. This is a fallback for memory pressure:
 * check_external_level(0, true) performs the actual merge of all levels.
 */
void merge_external_arrays()
{
    STXXL_ERRMSG("Merging external arrays. This should not happen."
                 << " You should adjust memory assignment and/or external array level size.");

    check_external_level(0, true);

    STXXL_DEBUG("Merging all external arrays done.");

    resize_read_pool();

    // Rebuild hint tree completely as the hint sequence may have changed.
    if (!m_in_bulk_push)
        rebuild_hint_tree();
    else
        // NOTE(review): if m_external_arrays is empty here, size() - 1
        // wraps around (unsigned) and the assert passes vacuously —
        // presumably the merge always leaves at least one EA; verify.
        assert(m_external_arrays.size() - 1 >= m_bulk_first_delayed_external_array);

    check_invariants();
}
//! Free up memory by flushing internal arrays or, if none exist, by
//! combining external arrays, until mem_free bytes are available.
void flush_ia_ea_until_memory_free(internal_size_type mem_free)
{
    if (m_mem_left >= mem_free)
        return; // enough room already

    if (m_internal_size > 0)
        flush_internal_arrays();
    else
        merge_external_arrays();

    assert(m_mem_left >= mem_free);
}
//! Automatically resize the read/prefetch buffer pool depending on number
//! of external arrays: grows immediately; shrinks only as far as free
//! prefetch blocks allow (blocks in use cannot be reclaimed here).
void resize_read_pool()
{
    // target pool size scales linearly with the number of EAs
    unsigned_type new_num_read_blocks =
        m_num_read_blocks_per_ea * m_external_arrays.size();

    STXXL_DEBUG("resize_read_pool:" <<
                " m_num_read_blocks=" << m_num_read_blocks <<
                " ea_size=" << m_external_arrays.size() <<
                " m_num_read_blocks_per_ea=" << m_num_read_blocks_per_ea <<
                " new_num_read_blocks=" << new_num_read_blocks <<
                " free_size_prefetch=" << m_pool.free_size_prefetch() <<
                " m_num_hinted_blocks=" << m_num_hinted_blocks <<
                " m_num_used_read_blocks=" << m_num_used_read_blocks);

    // add new blocks
    if (new_num_read_blocks > m_num_read_blocks)
    {
        unsigned_type mem_needed =
            (new_num_read_blocks - m_num_read_blocks) * block_size;

        // -tb: this may recursively call this function!
        //flush_ia_ea_until_memory_free(mem_needed);
        STXXL_ASSERT(m_mem_left >= mem_needed);

        while (new_num_read_blocks > m_num_read_blocks) {
            block_type* new_block = new block_type();
            m_pool.add_prefetch(new_block);
            ++m_num_read_blocks;
        }

        m_mem_left -= mem_needed;
    }

    // steal extra blocks (as many as possible)
    if (new_num_read_blocks < m_num_read_blocks)
    {
        // only free (unhinted, unused) prefetch blocks can be reclaimed
        while (new_num_read_blocks < m_num_read_blocks &&
               m_pool.free_size_prefetch() > 0)
        {
            block_type* del_block = m_pool.steal_prefetch();
            delete del_block;
            --m_num_read_blocks;
            m_mem_left += block_size;
        }

        if (new_num_read_blocks < m_num_read_blocks)
            STXXL_ERRMSG("WARNING: could not immediately reduce read/prefetch pool!");
    }
}
//! Rebuild hint tree completely as the hint sequence may have changed, and
//! re-hint the correct block sequence. All existing hints are virtually
//! released, the globally best blocks are pre-hinted greedily, stale
//! hints are cancelled, and only then are the real hints issued.
void rebuild_hint_tree()
{
    m_stats.hint_time.start();

    // prepare rehinting sequence: reset hint begin pointer
    for (size_t i = 0; i < m_external_arrays.size(); ++i)
        m_external_arrays[i].rebuild_hints_prepare();

    // rebuild hint tree with first elements
    for (size_t i = 0; i < m_external_arrays.size(); ++i)
    {
        if (m_external_arrays[i].has_unhinted_em_data()) {
            m_hint_tree.activate_without_replay(i);
        }
        else {
            m_hint_tree.deactivate_without_replay(i);
        }
    }
    m_hint_tree.rebuild();

    // virtually release all hints: they count as free again for the
    // greedy redistribution below
    unsigned_type free_prefetch_blocks =
        m_pool.free_size_prefetch() + m_num_hinted_blocks;
    m_num_hinted_blocks = 0;

    // greedily pre-hint the globally smallest unhinted blocks
    int gmin_index;
    while (free_prefetch_blocks > 0 &&
           (gmin_index = m_hint_tree.top()) >= 0)
    {
        assert((size_t)gmin_index < m_external_arrays.size());

        STXXL_DEBUG("Give pre-hint in EA[" << gmin_index << "] min " <<
                    m_external_arrays[gmin_index].get_next_hintable_min());

        m_external_arrays[gmin_index].rebuild_hints_prehint_next_block();
        --free_prefetch_blocks;
        ++m_num_hinted_blocks;

        if (m_external_arrays[gmin_index].has_unhinted_em_data()) {
            m_hint_tree.replay_on_change(gmin_index);
        }
        else {
            m_hint_tree.deactivate_player(gmin_index);
        }
    }

    // invalidate all hinted blocks no longer needed
    for (size_t i = 0; i < m_external_arrays.size(); ++i)
        m_external_arrays[i].rebuild_hints_cancel();

    // perform real hinting on pre-hinted blocks
    for (size_t i = 0; i < m_external_arrays.size(); ++i)
        m_external_arrays[i].rebuild_hints_finish();

    assert(free_prefetch_blocks == m_pool.free_size_prefetch());

    m_stats.hint_time.stop();
}
//! Updates the prefetch prediction tree after a remove_items(), which
//! frees up blocks.
//! \param ea_index index of the external array in question
inline void update_hint_tree(size_t ea_index)
{
    m_stats.hint_time.start();

    const bool still_hintable =
        m_external_arrays[ea_index].has_unhinted_em_data();
    if (still_hintable)
        m_hint_tree.replay_on_change(ea_index);
    else
        m_hint_tree.deactivate_player(ea_index);

    m_stats.hint_time.stop();
}
//! Updates the external min tree after a remove() or a
//! wait_next_blocks() call.
//! \param ea_index index of the external array in question
inline void update_external_min_tree(size_t ea_index)
{
    const bool has_data = m_external_arrays[ea_index].has_em_data();
    if (has_data)
        m_external_min_tree.replay_on_change(ea_index);
    else
        m_external_min_tree.deactivate_player(ea_index);
}
//! Hints EA blocks which will be needed soon. Hints at most
//! m_num_prefetchers blocks globally: greedily picks the EA with the
//! smallest next hintable block until the prefetch pool is exhausted.
inline void hint_external_arrays()
{
    m_stats.hint_time.start();

    STXXL_DEBUG("hint_external_arrays()"
                " for free_size_prefetch=" << m_pool.free_size_prefetch());

    int gmin_index;
    while (m_pool.free_size_prefetch() > 0 &&
           (gmin_index = m_hint_tree.top()) >= 0)
    {
        assert((size_t)gmin_index < m_external_arrays.size());
        STXXL_DEBUG("Give hint in EA[" << gmin_index << "]");

        // issuing the hint consumes one free prefetch block
        m_external_arrays[gmin_index].hint_next_block();
        ++m_num_hinted_blocks;

        if (m_external_arrays[gmin_index].has_unhinted_em_data()) {
            m_hint_tree.replay_on_change(gmin_index);
        }
        else {
            m_hint_tree.deactivate_player(gmin_index);
        }
    }
    m_stats.hint_time.stop();
}
//! Print statistics: compile-time configuration, memory figures, the
//! collected timer/counter stats, and the minima structure's stats.
void print_stats() const
{
    STXXL_VARDUMP(c_merge_sorted_heaps);
    STXXL_VARDUMP(c_limit_extract_buffer);
    STXXL_VARDUMP(c_single_insert_limit);

    if (c_limit_extract_buffer) {
        STXXL_VARDUMP(m_extract_buffer_limit);
        STXXL_MEMDUMP(m_extract_buffer_limit * sizeof(value_type));
    }

#if STXXL_PARALLEL
    STXXL_VARDUMP(omp_get_max_threads());
#endif

    STXXL_MEMDUMP(m_mem_for_heaps);
    STXXL_MEMDUMP(m_mem_left);

    //if (num_extract_buffer_refills > 0) {
    //    STXXL_VARDUMP(total_extract_buffer_size / num_extract_buffer_refills);
    //    STXXL_MEMDUMP(total_extract_buffer_size / num_extract_buffer_refills * sizeof(value_type));
    //}

    STXXL_MSG(m_stats);
    m_minima.print_stats();
}
protected:
//! Calculates the sequences vector needed by the multiway merger,
//! considering inaccessible data from external arrays.
//! The sizes vector stores the size of each sequence.
//! \param sizes [out] per-sequence element counts, filled by this call
//! \param sequences [out] per-sequence [begin,end) iterator pairs
//! \param reuse_previous_lower_bounds Reuse the bounds from a previous
//! run: sequences[i].second must be a valid iterator set by a
//! previous calculate_merge_sequences() call!
//! \returns the index of the external array which is limiting factor
//! or m_external_arrays.size() if not limited.
size_t calculate_merge_sequences(std::vector<size_type>& sizes,
std::vector<iterator_pair_type>& sequences,
bool reuse_previous_lower_bounds = false)
{
STXXL_DEBUG("calculate merge sequences");
static const bool debug = false; // local override of the STXXL_DEBUG flag
const size_type eas = m_external_arrays.size();
const size_type ias = m_internal_arrays.size();
assert(sizes.size() == eas + ias);
assert(sequences.size() == eas + ias);
/*
* determine minimum of each first block
*/
// top of the min tree = EA whose next (still inaccessible) EM block
// has the globally smallest minimum; merging must not pass this value
int gmin_index = m_external_min_tree.top();
bool needs_limit = (gmin_index >= 0) ? true : false;
// test correctness of external block min tree
#ifdef STXXL_DEBUG_ASSERTIONS
bool test_needs_limit = false;
int test_gmin_index = 0;
value_type test_gmin_value;
m_stats.refill_minmax_time.start();
// linear scan over all EAs to recompute the expected tree winner
for (size_type i = 0; i < eas; ++i) {
if (m_external_arrays[i].has_em_data()) {
const value_type& min_value =
m_external_arrays[i].get_next_block_min();
if (!test_needs_limit) {
test_needs_limit = true;
test_gmin_value = min_value;
test_gmin_index = i;
}
else {
STXXL_DEBUG("min[" << i << "]: " << min_value <<
" test: " << test_gmin_value <<
": " << m_inv_compare(min_value, test_gmin_value));
if (m_inv_compare(min_value, test_gmin_value)) {
test_gmin_value = min_value;
test_gmin_index = i;
}
}
}
}
m_stats.refill_minmax_time.stop();
STXXL_ASSERT(needs_limit == test_needs_limit);
STXXL_ASSERT(!needs_limit || gmin_index == test_gmin_index);
#endif
/*
* calculate size and create sequences to merge
*/
#if STXXL_PARALLEL
// #pragma omp parallel for if(eas + ias > m_num_insertion_heaps)
#endif
// sequence indices [0,eas) are external arrays, [eas,eas+ias) internal
for (size_type i = 0; i < eas + ias; ++i) {
iterator begin, end;
if (i < eas) {
begin = m_external_arrays[i].begin();
end = m_external_arrays[i].end();
}
else {
size_type j = i - eas;
begin = m_internal_arrays[j].begin();
end = m_internal_arrays[j].end();
}
if (needs_limit) {
// cut the sequence before the smallest value that is still
// inaccessible in external memory
const value_type& gmin_value =
m_external_arrays[gmin_index].get_next_block_min();
// remove timer if parallel
//stats.refill_lower_bound_time.start();
if (reuse_previous_lower_bounds) {
// Be careful that sequences[i].second is really valid and
// set by a previous calculate_merge_sequences() run!
end = std::lower_bound(sequences[i].second, end,
gmin_value, m_inv_compare);
}
else
{
end = std::lower_bound(begin, end,
gmin_value, m_inv_compare);
}
//stats.refill_lower_bound_time.stop();
}
sizes[i] = std::distance(begin, end);
sequences[i] = std::make_pair(begin, end);
STXXL_DEBUG("sequence[" << i << "] " << (i < eas ? "ea " : "ia ") <<
begin << " - " << end <<
" size " << sizes[i] <<
(needs_limit ? " with ub limit" : ""));
}
if (needs_limit) {
STXXL_DEBUG("return with needs_limit: gmin_index=" << gmin_index);
return gmin_index;
}
else {
STXXL_DEBUG("return with needs_limit: eas=" << eas);
return eas;
}
}
protected:
//! Convert extract buffer into a new internal array.
//! \param do_not_flush if true, do not flush IAs/EAs out to disk to make
//! room for the new internal array (caller handles memory itself).
void convert_eb_into_ia(bool do_not_flush = false)
{
// nothing to convert if the extract buffer is empty
if (m_extract_buffer_size == 0) return;
STXXL_DEBUG("convert_eb_into_ia");
// tb: if in limit sequence and the EB gets flushed out to EM, then we
// have to re-merge items into the EB instead of returning the
// sentinel.
m_limit_has_full_range = false;
// TODO: memory is NOT allocated, but extract buffer is currently not
// counted
if (!do_not_flush)
flush_ia_ea_until_memory_free(
internal_array_type::int_memory(m_extract_buffer.size())
);
// the flush above may have emptied the extract buffer already
if (m_extract_buffer_size == 0) return;
// first deactivate extract buffer to replay tree for new IA.
m_minima.deactivate_extract_buffer();
// add eb as internal array with current index
add_as_internal_array(m_extract_buffer, m_extract_buffer_index);
m_extract_buffer_index = 0;
m_extract_buffer_size = 0;
}
//! Refills the extract buffer from the external arrays.
//! Builds limited merge sequences over all EAs/IAs and multiway-merges
//! them into m_extract_buffer.
//! \param minimum_size requested minimum size of the resulting extract buffer.
//! Prints a warning if there is not enough data to reach this size.
//! \param maximum_size maximum size of the extract buffer. Using
//! m_extract_buffer_limit if set to 0.
inline void refill_extract_buffer(size_t minimum_size = 0,
size_t maximum_size = 0)
{
STXXL_DEBUG("refilling extract buffer" <<
" ia_size=" << m_internal_arrays.size() <<
" ea_size=" << m_external_arrays.size());
if (maximum_size == 0)
maximum_size = m_extract_buffer_limit;
check_invariants();
assert(extract_buffer_empty());
m_extract_buffer_index = 0;
// drop empty arrays before building the merge sequences
cleanup_external_arrays();
size_type ias, eas = m_external_arrays.size();
m_minima.clear_internal_arrays();
cleanup_internal_arrays();
ias = m_internal_arrays.size();
if (eas == 0 && ias == 0) {
// nothing left to merge: leave the extract buffer empty
m_extract_buffer.resize(0);
m_minima.deactivate_extract_buffer();
return;
}
m_stats.num_extract_buffer_refills++;
m_stats.refill_extract_buffer_time.start();
m_stats.refill_time_before_merge.start();
std::vector<size_type> sizes(eas + ias);
std::vector<iterator_pair_type> sequences(eas + ias);
size_type output_size = 0;
if (minimum_size > 0) {
// eas + 1 == "not calculated yet"; eas == "not limited by any EA"
size_t limiting_ea_index = eas + 1;
bool reuse_lower_bounds = false;
while (output_size < minimum_size)
{
STXXL_DEBUG("refill: request more data," <<
" output_size=" << output_size <<
" minimum_size=" << minimum_size <<
" limiting_ea_index=" << limiting_ea_index);
if (limiting_ea_index < eas) {
// make more blocks of the limiting EA accessible
if (m_external_arrays[limiting_ea_index].num_hinted_blocks() == 0)
break;
wait_next_ea_blocks(limiting_ea_index);
reuse_lower_bounds = true;
}
else if (limiting_ea_index == eas) {
// no more unaccessible EM data
STXXL_MSG("Warning: refill_extract_buffer(n): "
"minimum_size > # mergeable elements!");
break;
}
limiting_ea_index = calculate_merge_sequences(
sizes, sequences, reuse_lower_bounds);
// accumulate into size_type: an int initial value (literal 0)
// would make std::accumulate sum in int and truncate/overflow
// for more than INT_MAX elements
output_size = std::accumulate(sizes.begin(), sizes.end(),
size_type(0));
}
}
else {
calculate_merge_sequences(sizes, sequences);
// see note above on the size_type(0) initial value
output_size = std::accumulate(sizes.begin(), sizes.end(),
size_type(0));
}
if (c_limit_extract_buffer) {
output_size = std::min<size_t>(output_size, maximum_size);
}
m_stats.max_extract_buffer_size.set_max(output_size);
m_stats.total_extract_buffer_size += output_size;
assert(output_size > 0);
m_extract_buffer.resize(output_size);
m_extract_buffer_size = output_size;
m_stats.refill_time_before_merge.stop();
m_stats.refill_merge_time.start();
potentially_parallel::multiway_merge(
sequences.begin(), sequences.end(),
m_extract_buffer.begin(), output_size, m_inv_compare);
m_stats.refill_merge_time.stop();
m_stats.refill_time_after_merge.start();
// consume the merged items from the source arrays
advance_arrays(sequences, sizes, eas, ias);
m_minima.update_extract_buffer();
m_stats.refill_time_after_merge.stop();
m_stats.refill_extract_buffer_time.stop();
check_invariants();
}
//! Requests more EM data from a given EA and updates
//! the winner trees and hints accordingly.
//! \param ea_index index of the external array to wait on
inline void wait_next_ea_blocks(unsigned_type ea_index)
{
// wait_next_blocks() returns how many previously hinted blocks
// have now become used read blocks
const unsigned_type blocks_consumed =
m_external_arrays[ea_index].wait_next_blocks();
m_num_used_read_blocks += blocks_consumed;
m_num_hinted_blocks -= blocks_consumed;
// the EA's accessible minimum may have changed
update_external_min_tree(ea_index);
}
//! Removes the merged items from the source arrays and cleans up arrays
//! that became empty, updating the winner trees accordingly.
//! \param sequences iterator pairs used in the last merge (the merger
//! left .first pointing past the merged items)
//! \param sizes number of elements each sequence offered before merging
//! \param eas number of external arrays (sequence indices [0,eas))
//! \param ias number of internal arrays (sequence indices [eas,eas+ias))
inline void advance_arrays(std::vector<iterator_pair_type>& sequences,
std::vector<size_type>& sizes,
size_t eas, size_t ias)
{
unsigned_type total_freed_blocks = 0;
for (size_type i = 0; i < eas + ias; ++i) {
// dist represents the number of elements that haven't been merged
size_type dist = std::distance(sequences[i].first,
sequences[i].second);
// diff = number of elements actually consumed from this sequence
const size_t diff = sizes[i] - dist;
if (diff == 0) continue;
if (i < eas) {
// remove items and free blocks in RAM.
unsigned_type freed_blocks =
m_external_arrays[i].remove_items(diff);
m_num_used_read_blocks -= freed_blocks;
total_freed_blocks += freed_blocks;
// correct item count.
assert(m_external_size >= diff);
m_external_size -= diff;
}
else {
size_type j = i - eas;
m_internal_arrays[j].inc_min(diff);
assert(m_internal_size >= diff);
m_internal_size -= diff;
}
}
// remove empty arrays - important for the next round (may also reduce
// number of prefetch buffers, so must be before hinting).
cleanup_external_arrays();
// prefetch new blocks from EAs using freed blocks
if (total_freed_blocks)
hint_external_arrays();
m_stats.num_new_external_arrays = 0;
cleanup_internal_arrays();
}
//! Flushes the insertion heap of processor p into an internal array.
//! Sorting happens outside the critical section so multiple heaps can
//! be sorted in parallel; only the shared-structure updates are serialized.
//! \param p index of the processor whose insertion heap is flushed
inline void flush_insertion_heap(unsigned_type p)
{
assert(m_proc[p]->insertion_heap.size() != 0);
heap_type& insheap = m_proc[p]->insertion_heap;
size_t size = insheap.size();
STXXL_DEBUG0(
"Flushing insertion heap array p=" << p <<
" size=" << insheap.size() <<
" capacity=" << insheap.capacity() <<
" int_memory=" << internal_array_type::int_memory(insheap.size()) <<
" mem_left=" << m_mem_left);
m_stats.num_insertion_heap_flushes++;
stats_timer flush_time(true); // separate timer due to parallel sorting
// sort locally, independent of others
std::sort(insheap.begin(), insheap.end(), m_inv_compare);
// the shared data structures below must be modified by one thread
// at a time
#if STXXL_PARALLEL
#pragma omp critical (stxxl_flush_insertion_heap)
#endif
{
// test that enough RAM is available for merged internal array:
// otherwise flush the existing internal arrays out to disk.
flush_ia_ea_until_memory_free(
internal_array_type::int_memory(insheap.size()));
// invalidate player in minima tree (before adding the IA to tree)
m_minima.deactivate_heap(p);
// insheap is empty afterwards, as vector was swapped into new_array
add_as_internal_array(insheap);
// reserve new insertion heap
insheap.reserve(m_insertion_heap_capacity);
assert(insheap.capacity() * sizeof(value_type)
== insertion_heap_int_memory());
// update item counts
#if STXXL_PARALLEL
#pragma omp atomic
#endif
m_heaps_size -= size;
}
m_stats.insertion_heap_flush_time += flush_time;
}
//! Flushes all insertion heaps into internal arrays. Depending on
//! c_merge_sorted_heaps the sorted heaps are either multiway-merged into
//! one internal array, or each heap becomes its own internal array.
inline void flush_insertion_heaps()
{
// upper bound of RAM needed for the resulting internal array(s)
size_type max_mem_needed;
if (c_merge_sorted_heaps) {
max_mem_needed = m_mem_for_heaps;
}
else {
max_mem_needed = insertion_heap_int_memory();
}
// test that enough RAM is available for merged internal array:
// otherwise flush the existing internal arrays out to disk.
flush_ia_ea_until_memory_free(max_mem_needed);
m_stats.num_insertion_heap_flushes++;
m_stats.insertion_heap_flush_time.start();
size_type size = m_heaps_size;
assert(size > 0);
std::vector<std::pair<value_iterator, value_iterator> > sequences(m_num_insertion_heaps);
// sort each heap locally, in parallel if enabled.
// (review: a former 'int_memory' accumulator was removed here -- it
// was updated inside this parallel loop without synchronization,
// i.e. a data race, and its value was never read.)
#if STXXL_PARALLEL
#pragma omp parallel for
#endif
for (unsigned i = 0; i < m_num_insertion_heaps; ++i)
{
heap_type& insheap = m_proc[i]->insertion_heap;
std::sort(insheap.begin(), insheap.end(), m_inv_compare);
if (c_merge_sorted_heaps)
sequences[i] = std::make_pair(insheap.begin(), insheap.end());
}
if (c_merge_sorted_heaps)
{
m_stats.merge_sorted_heaps_time.start();
std::vector<value_type> merged_array(size);
potentially_parallel::multiway_merge(
sequences.begin(), sequences.end(),
merged_array.begin(), size, m_inv_compare);
m_stats.merge_sorted_heaps_time.stop();
add_as_internal_array(merged_array);
// reset all heaps to empty with full capacity
for (unsigned i = 0; i < m_num_insertion_heaps; ++i)
{
m_proc[i]->insertion_heap.clear();
m_proc[i]->insertion_heap.reserve(m_insertion_heap_capacity);
}
m_minima.clear_heaps();
}
else
{
for (unsigned i = 0; i < m_num_insertion_heaps; ++i)
{
heap_type& insheap = m_proc[i]->insertion_heap;
if (insheap.size() == 0) continue;
// insheap is swapped away and left empty
add_as_internal_array(insheap);
// reserve new insertion heap
insheap.reserve(m_insertion_heap_capacity);
}
m_minima.clear_heaps();
}
m_heaps_size = 0;
m_stats.insertion_heap_flush_time.stop();
check_invariants();
}
//! Flushes the internal arrays into an external array.
//! Multiway-merges all internal arrays (plus the extract buffer) into a
//! newly created external array on level 0, then re-registers trees.
void flush_internal_arrays()
{
STXXL_DEBUG("Flushing internal arrays" <<
" num_arrays=" << m_internal_arrays.size());
m_stats.num_internal_array_flushes++;
m_stats.internal_array_flush_time.start();
m_minima.clear_internal_arrays();
// also flush extract buffer items out to disk.
convert_eb_into_ia(true);
// clean up internal arrays that have been deleted in extract_min!
cleanup_internal_arrays();
size_type num_arrays = m_internal_arrays.size();
size_type size = m_internal_size;
size_type int_memory = 0;
std::vector<iterator_pair_type> sequences(num_arrays);
// build one merge sequence per internal array and sum up their RAM
for (unsigned i = 0; i < num_arrays; ++i)
{
sequences[i] = std::make_pair(m_internal_arrays[i].begin(),
m_internal_arrays[i].end());
int_memory += m_internal_arrays[i].int_memory();
}
// must release more RAM in IAs than the EA takes, otherwise: merge
// external and internal arrays!
if (int_memory < external_array_type::int_memory(size)
+ ceil(m_num_read_blocks_per_ea) * block_size)
{
return merge_external_arrays();
}
// construct new external array
external_array_type ea(size, &m_pool, 0);
m_stats.max_merge_buffer_size.set_max(size);
{
// scope: the writer is destroyed at the closing brace, which
// finalizes the external array before it is used below
external_array_writer_type external_array_writer(ea);
potentially_parallel::multiway_merge(
sequences.begin(), sequences.end(),
external_array_writer.begin(), size, m_inv_compare);
}
STXXL_DEBUG("Merge done of new ea " << &ea);
m_external_arrays.swap_back(ea);
m_internal_size = 0;
m_external_size += size;
// register EA in min tree
// important for check_external_level()!
m_external_min_tree.activate_without_replay(m_external_arrays.size() - 1);
update_external_min_tree(m_external_arrays.size() - 1);
// register EA in hint tree
m_hint_tree.activate_without_replay(m_external_arrays.size() - 1);
if (!m_in_bulk_push)
update_hint_tree(m_external_arrays.size() - 1);
// else: done in bulk_push_end() -> rebuild_hint_tree()
m_internal_arrays.clear();
m_stats.num_new_internal_arrays = 0;
cleanup_internal_arrays();
// TODO: is this necessary? See cleanup_internal_arrays().
for (size_t i = 0; i < c_max_internal_levels; ++i)
m_internal_levels[i] = 0;
// account for the freed IA memory and the new EA's memory footprint
m_mem_left += int_memory;
m_mem_left -= m_external_arrays.back().int_memory();
m_stats.max_num_external_arrays.set_max(m_external_arrays.size());
m_stats.internal_array_flush_time.stop();
// update EA level and potentially merge
++m_external_levels[0];
check_external_level(0);
resize_read_pool();
// Rebuild hint tree completely as the hint sequence may have changed.
if (!m_in_bulk_push)
rebuild_hint_tree();
else
assert(m_external_arrays.size() - 1 >= m_bulk_first_delayed_external_array);
check_invariants();
}
//! Comparator for the winner tree used in check_external_level():
//! orders two players by the next hintable block minimum of the external
//! arrays they refer to. (The code compares get_next_hintable_min(),
//! i.e. block minima -- the old "largest accessible value" comment did
//! not match the implementation.)
struct s_min_tree_comparator {
const external_arrays_type& m_eas;
// maps player index -> index into the external arrays vector
const std::vector<unsigned_type>& m_indices;
const inv_compare_type& m_compare;
s_min_tree_comparator(const external_arrays_type& eas,
const inv_compare_type& compare,
const std::vector<unsigned_type>& indices)
: m_eas(eas), m_indices(indices), m_compare(compare) { }
//! \returns true if player a's next hintable minimum precedes b's
bool operator () (const size_t& a, const size_t& b) const
{
return m_compare(m_eas[m_indices[a]].get_next_hintable_min(),
m_eas[m_indices[b]].get_next_hintable_min());
}
};
//! Merges external arrays if there are too many external arrays on
//! the same level.
//! \param level external level to check
//! \param force_merge_all if true, merge all non-empty EAs regardless of
//! level (used for freeing memory); result goes to the highest level
void check_external_level(unsigned_type level, bool force_merge_all = false)
{
if (!force_merge_all)
STXXL_DEBUG("Checking external level " << level);
// return if EA level is not full
if (m_external_levels[level] < c_max_external_level_size && !force_merge_all)
return;
// collect the candidate arrays, their total size and RAM usage
unsigned_type level_size = 0;
size_type int_memory = 0;
std::vector<unsigned_type> ea_index;
for (unsigned_type i = 0; i < m_external_arrays.size(); ++i)
{
if (m_external_arrays[i].level() != level && !force_merge_all) continue;
if (m_external_arrays[i].empty()) continue;
level_size += m_external_arrays[i].size();
int_memory += m_external_arrays[i].int_memory();
ea_index.push_back(i);
}
// return if there is not enough RAM for the new array.
// TODO: force_merge_all==true is for freeing memory. Breaking here is not
// helpful in this case. But one should maybe reserve some space in advance.
if (m_mem_left < external_array_type::int_memory(level_size) && !force_merge_all)
return;
m_mem_left -= external_array_type::int_memory(level_size);
STXXL_ASSERT(force_merge_all || c_max_external_level_size == ea_index.size());
unsigned_type num_arrays_to_merge = ea_index.size();
STXXL_DEBUG("merging external arrays" <<
" level=" << level <<
" level_size=" << level_size <<
" sequences=" << num_arrays_to_merge <<
" force_merge_all=" << force_merge_all);
// if force_merge_all: create array in highest level to avoid merging
// of such a large EA.
unsigned_type new_level = force_merge_all ? c_max_external_levels - 1 : level + 1;
// construct new external array
external_array_type ea(level_size, &m_pool, new_level);
{
external_array_writer_type external_array_writer(ea);
typename external_array_writer_type::iterator out_iter
= external_array_writer.begin();
// === build minima_tree over the level's arrays ===
s_min_tree_comparator min_tree_comparator(m_external_arrays,
m_inv_compare, ea_index);
winner_tree<s_min_tree_comparator> min_tree(num_arrays_to_merge,
min_tree_comparator);
// =================================================
unsigned_type num_arrays_done = 0;
// merge in rounds: each round hints/waits for blocks of every
// source EA and merges as much data as is safely accessible
while (num_arrays_to_merge != num_arrays_done)
{
STXXL_DEBUG("num_arrays_done = " << num_arrays_done);
// === build hints ===
for (unsigned_type i = 0; i < num_arrays_to_merge; ++i) {
if (m_external_arrays[ea_index[i]].has_unhinted_em_data()) {
min_tree.activate_without_replay(i);
}
else {
min_tree.deactivate_without_replay(i);
}
}
min_tree.rebuild();
// === fill available memory with read blocks ===
while (m_mem_left >= block_size) {
block_type* new_block = new block_type();
m_pool.add_prefetch(new_block);
++m_num_read_blocks;
m_mem_left -= block_size;
}
// ==============================================
// cleanup hints (all arrays, not only the ones to merge)
for (unsigned_type i = 0; i < m_external_arrays.size(); ++i) {
m_external_arrays[i].rebuild_hints_prepare();
}
// virtually release all hints
unsigned_type free_prefetch_blocks =
m_pool.free_size_prefetch() + m_num_hinted_blocks;
m_num_hinted_blocks = 0;
int gmin_index_index; // index in ea_index
// pre-hint the globally smallest unhinted blocks first
while (free_prefetch_blocks > 0 &&
(gmin_index_index = min_tree.top()) >= 0)
{
const unsigned_type gmin_index = ea_index[gmin_index_index];
assert(gmin_index < m_external_arrays.size());
STXXL_DEBUG0("check_external_level():Give pre-hint in EA[" << gmin_index << "] min " <<
m_external_arrays[gmin_index].get_next_hintable_min());
m_external_arrays[gmin_index].rebuild_hints_prehint_next_block();
--free_prefetch_blocks;
++m_num_hinted_blocks;
if (m_external_arrays[gmin_index].has_unhinted_em_data()) {
min_tree.replay_on_change(gmin_index_index);
}
else {
min_tree.deactivate_player(gmin_index_index);
}
}
// invalidate all hinted blocks no longer needed
// (all arrays, not only the ones to merge)
for (size_t i = 0; i < m_external_arrays.size(); ++i)
m_external_arrays[i].rebuild_hints_cancel();
// perform real hinting on pre-hinted blocks
// (all arrays, not only the ones to merge)
for (size_t i = 0; i < m_external_arrays.size(); ++i)
m_external_arrays[i].rebuild_hints_finish();
assert(free_prefetch_blocks == m_pool.free_size_prefetch());
// ================================ end build hints ======
// === wait for data ===
for (size_type i = 0; i < num_arrays_to_merge; ++i) {
const unsigned_type index = ea_index[i];
unsigned_type used_blocks =
m_external_arrays[index].wait_all_hinted_blocks();
m_num_hinted_blocks -= used_blocks;
m_num_used_read_blocks += used_blocks;
}
// =====================
// === build sequences ===
std::vector<iterator_pair_type> sequences(num_arrays_to_merge);
std::vector<size_type> sizes(num_arrays_to_merge);
gmin_index_index = min_tree.top();
bool needs_limit = (gmin_index_index >= 0) ? true : false;
for (size_type i = 0; i < num_arrays_to_merge; ++i) {
const unsigned_type index = ea_index[i];
iterator begin = m_external_arrays[index].begin();
iterator end = m_external_arrays[index].end();
if (needs_limit) {
// cut each sequence before the smallest value that is
// still inaccessible in external memory
const unsigned_type gmin_index = ea_index[gmin_index_index];
const value_type& gmin_value =
m_external_arrays[gmin_index].get_next_block_min();
end = std::lower_bound(begin, end,
gmin_value, m_inv_compare);
}
sizes[i] = std::distance(begin, end);
sequences[i] = std::make_pair(begin, end);
STXXL_DEBUG("sequence[" << i << "] ea " <<
begin << " - " << end <<
" size " << sizes[i] <<
(needs_limit ? " with ub limit" : ""));
}
// ==========================================
// === merge ===
// NOTE(review): the int literal 0 makes std::accumulate sum in
// 'int'; size_type(0) would avoid truncation for huge arrays.
size_type output_size = std::accumulate(sizes.begin(), sizes.end(), 0);
out_iter = potentially_parallel::multiway_merge(
sequences.begin(), sequences.end(),
out_iter, output_size, m_inv_compare);
for (unsigned_type i = 0; i < num_arrays_to_merge; ++i) {
const unsigned_type index = ea_index[i];
if (!m_external_arrays[index].empty()) {
// remove items and free blocks in RAM.
unsigned_type freed_blocks =
m_external_arrays[index].remove_items(sizes[i]);
m_num_used_read_blocks -= freed_blocks;
if (m_external_arrays[index].empty())
++num_arrays_done;
}
}
// reset read buffer
resize_read_pool();
// cannot call clear_external_arrays() here, since it
// corrupts ea_index.
}
if (m_in_bulk_push)
m_bulk_first_delayed_external_array = 0; // TODO: workaround
} // destroy external_array_writer
// clean up now empty arrays
cleanup_external_arrays();
m_external_arrays.swap_back(ea);
++m_external_levels[new_level];
// register EA in min tree
m_external_min_tree.activate_without_replay(m_external_arrays.size() - 1);
update_external_min_tree(m_external_arrays.size() - 1);
// register EA in hint tree
m_hint_tree.activate_without_replay(m_external_arrays.size() - 1);
if (!m_in_bulk_push)
update_hint_tree(m_external_arrays.size() - 1);
// else: done in bulk_push_end() -> rebuild_hint_tree()
STXXL_DEBUG("Merge done of new ea " << &ea);
// the target level may have become full in turn
if (!force_merge_all)
check_external_level(level + 1);
check_invariants();
}
//! Add new internal array, which requires that values are sorted!
//! automatically decreases m_mem_left! also merges internal arrays if
//! there are too many internal arrays on the same level.
//! \param values sorted vector to take over (swapped into the new array)
//! \param used number of elements at the front already consumed
//! \param level internal level to place the new array on
void add_as_internal_array(std::vector<value_type>& values,
unsigned_type used = 0,
unsigned_type level = 0)
{
const size_t size = values.size();
const size_t capacity = values.capacity();
assert(size > used); // at least one element
// the internal array takes ownership of 'values' (swap)
internal_array_type new_array(values, used, level);
STXXL_ASSERT(new_array.int_memory() ==
internal_array_type::int_memory(capacity));
m_internal_arrays.swap_back(new_array);
if (!extract_buffer_empty()) {
m_stats.num_new_internal_arrays++;
m_stats.max_num_new_internal_arrays.set_max(
m_stats.num_new_internal_arrays);
m_minima.add_internal_array(
static_cast<unsigned>(m_internal_arrays.size()) - 1
);
}
m_internal_size += size - used;
m_mem_left -= internal_array_type::int_memory(capacity);
STXXL_CHECK(level < c_max_internal_levels &&
"Internal array level is larger than anything possible "
"in this universe. Increase the size of m_internal_levels");
++m_internal_levels[level];
m_stats.max_num_internal_arrays.set_max(m_internal_arrays.size());
// if IA level is too large ...
if (m_internal_levels[level] < c_max_internal_level_size) return;
// collect all non-empty arrays of this level for merging
unsigned_type level_size = 0;
size_type int_memory = 0;
std::vector<iterator_pair_type> sequences;
std::vector<unsigned_type> ia_index;
for (unsigned_type i = 0; i < m_internal_arrays.size(); ++i)
{
if (m_internal_arrays[i].level() != level) continue;
if (m_internal_arrays[i].empty()) continue;
level_size += m_internal_arrays[i].size();
int_memory += m_internal_arrays[i].int_memory();
sequences.push_back(std::make_pair(m_internal_arrays[i].begin(),
m_internal_arrays[i].end()));
ia_index.push_back(i);
}
// AND there is enough RAM to merge it (without flushing out to EA).
if (m_mem_left < internal_array_type::int_memory(level_size)) return;
// must free up more memory than the new array needs.
STXXL_ASSERT(int_memory >= internal_array_type::int_memory(level_size));
STXXL_DEBUG("merging internal arrays" <<
" level=" << level <<
" level_size=" << level_size <<
" sequences=" << sequences.size());
std::vector<value_type> merged_array(level_size);
potentially_parallel::multiway_merge(
sequences.begin(), sequences.end(),
merged_array.begin(), level_size, m_inv_compare);
// release memory of old internal arrays immediately
for (unsigned_type i = 0; i < ia_index.size(); ++i)
{
unsigned_type ia = ia_index[i];
m_internal_arrays[ia].make_empty();
// this is done in cleanup_internal_arrays()...
//if (ia < m_minima.ia_slots())
// m_minima.deactivate_internal_array(ia);
}
cleanup_internal_arrays();
// in add_as_internal_array the level_size is re-added!
m_internal_size -= level_size;
// add as new internal array at next level (and maybe recursively merge)
add_as_internal_array(merged_array, 0, level + 1);
}
/*!
* Sorts the values from values and writes them into an internal array.
* Don't use the value vector afterwards!
*
* \param values the vector to sort and store
*/
void flush_array_internal(std::vector<value_type>& values)
{
potentially_parallel::sort(values.begin(), values.end(), m_inv_compare);
// flush until enough memory for new array
flush_ia_ea_until_memory_free(
internal_array_type::int_memory(values.size())
);
add_as_internal_array(values);
}
//! Struct of all statistical counters and timers. Turn on/off statistics
//! using the stats_counter and stats_timer typedefs.
struct stats_type
{
//! Largest number of elements in the extract buffer at the same time
stats_counter max_extract_buffer_size;
//! Sum of the sizes of each extract buffer refill. Used for average
//! size.
stats_counter total_extract_buffer_size;
//! Largest number of elements in the merge buffer when running
//! flush_internal_arrays()
stats_counter max_merge_buffer_size;
//! Total number of extracts
stats_counter num_extracts;
//! Number of refill_extract_buffer() calls
stats_counter num_extract_buffer_refills;
//! Number of flush_insertion_heaps() calls
stats_counter num_insertion_heap_flushes;
//! Number of flush_directly_to_hd() calls
stats_counter num_direct_flushes;
//! Number of flush_internal_arrays() calls
stats_counter num_internal_array_flushes;
//! Number of merge_external_arrays() calls
stats_counter num_external_array_merges;
//! Largest number of internal arrays at the same time
stats_counter max_num_internal_arrays;
//! Largest number of external arrays at the same time
stats_counter max_num_external_arrays;
//! Temporary number of new external arrays at the same time (which
//! were created while the extract buffer hadn't been empty)
stats_counter num_new_external_arrays;
//! Largest number of new external arrays at the same time (which were
//! created while the extract buffer hadn't been empty)
stats_counter max_num_new_external_arrays;
//! Temporary number of new internal arrays at the same time (which
//! were created while the extract buffer hadn't been empty)
stats_counter num_new_internal_arrays;
//! Largest number of new internal arrays at the same time (which were
//! created while the extract buffer hadn't been empty)
stats_counter max_num_new_internal_arrays;
//! Total time for flush_insertion_heaps()
stats_timer insertion_heap_flush_time;
//! Total time for flush_directly_to_hd()
stats_timer direct_flush_time;
//! Total time for flush_internal_arrays()
stats_timer internal_array_flush_time;
//! Total time for merge_external_arrays()
stats_timer external_array_merge_time;
//! Total time for extract_min()
stats_timer extract_min_time;
//! Total time for refill_extract_buffer()
stats_timer refill_extract_buffer_time;
//! Total time for the merging in refill_extract_buffer()
//! Part of refill_extract_buffer_time.
stats_timer refill_merge_time;
//! Total time for all things before merging in refill_extract_buffer()
//! Part of refill_extract_buffer_time.
stats_timer refill_time_before_merge;
//! Total time for all things after merging in refill_extract_buffer()
//! Part of refill_extract_buffer_time.
stats_timer refill_time_after_merge;
//! Total time of wait() calls in first part of
//! refill_extract_buffer(). Part of refill_time_before_merge and
//! refill_extract_buffer_time.
stats_timer refill_wait_time;
//! Total time for pop_heap() in extract_min().
//! Part of extract_min_time.
stats_timer pop_heap_time;
//! Total time for merging the sorted heaps.
//! Part of flush_insertion_heaps.
stats_timer merge_sorted_heaps_time;
//! Total time for std::lower_bound calls in refill_extract_buffer()
//! Part of refill_extract_buffer_time and refill_time_before_merge.
// stats_timer refill_lower_bound_time;
//! Total time for std::accumulate calls in refill_extract_buffer()
//! Part of refill_extract_buffer_time and refill_time_before_merge.
stats_timer refill_accumulate_time;
//! Total time for determining the smallest max value in refill_extract_buffer()
//! Part of refill_extract_buffer_time and refill_time_before_merge.
stats_timer refill_minmax_time;
//! Total time spent in hint_external_arrays()
stats_timer hint_time;
//! Dumps all counters and timers, one "name=value" per line.
friend std::ostream& operator << (std::ostream& os, const stats_type& o)
{
return os << "max_extract_buffer_size=" << o.max_extract_buffer_size.as_memory_amount(sizeof(value_type)) << std::endl
<< "total_extract_buffer_size=" << o.total_extract_buffer_size.as_memory_amount(sizeof(value_type)) << std::endl
<< "max_merge_buffer_size=" << o.max_merge_buffer_size.as_memory_amount(sizeof(value_type)) << std::endl
<< "num_extracts=" << o.num_extracts << std::endl
<< "num_extract_buffer_refills=" << o.num_extract_buffer_refills << std::endl
<< "num_insertion_heap_flushes=" << o.num_insertion_heap_flushes << std::endl
<< "num_direct_flushes=" << o.num_direct_flushes << std::endl
<< "num_internal_array_flushes=" << o.num_internal_array_flushes << std::endl
<< "num_external_array_merges=" << o.num_external_array_merges << std::endl
<< "max_num_internal_arrays=" << o.max_num_internal_arrays << std::endl
<< "max_num_external_arrays=" << o.max_num_external_arrays << std::endl
<< "num_new_external_arrays=" << o.num_new_external_arrays << std::endl
<< "max_num_new_external_arrays=" << o.max_num_new_external_arrays << std::endl
<< "num_new_internal_arrays=" << o.num_new_internal_arrays << std::endl
<< "max_num_new_internal_arrays=" << o.max_num_new_internal_arrays << std::endl
<< "insertion_heap_flush_time=" << o.insertion_heap_flush_time << std::endl
<< "direct_flush_time=" << o.direct_flush_time << std::endl
<< "internal_array_flush_time=" << o.internal_array_flush_time << std::endl
<< "external_array_merge_time=" << o.external_array_merge_time << std::endl
<< "extract_min_time=" << o.extract_min_time << std::endl
<< "refill_extract_buffer_time=" << o.refill_extract_buffer_time << std::endl
<< "refill_merge_time=" << o.refill_merge_time << std::endl
<< "refill_time_before_merge=" << o.refill_time_before_merge << std::endl
<< "refill_time_after_merge=" << o.refill_time_after_merge << std::endl
<< "refill_wait_time=" << o.refill_wait_time << std::endl
<< "pop_heap_time=" << o.pop_heap_time << std::endl
<< "merge_sorted_heaps_time=" << o.merge_sorted_heaps_time << std::endl
// << "refill_lower_bound_time=" << o.refill_lower_bound_time << std::endl
<< "refill_accumulate_time=" << o.refill_accumulate_time << std::endl
<< "refill_minmax_time=" << o.refill_minmax_time << std::endl
<< "hint_time=" << o.hint_time << std::endl;
}
};
stats_type m_stats;
};
// For C++98 compatibility: out-of-class definition of the static member,
// since in-class initializers for non-integral static const members
// require C++11.
template <
class ValueType,
class CompareType,
class AllocStrategy,
uint64 BlockSize,
uint64 DefaultMemSize,
uint64 MaxItems
>
const double parallel_priority_queue<ValueType, CompareType, AllocStrategy, BlockSize,
DefaultMemSize, MaxItems>::c_default_extract_buffer_ram_part = 0.05;
STXXL_END_NAMESPACE
#endif // !STXXL_CONTAINERS_PARALLEL_PRIORITY_QUEUE_HEADER
|
8449.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization: fill A with the deterministic
   pattern (row + col) / nj so results are reproducible. */
static
void init_array (int ni, int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
int row, col;
for (row = 0; row < ni; row++) {
for (col = 0; col < nj; col++) {
A[row][col] = ((DATA_TYPE) (row + col) / nj);
}
}
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nj,
DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
int row, col;
for (row = 0; row < ni; row++) {
for (col = 0; col < nj; col++) {
fprintf(stderr, DATA_PRINTF_MODIFIER, B[row][col]);
/* break the output into lines of 20 values */
if ((row * NJ + col) % 20 == 0) fprintf(stderr, "\n");
}
}
fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Main computational kernel: 3x3 convolution of A into B over all
interior points (borders untouched). The whole function is timed,
including the call and return. */
static
void kernel_conv2d(int ni,
int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
int i, j;
#pragma scop
/* BUG FIX: `j` is declared at function scope, so under the original
`#pragma omp parallel for simd` it was SHARED across threads -- a
data race on the inner loop index that corrupts results. It is now
explicitly privatized. The nested
`#pragma omp target teams distribute thread_limit(128) simd`
was also removed: an offload construct inside a host parallel region
(one per host thread) is incoherent for this host-parallel kernel. */
#pragma omp parallel for private(j) schedule(static, 1)
for (i = 1; i < _PB_NI - 1; ++i)
{
for (j = 1; j < _PB_NJ - 1; ++j)
{
B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
+ -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
+ 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
}
}
#pragma endscop
}
/* Driver: allocate A and B, initialize A, time the convolution kernel,
print the elapsed time, and print B through the DCE guard. */
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);
/* Initialize array(s). */
init_array (ni, nj, POLYBENCH_ARRAY(A));
/* Start timer. */
//polybench_start_instruments;
polybench_timer_start();
/* Run kernel. */
kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
/* Stop and print timer. */
polybench_timer_stop();
polybench_timer_print();
//polybench_stop_instruments;
//polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
return 0;
}
|
interpolation_pq.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <math.h>
//------------------------------------------------------------------------------------------------------------------------------
// Piecewise-quadratic interpolation of one block: each coarse cell is
// expanded into a 2x2x2 group of fine cells. Each fine point combines
// 27 coarse neighbors using the tensor product of the 1D weights
// {-3/32, 30/32, 5/32} (hence the 1/32^3 = 1/32768 normalization; e.g.
// the center coefficient 27000 = 30*30*30). The fine value is
// accumulated as write = prescale_f*write + interpolant, so prescale_f=0
// overwrites and prescale_f=1 increments.
static inline void interpolation_pq_block(level_type *level_f, int id_f, double prescale_f, level_type *level_c, int id_c, blockCopy_type *block){
// interpolate 3D array from read_i,j,k of read[] to write_i,j,k in write[]
int write_dim_i = block->dim.i<<1; // calculate the dimensions of the resultant fine block
int write_dim_j = block->dim.j<<1;
int write_dim_k = block->dim.k<<1;
int read_i = block->read.i;
int read_j = block->read.j;
int read_k = block->read.k;
int read_jStride = block->read.jStride;
int read_kStride = block->read.kStride;
int write_i = block->write.i;
int write_j = block->write.j;
int write_k = block->write.k;
int write_jStride = block->write.jStride;
int write_kStride = block->write.kStride;
double * __restrict__ read = block->read.ptr;
double * __restrict__ write = block->write.ptr;
// A non-negative box id means the endpoint is a box owned by this rank:
// resolve the pointer (offset past the ghost zone) and strides from the
// box itself rather than from the block descriptor.
if(block->read.box >=0){
read = level_c->my_boxes[ block->read.box].vectors[ id_c] + level_c->my_boxes[ block->read.box].ghosts*(1+level_c->my_boxes[ block->read.box].jStride+level_c->my_boxes[ block->read.box].kStride);
read_jStride = level_c->my_boxes[block->read.box ].jStride;
read_kStride = level_c->my_boxes[block->read.box ].kStride;
}
if(block->write.box>=0){
write = level_f->my_boxes[block->write.box].vectors[id_f] + level_f->my_boxes[block->write.box].ghosts*(1+level_f->my_boxes[block->write.box].jStride+level_f->my_boxes[block->write.box].kStride);
write_jStride = level_f->my_boxes[block->write.box].jStride;
write_kStride = level_f->my_boxes[block->write.box].kStride;
}
int i,j,k;
double OneOver32Cubed = 1.0/32768.0; // 1/32^3 normalizes the 3D tensor-product weights
// The parity of each fine index selects which side the off-center
// coarse neighbor lies on (delta_* is +/- one coarse stride).
for(k=0;k<write_dim_k;k++){int delta_k=-read_kStride;if(k&0x1)delta_k=read_kStride;
for(j=0;j<write_dim_j;j++){int delta_j=-read_jStride;if(j&0x1)delta_j=read_jStride;
for(i=0;i<write_dim_i;i++){int delta_i= -1;if(i&0x1)delta_i= 1; // i.e. even points look backwards while odd points look forward
int write_ijk = ((i )+write_i) + (((j )+write_j)*write_jStride) + (((k )+write_k)*write_kStride);
int read_ijk = ((i>>1)+ read_i) + (((j>>1)+ read_j)* read_jStride) + (((k>>1)+ read_k)* read_kStride);
//
// | -3/32 | 30/32 | 5/32 |
// |---+---|---+---|---+---|
// | | | | x | | |
//
write[write_ijk] = prescale_f*write[write_ijk] +
OneOver32Cubed*(
-27.0*read[read_ijk-delta_i-delta_j-delta_k] +
270.0*read[read_ijk -delta_j-delta_k] +
45.0*read[read_ijk+delta_i-delta_j-delta_k] +
270.0*read[read_ijk-delta_i -delta_k] +
-2700.0*read[read_ijk -delta_k] +
-450.0*read[read_ijk+delta_i -delta_k] +
45.0*read[read_ijk-delta_i+delta_j-delta_k] +
-450.0*read[read_ijk +delta_j-delta_k] +
-75.0*read[read_ijk+delta_i+delta_j-delta_k] +
270.0*read[read_ijk-delta_i-delta_j ] +
-2700.0*read[read_ijk -delta_j ] +
-450.0*read[read_ijk+delta_i-delta_j ] +
-2700.0*read[read_ijk-delta_i ] +
27000.0*read[read_ijk ] +
4500.0*read[read_ijk+delta_i ] +
-450.0*read[read_ijk-delta_i+delta_j ] +
4500.0*read[read_ijk +delta_j ] +
750.0*read[read_ijk+delta_i+delta_j ] +
45.0*read[read_ijk-delta_i-delta_j+delta_k] +
-450.0*read[read_ijk -delta_j+delta_k] +
-75.0*read[read_ijk+delta_i-delta_j+delta_k] +
-450.0*read[read_ijk-delta_i +delta_k] +
4500.0*read[read_ijk +delta_k] +
750.0*read[read_ijk+delta_i +delta_k] +
-75.0*read[read_ijk-delta_i+delta_j+delta_k] +
750.0*read[read_ijk +delta_j+delta_k] +
125.0*read[read_ijk+delta_i+delta_j+delta_k]
);
}}}
}
//------------------------------------------------------------------------------------------------------------------------------
// perform a (inter-level) piecewise quadratic interpolation
// perform a (inter-level) piecewise quadratic interpolation
// Stages: (1) prepost MPI Irecvs, (2) pack send buffers by interpolating
// into them (prescale 0 so buffers are overwritten, not incremented) and
// post Isends, (3) do the local box-to-box interpolation to overlap with
// communication, (4) wait for all messages, (5) unpack receive buffers.
// Each stage is timed into level_f->cycles.
void interpolation_pq(level_type * level_f, int id_f, double prescale_f, level_type *level_c, int id_c){
exchange_boundary(level_c,id_c,0);
apply_BCs_quadratic(level_c,id_c,0);
uint64_t _timeCommunicationStart = CycleTime();
uint64_t _timeStart,_timeEnd;
int buffer=0;
int n;
int my_tag = (level_f->tag<<4) | 0x7;
#ifdef USE_MPI
// by convention, level_f allocates a combined array of requests for both level_f recvs and level_c sends...
int nMessages = level_c->interpolation.num_sends + level_f->interpolation.num_recvs;
MPI_Request *recv_requests = level_f->interpolation.requests;
MPI_Request *send_requests = level_f->interpolation.requests + level_f->interpolation.num_recvs;
// loop through packed list of MPI receives and prepost Irecv's...
_timeStart = CycleTime();
#ifdef USE_MPI_THREAD_MULTIPLE
#pragma omp parallel for schedule(dynamic,1)
#endif
for(n=0;n<level_f->interpolation.num_recvs;n++){
MPI_Irecv(level_f->interpolation.recv_buffers[n],
level_f->interpolation.recv_sizes[n],
MPI_DOUBLE,
level_f->interpolation.recv_ranks[n],
my_tag,
MPI_COMM_WORLD,
&recv_requests[n]
);
}
_timeEnd = CycleTime();
level_f->cycles.interpolation_recv += (_timeEnd-_timeStart);
// pack MPI send buffers...
_timeStart = CycleTime();
PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_c->interpolation.num_blocks[0])
for(buffer=0;buffer<level_c->interpolation.num_blocks[0];buffer++){
// !!! prescale==0 because you don't want to increment the MPI buffer
interpolation_pq_block(level_f,id_f,0.0,level_c,id_c,&level_c->interpolation.blocks[0][buffer]);
}
_timeEnd = CycleTime();
level_f->cycles.interpolation_pack += (_timeEnd-_timeStart);
// loop through MPI send buffers and post Isend's...
_timeStart = CycleTime();
#ifdef USE_MPI_THREAD_MULTIPLE
#pragma omp parallel for schedule(dynamic,1)
#endif
for(n=0;n<level_c->interpolation.num_sends;n++){
MPI_Isend(level_c->interpolation.send_buffers[n],
level_c->interpolation.send_sizes[n],
MPI_DOUBLE,
level_c->interpolation.send_ranks[n],
my_tag,
MPI_COMM_WORLD,
&send_requests[n]
);
}
_timeEnd = CycleTime();
level_f->cycles.interpolation_send += (_timeEnd-_timeStart);
#endif
// perform local interpolation... try and hide within Isend latency...
_timeStart = CycleTime();
PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_c->interpolation.num_blocks[1])
for(buffer=0;buffer<level_c->interpolation.num_blocks[1];buffer++){
interpolation_pq_block(level_f,id_f,prescale_f,level_c,id_c,&level_c->interpolation.blocks[1][buffer]);
}
_timeEnd = CycleTime();
level_f->cycles.interpolation_local += (_timeEnd-_timeStart);
// wait for MPI to finish...
#ifdef USE_MPI
_timeStart = CycleTime();
if(nMessages)MPI_Waitall(nMessages,level_f->interpolation.requests,level_f->interpolation.status);
_timeEnd = CycleTime();
level_f->cycles.interpolation_wait += (_timeEnd-_timeStart);
// unpack MPI receive buffers
_timeStart = CycleTime();
PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_f->interpolation.num_blocks[2])
for(buffer=0;buffer<level_f->interpolation.num_blocks[2];buffer++){
// received data is added into the fine grid with the caller's prescale
IncrementBlock(level_f,id_f,prescale_f,&level_f->interpolation.blocks[2][buffer]);
}
_timeEnd = CycleTime();
level_f->cycles.interpolation_unpack += (_timeEnd-_timeStart);
#endif
level_f->cycles.interpolation_total += (uint64_t)(CycleTime()-_timeCommunicationStart);
}
|
graph.h | // Copyright (c) 2015, The Regents of the University of California (Regents)
// See LICENSE.txt for license details
#ifndef GRAPH_H_
#define GRAPH_H_
#include <algorithm>
#include <cinttypes>
#include <cstddef>
#include <iostream>
#include <type_traits>
#include "pvector.h"
#include "util.h"
/*
GAP Benchmark Suite
Class: CSRGraph
Author: Scott Beamer
Simple container for graph in CSR format
- Intended to be constructed by a Builder
- To make weighted, set DestID_ template type to NodeWeight
- MakeInverse parameter controls whether graph stores its inverse
*/
// Used to hold node & weight, with another node it makes a weighted edge
template <typename NodeID_, typename WeightT_>
struct NodeWeight {
  NodeID_ v;   // destination node id
  WeightT_ w;  // edge weight (defaults to 1 when constructed from a node only)
  NodeWeight() {}
  NodeWeight(NodeID_ v) : v(v), w(1) {}
  NodeWeight(NodeID_ v, WeightT_ w) : v(v), w(w) {}
  // Total order: by node id first, then by weight (stable edge sorting).
  bool operator< (const NodeWeight& rhs) const {
    return v == rhs.v ? w < rhs.w : v < rhs.v;
  }
  // doesn't check WeightT_s, needed to remove duplicate edges
  bool operator== (const NodeWeight& rhs) const {
    return v == rhs.v;
  }
  // doesn't check WeightT_s, needed to remove self edges
  bool operator== (const NodeID_& rhs) const {
    return v == rhs;
  }
  // FIX: const-qualified so the implicit conversion to a node id also
  // works on const references (e.g. in const iteration contexts).
  operator NodeID_() const {
    return v;
  }
  // FIX: const-qualified; printing does not modify the edge.
  void PrintEdgeWeight() const {
    std::cout << " -- v: " << v << "(" << w << ")\n";
  }
};
// Stream output for a weighted edge endpoint: "v w" (space separated).
template <typename NodeID_, typename WeightT_>
std::ostream& operator<<(std::ostream& os,
const NodeWeight<NodeID_, WeightT_>& nw) {
os << nw.v << " " << nw.w;
return os;
}
// Stream input: reads node id then weight (inverse of operator<< above).
template <typename NodeID_, typename WeightT_>
std::istream& operator>>(std::istream& is, NodeWeight<NodeID_, WeightT_>& nw) {
is >> nw.v >> nw.w;
return is;
}
// Syntactic sugar for an edge
template <typename SrcT, typename DstT = SrcT>
struct EdgePair {
SrcT u; // source endpoint
DstT v; // destination endpoint (may carry a weight via NodeWeight)
EdgePair() {}
EdgePair(SrcT u, DstT v) : u(u), v(v) {}
};
// SG = serialized graph, these types are for writing graph to file
typedef int32_t SGID;
typedef EdgePair<SGID> SGEdge;
typedef int64_t SGOffset;
template <class NodeID_, class DestID_ = NodeID_, bool MakeInverse = true>
class CSRGraph {
// Used for *non-negative* offsets within a neighborhood
typedef std::make_unsigned<std::ptrdiff_t>::type OffsetT;
// Used to access neighbors of vertex, basically sugar for iterators
class Neighborhood {
NodeID_ n_;
DestID_** g_index_;
OffsetT start_offset_;
public:
// start_offset is clamped to the neighborhood size so begin() can
// never run past end().
Neighborhood(NodeID_ n, DestID_** g_index, OffsetT start_offset) :
n_(n), g_index_(g_index), start_offset_(0) {
OffsetT max_offset = end() - begin();
start_offset_ = std::min(start_offset, max_offset);
}
typedef DestID_* iterator;
iterator begin() { return g_index_[n_] + start_offset_; }
iterator end() { return g_index_[n_+1]; }
};
// Frees owned arrays. For undirected graphs the in_* pointers alias the
// out_* pointers (see the 3-argument constructor), so they are only
// deleted when directed_.
void ReleaseResources() {
if (out_index_ != nullptr)
delete[] out_index_;
if (out_neighbors_ != nullptr)
delete[] out_neighbors_;
if (directed_) {
if (in_index_ != nullptr)
delete[] in_index_;
if (in_neighbors_ != nullptr)
delete[] in_neighbors_;
}
}
public:
CSRGraph() : directed_(false), num_nodes_(-1), num_edges_(-1),
out_index_(nullptr), out_neighbors_(nullptr),
in_index_(nullptr), in_neighbors_(nullptr) {}
// Undirected graph: in_* alias out_*; each edge is stored twice, so the
// stored count is halved to get the logical edge count.
CSRGraph(int64_t num_nodes, DestID_** index, DestID_* neighs) :
directed_(false), num_nodes_(num_nodes),
out_index_(index), out_neighbors_(neighs),
in_index_(index), in_neighbors_(neighs) {
num_edges_ = (out_index_[num_nodes_] - out_index_[0]) / 2;
}
// Directed graph: separate out- and in-CSR structures; takes ownership.
CSRGraph(int64_t num_nodes, DestID_** out_index, DestID_* out_neighs,
DestID_** in_index, DestID_* in_neighs) :
directed_(true), num_nodes_(num_nodes),
out_index_(out_index), out_neighbors_(out_neighs),
in_index_(in_index), in_neighbors_(in_neighs) {
num_edges_ = out_index_[num_nodes_] - out_index_[0];
}
// Move constructor: steals the arrays and leaves `other` empty so its
// destructor is a no-op.
CSRGraph(CSRGraph&& other) : directed_(other.directed_),
num_nodes_(other.num_nodes_), num_edges_(other.num_edges_),
out_index_(other.out_index_), out_neighbors_(other.out_neighbors_),
in_index_(other.in_index_), in_neighbors_(other.in_neighbors_) {
other.num_edges_ = -1;
other.num_nodes_ = -1;
other.out_index_ = nullptr;
other.out_neighbors_ = nullptr;
other.in_index_ = nullptr;
other.in_neighbors_ = nullptr;
}
~CSRGraph() {
ReleaseResources();
}
// Move assignment: releases current storage, then steals from `other`.
CSRGraph& operator=(CSRGraph&& other) {
if (this != &other) {
ReleaseResources();
directed_ = other.directed_;
num_edges_ = other.num_edges_;
num_nodes_ = other.num_nodes_;
out_index_ = other.out_index_;
out_neighbors_ = other.out_neighbors_;
in_index_ = other.in_index_;
in_neighbors_ = other.in_neighbors_;
other.num_edges_ = -1;
other.num_nodes_ = -1;
other.out_index_ = nullptr;
other.out_neighbors_ = nullptr;
other.in_index_ = nullptr;
other.in_neighbors_ = nullptr;
}
return *this;
}
bool directed() const {
return directed_;
}
int64_t num_nodes() const {
return num_nodes_;
}
int64_t num_edges() const {
return num_edges_;
}
// Number of stored directed arcs (undirected edges count twice).
int64_t num_edges_directed() const {
return directed_ ? num_edges_ : 2*num_edges_;
}
int64_t out_degree(NodeID_ v) const {
return out_index_[v+1] - out_index_[v];
}
int64_t in_degree(NodeID_ v) const {
static_assert(MakeInverse, "Graph inversion disabled but reading inverse");
return in_index_[v+1] - in_index_[v];
}
Neighborhood out_neigh(NodeID_ n, OffsetT start_offset = 0) const {
return Neighborhood(n, out_index_, start_offset);
}
Neighborhood in_neigh(NodeID_ n, OffsetT start_offset = 0) const {
static_assert(MakeInverse, "Graph inversion disabled but reading inverse");
return Neighborhood(n, in_index_, start_offset);
}
void PrintStats() const {
std::cout << "Graph has " << num_nodes_ << " nodes and "
<< num_edges_ << " ";
if (!directed_)
std::cout << "un";
std::cout << "directed edges for degree: ";
std::cout << num_edges_/num_nodes_ << std::endl;
}
void PrintTopology() const {
for (NodeID_ i=0; i < num_nodes_; i++) {
std::cout << i << ": ";
for (DestID_ j : out_neigh(i)) {
std::cout << j << " ";
}
std::cout << std::endl;
}
}
// Builds the index array (one pointer per vertex) from per-vertex
// offsets into the flat neighbor array.
static DestID_** GenIndex(const pvector<SGOffset> &offsets, DestID_* neighs) {
NodeID_ length = offsets.size();
DestID_** index = new DestID_*[length];
#pragma omp parallel for
for (NodeID_ n=0; n < length; n++)
index[n] = neighs + offsets[n];
return index;
}
// Inverse of GenIndex: recovers per-vertex offsets from the pointers.
pvector<SGOffset> VertexOffsets(bool in_graph = false) const {
pvector<SGOffset> offsets(num_nodes_+1);
for (NodeID_ n=0; n < num_nodes_+1; n++)
if (in_graph)
offsets[n] = in_index_[n] - in_index_[0];
else
offsets[n] = out_index_[n] - out_index_[0];
return offsets;
}
Range<NodeID_> vertices() const {
return Range<NodeID_>(num_nodes());
}
/*
Helper function to print outgoing neighbors
of a node with their weights
(requires DestID_ to be a NodeWeight, since it reads .v and .w)
*/
void PrintNeighbors(NodeID_ node_id) const {
std::cout << "Printing neighbors for " << node_id << std::endl;
for(auto v : out_neigh(node_id))
std::cout << " -- v: " << v.v << " (" << v.w << ")" << std::endl;
}
/*
Function to calculate the difference between
max and min timestamp difference from all
outgoing edges from a node.
TODO: integrate this while building a graph so
we don't have to recompute this every time?
*/
float TimeBoundsDelta(NodeID_ node_id) const {
// PrintNeighbors(node_id);
float min_bound = 0, max_bound = 0;
int cnt = 0;
for(auto v : out_neigh(node_id)) {
if(cnt == 0)
min_bound = max_bound = v.w;
if(v.w < min_bound)
min_bound = v.w;
if(v.w > max_bound)
max_bound = v.w;
cnt++;
}
return (max_bound - min_bound);
}
// Linear scan of src_node's out-neighborhood for dst_node.
bool EdgeExists(NodeID_ src_node, NodeID_ dst_node) const {
for(auto v : out_neigh(src_node))
{
if(v.v == dst_node)
return true;
}
return false;
}
// NOTE(review): data members are public (second `public:` below), so
// external code may mutate them directly; kept as-is for compatibility.
public:
bool directed_;
int64_t num_nodes_;
int64_t num_edges_;
DestID_** out_index_;
DestID_* out_neighbors_;
DestID_** in_index_;
DestID_* in_neighbors_;
};
#endif // GRAPH_H_
|
backproject_singledata.c | /*
Function to perform cone beam back projection of Nikon XTek Custom Bay data in a single step.
Published as part of the SophiaBeads Datasets project.
*/
/*
References:
David Szotten, Limited Data Problems in X-ray and Polarized Light Tomography. PhD Thesis, School of Mathematics, University of Manchester. 2011.
William Michael Thompson, Source firing patterns and reconstruction algorithms for a switched source, offset detector CT machine. PhD Thesis, School of Mathematics, University of Manchester. 2011.
Nicola Wadeson, Aluminium foam data reconstruction using CGLS and TV Regularisation - 100 and 200 projection data. MIMS Preprint, School of Mathematics, University of Manchester. August 2012.
*/
/*
Copyright (c) 2015 Sophia Bethany Coban, William Michael Thompson, Nicola Wadeson and David Szotten
Code is available via the SophiaBeads Datasets project.
University of Manchester.
*/
#include "jacobs_rays.h"
#include "omp.h"
/* Initialize. */
int equal_to_precision(double x, double y, double precision);
double alpha_fn(int n, double p1, double p2, double b, double d);
double p(double alpha, double p1, double p2);
double phi(double alpha, double p1, double p2, double b, double d);
double min_dbl(double a, double b) { return a < b ? a : b; };
double max_dbl(double a, double b) { return a > b ? a : b; };
double min3_dbl(double a, double b, double c) { return a < b ? min_dbl(a,c) : min_dbl(b,c); };
double max3_dbl(double a, double b, double c) { return a > b ? max_dbl(a,c) : max_dbl(b,c); };
double ceil_j( double arg ) { return arg == (int)arg ? arg+1 : ceil( arg ); };
double floor_j( double arg ) { return floor( arg ); };
/*
Back-projects a single ray value into the volume using Jacobs' exact
radiological-path traversal: the ray from start[] to end[] is walked
voxel by voxel, and for each intersected voxel,
(intersection length) * (*ray_data)
is atomically added into vol_data.

start/end : physical coordinates of the ray endpoints.
ray_data : pointer to the single projection value being smeared back.
vol_data : output volume, indexed [k][j][i] with i fastest
(ray_index = k*im_size_y*im_size_x + j*im_size_x + i).
options : grid geometry -- sizes im_size_{x,y,z}, voxel sizes d_*,
grid origin offsets b_*.
im_size : unused; retained for interface compatibility.
Thread-safe: volume updates use "#pragma omp atomic".
*/
void backproject_singledata(int im_size, double *start, double *end, float *ray_data, float *vol_data, struct jacobs_options *options)
{
/* Initialize. (Unused locals voxel_radius, a, b removed.) */
int N_x, N_y, N_z, N_p, im_size_x, im_size_y, im_size_z;
double b_x, b_y, b_z, d_x, d_y, d_z, d_conv;
double p1_x, p1_y, p1_z, p2_x, p2_y, p2_z;
int x_defined, y_defined, z_defined;
int i,j,k;
double alpha_x_min, alpha_y_min, alpha_z_min, alpha_x_max, alpha_y_max,
alpha_z_max, alpha_min, alpha_max, alpha_x, alpha_y, alpha_z, alpha_c;
double alpha_x_u, alpha_y_u, alpha_z_u;
double l_ij;
int i_min, j_min, k_min, i_max, j_max, k_max, n_count, i_u, j_u, k_u;
int ray_index;
/* Copy information. */
p1_x = start[0];
p1_y = start[1];
p1_z = start[2];
p2_x = end[0];
p2_y = end[1];
p2_z = end[2];
im_size_x = options->im_size_x;
im_size_y = options->im_size_y;
im_size_z = options->im_size_z;
/* N_*: number of grid planes per axis (one more than voxels). */
N_x=im_size_x+1;
N_y=im_size_y+1;
N_z=im_size_z+1;
/* d: voxel size */
d_x = options->d_x;
d_y = options->d_y;
d_z = options->d_z;
/* b: grid offset from origin */
b_x = options->b_x;
b_y = options->b_y;
b_z = options->b_z;
/* use total lengh=alpha_max-alpha_min instead, to get an average, not sum. */
/* moving back to original d_conv */
d_conv=sqrt( (p1_x-p2_x)*(p1_x-p2_x) + (p1_y-p2_y)*(p1_y-p2_y) + (p1_z-p2_z)*(p1_z-p2_z));
/* x, y or z are defined if planes are not equal! */
x_defined = !(equal_to_precision(p1_x,p2_x,PRECISION));
y_defined = !(equal_to_precision(p1_y,p2_y,PRECISION));
z_defined = !(equal_to_precision(p1_z,p2_z,PRECISION));
/* Jacob's ray tracing algorithm... */
if( !x_defined && !y_defined && !z_defined)
return;
/* For each axis: compute the parameter range over which the ray is
inside the grid. An axis the ray does not move along contributes the
sentinel range [-2, 2] and a fixed voxel index; if that fixed index
is outside the grid the ray misses entirely. */
if (x_defined) {
alpha_x_min=min_dbl(alpha_fn(0, p1_x, p2_x, b_x, d_x), alpha_fn(N_x-1, p1_x, p2_x, b_x, d_x));
alpha_x_max=max_dbl(alpha_fn(0, p1_x, p2_x, b_x, d_x), alpha_fn(N_x-1, p1_x, p2_x, b_x, d_x));
}
else {
alpha_x_min=-2;
alpha_x_max=2;
i=(int) floor_j( phi(0.0, p1_x, p2_x, b_x, d_x));
if ( i < 0 || i >= im_size_x)
return;
alpha_x=2;
i_min = 1;
i_max = 0;
}
if(y_defined) {
alpha_y_min=min_dbl(alpha_fn(0, p1_y, p2_y, b_y, d_y), alpha_fn(N_y-1, p1_y, p2_y, b_y, d_y));
alpha_y_max=max_dbl(alpha_fn(0, p1_y, p2_y, b_y, d_y), alpha_fn(N_y-1, p1_y, p2_y, b_y, d_y));
}
else {
alpha_y_min=-2;
alpha_y_max=2;
j=(int) floor_j( phi(0.0, p1_y, p2_y, b_y, d_y));
if ( j < 0 || j >= im_size_y)
return;
alpha_y=2;
j_min = 1;
j_max = 0;
}
if(z_defined) {
alpha_z_min=min_dbl(alpha_fn(0, p1_z, p2_z, b_z, d_z), alpha_fn(N_z-1, p1_z, p2_z, b_z, d_z));
alpha_z_max=max_dbl(alpha_fn(0, p1_z, p2_z, b_z, d_z), alpha_fn(N_z-1, p1_z, p2_z, b_z, d_z));
}
else {
alpha_z_min=-2;
alpha_z_max=2;
k=(int) floor_j( phi(0.0, p1_z, p2_z, b_z, d_z));
if ( k < 0 || k >= im_size_z)
return;
alpha_z=2;
k_min = 1;
k_max = 0;
}
/* Entry/exit parameters of the ray segment inside the grid, clipped
to [0,1] (the segment between the two endpoints). */
alpha_min=max_dbl(0.0, max3_dbl(alpha_x_min, alpha_y_min, alpha_z_min));
alpha_max=min_dbl(1.0, min3_dbl(alpha_x_max, alpha_y_max, alpha_z_max));
/* If ray intersects voxel grid */
if (alpha_min < alpha_max) {
/* Per axis: index range of crossed planes and the parameter of the
first plane crossing, direction-dependent. */
if (x_defined && p1_x < p2_x) {
if (equal_to_precision(alpha_min,alpha_x_min,PRECISION)==1)
i_min=1;
else
i_min = (int) ceil_j(phi(alpha_min, p1_x, p2_x, b_x, d_x));
if (equal_to_precision(alpha_max,alpha_x_max,PRECISION)==1)
i_max = N_x - 1;
else
i_max = (int) floor_j( phi(alpha_max, p1_x, p2_x, b_x, d_x));
alpha_x=alpha_fn(i_min, p1_x, p2_x, b_x, d_x);
}
else if (x_defined) {
if (equal_to_precision(alpha_min,alpha_x_min,PRECISION)==1)
i_max=N_x-2;
else
i_max = (int) floor_j(phi(alpha_min, p1_x, p2_x, b_x, d_x));
if (equal_to_precision(alpha_max,alpha_x_max,PRECISION)==1)
i_min = 0;
else
i_min = (int) ceil_j( phi(alpha_max, p1_x, p2_x, b_x, d_x));
alpha_x=alpha_fn(i_max, p1_x, p2_x, b_x, d_x);
}
if (y_defined && p1_y < p2_y) {
if (equal_to_precision(alpha_min,alpha_y_min,PRECISION)==1)
j_min=1;
else
j_min = (int) ceil_j(phi(alpha_min, p1_y, p2_y, b_y, d_y));
if (equal_to_precision(alpha_max, alpha_y_max,PRECISION)==1)
j_max = N_y - 1;
else
j_max = (int) floor_j( phi(alpha_max, p1_y, p2_y, b_y, d_y));
alpha_y=alpha_fn(j_min, p1_y, p2_y, b_y, d_y);
}
else if (y_defined) {
if (equal_to_precision(alpha_min,alpha_y_min,PRECISION)==1)
j_max=N_y-2;
else
j_max = (int) floor_j(phi(alpha_min, p1_y, p2_y, b_y, d_y));
if (equal_to_precision(alpha_max, alpha_y_max, PRECISION)==1)
j_min = 0;
else
j_min = (int) ceil_j( phi(alpha_max, p1_y, p2_y, b_y, d_y));
alpha_y=alpha_fn(j_max, p1_y, p2_y, b_y, d_y);
}
if (z_defined && p1_z < p2_z) {
if (equal_to_precision(alpha_min,alpha_z_min,PRECISION)==1)
k_min=1;
else
k_min = (int) ceil_j(phi(alpha_min, p1_z, p2_z, b_z, d_z));
if (equal_to_precision(alpha_max, alpha_z_max,PRECISION)==1)
k_max = N_z - 1;
else
k_max = (int) floor_j( phi(alpha_max, p1_z, p2_z, b_z, d_z));
alpha_z=alpha_fn(k_min, p1_z, p2_z, b_z, d_z);
}
else if (z_defined) {
if (equal_to_precision(alpha_min,alpha_z_min,PRECISION)==1)
k_max=N_z-2;
else
k_max = (int) floor_j(phi(alpha_min, p1_z, p2_z, b_z, d_z));
if (equal_to_precision(alpha_max, alpha_z_max, PRECISION)==1)
k_min = 0;
else
k_min = (int) ceil_j( phi(alpha_max, p1_z, p2_z, b_z, d_z));
alpha_z=alpha_fn(k_max, p1_z, p2_z, b_z, d_z);
}
/* Total number of plane crossings = total number of traversal steps. */
N_p=(i_max - i_min +1) + (j_max - j_min + 1) + (k_max - k_min + 1);
/* Starting voxel: evaluated at the midpoint between grid entry and the
first plane crossing. alpha_*_u is the parameter step per plane. */
if (x_defined) {
i=(int) floor_j( phi( (min3_dbl(alpha_x, alpha_y, alpha_z) + alpha_min)/2, p1_x, p2_x, b_x, d_x) );
alpha_x_u = d_x/fabs(p2_x-p1_x);
}
if (y_defined) {
j=(int) floor_j( phi( (min3_dbl(alpha_x, alpha_y, alpha_z) + alpha_min)/2, p1_y, p2_y, b_y, d_y) );
alpha_y_u = d_y/fabs(p2_y-p1_y);
}
if (z_defined) {
k=(int) floor_j( phi( (min3_dbl(alpha_x, alpha_y, alpha_z) + alpha_min)/2, p1_z, p2_z, b_z, d_z) );
alpha_z_u = d_z/fabs(p2_z-p1_z);
}
/* Step direction per axis. */
if (p1_x < p2_x)
i_u=1;
else
i_u=-1;
if (p1_y < p2_y)
j_u=1;
else
j_u=-1;
if (p1_z < p2_z)
k_u=1;
else
k_u=-1;
alpha_c=alpha_min;
/* Main traversal: at each step the axis with the nearest next plane
crossing determines the exit face of the current voxel. */
for (n_count=1; n_count<N_p+1;n_count++) {
/* x smallest */
if (x_defined && alpha_x <= alpha_y && alpha_x <= alpha_z) {
/* Ray intersects pixel(i,j) with length l_ij */
ray_index = k*im_size_y*im_size_x + j*im_size_x + i;
#pragma omp atomic
vol_data[ray_index] += ((float) ((alpha_x-alpha_c)*d_conv)) * (*ray_data);
if( y_defined && alpha_x == alpha_y) {
j += j_u;
n_count++;
alpha_y += alpha_y_u;
}
if( z_defined && alpha_x == alpha_z) {
k += k_u;
n_count++;
alpha_z += alpha_z_u;
}
i += i_u;
alpha_c=alpha_x;
alpha_x += alpha_x_u;
}
/* y smallest */
else if (y_defined && alpha_y <= alpha_z) {
/* Ray intersects pixel(i,j) with length l_ij */
ray_index = k*im_size_y*im_size_x + j*im_size_x + i;
#pragma omp atomic
vol_data[ray_index] += ((float) ((alpha_y-alpha_c)*d_conv)) * (*ray_data);
if( z_defined && alpha_y == alpha_z) {
k += k_u;
n_count++;
alpha_z += alpha_z_u;
}
j=j+j_u;
alpha_c=alpha_y;
alpha_y += alpha_y_u;
}
/* z smallest */
else if (z_defined) {
/* Ray intersects pixel(i,j) with length l_ij */
ray_index = k*im_size_y*im_size_x + j*im_size_x + i;
#pragma omp atomic
vol_data[ray_index] += ((float) ((alpha_z-alpha_c)*d_conv)) * (*ray_data);
k += k_u;
alpha_c=alpha_z;
alpha_z += alpha_z_u;
}
/* Have we looped too far? */
if( i < 0 || j < 0 || k < 0 || i >= im_size_x || j >= im_size_y || k >= im_size_z)
/* Artificially end loop */
N_p = n_count - 1;
} /* end of for loop though N_p */
/* In case we're ending inside grid, finish off last voxel */
if( (alpha_max - alpha_c) > PRECISION) {
/* This is the last step so don't need to worry about incrementing i or j */
l_ij=(alpha_max-alpha_c)*d_conv;
/* BUG FIX: the original computed j*im_size_y here, inconsistent with
the three accumulation sites above (all j*im_size_x). For volumes
with im_size_x != im_size_y the final partial-voxel contribution
was written to the wrong voxel (or out of bounds). */
ray_index = k*im_size_y*im_size_x + j*im_size_x + i;
#pragma omp atomic
vol_data[ray_index] += ((float) l_ij) * (*ray_data);
}
} /* of alpha_min < alpha_max */
return;
}
/* Parametric point on the ray: p(alpha) = p1 + alpha*(p2 - p1).
(Defined first so phi() below sees a declaration before use.) */
double p(double alpha, double p1, double p2)
{
return p1 + alpha * (p2 - p1);
}
/* Parameter value at which the ray crosses grid plane n
(plane position = b + n*d). */
double alpha_fn(int n, double p1, double p2, double b, double d)
{
return ((b + n * d) - p1) / (p2 - p1);
}
/* Continuous grid index of the ray position at parameter alpha. */
double phi(double alpha, double p1, double p2, double b, double d)
{
double pos = p(alpha, p1, p2);
return (pos - b) / d;
}
/* Returns 1 when |x - y| < prec, else 0. */
int equal_to_precision(double x, double y, double prec)
{
double diff = x - y;
return fabs(diff) < prec;
}
|
generate_local_ranks_parallel.c | #include <omp.h>
#include "utils.h"
#include "algorithm.h"
/**
Sorts the region [start_interval, end_interval) of sa_buffer -- the
suffixes that all share `current_rank` -- by their next_rank (the rank
of the suffix 2^h positions later), then emits the region as RunRecord
triplets (currentRank, nextRank, count) to runsFP, one per run of
equal nextRank values.
NOTE(review): assumes tsort() sorts the sa_buffer slice using
next_ranks_buffer[sa] as the key -- confirm against its definition.
**/
//create RunRecord triplet and sort
void sort_and_output_group(int * sa_buffer, long * next_ranks_buffer, long current_rank,
int start_interval, int end_interval, FILE *runsFP){
int i;
tsort(&sa_buffer[start_interval], next_ranks_buffer, end_interval-start_interval);
RunRecord output;
output.currentRank = current_rank;
output.count = 1;
output.nextRank = next_ranks_buffer[sa_buffer[start_interval]];
//find runs and write them to output
for (i = start_interval+1; i < end_interval; i++) {
if (next_ranks_buffer[sa_buffer[i]] != output.nextRank) {
// run boundary: flush the finished run, start a new one
Fwrite (&output, sizeof(RunRecord), 1, runsFP);
output.count = 1;
output.nextRank = next_ranks_buffer[sa_buffer[i]];
}
else {
output.count++;
}
}
// flush the final, still-open run
Fwrite (&output, sizeof(RunRecord), 1, runsFP);
}
/*
Processes one chunk of the suffix-array rank data for doubling step h:
loads the chunk's current ranks, its SA slice, and the ranks located
2^h positions ahead (from this chunk and/or a following chunk), then
groups equal-current-rank regions and appends their (curr, next, count)
runs to runs_<chunk_id> via sort_and_output_group(). The (re)sorted SA
slice is written back in place.
Returns EMPTY if the chunk needs no further work, FAILURE on a size
mismatch, SUCCESS otherwise.
FIX: the call opening the current-ranks file had a mojibake-corrupted
token ("¤tFP" -- an HTML-entity mangling of "&currentFP"); restored.
*/
int generate_local_runs_parallel (char * rank_dir, char * runs_dir, int total_chunks,
int chunk_id, int h) {
//Determine which additional chunk must be loaded
/* size_order = log2(WORKING_CHUNK_SIZE); if 2^h spans whole chunks,
the next ranks live entirely in chunk_id + next_chunk_dist. */
int size_order = 0;
while((WORKING_CHUNK_SIZE >> size_order) > 1) {size_order++;}
int next_chunk_dist = h > size_order ? 1<<(h-size_order) : 0;
//printf("%d,%d\n",size_order,next_chunk_dist);
if ((next_chunk_dist + chunk_id) > total_chunks-1){
// #pragma omp barrier
// #pragma omp barrier
return EMPTY;
}
char runs_file_name [MAX_PATH_LENGTH];
FILE *runsFP = NULL;
sprintf (runs_file_name, "%s/runs_%d", runs_dir, chunk_id);
OpenBinaryFileAppend(&runsFP, runs_file_name);
int i, r, total_records = 0;
FILE *currentFP = NULL;
FILE *nextFP = NULL;
FILE *saFP = NULL;
// FILE *summaryFP = NULL;
// char summary_file_name [MAX_PATH_LENGTH];
char current_ranks_file_name [MAX_PATH_LENGTH];
char next_ranks_file_name [MAX_PATH_LENGTH];
char sa_file_name [MAX_PATH_LENGTH];
//allocate buffers
long *current_ranks_buffer = (long *) Calloc ((WORKING_CHUNK_SIZE) *sizeof (long));
long *next_ranks_buffer = (long *) Calloc ((WORKING_CHUNK_SIZE) *sizeof (long));
int *sa_buffer = (int *) Calloc ((WORKING_CHUNK_SIZE) *sizeof (int));
// sprintf (summary_file_name, "%s/merge_summary", runs_dir);
sprintf (current_ranks_file_name, "%s/ranks_%d", rank_dir, chunk_id);
sprintf (sa_file_name, "%s/sa_%d", rank_dir, chunk_id);
//open current rank and sa file
OpenBinaryFileRead (&currentFP, current_ranks_file_name);
OpenBinaryFileReadWrite (&saFP, sa_file_name);
// OpenBinaryFileWrite (&summaryFP, summary_file_name);
//handle reading next_rank
if (next_chunk_dist) {
/* 2^h is a whole number of chunks: the shifted ranks are exactly
the chunk next_chunk_dist away. */
sprintf (next_ranks_file_name, "%s/ranks_%d", rank_dir, chunk_id+next_chunk_dist);
OpenBinaryFileRead (&nextFP, next_ranks_file_name);
fread (next_ranks_buffer, sizeof (long), WORKING_CHUNK_SIZE, nextFP);
} else{
/* 2^h is within a chunk: read the tail of this chunk's ranks
(starting at offset 2^h) and top up from the start of the next
chunk, if there is one. */
sprintf (next_ranks_file_name, "%s/ranks_%d", rank_dir, chunk_id);
OpenBinaryFileRead (&nextFP, next_ranks_file_name);
if(fseek(nextFP, (1 << h)*sizeof(long), SEEK_SET)) {
printf ("Fseek failed trying to move to position %d in ranks file\n", (1 << h));
exit (1);
}
r = fread (next_ranks_buffer, sizeof (long), WORKING_CHUNK_SIZE, nextFP);
fclose(nextFP);
if (chunk_id+1 < total_chunks) {
sprintf (next_ranks_file_name, "%s/ranks_%d", rank_dir, chunk_id+1);
OpenBinaryFileRead (&nextFP, next_ranks_file_name);
fread (next_ranks_buffer + r, sizeof (long), (1<<h), nextFP);
fclose (nextFP);
}
}
//offset next rank by 2^h
//read file by chunk, sort and generate triplet for each chunk
total_records=fread (current_ranks_buffer, sizeof (long), WORKING_CHUNK_SIZE, currentFP);
fclose (currentFP);
// fread (next_ranks_buffer, sizeof (long), WORKING_CHUNK_SIZE, nextFP);
r = fread (sa_buffer, sizeof (int), WORKING_CHUNK_SIZE, saFP);
if (r != total_records) {
printf("Unexpected error: SA has different size %d than ranks array %d\n", r, total_records);
return FAILURE;
}
// printf("Thread %d got here", omp_get_thread_num());
// #pragma omp barrier
int finished = 1;
int start_interval = 0;
int end_interval;
long previous_rank = current_ranks_buffer[sa_buffer[0]];
long current_rank;
//Read through current_ranks_buffer until it changes. Then sort based on next_rank.
/* Non-positive ranks mark already-finalized groups, so only groups with
previous_rank > 0 are (re)sorted and emitted. */
for (i=1; i < total_records; i++) {
current_rank = current_ranks_buffer[sa_buffer[i]];
if (current_rank != previous_rank) {
if (previous_rank > 0) {
finished = 0;
end_interval = i;
//sort, generate runs
sort_and_output_group(sa_buffer, next_ranks_buffer, previous_rank,
start_interval, end_interval, runsFP);
}
start_interval = i;
previous_rank = current_rank;
}
}
/* Handle the final group that runs to the end of the buffer. */
if (previous_rank > 0) {
finished = 0;
end_interval = total_records;
sort_and_output_group(sa_buffer, next_ranks_buffer, previous_rank,
start_interval, end_interval, runsFP);
}
fclose(runsFP);
runsFP = NULL;
// if (chunk_empty == 0) {
// finished = 0;
// Fwrite (&pos_infile, sizeof(int), 1, summaryFP);
// }
//return pointer to the beginning of the sa chunk
// #pragma omp barrier
/* Rewind and write the (possibly re-sorted) SA slice back in place. */
fseek ( saFP, -(total_records )*sizeof(int), SEEK_CUR );
Fwrite (sa_buffer, sizeof(int), total_records, saFP);
// pos_infile += total_records;
fclose(saFP);
// fclose (summaryFP);
free (sa_buffer);
free (current_ranks_buffer);
free (next_ranks_buffer);
if (finished)
return EMPTY;
return SUCCESS;
}
/*
Driver: processes every chunk in parallel, one chunk per thread.
Exit status: FAILURE if any chunk failed, SUCCESS if any chunk
produced runs, EMPTY if all chunks were already finished.
*/
int main(int argc, char ** argv){
char * rank_dir;
char * runs_dir;
int h, chunk_id, total_chunks;
if (argc<5) {
puts ("Run ./generate_local_runs <rank_dir> <runs_dir> <total_chunks> <order>");
return FAILURE;
}
//Read inputs
rank_dir = argv[1];
runs_dir = argv[2];
total_chunks = atoi(argv[3]);
h = atoi(argv[4]);
/* BUG FIX: the original wrote to a single shared `more_runs` variable
from inside the parallel loop with no synchronization -- a data race
whose outcome depended on thread interleaving. Accumulate sticky
per-thread flags via OpenMP reductions and combine afterwards;
FAILURE still dominates SUCCESS, which dominates EMPTY. */
int saw_failure = 0;
int saw_runs = 0;
#pragma omp parallel for schedule(static, 1) num_threads(NUM_THREADS) private(chunk_id) reduction(|:saw_failure) reduction(|:saw_runs)
for (chunk_id = 0; chunk_id < total_chunks; chunk_id++){
int result = generate_local_runs_parallel (rank_dir, runs_dir, total_chunks, chunk_id, h);
if (result == FAILURE)
saw_failure = 1;
else if (result != EMPTY)
saw_runs = 1;
}
if (saw_failure)
return FAILURE;
return saw_runs ? SUCCESS : EMPTY;
}
|
element.h | /* All or part of this file was contributed by Intel under license:
* Copyright (C) 2017-2018 Intel Corporation
* SPDX-License-Identifier: MIT
*/
#pragma once
#include "tensors/tensor.h"
namespace marian {
namespace cpu {
template <size_t K, bool broadcast, class Functor>
void gElement(Functor functor,
gpu::Array<gpu::Tensor<float>, K> tensors) {
int length = tensors[0].shape().elements();
gpu::Array<int, gpu::Shape::size()> dims;
gpu::Array<int, K> indices;
#pragma omp parallel for simd
for(int index = 0; index < length; ++index) {
indices.fill(index);
if(broadcast) {
tensors[0].shape().dims(index, dims);
for(int i = 1; i < K; ++i)
indices[i] = tensors[i].shape().bindex(dims);
}
tensors[0][index] = gpu::apply(functor, tensors, indices);
}
}
template <class Functor, class ...Tensors>
void Element(Functor functor, marian::Tensor out, Tensors ...tensors) {
constexpr size_t K = sizeof...(tensors) + 1;
gpu::Array<gpu::Tensor<float>, K> gTensors = {out, tensors...};
int length = gTensors[0].shape().elements();
bool broadcast = false;
for(int i = 1; i < K; ++i)
broadcast = broadcast || gTensors[0].shape() != gTensors[i].shape();
if(broadcast)
cpu::gElement<K, true>(functor, gTensors);
else
cpu::gElement<K, false>(functor, gTensors);
}
}
}
|
constant_density_acoustic_time_scalar_1D_6.h | #ifndef __CDA_TIME_SCALAR_1D_6__
#define __CDA_TIME_SCALAR_1D_6__
#include <stdlib.h>
// Advance one time step of the 1D constant-density acoustic wave equation
// using a 6th-order finite-difference stencil with split-field PML absorbing
// boundaries.  Decentered stencils (with zero-padding) are used near the
// domain edges; the classic centered stencil is used in the interior.
//
//   km1_u    : wavefield at step k-1                       (in)
//   k_Phiz   : PML auxiliary field at step k               (in)
//   k_u      : wavefield at step k                         (in)
//   C        : velocity model                              (in)
//   rhs      : source term                                 (in)
//   zlpml    : left  PML damping profile, n_zlpml nodes    (in)
//   zrpml    : right PML damping profile, n_zrpml nodes    (in)
//   dt, dz   : time step and grid spacing                  (in)
//   nz       : number of grid nodes                        (in)
//   kp1_Phiz : PML auxiliary field at step k+1             (out)
//   kp1_u    : wavefield at step k+1                       (out)
//
// The nr_* / nc_* shape arguments belong to the wrapped (SWIG-style)
// interface and are not read by the kernel itself.
template< typename T, int ACCURACY >
void cda_time_scalar_1D_6( T* km1_u, int nr_km1_u, int nc_km1_u, // in - padded wavefield shape
T* k_Phiz, int nr_k_Phiz, int nc_k_Phiz, // in - padded wavefield shape
T* k_u, int nr_k_u, int nc_k_u, // in - padded wavefield shape
T* C, int nr_C, int nc_C, // in - padded wavefield shape
T* rhs, int nr_rhs, int nc_rhs, // in - padded wavefield shape
T* zlpml, int n_zlpml, // in - length is the number of nodes inside the padding that the pml value is defined.
T* zrpml, int n_zrpml, // in - length is the number of nodes inside the padding that the pml value is defined.
double const& dt, // in
double const& dz, // in
int const& nz, // in
T* kp1_Phiz, int nr_kp1_Phiz, int nc_kp1_Phiz, // out
T* kp1_u, int nr_kp1_u, int nc_kp1_u ) // out
{
  enum {MAX_FD_SHIFT = ACCURACY/2};
  // PML variable
  T sigmaz = 0.0;
  // Time delta variables
  T dt2 = dt*dt;
  // Loop/index variables
  int idx;
  int zstride=1;
  int s = zstride;
  // Loop public variables
  T dv = dz;
  T dv2 = dz*dz;
  // Loop private variables
  // derivatives
  T dU;
  T dPhi;
  T lapU = 0.0;
  // non derivatives
  T fac1;
  T fac2;
  // Number of threads: OMP_NUM_THREADS may be unset, in which case getenv
  // returns NULL and the original atoi(NULL) was undefined behavior.
  // Fall back to a single thread when the variable is missing or invalid.
  char* NUM = getenv("OMP_NUM_THREADS");
  int Num_Th = (NUM != NULL) ? atoi (NUM) : 0;
  if (Num_Th <= 0) Num_Th = 1;
  #pragma omp parallel num_threads(Num_Th) private(dU, dPhi, lapU, sigmaz, idx, fac1, fac2) shared(dv, dv2, s, k_u,k_Phiz,kp1_Phiz, kp1_u, rhs, C, dt2, dt, km1_u, zlpml, n_zrpml)
  {
    #pragma omp for
    for(int k=0; k < nz; k++)
    {
      idx = k;
      // Zero both outputs up front; edge nodes keep these zeros.
      kp1_Phiz[idx] = 0.0;
      kp1_u[idx] = 0.0;
      if ((k == 0) || (k == nz-1)) continue;
      lapU = 0.0;
      // NOTE(review): the k==0 and k==nz-1 branches below are unreachable
      // because of the `continue` above; they are kept for symmetry with
      // the other decentered-stencil cases.
      if (k==0)
      {
        //decentered derivative 3 ranks on the right
        dU = ((-1./60.)*0.0+(3./20.)*0.0+(-3./4.)*0.0+0.0+(3./4.)*k_u[idx+s]+(-3./20.)*k_u[idx+2*s]+(1./60.)*k_u[idx+3*s])/dv;
        dPhi = ((-1./60.)*0.0+(3./20.)*0.0+(-3./4.)*0.0+0.0+(3./4.)*k_Phiz[idx+s]+(-3./20.)*k_Phiz[idx+2*s]+(1./60.)*k_Phiz[idx+3*s])/dv;
        lapU += ((1./90.)*0.0+(-3./20.)*0.0+(3./2.)*0.0+(-49./18.)*k_u[idx]+(3./2.)*k_u[idx+s]+(-3./20.)*k_u[idx+2*s]+(1./90.)*k_u[idx+3*s])/dv2;
      }
      else if (k == 1)
      {
        //decentered derivative 2 rank on the right
        dU = ((-1./60.)*0.0+(3./20.)*0.0+(-3./4.)*k_u[idx-s]+0.0+(3./4.)*k_u[idx+s]+(-3./20.)*k_u[idx+2*s]+(1./60.)*k_u[idx+3*s])/dv;
        dPhi = ((-1./60.)*0.0+(3./20.)*0.0+(-3./4.)*k_Phiz[idx-s]+0.0+(3./4.)*k_Phiz[idx+s]+(-3./20.)*k_Phiz[idx+2*s]+(1./60.)*k_Phiz[idx+3*s])/dv;
        lapU += ((1./90.)*0.0+(-3./20.)*0.0+(3./2.)*k_u[idx-s]+(-49./18.)*k_u[idx]+(3./2.)*k_u[idx+s]+(-3./20.)*k_u[idx+2*s]+(1./90.)*k_u[idx+3*s])/dv2;
      }
      else if (k == 2)
      {
        //decentered derivative 1 rank on the right
        dU = ((-1./60.)*0.0+(3./20.)*k_u[idx-2*s]+(-3./4.)*k_u[idx-s]+0.0+(3./4.)*k_u[idx+s]+(-3./20.)*k_u[idx+2*s]+(1./60.)*k_u[idx+3*s])/dv;
        dPhi = ((-1./60.)*0.0+(3./20.)*k_Phiz[idx-2*s]+(-3./4.)*k_Phiz[idx-s]+0.0+(3./4.)*k_Phiz[idx+s]+(-3./20.)*k_Phiz[idx+2*s]+(1./60.)*k_Phiz[idx+3*s])/dv;
        lapU += ((1./90.)*0.0+(-3./20.)*k_u[idx-2*s]+(3./2.)*k_u[idx-s]+(-49./18.)*k_u[idx]+(3./2.)*k_u[idx+s]+(-3./20.)*k_u[idx+2*s]+(1./90.)*k_u[idx+3*s])/dv2;
      }
      else if (k == nz-1)
      {
        //decentered derivative 3 ranks on the left
        dU = ((-1./60.)*k_u[idx-3*s]+(3./20.)*k_u[idx-2*s]+(-3./4.)*k_u[idx-s]+0.0+(3./4.)*0.0+(-3./20.)*0.0+(1./60.)*0.0)/dv;
        dPhi = ((-1./60.)*k_Phiz[idx-3*s]+(3./20.)*k_Phiz[idx-2*s]+(-3./4.)*k_Phiz[idx-s]+0.0+(3./4.)*0.0+(-3./20.)*0.0+(1./60.)*0.0)/dv;
        lapU += ((1./90.)*k_u[idx-3*s]+(-3./20.)*k_u[idx-2*s]+(3./2.)*k_u[idx-s]+(-49./18.)*k_u[idx]+(3./2.)*0.0+(-3./20.)*0.0+(1./90.)*0.0)/dv2;
      }
      else if (k == nz-2)
      {
        //decentered derivative 2 ranks on the left
        dU = ((-1./60.)*k_u[idx-3*s]+(3./20.)*k_u[idx-2*s]+(-3./4.)*k_u[idx-s]+0.0+(3./4.)*k_u[idx+s]+(-3./20.)*0.0+(1./60.)*0.0)/dv;
        dPhi = ((-1./60.)*k_Phiz[idx-3*s]+(3./20.)*k_Phiz[idx-2*s]+(-3./4.)*k_Phiz[idx-s]+0.0+(3./4.)*k_Phiz[idx+s]+(-3./20.)*0.0+(1./60.)*0.0)/dv;
        lapU += ((1./90.)*k_u[idx-3*s]+(-3./20.)*k_u[idx-2*s]+(3./2.)*k_u[idx-s]+(-49./18.)*k_u[idx]+(3./2.)*k_u[idx+s]+(-3./20.)*0.0+(1./90.)*0.0)/dv2;
      }
      else if (k == nz-3)
      {
        //decentered derivative 1 rank on the left
        dU = ((-1./60.)*k_u[idx-3*s]+(3./20.)*k_u[idx-2*s]+(-3./4.)*k_u[idx-s]+0.0+(3./4.)*k_u[idx+s]+(-3./20.)*k_u[idx+2*s]+(1./60.)*0.0)/dv;
        dPhi = ((-1./60.)*k_Phiz[idx-3*s]+(3./20.)*k_Phiz[idx-2*s]+(-3./4.)*k_Phiz[idx-s]+0.0+(3./4.)*k_Phiz[idx+s]+(-3./20.)*k_Phiz[idx+2*s]+(1./60.)*0.0)/dv;
        lapU += ((1./90.)*k_u[idx-3*s]+(-3./20.)*k_u[idx-2*s]+(3./2.)*k_u[idx-s]+(-49./18.)*k_u[idx]+(3./2.)*k_u[idx+s]+(-3./20.)*k_u[idx+2*s]+(1./90.)*0.0)/dv2;
      }
      else
      {
        //classic centered derivative
        dU = ((-1./60.)*k_u[idx-3*s]+(3./20.)*k_u[idx-2*s]+(-3./4.)*k_u[idx-s]+0.0+(3./4.)*k_u[idx+s]+(-3./20.)*k_u[idx+2*s]+(1./60.)*k_u[idx+3*s])/dv;
        dPhi = ((-1./60.)*k_Phiz[idx-3*s]+(3./20.)*k_Phiz[idx-2*s]+(-3./4.)*k_Phiz[idx-s]+0.0+(3./4.)*k_Phiz[idx+s]+(-3./20.)*k_Phiz[idx+2*s]+(1./60.)*k_Phiz[idx+3*s])/dv;
        lapU += ((1./90.)*k_u[idx-3*s]+(-3./20.)*k_u[idx-2*s]+(3./2.)*k_u[idx-s]+(-49./18.)*k_u[idx]+(3./2.)*k_u[idx+s]+(-3./20.)*k_u[idx+2*s]+(1./90.)*k_u[idx+3*s])/dv2;
      }
      // Pick the PML damping coefficient for this node (left layer, right
      // layer, or zero in the interior).
      sigmaz = 0.0;
      if((n_zlpml>0) && (k < n_zlpml))
      {
        sigmaz = zlpml[k];
      }
      else if((n_zrpml>0) && (k >= nz-n_zrpml))
      {
        sigmaz = zrpml[n_zrpml-((nz-1)-k)];
      }
      if(sigmaz != 0.0)
      {
        // Inside a PML layer: damped update of the auxiliary field and the
        // implicit-in-sigma time update of the wavefield.
        kp1_Phiz[idx] = k_Phiz[idx] - dt * sigmaz*(k_Phiz[idx] + dU);
        fac1 = (2.0*dt2 / (2.0 + dt*sigmaz));
        fac2 = (C[idx]*C[idx])*(rhs[idx]+lapU+dPhi) - (km1_u[idx]-2.0*k_u[idx])/dt2 + sigmaz*km1_u[idx]/(2.0*dt);
        kp1_u[idx] = fac1 * fac2;
      }
      else
      {
        // Interior node: standard second-order-in-time leapfrog update.
        kp1_Phiz[idx] = k_Phiz[idx];
        kp1_u[idx] = dt2*(C[idx]*C[idx])*(rhs[idx]+lapU+dPhi) - (km1_u[idx]-2.0*k_u[idx]);
      }
    }
  }
};
// Thin front-end that fixes the finite-difference accuracy of
// cda_time_scalar_1D_6 at compile time (ACCURACY == 6) while exposing the
// same flat-argument interface expected by the wrapper layer.  All arguments
// are forwarded unchanged.
template< typename T>
void cda_time_scalar_1D_OMP_6( T* km1_u, int nr_km1_u, int nc_km1_u,
                               T* k_Phiz, int nr_k_Phiz, int nc_k_Phiz,
                               T* k_u, int nr_k_u, int nc_k_u,
                               T* C, int nr_C, int nc_C,
                               T* rhs, int nr_rhs, int nc_rhs,
                               T* zlpml, int n_zlpml,
                               T* zrpml, int n_zrpml,
                               double const& dt,
                               double const& dz,
                               int const& nz,
                               T* kp1_Phiz, int nr_kp1_Phiz, int nc_kp1_Phiz,
                               T* kp1_u, int nr_kp1_u, int nc_kp1_u )
{
  cda_time_scalar_1D_6<T,6>(
      km1_u, nr_km1_u, nc_km1_u,
      k_Phiz, nr_k_Phiz, nc_k_Phiz,
      k_u, nr_k_u, nc_k_u,
      C, nr_C, nc_C,
      rhs, nr_rhs, nc_rhs,
      zlpml, n_zlpml,
      zrpml, n_zrpml,
      dt, dz, nz,
      kp1_Phiz, nr_kp1_Phiz, nc_kp1_Phiz,
      kp1_u, nr_kp1_u, nc_kp1_u );
}
#endif
|
integrateFullOrbit.c | /*
Wrappers around the C integration code for Full Orbits
*/
#ifdef _WIN32
#include <Python.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_spline.h>
#include <bovy_coords.h>
#include <bovy_symplecticode.h>
#include <leung_dop853.h>
#include <bovy_rk.h>
#include <integrateFullOrbit.h>
//Potentials
#include <galpy_potentials.h>
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
#ifndef ORBITS_CHUNKSIZE
#define ORBITS_CHUNKSIZE 1
#endif
//Macros to export functions in DLL on different OS
#if defined(_WIN32)
#define EXPORT __declspec(dllexport)
#elif defined(__GNUC__)
#define EXPORT __attribute__((visibility("default")))
#else
// Just do nothing?
#define EXPORT
#endif
#ifdef _WIN32
// On Windows, *need* to define this function to allow the package to be imported
#if PY_MAJOR_VERSION >= 3
PyMODINIT_FUNC PyInit_libgalpy(void) { // Python 3
return NULL;
}
#else
PyMODINIT_FUNC initlibgalpy(void) {} // Python 2
#endif
#endif
/*
Function Declarations
*/
void evalRectForce(double, double *, double *,
int, struct potentialArg *);
void evalRectDeriv(double, double *, double *,
int, struct potentialArg *);
void evalRectDeriv_dxdv(double,double *, double *,
int, struct potentialArg *);
void initMovingObjectSplines(struct potentialArg *, double ** pot_args);
void initChandrasekharDynamicalFrictionSplines(struct potentialArg *, double ** pot_args);
/*
Actual functions
*/
// Parse the flattened (pot_type, pot_args) arrays passed in from Python into
// npot potentialArg structs: for each potential type code, bind the C
// force/potential function pointers and record how many numerical arguments
// it consumes, then copy those arguments into potentialArgs->args.
//
// Both *pot_type and *pot_args are advanced past every consumed entry, which
// is what allows the recursive call below to parse wrapped potentials from
// the same flat arrays.  Negative type codes are wrapper potentials.
void parse_leapFuncArgs_Full(int npot,
                             struct potentialArg * potentialArgs,
                             int ** pot_type,
                             double ** pot_args){
  int ii,jj,kk;
  int nR, nz, nr;
  double * Rgrid, * zgrid, * potGrid_splinecoeffs;
  init_potentialArgs(npot,potentialArgs);
  for (ii=0; ii < npot; ii++){
    switch ( *(*pot_type)++ ) {
    case 0: //LogarithmicHaloPotential, 4 arguments
      potentialArgs->potentialEval= &LogarithmicHaloPotentialEval;
      potentialArgs->Rforce= &LogarithmicHaloPotentialRforce;
      potentialArgs->zforce= &LogarithmicHaloPotentialzforce;
      potentialArgs->phiforce= &LogarithmicHaloPotentialphiforce;
      potentialArgs->dens= &LogarithmicHaloPotentialDens;
      //potentialArgs->R2deriv= &LogarithmicHaloPotentialR2deriv;
      //potentialArgs->planarphi2deriv= &ZeroForce;
      //potentialArgs->planarRphideriv= &ZeroForce;
      potentialArgs->nargs= 4;
      potentialArgs->requiresVelocity= false;
      break;
    case 1: //DehnenBarPotential, 6 arguments
      potentialArgs->Rforce= &DehnenBarPotentialRforce;
      potentialArgs->phiforce= &DehnenBarPotentialphiforce;
      potentialArgs->zforce= &DehnenBarPotentialzforce;
      potentialArgs->nargs= 6;
      potentialArgs->requiresVelocity= false;
      break;
    case 5: //MiyamotoNagaiPotential, 3 arguments
      potentialArgs->potentialEval= &MiyamotoNagaiPotentialEval;
      potentialArgs->Rforce= &MiyamotoNagaiPotentialRforce;
      potentialArgs->zforce= &MiyamotoNagaiPotentialzforce;
      potentialArgs->phiforce= &ZeroForce;
      potentialArgs->dens= &MiyamotoNagaiPotentialDens;
      //potentialArgs->R2deriv= &MiyamotoNagaiPotentialR2deriv;
      //potentialArgs->planarphi2deriv= &ZeroForce;
      //potentialArgs->planarRphideriv= &ZeroForce;
      potentialArgs->nargs= 3;
      potentialArgs->requiresVelocity= false;
      break;
    case 7: //PowerSphericalPotential, 2 arguments
      potentialArgs->potentialEval= &PowerSphericalPotentialEval;
      potentialArgs->Rforce= &PowerSphericalPotentialRforce;
      potentialArgs->zforce= &PowerSphericalPotentialzforce;
      potentialArgs->phiforce= &ZeroForce;
      potentialArgs->dens= &PowerSphericalPotentialDens;
      //potentialArgs->R2deriv= &PowerSphericalPotentialR2deriv;
      //potentialArgs->planarphi2deriv= &ZeroForce;
      //potentialArgs->planarRphideriv= &ZeroForce;
      potentialArgs->nargs= 2;
      potentialArgs->requiresVelocity= false;
      break;
    case 8: //HernquistPotential, 2 arguments
      potentialArgs->potentialEval= &HernquistPotentialEval;
      potentialArgs->Rforce= &HernquistPotentialRforce;
      potentialArgs->zforce= &HernquistPotentialzforce;
      potentialArgs->phiforce= &ZeroForce;
      potentialArgs->dens= &HernquistPotentialDens;
      //potentialArgs->R2deriv= &HernquistPotentialR2deriv;
      //potentialArgs->planarphi2deriv= &ZeroForce;
      //potentialArgs->planarRphideriv= &ZeroForce;
      potentialArgs->nargs= 2;
      potentialArgs->requiresVelocity= false;
      break;
    case 9: //NFWPotential, 2 arguments
      potentialArgs->potentialEval= &NFWPotentialEval;
      potentialArgs->Rforce= &NFWPotentialRforce;
      potentialArgs->zforce= &NFWPotentialzforce;
      potentialArgs->phiforce= &ZeroForce;
      potentialArgs->dens= &NFWPotentialDens;
      //potentialArgs->R2deriv= &NFWPotentialR2deriv;
      //potentialArgs->planarphi2deriv= &ZeroForce;
      //potentialArgs->planarRphideriv= &ZeroForce;
      potentialArgs->nargs= 2;
      potentialArgs->requiresVelocity= false;
      break;
    case 10: //JaffePotential, 2 arguments
      potentialArgs->potentialEval= &JaffePotentialEval;
      potentialArgs->Rforce= &JaffePotentialRforce;
      potentialArgs->zforce= &JaffePotentialzforce;
      potentialArgs->phiforce= &ZeroForce;
      potentialArgs->dens= &JaffePotentialDens;
      //potentialArgs->R2deriv= &JaffePotentialR2deriv;
      //potentialArgs->planarphi2deriv= &ZeroForce;
      //potentialArgs->planarRphideriv= &ZeroForce;
      potentialArgs->nargs= 2;
      potentialArgs->requiresVelocity= false;
      break;
    case 11: //DoubleExponentialDiskPotential, XX arguments
      potentialArgs->potentialEval= &DoubleExponentialDiskPotentialEval;
      potentialArgs->Rforce= &DoubleExponentialDiskPotentialRforce;
      potentialArgs->zforce= &DoubleExponentialDiskPotentialzforce;
      potentialArgs->phiforce= &ZeroForce;
      potentialArgs->dens= &DoubleExponentialDiskPotentialDens;
      //Look at pot_args to figure out the number of arguments
      potentialArgs->nargs= (int) (8 + 2 * *(*pot_args+5) + 4 * ( *(*pot_args+4) + 1 ));
      potentialArgs->requiresVelocity= false;
      break;
    case 12: //FlattenedPowerPotential, 4 arguments
      potentialArgs->potentialEval= &FlattenedPowerPotentialEval;
      potentialArgs->Rforce= &FlattenedPowerPotentialRforce;
      potentialArgs->zforce= &FlattenedPowerPotentialzforce;
      potentialArgs->phiforce= &ZeroForce;
      potentialArgs->dens= &FlattenedPowerPotentialDens;
      potentialArgs->nargs= 4;
      potentialArgs->requiresVelocity= false;
      break;
    case 13: //interpRZPotential, XX arguments
      //Grab the grids and the coefficients
      // Layout of pot_args here: nR, nz, Rgrid[nR], zgrid[nz], then three
      // nR*nz coefficient tables (potential, Rforce, zforce), each loaded
      // into its own 2D interpolator below.
      nR= (int) *(*pot_args)++;
      nz= (int) *(*pot_args)++;
      Rgrid= (double *) malloc ( nR * sizeof ( double ) );
      zgrid= (double *) malloc ( nz * sizeof ( double ) );
      potGrid_splinecoeffs= (double *) malloc ( nR * nz * sizeof ( double ) );
      for (kk=0; kk < nR; kk++)
        *(Rgrid+kk)= *(*pot_args)++;
      for (kk=0; kk < nz; kk++)
        *(zgrid+kk)= *(*pot_args)++;
      for (kk=0; kk < nR; kk++)
        put_row(potGrid_splinecoeffs,kk,*pot_args+kk*nz,nz);
      *pot_args+= nR*nz;
      potentialArgs->i2d= interp_2d_alloc(nR,nz);
      interp_2d_init(potentialArgs->i2d,Rgrid,zgrid,potGrid_splinecoeffs,
                     INTERP_2D_LINEAR); //latter bc we already calculated the coeffs
      potentialArgs->accx= gsl_interp_accel_alloc ();
      potentialArgs->accy= gsl_interp_accel_alloc ();
      for (kk=0; kk < nR; kk++)
        put_row(potGrid_splinecoeffs,kk,*pot_args+kk*nz,nz);
      *pot_args+= nR*nz;
      potentialArgs->i2drforce= interp_2d_alloc(nR,nz);
      interp_2d_init(potentialArgs->i2drforce,Rgrid,zgrid,potGrid_splinecoeffs,
                     INTERP_2D_LINEAR); //latter bc we already calculated the coeffs
      potentialArgs->accxrforce= gsl_interp_accel_alloc ();
      potentialArgs->accyrforce= gsl_interp_accel_alloc ();
      for (kk=0; kk < nR; kk++)
        put_row(potGrid_splinecoeffs,kk,*pot_args+kk*nz,nz);
      *pot_args+= nR*nz;
      potentialArgs->i2dzforce= interp_2d_alloc(nR,nz);
      interp_2d_init(potentialArgs->i2dzforce,Rgrid,zgrid,potGrid_splinecoeffs,
                     INTERP_2D_LINEAR); //latter bc we already calculated the coeffs
      potentialArgs->accxzforce= gsl_interp_accel_alloc ();
      potentialArgs->accyzforce= gsl_interp_accel_alloc ();
      potentialArgs->potentialEval= &interpRZPotentialEval;
      potentialArgs->Rforce= &interpRZPotentialRforce;
      potentialArgs->zforce= &interpRZPotentialzforce;
      potentialArgs->phiforce= &ZeroForce;
      potentialArgs->nargs= 2;
      //clean up
      free(Rgrid);
      free(zgrid);
      free(potGrid_splinecoeffs);
      potentialArgs->requiresVelocity= false;
      break;
    case 14: //IsochronePotential, 2 arguments
      potentialArgs->potentialEval= &IsochronePotentialEval;
      potentialArgs->Rforce= &IsochronePotentialRforce;
      potentialArgs->zforce= &IsochronePotentialzforce;
      potentialArgs->phiforce= &ZeroForce;
      potentialArgs->dens= &IsochronePotentialDens;
      potentialArgs->nargs= 2;
      potentialArgs->requiresVelocity= false;
      break;
    case 15: //PowerSphericalwCutoffPotential, 3 arguments
      potentialArgs->potentialEval= &PowerSphericalPotentialwCutoffEval;
      potentialArgs->Rforce= &PowerSphericalPotentialwCutoffRforce;
      potentialArgs->zforce= &PowerSphericalPotentialwCutoffzforce;
      potentialArgs->phiforce= &ZeroForce;
      potentialArgs->dens= &PowerSphericalPotentialwCutoffDens;
      //potentialArgs->R2deriv= &PowerSphericalPotentialR2deriv;
      //potentialArgs->planarphi2deriv= &ZeroForce;
      //potentialArgs->planarRphideriv= &ZeroForce;
      potentialArgs->nargs= 3;
      potentialArgs->requiresVelocity= false;
      break;
    case 16: //KuzminKutuzovStaeckelPotential, 3 arguments
      potentialArgs->potentialEval= &KuzminKutuzovStaeckelPotentialEval;
      potentialArgs->Rforce= &KuzminKutuzovStaeckelPotentialRforce;
      potentialArgs->zforce= &KuzminKutuzovStaeckelPotentialzforce;
      potentialArgs->phiforce= &ZeroForce;
      //potentialArgs->R2deriv= &KuzminKutuzovStaeckelPotentialR2deriv;
      potentialArgs->nargs= 3;
      potentialArgs->requiresVelocity= false;
      break;
    case 17: //PlummerPotential, 2 arguments
      potentialArgs->potentialEval= &PlummerPotentialEval;
      potentialArgs->Rforce= &PlummerPotentialRforce;
      potentialArgs->zforce= &PlummerPotentialzforce;
      potentialArgs->phiforce= &ZeroForce;
      potentialArgs->dens= &PlummerPotentialDens;
      //potentialArgs->R2deriv= &PlummerPotentialR2deriv;
      potentialArgs->nargs= 2;
      potentialArgs->requiresVelocity= false;
      break;
    case 18: //PseudoIsothermalPotential, 2 arguments
      potentialArgs->potentialEval= &PseudoIsothermalPotentialEval;
      potentialArgs->Rforce= &PseudoIsothermalPotentialRforce;
      potentialArgs->zforce= &PseudoIsothermalPotentialzforce;
      potentialArgs->phiforce= &ZeroForce;
      potentialArgs->dens= &PseudoIsothermalPotentialDens;
      //potentialArgs->R2deriv= &PseudoIsothermalPotentialR2deriv;
      potentialArgs->nargs= 2;
      potentialArgs->requiresVelocity= false;
      break;
    case 19: //KuzminDiskPotential, 2 arguments
      potentialArgs->potentialEval= &KuzminDiskPotentialEval;
      potentialArgs->Rforce= &KuzminDiskPotentialRforce;
      potentialArgs->zforce= &KuzminDiskPotentialzforce;
      potentialArgs->phiforce= &ZeroForce;
      potentialArgs->nargs= 2;
      potentialArgs->requiresVelocity= false;
      break;
    case 20: //BurkertPotential, 2 arguments
      potentialArgs->potentialEval= &BurkertPotentialEval;
      potentialArgs->Rforce= &BurkertPotentialRforce;
      potentialArgs->zforce= &BurkertPotentialzforce;
      potentialArgs->dens= &BurkertPotentialDens;
      potentialArgs->phiforce= &ZeroForce;
      potentialArgs->nargs= 2;
      potentialArgs->requiresVelocity= false;
      break;
    case 21: //TriaxialHernquistPotential, lots of arguments
      potentialArgs->potentialEval= &EllipsoidalPotentialEval;
      potentialArgs->Rforce = &EllipsoidalPotentialRforce;
      potentialArgs->zforce = &EllipsoidalPotentialzforce;
      potentialArgs->phiforce = &EllipsoidalPotentialphiforce;
      potentialArgs->dens= &EllipsoidalPotentialDens;
      // Also assign functions specific to EllipsoidalPotential
      potentialArgs->psi= &TriaxialHernquistPotentialpsi;
      potentialArgs->mdens= &TriaxialHernquistPotentialmdens;
      potentialArgs->mdensDeriv= &TriaxialHernquistPotentialmdensDeriv;
      potentialArgs->nargs = (int) (21 + *(*pot_args+7) + 2 * *(*pot_args
                                    + (int) (*(*pot_args+7) + 20)));
      potentialArgs->requiresVelocity= false;
      break;
    case 22: //TriaxialNFWPotential, lots of arguments
      potentialArgs->potentialEval= &EllipsoidalPotentialEval;
      potentialArgs->Rforce = &EllipsoidalPotentialRforce;
      potentialArgs->zforce = &EllipsoidalPotentialzforce;
      potentialArgs->phiforce = &EllipsoidalPotentialphiforce;
      potentialArgs->dens= &EllipsoidalPotentialDens;
      // Also assign functions specific to EllipsoidalPotential
      potentialArgs->psi= &TriaxialNFWPotentialpsi;
      potentialArgs->mdens= &TriaxialNFWPotentialmdens;
      potentialArgs->mdensDeriv= &TriaxialNFWPotentialmdensDeriv;
      potentialArgs->nargs = (int) (21 + *(*pot_args+7) + 2 * *(*pot_args
                                    + (int) (*(*pot_args+7) + 20)));
      potentialArgs->requiresVelocity= false;
      break;
    case 23: //TriaxialJaffePotential, lots of arguments
      potentialArgs->potentialEval= &EllipsoidalPotentialEval;
      potentialArgs->Rforce = &EllipsoidalPotentialRforce;
      potentialArgs->zforce = &EllipsoidalPotentialzforce;
      potentialArgs->phiforce = &EllipsoidalPotentialphiforce;
      potentialArgs->dens= &EllipsoidalPotentialDens;
      // Also assign functions specific to EllipsoidalPotential
      potentialArgs->psi= &TriaxialJaffePotentialpsi;
      potentialArgs->mdens= &TriaxialJaffePotentialmdens;
      potentialArgs->mdensDeriv= &TriaxialJaffePotentialmdensDeriv;
      potentialArgs->nargs = (int) (21 + *(*pot_args+7) + 2 * *(*pot_args
                                    + (int) (*(*pot_args+7) + 20)));
      potentialArgs->requiresVelocity= false;
      break;
    case 24: //SCFPotential, many arguments
      potentialArgs->potentialEval= &SCFPotentialEval;
      potentialArgs->Rforce= &SCFPotentialRforce;
      potentialArgs->zforce= &SCFPotentialzforce;
      potentialArgs->phiforce= &SCFPotentialphiforce;
      potentialArgs->dens= &SCFPotentialDens;
      potentialArgs->nargs= (int) (5 + (1 + *(*pot_args + 1)) * *(*pot_args+2) * *(*pot_args+3)* *(*pot_args+4) + 7);
      potentialArgs->requiresVelocity= false;
      break;
    case 25: //SoftenedNeedleBarPotential, 13 arguments
      potentialArgs->potentialEval= &SoftenedNeedleBarPotentialEval;
      potentialArgs->Rforce= &SoftenedNeedleBarPotentialRforce;
      potentialArgs->zforce= &SoftenedNeedleBarPotentialzforce;
      potentialArgs->phiforce= &SoftenedNeedleBarPotentialphiforce;
      potentialArgs->nargs= (int) 13;
      potentialArgs->requiresVelocity= false;
      break;
    case 26: //DiskSCFPotential, nsigma+3 arguments
      potentialArgs->potentialEval= &DiskSCFPotentialEval;
      potentialArgs->Rforce= &DiskSCFPotentialRforce;
      potentialArgs->zforce= &DiskSCFPotentialzforce;
      potentialArgs->dens= &DiskSCFPotentialDens;
      potentialArgs->phiforce= &ZeroForce;
      potentialArgs->nargs= (int) **pot_args + 3;
      potentialArgs->requiresVelocity= false;
      break;
    case 27: // SpiralArmsPotential, 10 arguments + array of Cs
      potentialArgs->Rforce = &SpiralArmsPotentialRforce;
      potentialArgs->zforce = &SpiralArmsPotentialzforce;
      potentialArgs->phiforce = &SpiralArmsPotentialphiforce;
      //potentialArgs->R2deriv = &SpiralArmsPotentialR2deriv;
      //potentialArgs->z2deriv = &SpiralArmsPotentialz2deriv;
      potentialArgs->phi2deriv = &SpiralArmsPotentialphi2deriv;
      //potentialArgs->Rzderiv = &SpiralArmsPotentialRzderiv;
      potentialArgs->Rphideriv = &SpiralArmsPotentialRphideriv;
      potentialArgs->nargs = (int) 10 + **pot_args;
      potentialArgs->requiresVelocity= false;
      break;
    case 30: // PerfectEllipsoidPotential, lots of arguments
      potentialArgs->potentialEval= &EllipsoidalPotentialEval;
      potentialArgs->Rforce = &EllipsoidalPotentialRforce;
      potentialArgs->zforce = &EllipsoidalPotentialzforce;
      potentialArgs->phiforce = &EllipsoidalPotentialphiforce;
      potentialArgs->dens= &EllipsoidalPotentialDens;
      //potentialArgs->R2deriv = &EllipsoidalPotentialR2deriv;
      //potentialArgs->z2deriv = &EllipsoidalPotentialz2deriv;
      //potentialArgs->phi2deriv = &EllipsoidalPotentialphi2deriv;
      //potentialArgs->Rzderiv = &EllipsoidalPotentialRzderiv;
      //potentialArgs->Rphideriv = &EllipsoidalPotentialRphideriv;
      // Also assign functions specific to EllipsoidalPotential
      potentialArgs->psi= &PerfectEllipsoidPotentialpsi;
      potentialArgs->mdens= &PerfectEllipsoidPotentialmdens;
      potentialArgs->mdensDeriv= &PerfectEllipsoidPotentialmdensDeriv;
      potentialArgs->nargs = (int) (21 + *(*pot_args+7) + 2 * *(*pot_args
                                    + (int) (*(*pot_args+7) + 20)));
      potentialArgs->requiresVelocity= false;
      break;
    // 31: KGPotential
    // 32: IsothermalDiskPotential
    case 33: //DehnenCoreSphericalPotential, 2 arguments
      potentialArgs->potentialEval= &DehnenCoreSphericalPotentialEval;
      potentialArgs->Rforce= &DehnenCoreSphericalPotentialRforce;
      potentialArgs->zforce= &DehnenCoreSphericalPotentialzforce;
      potentialArgs->phiforce= &ZeroForce;
      potentialArgs->dens= &DehnenCoreSphericalPotentialDens;
      //potentialArgs->R2deriv= &DehnenCoreSphericalPotentialR2deriv;
      //potentialArgs->planarphi2deriv= &ZeroForce;
      //potentialArgs->planarRphideriv= &ZeroForce;
      potentialArgs->nargs= 2;
      potentialArgs->requiresVelocity= false;
      break;
    case 34: //DehnenSphericalPotential, 3 arguments
      potentialArgs->potentialEval= &DehnenSphericalPotentialEval;
      potentialArgs->Rforce= &DehnenSphericalPotentialRforce;
      potentialArgs->zforce= &DehnenSphericalPotentialzforce;
      potentialArgs->phiforce= &ZeroForce;
      potentialArgs->dens= &DehnenSphericalPotentialDens;
      //potentialArgs->R2deriv= &DehnenSphericalPotentialR2deriv;
      //potentialArgs->planarphi2deriv= &ZeroForce;
      //potentialArgs->planarRphideriv= &ZeroForce;
      potentialArgs->nargs= 3;
      potentialArgs->requiresVelocity= false;
      break;
    case 35: //HomogeneousSpherePotential, 3 arguments
      potentialArgs->potentialEval= &HomogeneousSpherePotentialEval;
      potentialArgs->Rforce= &HomogeneousSpherePotentialRforce;
      potentialArgs->zforce= &HomogeneousSpherePotentialzforce;
      potentialArgs->phiforce= &ZeroForce;
      potentialArgs->dens= &HomogeneousSpherePotentialDens;
      potentialArgs->nargs= 3;
      potentialArgs->requiresVelocity= false;
      break;
    case 36: //interpSphericalPotential, XX arguments
      // Set up 1 spline in potentialArgs
      potentialArgs->nspline1d= 1;
      potentialArgs->spline1d= (gsl_spline **) \
        malloc ( potentialArgs->nspline1d*sizeof ( gsl_spline *) );
      potentialArgs->acc1d= (gsl_interp_accel **) \
        malloc ( potentialArgs->nspline1d * sizeof ( gsl_interp_accel * ) );
      // allocate accelerator
      *potentialArgs->acc1d= gsl_interp_accel_alloc();
      // Set up interpolater
      // pot_args layout: nr, then r grid (nr values), then potential values
      // (nr values); 2*nr+1 entries total are consumed here.
      nr= (int) **pot_args;
      *potentialArgs->spline1d= gsl_spline_alloc(gsl_interp_cspline,nr);
      gsl_spline_init(*potentialArgs->spline1d,*pot_args+1,*pot_args+1+nr,nr);
      *pot_args+= 2*nr+1;
      // Bind forces
      potentialArgs->potentialEval= &SphericalPotentialEval;
      potentialArgs->Rforce = &SphericalPotentialRforce;
      potentialArgs->zforce = &SphericalPotentialzforce;
      potentialArgs->phiforce= &ZeroForce;
      potentialArgs->dens= &SphericalPotentialDens;
      // Also assign functions specific to SphericalPotential
      potentialArgs->revaluate= &interpSphericalPotentialrevaluate;
      potentialArgs->rforce= &interpSphericalPotentialrforce;
      potentialArgs->r2deriv= &interpSphericalPotentialr2deriv;
      potentialArgs->rdens= &interpSphericalPotentialrdens;
      potentialArgs->nargs = (int) 6;
      potentialArgs->requiresVelocity= false;
      break;
    case 37: // TriaxialGaussianPotential, lots of arguments
      potentialArgs->potentialEval= &EllipsoidalPotentialEval;
      potentialArgs->Rforce = &EllipsoidalPotentialRforce;
      potentialArgs->zforce = &EllipsoidalPotentialzforce;
      potentialArgs->phiforce = &EllipsoidalPotentialphiforce;
      potentialArgs->dens= &EllipsoidalPotentialDens;
      //potentialArgs->R2deriv = &EllipsoidalPotentialR2deriv;
      //potentialArgs->z2deriv = &EllipsoidalPotentialz2deriv;
      //potentialArgs->phi2deriv = &EllipsoidalPotentialphi2deriv;
      //potentialArgs->Rzderiv = &EllipsoidalPotentialRzderiv;
      //potentialArgs->Rphideriv = &EllipsoidalPotentialRphideriv;
      // Also assign functions specific to EllipsoidalPotential
      potentialArgs->psi= &TriaxialGaussianPotentialpsi;
      potentialArgs->mdens= &TriaxialGaussianPotentialmdens;
      potentialArgs->mdensDeriv= &TriaxialGaussianPotentialmdensDeriv;
      potentialArgs->nargs = (int) (21 + *(*pot_args+7) + 2 * *(*pot_args
                                    + (int) (*(*pot_args+7) + 20)));
      potentialArgs->requiresVelocity= false;
      break;
    //////////////////////////////// WRAPPERS /////////////////////////////////////
    case -1: //DehnenSmoothWrapperPotential
      potentialArgs->potentialEval= &DehnenSmoothWrapperPotentialEval;
      potentialArgs->Rforce= &DehnenSmoothWrapperPotentialRforce;
      potentialArgs->zforce= &DehnenSmoothWrapperPotentialzforce;
      potentialArgs->phiforce= &DehnenSmoothWrapperPotentialphiforce;
      potentialArgs->nargs= (int) 4;
      potentialArgs->requiresVelocity= false;
      break;
    case -2: //SolidBodyRotationWrapperPotential
      potentialArgs->Rforce= &SolidBodyRotationWrapperPotentialRforce;
      potentialArgs->zforce= &SolidBodyRotationWrapperPotentialzforce;
      potentialArgs->phiforce= &SolidBodyRotationWrapperPotentialphiforce;
      potentialArgs->nargs= (int) 3;
      potentialArgs->requiresVelocity= false;
      break;
    case -4: //CorotatingRotationWrapperPotential
      potentialArgs->Rforce= &CorotatingRotationWrapperPotentialRforce;
      potentialArgs->zforce= &CorotatingRotationWrapperPotentialzforce;
      potentialArgs->phiforce= &CorotatingRotationWrapperPotentialphiforce;
      potentialArgs->nargs= (int) 5;
      potentialArgs->requiresVelocity= false;
      break;
    case -5: //GaussianAmplitudeWrapperPotential
      potentialArgs->potentialEval= &GaussianAmplitudeWrapperPotentialEval;
      potentialArgs->Rforce= &GaussianAmplitudeWrapperPotentialRforce;
      potentialArgs->zforce= &GaussianAmplitudeWrapperPotentialzforce;
      potentialArgs->phiforce= &GaussianAmplitudeWrapperPotentialphiforce;
      potentialArgs->nargs= (int) 3;
      potentialArgs->requiresVelocity= false;
      break;
    case -6: //MovingObjectPotential
      potentialArgs->Rforce= &MovingObjectPotentialRforce;
      potentialArgs->zforce= &MovingObjectPotentialzforce;
      potentialArgs->phiforce= &MovingObjectPotentialphiforce;
      potentialArgs->nargs= (int) 3;
      potentialArgs->requiresVelocity= false;
      break;
    case -7: //ChandrasekharDynamicalFrictionForce
      potentialArgs->RforceVelocity= &ChandrasekharDynamicalFrictionForceRforce;
      potentialArgs->zforceVelocity= &ChandrasekharDynamicalFrictionForcezforce;
      potentialArgs->phiforceVelocity= &ChandrasekharDynamicalFrictionForcephiforce;
      potentialArgs->nargs= (int) 16;
      potentialArgs->requiresVelocity= true;
      break;
    }
    // *pot_type was already advanced by the switch, so *(*pot_type-1) is the
    // type code just handled; remember which post-parse setup steps it needs.
    int setupMovingObjectSplines = *(*pot_type-1) == -6 ? 1 : 0;
    int setupChandrasekharDynamicalFrictionSplines = *(*pot_type-1) == -7 ? 1 : 0;
    if ( *(*pot_type-1) < 0 ) { // Parse wrapped potential for wrappers
      potentialArgs->nwrapped= (int) *(*pot_args)++;
      potentialArgs->wrappedPotentialArg= \
        (struct potentialArg *) malloc ( potentialArgs->nwrapped \
                                         * sizeof (struct potentialArg) );
      // Recurse with the same moving pointers so the wrapped potential's
      // type codes and arguments are consumed in order.
      parse_leapFuncArgs_Full(potentialArgs->nwrapped,
                              potentialArgs->wrappedPotentialArg,
                              pot_type,pot_args);
    }
    if (setupMovingObjectSplines)
      initMovingObjectSplines(potentialArgs, pot_args);
    if (setupChandrasekharDynamicalFrictionSplines)
      initChandrasekharDynamicalFrictionSplines(potentialArgs,pot_args);
    // Copy this potential's nargs numerical parameters out of the flat array.
    potentialArgs->args= (double *) malloc( potentialArgs->nargs * sizeof(double));
    for (jj=0; jj < potentialArgs->nargs; jj++){
      *(potentialArgs->args)= *(*pot_args)++;
      potentialArgs->args++;
    }
    // Rewind args to its start; advance to the next potentialArg slot.
    potentialArgs->args-= potentialArgs->nargs;
    potentialArgs++;
  }
  // Rewind to the first potentialArg so the caller's pointer is unchanged.
  potentialArgs-= npot;
}
/*
 * Integrate nobj full 3D orbits through the given potentials with OpenMP.
 *
 *   yo          : initial conditions, 6 phase-space values per object
 *                 (cylindrical; converted to rectangular in place)
 *   nt, t       : number of output times and the times themselves
 *   npot, pot_type, pot_args : flattened potential specification
 *   dt          : integrator step (fixed-step methods)
 *   rtol, atol  : tolerances (adaptive methods)
 *   result      : output array, 6*nt values per object (cylindrical)
 *   err         : per-object error codes
 *   odeint_type : integrator selector (0-6, see switch below)
 */
EXPORT void integrateFullOrbit(int nobj,
                               double *yo,
                               int nt,
                               double *t,
                               int npot,
                               int * pot_type,
                               double * pot_args,
                               double dt,
                               double rtol,
                               double atol,
                               double *result,
                               int * err,
                               int odeint_type){
  //Set up the forces, first count
  int ii,jj;
  int dim;
  int max_threads;
  int * thread_pot_type;
  double * thread_pot_args;
  max_threads= ( nobj < omp_get_max_threads() ) ? nobj : omp_get_max_threads();
  // Because potentialArgs may cache, safest to have one / thread
  struct potentialArg * potentialArgs= (struct potentialArg *) malloc ( max_threads * npot * sizeof (struct potentialArg) );
  #pragma omp parallel for schedule(static,1) private(ii,thread_pot_type,thread_pot_args) num_threads(max_threads)
  for (ii=0; ii < max_threads; ii++) {
    thread_pot_type= pot_type; // need to make thread-private pointers, bc
    thread_pot_args= pot_args; // these pointers are changed in parse_...
    parse_leapFuncArgs_Full(npot,potentialArgs+ii*npot,
                            &thread_pot_type,&thread_pot_args);
  }
  //Integrate
  void (*odeint_func)(void (*func)(double, double *, double *,
                                   int, struct potentialArg *),
                      int,
                      double *,
                      int, double, double *,
                      int, struct potentialArg *,
                      double, double,
                      double *,int *);
  void (*odeint_deriv_func)(double, double *, double *,
                            int,struct potentialArg *);
  switch ( odeint_type ) {
  case 0: //leapfrog
    odeint_func= &leapfrog;
    odeint_deriv_func= &evalRectForce;
    dim= 3;
    break;
  case 1: //RK4
    odeint_func= &bovy_rk4;
    odeint_deriv_func= &evalRectDeriv;
    dim= 6;
    break;
  case 2: //RK6
    odeint_func= &bovy_rk6;
    odeint_deriv_func= &evalRectDeriv;
    dim= 6;
    break;
  case 3: //symplec4
    odeint_func= &symplec4;
    odeint_deriv_func= &evalRectForce;
    dim= 3;
    break;
  case 4: //symplec6
    odeint_func= &symplec6;
    odeint_deriv_func= &evalRectForce;
    dim= 3;
    break;
  case 5: //DOPR54
    odeint_func= &bovy_dopr54;
    odeint_deriv_func= &evalRectDeriv;
    dim= 6;
    break;
  case 6: //DOP853
    odeint_func= &dop853;
    odeint_deriv_func= &evalRectDeriv;
    dim= 6;
    break;
  default: //unknown integrator: previously fell through with odeint_func and
           //dim uninitialized (undefined behavior); clean up and flag an error
    for (ii=0; ii < max_threads; ii++)
      free_potentialArgs(npot,potentialArgs+ii*npot);
    free(potentialArgs);
    *err= -10;
    return;
  }
  #pragma omp parallel for schedule(dynamic,ORBITS_CHUNKSIZE) private(ii,jj) num_threads(max_threads)
  for (ii=0; ii < nobj; ii++) {
    cyl_to_rect_galpy(yo+6*ii);
    odeint_func(odeint_deriv_func,dim,yo+6*ii,nt,dt,t,
                npot,potentialArgs+omp_get_thread_num()*npot,rtol,atol,
                result+6*nt*ii,err+ii);
    // Convert every output sample back to cylindrical coordinates
    for (jj=0; jj < nt; jj++)
      rect_to_cyl_galpy(result+6*jj+6*nt*ii);
  }
  //Free allocated memory
  #pragma omp parallel for schedule(static,1) private(ii) num_threads(max_threads)
  for (ii=0; ii < max_threads; ii++)
    free_potentialArgs(npot,potentialArgs+ii*npot);
  free(potentialArgs);
  //Done!
}
// LCOV_EXCL_START
// integrateOrbit_dxdv: integrate one orbit together with its phase-space
// derivatives (dx/dv tracking) using the selected fixed/adaptive integrator.
//
// Input:
//   yo          - initial condition (rectangular phase-space + derivatives)
//   nt, t       - number of output times and the output times themselves
//   npot        - number of potentials
//   pot_type    - potential type codes (consumed by parse_leapFuncArgs_Full)
//   pot_args    - flat array of potential parameters (consumed likewise)
//   rtol, atol  - integration tolerances
//   odeint_type - integrator selector (0..6, see switch below)
// Output:
//   result - integrated orbit + derivatives at each output time
//   err    - error code; set to -10 when odeint_type is not recognized
void integrateOrbit_dxdv(double *yo,
                         int nt,
                         double *t,
                         int npot,
                         int * pot_type,
                         double * pot_args,
                         double rtol,
                         double atol,
                         double *result,
                         int * err,
                         int odeint_type){
  //Set up the forces, first count
  int dim;
  struct potentialArg * potentialArgs= (struct potentialArg *) malloc ( npot * sizeof (struct potentialArg) );
  parse_leapFuncArgs_Full(npot,potentialArgs,&pot_type,&pot_args);
  //Integrate
  void (*odeint_func)(void (*func)(double, double *, double *,
                                   int, struct potentialArg *),
                      int,
                      double *,
                      int, double, double *,
                      int, struct potentialArg *,
                      double, double,
                      double *,int *);
  void (*odeint_deriv_func)(double, double *, double *,
                            int,struct potentialArg *);
  switch ( odeint_type ) {
  case 0: //leapfrog
    odeint_func= &leapfrog;
    odeint_deriv_func= &evalRectForce;
    dim= 6;
    break;
  case 1: //RK4
    odeint_func= &bovy_rk4;
    odeint_deriv_func= &evalRectDeriv_dxdv;
    dim= 12;
    break;
  case 2: //RK6
    odeint_func= &bovy_rk6;
    odeint_deriv_func= &evalRectDeriv_dxdv;
    dim= 12;
    break;
  case 3: //symplec4
    odeint_func= &symplec4;
    odeint_deriv_func= &evalRectForce;
    dim= 6;
    break;
  case 4: //symplec6
    odeint_func= &symplec6;
    odeint_deriv_func= &evalRectForce;
    dim= 6;
    break;
  case 5: //DOPR54
    odeint_func= &bovy_dopr54;
    odeint_deriv_func= &evalRectDeriv_dxdv;
    dim= 12;
    break;
  case 6: //DOP853
    odeint_func= &dop853;
    odeint_deriv_func= &evalRectDeriv_dxdv;
    dim= 12;
    break;
  default:
    // BUG FIX: previously an unknown odeint_type left odeint_func /
    // odeint_deriv_func / dim uninitialized (undefined behavior below).
    // Report the error and clean up instead.
    *err= -10;
    free_potentialArgs(npot,potentialArgs);
    free(potentialArgs);
    return;
  }
  // -9999.99 is the (unused) fixed-dt sentinel for this entry point
  odeint_func(odeint_deriv_func,dim,yo,nt,-9999.99,t,npot,potentialArgs,
              rtol,atol,result,err);
  //Free allocated memory
  free_potentialArgs(npot,potentialArgs);
  free(potentialArgs);
  //Done!
}
// LCOV_EXCL_STOP
// Evaluate the rectangular-frame force a = (ax, ay, az) at rectangular
// position q = (x, y, z) by converting to cylindrical coordinates, calling
// the cylindrical force routines, and rotating the result back.
void evalRectForce(double t, double *q, double *a,
                   int nargs, struct potentialArg * potentialArgs){
  double xpos, ypos, zpos, rad, azim, cosp, sinp, fR, fz, fphi;
  //q is rectangular so calculate R and phi
  xpos= q[0];
  ypos= q[1];
  zpos= q[2];
  rad= sqrt(xpos*xpos+ypos*ypos);
  azim= acos(xpos/rad);
  sinp= ypos/rad;
  cosp= xpos/rad;
  if ( ypos < 0. ) azim= 2.*M_PI-azim;  // acos only covers [0,pi]
  //Calculate the forces in cylindrical coordinates
  fR= calcRforce(rad,zpos,azim,t,nargs,potentialArgs);
  fz= calczforce(rad,zpos,azim,t,nargs,potentialArgs);
  fphi= calcPhiforce(rad,zpos,azim,t,nargs,potentialArgs);
  //Rotate (F_R, F_phi) back into the rectangular frame
  a[0]= cosp*fR-1./rad*sinp*fphi;
  a[1]= sinp*fR+1./rad*cosp*fphi;
  a[2]= fz;
}
// Evaluate the full 6D phase-space derivative at rectangular state
// q = (x, y, z, vx, vy, vz): the first three components of a are the
// velocities, the last three the rectangular-frame forces. vR and vT are
// also computed because dissipative forces need them.
void evalRectDeriv(double t, double *q, double *a,
                   int nargs, struct potentialArg * potentialArgs){
  double xpos, ypos, zpos, rad, azim, cosp, sinp, vrad, vtan, fR, fz, fphi;
  //first three derivatives are just the velocities
  a[0]= q[3];
  a[1]= q[4];
  a[2]= q[5];
  //Rest is force
  //q is rectangular so calculate R and phi, vR and vT (for dissipative)
  xpos= q[0];
  ypos= q[1];
  zpos= q[2];
  rad= sqrt(xpos*xpos+ypos*ypos);
  azim= acos(xpos/rad);
  sinp= ypos/rad;
  cosp= xpos/rad;
  if ( ypos < 0. ) azim= 2.*M_PI-azim;  // acos only covers [0,pi]
  vrad= q[3] * cosp + q[4] * sinp;
  vtan= -q[3] * sinp + q[4] * cosp;
  //Calculate the forces in cylindrical coordinates
  fR= calcRforce(rad,zpos,azim,t,nargs,potentialArgs,vrad,vtan,q[5]);
  fz= calczforce(rad,zpos,azim,t,nargs,potentialArgs,vrad,vtan,q[5]);
  fphi= calcPhiforce(rad,zpos,azim,t,nargs,potentialArgs,vrad,vtan,q[5]);
  //Rotate (F_R, F_phi) back into the rectangular frame
  a[3]= cosp*fR-1./rad*sinp*fphi;
  a[4]= sinp*fR+1./rad*cosp*fphi;
  a[5]= fz;
}
// Build the three 1D cubic splines x(t), y(t), z(t) used by a
// MovingObjectPotential to interpolate the moving object's trajectory.
//
// Consumes from *pot_args (and advances the pointer past it on exit):
//   [0]                   nPts
//   [1 .. nPts]           t_arr (times)
//   [1+nPts .. 1+4*nPts]  x_arr, y_arr, z_arr (nPts values each)
// to and tf are read at offsets 4*nPts+1 and 4*nPts+2 past t_arr, i.e. from
// the regular potential arguments that FOLLOW the spline data (they are not
// consumed here). NOTE(review): presumably the argument at +4*nPts is the
// amplitude, followed by to and tf — confirm against the Python-side packing.
//
// The spline abscissa is the rescaled time (t - to)/(tf - to) in [0,1].
// Ownership of the gsl_spline/gsl_interp_accel objects is transferred to
// potentialArgs (freed later by free_potentialArgs).
void initMovingObjectSplines(struct potentialArg * potentialArgs,
                             double ** pot_args){
  gsl_interp_accel *x_accel_ptr = gsl_interp_accel_alloc();
  gsl_interp_accel *y_accel_ptr = gsl_interp_accel_alloc();
  gsl_interp_accel *z_accel_ptr = gsl_interp_accel_alloc();
  int nPts = (int) **pot_args;
  gsl_spline *x_spline = gsl_spline_alloc(gsl_interp_cspline, nPts);
  gsl_spline *y_spline = gsl_spline_alloc(gsl_interp_cspline, nPts);
  gsl_spline *z_spline = gsl_spline_alloc(gsl_interp_cspline, nPts);
  double * t_arr = *pot_args+1;
  double * x_arr = t_arr+1*nPts;
  double * y_arr = t_arr+2*nPts;
  double * z_arr = t_arr+3*nPts;
  double * t= (double *) malloc ( nPts * sizeof (double) );
  // to/tf live beyond the four nPts-long arrays (see layout note above)
  double tf = *(t_arr+4*nPts+2);
  double to = *(t_arr+4*nPts+1);
  int ii;
  // Rescale times to [0,1] so the splines are evaluated on a unit interval
  for (ii=0; ii < nPts; ii++)
    *(t+ii) = (t_arr[ii]-to)/(tf-to);
  gsl_spline_init(x_spline, t, x_arr, nPts);
  gsl_spline_init(y_spline, t, y_arr, nPts);
  gsl_spline_init(z_spline, t, z_arr, nPts);
  potentialArgs->nspline1d= 3;
  potentialArgs->spline1d= (gsl_spline **) malloc ( 3*sizeof ( gsl_spline *) );
  potentialArgs->acc1d= (gsl_interp_accel **) \
    malloc ( 3 * sizeof ( gsl_interp_accel * ) );
  *potentialArgs->spline1d = x_spline;
  *potentialArgs->acc1d = x_accel_ptr;
  *(potentialArgs->spline1d+1)= y_spline;
  *(potentialArgs->acc1d+1)= y_accel_ptr;
  *(potentialArgs->spline1d+2)= z_spline;
  *(potentialArgs->acc1d+2)= z_accel_ptr;
  // Advance past the spline data (count + four nPts-long arrays)
  *pot_args = *pot_args + (int) (1+4*nPts);
  free(t);  // only the rescaled-time scratch array is freed here
}
// Build the 1D cubic spline sigma_r(r) used by ChandrasekharDynamicalFriction
// to interpolate the radial velocity dispersion.
//
// Consumes from *pot_args (and advances the pointer past it on exit):
//   [0]                 nPts
//   [1 .. nPts]         r_arr (radii)
//   [1+nPts .. 2*nPts]  sr_arr (velocity dispersions)
// ro and rf are read at offsets 2*nPts+14 and 2*nPts+15 past r_arr, i.e. from
// the regular potential arguments that follow the spline data (not consumed
// here). NOTE(review): this hard-codes that ro/rf are the 15th/16th regular
// arguments — confirm against the Python-side argument packing.
//
// The spline abscissa is the rescaled radius (r - ro)/(rf - ro) in [0,1].
// Ownership of the gsl_spline/gsl_interp_accel is transferred to
// potentialArgs (freed later by free_potentialArgs).
void initChandrasekharDynamicalFrictionSplines(struct potentialArg * potentialArgs,
                                               double ** pot_args){
  gsl_interp_accel *sr_accel_ptr = gsl_interp_accel_alloc();
  int nPts = (int) **pot_args;
  gsl_spline *sr_spline = gsl_spline_alloc(gsl_interp_cspline,nPts);
  double * r_arr = *pot_args+1;
  double * sr_arr = r_arr+1*nPts;
  double * r= (double *) malloc ( nPts * sizeof (double) );
  // ro/rf live beyond the two nPts-long arrays (see layout note above)
  double ro = *(r_arr+2*nPts+14);
  double rf = *(r_arr+2*nPts+15);
  int ii;
  // Rescale radii to [0,1] so the spline is evaluated on a unit interval
  for (ii=0; ii < nPts; ii++)
    *(r+ii) = (r_arr[ii]-ro)/(rf-ro);
  gsl_spline_init(sr_spline,r,sr_arr,nPts);
  potentialArgs->nspline1d= 1;
  potentialArgs->spline1d= (gsl_spline **) \
    malloc ( potentialArgs->nspline1d*sizeof ( gsl_spline *) );
  potentialArgs->acc1d= (gsl_interp_accel **) \
    malloc ( potentialArgs->nspline1d * sizeof ( gsl_interp_accel * ) );
  *potentialArgs->spline1d = sr_spline;
  *potentialArgs->acc1d = sr_accel_ptr;
  // Advance past the spline data (count + two nPts-long arrays)
  *pot_args = *pot_args + (int) (1+(1+potentialArgs->nspline1d)*nPts);
  free(r);  // only the rescaled-radius scratch array is freed here
}
// LCOV_EXCL_START
// Evaluate the 12D derivative for orbit integration with phase-space
// derivative (dxdv) tracking. The second derivatives of the potential
// (R2deriv, phi2deriv, Rphideriv) are transformed to the rectangular
// dF/dx, dF/dy entries of the variational-equation Jacobian.
//
// NOTE(review): the last three derivatives use *(q+4) and *(q+5) as the
// displacement components; for a 12-dim state (x,y,z,vx,vy,vz,dx,...,dvz)
// those would be *(q+6) and *(q+7) — this looks inherited from the 8-dim
// planar version. The z second derivatives are also missing (see the
// author's BOVY note at the end). Confirm before relying on this routine;
// it is excluded from coverage (LCOV) above.
void evalRectDeriv_dxdv(double t, double *q, double *a,
                        int nargs, struct potentialArg * potentialArgs){
  double sinphi, cosphi, x, y, phi,R,Rforce,phiforce,z,zforce;
  double R2deriv, phi2deriv, Rphideriv, dFxdx, dFxdy, dFydx, dFydy;
  //first three derivatives are just the velocities
  *a++= *(q+3);
  *a++= *(q+4);
  *a++= *(q+5);
  //Rest is force
  //q is rectangular so calculate R and phi
  x= *q;
  y= *(q+1);
  z= *(q+2);
  R= sqrt(x*x+y*y);
  phi= acos(x/R);
  sinphi= y/R;
  cosphi= x/R;
  if ( y < 0. ) phi= 2.*M_PI-phi;  // acos only covers [0,pi]
  //Calculate the forces
  Rforce= calcRforce(R,z,phi,t,nargs,potentialArgs);
  zforce= calczforce(R,z,phi,t,nargs,potentialArgs);
  phiforce= calcPhiforce(R,z,phi,t,nargs,potentialArgs);
  // Rotate the cylindrical forces into the rectangular frame
  *a++= cosphi*Rforce-1./R*sinphi*phiforce;
  *a++= sinphi*Rforce+1./R*cosphi*phiforce;
  *a++= zforce;
  //dx derivatives are just dv
  *a++= *(q+9);
  *a++= *(q+10);
  *a++= *(q+11);
  //for the dv derivatives we need also R2deriv, phi2deriv, and Rphideriv
  R2deriv= calcR2deriv(R,z,phi,t,nargs,potentialArgs);
  phi2deriv= calcphi2deriv(R,z,phi,t,nargs,potentialArgs);
  Rphideriv= calcRphideriv(R,z,phi,t,nargs,potentialArgs);
  //..and dFxdx, dFxdy, dFydx, dFydy
  // Chain rule: rectangular force gradients from cylindrical 1st/2nd derivs
  dFxdx= -cosphi*cosphi*R2deriv
    +2.*cosphi*sinphi/R/R*phiforce
    +sinphi*sinphi/R*Rforce
    +2.*sinphi*cosphi/R*Rphideriv
    -sinphi*sinphi/R/R*phi2deriv;
  dFxdy= -sinphi*cosphi*R2deriv
    +(sinphi*sinphi-cosphi*cosphi)/R/R*phiforce
    -cosphi*sinphi/R*Rforce
    -(cosphi*cosphi-sinphi*sinphi)/R*Rphideriv
    +cosphi*sinphi/R/R*phi2deriv;
  dFydx= -cosphi*sinphi*R2deriv
    +(sinphi*sinphi-cosphi*cosphi)/R/R*phiforce
    +(sinphi*sinphi-cosphi*cosphi)/R*Rphideriv
    -sinphi*cosphi/R*Rforce
    +sinphi*cosphi/R/R*phi2deriv;
  dFydy= -sinphi*sinphi*R2deriv
    -2.*sinphi*cosphi/R/R*phiforce
    -2.*sinphi*cosphi/R*Rphideriv
    +cosphi*cosphi/R*Rforce
    -cosphi*cosphi/R/R*phi2deriv;
  // Variational part: d(dv)/dt = (dF/dx) . dx  (see NOTE(review) above on
  // the q+4/q+5 indices)
  *a++= dFxdx * *(q+4) + dFxdy * *(q+5);
  *a++= dFydx * *(q+4) + dFydy * *(q+5);
  *a= 0; //BOVY: PUT IN Z2DERIVS
}
// LCOV_EXCL_STOP
|
GB_AxB_dot2_template.c | //------------------------------------------------------------------------------
// GB_AxB_dot2_template: C=A'B, C<!M>=A'*B, or C<M>=A'*B via dot products
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// A and B are sparse, bitmap, or full; never hypersparse. If the input
// matrices A and/or B are hypersparse, they are converted into hyper_shallow
// sparse matrices, and C is converted from bitmap to sparse/hypersparse when
// done.
#if ( !GB_A_IS_HYPER && !GB_B_IS_HYPER )
{
    //--------------------------------------------------------------------------
    // C=A'*B, C<M>=A'*B, or C<!M>=A'*B where C is bitmap
    //--------------------------------------------------------------------------
    // The ntasks tasks tile C: a_tid selects a slice of A's vectors and b_tid
    // a slice of B's vectors (ntasks presumably equals naslice*nbslice — set
    // by the caller of this template).
    int tid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:cnvals)
    for (tid = 0 ; tid < ntasks ; tid++)
    {
        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------
        const int a_tid = tid / nbslice ;
        const int b_tid = tid % nbslice ;
        const int64_t kA_start = A_slice [a_tid] ;
        const int64_t kA_end = A_slice [a_tid+1] ;
        const int64_t kB_start = B_slice [b_tid] ;
        const int64_t kB_end = B_slice [b_tid+1] ;
        // per-task entry count; folded into cnvals via the OpenMP reduction
        int64_t task_cnvals = 0 ;
        //----------------------------------------------------------------------
        // C=A'*B, C<M>=A'*B, or C<!M>=A'*B via dot products
        //----------------------------------------------------------------------
        for (int64_t j = kB_start ; j < kB_end ; j++)
        {
            //------------------------------------------------------------------
            // get B(:,j) and C(:,j)
            //------------------------------------------------------------------
            // C is bitmap, so column j of C starts at entry j*cvlen
            const int64_t pC_start = j * cvlen ;
            #if GB_B_IS_SPARSE
            // B is sparse (never hypersparse)
            const int64_t pB_start = Bp [j] ;
            const int64_t pB_end = Bp [j+1] ;
            const int64_t bjnz = pB_end - pB_start ;
            if (bjnz == 0)
            {
                // no work to do if B(:,j) is empty, except to clear Cb
                memset (&Cb [pC_start + kA_start], 0, kA_end - kA_start) ;
                continue ;
            }
            #if GB_A_IS_SPARSE
            // Both A and B are sparse; get first and last in B(:,j)
            // (used by GB_AxB_dot_cij.c to skip disjoint A(:,i)/B(:,j) pairs)
            const int64_t ib_first = Bi [pB_start] ;
            const int64_t ib_last = Bi [pB_end-1] ;
            #endif
            #else
            // B is bitmap or full
            const int64_t pB_start = j * vlen ;
            #endif
            //------------------------------------------------------------------
            // C(:,j)<#M(:,j)> = A'*B(:,j), or C(:,j) = A'*B(:,j) if no mask
            //------------------------------------------------------------------
            for (int64_t i = kA_start ; i < kA_end ; i++)
            {
                //--------------------------------------------------------------
                // get C(i,j), M(i,j), and clear the C(i,j) bitmap
                //--------------------------------------------------------------
                int64_t pC = pC_start + i ; // C is bitmap
                #if defined ( GB_ANY_SPECIALIZED )
                // M is bitmap and structural; Mask_comp true
                Cb [pC] = 0 ;
                if (!Mb [pC])
                #elif defined ( GB_MASK_IS_PRESENT )
                bool mij ;
                if (M_is_bitmap)
                {
                    // M is bitmap
                    mij = Mb [pC] && GB_mcast (Mx, pC, msize) ;
                }
                else if (M_is_full)
                {
                    // M is full
                    mij = GB_mcast (Mx, pC, msize) ;
                }
                else // M is sparse or hyper
                {
                    // M has been scattered into the C bitmap
                    // (entries of M are marked with Cb values > 1)
                    mij = (Cb [pC] > 1) ;
                }
                Cb [pC] = 0 ;
                // apply the (possibly complemented) mask
                if (mij ^ Mask_comp)
                #else
                // M is not present
                Cb [pC] = 0 ;
                #endif
                {
                    //----------------------------------------------------------
                    // the mask allows C(i,j) to be computed
                    //----------------------------------------------------------
                    #if GB_A_IS_SPARSE
                    // A is sparse
                    int64_t pA = Ap [i] ;
                    const int64_t pA_end = Ap [i+1] ;
                    const int64_t ainz = pA_end - pA ;
                    if (ainz > 0)
                    #else
                    // A is bitmap or full
                    const int64_t pA = i * vlen ;
                    #endif
                    {
                        // C(i,j) = A(:,i)'*B(:,j)
                        // GB_AxB_dot_cij.c sets Cb[pC] and task_cnvals if the
                        // dot product produces an entry
                        bool cij_exists = false ;
                        GB_CIJ_DECLARE (cij) ;
                        #include "GB_AxB_dot_cij.c"
                    }
                }
            }
        }
        cnvals += task_cnvals ;
    }
}
#endif
#undef GB_A_IS_SPARSE
#undef GB_A_IS_HYPER
#undef GB_A_IS_BITMAP
#undef GB_A_IS_FULL
#undef GB_B_IS_SPARSE
#undef GB_B_IS_HYPER
#undef GB_B_IS_BITMAP
#undef GB_B_IS_FULL
|
elastic_avx.h | //*****************************************************************************
// Title : src/equation_avx/elastic_avx.h
// Author : Tanabe Yuta
// Date : 2021/02/13
// Copyright : (C)2021 TanabeYuta
//*****************************************************************************
#pragma once
#include <immintrin.h>
// compile option for g++(MinGW) : -mavx
namespace PANSLBM2 {
namespace EL {
template<class T, template<class>class P>void Macro(T, T &, T &, T &, T &, T &, T &, const T *, int); // Function of updating macroscopic values of EL for 2D
template<class T, template<class>class P>void Macro(T, T &, T &, T &, T &, T &, T &, const T *, T, int); // Function of updating macroscopic values of EL with topology optimization for 2D
template<class T, template<class>class P>void Equilibrium(T *, T, T, T, T, T, T, T); // Function of getting equilibrium of EL for 2D
// Update the 2D elastic-LBM macroscopic values from the distribution __f:
// momentum sums give (__ux, __uy) (divided by __rho at the end), and the
// negated second moments give the stress components (__sxx, __sxy, __syx, __syy).
template<class P>
void Macro(const __m256d &__rho, __m256d &__ux, __m256d & __uy, __m256d &__sxx, __m256d &__sxy, __m256d &__syx, __m256d &__syy, const __m256d *__f) {
    __ux = _mm256_setzero_pd();
    __uy = _mm256_setzero_pd();
    __sxx = _mm256_setzero_pd();
    __sxy = _mm256_setzero_pd();
    __syx = _mm256_setzero_pd();
    __syy = _mm256_setzero_pd();
    for (int d = 1; d < P::nc; ++d) {
        const __m256d __fd = __f[d];
        // first moments -> momentum
        __ux = _mm256_add_pd(__ux, _mm256_mul_pd(P::__cx[d], __fd));
        __uy = _mm256_add_pd(__uy, _mm256_mul_pd(P::__cy[d], __fd));
        // second moments (negated) -> stress; same operation trees as before
        const __m256d __cxx = _mm256_mul_pd(P::__cx[d], P::__cx[d]);
        const __m256d __cxy = _mm256_mul_pd(P::__cx[d], P::__cy[d]);
        const __m256d __cyx = _mm256_mul_pd(P::__cy[d], P::__cx[d]);
        const __m256d __cyy = _mm256_mul_pd(P::__cy[d], P::__cy[d]);
        __sxx = _mm256_sub_pd(__sxx, _mm256_mul_pd(__cxx, __fd));
        __sxy = _mm256_sub_pd(__sxy, _mm256_mul_pd(__cxy, __fd));
        __syx = _mm256_sub_pd(__syx, _mm256_mul_pd(__cyx, __fd));
        __syy = _mm256_sub_pd(__syy, _mm256_mul_pd(__cyy, __fd));
    }
    // velocity = momentum / density
    const __m256d __rhoinv = _mm256_div_pd(_mm256_set1_pd(1.0), __rho);
    __ux = _mm256_mul_pd(__ux, __rhoinv);
    __uy = _mm256_mul_pd(__uy, __rhoinv);
}
// Function of updating macroscopic values of EL with topology optimization
// for 2D: same as Macro above, but the stress components are scaled by the
// design field __gamma afterwards. (Original comment wrongly duplicated the
// plain overload's description.)
template<class P>
void Macro(const __m256d &__rho, __m256d &__ux, __m256d & __uy, __m256d &__sxx, __m256d &__sxy, __m256d &__syx, __m256d &__syy, const __m256d *__f, const __m256d &__gamma) {
    Macro<P>(__rho, __ux, __uy, __sxx, __sxy, __syx, __syy, __f);
    __sxx = _mm256_mul_pd(__sxx, __gamma);
    __sxy = _mm256_mul_pd(__sxy, __gamma);
    __syx = _mm256_mul_pd(__syx, __gamma);
    __syy = _mm256_mul_pd(__syy, __gamma);
}
// Function of getting equilibrium of EL for 2D:
// __feq[c] = ei[c]*( 3*rho*(c.u) - 4.5*(c (x) c):sigma + 1.5*tr(sigma) )
template<class P>
void Equilibrium(__m256d *__feq, const __m256d &__rho, const __m256d &__ux, const __m256d &__uy, const __m256d &__sxx, const __m256d &__sxy, const __m256d &__syx, const __m256d &__syy) {
    __m256d __trs = _mm256_add_pd(__sxx, __syy);    // trace of the stress tensor
    for (int c = 0; c < P::nc; ++c) {
        // c . u
        __m256d __cu = _mm256_add_pd(_mm256_mul_pd(P::__cx[c], __ux), _mm256_mul_pd(P::__cy[c], __uy));
        // (c (x) c) : sigma, double contraction with all four stress components
        __m256d __csc = _mm256_add_pd(_mm256_add_pd(_mm256_mul_pd(_mm256_mul_pd(P::__cx[c], P::__cx[c]), __sxx), _mm256_mul_pd(_mm256_mul_pd(P::__cx[c], P::__cy[c]), __sxy)), _mm256_add_pd(_mm256_mul_pd(_mm256_mul_pd(P::__cy[c], P::__cx[c]), __syx), _mm256_mul_pd(_mm256_mul_pd(P::__cy[c], P::__cy[c]), __syy)));
        // BUG FIX: __ei was unqualified (no such name in this scope); the
        // lattice weights are static members of the particle class P, like
        // P::__cx / P::__cy used above
        __feq[c] = _mm256_mul_pd(P::__ei[c], _mm256_add_pd(_mm256_sub_pd(_mm256_mul_pd(_mm256_set1_pd(3.0), _mm256_mul_pd(__rho, __cu)), _mm256_mul_pd(_mm256_set1_pd(4.5), __csc)), _mm256_mul_pd(_mm256_set1_pd(1.5), __trs)));
    }
}
// Update macroscopic values and perform the BGK collision step for the 2D
// elastic LBM. The bulk of the lattice is processed in AVX packs of
// P<double>::packsize sites; the remainder is handled by a scalar tail loop.
// If _issave is true the macroscopic fields are also written back to the
// caller's arrays.
template<template<class>class P>
void MacroCollide(P<double>& _p, double *_rho, double *_ux, double *_uy, double *_sxx, double *_sxy, double *_syx, double *_syy, double _tau, bool _issave = false) {
    const int ne = _p.nxyz/P<double>::packsize;  // number of full AVX packs
    double omega = 1.0/_tau, iomega = 1.0 - omega, feq[P<double>::nc];
    __m256d __omega = _mm256_set1_pd(omega), __iomega = _mm256_set1_pd(iomega), __feq[P<double>::nc];
#pragma omp parallel for private(__feq)
    for (int pidx = 0; pidx < ne; ++pidx) {
        int idx = pidx*P<double>::packsize;
        // Pack f0, f and rho
        __m256d __f[P<double>::nc];
        _p.LoadF(idx, __f);
        __m256d __rho = _mm256_loadu_pd(&_rho[idx]);
        // Update macro
        __m256d __ux, __uy, __sxx, __sxy, __syx, __syy;
        Macro<P<double> >(__rho, __ux, __uy, __sxx, __sxy, __syx, __syy, __f);
        // Save macro if need
        if (_issave) {
            _mm256_storeu_pd(&_ux[idx], __ux);
            _mm256_storeu_pd(&_uy[idx], __uy);
            _mm256_storeu_pd(&_sxx[idx], __sxx);
            _mm256_storeu_pd(&_sxy[idx], __sxy);
            _mm256_storeu_pd(&_syx[idx], __syx);
            _mm256_storeu_pd(&_syy[idx], __syy);
        }
        // Collide: f <- (1-omega)*f + omega*feq
        Equilibrium<P<double> >(__feq, __rho, __ux, __uy, __sxx, __sxy, __syx, __syy);
        for (int c = 0; c < P<double>::nc; ++c) {
            __f[c] = _mm256_add_pd(_mm256_mul_pd(__iomega, __f[c]), _mm256_mul_pd(__omega, __feq[c]));
        }
        _p.StoreF(idx, __f);
    }
    // Scalar tail: lattice sites that do not fill a complete AVX pack
    for (int idx = ne*P<double>::packsize; idx < _p.nxyz; ++idx) {
        // Update macro
        // BUG FIX: was "T ux, ..." but T is not a template parameter of this
        // function (only P is) — did not compile; the scalar path uses double
        double ux, uy, sxx, sxy, syx, syy;
        Macro<double, P>(_rho[idx], ux, uy, sxx, sxy, syx, syy, _p.f, idx);
        // Save macro if need
        if (_issave) {
            _ux[idx] = ux;
            _uy[idx] = uy;
            _sxx[idx] = sxx;
            _sxy[idx] = sxy;
            _syx[idx] = syx;
            _syy[idx] = syy;
        }
        // Collide
        Equilibrium<double, P>(feq, _rho[idx], ux, uy, sxx, sxy, syx, syy);
        _p.f0[idx] = iomega*_p.f0[idx] + omega*feq[0];
        for (int c = 1; c < P<double>::nc; ++c) {
            int idxf = P<double>::IndexF(idx, c);
            _p.f[idxf] = iomega*_p.f[idxf] + omega*feq[c];
        }
    }
}
// Update macroscopic values and perform the BGK collision step for the 2D
// elastic LBM with topology optimization: identical to MacroCollide, except
// that the stresses are scaled by the per-site design field _gamma (via the
// gamma overload of Macro). If _issave is true the macroscopic fields are
// also written back to the caller's arrays.
template<template<class>class P>
void MacroExtendedCollide(P<double>& _p, double *_rho, double *_ux, double *_uy, double *_sxx, double *_sxy, double *_syx, double *_syy, double _tau, const double *_gamma, bool _issave = false) {
    // BUG FIX: _gamma was declared "const T *" but T is not a template
    // parameter of this function — did not compile; it is read with
    // _mm256_loadu_pd and indexed as a double array, so the type is double.
    const int ne = _p.nxyz/P<double>::packsize;  // number of full AVX packs
    double omega = 1.0/_tau, iomega = 1.0 - omega, feq[P<double>::nc];
    __m256d __omega = _mm256_set1_pd(omega), __iomega = _mm256_set1_pd(iomega), __feq[P<double>::nc];
#pragma omp parallel for private(__feq)
    for (int pidx = 0; pidx < ne; ++pidx) {
        int idx = pidx*P<double>::packsize;
        // Pack f0, f and rho
        __m256d __f[P<double>::nc];
        _p.LoadF(idx, __f);
        __m256d __rho = _mm256_loadu_pd(&_rho[idx]), __gamma = _mm256_loadu_pd(&_gamma[idx]);
        // Update macro
        __m256d __ux, __uy, __sxx, __sxy, __syx, __syy;
        Macro<P<double> >(__rho, __ux, __uy, __sxx, __sxy, __syx, __syy, __f, __gamma);
        // Save macro if need
        if (_issave) {
            _mm256_storeu_pd(&_ux[idx], __ux);
            _mm256_storeu_pd(&_uy[idx], __uy);
            _mm256_storeu_pd(&_sxx[idx], __sxx);
            _mm256_storeu_pd(&_sxy[idx], __sxy);
            _mm256_storeu_pd(&_syx[idx], __syx);
            _mm256_storeu_pd(&_syy[idx], __syy);
        }
        // Collide: f <- (1-omega)*f + omega*feq
        Equilibrium<P<double> >(__feq, __rho, __ux, __uy, __sxx, __sxy, __syx, __syy);
        for (int c = 0; c < P<double>::nc; ++c) {
            __f[c] = _mm256_add_pd(_mm256_mul_pd(__iomega, __f[c]), _mm256_mul_pd(__omega, __feq[c]));
        }
        _p.StoreF(idx, __f);
    }
    // Scalar tail: lattice sites that do not fill a complete AVX pack
    for (int idx = ne*P<double>::packsize; idx < _p.nxyz; ++idx) {
        // Update macro
        double ux, uy, sxx, sxy, syx, syy;
        Macro<double, P>(_rho[idx], ux, uy, sxx, sxy, syx, syy, _p.f, _gamma[idx], idx);
        // Save macro if need
        if (_issave) {
            _ux[idx] = ux;
            _uy[idx] = uy;
            _sxx[idx] = sxx;
            _sxy[idx] = sxy;
            _syx[idx] = syx;
            _syy[idx] = syy;
        }
        // Collide
        Equilibrium<double, P>(feq, _rho[idx], ux, uy, sxx, sxy, syx, syy);
        _p.f0[idx] = iomega*_p.f0[idx] + omega*feq[0];
        for (int c = 1; c < P<double>::nc; ++c) {
            int idxf = P<double>::IndexF(idx, c);
            _p.f[idxf] = iomega*_p.f[idxf] + omega*feq[c];
        }
    }
}
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.